-rw-r--r--gcc/ChangeLog362
-rw-r--r--gcc/alias.c6
-rw-r--r--gcc/alloc-pool.c8
-rw-r--r--gcc/alloc-pool.h2
-rw-r--r--gcc/attribs.c2
-rw-r--r--gcc/auto-inc-dec.c212
-rw-r--r--gcc/basic-block.h2
-rw-r--r--gcc/bb-reorder.c4
-rw-r--r--gcc/bt-load.c2
-rw-r--r--gcc/builtins.c262
-rw-r--r--gcc/builtins.def2
-rw-r--r--gcc/c-common.c160
-rw-r--r--gcc/c-common.h6
-rw-r--r--gcc/c-cppbuiltin.c24
-rw-r--r--gcc/c-decl.c48
-rw-r--r--gcc/c-format.c2
-rw-r--r--gcc/c-lex.c12
-rw-r--r--gcc/c-omp.c6
-rw-r--r--gcc/c-opts.c10
-rw-r--r--gcc/c-parser.c56
-rw-r--r--gcc/c-pretty-print.c4
-rw-r--r--gcc/c-tree.h2
-rw-r--r--gcc/c-typeck.c64
-rw-r--r--gcc/caller-save.c24
-rw-r--r--gcc/calls.c4
-rw-r--r--gcc/cfg.c24
-rw-r--r--gcc/cfganal.c46
-rw-r--r--gcc/cfgexpand.c20
-rw-r--r--gcc/cfghooks.c2
-rw-r--r--gcc/cfghooks.h2
-rw-r--r--gcc/cfglayout.c4
-rw-r--r--gcc/cfgloop.c14
-rw-r--r--gcc/cfgloop.h6
-rw-r--r--gcc/cfgloopmanip.c22
-rw-r--r--gcc/cfgrtl.c8
-rw-r--r--gcc/cgraph.c8
-rw-r--r--gcc/cgraph.h6
-rw-r--r--gcc/cgraphbuild.c6
-rw-r--r--gcc/cgraphunit.c20
-rw-r--r--gcc/cif-code.def2
-rw-r--r--gcc/collect2.c24
-rw-r--r--gcc/combine.c16
-rw-r--r--gcc/convert.c6
-rw-r--r--gcc/coverage.c8
-rw-r--r--gcc/crtstuff.c6
-rw-r--r--gcc/cse.c40
-rw-r--r--gcc/cselib.c46
-rw-r--r--gcc/dbgcnt.c10
-rw-r--r--gcc/dbgcnt.def4
-rw-r--r--gcc/dbgcnt.h2
-rw-r--r--gcc/dbxout.c40
-rw-r--r--gcc/dce.c32
-rw-r--r--gcc/ddg.c10
-rw-r--r--gcc/ddg.h2
-rw-r--r--gcc/defaults.h6
-rw-r--r--gcc/df-byte-scan.c62
-rw-r--r--gcc/df-core.c178
-rw-r--r--gcc/df-problems.c552
-rw-r--r--gcc/df-scan.c580
-rw-r--r--gcc/df.h100
-rw-r--r--gcc/dfp.c22
-rw-r--r--gcc/diagnostic.c18
-rw-r--r--gcc/diagnostic.h4
-rw-r--r--gcc/dominance.c30
-rw-r--r--gcc/domwalk.c24
-rw-r--r--gcc/double-int.c14
-rw-r--r--gcc/double-int.h12
-rw-r--r--gcc/dse.c386
-rw-r--r--gcc/dwarf2asm.c4
-rw-r--r--gcc/dwarf2asm.h2
-rw-r--r--gcc/dwarf2out.c28
-rw-r--r--gcc/ebitmap.c16
-rw-r--r--gcc/ebitmap.h6
-rw-r--r--gcc/emit-rtl.c38
-rw-r--r--gcc/et-forest.c12
-rw-r--r--gcc/except.c12
-rw-r--r--gcc/except.h2
-rw-r--r--gcc/expmed.c24
-rw-r--r--gcc/expr.c18
-rw-r--r--gcc/expr.h6
-rw-r--r--gcc/final.c2
-rw-r--r--gcc/flags.h10
-rw-r--r--gcc/fold-const.c114
-rw-r--r--gcc/function.c42
-rw-r--r--gcc/function.h10
-rw-r--r--gcc/fwprop.c2
-rw-r--r--gcc/gcc.c16
-rw-r--r--gcc/gcov-dump.c8
-rw-r--r--gcc/gcov-io.c22
-rw-r--r--gcc/gcov-io.h12
-rw-r--r--gcc/gcov.c10
-rw-r--r--gcc/gcse.c32
-rw-r--r--gcc/genattr.c2
-rw-r--r--gcc/genattrtab.c22
-rw-r--r--gcc/genautomata.c48
-rw-r--r--gcc/genchecksum.c6
-rw-r--r--gcc/genconfig.c8
-rw-r--r--gcc/genflags.c4
-rw-r--r--gcc/gengtype-parse.c12
-rw-r--r--gcc/gengtype.c46
-rw-r--r--gcc/gengtype.h4
-rw-r--r--gcc/genmddeps.c2
-rw-r--r--gcc/genmodes.c10
-rw-r--r--gcc/genopinit.c10
-rw-r--r--gcc/genpreds.c34
-rw-r--r--gcc/gensupport.c10
-rw-r--r--gcc/ggc-common.c12
-rw-r--r--gcc/ggc-page.c22
-rw-r--r--gcc/ggc-zone.c20
-rw-r--r--gcc/ggc.h2
-rw-r--r--gcc/gimple-iterator.c6
-rw-r--r--gcc/gimple-low.c10
-rw-r--r--gcc/gimple-pretty-print.c6
-rw-r--r--gcc/gimple.c58
-rw-r--r--gcc/gimple.def10
-rw-r--r--gcc/gimple.h18
-rw-r--r--gcc/gimplify.c54
-rw-r--r--gcc/graphds.c4
-rw-r--r--gcc/graphite-clast-to-gimple.c2
-rw-r--r--gcc/gthr-nks.h2
-rw-r--r--gcc/gthr-posix.c8
-rw-r--r--gcc/gthr-posix.h4
-rw-r--r--gcc/gthr-posix95.h2
-rw-r--r--gcc/gthr-single.h8
-rw-r--r--gcc/gthr-tpf.h4
-rw-r--r--gcc/gthr-vxworks.h2
-rw-r--r--gcc/gthr.h14
-rw-r--r--gcc/haifa-sched.c354
-rw-r--r--gcc/hard-reg-set.h10
-rw-r--r--gcc/hooks.c2
-rw-r--r--gcc/hooks.h2
-rw-r--r--gcc/hosthooks.h2
-rw-r--r--gcc/hwint.h4
-rw-r--r--gcc/ifcvt.c10
-rw-r--r--gcc/incpath.c10
-rw-r--r--gcc/init-regs.c10
-rw-r--r--gcc/integrate.c2
-rw-r--r--gcc/ipa-cp.c36
-rw-r--r--gcc/ipa-inline.c46
-rw-r--r--gcc/ipa-prop.c2
-rw-r--r--gcc/ipa-pure-const.c96
-rw-r--r--gcc/ipa-reference.c250
-rw-r--r--gcc/ipa-struct-reorg.c706
-rw-r--r--gcc/ipa-struct-reorg.h2
-rw-r--r--gcc/ipa-type-escape.c432
-rw-r--r--gcc/ipa-type-escape.h2
-rw-r--r--gcc/ipa-utils.c62
-rw-r--r--gcc/ipa-utils.h2
-rw-r--r--gcc/ipa.c8
-rw-r--r--gcc/ira-build.c28
-rw-r--r--gcc/ira-color.c22
-rw-r--r--gcc/ira-conflicts.c16
-rw-r--r--gcc/ira-costs.c12
-rw-r--r--gcc/ira-emit.c8
-rw-r--r--gcc/ira-int.h22
-rw-r--r--gcc/ira-lives.c76
-rw-r--r--gcc/ira.c134
-rw-r--r--gcc/jump.c6
-rw-r--r--gcc/lambda-code.c200
-rw-r--r--gcc/lambda-mat.c10
-rw-r--r--gcc/lambda-trans.c10
-rw-r--r--gcc/lambda.h26
-rw-r--r--gcc/langhooks.c6
-rw-r--r--gcc/lcm.c2
-rw-r--r--gcc/libgcov.c64
-rw-r--r--gcc/lists.c4
-rw-r--r--gcc/loop-doloop.c8
-rw-r--r--gcc/loop-init.c8
-rw-r--r--gcc/loop-invariant.c50
-rw-r--r--gcc/loop-iv.c48
-rw-r--r--gcc/loop-unroll.c240
-rw-r--r--gcc/lower-subreg.c2
-rw-r--r--gcc/lto-cgraph.c42
-rw-r--r--gcc/lto-compress.c2
-rw-r--r--gcc/lto-opts.c2
-rw-r--r--gcc/lto-section-in.c34
-rw-r--r--gcc/lto-section-out.c10
-rw-r--r--gcc/lto-streamer-in.c70
-rw-r--r--gcc/lto-streamer-out.c28
-rw-r--r--gcc/lto-streamer.c8
-rw-r--r--gcc/lto-streamer.h30
-rw-r--r--gcc/lto-symtab.c2
-rw-r--r--gcc/lto-wpa-fixup.c10
-rw-r--r--gcc/matrix-reorg.c140
-rw-r--r--gcc/mcf.c6
-rw-r--r--gcc/mode-switching.c4
-rw-r--r--gcc/modulo-sched.c60
-rw-r--r--gcc/omega.c86
-rw-r--r--gcc/omega.h8
-rw-r--r--gcc/omp-low.c76
-rw-r--r--gcc/optabs.c60
-rw-r--r--gcc/optabs.h2
-rw-r--r--gcc/opts-common.c2
-rw-r--r--gcc/opts.c20
-rw-r--r--gcc/params.def24
-rw-r--r--gcc/params.h4
-rw-r--r--gcc/passes.c50
-rw-r--r--gcc/plugin.c4
-rw-r--r--gcc/postreload-gcse.c12
-rw-r--r--gcc/postreload.c2
-rw-r--r--gcc/predict.c30
-rw-r--r--gcc/predict.def2
-rw-r--r--gcc/pretty-print.c6
-rw-r--r--gcc/pretty-print.h16
-rw-r--r--gcc/print-rtl.c2
-rw-r--r--gcc/print-tree.c26
-rw-r--r--gcc/profile.c8
-rw-r--r--gcc/read-rtl.c4
-rw-r--r--gcc/real.c50
-rw-r--r--gcc/recog.c24
-rw-r--r--gcc/reg-stack.c8
-rw-r--r--gcc/regcprop.c4
-rw-r--r--gcc/reginfo.c6
-rw-r--r--gcc/regmove.c6
-rw-r--r--gcc/regrename.c6
-rw-r--r--gcc/regs.h2
-rw-r--r--gcc/regstat.c60
-rw-r--r--gcc/reload.c52
-rw-r--r--gcc/reload1.c30
-rw-r--r--gcc/resource.c2
-rw-r--r--gcc/rtl.c8
-rw-r--r--gcc/rtl.def40
-rw-r--r--gcc/rtl.h12
-rw-r--r--gcc/rtlanal.c30
-rw-r--r--gcc/sbitmap.c38
-rw-r--r--gcc/sched-deps.c132
-rw-r--r--gcc/sched-ebb.c16
-rw-r--r--gcc/sched-int.h18
-rw-r--r--gcc/sched-rgn.c140
-rw-r--r--gcc/sched-vis.c8
-rw-r--r--gcc/sdbout.c2
-rw-r--r--gcc/sel-sched-dump.c44
-rw-r--r--gcc/sel-sched-dump.h22
-rw-r--r--gcc/sel-sched-ir.c732
-rw-r--r--gcc/sel-sched-ir.h118
-rw-r--r--gcc/sel-sched.c1376
-rw-r--r--gcc/sel-sched.h2
-rw-r--r--gcc/sese.c34
-rw-r--r--gcc/sese.h26
-rw-r--r--gcc/simplify-rtx.c114
-rw-r--r--gcc/stack-ptr-mod.c2
-rw-r--r--gcc/stmt.c2
-rw-r--r--gcc/stor-layout.c2
-rw-r--r--gcc/store-motion.c10
-rw-r--r--gcc/stringpool.c2
-rw-r--r--gcc/stub-objc.c8
-rw-r--r--gcc/sync-builtins.def2
-rw-r--r--gcc/target-def.h4
-rw-r--r--gcc/target.h16
-rw-r--r--gcc/targhooks.c12
-rw-r--r--gcc/targhooks.h2
-rw-r--r--gcc/timevar.c2
-rw-r--r--gcc/tlink.c2
-rw-r--r--gcc/toplev.c16
-rw-r--r--gcc/toplev.h2
-rw-r--r--gcc/tracer.c2
-rw-r--r--gcc/tree-affine.c18
-rw-r--r--gcc/tree-affine.h14
-rw-r--r--gcc/tree-browser.def8
-rw-r--r--gcc/tree-call-cdce.c124
-rw-r--r--gcc/tree-cfg.c132
-rw-r--r--gcc/tree-cfgcleanup.c6
-rw-r--r--gcc/tree-chrec.c352
-rw-r--r--gcc/tree-chrec.h38
-rw-r--r--gcc/tree-complex.c26
-rw-r--r--gcc/tree-data-ref.c558
-rw-r--r--gcc/tree-data-ref.h56
-rw-r--r--gcc/tree-dfa.c22
-rw-r--r--gcc/tree-dump.c6
-rw-r--r--gcc/tree-dump.h2
-rw-r--r--gcc/tree-eh.c8
-rw-r--r--gcc/tree-flow-inline.h36
-rw-r--r--gcc/tree-flow.h22
-rw-r--r--gcc/tree-if-conv.c12
-rw-r--r--gcc/tree-inline.c74
-rw-r--r--gcc/tree-into-ssa.c46
-rw-r--r--gcc/tree-loop-distribution.c14
-rw-r--r--gcc/tree-loop-linear.c54
-rw-r--r--gcc/tree-mudflap.c44
-rw-r--r--gcc/tree-nested.c32
-rw-r--r--gcc/tree-nomudflap.c4
-rw-r--r--gcc/tree-nrv.c10
-rw-r--r--gcc/tree-object-size.c8
-rw-r--r--gcc/tree-optimize.c10
-rw-r--r--gcc/tree-outof-ssa.c54
-rw-r--r--gcc/tree-parloops.c160
-rw-r--r--gcc/tree-pass.h6
-rw-r--r--gcc/tree-phinodes.c4
-rw-r--r--gcc/tree-predcom.c50
-rw-r--r--gcc/tree-pretty-print.c12
-rw-r--r--gcc/tree-profile.c64
-rw-r--r--gcc/tree-scalar-evolution.c576
-rw-r--r--gcc/tree-ssa-address.c24
-rw-r--r--gcc/tree-ssa-alias.c4
-rw-r--r--gcc/tree-ssa-ccp.c54
-rw-r--r--gcc/tree-ssa-coalesce.c114
-rw-r--r--gcc/tree-ssa-copy.c26
-rw-r--r--gcc/tree-ssa-copyrename.c42
-rw-r--r--gcc/tree-ssa-dce.c22
-rw-r--r--gcc/tree-ssa-dom.c64
-rw-r--r--gcc/tree-ssa-dse.c8
-rw-r--r--gcc/tree-ssa-forwprop.c30
-rw-r--r--gcc/tree-ssa-ifcombine.c2
-rw-r--r--gcc/tree-ssa-live.c66
-rw-r--r--gcc/tree-ssa-live.h54
-rw-r--r--gcc/tree-ssa-loop-ch.c12
-rw-r--r--gcc/tree-ssa-loop-im.c24
-rw-r--r--gcc/tree-ssa-loop-ivcanon.c22
-rw-r--r--gcc/tree-ssa-loop-ivopts.c72
-rw-r--r--gcc/tree-ssa-loop-manip.c26
-rw-r--r--gcc/tree-ssa-loop-niter.c84
-rw-r--r--gcc/tree-ssa-loop-prefetch.c112
-rw-r--r--gcc/tree-ssa-loop-unswitch.c12
-rw-r--r--gcc/tree-ssa-loop.c26
-rw-r--r--gcc/tree-ssa-math-opts.c14
-rw-r--r--gcc/tree-ssa-operands.c84
-rw-r--r--gcc/tree-ssa-operands.h28
-rw-r--r--gcc/tree-ssa-phiopt.c18
-rw-r--r--gcc/tree-ssa-phiprop.c6
-rw-r--r--gcc/tree-ssa-pre.c8
-rw-r--r--gcc/tree-ssa-propagate.c12
-rw-r--r--gcc/tree-ssa-reassoc.c52
-rw-r--r--gcc/tree-ssa-sccvn.c44
-rw-r--r--gcc/tree-ssa-sink.c60
-rw-r--r--gcc/tree-ssa-structalias.c20
-rw-r--r--gcc/tree-ssa-ter.c106
-rw-r--r--gcc/tree-ssa-threadedge.c20
-rw-r--r--gcc/tree-ssa-threadupdate.c6
-rw-r--r--gcc/tree-ssa-uncprop.c8
-rw-r--r--gcc/tree-ssa.c26
-rw-r--r--gcc/tree-ssanames.c14
-rw-r--r--gcc/tree-switch-conversion.c4
-rw-r--r--gcc/tree-tailcall.c28
-rw-r--r--gcc/tree-vect-data-refs.c462
-rw-r--r--gcc/tree-vect-generic.c20
-rw-r--r--gcc/tree-vect-loop-manip.c246
-rw-r--r--gcc/tree-vect-loop.c386
-rw-r--r--gcc/tree-vect-patterns.c90
-rw-r--r--gcc/tree-vect-slp.c394
-rw-r--r--gcc/tree-vect-stmts.c618
-rw-r--r--gcc/tree-vectorizer.c48
-rw-r--r--gcc/tree-vectorizer.h116
-rw-r--r--gcc/tree-vrp.c94
-rw-r--r--gcc/tree.c130
-rw-r--r--gcc/tree.def28
-rw-r--r--gcc/tree.h14
-rw-r--r--gcc/treestruct.def6
-rw-r--r--gcc/unwind-compat.c2
-rw-r--r--gcc/unwind-dw2-fde-glibc.c10
-rw-r--r--gcc/unwind-dw2.c14
-rw-r--r--gcc/value-prof.c46
-rw-r--r--gcc/value-prof.h2
-rw-r--r--gcc/var-tracking.c32
-rw-r--r--gcc/varasm.c108
-rw-r--r--gcc/varpool.c12
-rw-r--r--gcc/vec.c18
-rw-r--r--gcc/vec.h42
-rw-r--r--gcc/vmsdbgout.c2
-rw-r--r--gcc/web.c6
-rw-r--r--gcc/xcoffout.c2
360 files changed, 9633 insertions, 9271 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 16356a33d9b..e2e25069b59 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,365 @@
+2009-11-25 H.J. Lu <hongjiu.lu@intel.com>
+
+ * alias.c: Remove trailing white spaces.
+ * alloc-pool.c: Likewise.
+ * alloc-pool.h: Likewise.
+ * attribs.c: Likewise.
+ * auto-inc-dec.c: Likewise.
+ * basic-block.h: Likewise.
+ * bb-reorder.c: Likewise.
+ * bt-load.c: Likewise.
+ * builtins.c: Likewise.
+ * builtins.def: Likewise.
+ * c-common.c: Likewise.
+ * c-common.h: Likewise.
+ * c-cppbuiltin.c: Likewise.
+ * c-decl.c: Likewise.
+ * c-format.c: Likewise.
+ * c-lex.c: Likewise.
+ * c-omp.c: Likewise.
+ * c-opts.c: Likewise.
+ * c-parser.c: Likewise.
+ * c-pretty-print.c: Likewise.
+ * c-tree.h: Likewise.
+ * c-typeck.c: Likewise.
+ * caller-save.c: Likewise.
+ * calls.c: Likewise.
+ * cfg.c: Likewise.
+ * cfganal.c: Likewise.
+ * cfgexpand.c: Likewise.
+ * cfghooks.c: Likewise.
+ * cfghooks.h: Likewise.
+ * cfglayout.c: Likewise.
+ * cfgloop.c: Likewise.
+ * cfgloop.h: Likewise.
+ * cfgloopmanip.c: Likewise.
+ * cfgrtl.c: Likewise.
+ * cgraph.c: Likewise.
+ * cgraph.h: Likewise.
+ * cgraphbuild.c: Likewise.
+ * cgraphunit.c: Likewise.
+ * cif-code.def: Likewise.
+ * collect2.c: Likewise.
+ * combine.c: Likewise.
+ * convert.c: Likewise.
+ * coverage.c: Likewise.
+ * crtstuff.c: Likewise.
+ * cse.c: Likewise.
+ * cselib.c: Likewise.
+ * dbgcnt.c: Likewise.
+ * dbgcnt.def: Likewise.
+ * dbgcnt.h: Likewise.
+ * dbxout.c: Likewise.
+ * dce.c: Likewise.
+ * ddg.c: Likewise.
+ * ddg.h: Likewise.
+ * defaults.h: Likewise.
+ * df-byte-scan.c: Likewise.
+ * df-core.c: Likewise.
+ * df-problems.c: Likewise.
+ * df-scan.c: Likewise.
+ * df.h: Likewise.
+ * dfp.c: Likewise.
+ * diagnostic.c: Likewise.
+ * diagnostic.h: Likewise.
+ * dominance.c: Likewise.
+ * domwalk.c: Likewise.
+ * double-int.c: Likewise.
+ * double-int.h: Likewise.
+ * dse.c: Likewise.
+ * dwarf2asm.c: Likewise.
+ * dwarf2asm.h: Likewise.
+ * dwarf2out.c: Likewise.
+ * ebitmap.c: Likewise.
+ * ebitmap.h: Likewise.
+ * emit-rtl.c: Likewise.
+ * et-forest.c: Likewise.
+ * except.c: Likewise.
+ * except.h: Likewise.
+ * expmed.c: Likewise.
+ * expr.c: Likewise.
+ * expr.h: Likewise.
+ * final.c: Likewise.
+ * flags.h: Likewise.
+ * fold-const.c: Likewise.
+ * function.c: Likewise.
+ * function.h: Likewise.
+ * fwprop.c: Likewise.
+ * gcc.c: Likewise.
+ * gcov-dump.c: Likewise.
+ * gcov-io.c: Likewise.
+ * gcov-io.h: Likewise.
+ * gcov.c: Likewise.
+ * gcse.c: Likewise.
+ * genattr.c: Likewise.
+ * genattrtab.c: Likewise.
+ * genautomata.c: Likewise.
+ * genchecksum.c: Likewise.
+ * genconfig.c: Likewise.
+ * genflags.c: Likewise.
+ * gengtype-parse.c: Likewise.
+ * gengtype.c: Likewise.
+ * gengtype.h: Likewise.
+ * genmddeps.c: Likewise.
+ * genmodes.c: Likewise.
+ * genopinit.c: Likewise.
+ * genpreds.c: Likewise.
+ * gensupport.c: Likewise.
+ * ggc-common.c: Likewise.
+ * ggc-page.c: Likewise.
+ * ggc-zone.c: Likewise.
+ * ggc.h: Likewise.
+ * gimple-iterator.c: Likewise.
+ * gimple-low.c: Likewise.
+ * gimple-pretty-print.c: Likewise.
+ * gimple.c: Likewise.
+ * gimple.def: Likewise.
+ * gimple.h: Likewise.
+ * gimplify.c: Likewise.
+ * graphds.c: Likewise.
+ * graphite-clast-to-gimple.c: Likewise.
+ * gthr-nks.h: Likewise.
+ * gthr-posix.c: Likewise.
+ * gthr-posix.h: Likewise.
+ * gthr-posix95.h: Likewise.
+ * gthr-single.h: Likewise.
+ * gthr-tpf.h: Likewise.
+ * gthr-vxworks.h: Likewise.
+ * gthr.h: Likewise.
+ * haifa-sched.c: Likewise.
+ * hard-reg-set.h: Likewise.
+ * hooks.c: Likewise.
+ * hooks.h: Likewise.
+ * hosthooks.h: Likewise.
+ * hwint.h: Likewise.
+ * ifcvt.c: Likewise.
+ * incpath.c: Likewise.
+ * init-regs.c: Likewise.
+ * integrate.c: Likewise.
+ * ipa-cp.c: Likewise.
+ * ipa-inline.c: Likewise.
+ * ipa-prop.c: Likewise.
+ * ipa-pure-const.c: Likewise.
+ * ipa-reference.c: Likewise.
+ * ipa-struct-reorg.c: Likewise.
+ * ipa-struct-reorg.h: Likewise.
+ * ipa-type-escape.c: Likewise.
+ * ipa-type-escape.h: Likewise.
+ * ipa-utils.c: Likewise.
+ * ipa-utils.h: Likewise.
+ * ipa.c: Likewise.
+ * ira-build.c: Likewise.
+ * ira-color.c: Likewise.
+ * ira-conflicts.c: Likewise.
+ * ira-costs.c: Likewise.
+ * ira-emit.c: Likewise.
+ * ira-int.h: Likewise.
+ * ira-lives.c: Likewise.
+ * ira.c: Likewise.
+ * jump.c: Likewise.
+ * lambda-code.c: Likewise.
+ * lambda-mat.c: Likewise.
+ * lambda-trans.c: Likewise.
+ * lambda.h: Likewise.
+ * langhooks.c: Likewise.
+ * lcm.c: Likewise.
+ * libgcov.c: Likewise.
+ * lists.c: Likewise.
+ * loop-doloop.c: Likewise.
+ * loop-init.c: Likewise.
+ * loop-invariant.c: Likewise.
+ * loop-iv.c: Likewise.
+ * loop-unroll.c: Likewise.
+ * lower-subreg.c: Likewise.
+ * lto-cgraph.c: Likewise.
+ * lto-compress.c: Likewise.
+ * lto-opts.c: Likewise.
+ * lto-section-in.c: Likewise.
+ * lto-section-out.c: Likewise.
+ * lto-streamer-in.c: Likewise.
+ * lto-streamer-out.c: Likewise.
+ * lto-streamer.c: Likewise.
+ * lto-streamer.h: Likewise.
+ * lto-symtab.c: Likewise.
+ * lto-wpa-fixup.c: Likewise.
+ * matrix-reorg.c: Likewise.
+ * mcf.c: Likewise.
+ * mode-switching.c: Likewise.
+ * modulo-sched.c: Likewise.
+ * omega.c: Likewise.
+ * omega.h: Likewise.
+ * omp-low.c: Likewise.
+ * optabs.c: Likewise.
+ * optabs.h: Likewise.
+ * opts-common.c: Likewise.
+ * opts.c: Likewise.
+ * params.def: Likewise.
+ * params.h: Likewise.
+ * passes.c: Likewise.
+ * plugin.c: Likewise.
+ * postreload-gcse.c: Likewise.
+ * postreload.c: Likewise.
+ * predict.c: Likewise.
+ * predict.def: Likewise.
+ * pretty-print.c: Likewise.
+ * pretty-print.h: Likewise.
+ * print-rtl.c: Likewise.
+ * print-tree.c: Likewise.
+ * profile.c: Likewise.
+ * read-rtl.c: Likewise.
+ * real.c: Likewise.
+ * recog.c: Likewise.
+ * reg-stack.c: Likewise.
+ * regcprop.c: Likewise.
+ * reginfo.c: Likewise.
+ * regmove.c: Likewise.
+ * regrename.c: Likewise.
+ * regs.h: Likewise.
+ * regstat.c: Likewise.
+ * reload.c: Likewise.
+ * reload1.c: Likewise.
+ * resource.c: Likewise.
+ * rtl.c: Likewise.
+ * rtl.def: Likewise.
+ * rtl.h: Likewise.
+ * rtlanal.c: Likewise.
+ * sbitmap.c: Likewise.
+ * sched-deps.c: Likewise.
+ * sched-ebb.c: Likewise.
+ * sched-int.h: Likewise.
+ * sched-rgn.c: Likewise.
+ * sched-vis.c: Likewise.
+ * sdbout.c: Likewise.
+ * sel-sched-dump.c: Likewise.
+ * sel-sched-dump.h: Likewise.
+ * sel-sched-ir.c: Likewise.
+ * sel-sched-ir.h: Likewise.
+ * sel-sched.c: Likewise.
+ * sel-sched.h: Likewise.
+ * sese.c: Likewise.
+ * sese.h: Likewise.
+ * simplify-rtx.c: Likewise.
+ * stack-ptr-mod.c: Likewise.
+ * stmt.c: Likewise.
+ * stor-layout.c: Likewise.
+ * store-motion.c: Likewise.
+ * stringpool.c: Likewise.
+ * stub-objc.c: Likewise.
+ * sync-builtins.def: Likewise.
+ * target-def.h: Likewise.
+ * target.h: Likewise.
+ * targhooks.c: Likewise.
+ * targhooks.h: Likewise.
+ * timevar.c: Likewise.
+ * tlink.c: Likewise.
+ * toplev.c: Likewise.
+ * toplev.h: Likewise.
+ * tracer.c: Likewise.
+ * tree-affine.c: Likewise.
+ * tree-affine.h: Likewise.
+ * tree-browser.def: Likewise.
+ * tree-call-cdce.c: Likewise.
+ * tree-cfg.c: Likewise.
+ * tree-cfgcleanup.c: Likewise.
+ * tree-chrec.c: Likewise.
+ * tree-chrec.h: Likewise.
+ * tree-complex.c: Likewise.
+ * tree-data-ref.c: Likewise.
+ * tree-data-ref.h: Likewise.
+ * tree-dfa.c: Likewise.
+ * tree-dump.c: Likewise.
+ * tree-dump.h: Likewise.
+ * tree-eh.c: Likewise.
+ * tree-flow-inline.h: Likewise.
+ * tree-flow.h: Likewise.
+ * tree-if-conv.c: Likewise.
+ * tree-inline.c: Likewise.
+ * tree-into-ssa.c: Likewise.
+ * tree-loop-distribution.c: Likewise.
+ * tree-loop-linear.c: Likewise.
+ * tree-mudflap.c: Likewise.
+ * tree-nested.c: Likewise.
+ * tree-nomudflap.c: Likewise.
+ * tree-nrv.c: Likewise.
+ * tree-object-size.c: Likewise.
+ * tree-optimize.c: Likewise.
+ * tree-outof-ssa.c: Likewise.
+ * tree-parloops.c: Likewise.
+ * tree-pass.h: Likewise.
+ * tree-phinodes.c: Likewise.
+ * tree-predcom.c: Likewise.
+ * tree-pretty-print.c: Likewise.
+ * tree-profile.c: Likewise.
+ * tree-scalar-evolution.c: Likewise.
+ * tree-ssa-address.c: Likewise.
+ * tree-ssa-alias.c: Likewise.
+ * tree-ssa-ccp.c: Likewise.
+ * tree-ssa-coalesce.c: Likewise.
+ * tree-ssa-copy.c: Likewise.
+ * tree-ssa-copyrename.c: Likewise.
+ * tree-ssa-dce.c: Likewise.
+ * tree-ssa-dom.c: Likewise.
+ * tree-ssa-dse.c: Likewise.
+ * tree-ssa-forwprop.c: Likewise.
+ * tree-ssa-ifcombine.c: Likewise.
+ * tree-ssa-live.c: Likewise.
+ * tree-ssa-live.h: Likewise.
+ * tree-ssa-loop-ch.c: Likewise.
+ * tree-ssa-loop-im.c: Likewise.
+ * tree-ssa-loop-ivcanon.c: Likewise.
+ * tree-ssa-loop-ivopts.c: Likewise.
+ * tree-ssa-loop-manip.c: Likewise.
+ * tree-ssa-loop-niter.c: Likewise.
+ * tree-ssa-loop-prefetch.c: Likewise.
+ * tree-ssa-loop-unswitch.c: Likewise.
+ * tree-ssa-loop.c: Likewise.
+ * tree-ssa-math-opts.c: Likewise.
+ * tree-ssa-operands.c: Likewise.
+ * tree-ssa-operands.h: Likewise.
+ * tree-ssa-phiopt.c: Likewise.
+ * tree-ssa-phiprop.c: Likewise.
+ * tree-ssa-pre.c: Likewise.
+ * tree-ssa-propagate.c: Likewise.
+ * tree-ssa-reassoc.c: Likewise.
+ * tree-ssa-sccvn.c: Likewise.
+ * tree-ssa-sink.c: Likewise.
+ * tree-ssa-structalias.c: Likewise.
+ * tree-ssa-ter.c: Likewise.
+ * tree-ssa-threadedge.c: Likewise.
+ * tree-ssa-threadupdate.c: Likewise.
+ * tree-ssa-uncprop.c: Likewise.
+ * tree-ssa.c: Likewise.
+ * tree-ssanames.c: Likewise.
+ * tree-switch-conversion.c: Likewise.
+ * tree-tailcall.c: Likewise.
+ * tree-vect-data-refs.c: Likewise.
+ * tree-vect-generic.c: Likewise.
+ * tree-vect-loop-manip.c: Likewise.
+ * tree-vect-loop.c: Likewise.
+ * tree-vect-patterns.c: Likewise.
+ * tree-vect-slp.c: Likewise.
+ * tree-vect-stmts.c: Likewise.
+ * tree-vectorizer.c: Likewise.
+ * tree-vectorizer.h: Likewise.
+ * tree-vrp.c: Likewise.
+ * tree.c: Likewise.
+ * tree.def: Likewise.
+ * tree.h: Likewise.
+ * treestruct.def: Likewise.
+ * unwind-compat.c: Likewise.
+ * unwind-dw2-fde-glibc.c: Likewise.
+ * unwind-dw2.c: Likewise.
+ * value-prof.c: Likewise.
+ * value-prof.h: Likewise.
+ * var-tracking.c: Likewise.
+ * varasm.c: Likewise.
+ * varpool.c: Likewise.
+ * vec.c: Likewise.
+ * vec.h: Likewise.
+ * vmsdbgout.c: Likewise.
+ * web.c: Likewise.
+ * xcoffout.c: Likewise.
+
2009-11-24 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>

* pa.c (output_call): Only use sr4 for long interspace calls if
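
The entry above records a purely mechanical, tree-wide removal of trailing whitespace; the hunks that follow differ only in blanks at end of line. The ChangeLog does not say how the cleanup was generated, so the small C filter below is only an illustrative sketch of the operation, not the tool that was actually used:

    #include <stdio.h>
    #include <string.h>

    /* Copy stdin to stdout, dropping trailing spaces and tabs from each
       line.  Long lines are handled in fgets-sized chunks, so a chunk
       boundary falling inside a line is the one case this sketch gets
       wrong.  */
    int
    main (void)
    {
      char line[8192];

      while (fgets (line, sizeof line, stdin))
        {
          size_t len = strlen (line);
          int newline = len > 0 && line[len - 1] == '\n';

          if (newline)
            len--;
          while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
            len--;

          fwrite (line, 1, len, stdout);
          if (newline)
            putchar ('\n');
        }
      return 0;
    }

In practice a one-line sed invocation or an editor macro does the same job; the point is only that nothing in the hunks below changes behavior.
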
diff --git a/gcc/alias.c b/gcc/alias.c
index cdfa6d2d3ac..41a2f2318f7 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -452,8 +452,8 @@ walk_mems_2 (rtx *x, rtx mem)
{
if (alias_sets_conflict_p (MEM_ALIAS_SET(*x), MEM_ALIAS_SET(mem)))
return 1;
-
- return -1;
+
+ return -1;
}
return 0;
}
@@ -467,7 +467,7 @@ walk_mems_1 (rtx *x, rtx *pat)
if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
/* Indicate that dependence was determined and stop traversal. */
return 1;
-
+
return -1;
}
return 0;
diff --git a/gcc/alloc-pool.c b/gcc/alloc-pool.c
index cb5d83d23ee..6eecef53ee2 100644
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -274,7 +274,7 @@ pool_alloc (alloc_pool pool)
block = XNEWVEC (char, pool->block_size);
block_header = (alloc_pool_list) block;
block += align_eight (sizeof (struct alloc_pool_list_def));
-
+
/* Throw it on the block list. */
block_header->next = pool->block_list;
pool->block_list = block_header;
@@ -290,7 +290,7 @@ pool_alloc (alloc_pool pool)
pool->blocks_allocated += 1;
}
-
+
/* We now know that we can take the first elt off the virgin list and
put it on the returned list. */
block = pool->virgin_free_list;
@@ -374,8 +374,8 @@ print_statistics (void **slot, void *b)
if (d->allocated)
{
fprintf (stderr, "%-22s %6d %10lu %10lu(%10lu) %10lu(%10lu) %10lu(%10lu)\n", d->name,
- d->elt_size, d->created, d->allocated, d->allocated / d->elt_size,
- d->peak, d->peak / d->elt_size,
+ d->elt_size, d->created, d->allocated, d->allocated / d->elt_size,
+ d->peak, d->peak / d->elt_size,
d->current, d->current / d->elt_size);
i->total_allocated += d->allocated;
i->total_created += d->created;
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 1fc3c575093..fa9848a2b9b 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -45,7 +45,7 @@ typedef struct alloc_pool_def
char* virgin_free_list;
/* The number of elements in the virgin_free_list that can be
- allocated before needing another block. */
+ allocated before needing another block. */
size_t virgin_elts_remaining;
size_t elts_allocated;
diff --git a/gcc/attribs.c b/gcc/attribs.c
index 5ae462e7287..9f2f50bdfd9 100644
--- a/gcc/attribs.c
+++ b/gcc/attribs.c
@@ -192,7 +192,7 @@ init_attributes (void)
/* Insert a single ATTR into the attribute table. */
void
-register_attribute (const struct attribute_spec *attr)
+register_attribute (const struct attribute_spec *attr)
{
struct substring str;
void **slot;
diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c
index 3b3006c985f..830fc5e09f0 100644
--- a/gcc/auto-inc-dec.c
+++ b/gcc/auto-inc-dec.c
@@ -1,7 +1,7 @@
/* Discovery of auto-inc and auto-dec instructions.
Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
-
+
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
@@ -74,7 +74,7 @@ along with GCC; see the file COPYING3. If not see
...
b <- a + c
- (For this case to be true, b must not be assigned or used between
+ (For this case to be true, b must not be assigned or used between
the *a and the assignment to b. B must also be a Pmode reg.)
becomes
@@ -104,9 +104,9 @@ along with GCC; see the file COPYING3. If not see
by the pointer. This is useful for machines that have
HAVE_PRE_MODIFY_DISP, HAVE_POST_MODIFY_DISP defined.
- 3) c is a register. This is useful for machines that have
- HAVE_PRE_MODIFY_REG, HAVE_POST_MODIFY_REG
-
+ 3) c is a register. This is useful for machines that have
+ HAVE_PRE_MODIFY_REG, HAVE_POST_MODIFY_REG
+
The is one special case: if a already had an offset equal to it +-
its width and that offset is equal to -c when the increment was
before the ref or +c if the increment was after the ref, then if we
@@ -129,7 +129,7 @@ enum form
ANY is used for constants that are not +-size or 0. REG is used if
the forms are reg1 + reg2. */
-enum inc_state
+enum inc_state
{
INC_ZERO, /* == 0 */
INC_NEG_SIZE, /* == +size */
@@ -295,7 +295,7 @@ init_decision_table (void)
/* Parsed fields of an inc insn of the form "reg_res = reg0+reg1" or
"reg_res = reg0+c". */
-static struct inc_insn
+static struct inc_insn
{
rtx insn; /* The insn being parsed. */
rtx pat; /* The pattern of the insn. */
@@ -311,10 +311,10 @@ static struct inc_insn
/* Dump the parsed inc insn to FILE. */
-static void
+static void
dump_inc_insn (FILE *file)
{
- const char *f = ((inc_insn.form == FORM_PRE_ADD)
+ const char *f = ((inc_insn.form == FORM_PRE_ADD)
|| (inc_insn.form == FORM_PRE_INC)) ? "pre" : "post";
dump_insn_slim (file, inc_insn.insn);
@@ -324,26 +324,26 @@ dump_inc_insn (FILE *file)
case FORM_PRE_ADD:
case FORM_POST_ADD:
if (inc_insn.reg1_is_const)
- fprintf (file, "found %s add(%d) r[%d]=r[%d]+%d\n",
- f, INSN_UID (inc_insn.insn),
- REGNO (inc_insn.reg_res),
+ fprintf (file, "found %s add(%d) r[%d]=r[%d]+%d\n",
+ f, INSN_UID (inc_insn.insn),
+ REGNO (inc_insn.reg_res),
REGNO (inc_insn.reg0), (int) inc_insn.reg1_val);
else
- fprintf (file, "found %s add(%d) r[%d]=r[%d]+r[%d]\n",
- f, INSN_UID (inc_insn.insn),
- REGNO (inc_insn.reg_res),
+ fprintf (file, "found %s add(%d) r[%d]=r[%d]+r[%d]\n",
+ f, INSN_UID (inc_insn.insn),
+ REGNO (inc_insn.reg_res),
REGNO (inc_insn.reg0), REGNO (inc_insn.reg1));
break;
-
+
case FORM_PRE_INC:
case FORM_POST_INC:
if (inc_insn.reg1_is_const)
- fprintf (file, "found %s inc(%d) r[%d]+=%d\n",
- f, INSN_UID (inc_insn.insn),
+ fprintf (file, "found %s inc(%d) r[%d]+=%d\n",
+ f, INSN_UID (inc_insn.insn),
REGNO (inc_insn.reg_res), (int) inc_insn.reg1_val);
else
- fprintf (file, "found %s inc(%d) r[%d]+=r[%d]\n",
- f, INSN_UID (inc_insn.insn),
+ fprintf (file, "found %s inc(%d) r[%d]+=r[%d]\n",
+ f, INSN_UID (inc_insn.insn),
REGNO (inc_insn.reg_res), REGNO (inc_insn.reg1));
break;
@@ -372,18 +372,18 @@ static struct mem_insn
/* Dump the parsed mem insn to FILE. */
-static void
+static void
dump_mem_insn (FILE *file)
{
dump_insn_slim (file, mem_insn.insn);
if (mem_insn.reg1_is_const)
- fprintf (file, "found mem(%d) *(r[%d]+%d)\n",
- INSN_UID (mem_insn.insn),
+ fprintf (file, "found mem(%d) *(r[%d]+%d)\n",
+ INSN_UID (mem_insn.insn),
REGNO (mem_insn.reg0), (int) mem_insn.reg1_val);
else
- fprintf (file, "found mem(%d) *(r[%d]+r[%d])\n",
- INSN_UID (mem_insn.insn),
+ fprintf (file, "found mem(%d) *(r[%d]+r[%d])\n",
+ INSN_UID (mem_insn.insn),
REGNO (mem_insn.reg0), REGNO (mem_insn.reg1));
}
@@ -409,17 +409,17 @@ static rtx *reg_next_def = NULL;
insn. Moving the REG_EQUAL and REG_EQUIV is clearly wrong and it
does not appear that there are any other kinds of relevant notes. */
-static void
+static void
move_dead_notes (rtx to_insn, rtx from_insn, rtx pattern)
{
- rtx note;
+ rtx note;
rtx next_note;
rtx prev_note = NULL;
for (note = REG_NOTES (from_insn); note; note = next_note)
{
next_note = XEXP (note, 1);
-
+
if ((REG_NOTE_KIND (note) == REG_DEAD)
&& pattern == XEXP (note, 0))
{
@@ -451,7 +451,7 @@ insert_move_insn_before (rtx next_insn, rtx dest_reg, rtx src_reg)
return insns;
}
-
+
/* Change mem_insn.mem_loc so that uses NEW_ADDR which has an
increment of INC_REG. To have reached this point, the change is a
legitimate one from a dataflow point of view. The only questions
@@ -504,7 +504,7 @@ attempt_change (rtx new_addr, rtx inc_reg)
if (! validate_change (mem_insn.insn, mem_insn.mem_loc, new_mem, 0))
{
if (dump_file)
- fprintf (dump_file, "validation failure\n");
+ fprintf (dump_file, "validation failure\n");
return false;
}
@@ -517,7 +517,7 @@ attempt_change (rtx new_addr, rtx inc_reg)
/* Replace the addition with a move. Do it at the location of
the addition since the operand of the addition may change
before the memory reference. */
- mov_insn = insert_move_insn_before (inc_insn.insn,
+ mov_insn = insert_move_insn_before (inc_insn.insn,
inc_insn.reg_res, inc_insn.reg0);
move_dead_notes (mov_insn, inc_insn.insn, inc_insn.reg0);
@@ -543,7 +543,7 @@ attempt_change (rtx new_addr, rtx inc_reg)
break;
case FORM_POST_ADD:
- mov_insn = insert_move_insn_before (mem_insn.insn,
+ mov_insn = insert_move_insn_before (mem_insn.insn,
inc_insn.reg_res, inc_insn.reg0);
move_dead_notes (mov_insn, inc_insn.insn, inc_insn.reg0);
@@ -603,7 +603,7 @@ attempt_change (rtx new_addr, rtx inc_reg)
Assuming the form is ok, a prototype new address is built which is
passed to ATTEMPT_CHANGE for final processing. */
-static bool
+static bool
try_merge (void)
{
enum gen_form gen_form;
@@ -648,13 +648,13 @@ try_merge (void)
return false;
}
- mem_insn.reg1_state = (mem_insn.reg1_is_const)
+ mem_insn.reg1_state = (mem_insn.reg1_is_const)
? set_inc_state (mem_insn.reg1_val, size) : INC_REG;
inc_insn.reg1_state = (inc_insn.reg1_is_const)
? set_inc_state (inc_insn.reg1_val, size) : INC_REG;
/* Now get the form that we are generating. */
- gen_form = decision_table
+ gen_form = decision_table
[inc_insn.reg1_state][mem_insn.reg1_state][inc_insn.form];
if (dbg_cnt (auto_inc_dec) == false)
@@ -671,25 +671,25 @@ try_merge (void)
fprintf (dump_file, "trying SIMPLE_PRE_INC\n");
return attempt_change (gen_rtx_PRE_INC (reg_mode, inc_reg), inc_reg);
break;
-
+
case SIMPLE_POST_INC: /* size++ */
if (dump_file)
fprintf (dump_file, "trying SIMPLE_POST_INC\n");
return attempt_change (gen_rtx_POST_INC (reg_mode, inc_reg), inc_reg);
break;
-
+
case SIMPLE_PRE_DEC: /* --size */
if (dump_file)
fprintf (dump_file, "trying SIMPLE_PRE_DEC\n");
return attempt_change (gen_rtx_PRE_DEC (reg_mode, inc_reg), inc_reg);
break;
-
+
case SIMPLE_POST_DEC: /* size-- */
if (dump_file)
fprintf (dump_file, "trying SIMPLE_POST_DEC\n");
return attempt_change (gen_rtx_POST_DEC (reg_mode, inc_reg), inc_reg);
break;
-
+
case DISP_PRE: /* ++con */
if (dump_file)
fprintf (dump_file, "trying DISP_PRE\n");
@@ -700,7 +700,7 @@ try_merge (void)
inc_insn.reg1)),
inc_reg);
break;
-
+
case DISP_POST: /* con++ */
if (dump_file)
fprintf (dump_file, "trying POST_DISP\n");
@@ -711,7 +711,7 @@ try_merge (void)
inc_insn.reg1)),
inc_reg);
break;
-
+
case REG_PRE: /* ++reg */
if (dump_file)
fprintf (dump_file, "trying PRE_REG\n");
@@ -722,7 +722,7 @@ try_merge (void)
inc_insn.reg1)),
inc_reg);
break;
-
+
case REG_POST: /* reg++ */
if (dump_file)
fprintf (dump_file, "trying POST_REG\n");
@@ -758,10 +758,10 @@ get_next_ref (int regno, basic_block bb, rtx *next_array)
/* Reverse the operands in a mem insn. */
-static void
+static void
reverse_mem (void)
{
- rtx tmp = mem_insn.reg1;
+ rtx tmp = mem_insn.reg1;
mem_insn.reg1 = mem_insn.reg0;
mem_insn.reg0 = tmp;
}
@@ -769,10 +769,10 @@ reverse_mem (void)
/* Reverse the operands in a inc insn. */
-static void
+static void
reverse_inc (void)
{
- rtx tmp = inc_insn.reg1;
+ rtx tmp = inc_insn.reg1;
inc_insn.reg1 = inc_insn.reg0;
inc_insn.reg0 = tmp;
}
@@ -780,8 +780,8 @@ reverse_inc (void)
/* Return true if INSN is of a form "a = b op c" where a and b are
regs. op is + if c is a reg and +|- if c is a const. Fill in
- INC_INSN with what is found.
-
+ INC_INSN with what is found.
+
This function is called in two contexts, if BEFORE_MEM is true,
this is called for each insn in the basic block. If BEFORE_MEM is
false, it is called for the instruction in the block that uses the
@@ -812,7 +812,7 @@ parse_add_or_inc (rtx insn, bool before_mem)
inc_insn.reg0 = XEXP (SET_SRC (pat), 0);
if (rtx_equal_p (inc_insn.reg_res, inc_insn.reg0))
inc_insn.form = before_mem ? FORM_PRE_INC : FORM_POST_INC;
- else
+ else
inc_insn.form = before_mem ? FORM_PRE_ADD : FORM_POST_ADD;
if (CONST_INT_P (XEXP (SET_SRC (pat), 1)))
@@ -838,8 +838,8 @@ parse_add_or_inc (rtx insn, bool before_mem)
/* Process a = b + c where c is a reg. */
inc_insn.reg1 = XEXP (SET_SRC (pat), 1);
inc_insn.reg1_is_const = false;
-
- if (inc_insn.form == FORM_PRE_INC
+
+ if (inc_insn.form == FORM_PRE_INC
|| inc_insn.form == FORM_POST_INC)
return true;
else if (rtx_equal_p (inc_insn.reg_res, inc_insn.reg1))
@@ -850,7 +850,7 @@ parse_add_or_inc (rtx insn, bool before_mem)
inc_insn.form = before_mem ? FORM_PRE_INC : FORM_POST_INC;
return true;
}
- else
+ else
return true;
}
@@ -862,7 +862,7 @@ parse_add_or_inc (rtx insn, bool before_mem)
ADDRESS_OF_X to see if any single one of them is compatible with
what has been found in inc_insn.
- -1 is returned for success. 0 is returned if nothing was found and
+ -1 is returned for success. 0 is returned if nothing was found and
1 is returned for failure. */
static int
@@ -897,15 +897,15 @@ find_address (rtx *address_of_x)
{
/* Match with *(reg0 + reg1) where reg1 is a const. */
HOST_WIDE_INT val = INTVAL (b);
- if (inc_insn.reg1_is_const
+ if (inc_insn.reg1_is_const
&& (inc_insn.reg1_val == val || inc_insn.reg1_val == -val))
{
mem_insn.reg1_val = val;
return -1;
}
}
- else if (!inc_insn.reg1_is_const
- && rtx_equal_p (inc_insn.reg1, b))
+ else if (!inc_insn.reg1_is_const
+ && rtx_equal_p (inc_insn.reg1, b))
/* Match with *(reg0 + reg1). */
return -1;
}
@@ -965,7 +965,7 @@ find_address (rtx *address_of_x)
add of the second register. The FIRST_TRY parameter is used to
only allow the parameters to be reversed once. */
-static bool
+static bool
find_inc (bool first_try)
{
rtx insn;
@@ -977,7 +977,7 @@ find_inc (bool first_try)
if (count_occurrences (PATTERN (mem_insn.insn), mem_insn.reg0, 1) != 1)
{
if (dump_file)
- fprintf (dump_file, "mem count failure\n");
+ fprintf (dump_file, "mem count failure\n");
return false;
}
@@ -985,8 +985,8 @@ find_inc (bool first_try)
dump_mem_insn (dump_file);
/* Find the next use that is an inc. */
- insn = get_next_ref (REGNO (mem_insn.reg0),
- BASIC_BLOCK (BLOCK_NUM (mem_insn.insn)),
+ insn = get_next_ref (REGNO (mem_insn.reg0),
+ BASIC_BLOCK (BLOCK_NUM (mem_insn.insn)),
reg_next_inc_use);
if (!insn)
return false;
@@ -997,11 +997,11 @@ find_inc (bool first_try)
{
/* Next use was not an add. Look for one extra case. It could be
that we have:
-
+
*(a + b)
...= a;
...= b + a
-
+
if we reverse the operands in the mem ref we would
find this. Only try it once though. */
if (first_try && !mem_insn.reg1_is_const)
@@ -1013,13 +1013,13 @@ find_inc (bool first_try)
return false;
}
- /* Need to assure that none of the operands of the inc instruction are
+ /* Need to assure that none of the operands of the inc instruction are
assigned to by the mem insn. */
for (def_rec = DF_INSN_DEFS (mem_insn.insn); *def_rec; def_rec++)
{
df_ref def = *def_rec;
unsigned int regno = DF_REF_REGNO (def);
- if ((regno == REGNO (inc_insn.reg0))
+ if ((regno == REGNO (inc_insn.reg0))
|| (regno == REGNO (inc_insn.reg_res)))
{
if (dump_file)
@@ -1041,26 +1041,26 @@ find_inc (bool first_try)
{
/* Make sure that there is no insn that assigns to inc_insn.res
between the mem_insn and the inc_insn. */
- rtx other_insn = get_next_ref (REGNO (inc_insn.reg_res),
- BASIC_BLOCK (BLOCK_NUM (mem_insn.insn)),
+ rtx other_insn = get_next_ref (REGNO (inc_insn.reg_res),
+ BASIC_BLOCK (BLOCK_NUM (mem_insn.insn)),
reg_next_def);
if (other_insn != inc_insn.insn)
{
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
"result of add is assigned to between mem and inc insns.\n");
return false;
}
- other_insn = get_next_ref (REGNO (inc_insn.reg_res),
- BASIC_BLOCK (BLOCK_NUM (mem_insn.insn)),
+ other_insn = get_next_ref (REGNO (inc_insn.reg_res),
+ BASIC_BLOCK (BLOCK_NUM (mem_insn.insn)),
reg_next_use);
- if (other_insn
+ if (other_insn
&& (other_insn != inc_insn.insn)
&& (DF_INSN_LUID (inc_insn.insn) > DF_INSN_LUID (other_insn)))
{
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
"result of add is used between mem and inc insns.\n");
return false;
}
@@ -1087,7 +1087,7 @@ find_inc (bool first_try)
int luid = DF_INSN_LUID (inc_insn.insn);
if (inc_insn.form == FORM_POST_ADD)
{
- /* The trick is that we are not going to increment r0,
+ /* The trick is that we are not going to increment r0,
we are going to increment the result of the add insn.
For this trick to be correct, the result reg of
the inc must be a valid addressing reg. */
@@ -1102,16 +1102,16 @@ find_inc (bool first_try)
/* We also need to make sure that the next use of
inc result is after the inc. */
- other_insn
+ other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_use);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
if (!rtx_equal_p (mem_insn.reg0, inc_insn.reg0))
- reverse_inc ();
+ reverse_inc ();
}
- other_insn
+ other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
@@ -1130,14 +1130,14 @@ find_inc (bool first_try)
need to treat it as if it was *(b + a). It may also be that
the add is of the form a + c where c does not match b and
then we just abandon this. */
-
+
int luid = DF_INSN_LUID (inc_insn.insn);
rtx other_insn;
-
+
/* Make sure this reg appears only once in this insn. */
if (count_occurrences (PATTERN (mem_insn.insn), mem_insn.reg1, 1) != 1)
return false;
-
+
if (inc_insn.form == FORM_POST_ADD)
{
/* For this trick to be correct, the result reg of the inc
@@ -1167,7 +1167,7 @@ find_inc (bool first_try)
/* Need to check that there are no assignments to b
before the add insn. */
- other_insn
+ other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
@@ -1188,14 +1188,14 @@ find_inc (bool first_try)
}
/* To have gotten here know that.
*(b + a)
-
+
... = (b + a)
-
+
We also know that the lhs of the inc is not b or a. We
need to make sure that there are no assignments to b
- between the mem ref and the inc. */
-
- other_insn
+ between the mem ref and the inc. */
+
+ other_insn
= get_next_ref (REGNO (inc_insn.reg0), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
@@ -1203,13 +1203,13 @@ find_inc (bool first_try)
/* Need to check that the next use of the add result is later than
add insn since this will be the reg incremented. */
- other_insn
+ other_insn
= get_next_ref (REGNO (inc_insn.reg_res), bb, reg_next_use);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
}
else /* FORM_POST_INC. There is less to check here because we
- know that operands must line up. */
+ know that operands must line up. */
{
if (!rtx_equal_p (mem_insn.reg1, inc_insn.reg1))
/* See comment above on find_inc (false) call. */
@@ -1219,19 +1219,19 @@ find_inc (bool first_try)
reverse_mem ();
return find_inc (false);
}
- else
+ else
return false;
}
-
+
/* To have gotten here know that.
*(a + b)
-
+
... = (a + b)
-
+
We also know that the lhs of the inc is not b. We need to make
sure that there are no assignments to b between the mem ref and
the inc. */
- other_insn
+ other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
if (other_insn && luid > DF_INSN_LUID (other_insn))
return false;
@@ -1240,7 +1240,7 @@ find_inc (bool first_try)
if (inc_insn.form == FORM_POST_INC)
{
- other_insn
+ other_insn
= get_next_ref (REGNO (inc_insn.reg0), bb, reg_next_use);
/* When we found inc_insn, we were looking for the
next add or inc, not the next insn that used the
@@ -1348,10 +1348,10 @@ merge_in_block (int max_reg, basic_block bb)
bool insn_is_add_or_inc = true;
if (!NONDEBUG_INSN_P (insn))
- continue;
+ continue;
/* This continue is deliberate. We do not want the uses of the
- jump put into reg_next_use because it is not considered safe to
+ jump put into reg_next_use because it is not considered safe to
combine a preincrement with a jump. */
if (JUMP_P (insn))
continue;
@@ -1381,22 +1381,22 @@ merge_in_block (int max_reg, basic_block bb)
clear of c because the inc insn is going to move
into the mem_insn.insn. */
int luid = DF_INSN_LUID (mem_insn.insn);
- rtx other_insn
+ rtx other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_use);
-
+
if (other_insn && luid > DF_INSN_LUID (other_insn))
ok = false;
-
- other_insn
+
+ other_insn
= get_next_ref (REGNO (inc_insn.reg1), bb, reg_next_def);
-
+
if (other_insn && luid > DF_INSN_LUID (other_insn))
ok = false;
}
-
+
if (dump_file)
dump_inc_insn (dump_file);
-
+
if (ok && find_address (&PATTERN (mem_insn.insn)) == -1)
{
if (dump_file)
@@ -1417,7 +1417,7 @@ merge_in_block (int max_reg, basic_block bb)
if (find_mem (&PATTERN (insn)))
success_in_block++;
}
-
+
/* If the inc insn was merged with a mem, the inc insn is gone
and there is noting to update. */
if (DF_INSN_UID_GET (uid))
@@ -1432,7 +1432,7 @@ merge_in_block (int max_reg, basic_block bb)
reg_next_inc_use[DF_REF_REGNO (def)] = NULL;
reg_next_def[DF_REF_REGNO (def)] = insn;
}
-
+
for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
{
df_ref use = *use_rec;
@@ -1441,7 +1441,7 @@ merge_in_block (int max_reg, basic_block bb)
reg_next_inc_use[DF_REF_REGNO (use)] = insn;
else
reg_next_inc_use[DF_REF_REGNO (use)] = NULL;
- }
+ }
}
else if (dump_file)
fprintf (dump_file, "skipping update of deleted insn %d\n", uid);
@@ -1464,7 +1464,7 @@ merge_in_block (int max_reg, basic_block bb)
#endif
-static unsigned int
+static unsigned int
rest_of_handle_auto_inc_dec (void)
{
#ifdef AUTO_INC_DEC
@@ -1523,7 +1523,7 @@ struct rtl_opt_pass pass_inc_dec =
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func |
+ TODO_dump_func |
TODO_df_finish, /* todo_flags_finish */
}
};
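
The header comment of auto-inc-dec.c above describes the pattern this pass looks for: a memory reference through a register followed (or preceded) by an add of a constant or register to that same register, which can be fused into a single pre- or post-modify address on targets that provide such addressing modes. The fragment below is only a C-level sketch of that shape, assuming a target with post-increment addressing; the real transformation is performed on RTL addressing modes (POST_INC and friends), not on C source:

    /* Illustrative only: the two statements in the loop body are the
       "*a ... a <- a + c" pair described above.  On a suitable target
       the pass rewrites the RTL so that the access and the increment
       share one POST_INC address, i.e. conceptually "s += *a++;".  */
    int
    sum_ints (const int *a, int n)
    {
      int s = 0;
      int i;

      for (i = 0; i < n; i++)
        {
          s += *a;      /* memory reference through a              */
          a += 1;       /* a <- a + c, with c the access size      */
        }
      return s;
    }
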
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 2cc3e91daa2..3d9b6727cf9 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -463,7 +463,7 @@ struct GTY(()) control_flow_graph {
for ((INSN) = BB_HEAD (BB), (CURR) = (INSN) ? NEXT_INSN ((INSN)): NULL; \
(INSN) && (INSN) != NEXT_INSN (BB_END (BB)); \
(INSN) = (CURR), (CURR) = (INSN) ? NEXT_INSN ((INSN)) : NULL)
-
+
#define FOR_BB_INSNS_REVERSE(BB, INSN) \
for ((INSN) = BB_END (BB); \
(INSN) && (INSN) != PREV_INSN (BB_HEAD (BB)); \
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 47828bf28c1..561d7d00444 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -1374,7 +1374,7 @@ fix_up_fall_thru_edges (void)
edge_iterator ei;
/* Find EDGE_CAN_FALLTHRU edge. */
- FOR_EACH_EDGE (e, ei, cur_bb->succs)
+ FOR_EACH_EDGE (e, ei, cur_bb->succs)
if (e->flags & EDGE_CAN_FALLTHRU)
{
fall_thru = e;
@@ -1438,7 +1438,7 @@ fix_up_fall_thru_edges (void)
/* This is the case where both edges out of the basic
block are crossing edges. Here we will fix up the
fall through edge. The jump edge will be taken care
- of later. The EDGE_CROSSING flag of fall_thru edge
+ of later. The EDGE_CROSSING flag of fall_thru edge
is unset before the call to force_nonfallthru
function because if a new basic-block is created
this edge remains in the current section boundary
diff --git a/gcc/bt-load.c b/gcc/bt-load.c
index be36f2c6e4a..5e3d12c359a 100644
--- a/gcc/bt-load.c
+++ b/gcc/bt-load.c
@@ -1417,7 +1417,7 @@ migrate_btr_defs (enum reg_class btr_class, int allow_callee_save)
CLEAR_HARD_REG_SET (all_btrs);
for (first_btr = -1, reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
if (TEST_HARD_REG_BIT (reg_class_contents[(int) btr_class], reg)
- && (allow_callee_save || call_used_regs[reg]
+ && (allow_callee_save || call_used_regs[reg]
|| df_regs_ever_live_p (reg)))
{
SET_HARD_REG_BIT (all_btrs, reg);
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 835d78888ff..cfdf5d5c535 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -118,7 +118,7 @@ static rtx expand_builtin_strncmp (tree, rtx, enum machine_mode);
static rtx builtin_memcpy_read_str (void *, HOST_WIDE_INT, enum machine_mode);
static rtx expand_builtin_memcpy (tree, rtx);
static rtx expand_builtin_mempcpy (tree, rtx, enum machine_mode);
-static rtx expand_builtin_mempcpy_args (tree, tree, tree, rtx,
+static rtx expand_builtin_mempcpy_args (tree, tree, tree, rtx,
enum machine_mode, int);
static rtx expand_builtin_strcpy (tree, rtx);
static rtx expand_builtin_strcpy_args (tree, tree, rtx);
@@ -280,7 +280,7 @@ get_object_alignment (tree exp, unsigned int align, unsigned int max_align)
{
HOST_WIDE_INT bitsize, bitpos;
tree offset;
- enum machine_mode mode;
+ enum machine_mode mode;
int unsignedp, volatilep;
exp = get_inner_reference (exp, &bitsize, &bitpos, &offset,
@@ -829,7 +829,7 @@ expand_builtin_longjmp (rtx buf_addr, rtx value)
rtx fp, lab, stack, insn, last;
enum machine_mode sa_mode = STACK_SAVEAREA_MODE (SAVE_NONLOCAL);
- /* DRAP is needed for stack realign if longjmp is expanded to current
+ /* DRAP is needed for stack realign if longjmp is expanded to current
function */
if (SUPPORTS_STACK_ALIGNMENT)
crtl->need_drap = true;
@@ -2430,7 +2430,7 @@ expand_builtin_cexpi (tree exp, rtx target, rtx subtarget)
fn = built_in_decls[BUILT_IN_SINCOSL];
else
gcc_unreachable ();
-
+
op1 = assign_temp (TREE_TYPE (arg), 0, 1, 1);
op2 = assign_temp (TREE_TYPE (arg), 0, 1, 1);
op1a = copy_to_mode_reg (Pmode, XEXP (op1, 0));
@@ -2481,7 +2481,7 @@ expand_builtin_cexpi (tree exp, rtx target, rtx subtarget)
/* Make sure not to fold the cexp call again. */
call = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn);
- return expand_expr (build_call_nary (ctype, call, 1, narg),
+ return expand_expr (build_call_nary (ctype, call, 1, narg),
target, VOIDmode, EXPAND_NORMAL);
}
@@ -2662,7 +2662,7 @@ expand_builtin_int_roundingfn_2 (tree exp, rtx target)
if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE))
gcc_unreachable ();
-
+
arg = CALL_EXPR_ARG (exp, 0);
switch (DECL_FUNCTION_CODE (fndecl))
@@ -3110,7 +3110,7 @@ expand_builtin_powi (tree exp, rtx target, rtx subtarget)
return target;
}
-/* Expand expression EXP which is a call to the strlen builtin. Return
+/* Expand expression EXP which is a call to the strlen builtin. Return
NULL_RTX if we failed the caller should emit a normal call, otherwise
try to get the result in TARGET, if convenient. */
@@ -3268,7 +3268,7 @@ expand_builtin_memcpy (tree exp, rtx target)
operation in-line. */
if (src_align == 0)
return NULL_RTX;
-
+
if (currently_expanding_gimple_stmt)
stringop_block_profile (currently_expanding_gimple_stmt,
&expected_align, &expected_size);
@@ -3485,8 +3485,8 @@ expand_movstr (tree dest, tree src, rtx target, int endp)
return target;
}
-/* Expand expression EXP, which is a call to the strcpy builtin. Return
- NULL_RTX if we failed the caller should emit a normal call, otherwise
+/* Expand expression EXP, which is a call to the strcpy builtin. Return
+ NULL_RTX if we failed the caller should emit a normal call, otherwise
try to get the result in TARGET, if convenient (and in mode MODE if that's
convenient). */
@@ -3606,7 +3606,7 @@ builtin_strncpy_read_str (void *data, HOST_WIDE_INT offset,
return c_readstr (str + offset, mode);
}
-/* Expand expression EXP, which is a call to the strncpy builtin. Return
+/* Expand expression EXP, which is a call to the strncpy builtin. Return
NULL_RTX if we failed the caller should emit a normal call. */
static rtx
@@ -3699,8 +3699,8 @@ builtin_memset_gen_str (void *data, HOST_WIDE_INT offset ATTRIBUTE_UNUSED,
return force_reg (mode, target);
}
-/* Expand expression EXP, which is a call to the memset builtin. Return
- NULL_RTX if we failed the caller should emit a normal call, otherwise
+/* Expand expression EXP, which is a call to the memset builtin. Return
+ NULL_RTX if we failed the caller should emit a normal call, otherwise
try to get the result in TARGET, if convenient (and in mode MODE if that's
convenient). */
@@ -3793,7 +3793,7 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
dest_align, expected_align,
expected_size))
goto do_libcall;
-
+
dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX);
dest_mem = convert_memory_address (ptr_mode, dest_mem);
return dest_mem;
@@ -3814,7 +3814,7 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
dest_align, expected_align,
expected_size))
goto do_libcall;
-
+
dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX);
dest_mem = convert_memory_address (ptr_mode, dest_mem);
return dest_mem;
@@ -3848,7 +3848,7 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
return expand_call (fn, target, target == const0_rtx);
}
-/* Expand expression EXP, which is a call to the bzero builtin. Return
+/* Expand expression EXP, which is a call to the bzero builtin. Return
NULL_RTX if we failed the caller should emit a normal call. */
static rtx
@@ -4122,7 +4122,7 @@ expand_builtin_strcmp (tree exp, ATTRIBUTE_UNUSED rtx target)
return NULL_RTX;
}
-/* Expand expression EXP, which is a call to the strncmp builtin. Return
+/* Expand expression EXP, which is a call to the strncmp builtin. Return
NULL_RTX if we failed the caller should emit a normal call, otherwise try to get
the result in TARGET, if convenient. */
@@ -4805,7 +4805,7 @@ expand_builtin_alloca (tree exp, rtx target)
rtx result;
/* Emit normal call if marked not-inlineable. */
- if (CALL_CANNOT_INLINE_P (exp))
+ if (CALL_CANNOT_INLINE_P (exp))
return NULL_RTX;
if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE))
@@ -4871,7 +4871,7 @@ expand_builtin_unop (enum machine_mode target_mode, tree exp, rtx target,
return convert_to_mode (target_mode, target, 0);
}
-/* Expand a call to __builtin_expect. We just return our argument
+/* Expand a call to __builtin_expect. We just return our argument
as the builtin_expect semantic should've been already executed by
tree branch prediction pass. */
@@ -5613,7 +5613,7 @@ expand_builtin_fork_or_exec (tree fn, tree exp, rtx target, int ignore)
call = rewrite_call_expr (EXPR_LOCATION (exp), exp, 0, decl, 0);
return expand_call (call, target, ignore);
}
-
+
/* Reconstitute a mode for a __sync intrinsic operation. Since the type of
@@ -5774,7 +5774,7 @@ expand_builtin_compare_and_swap (enum machine_mode mode, tree exp,
/* Expand the __sync_lock_test_and_set intrinsic. Note that the most
general form is actually an atomic exchange, and some targets only
support a reduced form with the second argument being a constant 1.
- EXP is the CALL_EXPR; TARGET is an optional place for us to store
+ EXP is the CALL_EXPR; TARGET is an optional place for us to store
the results. */
static rtx
@@ -6747,7 +6747,7 @@ builtin_mathfn_code (const_tree t)
if (! more_const_call_expr_args_p (&iter))
return END_BUILTINS;
-
+
arg = next_const_call_expr_arg (&iter);
argtype = TREE_TYPE (arg);
@@ -7188,12 +7188,12 @@ fold_builtin_cabs (location_t loc, tree arg, tree type, tree fndecl)
&& (res = do_mpfr_arg2 (TREE_REALPART (arg), TREE_IMAGPART (arg),
type, mpfr_hypot)))
return res;
-
+
if (TREE_CODE (arg) == COMPLEX_EXPR)
{
tree real = TREE_OPERAND (arg, 0);
tree imag = TREE_OPERAND (arg, 1);
-
+
/* If either part is zero, cabs is fabs of the other. */
if (real_zerop (real))
return fold_build1_loc (loc, ABS_EXPR, type, imag);
@@ -7265,7 +7265,7 @@ fold_builtin_sqrt (location_t loc, tree arg, tree type)
/* Calculate the result when the argument is a constant. */
if ((res = do_mpfr_arg1 (arg, type, mpfr_sqrt, &dconst0, NULL, true)))
return res;
-
+
/* Optimize sqrt(expN(x)) = expN(x*0.5). */
fcode = builtin_mathfn_code (arg);
if (flag_unsafe_math_optimizations && BUILTIN_EXPONENT_P (fcode))
@@ -7394,7 +7394,7 @@ fold_builtin_cbrt (location_t loc, tree arg, tree type)
}
/* Optimize cbrt(pow(x,y)) -> pow(x,y/3) iff x is nonnegative. */
- if (fcode == BUILT_IN_POW
+ if (fcode == BUILT_IN_POW
|| fcode == BUILT_IN_POWF
|| fcode == BUILT_IN_POWL)
{
@@ -7430,7 +7430,7 @@ fold_builtin_cos (location_t loc,
/* Calculate the result when the argument is a constant. */
if ((res = do_mpfr_arg1 (arg, type, mpfr_cos, NULL, NULL, 0)))
return res;
-
+
/* Optimize cos(-x) into cos (x). */
if ((narg = fold_strip_sign_ops (arg)))
return build_call_expr_loc (loc, fndecl, 1, narg);
@@ -7451,12 +7451,12 @@ fold_builtin_cosh (location_t loc, tree arg, tree type, tree fndecl)
/* Calculate the result when the argument is a constant. */
if ((res = do_mpfr_arg1 (arg, type, mpfr_cosh, NULL, NULL, 0)))
return res;
-
+
/* Optimize cosh(-x) into cosh (x). */
if ((narg = fold_strip_sign_ops (arg)))
return build_call_expr_loc (loc, fndecl, 1, narg);
}
-
+
return NULL_TREE;
}
@@ -7479,7 +7479,7 @@ fold_builtin_ccos (location_t loc,
if ((tmp = do_mpc_arg1 (arg, type, (hyper ? mpc_cosh : mpc_cos))))
return tmp;
#endif
-
+
/* Optimize fn(-x) into fn(x). */
if ((tmp = fold_strip_sign_ops (arg)))
return build_call_expr_loc (loc, fndecl, 1, tmp);
@@ -7503,7 +7503,7 @@ fold_builtin_tan (tree arg, tree type)
/* Calculate the result when the argument is a constant. */
if ((res = do_mpfr_arg1 (arg, type, mpfr_tan, NULL, NULL, 0)))
return res;
-
+
/* Optimize tan(atan(x)) = x. */
fcode = builtin_mathfn_code (arg);
if (flag_unsafe_math_optimizations
@@ -7576,7 +7576,7 @@ fold_builtin_cexp (location_t loc, tree arg0, tree type)
if ((res = do_mpc_arg1 (arg0, type, mpc_exp)))
return res;
#endif
-
+
rtype = TREE_TYPE (TREE_TYPE (arg0));
/* In case we can figure out the real part of arg0 and it is constant zero
@@ -8001,7 +8001,7 @@ fold_builtin_logarithm (location_t loc, tree fndecl, tree arg,
{
CASE_FLT_FN (BUILT_IN_EXP):
/* Prepare to do logN(exp(exponent) -> exponent*logN(e). */
- x = build_real (type, real_value_truncate (TYPE_MODE (type),
+ x = build_real (type, real_value_truncate (TYPE_MODE (type),
dconst_e ()));
exponent = CALL_EXPR_ARG (arg, 0);
break;
@@ -8068,23 +8068,23 @@ fold_builtin_hypot (location_t loc, tree fndecl,
/* Calculate the result when the argument is a constant. */
if ((res = do_mpfr_arg2 (arg0, arg1, type, mpfr_hypot)))
return res;
-
+
/* If either argument to hypot has a negate or abs, strip that off.
E.g. hypot(-x,fabs(y)) -> hypot(x,y). */
narg0 = fold_strip_sign_ops (arg0);
narg1 = fold_strip_sign_ops (arg1);
if (narg0 || narg1)
{
- return build_call_expr_loc (loc, fndecl, 2, narg0 ? narg0 : arg0,
+ return build_call_expr_loc (loc, fndecl, 2, narg0 ? narg0 : arg0,
narg1 ? narg1 : arg1);
}
-
+
/* If either argument is zero, hypot is fabs of the other. */
if (real_zerop (arg0))
return fold_build1_loc (loc, ABS_EXPR, type, arg1);
else if (real_zerop (arg1))
return fold_build1_loc (loc, ABS_EXPR, type, arg0);
-
+
/* hypot(x,x) -> fabs(x)*sqrt(2). */
if (flag_unsafe_math_optimizations
&& operand_equal_p (arg0, arg1, OEP_PURE_SAME))
@@ -8310,7 +8310,7 @@ fold_builtin_exponent (location_t loc, tree fndecl, tree arg,
{
tree type = TREE_TYPE (TREE_TYPE (fndecl));
tree res;
-
+
/* Calculate the result when the argument is a constant. */
if ((res = do_mpfr_arg1 (arg, type, func, NULL, NULL, 0)))
return res;
@@ -8490,7 +8490,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT);
dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT);
- /* Both DEST and SRC must be pointer types.
+ /* Both DEST and SRC must be pointer types.
??? This is what old code did. Is the testing for pointer types
really mandatory?
@@ -9238,7 +9238,7 @@ fold_builtin_toascii (location_t loc, tree arg)
{
if (!validate_arg (arg, INTEGER_TYPE))
return NULL_TREE;
-
+
/* Transform toascii(c) -> (c & 0x7f). */
return fold_build2_loc (loc, BIT_AND_EXPR, integer_type_node, arg,
build_int_cst (NULL_TREE, 0x7f));
@@ -9329,7 +9329,7 @@ fold_builtin_fmin_fmax (location_t loc, tree arg0, tree arg1,
/* Transform fmin/fmax(x,x) -> x. */
if (operand_equal_p (arg0, arg1, OEP_PURE_SAME))
return omit_one_operand_loc (loc, type, arg0, arg1);
-
+
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
functions to return the numeric arg if the other one is NaN.
These tree codes don't honor that, so only transform if
@@ -9352,7 +9352,7 @@ fold_builtin_carg (location_t loc, tree arg, tree type)
&& TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == REAL_TYPE)
{
tree atan2_fn = mathfn_built_in (type, BUILT_IN_ATAN2);
-
+
if (atan2_fn)
{
tree new_arg = builtin_save_expr (arg);
@@ -9361,7 +9361,7 @@ fold_builtin_carg (location_t loc, tree arg, tree type)
return build_call_expr_loc (loc, atan2_fn, 2, i_arg, r_arg);
}
}
-
+
return NULL_TREE;
}
@@ -9372,13 +9372,13 @@ fold_builtin_logb (location_t loc, tree arg, tree rettype)
{
if (! validate_arg (arg, REAL_TYPE))
return NULL_TREE;
-
+
STRIP_NOPS (arg);
-
+
if (TREE_CODE (arg) == REAL_CST && ! TREE_OVERFLOW (arg))
{
const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (arg);
-
+
switch (value->cl)
{
case rvc_nan:
@@ -9403,7 +9403,7 @@ fold_builtin_logb (location_t loc, tree arg, tree rettype)
break;
}
}
-
+
return NULL_TREE;
}
@@ -9414,13 +9414,13 @@ fold_builtin_significand (location_t loc, tree arg, tree rettype)
{
if (! validate_arg (arg, REAL_TYPE))
return NULL_TREE;
-
+
STRIP_NOPS (arg);
-
+
if (TREE_CODE (arg) == REAL_CST && ! TREE_OVERFLOW (arg))
{
const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (arg);
-
+
switch (value->cl)
{
case rvc_zero:
@@ -9442,7 +9442,7 @@ fold_builtin_significand (location_t loc, tree arg, tree rettype)
break;
}
}
-
+
return NULL_TREE;
}
@@ -9453,12 +9453,12 @@ fold_builtin_frexp (location_t loc, tree arg0, tree arg1, tree rettype)
{
if (! validate_arg (arg0, REAL_TYPE) || ! validate_arg (arg1, POINTER_TYPE))
return NULL_TREE;
-
+
STRIP_NOPS (arg0);
-
+
if (!(TREE_CODE (arg0) == REAL_CST && ! TREE_OVERFLOW (arg0)))
return NULL_TREE;
-
+
arg1 = build_fold_indirect_ref_loc (loc, arg1);
/* Proceed if a valid pointer type was passed in. */
@@ -9466,7 +9466,7 @@ fold_builtin_frexp (location_t loc, tree arg0, tree arg1, tree rettype)
{
const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (arg0);
tree frac, exp;
-
+
switch (value->cl)
{
case rvc_zero:
@@ -9492,7 +9492,7 @@ fold_builtin_frexp (location_t loc, tree arg0, tree arg1, tree rettype)
default:
gcc_unreachable ();
}
-
+
/* Create the COMPOUND_EXPR (*arg1 = trunc, frac). */
arg1 = fold_build2_loc (loc, MODIFY_EXPR, rettype, arg1, exp);
TREE_SIDE_EFFECTS (arg1) = 1;
@@ -9520,7 +9520,7 @@ fold_builtin_load_exponent (location_t loc, tree arg0, tree arg1,
|| (TREE_CODE (arg0) == REAL_CST
&& !real_isfinite (&TREE_REAL_CST (arg0))))
return omit_one_operand_loc (loc, type, arg0, arg1);
-
+
/* If both arguments are constant, then try to evaluate it. */
if ((ldexp || REAL_MODE_FORMAT (TYPE_MODE (type))->b == 2)
&& TREE_CODE (arg0) == REAL_CST && !TREE_OVERFLOW (arg0)
@@ -9529,20 +9529,20 @@ fold_builtin_load_exponent (location_t loc, tree arg0, tree arg1,
/* Bound the maximum adjustment to twice the range of the
mode's valid exponents. Use abs to ensure the range is
positive as a sanity check. */
- const long max_exp_adj = 2 *
+ const long max_exp_adj = 2 *
labs (REAL_MODE_FORMAT (TYPE_MODE (type))->emax
- REAL_MODE_FORMAT (TYPE_MODE (type))->emin);
/* Get the user-requested adjustment. */
const HOST_WIDE_INT req_exp_adj = tree_low_cst (arg1, 0);
-
+
/* The requested adjustment must be inside this range. This
is a preliminary cap to avoid things like overflow, we
may still fail to compute the result for other reasons. */
if (-max_exp_adj < req_exp_adj && req_exp_adj < max_exp_adj)
{
REAL_VALUE_TYPE initial_result;
-
+
real_ldexp (&initial_result, &TREE_REAL_CST (arg0), req_exp_adj);
/* Ensure we didn't overflow. */
@@ -9550,7 +9550,7 @@ fold_builtin_load_exponent (location_t loc, tree arg0, tree arg1,
{
const REAL_VALUE_TYPE trunc_result
= real_value_truncate (TYPE_MODE (type), initial_result);
-
+
/* Only proceed if the target mode can hold the
resulting value. */
if (REAL_VALUES_EQUAL (initial_result, trunc_result))
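
The bound computed in this hunk corresponds, for IEEE double, to roughly twice |emax - emin|. The sketch below mirrors the check using the <float.h> exponent macros as a stand-in for GCC's real format fields, then evaluates an ldexp call that the fold could compute at compile time; illustrative only, not part of this patch:

    #include <float.h>
    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      /* Twice the range of the mode's valid exponents, as in the code above.  */
      long max_exp_adj = 2 * labs ((long) DBL_MAX_EXP - (long) DBL_MIN_EXP);
      long req_exp_adj = 10;                               /* user-requested adjustment */
      if (-max_exp_adj < req_exp_adj && req_exp_adj < max_exp_adj)
        printf ("%g\n", ldexp (1.5, (int) req_exp_adj));   /* prints 1536 */
      return 0;
    }
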
@@ -9570,12 +9570,12 @@ fold_builtin_modf (location_t loc, tree arg0, tree arg1, tree rettype)
{
if (! validate_arg (arg0, REAL_TYPE) || ! validate_arg (arg1, POINTER_TYPE))
return NULL_TREE;
-
+
STRIP_NOPS (arg0);
-
+
if (!(TREE_CODE (arg0) == REAL_CST && ! TREE_OVERFLOW (arg0)))
return NULL_TREE;
-
+
arg1 = build_fold_indirect_ref_loc (loc, arg1);
/* Proceed if a valid pointer type was passed in. */
@@ -9607,7 +9607,7 @@ fold_builtin_modf (location_t loc, tree arg0, tree arg1, tree rettype)
frac.sign = value->sign;
break;
}
-
+
/* Create the COMPOUND_EXPR (*arg1 = trunc, frac). */
arg1 = fold_build2_loc (loc, MODIFY_EXPR, rettype, arg1,
build_real (rettype, trunc));
@@ -9615,7 +9615,7 @@ fold_builtin_modf (location_t loc, tree arg0, tree arg1, tree rettype)
return fold_build2_loc (loc, COMPOUND_EXPR, rettype, arg1,
build_real (rettype, frac));
}
-
+
return NULL_TREE;
}
@@ -9760,7 +9760,7 @@ fold_builtin_classify (location_t loc, tree fndecl, tree arg, int builtin_index)
signbit_call, integer_zero_node);
isinf_call = fold_build2_loc (loc, NE_EXPR, integer_type_node,
isinf_call, integer_zero_node);
-
+
tmp = fold_build3_loc (loc, COND_EXPR, integer_type_node, signbit_call,
integer_minus_one_node, integer_one_node);
tmp = fold_build3_loc (loc, COND_EXPR, integer_type_node,
@@ -9818,13 +9818,13 @@ fold_builtin_fpclassify (location_t loc, tree exp)
enum machine_mode mode;
REAL_VALUE_TYPE r;
char buf[128];
-
+
/* Verify the required arguments in the original call. */
if (!validate_arglist (exp, INTEGER_TYPE, INTEGER_TYPE,
INTEGER_TYPE, INTEGER_TYPE,
INTEGER_TYPE, REAL_TYPE, VOID_TYPE))
return NULL_TREE;
-
+
fp_nan = CALL_EXPR_ARG (exp, 0);
fp_infinite = CALL_EXPR_ARG (exp, 1);
fp_normal = CALL_EXPR_ARG (exp, 2);
@@ -9835,12 +9835,12 @@ fold_builtin_fpclassify (location_t loc, tree exp)
mode = TYPE_MODE (type);
arg = builtin_save_expr (fold_build1_loc (loc, ABS_EXPR, type, arg));
- /* fpclassify(x) ->
+ /* fpclassify(x) ->
isnan(x) ? FP_NAN :
(fabs(x) == Inf ? FP_INFINITE :
(fabs(x) >= DBL_MIN ? FP_NORMAL :
(x == 0 ? FP_ZERO : FP_SUBNORMAL))). */
-
+
tmp = fold_build2_loc (loc, EQ_EXPR, integer_type_node, arg,
build_real (type, dconst0));
res = fold_build3_loc (loc, COND_EXPR, integer_type_node,
@@ -9851,7 +9851,7 @@ fold_builtin_fpclassify (location_t loc, tree exp)
tmp = fold_build2_loc (loc, GE_EXPR, integer_type_node,
arg, build_real (type, r));
res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, fp_normal, res);
-
+
if (HONOR_INFINITIES (mode))
{
real_inf (&r);
@@ -9866,7 +9866,7 @@ fold_builtin_fpclassify (location_t loc, tree exp)
tmp = fold_build2_loc (loc, ORDERED_EXPR, integer_type_node, arg, arg);
res = fold_build3_loc (loc, COND_EXPR, integer_type_node, tmp, res, fp_nan);
}
-
+
return res;
}
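
Written out as ordinary C, the expansion quoted in the comment above (for double, using the <math.h> classification macros) looks like the sketch below; it should agree with fpclassify for normal, zero, subnormal, infinite and NaN inputs. Illustrative only, not part of this patch:

    #include <float.h>
    #include <math.h>
    #include <stdio.h>

    static int expanded_fpclassify (double x)
    {
      return isnan (x) ? FP_NAN
             : fabs (x) == INFINITY ? FP_INFINITE
             : fabs (x) >= DBL_MIN ? FP_NORMAL
             : x == 0.0 ? FP_ZERO : FP_SUBNORMAL;
    }

    int main (void)
    {
      double tests[] = { 1.0, 0.0, DBL_MIN / 2.0, INFINITY, NAN };
      unsigned i;
      for (i = 0; i < sizeof tests / sizeof tests[0]; i++)
        printf ("%d %d\n", expanded_fpclassify (tests[i]), fpclassify (tests[i]));
      return 0;
    }
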
@@ -9991,13 +9991,13 @@ fold_builtin_1 (location_t loc, tree fndecl, tree arg0, bool ignore)
CASE_FLT_FN (BUILT_IN_CONJ):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return fold_build1_loc (loc, CONJ_EXPR, type, arg0);
break;
CASE_FLT_FN (BUILT_IN_CREAL):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return non_lvalue_loc (loc, fold_build1_loc (loc, REALPART_EXPR, type, arg0));;
break;
@@ -10009,86 +10009,86 @@ fold_builtin_1 (location_t loc, tree fndecl, tree arg0, bool ignore)
CASE_FLT_FN (BUILT_IN_CCOS):
return fold_builtin_ccos(loc, arg0, type, fndecl, /*hyper=*/ false);
-
+
CASE_FLT_FN (BUILT_IN_CCOSH):
return fold_builtin_ccos(loc, arg0, type, fndecl, /*hyper=*/ true);
-
+
#ifdef HAVE_mpc
CASE_FLT_FN (BUILT_IN_CSIN):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_sin);
break;
-
+
CASE_FLT_FN (BUILT_IN_CSINH):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_sinh);
break;
-
+
CASE_FLT_FN (BUILT_IN_CTAN):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_tan);
break;
-
+
CASE_FLT_FN (BUILT_IN_CTANH):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_tanh);
break;
-
+
CASE_FLT_FN (BUILT_IN_CLOG):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_log);
break;
-
+
CASE_FLT_FN (BUILT_IN_CSQRT):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_sqrt);
break;
-
+
#ifdef HAVE_mpc_arc
CASE_FLT_FN (BUILT_IN_CASIN):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_asin);
break;
-
+
CASE_FLT_FN (BUILT_IN_CACOS):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_acos);
break;
-
+
CASE_FLT_FN (BUILT_IN_CATAN):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_atan);
break;
-
+
CASE_FLT_FN (BUILT_IN_CASINH):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_asinh);
break;
-
+
CASE_FLT_FN (BUILT_IN_CACOSH):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_acosh);
break;
-
+
CASE_FLT_FN (BUILT_IN_CATANH):
if (validate_arg (arg0, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE)
return do_mpc_arg1 (arg0, type, mpc_atanh);
break;
#endif /* HAVE_mpc_arc */
#endif /* HAVE_mpc */
-
+
CASE_FLT_FN (BUILT_IN_CABS):
return fold_builtin_cabs (loc, arg0, type, fndecl);
@@ -10181,7 +10181,7 @@ fold_builtin_1 (location_t loc, tree fndecl, tree arg0, bool ignore)
if (validate_arg (arg0, REAL_TYPE))
return do_mpfr_arg1 (arg0, type, mpfr_gamma, NULL, NULL, 0);
break;
-
+
CASE_FLT_FN (BUILT_IN_EXP):
return fold_builtin_exponent (loc, fndecl, arg0, mpfr_exp);
@@ -10196,7 +10196,7 @@ fold_builtin_1 (location_t loc, tree fndecl, tree arg0, bool ignore)
if (validate_arg (arg0, REAL_TYPE))
return do_mpfr_arg1 (arg0, type, mpfr_expm1, NULL, NULL, 0);
break;
-
+
CASE_FLT_FN (BUILT_IN_LOG):
return fold_builtin_logarithm (loc, fndecl, arg0, mpfr_log);
@@ -10410,7 +10410,7 @@ fold_builtin_2 (location_t loc, tree fndecl, tree arg0, tree arg1, bool ignore)
if (validate_arg (arg0, COMPLEX_TYPE)
&& TREE_CODE (TREE_TYPE (TREE_TYPE (arg0))) == REAL_TYPE
&& validate_arg (arg1, COMPLEX_TYPE)
- && TREE_CODE (TREE_TYPE (TREE_TYPE (arg1))) == REAL_TYPE)
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (arg1))) == REAL_TYPE)
return do_mpc_arg2 (arg0, arg1, type, /*do_nonfinite=*/ 0, mpc_pow);
break;
#endif
@@ -10664,7 +10664,7 @@ fold_builtin_3 (location_t loc, tree fndecl,
ARG2, and ARG3. IGNORE is true if the result of the function call is
ignored. This function returns NULL_TREE if no simplification was
possible. */
-
+
static tree
fold_builtin_4 (location_t loc, tree fndecl,
tree arg0, tree arg1, tree arg2, tree arg3, bool ignore)
@@ -10710,9 +10710,9 @@ fold_builtin_4 (location_t loc, tree fndecl,
fixed argument patterns. Foldings that do varargs-to-varargs
transformations, or that match calls with more than 4 arguments,
need to be handled with fold_builtin_varargs instead. */
-
+
#define MAX_ARGS_TO_FOLD_BUILTIN 4
-
+
static tree
fold_builtin_n (location_t loc, tree fndecl, tree *args, int nargs, bool ignore)
{
@@ -10861,10 +10861,10 @@ fold_call_expr (location_t loc, tree exp, bool ignore)
}
return NULL_TREE;
}
-
+
/* Conveniently construct a function call expression. FNDECL names the
function to be called and ARGLIST is a TREE_LIST of arguments. */
-
+
tree
build_function_call_expr (location_t loc, tree fndecl, tree arglist)
{
@@ -10882,7 +10882,7 @@ build_function_call_expr (location_t loc, tree fndecl, tree arglist)
/* Conveniently construct a function call expression. FNDECL names the
function to be called, N is the number of arguments, and the "..."
parameters are the argument expressions. */
-
+
tree
build_call_expr_loc (location_t loc, tree fndecl, int n, ...)
{
@@ -10987,7 +10987,7 @@ rewrite_call_expr (location_t loc, tree exp, int skip, tree fndecl, int n, ...)
for (j = skip; j < oldnargs; j++, i++)
buffer[i] = CALL_EXPR_ARG (exp, j);
}
- else
+ else
buffer = CALL_EXPR_ARGP (exp) + skip;
return fold (build_call_array_loc (loc, TREE_TYPE (exp), fn, nargs, buffer));
@@ -10995,7 +10995,7 @@ rewrite_call_expr (location_t loc, tree exp, int skip, tree fndecl, int n, ...)
/* Validate a single argument ARG against a tree code CODE representing
a type. */
-
+
static bool
validate_arg (const_tree arg, enum tree_code code)
{
@@ -11449,7 +11449,7 @@ fold_builtin_strcat (location_t loc ATTRIBUTE_UNUSED, tree dst, tree src)
/* If we don't have a movstr we don't want to emit an strcpy
call. We have to do that if the length of the source string
isn't computable (in that case we can use memcpy probably
- later expanding to a sequence of mov instructions). If we
+ later expanding to a sequence of mov instructions). If we
have movstr instructions we can emit strcpy calls. */
if (!HAVE_movstr)
{
@@ -11759,7 +11759,7 @@ fold_builtin_next_arg (tree exp, bool va_start_p)
arg = SSA_NAME_VAR (arg);
/* We destructively modify the call to be __builtin_va_start (ap, 0)
- or __builtin_next_arg (0) the first time we see it, after checking
+ or __builtin_next_arg (0) the first time we see it, after checking
the arguments and if needed issuing a warning. */
if (!integer_zerop (arg))
{
@@ -12129,7 +12129,7 @@ maybe_emit_sprintf_chk_warning (tree exp, enum built_in_function fcode)
int nargs = call_expr_nargs (exp);
/* Verify the required arguments in the original call. */
-
+
if (nargs < 4)
return;
size = CALL_EXPR_ARG (exp, 2);
@@ -13097,7 +13097,7 @@ do_mpfr_arg1 (tree arg, tree type, int (*func)(mpfr_ptr, mpfr_srcptr, mp_rnd_t),
bool inclusive)
{
tree result = NULL_TREE;
-
+
STRIP_NOPS (arg);
/* To proceed, MPFR must exactly represent the target floating point
@@ -13125,7 +13125,7 @@ do_mpfr_arg1 (tree arg, tree type, int (*func)(mpfr_ptr, mpfr_srcptr, mp_rnd_t),
mpfr_clear (m);
}
}
-
+
return result;
}
@@ -13140,7 +13140,7 @@ do_mpfr_arg2 (tree arg1, tree arg2, tree type,
int (*func)(mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mp_rnd_t))
{
tree result = NULL_TREE;
-
+
STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
@@ -13170,7 +13170,7 @@ do_mpfr_arg2 (tree arg1, tree arg2, tree type,
mpfr_clears (m1, m2, NULL);
}
}
-
+
return result;
}
@@ -13185,7 +13185,7 @@ do_mpfr_arg3 (tree arg1, tree arg2, tree arg3, tree type,
int (*func)(mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_srcptr, mp_rnd_t))
{
tree result = NULL_TREE;
-
+
STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
STRIP_NOPS (arg3);
@@ -13219,7 +13219,7 @@ do_mpfr_arg3 (tree arg1, tree arg2, tree arg3, tree type,
mpfr_clears (m1, m2, m3, NULL);
}
}
-
+
return result;
}
@@ -13235,9 +13235,9 @@ do_mpfr_sincos (tree arg, tree arg_sinp, tree arg_cosp)
{
tree const type = TREE_TYPE (arg);
tree result = NULL_TREE;
-
+
STRIP_NOPS (arg);
-
+
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. */
if (REAL_MODE_FORMAT (TYPE_MODE (type))->b == 2
@@ -13336,7 +13336,7 @@ do_mpfr_bessel_n (tree arg1, tree arg2, tree type,
mpfr_clear (m);
}
}
-
+
return result;
}
@@ -13350,10 +13350,10 @@ do_mpfr_remquo (tree arg0, tree arg1, tree arg_quo)
{
tree const type = TREE_TYPE (arg0);
tree result = NULL_TREE;
-
+
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
-
+
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. */
if (REAL_MODE_FORMAT (TYPE_MODE (type))->b == 2
@@ -13427,7 +13427,7 @@ do_mpfr_lgamma_r (tree arg, tree arg_sg, tree type)
tree result = NULL_TREE;
STRIP_NOPS (arg);
-
+
/* To proceed, MPFR must exactly represent the target floating point
format, which only happens when the target base equals two. Also
verify ARG is a constant and that ARG_SG is an int pointer. */
@@ -13489,7 +13489,7 @@ static tree
do_mpc_arg1 (tree arg, tree type, int (*func)(mpc_ptr, mpc_srcptr, mpc_rnd_t))
{
tree result = NULL_TREE;
-
+
STRIP_NOPS (arg);
/* To proceed, MPFR must exactly represent the target floating point
@@ -13510,7 +13510,7 @@ do_mpc_arg1 (tree arg, tree type, int (*func)(mpc_ptr, mpc_srcptr, mpc_rnd_t))
const mpc_rnd_t crnd = fmt->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
int inexact;
mpc_t m;
-
+
mpc_init2 (m, prec);
mpfr_from_real (mpc_realref(m), re, rnd);
mpfr_from_real (mpc_imagref(m), im, rnd);
@@ -13538,7 +13538,7 @@ do_mpc_arg2 (tree arg0, tree arg1, tree type, int do_nonfinite,
int (*func)(mpc_ptr, mpc_srcptr, mpc_srcptr, mpc_rnd_t))
{
tree result = NULL_TREE;
-
+
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
@@ -13566,7 +13566,7 @@ do_mpc_arg2 (tree arg0, tree arg1, tree type, int do_nonfinite,
const mpc_rnd_t crnd = fmt->round_towards_zero ? MPC_RNDZZ : MPC_RNDNN;
int inexact;
mpc_t m0, m1;
-
+
mpc_init2 (m0, prec);
mpc_init2 (m1, prec);
mpfr_from_real (mpc_realref(m0), re0, rnd);
diff --git a/gcc/builtins.def b/gcc/builtins.def
index 785eefb0b74..09177a9001a 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -643,7 +643,7 @@ DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHRO
DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST)
DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST)
DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UINT, ATTR_NULL)
-DEF_LIB_BUILTIN (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LIST)
DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL)
DEF_EXT_LIB_BUILTIN (BUILT_IN_GETTEXT, "gettext", BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1)
DEF_C99_BUILTIN (BUILT_IN_IMAXABS, "imaxabs", BT_FN_INTMAX_INTMAX, ATTR_CONST_NOTHROW_LIST)
diff --git a/gcc/c-common.c b/gcc/c-common.c
index 20b24f0c3c2..747e7051e3e 100644
--- a/gcc/c-common.c
+++ b/gcc/c-common.c
@@ -1542,7 +1542,7 @@ decl_constant_value_for_optimization (tree exp)
void
constant_expression_warning (tree value)
{
- if (warn_overflow && pedantic
+ if (warn_overflow && pedantic
&& (TREE_CODE (value) == INTEGER_CST || TREE_CODE (value) == REAL_CST
|| TREE_CODE (value) == FIXED_CST
|| TREE_CODE (value) == VECTOR_CST
@@ -1585,12 +1585,12 @@ overflow_warning (location_t loc, tree value)
case INTEGER_CST:
warning_at (loc, OPT_Woverflow, "integer overflow in expression");
break;
-
+
case REAL_CST:
warning_at (loc, OPT_Woverflow,
"floating point overflow in expression");
break;
-
+
case FIXED_CST:
warning_at (loc, OPT_Woverflow, "fixed-point overflow in expression");
break;
@@ -1598,7 +1598,7 @@ overflow_warning (location_t loc, tree value)
case VECTOR_CST:
warning_at (loc, OPT_Woverflow, "vector overflow in expression");
break;
-
+
case COMPLEX_CST:
if (TREE_CODE (TREE_REALPART (value)) == INTEGER_CST)
warning_at (loc, OPT_Woverflow,
@@ -1620,7 +1620,7 @@ overflow_warning (location_t loc, tree value)
had CODE_LEFT and CODE_RIGHT, into an expression of type TYPE. */
void
warn_logical_operator (location_t location, enum tree_code code, tree type,
- enum tree_code code_left, tree op_left,
+ enum tree_code code_left, tree op_left,
enum tree_code ARG_UNUSED (code_right), tree op_right)
{
int or_op = (code == TRUTH_ORIF_EXPR || code == TRUTH_OR_EXPR);
@@ -1677,12 +1677,12 @@ warn_logical_operator (location_t location, enum tree_code code, tree type,
if (rhs && TREE_CODE (rhs) == C_MAYBE_CONST_EXPR)
rhs = C_MAYBE_CONST_EXPR_EXPR (rhs);
-
+
/* If this is an OR operation, invert both sides; we will invert
again at the end. */
if (or_op)
in0_p = !in0_p, in1_p = !in1_p;
-
+
/* If both expressions are the same, if we can merge the ranges, and we
can build the range test, return it or it inverted. */
if (lhs && rhs && operand_equal_p (lhs, rhs, 0)
@@ -1741,7 +1741,7 @@ strict_aliasing_warning (tree otype, tree type, tree expr)
}
else
{
- /* warn_strict_aliasing >= 3. This includes the default (3).
+ /* warn_strict_aliasing >= 3. This includes the default (3).
Only warn if the cast is dereferenced immediately. */
alias_set_type set1 =
get_alias_set (TREE_TYPE (TREE_OPERAND (expr, 0)));
@@ -1806,7 +1806,7 @@ check_main_parameter_types (tree decl)
{
case 1:
if (TYPE_MAIN_VARIANT (type) != integer_type_node)
- pedwarn (input_location, OPT_Wmain, "first argument of %q+D should be %<int%>",
+ pedwarn (input_location, OPT_Wmain, "first argument of %q+D should be %<int%>",
decl);
break;
@@ -1901,7 +1901,7 @@ vector_types_convertible_p (const_tree t1, const_tree t2, bool emit_lax_note)
both args are zero-extended or both are sign-extended.
Otherwise, we might change the result.
Eg, (short)-1 | (unsigned short)-1 is (int)-1
- but calculated in (unsigned short) it would be (unsigned short)-1.
+ but calculated in (unsigned short) it would be (unsigned short)-1.
*/
tree shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise)
{
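
The example in the comment above, shown at the source level: with the usual promotions the OR is computed in int and yields -1, whereas narrowing both operands to unsigned short first would yield 0xffff. Illustrative only (assumes 16-bit short and 32-bit int), not part of this patch:

    #include <stdio.h>

    int main (void)
    {
      short a = -1;
      unsigned short b = (unsigned short) -1;
      int wide = a | b;                                /* computed in int: -1                    */
      unsigned short narrow = (unsigned short) a | b;  /* as if computed in unsigned short: 65535 */
      printf ("%d %u\n", wide, (unsigned) narrow);
      return 0;
    }
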
@@ -1916,13 +1916,13 @@ tree shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise)
from signed char and that RESULT_TYPE is long long int.
If we explicitly cast OP0 to RESULT_TYPE, OP0 would look
like
-
+
(long long int) (unsigned int) signed_char
which get_narrower would narrow down to
-
+
(unsigned int) signed char
-
+
If we do not cast OP0 first, get_narrower would return
signed_char, which is inconsistent with the case of the
explicit cast. */
@@ -1937,7 +1937,7 @@ tree shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise)
/* Handle the case that OP0 (or OP1) does not *contain* a conversion
but it *requires* conversion to FINAL_TYPE. */
-
+
if ((TYPE_PRECISION (TREE_TYPE (op0))
== TYPE_PRECISION (TREE_TYPE (arg0)))
&& TREE_TYPE (op0) != result_type)
@@ -1946,18 +1946,18 @@ tree shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise)
== TYPE_PRECISION (TREE_TYPE (arg1)))
&& TREE_TYPE (op1) != result_type)
unsigned1 = TYPE_UNSIGNED (TREE_TYPE (op1));
-
+
/* Now UNSIGNED0 is 1 if ARG0 zero-extends to FINAL_TYPE. */
-
+
/* For bitwise operations, signedness of nominal type
does not matter. Consider only how operands were extended. */
if (bitwise)
uns = unsigned0;
-
+
/* Note that in all three cases below we refrain from optimizing
an unsigned operation on sign-extended args.
That would not be valid. */
-
+
/* Both args variable: if both extended in same way
from same width, do it in that width.
Do it unsigned if args were zero-extended. */
@@ -2036,7 +2036,7 @@ conversion_warning (tree type, tree expr)
/* Conversion from boolean to a signed:1 bit-field (which only
can hold the values 0 and -1) doesn't lose information - but
it does change the value. */
- if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
+ if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
warning (OPT_Wconversion,
"conversion to %qT from boolean expression", type);
return;
@@ -2057,7 +2057,7 @@ conversion_warning (tree type, tree expr)
&& TREE_CODE (type) == INTEGER_TYPE
&& !int_fits_type_p (expr, type))
{
- if (TYPE_UNSIGNED (type) && !TYPE_UNSIGNED (expr_type)
+ if (TYPE_UNSIGNED (type) && !TYPE_UNSIGNED (expr_type)
&& tree_int_cst_sgn (expr) < 0)
warning (OPT_Wsign_conversion,
"negative integer implicitly converted to unsigned type");
@@ -2102,7 +2102,7 @@ conversion_warning (tree type, tree expr)
tree op1 = TREE_OPERAND (expr, 1);
tree op2 = TREE_OPERAND (expr, 2);
- if ((TREE_CODE (op1) == REAL_CST || TREE_CODE (op1) == INTEGER_CST
+ if ((TREE_CODE (op1) == REAL_CST || TREE_CODE (op1) == INTEGER_CST
|| TREE_CODE (op1) == COND_EXPR)
&& (TREE_CODE (op2) == REAL_CST || TREE_CODE (op2) == INTEGER_CST
|| TREE_CODE (op2) == COND_EXPR))
@@ -2129,15 +2129,15 @@ conversion_warning (tree type, tree expr)
expr_type = TREE_TYPE (expr);
/* Don't warn for short y; short x = ((int)y & 0xff); */
- if (TREE_CODE (expr) == BIT_AND_EXPR
- || TREE_CODE (expr) == BIT_IOR_EXPR
+ if (TREE_CODE (expr) == BIT_AND_EXPR
+ || TREE_CODE (expr) == BIT_IOR_EXPR
|| TREE_CODE (expr) == BIT_XOR_EXPR)
{
/* If both args were extended from a shortest type,
use that type if that is safe. */
- expr_type = shorten_binary_op (expr_type,
- TREE_OPERAND (expr, 0),
- TREE_OPERAND (expr, 1),
+ expr_type = shorten_binary_op (expr_type,
+ TREE_OPERAND (expr, 0),
+ TREE_OPERAND (expr, 1),
/* bitwise */1);
if (TREE_CODE (expr) == BIT_AND_EXPR)
@@ -2155,13 +2155,13 @@ conversion_warning (tree type, tree expr)
&& int_fits_type_p (op0, c_common_unsigned_type (type)))
|| (TREE_CODE (op1) == INTEGER_CST
&& int_fits_type_p (op1, c_common_signed_type (type))
- && int_fits_type_p (op1,
+ && int_fits_type_p (op1,
c_common_unsigned_type (type))))
return;
/* If constant is unsigned and fits in the target
type, then the result will also fit. */
else if ((TREE_CODE (op0) == INTEGER_CST
- && unsigned0
+ && unsigned0
&& int_fits_type_p (op0, type))
|| (TREE_CODE (op1) == INTEGER_CST
&& unsigned1
@@ -2170,7 +2170,7 @@ conversion_warning (tree type, tree expr)
}
}
/* Warn for integer types converted to smaller integer types. */
- if (TYPE_PRECISION (type) < TYPE_PRECISION (expr_type))
+ if (TYPE_PRECISION (type) < TYPE_PRECISION (expr_type))
give_warning = true;
/* When they are the same width but different signedness,
@@ -2194,9 +2194,9 @@ conversion_warning (tree type, tree expr)
{
tree type_low_bound = TYPE_MIN_VALUE (expr_type);
tree type_high_bound = TYPE_MAX_VALUE (expr_type);
- REAL_VALUE_TYPE real_low_bound
+ REAL_VALUE_TYPE real_low_bound
= real_value_from_int_cst (0, type_low_bound);
- REAL_VALUE_TYPE real_high_bound
+ REAL_VALUE_TYPE real_high_bound
= real_value_from_int_cst (0, type_high_bound);
if (!exact_real_truncate (TYPE_MODE (type), &real_low_bound)
@@ -2245,7 +2245,7 @@ warnings_for_convert_and_check (tree type, tree expr, tree result)
else
conversion_warning (type, expr);
}
- else if (!int_fits_type_p (expr, c_common_unsigned_type (type)))
+ else if (!int_fits_type_p (expr, c_common_unsigned_type (type)))
warning (OPT_Woverflow,
"overflow in implicit constant conversion");
/* No warning for converting 0x80000000 to int. */
@@ -2294,7 +2294,7 @@ convert_and_check (tree type, tree expr)
if (TREE_TYPE (expr) == type)
return expr;
-
+
result = convert (type, expr);
if (c_inhibit_evaluation_warnings == 0
@@ -2600,7 +2600,7 @@ verify_tree (tree x, struct tlist **pbefore_sp, struct tlist **pno_sp,
{
call_expr_arg_iterator iter;
tree arg;
- tmp_before = tmp_nosp = 0;
+ tmp_before = tmp_nosp = 0;
verify_tree (CALL_EXPR_FN (x), &tmp_before, &tmp_nosp, NULL_TREE);
FOR_EACH_CALL_EXPR_ARG (arg, iter, x)
{
@@ -3685,7 +3685,7 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr,
&& !(TREE_CODE (primop0) == INTEGER_CST
&& !TREE_OVERFLOW (convert (c_common_signed_type (type),
primop0))))
- warning (OPT_Wtype_limits,
+ warning (OPT_Wtype_limits,
"comparison of unsigned expression >= 0 is always true");
value = truthvalue_true_node;
break;
@@ -3695,7 +3695,7 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr,
&& !(TREE_CODE (primop0) == INTEGER_CST
&& !TREE_OVERFLOW (convert (c_common_signed_type (type),
primop0))))
- warning (OPT_Wtype_limits,
+ warning (OPT_Wtype_limits,
"comparison of unsigned expression < 0 is always false");
value = truthvalue_false_node;
break;
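
The two diagnostics touched here fire when the comparison is decided by the type alone. A minimal pattern that draws them when compiled with -Wtype-limits; illustrative only, not part of this patch:

    unsigned int u;                              /* any unsigned expression */

    int always_true (void)  { return u >= 0; }  /* ... >= 0 is always true  */
    int always_false (void) { return u < 0; }   /* ... < 0 is always false  */
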
@@ -3737,19 +3737,19 @@ pointer_int_sum (location_t loc, enum tree_code resultcode,
if (TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE)
{
- pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"pointer of type %<void *%> used in arithmetic");
size_exp = integer_one_node;
}
else if (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE)
{
- pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"pointer to a function used in arithmetic");
size_exp = integer_one_node;
}
else if (TREE_CODE (TREE_TYPE (result_type)) == METHOD_TYPE)
{
- pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"pointer to member function used in arithmetic");
size_exp = integer_one_node;
}
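
These three pedwarns cover the GNU extension of arithmetic on pointers whose target has no size; as the size_exp = integer_one_node assignments show, the element size is taken as 1. A minimal example that triggers the first diagnostic under -pedantic or -Wpointer-arith; illustrative only, not part of this patch:

    int get_byte (void *base, int off)
    {
      return *(unsigned char *) (base + off);   /* 'void *' used in arithmetic */
    }
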
@@ -3912,7 +3912,7 @@ c_common_truthvalue_conversion (location_t location, tree expr)
if (TREE_TYPE (expr) == truthvalue_type_node)
return expr;
expr = build2 (TREE_CODE (expr), truthvalue_type_node,
- c_common_truthvalue_conversion (location,
+ c_common_truthvalue_conversion (location,
TREE_OPERAND (expr, 0)),
c_common_truthvalue_conversion (location,
TREE_OPERAND (expr, 1)));
@@ -4005,7 +4005,7 @@ c_common_truthvalue_conversion (location_t location, tree expr)
{
expr = build2 (COMPOUND_EXPR, truthvalue_type_node,
TREE_OPERAND (expr, 1),
- c_common_truthvalue_conversion
+ c_common_truthvalue_conversion
(location, TREE_OPERAND (expr, 0)));
goto ret;
}
@@ -4361,7 +4361,7 @@ c_sizeof_or_alignof_type (location_t loc,
if (is_sizeof)
{
if (complain && (pedantic || warn_pointer_arith))
- pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"invalid application of %<sizeof%> to a function type");
else if (!complain)
return error_mark_node;
@@ -4374,7 +4374,7 @@ c_sizeof_or_alignof_type (location_t loc,
{
if (type_code == VOID_TYPE
&& complain && (pedantic || warn_pointer_arith))
- pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"invalid application of %qs to a void type", op_name);
else if (!complain)
return error_mark_node;
@@ -4770,7 +4770,7 @@ c_common_nodes_and_builtins (void)
/* Only supported decimal floating point extension if the target
actually supports underlying modes. */
- if (targetm.scalar_mode_supported_p (SDmode)
+ if (targetm.scalar_mode_supported_p (SDmode)
&& targetm.scalar_mode_supported_p (DDmode)
&& targetm.scalar_mode_supported_p (TDmode))
{
@@ -5332,7 +5332,7 @@ c_add_case_label (location_t loc, splay_tree cases, tree cond, tree orig_type,
/* Case ranges are a GNU extension. */
if (high_value)
- pedwarn (loc, OPT_pedantic,
+ pedwarn (loc, OPT_pedantic,
"range expressions in switch statements are non-standard");
type = TREE_TYPE (cond);
@@ -6269,7 +6269,7 @@ get_priority (tree args, bool is_destructor)
if (!args)
return DEFAULT_INIT_PRIORITY;
-
+
if (!SUPPORTS_INIT_PRIORITY)
{
if (is_destructor)
@@ -6293,12 +6293,12 @@ get_priority (tree args, bool is_destructor)
if (is_destructor)
warning (0,
"destructor priorities from 0 to %d are reserved "
- "for the implementation",
+ "for the implementation",
MAX_RESERVED_INIT_PRIORITY);
else
warning (0,
"constructor priorities from 0 to %d are reserved "
- "for the implementation",
+ "for the implementation",
MAX_RESERVED_INIT_PRIORITY);
}
return pri;
@@ -6578,7 +6578,7 @@ handle_section_attribute (tree *node, tree ARG_UNUSED (name), tree args,
&& current_function_decl != NULL_TREE
&& !TREE_STATIC (decl))
{
- error_at (DECL_SOURCE_LOCATION (decl),
+ error_at (DECL_SOURCE_LOCATION (decl),
"section attribute cannot be specified for "
"local variables");
*no_add_attrs = true;
@@ -6744,11 +6744,11 @@ handle_alias_attribute (tree *node, tree name, tree args,
*no_add_attrs = true;
}
else if ((TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl))
- || (TREE_CODE (decl) != FUNCTION_DECL
+ || (TREE_CODE (decl) != FUNCTION_DECL
&& TREE_PUBLIC (decl) && !DECL_EXTERNAL (decl))
/* A static variable declaration is always a tentative definition,
but the alias is a non-tentative definition which overrides. */
- || (TREE_CODE (decl) != FUNCTION_DECL
+ || (TREE_CODE (decl) != FUNCTION_DECL
&& ! TREE_PUBLIC (decl) && DECL_INITIAL (decl)))
{
error ("%q+D defined both normally and as an alias", decl);
@@ -7091,11 +7091,11 @@ handle_alloc_size_attribute (tree *node, tree ARG_UNUSED (name), tree args,
tree position = TREE_VALUE (args);
if (TREE_CODE (position) != INTEGER_CST
- || TREE_INT_CST_HIGH (position)
+ || TREE_INT_CST_HIGH (position)
|| TREE_INT_CST_LOW (position) < 1
|| TREE_INT_CST_LOW (position) > arg_count )
{
- warning (OPT_Wattributes,
+ warning (OPT_Wattributes,
"alloc_size parameter outside range");
*no_add_attrs = true;
return NULL_TREE;
@@ -7653,7 +7653,7 @@ handle_sentinel_attribute (tree *node, tree name, tree args,
if (TREE_CODE (position) != INTEGER_CST)
{
- warning (OPT_Wattributes,
+ warning (OPT_Wattributes,
"requested position is not an integer constant");
*no_add_attrs = true;
}
@@ -7679,10 +7679,10 @@ handle_type_generic_attribute (tree *node, tree ARG_UNUSED (name),
bool * ARG_UNUSED (no_add_attrs))
{
tree params;
-
+
/* Ensure we have a function type. */
gcc_assert (TREE_CODE (*node) == FUNCTION_TYPE);
-
+
params = TYPE_ARG_TYPES (*node);
while (params && ! VOID_TYPE_P (TREE_VALUE (params)))
params = TREE_CHAIN (params);
@@ -8054,7 +8054,7 @@ check_builtin_function_arguments (tree fndecl, int nargs, tree *args)
if (validate_nargs (fndecl, nargs, 6))
{
unsigned i;
-
+
for (i=0; i<5; i++)
if (TREE_CODE (args[i]) != INTEGER_CST)
{
@@ -8166,7 +8166,7 @@ catenate_strings (const char *lhs, const char *rhs_start, int rhs_size)
TOKEN, which had the associated VALUE. */
void
-c_parse_error (const char *gmsgid, enum cpp_ttype token_type,
+c_parse_error (const char *gmsgid, enum cpp_ttype token_type,
tree value, unsigned char token_flags)
{
#define catenate_messages(M1, M2) catenate_strings ((M1), (M2), sizeof (M2))
@@ -8175,8 +8175,8 @@ c_parse_error (const char *gmsgid, enum cpp_ttype token_type,
if (token_type == CPP_EOF)
message = catenate_messages (gmsgid, " at end of input");
- else if (token_type == CPP_CHAR
- || token_type == CPP_WCHAR
+ else if (token_type == CPP_CHAR
+ || token_type == CPP_WCHAR
|| token_type == CPP_CHAR16
|| token_type == CPP_CHAR32)
{
@@ -8208,8 +8208,8 @@ c_parse_error (const char *gmsgid, enum cpp_ttype token_type,
free (message);
message = NULL;
}
- else if (token_type == CPP_STRING
- || token_type == CPP_WSTRING
+ else if (token_type == CPP_STRING
+ || token_type == CPP_WSTRING
|| token_type == CPP_STRING16
|| token_type == CPP_STRING32
|| token_type == CPP_UTF8STRING)
@@ -8585,7 +8585,7 @@ complete_array_type (tree *ptype, tree initial_value, bool do_default)
/* Make sure we have the canonical MAIN_TYPE. */
hashcode = iterative_hash_object (TYPE_HASH (unqual_elt), hashcode);
- hashcode = iterative_hash_object (TYPE_HASH (TYPE_DOMAIN (main_type)),
+ hashcode = iterative_hash_object (TYPE_HASH (TYPE_DOMAIN (main_type)),
hashcode);
main_type = type_hash_canon (hashcode, main_type);
@@ -8596,7 +8596,7 @@ complete_array_type (tree *ptype, tree initial_value, bool do_default)
else if (TYPE_CANONICAL (TREE_TYPE (main_type)) != TREE_TYPE (main_type)
|| (TYPE_CANONICAL (TYPE_DOMAIN (main_type))
!= TYPE_DOMAIN (main_type)))
- TYPE_CANONICAL (main_type)
+ TYPE_CANONICAL (main_type)
= build_array_type (TYPE_CANONICAL (TREE_TYPE (main_type)),
TYPE_CANONICAL (TYPE_DOMAIN (main_type)));
else
@@ -8891,7 +8891,7 @@ warn_about_parentheses (enum tree_code code,
|| ((CODE) != INTEGER_CST \
&& (integer_onep (ARG) || integer_zerop (ARG))))
- switch (code)
+ switch (code)
{
case LSHIFT_EXPR:
if (code_left == PLUS_EXPR || code_right == PLUS_EXPR)
@@ -9041,16 +9041,16 @@ warn_for_div_by_zero (location_t loc, tree divisor)
The arguments of this function map directly to local variables
of build_binary_op. */
-void
+void
warn_for_sign_compare (location_t location,
- tree orig_op0, tree orig_op1,
- tree op0, tree op1,
+ tree orig_op0, tree orig_op1,
+ tree op0, tree op1,
tree result_type, enum tree_code resultcode)
{
int op0_signed = !TYPE_UNSIGNED (TREE_TYPE (orig_op0));
int op1_signed = !TYPE_UNSIGNED (TREE_TYPE (orig_op1));
int unsignedp0, unsignedp1;
-
+
/* In C++, check for comparison of different enum types. */
if (c_dialect_cxx()
&& TREE_CODE (TREE_TYPE (orig_op0)) == ENUMERAL_TYPE
@@ -9078,10 +9078,10 @@ warn_for_sign_compare (location_t location,
if (op0_signed)
sop = orig_op0, uop = orig_op1;
- else
+ else
sop = orig_op1, uop = orig_op0;
- STRIP_TYPE_NOPS (sop);
+ STRIP_TYPE_NOPS (sop);
STRIP_TYPE_NOPS (uop);
base_type = (TREE_CODE (result_type) == COMPLEX_TYPE
? TREE_TYPE (result_type) : result_type);
@@ -9107,23 +9107,23 @@ warn_for_sign_compare (location_t location,
&& int_fits_type_p (TYPE_MAX_VALUE (TREE_TYPE (uop)),
c_common_signed_type (base_type)))
/* OK */;
- else
+ else
warning_at (location,
- OPT_Wsign_compare,
+ OPT_Wsign_compare,
"comparison between signed and unsigned integer expressions");
}
-
+
/* Warn if two unsigned values are being compared in a size larger
than their original size, and one (and only one) is the result of
a `~' operator. This comparison will always fail.
-
+
Also warn if one operand is a constant, and the constant does not
have all bits set that are set in the ~ operand when it is
extended. */
op0 = get_narrower (op0, &unsignedp0);
op1 = get_narrower (op1, &unsignedp1);
-
+
if ((TREE_CODE (op0) == BIT_NOT_EXPR)
^ (TREE_CODE (op1) == BIT_NOT_EXPR))
{
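
The situation described in the comment above: once the operands are promoted to int, ~ applied to a narrower unsigned value is negative, so it can never equal another promoted narrow unsigned value, and never equal zero. The sketch below shows the pattern the warning targets; whether the warning itself fires depends on the surrounding checks, so this is illustrative only and not part of this patch:

    #include <stdio.h>

    int main (void)
    {
      unsigned char x = 0x0f, y = 0xf0;
      /* ~x is computed in int: here 0xfffffff0, never in 0..255 and never 0.  */
      printf ("%d %d\n", ~x == y, ~x == 0);   /* prints 0 0 for every x and y */
      return 0;
    }
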
@@ -9138,7 +9138,7 @@ warn_for_sign_compare (location_t location,
HOST_WIDE_INT constant, mask;
int unsignedp;
unsigned int bits;
-
+
if (host_integerp (op0, 0))
{
primop = op1;
@@ -9151,7 +9151,7 @@ warn_for_sign_compare (location_t location,
unsignedp = unsignedp0;
constant = tree_low_cst (op1, 0);
}
-
+
bits = TYPE_PRECISION (TREE_TYPE (primop));
if (bits < TYPE_PRECISION (result_type)
&& bits < HOST_BITS_PER_LONG && unsignedp)
@@ -9160,10 +9160,10 @@ warn_for_sign_compare (location_t location,
if ((mask & constant) != mask)
{
if (constant == 0)
- warning (OPT_Wsign_compare,
+ warning (OPT_Wsign_compare,
"promoted ~unsigned is always non-zero");
else
- warning_at (location, OPT_Wsign_compare,
+ warning_at (location, OPT_Wsign_compare,
"comparison of promoted ~unsigned with constant");
}
}
diff --git a/gcc/c-common.h b/gcc/c-common.h
index d91546ff239..5830d7b9335 100644
--- a/gcc/c-common.h
+++ b/gcc/c-common.h
@@ -1038,9 +1038,9 @@ extern void warn_about_parentheses (enum tree_code,
extern void warn_for_unused_label (tree label);
extern void warn_for_div_by_zero (location_t, tree divisor);
extern void warn_for_sign_compare (location_t,
- tree orig_op0, tree orig_op1,
- tree op0, tree op1,
- tree result_type,
+ tree orig_op0, tree orig_op1,
+ tree op0, tree op1,
+ tree result_type,
enum tree_code resultcode);
extern void set_underlying_type (tree x);
extern bool is_typedef_decl (tree x);
diff --git a/gcc/c-cppbuiltin.c b/gcc/c-cppbuiltin.c
index d9b95aff443..1565aac23db 100644
--- a/gcc/c-cppbuiltin.c
+++ b/gcc/c-cppbuiltin.c
@@ -60,7 +60,7 @@ static void builtin_define_type_max (const char *, tree);
static void builtin_define_type_minmax (const char *, const char *, tree);
static void builtin_define_type_precision (const char *, tree);
static void builtin_define_type_sizeof (const char *, tree);
-static void builtin_define_float_constants (const char *,
+static void builtin_define_float_constants (const char *,
const char *,
const char *,
tree);
@@ -84,9 +84,9 @@ builtin_define_type_sizeof (const char *name, tree type)
/* Define the float.h constants for TYPE using NAME_PREFIX, FP_SUFFIX,
and FP_CAST. */
static void
-builtin_define_float_constants (const char *name_prefix,
- const char *fp_suffix,
- const char *fp_cast,
+builtin_define_float_constants (const char *name_prefix,
+ const char *fp_suffix,
+ const char *fp_cast,
tree type)
{
/* Used to convert radix-based values to base 10 values in several cases.
@@ -205,7 +205,7 @@ builtin_define_float_constants (const char *name_prefix,
construct the following numbers directly as a hexadecimal
constants. */
get_max_float (fmt, buf, sizeof (buf));
-
+
sprintf (name, "__%s_MAX__", name_prefix);
builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);
@@ -260,8 +260,8 @@ builtin_define_float_constants (const char *name_prefix,
/* Define __DECx__ constants for TYPE using NAME_PREFIX and SUFFIX. */
static void
-builtin_define_decimal_float_constants (const char *name_prefix,
- const char *suffix,
+builtin_define_decimal_float_constants (const char *name_prefix,
+ const char *suffix,
tree type)
{
const struct real_format *fmt;
@@ -286,7 +286,7 @@ builtin_define_decimal_float_constants (const char *name_prefix,
/* Compute the minimum representable value. */
sprintf (name, "__%s_MIN__", name_prefix);
sprintf (buf, "1E%d%s", fmt->emin - 1, suffix);
- builtin_define_with_value (name, buf, 0);
+ builtin_define_with_value (name, buf, 0);
/* Compute the maximum representable value. */
sprintf (name, "__%s_MAX__", name_prefix);
@@ -300,7 +300,7 @@ builtin_define_decimal_float_constants (const char *name_prefix,
*p = 0;
/* fmt->p plus 1, to account for the decimal point and fmt->emax
minus 1 because the digits are nines, not 1.0. */
- sprintf (&buf[fmt->p + 1], "E%d%s", fmt->emax - 1, suffix);
+ sprintf (&buf[fmt->p + 1], "E%d%s", fmt->emax - 1, suffix);
builtin_define_with_value (name, buf, 0);
/* Compute epsilon (the difference between 1 and least value greater
@@ -319,7 +319,7 @@ builtin_define_decimal_float_constants (const char *name_prefix,
*p++ = '.';
}
*p = 0;
- sprintf (&buf[fmt->p], "1E%d%s", fmt->emin - 1, suffix);
+ sprintf (&buf[fmt->p], "1E%d%s", fmt->emin - 1, suffix);
builtin_define_with_value (name, buf, 0);
}
@@ -935,7 +935,7 @@ builtin_define_with_int_value (const char *macro, HOST_WIDE_INT value)
static void
builtin_define_with_hex_fp_value (const char *macro,
tree type, int digits,
- const char *hex_str,
+ const char *hex_str,
const char *fp_suffix,
const char *fp_cast)
{
@@ -961,7 +961,7 @@ builtin_define_with_hex_fp_value (const char *macro,
sprintf (buf1, "%s%s", dec_str, fp_suffix);
sprintf (buf2, fp_cast, buf1);
sprintf (buf1, "%s=%s", macro, buf2);
-
+
cpp_define (parse_in, buf1);
}
diff --git a/gcc/c-decl.c b/gcc/c-decl.c
index 492d2e673b7..59179dd5858 100644
--- a/gcc/c-decl.c
+++ b/gcc/c-decl.c
@@ -1102,7 +1102,7 @@ pop_scope (void)
error ("label %q+D used but not defined", p);
DECL_INITIAL (p) = error_mark_node;
}
- else
+ else
warn_for_unused_label (p);
/* Labels go in BLOCK_VARS. */
@@ -1897,7 +1897,7 @@ diagnose_mismatched_decls (tree newdecl, tree olddecl,
}
else if (warn_traditional)
{
- warned |= warning (OPT_Wtraditional,
+ warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
@@ -1975,7 +1975,7 @@ diagnose_mismatched_decls (tree newdecl, tree olddecl,
}
else if (warn_traditional)
{
- warned |= warning (OPT_Wtraditional,
+ warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
@@ -2046,14 +2046,14 @@ diagnose_mismatched_decls (tree newdecl, tree olddecl,
if (DECL_DECLARED_INLINE_P (newdecl)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl)))
{
- warned |= warning (OPT_Wattributes,
+ warned |= warning (OPT_Wattributes,
"inline declaration of %qD follows "
"declaration with attribute noinline", newdecl);
}
else if (DECL_DECLARED_INLINE_P (olddecl)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl)))
{
- warned |= warning (OPT_Wattributes,
+ warned |= warning (OPT_Wattributes,
"declaration of %q+D with attribute "
"noinline follows inline declaration ", newdecl);
}
@@ -2812,8 +2812,8 @@ implicit_decl_warning (tree id, tree olddecl)
if (flag_isoc99)
warned = pedwarn (input_location, OPT_Wimplicit_function_declaration,
"implicit declaration of function %qE", id);
- else
- warned = warning (OPT_Wimplicit_function_declaration,
+ else
+ warned = warning (OPT_Wimplicit_function_declaration,
G_("implicit declaration of function %qE"), id);
if (olddecl && warned)
locate_old_decl (olddecl);
@@ -3497,10 +3497,10 @@ c_make_fname_decl (location_t loc, tree id, int type_dep)
if (current_function_decl
/* For invalid programs like this:
-
+
void foo()
const char* p = __FUNCTION__;
-
+
the __FUNCTION__ is believed to appear in K&R style function
parameter declarator. In that case we still don't have
function_scope. */
@@ -4653,7 +4653,7 @@ warn_variable_length_array (tree name, tree size)
}
else
{
- if (name)
+ if (name)
pedwarn (input_location, OPT_Wvla,
"ISO C90 forbids variable length array %qE",
name);
@@ -4880,11 +4880,11 @@ grokdeclarator (const struct c_declarator *declarator,
else
{
if (name)
- pedwarn_c99 (loc, flag_isoc99 ? 0 : OPT_Wimplicit_int,
+ pedwarn_c99 (loc, flag_isoc99 ? 0 : OPT_Wimplicit_int,
"type defaults to %<int%> in declaration of %qE",
name);
else
- pedwarn_c99 (input_location, flag_isoc99 ? 0 : OPT_Wimplicit_int,
+ pedwarn_c99 (input_location, flag_isoc99 ? 0 : OPT_Wimplicit_int,
"type defaults to %<int%> in type name");
}
}
@@ -4946,8 +4946,8 @@ grokdeclarator (const struct c_declarator *declarator,
|| storage_class == csc_typedef))
{
if (storage_class == csc_auto)
- pedwarn (loc,
- (current_scope == file_scope) ? 0 : OPT_pedantic,
+ pedwarn (loc,
+ (current_scope == file_scope) ? 0 : OPT_pedantic,
"function definition declared %<auto%>");
if (storage_class == csc_register)
error_at (loc, "function definition declared %<register%>");
@@ -6833,7 +6833,7 @@ finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
if (pedantic && TREE_CODE (t) == RECORD_TYPE
&& flexible_array_type_p (TREE_TYPE (x)))
- pedwarn (DECL_SOURCE_LOCATION (x), OPT_pedantic,
+ pedwarn (DECL_SOURCE_LOCATION (x), OPT_pedantic,
"invalid use of structure with flexible array member");
if (DECL_NAME (x))
@@ -7284,7 +7284,7 @@ build_enumerator (location_t loc,
(6.4.4.3/2 in the C99 Standard). GCC allows any integer type as
an extension. */
else if (!int_fits_type_p (value, integer_type_node))
- pedwarn (loc, OPT_pedantic,
+ pedwarn (loc, OPT_pedantic,
"ISO C restricts enumerator values to range of %<int%>");
/* The ISO C Standard mandates enumerators to have type int, even
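
The pedwarn above concerns enumerator values that GCC accepts as an extension but ISO C does not. With a 32-bit int and -pedantic the following declaration draws the diagnostic; illustrative only, not part of this patch:

    enum big_values { TOO_BIG = 2147483648u };   /* outside the range of int */
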
@@ -7396,7 +7396,7 @@ start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
}
if (warn_about_return_type)
- pedwarn_c99 (loc, flag_isoc99 ? 0
+ pedwarn_c99 (loc, flag_isoc99 ? 0
: (warn_return_type ? OPT_Wreturn_type : OPT_Wimplicit_int),
"return type defaults to %<int%>");
@@ -7693,7 +7693,7 @@ store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
if (flag_isoc99)
pedwarn (DECL_SOURCE_LOCATION (decl),
0, "type of %qD defaults to %<int%>", decl);
- else
+ else
warning_at (DECL_SOURCE_LOCATION (decl),
OPT_Wmissing_parameter_type,
"type of %qD defaults to %<int%>", decl);
@@ -8039,7 +8039,7 @@ finish_function (void)
c_determine_visibility (fndecl);
/* For GNU C extern inline functions disregard inline limits. */
- if (DECL_EXTERNAL (fndecl)
+ if (DECL_EXTERNAL (fndecl)
&& DECL_DECLARED_INLINE_P (fndecl))
DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;
@@ -8896,7 +8896,7 @@ declspecs_add_type (location_t loc, struct c_declspecs *specs,
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
- {
+ {
const char *str;
if (i == RID_DFLOAT32)
str = "_Decimal32";
@@ -9065,7 +9065,7 @@ declspecs_add_scspec (struct c_declspecs *specs, tree scspec)
&& C_IS_RESERVED_WORD (scspec));
i = C_RID_CODE (scspec);
if (specs->non_sc_seen_p)
- warning (OPT_Wold_style_declaration,
+ warning (OPT_Wold_style_declaration,
"%qE is not at beginning of declaration", scspec);
switch (i)
{
@@ -9187,7 +9187,7 @@ finish_declspecs (struct c_declspecs *specs)
else if (specs->complex_p)
{
specs->typespec_word = cts_double;
- pedwarn (input_location, OPT_pedantic,
+ pedwarn (input_location, OPT_pedantic,
"ISO C does not support plain %<complex%> meaning "
"%<double complex%>");
}
@@ -9232,7 +9232,7 @@ finish_declspecs (struct c_declspecs *specs)
specs->type = char_type_node;
if (specs->complex_p)
{
- pedwarn (input_location, OPT_pedantic,
+ pedwarn (input_location, OPT_pedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
@@ -9258,7 +9258,7 @@ finish_declspecs (struct c_declspecs *specs)
: integer_type_node);
if (specs->complex_p)
{
- pedwarn (input_location, OPT_pedantic,
+ pedwarn (input_location, OPT_pedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
diff --git a/gcc/c-format.c b/gcc/c-format.c
index 6b14f4026c7..3d46bd2c813 100644
--- a/gcc/c-format.c
+++ b/gcc/c-format.c
@@ -1775,7 +1775,7 @@ check_format_info_main (format_check_results *res,
scalar_identity_flag = 0;
if (fli)
{
- while (fli->name != 0
+ while (fli->name != 0
&& strncmp (fli->name, format_chars, strlen (fli->name)))
fli++;
if (fli->name != 0)
diff --git a/gcc/c-lex.c b/gcc/c-lex.c
index fd3df8c0a48..5535eaeb42e 100644
--- a/gcc/c-lex.c
+++ b/gcc/c-lex.c
@@ -433,7 +433,7 @@ c_lex_with_flags (tree *value, location_t *loc, unsigned char *cpp_flags,
}
*value = build_string (tok->val.str.len, (const char *) tok->val.str.text);
break;
-
+
case CPP_PRAGMA:
*value = build_int_cst (NULL, tok->val.pragma);
break;
@@ -588,11 +588,11 @@ interpret_integer (const cpp_token *token, unsigned int flags)
type = integer_types[itk];
if (itk > itk_unsigned_long
&& (flags & CPP_N_WIDTH) != CPP_N_LARGE)
- emit_diagnostic
+ emit_diagnostic
((c_dialect_cxx () ? cxx_dialect == cxx98 : !flag_isoc99)
? DK_PEDWARN : DK_WARNING,
input_location, OPT_Wlong_long,
- (flags & CPP_N_UNSIGNED)
+ (flags & CPP_N_UNSIGNED)
? "integer constant is too large for %<unsigned long%> type"
: "integer constant is too large for %<long%> type");
}
@@ -689,9 +689,9 @@ interpret_float (const cpp_token *token, unsigned int flags)
has any suffixes, cut them off; REAL_VALUE_ATOF/ REAL_VALUE_HTOF
can't handle them. */
copylen = token->val.str.len;
- if (flags & CPP_N_DFLOAT)
+ if (flags & CPP_N_DFLOAT)
copylen -= 2;
- else
+ else
{
if ((flags & CPP_N_WIDTH) != CPP_N_MEDIUM)
/* Must be an F or L or machine defined suffix. */
@@ -732,7 +732,7 @@ interpret_float (const cpp_token *token, unsigned int flags)
{
REAL_VALUE_TYPE realvoidmode;
int overflow = real_from_string (&realvoidmode, copy);
- if (overflow < 0 || !REAL_VALUES_EQUAL (realvoidmode, dconst0))
+ if (overflow < 0 || !REAL_VALUES_EQUAL (realvoidmode, dconst0))
warning (OPT_Woverflow, "floating constant truncated to zero");
}
diff --git a/gcc/c-omp.c b/gcc/c-omp.c
index 6445e5d42ee..6f1cb99720e 100644
--- a/gcc/c-omp.c
+++ b/gcc/c-omp.c
@@ -1,4 +1,4 @@
-/* This file contains routines to construct GNU OpenMP constructs,
+/* This file contains routines to construct GNU OpenMP constructs,
called from parsing in the C and C++ front ends.
Copyright (C) 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
@@ -154,7 +154,7 @@ c_finish_omp_atomic (location_t loc, enum tree_code code, tree lhs, tree rhs)
input_location, rhs, NULL_TREE);
if (x == error_mark_node)
return error_mark_node;
- gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
+ gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
rhs = TREE_OPERAND (x, 1);
/* Punt the actual generation of atomic operations to common code. */
@@ -275,7 +275,7 @@ c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
fail = true;
}
- init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
+ init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
/* FIXME diagnostics: This should
be the location of the INIT. */
elocus,
diff --git a/gcc/c-opts.c b/gcc/c-opts.c
index e026fd97dc7..cf1e3723da1 100644
--- a/gcc/c-opts.c
+++ b/gcc/c-opts.c
@@ -270,7 +270,7 @@ c_common_handle_option (size_t scode, const char *arg, int value)
/* Prevent resetting the language standard to a C dialect when the driver
has already determined that we're looking at assembler input. */
bool preprocessing_asm_p = (cpp_get_options (parse_in)->lang == CLK_ASM);
-
+
switch (code)
{
default:
@@ -466,10 +466,10 @@ c_common_handle_option (size_t scode, const char *arg, int value)
global_dc->warning_as_error_requested = value;
break;
- case OPT_Werror_implicit_function_declaration:
+ case OPT_Werror_implicit_function_declaration:
/* For backward compatibility, this is the same as
-Werror=implicit-function-declaration. */
- enable_warning_as_error ("implicit-function-declaration", value, CL_C | CL_ObjC);
+ enable_warning_as_error ("implicit-function-declaration", value, CL_C | CL_ObjC);
break;
case OPT_Wformat:
@@ -1145,7 +1145,7 @@ c_common_post_options (const char **pfilename)
}
/* -Wimplicit-function-declaration is enabled by default for C99. */
- if (warn_implicit_function_declaration == -1)
+ if (warn_implicit_function_declaration == -1)
warn_implicit_function_declaration = flag_isoc99;
/* If we're allowing C++0x constructs, don't warn about C++0x
@@ -1435,7 +1435,7 @@ sanitize_cpp_opts (void)
/* Wlong-long is disabled by default. It is enabled by:
[-pedantic | -Wtraditional] -std=[gnu|c]++98 ; or
- [-pedantic | -Wtraditional] -std=non-c99 .
+ [-pedantic | -Wtraditional] -std=non-c99 .
Either -Wlong-long or -Wno-long-long override any other settings. */
if (warn_long_long == -1)
diff --git a/gcc/c-parser.c b/gcc/c-parser.c
index 1a6012e9128..37f77963513 100644
--- a/gcc/c-parser.c
+++ b/gcc/c-parser.c
@@ -992,7 +992,7 @@ c_parser_translation_unit (c_parser *parser)
{
if (c_parser_next_token_is (parser, CPP_EOF))
{
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"ISO C forbids an empty translation unit");
}
else
@@ -1078,7 +1078,7 @@ c_parser_external_declaration (c_parser *parser)
}
break;
case CPP_SEMICOLON:
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"ISO C does not allow extra %<;%> outside of a function");
c_parser_consume_token (parser);
break;
@@ -1158,9 +1158,9 @@ c_parser_external_declaration (c_parser *parser)
C we also allow but diagnose declarations without declaration
specifiers, but only at top level (elsewhere they conflict with
other syntax).
-
+
OpenMP:
-
+
declaration:
threadprivate-directive */
@@ -1908,7 +1908,7 @@ c_parser_struct_or_union_specifier (c_parser *parser)
/* Parse any stray semicolon. */
if (c_parser_next_token_is (parser, CPP_SEMICOLON))
{
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"extra semicolon in struct or union specified");
c_parser_consume_token (parser);
continue;
@@ -1937,7 +1937,7 @@ c_parser_struct_or_union_specifier (c_parser *parser)
else
{
if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
- pedwarn (c_parser_peek_token (parser)->location, 0,
+ pedwarn (c_parser_peek_token (parser)->location, 0,
"no semicolon at end of struct or union");
else
{
@@ -2033,7 +2033,7 @@ c_parser_struct_declaration (c_parser *parser)
tree ret;
if (!specs->type_seen_p)
{
- pedwarn (decl_loc, OPT_pedantic,
+ pedwarn (decl_loc, OPT_pedantic,
"ISO C forbids member declarations with no members");
shadow_tag_warned (specs, pedantic);
ret = NULL_TREE;
@@ -2414,7 +2414,7 @@ c_parser_direct_declarator_inner (c_parser *parser, bool id_present,
/* Parse a sequence of array declarators and parameter lists. */
if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
{
- location_t brace_loc = c_parser_peek_token (parser)->location;
+ location_t brace_loc = c_parser_peek_token (parser)->location;
struct c_declarator *declarator;
struct c_declspecs *quals_attrs = build_null_declspecs ();
bool static_seen;
@@ -3143,7 +3143,7 @@ c_parser_initelt (c_parser *parser)
/* Old-style structure member designator. */
set_init_label (c_parser_peek_token (parser)->value);
/* Use the colon as the error location. */
- pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_pedantic,
"obsolete use of designated initializer with %<:%>");
c_parser_consume_token (parser);
c_parser_consume_token (parser);
@@ -3278,7 +3278,7 @@ c_parser_initelt (c_parser *parser)
c_parser_consume_token (parser);
set_init_index (first, second);
if (second)
- pedwarn (ellipsis_loc, OPT_pedantic,
+ pedwarn (ellipsis_loc, OPT_pedantic,
"ISO C forbids specifying range of elements to initialize");
}
else
@@ -3291,14 +3291,14 @@ c_parser_initelt (c_parser *parser)
if (c_parser_next_token_is (parser, CPP_EQ))
{
if (!flag_isoc99)
- pedwarn (des_loc, OPT_pedantic,
+ pedwarn (des_loc, OPT_pedantic,
"ISO C90 forbids specifying subobject to initialize");
c_parser_consume_token (parser);
}
else
{
if (des_seen == 1)
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"obsolete use of designated initializer without %<=%>");
else
{
@@ -3385,9 +3385,9 @@ c_parser_initval (c_parser *parser, struct c_expr *after)
old parser in requiring something after label declarations.
Although they are erroneous if the labels declared aren't defined,
is it useful for the syntax to be this way?
-
+
OpenMP:
-
+
block-item:
openmp-directive
@@ -3496,7 +3496,7 @@ c_parser_compound_statement_nostart (c_parser *parser)
mark_valid_location_for_stdc_pragma (false);
c_parser_declaration_or_fndef (parser, true, true, true, true);
if (last_stmt)
- pedwarn_c90 (loc,
+ pedwarn_c90 (loc,
(pedantic && !flag_isoc99)
? OPT_pedantic
: OPT_Wdeclaration_after_statement,
@@ -3553,13 +3553,13 @@ c_parser_compound_statement_nostart (c_parser *parser)
}
else if (c_parser_next_token_is_keyword (parser, RID_ELSE))
{
- if (parser->in_if_block)
+ if (parser->in_if_block)
{
mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
error_at (loc, """expected %<}%> before %<else%>");
return;
}
- else
+ else
{
error_at (loc, "%<else%> without a previous %<if%>");
c_parser_consume_token (parser);
@@ -3658,7 +3658,7 @@ c_parser_label (c_parser *parser)
error_at (c_parser_peek_token (parser)->location,
"a label can only be part of a statement and "
"a declaration is not a statement");
- c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false,
+ c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false,
/*nested*/ true, /*empty_ok*/ false,
/*start_attr_ok*/ true);
}
@@ -4014,7 +4014,7 @@ c_parser_else_body (c_parser *parser)
add_stmt (build_empty_stmt (loc));
c_parser_consume_token (parser);
}
- else
+ else
c_parser_statement_after_labels (parser);
return c_end_compound_stmt (else_loc, block, flag_isoc99);
}
@@ -4501,7 +4501,7 @@ c_parser_asm_clobbers (c_parser *parser)
}
/* Parse asm goto labels, a GNU extension.
-
+
asm-goto-operands:
identifier
asm-goto-operands , identifier
@@ -4652,7 +4652,7 @@ c_parser_conditional_expression (c_parser *parser, struct c_expr *after)
if (c_parser_next_token_is (parser, CPP_COLON))
{
tree eptype = NULL_TREE;
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"ISO C forbids omitting the middle term of a ?: expression");
if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR)
{
@@ -5436,7 +5436,7 @@ c_parser_postfix_expression (c_parser *parser)
c_parser_compound_statement_nostart (parser);
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
- pedwarn (loc, OPT_pedantic,
+ pedwarn (loc, OPT_pedantic,
"ISO C forbids braced-groups within expressions");
expr.value = c_finish_stmt_expr (brace_loc, stmt);
}
@@ -6216,7 +6216,7 @@ c_parser_objc_class_instance_variables (c_parser *parser)
/* Parse any stray semicolon. */
if (c_parser_next_token_is (parser, CPP_SEMICOLON))
{
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"extra semicolon in struct or union specified");
c_parser_consume_token (parser);
continue;
@@ -6433,7 +6433,7 @@ c_parser_objc_method_definition (c_parser *parser)
if (c_parser_next_token_is (parser, CPP_SEMICOLON))
{
c_parser_consume_token (parser);
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"extra semicolon in method definition specified");
}
if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
@@ -6470,7 +6470,7 @@ c_parser_objc_methodprotolist (c_parser *parser)
switch (c_parser_peek_token (parser)->type)
{
case CPP_SEMICOLON:
- pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
+ pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic,
"ISO C does not allow extra %<;%> outside of a function");
c_parser_consume_token (parser);
break;
@@ -7046,7 +7046,7 @@ c_parser_pragma (c_parser *parser, enum pragma_context context)
c_parser_consume_pragma (parser);
c_invoke_pragma_handler (id);
- /* Skip to EOL, but suppress any error message. Those will have been
+ /* Skip to EOL, but suppress any error message. Those will have been
generated by the handler routine through calling error, as opposed
to calling c_parser_error. */
parser->error = true;
@@ -7810,7 +7810,7 @@ c_parser_omp_structured_block (c_parser *parser)
binop:
+, *, -, /, &, ^, |, <<, >>
- where x is an lvalue expression with scalar type.
+ where x is an lvalue expression with scalar type.
LOC is the location of the #pragma token. */
@@ -8320,7 +8320,7 @@ c_parser_omp_ordered (location_t loc, c_parser *parser)
section-sequence:
section-directive[opt] structured-block
- section-sequence section-directive structured-block
+ section-sequence section-directive structured-block
SECTIONS_LOC is the location of the #pragma omp sections. */
diff --git a/gcc/c-pretty-print.c b/gcc/c-pretty-print.c
index 01770014c21..352ee098a0e 100644
--- a/gcc/c-pretty-print.c
+++ b/gcc/c-pretty-print.c
@@ -847,7 +847,7 @@ pp_c_integer_constant (c_pretty_printer *pp, tree i)
high = ~high + !low;
low = -low;
}
- sprintf (pp_buffer (pp)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ sprintf (pp_buffer (pp)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
(unsigned HOST_WIDE_INT) high, (unsigned HOST_WIDE_INT) low);
pp_string (pp, pp_buffer (pp)->digit_buffer);
}
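
As an aside to the pp_c_integer_constant hunk above (illustration only, not
part of this patch): the code prints a negative double-word constant by
negating the (high, low) word pair in place with "high = ~high + !low;
low = -low;".  A minimal standalone sketch of that two-word negation, with
hypothetical names and no GCC dependencies:

#include <stdint.h>
#include <stdio.h>

/* Negate a 128-bit value stored as two 64-bit words (two's complement).
   LOW is negated directly; HIGH is complemented and gets a carry only
   when LOW was zero, mirroring "high = ~high + !low; low = -low;".  */
static void
negate_double_word (uint64_t *high, uint64_t *low)
{
  *high = ~*high + (*low == 0);
  *low = -*low;
}

int
main (void)
{
  uint64_t high = 0, low = 5;        /* the value 5 */
  negate_double_word (&high, &low);  /* now -5, i.e. ffff...fffb */
  printf ("%016llx%016llx\n",
          (unsigned long long) high, (unsigned long long) low);
  return 0;
}
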
@@ -1953,7 +1953,7 @@ pp_c_conditional_expression (c_pretty_printer *pp, tree e)
static void
pp_c_assignment_expression (c_pretty_printer *pp, tree e)
{
- if (TREE_CODE (e) == MODIFY_EXPR
+ if (TREE_CODE (e) == MODIFY_EXPR
|| TREE_CODE (e) == INIT_EXPR)
{
pp_c_unary_expression (pp, TREE_OPERAND (e, 0));
diff --git a/gcc/c-tree.h b/gcc/c-tree.h
index e71771ae840..2309d51d589 100644
--- a/gcc/c-tree.h
+++ b/gcc/c-tree.h
@@ -521,7 +521,7 @@ extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
-extern struct c_expr parser_build_binary_op (location_t,
+extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
diff --git a/gcc/c-typeck.c b/gcc/c-typeck.c
index 9b1f09c4593..184b4063709 100644
--- a/gcc/c-typeck.c
+++ b/gcc/c-typeck.c
@@ -566,7 +566,7 @@ composite_type (tree t1, tree t2)
{
TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
TREE_VALUE (p2));
- pedwarn (input_location, OPT_pedantic,
+ pedwarn (input_location, OPT_pedantic,
"function types not truly compatible in ISO C");
goto parm_done;
}
@@ -591,7 +591,7 @@ composite_type (tree t1, tree t2)
{
TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
TREE_VALUE (p1));
- pedwarn (input_location, OPT_pedantic,
+ pedwarn (input_location, OPT_pedantic,
"function types not truly compatible in ISO C");
goto parm_done;
}
@@ -2261,10 +2261,10 @@ build_array_ref (location_t loc, tree array, tree index)
while (TREE_CODE (foo) == COMPONENT_REF)
foo = TREE_OPERAND (foo, 0);
if (TREE_CODE (foo) == VAR_DECL && C_DECL_REGISTER (foo))
- pedwarn (loc, OPT_pedantic,
+ pedwarn (loc, OPT_pedantic,
"ISO C forbids subscripting %<register%> array");
else if (!flag_isoc99 && !lvalue_p (foo))
- pedwarn (loc, OPT_pedantic,
+ pedwarn (loc, OPT_pedantic,
"ISO C90 forbids subscripting non-lvalue array");
}
@@ -2347,7 +2347,7 @@ build_external_ref (location_t loc, tree id, int fun, tree *type)
warn_deprecated_use (ref, NULL_TREE);
/* Recursive call does not count as usage. */
- if (ref != current_function_decl)
+ if (ref != current_function_decl)
{
TREE_USED (ref) = 1;
}
@@ -2566,7 +2566,7 @@ build_function_call_vec (location_t loc, tree function, VEC(tree,gc) *params,
tree tem;
int nargs;
tree *argarray;
-
+
/* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */
STRIP_TYPE_NOPS (function);
@@ -2690,7 +2690,7 @@ build_function_call_vec (location_t loc, tree function, VEC(tree,gc) *params,
&& !strncmp (IDENTIFIER_POINTER (name), "__builtin_", 10))
{
if (require_constant_value)
- result =
+ result =
fold_build_call_array_initializer_loc (loc, TREE_TYPE (fntype),
function, nargs, argarray);
else
@@ -3107,8 +3107,8 @@ parser_build_binary_op (location_t location, enum tree_code code,
warning_at (location, OPT_Waddress,
"comparison with string literal results in unspecified behavior");
- if (TREE_OVERFLOW_P (result.value)
- && !TREE_OVERFLOW_P (arg1.value)
+ if (TREE_OVERFLOW_P (result.value)
+ && !TREE_OVERFLOW_P (arg1.value)
&& !TREE_OVERFLOW_P (arg2.value))
overflow_warning (location, result.value);
@@ -3170,10 +3170,10 @@ pointer_diff (location_t loc, tree op0, tree op1)
if (TREE_CODE (target_type) == VOID_TYPE)
- pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"pointer of type %<void *%> used in subtraction");
if (TREE_CODE (target_type) == FUNCTION_TYPE)
- pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"pointer to a function used in subtraction");
/* If the conversion to ptrdiff_type does anything like widening or
@@ -3337,7 +3337,7 @@ build_unary_op (location_t location,
else if (typecode == COMPLEX_TYPE)
{
code = CONJ_EXPR;
- pedwarn (location, OPT_pedantic,
+ pedwarn (location, OPT_pedantic,
"ISO C does not support %<~%> for complex conjugation");
if (!noconvert)
arg = default_conversion (arg);
@@ -3456,7 +3456,7 @@ build_unary_op (location_t location,
{
tree real, imag;
- pedwarn (location, OPT_pedantic,
+ pedwarn (location, OPT_pedantic,
"ISO C does not support %<++%> and %<--%> on complex types");
arg = stabilize_reference (arg);
@@ -3507,10 +3507,10 @@ build_unary_op (location_t location,
|| TREE_CODE (TREE_TYPE (argtype)) == VOID_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
- pedwarn (location, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (location, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"wrong type argument to increment");
else
- pedwarn (location, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
+ pedwarn (location, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"wrong type argument to decrement");
}
@@ -4229,7 +4229,7 @@ build_compound_expr (location_t loc, tree expr1, tree expr2)
&& CONVERT_EXPR_P (TREE_OPERAND (expr1, 1)))
; /* (void) a, (void) b, c */
else
- warning_at (loc, OPT_Wunused_value,
+ warning_at (loc, OPT_Wunused_value,
"left-hand operand of comma expression has no effect");
}
}
@@ -4348,7 +4348,7 @@ handle_warn_cast_qual (tree type, tree otype)
while (TREE_CODE (in_type) == POINTER_TYPE);
}
-/* Build an expression representing a cast to type TYPE of expression EXPR.
+/* Build an expression representing a cast to type TYPE of expression EXPR.
LOC is the location of the cast-- typically the open paren of the cast. */
tree
@@ -4395,7 +4395,7 @@ build_c_cast (location_t loc, tree type, tree expr)
{
if (TREE_CODE (type) == RECORD_TYPE
|| TREE_CODE (type) == UNION_TYPE)
- pedwarn (loc, OPT_pedantic,
+ pedwarn (loc, OPT_pedantic,
"ISO C forbids casting nonscalar to the same type");
}
else if (TREE_CODE (type) == UNION_TYPE)
@@ -4633,7 +4633,7 @@ c_cast_expr (location_t loc, struct c_type_name *type_name, tree expr)
tree
build_modify_expr (location_t location, tree lhs, tree lhs_origtype,
- enum tree_code modifycode,
+ enum tree_code modifycode,
location_t rhs_loc, tree rhs, tree rhs_origtype)
{
tree result;
@@ -5096,7 +5096,7 @@ convert_for_assignment (location_t location, tree type, tree rhs,
}
if (!fundecl || !DECL_IN_SYSTEM_HEADER (fundecl))
- pedwarn (location, OPT_pedantic,
+ pedwarn (location, OPT_pedantic,
"ISO C prohibits argument conversion to union type");
rhs = fold_convert_loc (location, TREE_TYPE (memb), rhs);
@@ -5435,7 +5435,7 @@ store_init_value (location_t init_loc, tree decl, tree init, tree origtype)
/* ANSI wants warnings about out-of-range constant initializers. */
STRIP_TYPE_NOPS (value);
- if (TREE_STATIC (decl))
+ if (TREE_STATIC (decl))
constant_expression_warning (value);
/* Check if we need to set array size from compound literal size. */
@@ -5620,7 +5620,7 @@ pedwarn_init (location_t location, int opt, const char *msgid)
pedwarn (location, opt, "(near initialization for %qs)", ofwhat);
}
-/* Issue a warning for a bad initializer component.
+/* Issue a warning for a bad initializer component.
OPT is the OPT_W* value corresponding to the warning option that
controls this warning. MSGID identifies the message. The
@@ -5648,7 +5648,7 @@ maybe_warn_string_init (tree type, struct c_expr expr)
&& TREE_CODE (type) == ARRAY_TYPE
&& TREE_CODE (expr.value) == STRING_CST
&& expr.original_code != STRING_CST)
- pedwarn_init (input_location, OPT_pedantic,
+ pedwarn_init (input_location, OPT_pedantic,
"array initialized from parenthesized string constant");
}
@@ -8264,7 +8264,7 @@ c_finish_return (location_t loc, tree retval, tree origtype)
if ((warn_return_type || flag_isoc99)
&& valtype != 0 && TREE_CODE (valtype) != VOID_TYPE)
{
- pedwarn_c99 (loc, flag_isoc99 ? 0 : OPT_Wreturn_type,
+ pedwarn_c99 (loc, flag_isoc99 ? 0 : OPT_Wreturn_type,
"%<return%> with no value, in "
"function returning non-void");
no_warning = true;
@@ -8274,9 +8274,9 @@ c_finish_return (location_t loc, tree retval, tree origtype)
{
current_function_returns_null = 1;
if (TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE)
- pedwarn (loc, 0,
+ pedwarn (loc, 0,
"%<return%> with a value, in function returning void");
- else
+ else
pedwarn (loc, OPT_pedantic, "ISO C forbids "
"%<return%> with expression, in function returning void");
}
@@ -9499,7 +9499,7 @@ build_binary_op (location_t location, enum tree_code code,
{
result_type = type0;
if (pedantic)
- pedwarn (location, OPT_pedantic,
+ pedwarn (location, OPT_pedantic,
"ordered comparison of pointer with integer zero");
else if (extra_warnings)
warning_at (location, OPT_Wextra,
@@ -9508,7 +9508,7 @@ build_binary_op (location_t location, enum tree_code code,
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
result_type = type1;
- pedwarn (location, OPT_pedantic,
+ pedwarn (location, OPT_pedantic,
"ordered comparison of pointer with integer zero");
}
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
@@ -9653,7 +9653,7 @@ build_binary_op (location_t location, enum tree_code code,
if (shorten && none_complex)
{
final_type = result_type;
- result_type = shorten_binary_op (result_type, op0, op1,
+ result_type = shorten_binary_op (result_type, op0, op1,
shorten == -1);
}
@@ -10220,11 +10220,11 @@ c_build_qualified_type (tree type, int type_quals)
else if (TYPE_CANONICAL (element_type) != element_type
|| (domain && TYPE_CANONICAL (domain) != domain))
{
- tree unqualified_canon
+ tree unqualified_canon
= build_array_type (TYPE_CANONICAL (element_type),
- domain? TYPE_CANONICAL (domain)
+ domain? TYPE_CANONICAL (domain)
: NULL_TREE);
- TYPE_CANONICAL (t)
+ TYPE_CANONICAL (t)
= c_build_qualified_type (unqualified_canon, type_quals);
}
else
diff --git a/gcc/caller-save.c b/gcc/caller-save.c
index 377ffad5e55..e12deb73ead 100644
--- a/gcc/caller-save.c
+++ b/gcc/caller-save.c
@@ -311,7 +311,7 @@ init_save_areas (void)
for (j = 1; j <= MOVE_MAX_WORDS; j++)
regno_save_mem[i][j] = 0;
save_slots_num = 0;
-
+
}
/* The structure represents a hard register which should be saved
@@ -394,7 +394,7 @@ saved_hard_reg_compare_func (const void *v1p, const void *v2p)
{
const struct saved_hard_reg *p1 = *(struct saved_hard_reg * const *) v1p;
const struct saved_hard_reg *p2 = *(struct saved_hard_reg * const *) v2p;
-
+
if (flag_omit_frame_pointer)
{
if (p1->call_freq - p2->call_freq != 0)
@@ -467,7 +467,7 @@ setup_save_areas (void)
int best_slot_num;
int prev_save_slots_num;
rtx prev_save_slots[FIRST_PSEUDO_REGISTER];
-
+
initiate_saved_hard_regs ();
/* Create hard reg saved regs. */
for (chain = reload_insn_chain; chain != 0; chain = next)
@@ -510,10 +510,10 @@ setup_save_areas (void)
{
int r = reg_renumber[regno];
int bound;
-
+
if (r < 0)
continue;
-
+
bound = r + hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)];
for (; r < bound; r++)
if (TEST_HARD_REG_BIT (used_regs, r))
@@ -568,7 +568,7 @@ setup_save_areas (void)
{
int r = reg_renumber[regno];
int bound;
-
+
if (r < 0)
continue;
@@ -686,17 +686,17 @@ setup_save_areas (void)
/* Now run through all the call-used hard-registers and allocate
space for them in the caller-save area. Try to allocate space
in a manner which allows multi-register saves/restores to be done. */
-
+
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
for (j = MOVE_MAX_WORDS; j > 0; j--)
{
int do_save = 1;
-
+
/* If no mode exists for this size, try another. Also break out
if we have already saved this hard register. */
if (regno_save_mode[i][j] == VOIDmode || regno_save_mem[i][1] != 0)
continue;
-
+
/* See if any register in this group has been saved. */
for (k = 0; k < j; k++)
if (regno_save_mem[i + k][1])
@@ -706,7 +706,7 @@ setup_save_areas (void)
}
if (! do_save)
continue;
-
+
for (k = 0; k < j; k++)
if (! TEST_HARD_REG_BIT (hard_regs_used, i + k))
{
@@ -715,7 +715,7 @@ setup_save_areas (void)
}
if (! do_save)
continue;
-
+
/* We have found an acceptable mode to store in. Since
hard register is always saved in the widest mode
available, the mode may be wider than necessary, it is
@@ -727,7 +727,7 @@ setup_save_areas (void)
= assign_stack_local_1 (regno_save_mode[i][j],
GET_MODE_SIZE (regno_save_mode[i][j]),
0, true);
-
+
/* Setup single word save area just in case... */
for (k = 0; k < j; k++)
/* This should not depend on WORDS_BIG_ENDIAN.
diff --git a/gcc/calls.c b/gcc/calls.c
index 5528ab9fbeb..34fde8b5280 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -903,7 +903,7 @@ store_unaligned_arguments_into_pseudos (struct arg_data *args, int num_actuals)
}
/* Fill in ARGS_SIZE and ARGS array based on the parameters found in
- CALL_EXPR EXP.
+ CALL_EXPR EXP.
NUM_ACTUALS is the total number of parameters.
@@ -1343,7 +1343,7 @@ precompute_arguments (int num_actuals, struct arg_data *args)
compute and return the final value for MUST_PREALLOCATE. */
static int
-finalize_must_preallocate (int must_preallocate, int num_actuals,
+finalize_must_preallocate (int must_preallocate, int num_actuals,
struct arg_data *args, struct args_size *args_size)
{
/* See if we have or want to preallocate stack space.
diff --git a/gcc/cfg.c b/gcc/cfg.c
index 550f8f13d3b..834bb5cc2bd 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -92,9 +92,9 @@ init_flow (struct function *the_fun)
EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)
= GGC_CNEW (struct basic_block_def);
EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->index = EXIT_BLOCK;
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->next_bb
+ ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->next_bb
= EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun);
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->prev_bb
+ EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->prev_bb
= ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun);
}
@@ -171,13 +171,13 @@ compact_blocks (void)
SET_BASIC_BLOCK (ENTRY_BLOCK, ENTRY_BLOCK_PTR);
SET_BASIC_BLOCK (EXIT_BLOCK, EXIT_BLOCK_PTR);
-
+
if (df)
df_compact_blocks ();
- else
+ else
{
basic_block bb;
-
+
i = NUM_FIXED_BLOCKS;
FOR_EACH_BB (bb)
{
@@ -433,7 +433,7 @@ clear_bb_flags (void)
basic_block bb;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- bb->flags = (BB_PARTITION (bb)
+ bb->flags = (BB_PARTITION (bb)
| (bb->flags & (BB_DISABLE_SCHEDULE + BB_RTL + BB_NON_LOCAL_GOTO_TARGET)));
}
@@ -544,7 +544,7 @@ dump_bb_info (basic_block bb, bool header, bool footer, int flags,
fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count);
fprintf (file, ", freq %i", bb->frequency);
/* Both maybe_hot_bb_p & probably_never_executed_bb_p functions
- crash without cfun. */
+ crash without cfun. */
if (cfun && maybe_hot_bb_p (bb))
fputs (", maybe hot", file);
if (cfun && probably_never_executed_bb_p (bb))
@@ -584,7 +584,7 @@ dump_bb_info (basic_block bb, bool header, bool footer, int flags,
/* Dump the register info to FILE. */
-void
+void
dump_reg_info (FILE *file)
{
unsigned int i, max = max_reg_num ();
@@ -598,14 +598,14 @@ dump_reg_info (FILE *file)
for (i = FIRST_PSEUDO_REGISTER; i < max; i++)
{
enum reg_class rclass, altclass;
-
+
if (regstat_n_sets_and_refs)
fprintf (file, "\nRegister %d used %d times across %d insns",
i, REG_N_REFS (i), REG_LIVE_LENGTH (i));
else if (df)
fprintf (file, "\nRegister %d used %d times across %d insns",
i, DF_REG_USE_COUNT (i) + DF_REG_DEF_COUNT (i), REG_LIVE_LENGTH (i));
-
+
if (REG_BASIC_BLOCK (i) >= NUM_FIXED_BLOCKS)
fprintf (file, " in block %d", REG_BASIC_BLOCK (i));
if (regstat_n_sets_and_refs)
@@ -627,7 +627,7 @@ dump_reg_info (FILE *file)
if (regno_reg_rtx[i] != NULL
&& PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD)
fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i));
-
+
rclass = reg_preferred_class (i);
altclass = reg_alternate_class (i);
if (rclass != GENERAL_REGS || altclass != ALL_REGS)
@@ -641,7 +641,7 @@ dump_reg_info (FILE *file)
reg_class_names[(int) rclass],
reg_class_names[(int) altclass]);
}
-
+
if (regno_reg_rtx[i] != NULL && REG_POINTER (regno_reg_rtx[i]))
fputs ("; pointer", file);
fputs (".\n", file);
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index 75cb49d293c..22a0503c013 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -653,7 +653,7 @@ connect_infinite_loops_to_exit (void)
true, unreachable blocks are deleted. */
int
-post_order_compute (int *post_order, bool include_entry_exit,
+post_order_compute (int *post_order, bool include_entry_exit,
bool delete_unreachable)
{
edge_iterator *stack;
@@ -719,9 +719,9 @@ post_order_compute (int *post_order, bool include_entry_exit,
post_order[post_order_num++] = ENTRY_BLOCK;
count = post_order_num;
}
- else
+ else
count = post_order_num + 2;
-
+
/* Delete the unreachable blocks if some were found and we are
supposed to do it. */
if (delete_unreachable && (count != n_basic_blocks))
@@ -731,11 +731,11 @@ post_order_compute (int *post_order, bool include_entry_exit,
for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
{
next_bb = b->next_bb;
-
+
if (!(TEST_BIT (visited, b->index)))
delete_basic_block (b);
}
-
+
tidy_fallthru_edges ();
}
@@ -745,7 +745,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
}
-/* Helper routine for inverted_post_order_compute.
+/* Helper routine for inverted_post_order_compute.
BB has to belong to a region of CFG
unreachable by inverted traversal from the exit.
i.e. there's no control flow path from ENTRY to EXIT
@@ -753,8 +753,8 @@ post_order_compute (int *post_order, bool include_entry_exit,
This can happen in two cases - if there's an infinite loop
or if there's a block that has no successor
(call to a function with no return).
- Some RTL passes deal with this condition by
- calling connect_infinite_loops_to_exit () and/or
+ Some RTL passes deal with this condition by
+ calling connect_infinite_loops_to_exit () and/or
add_noreturn_fake_exit_edges ().
However, those methods involve modifying the CFG itself
which may not be desirable.
@@ -801,12 +801,12 @@ dfs_find_deadend (basic_block bb)
with no successors can't visit all blocks.
To solve this problem, we first do inverted traversal
starting from the blocks with no successor.
- And if there's any block left that's not visited by the regular
+ And if there's any block left that's not visited by the regular
inverted traversal from EXIT,
those blocks are in such problematic region.
- Among those, we find one block that has
+ Among those, we find one block that has
any visited predecessor (which is an entry into such a region),
- and start looking for a "dead end" from that block
+ and start looking for a "dead end" from that block
and do another inverted traversal from that block. */
int
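
As an aside to the inverted_post_order_compute comments above (a toy sketch,
not the code being patched): the idea is to walk predecessor edges starting
from blocks with no successors, so regions that never reach EXIT are still
noticed.  A self-contained version over a small hard-coded graph:

#include <stdio.h>
#include <stdbool.h>

#define N 5

/* succ[i][j] != 0 means there is an edge i -> j.  Block 4 is the only
   sink; blocks 2 and 3 form a cycle with no path to it, which is the
   problematic region the comments describe.  */
static const int succ[N][N] = {
  {0, 1, 1, 0, 0},   /* 0 -> 1, 0 -> 2 */
  {0, 0, 0, 0, 1},   /* 1 -> 4 */
  {0, 0, 0, 1, 0},   /* 2 -> 3 */
  {0, 0, 1, 0, 0},   /* 3 -> 2 */
  {0, 0, 0, 0, 0},   /* 4: no successors */
};

static bool visited[N];
static int post_order[N];
static int post_num;

/* Depth-first search over *predecessor* edges of BB.  */
static void
inverted_dfs (int bb)
{
  visited[bb] = true;
  for (int p = 0; p < N; p++)
    if (succ[p][bb] && !visited[p])
      inverted_dfs (p);
  post_order[post_num++] = bb;
}

int
main (void)
{
  /* Start from every block with no successors.  */
  for (int bb = 0; bb < N; bb++)
    {
      bool has_succ = false;
      for (int s = 0; s < N; s++)
        if (succ[bb][s])
          has_succ = true;
      if (!has_succ && !visited[bb])
        inverted_dfs (bb);
    }

  for (int bb = 0; bb < N; bb++)
    if (!visited[bb])
      printf ("block %d never reaches an exit\n", bb);

  printf ("inverted post-order:");
  for (int i = 0; i < post_num; i++)
    printf (" %d", post_order[i]);
  printf ("\n");
  return 0;
}
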
@@ -833,14 +833,14 @@ inverted_post_order_compute (int *post_order)
if (EDGE_COUNT (bb->succs) == 0)
{
/* Push the initial edge on to the stack. */
- if (EDGE_COUNT (bb->preds) > 0)
+ if (EDGE_COUNT (bb->preds) > 0)
{
stack[sp++] = ei_start (bb->preds);
SET_BIT (visited, bb->index);
}
}
- do
+ do
{
bool has_unvisited_bb = false;
@@ -880,7 +880,7 @@ inverted_post_order_compute (int *post_order)
}
}
- /* Detect any infinite loop and activate the kludge.
+ /* Detect any infinite loop and activate the kludge.
Note that this doesn't check EXIT_BLOCK itself
since EXIT_BLOCK is always added after the outer do-while loop. */
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
@@ -914,7 +914,7 @@ inverted_post_order_compute (int *post_order)
if (has_unvisited_bb && sp == 0)
{
- /* No blocks are reachable from EXIT at all.
+ /* No blocks are reachable from EXIT at all.
Find a dead-end from the ENTRY, and restart the iteration. */
basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR);
gcc_assert (be != NULL);
@@ -922,7 +922,7 @@ inverted_post_order_compute (int *post_order)
stack[sp++] = ei_start (be->preds);
}
- /* The only case the below while fires is
+ /* The only case the below while fires is
when there's an infinite loop. */
}
while (sp);
@@ -940,14 +940,14 @@ inverted_post_order_compute (int *post_order)
REV_POST_ORDER is nonzero, return the reverse completion number for each
node. Returns the number of nodes visited. A depth first search
tries to get as far away from the starting point as quickly as
- possible.
+ possible.
pre_order is really a preorder numbering of the graph.
rev_post_order is really a reverse postorder numbering of the graph.
*/
int
-pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
+pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
bool include_entry_exit)
{
edge_iterator *stack;
@@ -968,7 +968,7 @@ pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
if (rev_post_order)
rev_post_order[rev_post_order_num--] = ENTRY_BLOCK;
}
- else
+ else
rev_post_order_num -= NUM_FIXED_BLOCKS;
/* Allocate bitmap to track nodes that have been visited. */
@@ -1165,12 +1165,12 @@ dfs_enumerate_from (basic_block bb, int reverse,
static sbitmap visited;
static unsigned v_size;
-#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
-#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index))
-#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
+#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
+#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index))
+#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
/* Resize the VISITED sbitmap if necessary. */
- size = last_basic_block;
+ size = last_basic_block;
if (size < 10)
size = 10;
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 3f7b1d28ac6..087b6240b9b 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -61,7 +61,7 @@ gimple_assign_rhs_to_tree (gimple stmt)
{
tree t;
enum gimple_rhs_class grhs_class;
-
+
grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
if (grhs_class == GIMPLE_BINARY_RHS)
@@ -373,7 +373,7 @@ stack_var_conflict_p (size_t x, size_t y)
gcc_assert (index < stack_vars_conflict_alloc);
return stack_vars_conflict[index];
}
-
+
/* Returns true if TYPE is or contains a union type. */
static bool
@@ -962,7 +962,7 @@ defer_stack_allocation (tree var, bool toplevel)
/* A subroutine of expand_used_vars. Expand one variable according to
its flavor. Variables to be placed on the stack are not actually
- expanded yet, merely recorded.
+ expanded yet, merely recorded.
When REALLY_EXPAND is false, only add stack values to be allocated.
Return stack usage this variable is supposed to take.
*/
@@ -1285,7 +1285,7 @@ account_used_vars_for_block (tree block, bool toplevel)
}
/* Prepare for expanding variables. */
-static void
+static void
init_vars_expansion (void)
{
tree t;
@@ -1554,7 +1554,7 @@ label_rtx_for_bb (basic_block bb ATTRIBUTE_UNUSED)
return (rtx) *elt;
/* Find the tree label if it is present. */
-
+
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
lab_stmt = gsi_stmt (gsi);
@@ -3188,7 +3188,7 @@ expand_gimple_basic_block (basic_block bb)
/* Ignore this stmt if it is in the list of
replaceable expressions. */
if (SA.values
- && bitmap_bit_p (SA.values,
+ && bitmap_bit_p (SA.values,
SSA_NAME_VERSION (DEF_FROM_PTR (def_p))))
continue;
}
@@ -3451,7 +3451,7 @@ expand_stack_alignment (void)
if (! SUPPORTS_STACK_ALIGNMENT)
return;
-
+
if (cfun->calls_alloca
|| cfun->has_nonlocal_label
|| crtl->has_nonlocal_goto)
@@ -3496,7 +3496,7 @@ expand_stack_alignment (void)
/* Target has to redefine TARGET_GET_DRAP_RTX to support stack
alignment. */
gcc_assert (targetm.calls.get_drap_rtx != NULL);
- drap_rtx = targetm.calls.get_drap_rtx ();
+ drap_rtx = targetm.calls.get_drap_rtx ();
/* stack_realign_drap and drap_rtx must match. */
gcc_assert ((stack_realign_drap != 0) == (drap_rtx != NULL));
@@ -3575,10 +3575,10 @@ gimple_expand_cfg (void)
if (warn_stack_protect)
{
if (cfun->calls_alloca)
- warning (OPT_Wstack_protector,
+ warning (OPT_Wstack_protector,
"not protecting local variables: variable length buffer");
if (has_short_buffer && !crtl->stack_protect_guard)
- warning (OPT_Wstack_protector,
+ warning (OPT_Wstack_protector,
"not protecting function: no buffer at least %d bytes long",
(int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
}
diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c
index d6690a4b434..ad5fb6b6c4c 100644
--- a/gcc/cfghooks.c
+++ b/gcc/cfghooks.c
@@ -770,7 +770,7 @@ make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge),
&& dummy->loop_father->header == dummy
&& dummy->loop_father->latch == e_src)
dummy->loop_father->latch = jump;
-
+
if (new_bb_cbk != NULL)
new_bb_cbk (jump);
}
diff --git a/gcc/cfghooks.h b/gcc/cfghooks.h
index 8d6a6694cf5..574176805e2 100644
--- a/gcc/cfghooks.h
+++ b/gcc/cfghooks.h
@@ -57,7 +57,7 @@ struct cfg_hooks
/* Creates a new basic block just after basic block B by splitting
everything after specified instruction I. */
basic_block (*split_block) (basic_block b, void * i);
-
+
/* Move block B immediately after block A. */
bool (*move_block_after) (basic_block b, basic_block a);
diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c
index d6d1b3ab697..9d23b16c116 100644
--- a/gcc/cfglayout.c
+++ b/gcc/cfglayout.c
@@ -694,7 +694,7 @@ relink_block_chain (bool stay_in_cfglayout_mode)
free_original_copy_tables ();
if (stay_in_cfglayout_mode)
initialize_original_copy_tables ();
-
+
/* Finally, put basic_block_info in the new order. */
compact_blocks ();
}
@@ -928,7 +928,7 @@ fixup_reorder_chain (void)
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->flags & EDGE_FALLTHRU)
break;
-
+
if (e && !can_fallthru (e->src, e->dest))
force_nonfallthru (e);
}
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index 0f6e797329c..0365f568b30 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -523,7 +523,7 @@ flow_loops_find (struct loops *loops)
profile is usually too flat and unreliable for this (and it is mostly based
on the loop structure of the program, so it does not make much sense to
derive the loop structure from it). */
-
+
static edge
find_subloop_latch_edge_by_profile (VEC (edge, heap) *latches)
{
@@ -656,7 +656,7 @@ form_subloop (struct loop *loop, edge latch)
edge_iterator ei;
edge e, new_entry;
struct loop *new_loop;
-
+
mfb_reis_set = pointer_set_create ();
FOR_EACH_EDGE (e, ei, loop->header->preds)
{
@@ -892,7 +892,7 @@ get_loop_body_in_dom_order (const struct loop *loop)
/* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
basic_block *
-get_loop_body_in_custom_order (const struct loop *loop,
+get_loop_body_in_custom_order (const struct loop *loop,
int (*bb_comparator) (const void *, const void *))
{
basic_block *bbs = get_loop_body (loop);
@@ -983,7 +983,7 @@ loop_exit_free (void *ex)
for (; exit; exit = next)
{
next = exit->next_e;
-
+
exit->next->prev = exit->prev;
exit->prev->next = exit->next;
@@ -1037,7 +1037,7 @@ rescan_loop_exit (edge e, bool new_edge, bool removed)
exit->next_e = exits;
exits = exit;
}
- }
+ }
if (!exits && new_edge)
return;
@@ -1523,7 +1523,7 @@ verify_loop_structure (void)
exit = get_exit_descriptions (e);
if (!exit)
{
- error ("Exit %d->%d not recorded",
+ error ("Exit %d->%d not recorded",
e->src->index, e->dest->index);
err = 1;
}
@@ -1541,7 +1541,7 @@ verify_loop_structure (void)
if (eloops != 0)
{
- error ("Wrong list of exited loops for edge %d->%d",
+ error ("Wrong list of exited loops for edge %d->%d",
e->src->index, e->dest->index);
err = 1;
}
diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h
index 764520711fd..26f9050076a 100644
--- a/gcc/cfgloop.h
+++ b/gcc/cfgloop.h
@@ -58,7 +58,7 @@ struct GTY ((chain_next ("%h.next"))) nb_iter_bound {
b) it is consistent with the result of number_of_iterations_exit. */
double_int bound;
- /* True if the statement will cause the loop to be exited the (at most)
+ /* True if the statement will cause the loop to be exited the (at most)
BOUND + 1-st time it is executed, that is, all the statements after it
are executed at most BOUND times. */
bool is_exit;
@@ -240,7 +240,7 @@ extern unsigned get_loop_body_with_size (const struct loop *, basic_block *,
unsigned);
extern basic_block *get_loop_body_in_dom_order (const struct loop *);
extern basic_block *get_loop_body_in_bfs_order (const struct loop *);
-extern basic_block *get_loop_body_in_custom_order (const struct loop *,
+extern basic_block *get_loop_body_in_custom_order (const struct loop *,
int (*) (const void *, const void *));
extern VEC (edge, heap) *get_loop_exit_edges (const struct loop *);
@@ -293,7 +293,7 @@ extern struct loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
tree *, tree *, struct loop *);
extern struct loop * duplicate_loop (struct loop *, struct loop *);
extern void duplicate_subloops (struct loop *, struct loop *);
-extern bool duplicate_loop_to_header_edge (struct loop *, edge,
+extern bool duplicate_loop_to_header_edge (struct loop *, edge,
unsigned, sbitmap, edge,
VEC (edge, heap) **, int);
extern struct loop *loopify (edge, edge,
diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c
index 6f24415a72d..1c77141af65 100644
--- a/gcc/cfgloopmanip.c
+++ b/gcc/cfgloopmanip.c
@@ -164,7 +164,7 @@ fix_loop_placement (struct loop *loop)
placement of subloops of FROM->loop_father, that might also be altered due
to this change; the condition for them is similar, except that instead of
successors we consider edges coming out of the loops.
-
+
If the changes may invalidate the information about irreducible regions,
IRRED_INVALIDATED is set to true. */
@@ -330,7 +330,7 @@ remove_path (edge e)
{
SET_BIT (seen, ae->dest->index);
bord_bbs[n_bord_bbs++] = ae->dest;
-
+
if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
irred_invalidated = true;
}
@@ -504,7 +504,7 @@ update_dominators_in_loop (struct loop *loop)
}
/* Creates an if region as shown above. CONDITION is used to create
- the test for the if.
+ the test for the if.
|
| ------------- -------------
@@ -549,7 +549,7 @@ create_empty_if_region_on_edge (edge entry_edge, tree condition)
succ_bb = entry_edge->dest;
cond_bb = split_edge (entry_edge);
-
+
/* Insert condition in cond_bb. */
gsi = gsi_last_bb (cond_bb);
simple_cond =
@@ -558,7 +558,7 @@ create_empty_if_region_on_edge (edge entry_edge, tree condition)
cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
gsi = gsi_last_bb (cond_bb);
gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
-
+
join_bb = split_edge (single_succ_edge (cond_bb));
e_true = single_succ_edge (cond_bb);
@@ -839,7 +839,7 @@ unloop (struct loop *loop, bool *irred_invalidated)
condition stated in description of fix_loop_placement holds for them.
It is used in case when we removed some edges coming out of LOOP, which
may cause the right placement of LOOP inside loop tree to change.
-
+
IRRED_INVALIDATED is set to true if a change in the loop structures might
invalidate the information about irreducible regions. */
@@ -1320,7 +1320,7 @@ has_preds_from_loop (basic_block block, struct loop *loop)
{
edge e;
edge_iterator ei;
-
+
FOR_EACH_EDGE (e, ei, block->preds)
if (e->src->loop_father == loop)
return true;
@@ -1331,7 +1331,7 @@ has_preds_from_loop (basic_block block, struct loop *loop)
CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single
entry; otherwise we also force preheader block to have only one successor.
When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
- to be a fallthru predecessor to the loop header and to have only
+ to be a fallthru predecessor to the loop header and to have only
predecessors from outside of the loop.
The function also updates dominators. */
@@ -1360,7 +1360,7 @@ create_preheader (struct loop *loop, int flags)
if (nentry == 1)
{
bool need_forwarder_block = false;
-
+
/* We do not allow entry block to be the loop preheader, since we
cannot emit code there. */
if (single_entry->src == ENTRY_BLOCK_PTR)
@@ -1416,7 +1416,7 @@ create_preheader (struct loop *loop, int flags)
if (dump_file)
fprintf (dump_file, "Created preheader block for loop %i\n",
loop->num);
-
+
if (flags & CP_FALLTHRU_PREHEADERS)
gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
&& !JUMP_P (BB_END (dummy)));
@@ -1524,7 +1524,7 @@ lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
is the ratio by that the frequencies in the original loop should
be scaled. ELSE_SCALE is the ratio by that the frequencies in the
new loop should be scaled.
-
+
If PLACE_AFTER is true, we place the new loop after LOOP in the
instruction stream, otherwise it is placed before LOOP. */
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index 16cbab41bf1..1a47bfe7143 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -470,7 +470,7 @@ emit_insn_at_entry (rtx insn)
}
/* Update BLOCK_FOR_INSN of insns between BEGIN and END
- (or BARRIER if found) and notify df of the bb change.
+ (or BARRIER if found) and notify df of the bb change.
The insn chain range is inclusive
(i.e. both BEGIN and END will be updated). */
@@ -1256,7 +1256,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target)
if (abnormal_edge_flags)
make_edge (src, target, abnormal_edge_flags);
- df_mark_solutions_dirty ();
+ df_mark_solutions_dirty ();
return new_bb;
}
@@ -1612,7 +1612,7 @@ rtl_dump_bb (basic_block bb, FILE *outf, int indent, int flags ATTRIBUTE_UNUSED)
s_indent = (char *) alloca ((size_t) indent + 1);
memset (s_indent, ' ', (size_t) indent);
s_indent[indent] = '\0';
-
+
if (df)
{
df_dump_top (bb, outf);
@@ -1679,7 +1679,7 @@ print_rtl_with_bb (FILE *outf, const_rtx rtx_first)
{
edge e;
edge_iterator ei;
-
+
fprintf (outf, ";; Start of basic block (");
FOR_EACH_EDGE (e, ei, bb->preds)
fprintf (outf, " %d", e->src->index);
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 3e5b8466d8c..8fd5e7fbf08 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -795,8 +795,8 @@ cgraph_set_call_stmt_including_clones (struct cgraph_node *orig,
}
/* Like cgraph_create_edge walk the clone tree and update all clones sharing
- same function body.
-
+ same function body.
+
TODO: COUNT and LOOP_DEPTH should be properly distributed based on relative
frequencies of the clones. */
@@ -1853,7 +1853,7 @@ clone_function_name (tree decl)
}
/* Create callgraph node clone with new declaration. The actual body will
- be copied later at compilation stage.
+ be copied later at compilation stage.
TODO: after merging in ipa-sra use function call notes instead of args_to_skip
bitmap interface.
@@ -2004,7 +2004,7 @@ cgraph_function_body_availability (struct cgraph_node *node)
GIMPLE.
The function is assumed to be reachable and have address taken (so no
- API breaking optimizations are performed on it).
+ API breaking optimizations are performed on it).
Main work done by this function is to enqueue the function for later
processing to avoid need the passes to be re-entrant. */
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index e09a858bdac..a683c5bc970 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -227,7 +227,7 @@ struct GTY((chain_next ("%h.next"), chain_prev ("%h.previous"))) cgraph_node {
ABSTRACT_DECL_ORIGIN of a reachable function. */
unsigned abstract_and_needed : 1;
/* Set when function is reachable by call from other function
- that is either reachable or needed.
+ that is either reachable or needed.
This flag is computed at original cgraph construction and then
updated in cgraph_remove_unreachable_nodes. Note that after
cgraph_remove_unreachable_nodes cgraph still can contain unreachable
@@ -312,7 +312,7 @@ struct GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"))) cgrap
cgraph_inline_failed_t inline_failed;
/* Expected number of executions: calculated in profile.c. */
gcov_type count;
- /* Expected frequency of executions within the function.
+ /* Expected frequency of executions within the function.
When set to CGRAPH_FREQ_BASE, the edge is expected to be called once
per function call. The range is 0 to CGRAPH_FREQ_MAX. */
int frequency;
@@ -669,7 +669,7 @@ cgraph_node_set_size (cgraph_node_set set)
struct GTY(()) constant_descriptor_tree {
/* A MEM for the constant. */
rtx rtl;
-
+
/* The value of the constant. */
tree value;
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index 0c3bff2f90e..bcc66c0eecd 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -33,7 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
/* Walk tree and record all calls and references to functions/variables.
- Called via walk_tree: TP is pointer to tree to be examined.
+ Called via walk_tree: TP is pointer to tree to be examined.
When DATA is non-null, record references to callgraph.
*/
@@ -222,14 +222,14 @@ struct gimple_opt_pass pass_build_cgraph_edges =
};
/* Record references to functions and other variables present in the
- initial value of DECL, a variable.
+ initial value of DECL, a variable.
When ONLY_VARS is true, we mark needed only variables, not functions. */
void
record_references_in_initializer (tree decl, bool only_vars)
{
struct pointer_set_t *visited_nodes = pointer_set_create ();
- walk_tree (&DECL_INITIAL (decl), record_reference,
+ walk_tree (&DECL_INITIAL (decl), record_reference,
only_vars ? NULL : decl, visited_nodes);
pointer_set_destroy (visited_nodes);
}
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index e934b3d8abc..4a4e6a4b6e3 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -151,8 +151,8 @@ static GTY (()) VEC(tree, gc) *static_dtors;
/* When target does not have ctors and dtors, we call all constructor
and destructor by special initialization/destruction function
- recognized by collect2.
-
+ recognized by collect2.
+
When we are going to build this function, collect all constructors and
destructors and turn them into normal functions. */
@@ -239,7 +239,7 @@ compare_ctor (const void *p1, const void *p2)
f2 = *(const tree *)p2;
priority1 = DECL_INIT_PRIORITY (f1);
priority2 = DECL_INIT_PRIORITY (f2);
-
+
if (priority1 < priority2)
return -1;
else if (priority1 > priority2)
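
As an aside to the compare_ctor/compare_dtor hunks above (illustration only,
hypothetical data rather than GCC trees): the comparator handed to qsort is
the usual ascending-by-priority shape, so lower-priority constructors end up
first.  A detached sketch:

#include <stdio.h>
#include <stdlib.h>

struct ctor { const char *name; int priority; };

/* Order ascending by priority; equal priorities simply compare equal
   in this sketch.  */
static int
compare_priority (const void *p1, const void *p2)
{
  const struct ctor *c1 = (const struct ctor *) p1;
  const struct ctor *c2 = (const struct ctor *) p2;

  if (c1->priority < c2->priority)
    return -1;
  else if (c1->priority > c2->priority)
    return 1;
  return 0;
}

int
main (void)
{
  struct ctor ctors[] = {
    { "init_logging", 200 },
    { "init_malloc",  100 },
    { "init_ui",      300 },
  };

  qsort (ctors, 3, sizeof (ctors[0]), compare_priority);
  for (int i = 0; i < 3; i++)
    printf ("%s (priority %d)\n", ctors[i].name, ctors[i].priority);
  return 0;
}
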
@@ -265,7 +265,7 @@ compare_dtor (const void *p1, const void *p2)
f2 = *(const tree *)p2;
priority1 = DECL_FINI_PRIORITY (f1);
priority2 = DECL_FINI_PRIORITY (f2);
-
+
if (priority1 < priority2)
return -1;
else if (priority1 > priority2)
@@ -286,12 +286,12 @@ cgraph_build_cdtor_fns (void)
{
gcc_assert (!targetm.have_ctors_dtors);
qsort (VEC_address (tree, static_ctors),
- VEC_length (tree, static_ctors),
+ VEC_length (tree, static_ctors),
sizeof (tree),
compare_ctor);
build_cdtor (/*ctor_p=*/true,
VEC_address (tree, static_ctors),
- VEC_length (tree, static_ctors));
+ VEC_length (tree, static_ctors));
VEC_truncate (tree, static_ctors, 0);
}
@@ -299,12 +299,12 @@ cgraph_build_cdtor_fns (void)
{
gcc_assert (!targetm.have_ctors_dtors);
qsort (VEC_address (tree, static_dtors),
- VEC_length (tree, static_dtors),
+ VEC_length (tree, static_dtors),
sizeof (tree),
compare_dtor);
build_cdtor (/*ctor_p=*/false,
VEC_address (tree, static_dtors),
- VEC_length (tree, static_dtors));
+ VEC_length (tree, static_dtors));
VEC_truncate (tree, static_dtors, 0);
}
}
@@ -1689,7 +1689,7 @@ cgraph_copy_node_for_versioning (struct cgraph_node *old_version,
TREE_MAP is a mapping of tree nodes we want to replace with
new ones (according to results of prior analysis).
OLD_VERSION_NODE is the node that is versioned.
- It returns the new version's cgraph node.
+ It returns the new version's cgraph node.
ARGS_TO_SKIP lists arguments to be omitted from functions
*/
@@ -1739,7 +1739,7 @@ cgraph_function_versioning (struct cgraph_node *old_version_node,
/* Update the call_expr on the edges to call the new version node. */
update_call_expr (new_version_node);
-
+
cgraph_call_function_insertion_hooks (new_version_node);
return new_version_node;
}
diff --git a/gcc/cif-code.def b/gcc/cif-code.def
index 48dbabffbed..2de63b62178 100644
--- a/gcc/cif-code.def
+++ b/gcc/cif-code.def
@@ -61,7 +61,7 @@ DEFCIFCODE(MAX_INLINE_INSNS_SINGLE_LIMIT,
DEFCIFCODE(MAX_INLINE_INSNS_AUTO_LIMIT,
N_("--param max-inline-insns-auto limit reached"))
DEFCIFCODE(INLINE_UNIT_GROWTH_LIMIT,
- N_("--param inline-unit-growth limit reached"))
+ N_("--param inline-unit-growth limit reached"))
/* Recursive inlining. */
DEFCIFCODE(RECURSIVE_INLINING, N_("recursive inlining"))
diff --git a/gcc/collect2.c b/gcc/collect2.c
index b259f09333f..aa4b549b11b 100644
--- a/gcc/collect2.c
+++ b/gcc/collect2.c
@@ -349,7 +349,7 @@ typedef enum {
enum scanfilter_masks {
SCAN_NOTHING = 0,
- SCAN_CTOR = 1 << SYM_CTOR,
+ SCAN_CTOR = 1 << SYM_CTOR,
SCAN_DTOR = 1 << SYM_DTOR,
SCAN_INIT = 1 << SYM_INIT,
SCAN_FINI = 1 << SYM_FINI,
@@ -1157,7 +1157,7 @@ main (int argc, char **argv)
char **ld1_argv;
const char **ld1;
bool use_plugin = false;
-
+
/* The kinds of symbols we will have to consider when scanning the
outcome of a first pass link. This is ALL to start with, then might
be adjusted before getting to the first pass link per se, typically on
@@ -1653,25 +1653,25 @@ main (int argc, char **argv)
would otherwise reference them all, hence drag all the corresponding
objects even if nothing else is referenced. */
{
- const char **export_object_lst
+ const char **export_object_lst
= CONST_CAST2 (const char **, char **, object_lst);
-
+
struct id *list = libs.first;
/* Compute the filter to use from the current one, do scan, then adjust
the "current" filter to remove what we just included here. This will
control whether we need a first pass link later on or not, and what
will remain to be scanned there. */
-
+
scanfilter this_filter
= shared_obj ? ld1_filter : (ld1_filter & ~SCAN_DWEH);
-
+
while (export_object_lst < object)
scan_prog_file (*export_object_lst++, PASS_OBJ, this_filter);
-
+
for (; list; list = list->next)
scan_prog_file (list->name, PASS_FIRST, this_filter);
-
+
ld1_filter = ld1_filter & ~this_filter;
}
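
As an aside to the collect2.c hunk above (a detached sketch with invented
flag names, not collect2's real masks): the pattern is to compute a scan
filter, run the scan with it, then clear the handled bits with
"ld1_filter = ld1_filter & ~this_filter;" so a later pass only has to look
for whatever is still missing:

#include <stdio.h>

enum {
  SCAN_FOO = 1 << 0,
  SCAN_BAR = 1 << 1,
  SCAN_BAZ = 1 << 2,
  SCAN_ALL = SCAN_FOO | SCAN_BAR | SCAN_BAZ
};

int
main (void)
{
  unsigned remaining = SCAN_ALL;

  /* Pretend the first scan handled FOO and BAZ only.  */
  unsigned this_filter = SCAN_FOO | SCAN_BAZ;
  remaining &= ~this_filter;

  /* A later pass is needed only if some kind is still unhandled.  */
  if (remaining != 0)
    printf ("still need a pass for mask 0x%x\n", remaining);
  else
    printf ("nothing left to scan\n");
  return 0;
}
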
@@ -1744,9 +1744,9 @@ main (int argc, char **argv)
/* Load the program, searching all libraries and attempting to provide
undefined symbols from repository information.
-
+
If -r was given or they will be run via some other method, do not build the
- constructor or destructor list, just return now. */
+ constructor or destructor list, just return now. */
{
bool early_exit
= rflag || (! DO_COLLECT_EXPORT_LIST && ! do_collecting);
@@ -1759,10 +1759,10 @@ main (int argc, char **argv)
objects and libraries has performed above. In the !shared_obj case, we
expect the relevant tables to be dragged together with their associated
functions from precise cross reference insertions by the compiler. */
-
+
if (early_exit || ld1_filter != SCAN_NOTHING)
do_tlink (ld1_argv, object_lst);
-
+
if (early_exit)
{
#ifdef COLLECT_EXPORT_LIST
diff --git a/gcc/combine.c b/gcc/combine.c
index 80c538ec490..cdc677d17c7 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -767,7 +767,7 @@ do_SUBST_MODE (rtx *into, enum machine_mode newval)
/* Subroutine of try_combine. Determine whether the combine replacement
patterns NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to
insn_rtx_cost that the original instruction sequence I1, I2, I3 and
- undobuf.other_insn. Note that I1 and/or NEWI2PAT may be NULL_RTX.
+ undobuf.other_insn. Note that I1 and/or NEWI2PAT may be NULL_RTX.
NEWOTHERPAT and undobuf.other_insn may also both be NULL_RTX. This
function returns false, if the costs of all instructions can be
estimated, and the replacements are more expensive than the original
@@ -912,7 +912,7 @@ create_log_links (void)
register and establishing log links when def is encountered.
Note that we do not clear next_use array in order to save time,
so we have to test whether the use is in the same basic block as def.
-
+
There are a few cases below when we do not consider the definition or
usage -- these are taken from what the original flow.c did. Don't ask me why it is
done this way; I don't know and if it works, I don't want to know. */
@@ -1367,7 +1367,7 @@ setup_incoming_promotions (rtx first)
mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
- /* The mode and signedness of the argument as it is actually passed,
+ /* The mode and signedness of the argument as it is actually passed,
after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions. */
mode3 = promote_function_mode (DECL_ARG_TYPE (arg), mode2, &uns3,
TREE_TYPE (cfun->decl), 0);
@@ -3887,7 +3887,7 @@ try_combine (rtx i3, rtx i2, rtx i1, int *new_direct_jump_p)
if (newi2pat && new_i2_notes)
distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX);
-
+
if (new_i3_notes)
distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX);
@@ -4033,7 +4033,7 @@ try_combine (rtx i3, rtx i2, rtx i1, int *new_direct_jump_p)
}
df_insn_rescan (i3);
}
-
+
/* Set new_direct_jump_p if a new return or simple jump instruction
has been created. Adjust the CFG accordingly. */
@@ -4061,7 +4061,7 @@ try_combine (rtx i3, rtx i2, rtx i1, int *new_direct_jump_p)
*new_direct_jump_p = 1;
update_cfg_for_uncondjump (i3);
}
-
+
combine_successes++;
undo_commit ();
@@ -5272,7 +5272,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
}
/* Try simplify a*(b/c) as (a*b)/c. */
- if (FLOAT_MODE_P (mode) && flag_associative_math
+ if (FLOAT_MODE_P (mode) && flag_associative_math
&& GET_CODE (XEXP (x, 0)) == DIV)
{
rtx tem = simplify_binary_operation (MULT, mode,
@@ -10175,7 +10175,7 @@ recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes)
if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
&& ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
return -1;
- if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
+ if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
{
gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
notes = alloc_reg_note (REG_UNUSED,
diff --git a/gcc/convert.c b/gcc/convert.c
index 453f5ed873c..4fe95ced915 100644
--- a/gcc/convert.c
+++ b/gcc/convert.c
@@ -434,7 +434,7 @@ convert_to_integer (tree type, tree expr)
tree s_intype = TREE_TYPE (s_expr);
const enum built_in_function fcode = builtin_mathfn_code (s_expr);
tree fn = 0;
-
+
switch (fcode)
{
CASE_FLT_FN (BUILT_IN_CEIL):
@@ -494,7 +494,7 @@ convert_to_integer (tree type, tree expr)
default:
break;
}
-
+
if (fn)
{
tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
@@ -515,7 +515,7 @@ convert_to_integer (tree type, tree expr)
tree s_intype = TREE_TYPE (s_expr);
const enum built_in_function fcode = builtin_mathfn_code (s_expr);
tree fn = 0;
-
+
switch (fcode)
{
CASE_FLT_FN (BUILT_IN_LOGB):
diff --git a/gcc/coverage.c b/gcc/coverage.c
index a223bc067e6..e04d22b7a88 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -201,7 +201,7 @@ read_counts_file (void)
/* Read and discard the stamp. */
gcov_read_unsigned ();
-
+
counts_hash = htab_create (10,
htab_counts_entry_hash, htab_counts_entry_eq,
htab_counts_entry_del);
@@ -779,7 +779,7 @@ build_ctr_info_value (unsigned int counter, tree type)
varpool_finalize_decl (tree_ctr_tables[counter]);
value = tree_cons (fields,
- build1 (ADDR_EXPR, TREE_TYPE (fields),
+ build1 (ADDR_EXPR, TREE_TYPE (fields),
tree_ctr_tables[counter]),
value);
}
@@ -1003,14 +1003,14 @@ coverage_init (const char *filename)
int len = strlen (filename);
/* + 1 for extra '/', in case prefix doesn't end with /. */
int prefix_len;
-
+
if (profile_data_prefix == 0 && filename[0] != '/')
profile_data_prefix = getpwd ();
prefix_len = (profile_data_prefix) ? strlen (profile_data_prefix) + 1 : 0;
/* Name of da file. */
- da_file_name = XNEWVEC (char, len + strlen (GCOV_DATA_SUFFIX)
+ da_file_name = XNEWVEC (char, len + strlen (GCOV_DATA_SUFFIX)
+ prefix_len + 1);
if (profile_data_prefix)
diff --git a/gcc/crtstuff.c b/gcc/crtstuff.c
index 2b6b7799209..c44abb54d04 100644
--- a/gcc/crtstuff.c
+++ b/gcc/crtstuff.c
@@ -135,7 +135,7 @@ call_ ## FUNC (void) \
declaration for functions that we want to have weak references.
Neither way is particularly good. */
-
+
/* References to __register_frame_info and __deregister_frame_info should
be weak in this file if at all possible. */
extern void __register_frame_info (const void *, struct object *)
@@ -254,7 +254,7 @@ void *__dso_handle = 0;
extern void __cxa_finalize (void *) TARGET_ATTRIBUTE_WEAK;
/* Run all the global destructors on exit from the program. */
-
+
/* Some systems place the number of pointers in the first word of the
table. On SVR4 however, that word is -1. In all cases, the table is
null-terminated. On SVR4, we start from the beginning of the list and
@@ -546,7 +546,7 @@ STATIC EH_FRAME_SECTION_CONST int32 __FRAME_END__[]
#ifdef JCR_SECTION_NAME
/* Null terminate the .jcr section array. */
-STATIC void *__JCR_END__[1]
+STATIC void *__JCR_END__[1]
__attribute__ ((unused, section(JCR_SECTION_NAME),
aligned(sizeof(void *))))
= { 0 };
diff --git a/gcc/cse.c b/gcc/cse.c
index 05f6ed6e0fe..484198a9615 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -2237,7 +2237,7 @@ hash_rtx_string (const char *ps)
return hash;
}
-/* Same as hash_rtx, but call CB on each rtx if it is not NULL.
+/* Same as hash_rtx, but call CB on each rtx if it is not NULL.
When the callback returns true, we continue with the new rtx. */
unsigned
@@ -2260,7 +2260,7 @@ hash_rtx_cb (const_rtx x, enum machine_mode mode,
return hash;
/* Invoke the callback first. */
- if (cb != NULL
+ if (cb != NULL
&& ((*cb) (x, mode, &newx, &newmode)))
{
hash += hash_rtx_cb (newx, newmode, do_not_record_p,
@@ -2370,7 +2370,7 @@ hash_rtx_cb (const_rtx x, enum machine_mode mode,
{
elt = CONST_VECTOR_ELT (x, i);
hash += hash_rtx_cb (elt, GET_MODE (elt),
- do_not_record_p, hash_arg_in_memory_p,
+ do_not_record_p, hash_arg_in_memory_p,
have_reg_qty, cb);
}
@@ -2516,7 +2516,7 @@ hash_rtx_cb (const_rtx x, enum machine_mode mode,
x = XEXP (x, i);
goto repeat;
}
-
+
hash += hash_rtx_cb (XEXP (x, i), VOIDmode, do_not_record_p,
hash_arg_in_memory_p,
have_reg_qty, cb);
@@ -2684,8 +2684,8 @@ exp_equiv_p (const_rtx x, const_rtx y, int validate, bool for_gcse)
They could e.g. be two different entities allocated into the
same space on the stack (see e.g. PR25130). In that case, the
MEM addresses can be the same, even though the two MEMs are
- absolutely not equivalent.
-
+ absolutely not equivalent.
+
But because really all MEM attributes should be the same for
equivalent MEMs, we just use the invariant that MEMs that have
the same attributes share the same mem_attrs data structure. */
@@ -3427,7 +3427,7 @@ fold_rtx (rtx x, rtx insn)
constant through simplifications. */
p = lookup (folded_arg0, SAFE_HASH (folded_arg0, mode_arg0),
mode_arg0);
-
+
if (p != NULL)
{
cheapest_simplification = x;
@@ -6001,7 +6001,7 @@ cse_process_notes (rtx x, rtx object, bool *changed)
describe the path.
It is filled with a queue of basic blocks, starting with FIRST_BB
and following a trace through the CFG.
-
+
If all paths starting at FIRST_BB have been followed, or no new path
starting at FIRST_BB can be constructed, this function returns FALSE.
Otherwise, DATA->path is filled and the function returns TRUE indicating
@@ -6017,7 +6017,7 @@ cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
basic_block bb;
edge e;
int path_size;
-
+
SET_BIT (cse_visited_basic_blocks, first_bb->index);
/* See if there is a previous path. */
@@ -6178,7 +6178,7 @@ cse_prescan_path (struct cse_basic_block_data *data)
int path_entry;
/* Scan to end of each basic block in the path. */
- for (path_entry = 0; path_entry < path_size; path_entry++)
+ for (path_entry = 0; path_entry < path_size; path_entry++)
{
basic_block bb;
rtx insn;
@@ -6784,7 +6784,7 @@ cse_change_cc_mode (rtx *loc, void *data)
&& GET_MODE (*loc) != GET_MODE (args->newreg))
{
validate_change (args->insn, loc, args->newreg, 1);
-
+
return -1;
}
return 0;
@@ -6804,10 +6804,10 @@ cse_change_cc_mode_insn (rtx insn, rtx newreg)
args.insn = insn;
args.newreg = newreg;
-
+
for_each_rtx (&PATTERN (insn), cse_change_cc_mode, &args);
for_each_rtx (&REG_NOTES (insn), cse_change_cc_mode, &args);
-
+
/* If the following assertion was triggered, there is most probably
something wrong with the cc_modes_compatible back end function.
CC modes only can be considered compatible if the insn - with the mode
@@ -6926,7 +6926,7 @@ cse_cc_succs (basic_block bb, basic_block orig_bb, rtx cc_reg, rtx cc_src,
XEXP (SET_SRC (set), 0))
&& rtx_equal_p (XEXP (cc_src, 1),
XEXP (SET_SRC (set), 1)))
-
+
{
comp_mode = targetm.cc_modes_compatible (mode, set_mode);
if (comp_mode != VOIDmode
@@ -7183,8 +7183,8 @@ struct rtl_opt_pass pass_cse =
{
RTL_PASS,
"cse1", /* name */
- gate_handle_cse, /* gate */
- rest_of_handle_cse, /* execute */
+ gate_handle_cse, /* gate */
+ rest_of_handle_cse, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
@@ -7246,8 +7246,8 @@ struct rtl_opt_pass pass_cse2 =
{
RTL_PASS,
"cse2", /* name */
- gate_handle_cse2, /* gate */
- rest_of_handle_cse2, /* execute */
+ gate_handle_cse2, /* gate */
+ rest_of_handle_cse2, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
@@ -7307,8 +7307,8 @@ struct rtl_opt_pass pass_cse_after_global_opts =
{
RTL_PASS,
"cse_local", /* name */
- gate_handle_cse_after_global_opts, /* gate */
- rest_of_handle_cse_after_global_opts, /* execute */
+ gate_handle_cse_after_global_opts, /* gate */
+ rest_of_handle_cse_after_global_opts, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 0aa22a4fe1b..66d92a01ad0 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -278,7 +278,7 @@ entry_and_rtx_equal_p (const void *entry, const void *x_arg)
gcc_assert (!CONST_INT_P (x) && GET_CODE (x) != CONST_FIXED
&& (mode != VOIDmode || GET_CODE (x) != CONST_DOUBLE));
-
+
if (mode != GET_MODE (v->val_rtx))
return 0;
@@ -815,10 +815,10 @@ cselib_hash_rtx (rtx x, int create)
{
rtx tem = XEXP (x, i);
unsigned int tem_hash = cselib_hash_rtx (tem, create);
-
+
if (tem_hash == 0)
return 0;
-
+
hash += tem_hash;
}
break;
@@ -827,10 +827,10 @@ cselib_hash_rtx (rtx x, int create)
{
unsigned int tem_hash
= cselib_hash_rtx (XVECEXP (x, i, j), create);
-
+
if (tem_hash == 0)
return 0;
-
+
hash += tem_hash;
}
break;
@@ -838,13 +838,13 @@ cselib_hash_rtx (rtx x, int create)
case 's':
{
const unsigned char *p = (const unsigned char *) XSTR (x, i);
-
+
if (p)
while (*p)
hash += *p++;
break;
}
-
+
case 'i':
hash += XINT (x, i);
break;
@@ -853,7 +853,7 @@ cselib_hash_rtx (rtx x, int create)
case 't':
/* unused */
break;
-
+
default:
gcc_unreachable ();
}
@@ -971,7 +971,7 @@ cselib_lookup_mem (rtx x, int create)
non-reg results, we just take the first one because they will all
expand to the same place. */
-static rtx
+static rtx
expand_loc (struct elt_loc_list *p, struct expand_value_data *evd,
int max_depth)
{
@@ -983,8 +983,8 @@ expand_loc (struct elt_loc_list *p, struct expand_value_data *evd,
{
/* Avoid infinite recursion trying to expand a reg into a
the same reg. */
- if ((REG_P (p->loc))
- && (REGNO (p->loc) < regno)
+ if ((REG_P (p->loc))
+ && (REGNO (p->loc) < regno)
&& !bitmap_bit_p (evd->regs_active, REGNO (p->loc)))
{
reg_result = p->loc;
@@ -992,7 +992,7 @@ expand_loc (struct elt_loc_list *p, struct expand_value_data *evd,
}
/* Avoid infinite recursion and do not try to expand the
value. */
- else if (GET_CODE (p->loc) == VALUE
+ else if (GET_CODE (p->loc) == VALUE
&& CSELIB_VAL_PTR (p->loc)->locs == p_in)
continue;
else if (!REG_P (p->loc))
@@ -1013,9 +1013,9 @@ expand_loc (struct elt_loc_list *p, struct expand_value_data *evd,
if (result)
return result;
}
-
+
}
-
+
if (regno != UINT_MAX)
{
rtx result;
@@ -1034,7 +1034,7 @@ expand_loc (struct elt_loc_list *p, struct expand_value_data *evd,
print_inline_rtx (dump_file, reg_result, 0);
fprintf (dump_file, "\n");
}
- else
+ else
fprintf (dump_file, "NULL\n");
}
return reg_result;
@@ -1045,7 +1045,7 @@ expand_loc (struct elt_loc_list *p, struct expand_value_data *evd,
This is the opposite of common subexpression. Because local value
numbering is such a weak optimization, the expanded expression is
pretty much unique (not from a pointer equals point of view but
- from a tree shape point of view.
+ from a tree shape point of view.
This function returns NULL if the expansion fails. The expansion
will fail if there is no value number for one of the operands or if
@@ -1124,7 +1124,7 @@ cselib_expand_value_rtx_1 (rtx orig, struct expand_value_data *evd,
{
rtx result;
int regno = REGNO (orig);
-
+
/* The only thing that we are not willing to do (this
is requirement of dse and if others potential uses
need this function we should add a parm to control
@@ -1156,11 +1156,11 @@ cselib_expand_value_rtx_1 (rtx orig, struct expand_value_data *evd,
if (result)
return result;
- else
+ else
return orig;
}
}
-
+
case CONST_INT:
case CONST_DOUBLE:
case CONST_VECTOR:
@@ -1979,7 +1979,7 @@ cselib_process_insn (rtx insn)
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (call_used_regs[i]
|| (REG_VALUES (i) && REG_VALUES (i)->elt
- && HARD_REGNO_CALL_PART_CLOBBERED (i,
+ && HARD_REGNO_CALL_PART_CLOBBERED (i,
GET_MODE (REG_VALUES (i)->elt->val_rtx))))
cselib_invalidate_regno (i, reg_raw_mode[i]);
@@ -2025,11 +2025,11 @@ cselib_process_insn (rtx insn)
void
cselib_init (bool record_memory)
{
- elt_list_pool = create_alloc_pool ("elt_list",
+ elt_list_pool = create_alloc_pool ("elt_list",
sizeof (struct elt_list), 10);
- elt_loc_list_pool = create_alloc_pool ("elt_loc_list",
+ elt_loc_list_pool = create_alloc_pool ("elt_loc_list",
sizeof (struct elt_loc_list), 10);
- cselib_val_pool = create_alloc_pool ("cselib_val_list",
+ cselib_val_pool = create_alloc_pool ("cselib_val_list",
sizeof (cselib_val), 10);
value_pool = create_alloc_pool ("value", RTX_CODE_SIZE (VALUE), 100);
cselib_record_memory = record_memory;
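
[Editor's aside, not part of the patch: the expand_loc/cselib_expand_value_rtx comments in the cselib.c hunks above describe expanding a value back to its roots, the reverse of value numbering. A rough sketch of that direction of traversal on plain integers; the toy_value table and its layout are invented for the example and are not cselib's representation:

#include <stdio.h>

#define NVALUES 3

struct toy_value
{
  int base;     /* index of another value, or -1 for a root  */
  int offset;   /* constant added to the base                */
};

/* v0 = root, v1 = v0 + 8, v2 = v1 + 4.  */
static const struct toy_value values[NVALUES] =
  { { -1, 0 }, { 0, 8 }, { 1, 4 } };

static int
expand (int v)
{
  /* Follow the definitions back to the root, accumulating offsets;
     this is the "opposite of common subexpression" mentioned above.  */
  int total = 0;
  while (values[v].base != -1)
    {
      total += values[v].offset;
      v = values[v].base;
    }
  return total;
}

int
main (void)
{
  printf ("v2 expands to root + %d\n", expand (2));   /* root + 12 */
  return 0;
}

End of aside.]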
diff --git a/gcc/dbgcnt.c b/gcc/dbgcnt.c
index 3fe34854f06..6c3bd9192a8 100644
--- a/gcc/dbgcnt.c
+++ b/gcc/dbgcnt.c
@@ -15,7 +15,7 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>.
+<http://www.gnu.org/licenses/>.
See dbgcnt.def for usage information. */
@@ -62,7 +62,7 @@ dbg_cnt (enum debug_counter index)
{
count[index]++;
if (dump_file && count[index] == limit[index])
- fprintf (dump_file, "***dbgcnt: limit reached for %s.***\n",
+ fprintf (dump_file, "***dbgcnt: limit reached for %s.***\n",
map[index].name);
return dbg_cnt_is_enabled (index);
@@ -104,7 +104,7 @@ dbg_cnt_process_single_pair (const char *arg)
const char *colon = strchr (arg, ':');
char *endptr = NULL;
int value;
-
+
if (colon == NULL)
return NULL;
@@ -113,7 +113,7 @@ dbg_cnt_process_single_pair (const char *arg)
if (endptr != NULL && endptr != colon + 1
&& dbg_cnt_set_limit_by_name (arg, colon - arg, value))
return endptr;
-
+
return NULL;
}
@@ -140,7 +140,7 @@ dbg_cnt_process_opt (const char *arg)
/* Print name, limit and count of all counters. */
-void
+void
dbg_cnt_list_all_counters (void)
{
int i;
diff --git a/gcc/dbgcnt.def b/gcc/dbgcnt.def
index 82dd988a86c..78553a41d6e 100644
--- a/gcc/dbgcnt.def
+++ b/gcc/dbgcnt.def
@@ -71,7 +71,7 @@ along with GCC; see the file COPYING3. If not see
assuming that the following script is called binarySearch,
the command:
- binarySearch tryTest
+ binarySearch tryTest
will automatically find the highest value of the counter for which
the program fails. If tryTest never fails, binarySearch will
@@ -82,7 +82,7 @@ along with GCC; see the file COPYING3. If not see
dump_file of the form:
***dbgcnt: limit reached for %s.***
-
+
Assuming that the dump file is logging the analysis/transformations
it is making, this pinpoints the exact position in the log file
where the problem transformation is being logged.
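
[Editor's aside, not part of the patch: the dbgcnt.def comment above describes bisecting over a debug counter limit with a "binarySearch tryTest" script. One possible driver for that bisection is sketched below; "./tryTest" stands in for whatever script runs the compiler with -fdbg-cnt=<counter>:<limit> and reports failure through its exit status, and the search assumes failure is monotone (fails up to some limit, passes above it):

#include <stdio.h>
#include <stdlib.h>

static int
fails_with_limit (int limit)
{
  char cmd[128];
  snprintf (cmd, sizeof cmd, "./tryTest %d", limit);
  return system (cmd) != 0;      /* nonzero exit status == failure */
}

int
main (void)
{
  /* Assumed: fails_with_limit (0) is true and fails_with_limit (1 << 20)
     is false, so the highest failing limit lies in between.  */
  int lo = 0, hi = 1 << 20;
  while (lo + 1 < hi)
    {
      int mid = lo + (hi - lo) / 2;
      if (fails_with_limit (mid))
        lo = mid;                /* still fails: answer is at least mid */
      else
        hi = mid;
    }
  printf ("highest failing limit: %d\n", lo);
  return 0;
}

End of aside.]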
diff --git a/gcc/dbgcnt.h b/gcc/dbgcnt.h
index 8be73763009..354e88a48ea 100644
--- a/gcc/dbgcnt.h
+++ b/gcc/dbgcnt.h
@@ -15,7 +15,7 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>.
+<http://www.gnu.org/licenses/>.
See dbgcnt.def for usage information. */
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index a7bae6003f6..a314e7b4430 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -216,8 +216,8 @@ struct dbx_file
struct dbx_file *prev; /* Chain to traverse all pending bincls. */
};
-/* This is the top of the stack.
-
+/* This is the top of the stack.
+
This is not saved for PCH, because restoring a PCH should not change it.
next_file_number does have to be saved, because the PCH may use some
file numbers; however, just before restoring a PCH, next_file_number
@@ -306,7 +306,7 @@ static void emit_pending_bincls (void);
static inline void emit_pending_bincls_if_required (void);
static void dbxout_init (const char *);
-
+
static void dbxout_finish (const char *);
static void dbxout_start_source_file (unsigned, const char *);
static void dbxout_end_source_file (unsigned);
@@ -848,7 +848,7 @@ do { \
SYM is the DECL of the symbol under consideration; it is used only
for its DECL_SOURCE_LINE. The other arguments are all passed directly
to DBX_FINISH_STABS; see above for details. */
-
+
static void
dbxout_finish_complex_stabs (tree sym, stab_code_type code,
rtx addr, const char *label, int number)
@@ -902,7 +902,7 @@ dbxout_finish_complex_stabs (tree sym, stab_code_type code,
obstack_grow (&stabstr_ob, "\",", 2);
len = obstack_object_size (&stabstr_ob);
str = XOBFINISH (&stabstr_ob, char *);
-
+
fwrite (str, 1, len, asm_out_file);
DBX_FINISH_STABS (sym, code, line, addr, label, number);
}
@@ -919,7 +919,7 @@ dbxout_function_end (tree decl ATTRIBUTE_UNUSED)
/* The Lscope label must be emitted even if we aren't doing anything
else; dbxout_block needs it. */
switch_to_section (function_section (current_function_decl));
-
+
/* Convert Lscope into the appropriate format for local labels in case
the system doesn't insert underscores in front of user generated
labels. */
@@ -939,10 +939,10 @@ dbxout_function_end (tree decl ATTRIBUTE_UNUSED)
if (flag_reorder_blocks_and_partition)
{
dbxout_begin_empty_stabs (N_FUN);
- dbxout_stab_value_label_diff (crtl->subsections.hot_section_end_label,
+ dbxout_stab_value_label_diff (crtl->subsections.hot_section_end_label,
crtl->subsections.hot_section_label);
dbxout_begin_empty_stabs (N_FUN);
- dbxout_stab_value_label_diff (crtl->subsections.cold_section_end_label,
+ dbxout_stab_value_label_diff (crtl->subsections.cold_section_end_label,
crtl->subsections.cold_section_label);
}
else
@@ -1178,7 +1178,7 @@ dbxout_start_source_file (unsigned int line ATTRIBUTE_UNUSED,
n->next = current_file;
n->next_type_number = 1;
- /* Do not assign file number now.
+ /* Do not assign file number now.
Delay it until we actually emit BINCL. */
n->file_number = 0;
n->prev = NULL;
@@ -1259,7 +1259,7 @@ dbxout_source_file (const char *filename)
}
}
-/* Output N_BNSYM, line number symbol entry, and local symbol at
+/* Output N_BNSYM, line number symbol entry, and local symbol at
function scope */
static void
@@ -1275,7 +1275,7 @@ dbxout_begin_prologue (unsigned int lineno, const char *filename)
scope_labelno++;
dbxout_source_line (lineno, filename, 0, true);
- /* Output function begin block at function scope, referenced
+ /* Output function begin block at function scope, referenced
by dbxout_block, dbxout_source_line and dbxout_function_end. */
emit_pending_bincls_if_required ();
targetm.asm_out.internal_label (asm_out_file, "LFBB", scope_labelno);
@@ -1299,7 +1299,7 @@ dbxout_source_line (unsigned int lineno, const char *filename,
char begin_label[20];
dbxout_begin_stabn_sline (lineno);
/* Reference current function start using LFBB. */
- ASM_GENERATE_INTERNAL_LABEL (begin_label, "LFBB", scope_labelno);
+ ASM_GENERATE_INTERNAL_LABEL (begin_label, "LFBB", scope_labelno);
dbxout_stab_value_internal_label_diff ("LM", &dbxout_source_line_counter,
begin_label);
}
@@ -2093,7 +2093,7 @@ dbxout_type (tree type, int full)
int i;
tree child;
VEC(tree,gc) *accesses = BINFO_BASE_ACCESSES (binfo);
-
+
if (use_gnu_debug_info_extensions)
{
if (BINFO_N_BASE_BINFOS (binfo))
@@ -2320,7 +2320,7 @@ static void
dbxout_type_name (tree type)
{
tree t = TYPE_NAME (type);
-
+
gcc_assert (t);
switch (TREE_CODE (t))
{
@@ -3138,7 +3138,7 @@ dbxout_symbol_name (tree decl, const char *suffix, int letter)
{
tree name;
- if (DECL_CONTEXT (decl)
+ if (DECL_CONTEXT (decl)
&& (TYPE_P (DECL_CONTEXT (decl))
|| TREE_CODE (DECL_CONTEXT (decl)) == NAMESPACE_DECL))
/* One slight hitch: if this is a VAR_DECL which is a class member
@@ -3190,12 +3190,12 @@ dbxout_common_check (tree decl, int *value)
rtx home;
rtx sym_addr;
const char *name = NULL;
-
+
/* If the decl isn't a VAR_DECL, or if it isn't static, or if
it does not have a value (the offset into the common area), or if it
is thread local (as opposed to global) then it isn't common, and shouldn't
be handled as such.
-
+
??? DECL_THREAD_LOCAL_P check prevents problems with improper .stabs
for thread-local symbols. Can be handled via same mechanism as used
in dwarf2out.c. */
@@ -3206,7 +3206,7 @@ dbxout_common_check (tree decl, int *value)
|| !is_fortran ())
return NULL;
- home = DECL_RTL (decl);
+ home = DECL_RTL (decl);
if (home == NULL_RTX || GET_CODE (home) != MEM)
return NULL;
@@ -3431,7 +3431,7 @@ dbxout_parms (tree parms)
that it was actually passed by invisible reference. */
code = DBX_REGPARM_STABS_CODE;
-
+
/* GDB likes this marked with a special letter. */
letter = (use_gnu_debug_info_extensions
? 'a' : DBX_REGPARM_STABS_LETTER);
@@ -3502,7 +3502,7 @@ dbxout_parms (tree parms)
continue;
dbxout_begin_complex_stabs ();
-
+
if (DECL_NAME (parms))
{
stabstr_I (DECL_NAME (parms));
diff --git a/gcc/dce.c b/gcc/dce.c
index a2c6973f53f..855bef32237 100644
--- a/gcc/dce.c
+++ b/gcc/dce.c
@@ -622,7 +622,7 @@ mark_artificial_uses (void)
FOR_ALL_BB (bb)
{
- for (use_rec = df_get_artificial_uses (bb->index);
+ for (use_rec = df_get_artificial_uses (bb->index);
*use_rec; use_rec++)
for (defs = DF_REF_CHAIN (*use_rec); defs; defs = defs->next)
if (! DF_REF_IS_ARTIFICIAL (defs->ref))
@@ -825,7 +825,7 @@ byte_dce_process_block (basic_block bb, bool redo_out, bitmap au)
mark_insn (insn, true);
goto quickexit;
}
-
+
last = start + len;
while (start < last)
if (bitmap_bit_p (local_live, start++))
@@ -834,9 +834,9 @@ byte_dce_process_block (basic_block bb, bool redo_out, bitmap au)
goto quickexit;
}
}
-
- quickexit:
-
+
+ quickexit:
+
/* No matter if the instruction is needed or not, we remove
any regno in the defs from the live set. */
df_byte_lr_simulate_defs (insn, local_live);
@@ -848,12 +848,12 @@ byte_dce_process_block (basic_block bb, bool redo_out, bitmap au)
if (dump_file)
{
- fprintf (dump_file, "finished processing insn %d live out = ",
+ fprintf (dump_file, "finished processing insn %d live out = ",
INSN_UID (insn));
df_print_byte_regset (dump_file, local_live);
}
}
-
+
df_byte_lr_simulate_artificial_refs_at_top (bb, local_live);
block_changed = !bitmap_equal_p (local_live, DF_BYTE_LR_IN (bb));
@@ -913,10 +913,10 @@ dce_process_block (basic_block bb, bool redo_out, bitmap au)
needed = true;
break;
}
-
+
if (needed)
mark_insn (insn, true);
-
+
/* No matter if the instruction is needed or not, we remove
any regno in the defs from the live set. */
df_simulate_defs (insn, local_live);
@@ -926,7 +926,7 @@ dce_process_block (basic_block bb, bool redo_out, bitmap au)
if (marked_insn_p (insn))
df_simulate_uses (insn, local_live);
}
-
+
df_simulate_finalize_backwards (bb, local_live);
block_changed = !bitmap_equal_p (local_live, DF_LR_IN (bb));
@@ -986,15 +986,15 @@ fast_dce (bool byte_level)
}
if (byte_level)
- local_changed
+ local_changed
= byte_dce_process_block (bb, bitmap_bit_p (redo_out, index),
bb_has_eh_pred (bb) ? au_eh : au);
else
- local_changed
+ local_changed
= dce_process_block (bb, bitmap_bit_p (redo_out, index),
bb_has_eh_pred (bb) ? au_eh : au);
bitmap_set_bit (processed, index);
-
+
if (local_changed)
{
edge e;
@@ -1010,7 +1010,7 @@ fast_dce (bool byte_level)
bitmap_set_bit (redo_out, e->src->index);
}
}
-
+
if (global_changed)
{
/* Turn off the RUN_DCE flag to prevent recursive calls to
@@ -1023,11 +1023,11 @@ fast_dce (bool byte_level)
sbitmap_zero (marked);
bitmap_clear (processed);
bitmap_clear (redo_out);
-
+
/* We do not need to rescan any instructions. We only need
to redo the dataflow equations for the blocks that had a
change at the top of the block. Then we need to redo the
- iteration. */
+ iteration. */
if (byte_level)
df_analyze_problem (df_byte_lr, all_blocks, postorder, n_blocks);
else
diff --git a/gcc/ddg.c b/gcc/ddg.c
index 0c54f8079fb..23fa6fa2aeb 100644
--- a/gcc/ddg.c
+++ b/gcc/ddg.c
@@ -355,7 +355,7 @@ add_inter_loop_mem_dep (ddg_ptr g, ddg_node_ptr from, ddg_node_ptr to)
if (!insn_alias_sets_conflict_p (from->insn, to->insn))
/* Do not create edge if memory references have disjoint alias sets. */
return;
-
+
if (mem_write_insn_p (from->insn))
{
if (mem_read_insn_p (to->insn))
@@ -523,10 +523,10 @@ create_ddg (basic_block bb, int closing_branch_deps)
g->nodes[i++].insn = insn;
first_note = NULL_RTX;
}
-
+
/* We must have found a branch in DDG. */
gcc_assert (g->closing_branch);
-
+
/* Build the data dependency graph. */
build_intra_loop_deps (g);
@@ -869,9 +869,9 @@ static int
compare_sccs (const void *s1, const void *s2)
{
const int rec_l1 = (*(const ddg_scc_ptr *)s1)->recurrence_length;
- const int rec_l2 = (*(const ddg_scc_ptr *)s2)->recurrence_length;
+ const int rec_l2 = (*(const ddg_scc_ptr *)s2)->recurrence_length;
return ((rec_l2 > rec_l1) - (rec_l2 < rec_l1));
-
+
}
/* Order the backarcs in descending recMII order using compare_sccs. */
diff --git a/gcc/ddg.h b/gcc/ddg.h
index fbe2988606c..e17e534af4d 100644
--- a/gcc/ddg.h
+++ b/gcc/ddg.h
@@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see
/* For basic_block. */
#include "basic-block.h"
#include "df.h"
-
+
typedef struct ddg_node *ddg_node_ptr;
typedef struct ddg_edge *ddg_edge_ptr;
typedef struct ddg *ddg_ptr;
diff --git a/gcc/defaults.h b/gcc/defaults.h
index 182de95685c..0363a460e06 100644
--- a/gcc/defaults.h
+++ b/gcc/defaults.h
@@ -460,11 +460,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#ifndef DECIMAL32_TYPE_SIZE
#define DECIMAL32_TYPE_SIZE 32
-#endif
+#endif
-#ifndef DECIMAL64_TYPE_SIZE
+#ifndef DECIMAL64_TYPE_SIZE
#define DECIMAL64_TYPE_SIZE 64
-#endif
+#endif
#ifndef DECIMAL128_TYPE_SIZE
#define DECIMAL128_TYPE_SIZE 128
diff --git a/gcc/df-byte-scan.c b/gcc/df-byte-scan.c
index 7e4db4b35bc..8271465041e 100644
--- a/gcc/df-byte-scan.c
+++ b/gcc/df-byte-scan.c
@@ -43,12 +43,12 @@ along with GCC; see the file COPYING3. If not see
/* Helper for df_compute_accessed_bytes. Ref is some sort of extract.
Return true if this effects the entire reg in REF. Return false if
otherwise and set START_BYTE and LAST_BYTE. See the description of
- df_compute_accessed_bytes for a description of MM. */
+ df_compute_accessed_bytes for a description of MM. */
-static bool
+static bool
df_compute_accessed_bytes_extract (df_ref ref,
enum df_mm mm ,
- unsigned int *start_byte,
+ unsigned int *start_byte,
unsigned int *last_byte)
{
int start;
@@ -61,12 +61,12 @@ df_compute_accessed_bytes_extract (df_ref ref,
/* (*_extract:M1 (reg:M2 X) WIDTH POS)
(*_extract:M1 (subreg:M1 (reg:M2 X N) WIDTH POS)
-
+
This is a bitfield extraction. The assignment clobbers/extracts
exactly the bits named by WIDTH and POS and does not affect the
other bits in register X. It is also technically possible that
the bits asked for are longer than units per word. */
-
+
int offset = DF_REF_EXTRACT_OFFSET (ref);
int width = DF_REF_EXTRACT_WIDTH (ref);
@@ -134,9 +134,9 @@ df_compute_accessed_bytes_extract (df_ref ref,
last = m2_size;
if (dump_file)
- fprintf (dump_file, " cpb extract regno=%d start=%d last=%d\n",
+ fprintf (dump_file, " cpb extract regno=%d start=%d last=%d\n",
DF_REF_REGNO (ref), start, last);
-
+
*start_byte = start;
*last_byte = last;
return false;
@@ -145,11 +145,11 @@ df_compute_accessed_bytes_extract (df_ref ref,
/* Helper for df_compute_accessed_bytes. Ref is a strict_low_part.
Return true if this effects the entire reg in REF. Return false if
- otherwise and set START_BYTE and LAST_BYTE. */
+ otherwise and set START_BYTE and LAST_BYTE. */
-static bool
-df_compute_accessed_bytes_strict_low_part (df_ref ref,
- unsigned int *start_byte,
+static bool
+df_compute_accessed_bytes_strict_low_part (df_ref ref,
+ unsigned int *start_byte,
unsigned int *last_byte)
{
int start;
@@ -177,17 +177,17 @@ df_compute_accessed_bytes_strict_low_part (df_ref ref,
gcc_assert (m1_size <= m2_size);
/* (set (strict_low_part (subreg:M1 (reg:M2 X) N)) ...)
-
+
This is a bitfield insertion. The assignment clobbers exactly the
bits named by the subreg--the M1 bits at position N. It is also
technically possible that the bits asked for are longer than units
per word. */
-
+
start = offset;
last = offset + m1_size;
if (dump_file)
- fprintf (dump_file, " cpb strict low part regno=%d start=%d last=%d\n",
+ fprintf (dump_file, " cpb strict low part regno=%d start=%d last=%d\n",
DF_REF_REGNO (ref), start, last);
*start_byte = start;
@@ -197,10 +197,10 @@ df_compute_accessed_bytes_strict_low_part (df_ref ref,
/* Helper for df_compute_accessed_bytes. Ref is a naked subreg.
Return true if this effects the entire reg in REF. Return false if
- otherwise and set START_BYTE and LAST_BYTE. */
+ otherwise and set START_BYTE and LAST_BYTE. */
-static bool
-df_compute_accessed_bytes_subreg (df_ref ref, unsigned int *start_byte,
+static bool
+df_compute_accessed_bytes_subreg (df_ref ref, unsigned int *start_byte,
unsigned int *last_byte)
{
@@ -231,7 +231,7 @@ df_compute_accessed_bytes_subreg (df_ref ref, unsigned int *start_byte,
/* Defs and uses are different in the amount of the reg that touch. */
if (DF_REF_REG_DEF_P (ref))
{
- /* This is an lvalue. */
+ /* This is an lvalue. */
if (m2_size > UNITS_PER_WORD)
{
@@ -239,23 +239,23 @@ df_compute_accessed_bytes_subreg (df_ref ref, unsigned int *start_byte,
Look at the bytes named by the subreg, and expand it to
cover a UNITS_PER_WORD part of register X. That part of
register X is clobbered, the rest is not.
-
+
E.g., (subreg:SI (reg:DI X) 0), where UNITS_PER_WORD is the
size of SImode, clobbers the first SImode part of X, and does
not affect the second SImode part.
-
+
E.g., (subreg:QI (reg:DI X) 0), where UNITS_PER_WORD is the
size of SImode, clobbers the first SImode part of X, and does
not affect the second SImode part. Here the QImode byte is
expanded to a UNITS_PER_WORD portion of the register for
purposes of determining what is clobbered.
-
+
If this is an rvalue, then it touches just the bytes that it
talks about. */
int offset = SUBREG_BYTE (reg);
-
+
start = offset & ~(UNITS_PER_WORD - 1);
- last = (offset + m1_size + UNITS_PER_WORD - 1)
+ last = (offset + m1_size + UNITS_PER_WORD - 1)
& ~(UNITS_PER_WORD - 1);
}
else
@@ -264,7 +264,7 @@ df_compute_accessed_bytes_subreg (df_ref ref, unsigned int *start_byte,
X. */
return true;
}
- else
+ else
{
/* This is an rvalue. It touches just the bytes they explicitly
mentioned. */
@@ -272,9 +272,9 @@ df_compute_accessed_bytes_subreg (df_ref ref, unsigned int *start_byte,
start = offset;
last = start + m1_size;
}
-
+
if (dump_file)
- fprintf (dump_file, " cpb subreg regno=%d start=%d last=%d\n",
+ fprintf (dump_file, " cpb subreg regno=%d start=%d last=%d\n",
DF_REF_REGNO (ref), start, last);
*start_byte = start;
@@ -299,15 +299,15 @@ df_compute_accessed_bytes_subreg (df_ref ref, unsigned int *start_byte,
in, START_BYTE and LAST_BYTE are set to 0 and false is returned.
This means that this use can be ignored. */
-bool
-df_compute_accessed_bytes (df_ref ref, enum df_mm mm,
- unsigned int *start_byte,
+bool
+df_compute_accessed_bytes (df_ref ref, enum df_mm mm,
+ unsigned int *start_byte,
unsigned int *last_byte)
{
if (!dbg_cnt (df_byte_scan))
return true;
- if (!DF_REF_REG_DEF_P (ref)
+ if (!DF_REF_REG_DEF_P (ref)
&& DF_REF_FLAGS_IS_SET (ref, DF_REF_READ_WRITE))
{
if (DF_REF_FLAGS_IS_SET (ref, DF_REF_PRE_POST_MODIFY))
@@ -331,7 +331,7 @@ df_compute_accessed_bytes (df_ref ref, enum df_mm mm,
if (DF_REF_FLAGS_IS_SET (ref, DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT))
return df_compute_accessed_bytes_extract (ref, mm, start_byte, last_byte);
else if (DF_REF_FLAGS_IS_SET (ref, DF_REF_STRICT_LOW_PART))
- return df_compute_accessed_bytes_strict_low_part (ref,
+ return df_compute_accessed_bytes_strict_low_part (ref,
start_byte, last_byte);
else if (GET_CODE (DF_REF_REG (ref)) == SUBREG)
return df_compute_accessed_bytes_subreg (ref, start_byte, last_byte);
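
[Editor's aside, not part of the patch: the df-byte-scan.c hunks above explain that a write through a subreg is widened to whole words when deciding which bytes of the register are clobbered. The same word-rounding arithmetic, redone on plain integers with UNITS_PER_WORD fixed at 4 purely for the example:

#include <stdio.h>

#define UNITS_PER_WORD 4

static void
subreg_def_bytes (unsigned offset, unsigned inner_size,
                  unsigned *start, unsigned *last)
{
  /* A write through a subreg clobbers the whole word(s) it overlaps.  */
  *start = offset & ~(UNITS_PER_WORD - 1);
  *last = (offset + inner_size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
}

int
main (void)
{
  unsigned start, last;
  /* (subreg:QI (reg:DI X) 0): a 1-byte write at offset 0 of an 8-byte
     register is treated as clobbering bytes [0, 4) when the word size
     is 4 bytes.  */
  subreg_def_bytes (0, 1, &start, &last);
  printf ("bytes [%u, %u)\n", start, last);   /* bytes [0, 4) */
  return 0;
}

End of aside.]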
diff --git a/gcc/df-core.c b/gcc/df-core.c
index a53e7091d95..5ec802d0f8b 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -1,7 +1,7 @@
/* Allocation for dataflow support routines.
Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
2008, 2009 Free Software Foundation, Inc.
- Originally contributed by Michael P. Hayes
+ Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
and Kenneth Zadeck (zadeck@naturalbridge.com).
@@ -43,7 +43,7 @@ There are three variations of the live variable problem that are
available whenever dataflow is available. The LR problem finds the
areas that can reach a use of a variable, the UR problems finds the
areas that can be reached from a definition of a variable. The LIVE
-problem finds the intersection of these two areas.
+problem finds the intersection of these two areas.
There are several optional problems. These can be enabled when they
are needed and disabled when they are not needed.
@@ -59,7 +59,7 @@ section.
In the middle layer, basic blocks are scanned to produce transfer
functions which describe the effects of that block on the global
dataflow solution. The transfer functions are only rebuilt if the
-some instruction within the block has changed.
+some instruction within the block has changed.
The top layer is the dataflow solution itself. The dataflow solution
is computed by using an efficient iterative solver and the transfer
@@ -200,7 +200,7 @@ There are four ways of doing the incremental scanning:
4) Do it yourself - In this mechanism, the pass updates the insns
itself using the low level df primitives. Currently no pass does
this, but it has the advantage that it is quite efficient given
- that the pass generally has exact knowledge of what it is changing.
+ that the pass generally has exact knowledge of what it is changing.
DATA STRUCTURES
@@ -247,7 +247,7 @@ to the engine that resolves the dataflow equations.
DATA STRUCTURES:
-The basic object is a DF_REF (reference) and this may either be a
+The basic object is a DF_REF (reference) and this may either be a
DEF (definition) or a USE of a register.
These are linked into a variety of lists; namely reg-def, reg-use,
@@ -291,12 +291,12 @@ There are 4 ways to obtain access to refs:
1) References are divided into two categories, REAL and ARTIFICIAL.
- REAL refs are associated with instructions.
+ REAL refs are associated with instructions.
ARTIFICIAL refs are associated with basic blocks. The heads of
these lists can be accessed by calling df_get_artificial_defs or
- df_get_artificial_uses for the particular basic block.
-
+ df_get_artificial_uses for the particular basic block.
+
Artificial defs and uses occur both at the beginning and ends of blocks.
For blocks that area at the destination of eh edges, the
@@ -315,14 +315,14 @@ There are 4 ways to obtain access to refs:
Artificial defs occur at the end of the entry block. These arise
from registers that are live at entry to the function.
-2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
+2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
uses that appear inside a REG_EQUAL or REG_EQUIV note.)
All of the eq_uses, uses and defs associated with each pseudo or
hard register may be linked in a bidirectional chain. These are
called reg-use or reg_def chains. If the changeable flag
DF_EQ_NOTES is set when the chains are built, the eq_uses will be
- treated like uses. If it is not set they are ignored.
+ treated like uses. If it is not set they are ignored.
The first use, eq_use or def for a register can be obtained using
the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
@@ -347,14 +347,14 @@ There are 4 ways to obtain access to refs:
has been set the array will contain the eq_uses. Otherwise these
are ignored when building the array and assigning the ids. Note
that the values in the id field of a ref may change across calls to
- df_analyze or df_reorganize_defs or df_reorganize_uses.
+ df_analyze or df_reorganize_defs or df_reorganize_uses.
If the only use of this array is to find all of the refs, it is
better to traverse all of the registers and then traverse all of
reg-use or reg-def chains.
NOTES:
-
+
Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
both a use and a def. These are both marked read/write to show that they
are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
@@ -371,7 +371,7 @@ operation. We generate both a use and a def and again mark them
read/write.
Paradoxical subreg writes do not leave a trace of the old content, so they
-are write-only operations.
+are write-only operations.
*/
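
[Editor's aside, not part of the patch: the df-core.c overview above describes refs (defs and uses) linked into per-register chains. A toy model of a reg-def chain, using invented types rather than GCC's df_ref machinery, just to make the chain-walking concrete:

#include <stdio.h>

struct toy_ref
{
  int insn_uid;            /* instruction that contains this def */
  int regno;               /* register being defined             */
  struct toy_ref *next;    /* next def of the same register      */
};

#define NREGS 4
static struct toy_ref *def_chain[NREGS];   /* rough DF_REG_DEF_CHAIN analogue */

static void
record_def (struct toy_ref *ref, int insn_uid, int regno)
{
  ref->insn_uid = insn_uid;
  ref->regno = regno;
  ref->next = def_chain[regno];
  def_chain[regno] = ref;
}

int
main (void)
{
  struct toy_ref a, b, c;
  record_def (&a, 10, 2);
  record_def (&b, 12, 2);
  record_def (&c, 14, 3);

  /* Walking register 2's chain visits all of its defs, newest first.  */
  for (struct toy_ref *r = def_chain[2]; r; r = r->next)
    printf ("reg 2 defined in insn %d\n", r->insn_uid);
  return 0;
}

End of aside.]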
@@ -448,7 +448,7 @@ df_add_problem (struct df_problem *problem)
However for this to work, the computation of RI must be pushed
after which ever of those problems is defined, but we do not
require any of those except for LR to have actually been
- defined. */
+ defined. */
df->num_problems_defined++;
for (i = df->num_problems_defined - 2; i >= 0; i--)
{
@@ -492,7 +492,7 @@ df_clear_flags (int changeable_flags)
not called or is called with null, the entire function in
analyzed. */
-void
+void
df_set_blocks (bitmap blocks)
{
if (blocks)
@@ -515,7 +515,7 @@ df_set_blocks (bitmap blocks)
{
bitmap_iterator bi;
unsigned int bb_index;
-
+
EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
{
basic_block bb = BASIC_BLOCK (bb_index);
@@ -552,7 +552,7 @@ df_set_blocks (bitmap blocks)
BITMAP_ALLOC (&df_bitmap_obstack);
FOR_ALL_BB(bb)
{
- bitmap_set_bit (blocks_to_reset, bb->index);
+ bitmap_set_bit (blocks_to_reset, bb->index);
}
}
dflow->problem->reset_fun (blocks_to_reset);
@@ -729,7 +729,7 @@ rest_of_handle_df_initialize (void)
gcc_assert (df->n_blocks == df->n_blocks_inverted);
df->hard_regs_live_count = XNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
- memset (df->hard_regs_live_count, 0,
+ memset (df->hard_regs_live_count, 0,
sizeof (unsigned int) * FIRST_PSEUDO_REGISTER);
df_hard_reg_init ();
@@ -809,7 +809,7 @@ rest_of_handle_df_finish (void)
for (i = 0; i < df->num_problems_defined; i++)
{
struct dataflow *dflow = df->problems_in_order[i];
- dflow->problem->free_fun ();
+ dflow->problem->free_fun ();
}
if (df->postorder)
@@ -854,7 +854,7 @@ struct rtl_opt_pass pass_df_finish =
/* Helper function for df_worklist_dataflow.
- Propagate the dataflow forward.
+ Propagate the dataflow forward.
Given a BB_INDEX, do the dataflow propagation
and set bits on for successors in PENDING
if the out set of the dataflow has changed. */
@@ -873,16 +873,16 @@ df_worklist_propagate_forward (struct dataflow *dataflow,
/* Calculate <conf_op> of incoming edges. */
if (EDGE_COUNT (bb->preds) > 0)
FOR_EACH_EDGE (e, ei, bb->preds)
- {
- if (TEST_BIT (considered, e->src->index))
+ {
+ if (TEST_BIT (considered, e->src->index))
dataflow->problem->con_fun_n (e);
- }
+ }
else if (dataflow->problem->con_fun_0)
dataflow->problem->con_fun_0 (bb);
if (dataflow->problem->trans_fun (bb_index))
{
- /* The out set of this block has changed.
+ /* The out set of this block has changed.
Propagate to the outgoing blocks. */
FOR_EACH_EDGE (e, ei, bb->succs)
{
@@ -912,16 +912,16 @@ df_worklist_propagate_backward (struct dataflow *dataflow,
/* Calculate <conf_op> of incoming edges. */
if (EDGE_COUNT (bb->succs) > 0)
FOR_EACH_EDGE (e, ei, bb->succs)
- {
- if (TEST_BIT (considered, e->dest->index))
+ {
+ if (TEST_BIT (considered, e->dest->index))
dataflow->problem->con_fun_n (e);
- }
+ }
else if (dataflow->problem->con_fun_0)
dataflow->problem->con_fun_0 (bb);
if (dataflow->problem->trans_fun (bb_index))
{
- /* The out set of this block has changed.
+ /* The out set of this block has changed.
Propagate to the outgoing blocks. */
FOR_EACH_EDGE (e, ei, bb->preds)
{
@@ -937,7 +937,7 @@ df_worklist_propagate_backward (struct dataflow *dataflow,
/* This will free "pending". */
-static void
+static void
df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
bitmap pending,
sbitmap considered,
@@ -972,7 +972,7 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
df_worklist_propagate_forward (dataflow, bb_index,
bbindex_to_postorder,
pending, considered);
- else
+ else
df_worklist_propagate_backward (dataflow, bb_index,
bbindex_to_postorder,
pending, considered);
@@ -993,13 +993,13 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
}
/* Worklist-based dataflow solver. It uses sbitmap as a worklist,
- with "n"-th bit representing the n-th block in the reverse-postorder order.
+ with "n"-th bit representing the n-th block in the reverse-postorder order.
The solver is a double-queue algorithm similar to the "double stack" solver
from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
The only significant difference is that the worklist in this implementation
is always sorted in RPO of the CFG visiting direction. */
-void
+void
df_worklist_dataflow (struct dataflow *dataflow,
bitmap blocks_to_consider,
int *blocks_in_postorder,
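
[Editor's aside, not part of the patch: the comment above describes the worklist solver used by df_worklist_dataflow. The overall shape of such a solver on a toy four-block diamond CFG is sketched below; the "problem" is deliberately trivial (which blocks can reach each block), the point is the pending-set loop and the re-queueing of successors when a block's OUT changes:

#include <stdio.h>

#define NBLOCKS 4

/* Diamond CFG: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3, listed by predecessor.  */
static const int preds[NBLOCKS][2] = { {-1, -1}, {0, -1}, {0, -1}, {1, 2} };

int
main (void)
{
  unsigned in[NBLOCKS] = {0}, out[NBLOCKS] = {0};
  unsigned pending = (1u << NBLOCKS) - 1;   /* all blocks start on the worklist */

  while (pending)
    {
      /* Pick the lowest-numbered pending block; block numbers here stand
         in for the reverse postorder used by the real solver.  */
      int b = __builtin_ctz (pending);
      pending &= pending - 1;

      /* Confluence: IN is the union of the predecessors' OUT.  */
      in[b] = 0;
      for (int i = 0; i < 2; i++)
        if (preds[b][i] >= 0)
          in[b] |= out[preds[b][i]];

      /* Transfer: OUT is IN plus this block itself.  */
      unsigned new_out = in[b] | (1u << b);
      if (new_out != out[b])
        {
          out[b] = new_out;
          /* OUT changed: put the successors back on the worklist.  */
          for (int s = 0; s < NBLOCKS; s++)
            for (int i = 0; i < 2; i++)
              if (preds[s][i] == b)
                pending |= 1u << s;
        }
    }

  for (int b = 0; b < NBLOCKS; b++)
    printf ("block %d reached from: 0x%x\n", b, out[b]);
  return 0;
}

End of aside.]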
@@ -1069,16 +1069,16 @@ df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
}
-/* Execute dataflow analysis on a single dataflow problem.
+/* Execute dataflow analysis on a single dataflow problem.
BLOCKS_TO_CONSIDER are the blocks whose solution can either be
examined or will be computed. For calls from DF_ANALYZE, this is
- the set of blocks that has been passed to DF_SET_BLOCKS.
+ the set of blocks that has been passed to DF_SET_BLOCKS.
*/
void
-df_analyze_problem (struct dataflow *dflow,
- bitmap blocks_to_consider,
+df_analyze_problem (struct dataflow *dflow,
+ bitmap blocks_to_consider,
int *postorder, int n_blocks)
{
timevar_push (dflow->problem->tv_id);
@@ -1088,7 +1088,7 @@ df_analyze_problem (struct dataflow *dflow,
dflow->problem->verify_start_fun ();
#endif
- /* (Re)Allocate the datastructures necessary to solve the problem. */
+ /* (Re)Allocate the datastructures necessary to solve the problem. */
if (dflow->problem->alloc_fun)
dflow->problem->alloc_fun (blocks_to_consider);
@@ -1125,7 +1125,7 @@ df_analyze (void)
bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
bool everything;
int i;
-
+
if (df->postorder)
free (df->postorder);
if (df->postorder_inverted)
@@ -1167,10 +1167,10 @@ df_analyze (void)
{
everything = false;
bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
- df->n_blocks = df_prune_to_subcfg (df->postorder,
+ df->n_blocks = df_prune_to_subcfg (df->postorder,
df->n_blocks, df->blocks_to_analyze);
- df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
- df->n_blocks_inverted,
+ df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
+ df->n_blocks_inverted,
df->blocks_to_analyze);
BITMAP_FREE (current_all_blocks);
}
@@ -1214,7 +1214,7 @@ df_analyze (void)
/* Return the number of basic blocks from the last call to df_analyze. */
-int
+int
df_get_n_blocks (enum df_flow_dir dir)
{
gcc_assert (dir != DF_NONE);
@@ -1230,7 +1230,7 @@ df_get_n_blocks (enum df_flow_dir dir)
}
-/* Return a pointer to the array of basic blocks in the reverse postorder.
+/* Return a pointer to the array of basic blocks in the reverse postorder.
Depending on the direction of the dataflow problem,
it returns either the usual reverse postorder array
or the reverse postorder of inverted traversal. */
@@ -1248,7 +1248,7 @@ df_get_postorder (enum df_flow_dir dir)
return df->postorder;
}
-static struct df_problem user_problem;
+static struct df_problem user_problem;
static struct dataflow user_dflow;
/* Interface for calling iterative dataflow with user defined
@@ -1277,7 +1277,7 @@ df_simple_dataflow (enum df_flow_dir dir,
df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
}
-
+
/*----------------------------------------------------------------------------
Functions to support limited incremental change.
@@ -1300,7 +1300,7 @@ df_get_bb_info (struct dataflow *dflow, unsigned int index)
/* Set basic block info. */
static void
-df_set_bb_info (struct dataflow *dflow, unsigned int index,
+df_set_bb_info (struct dataflow *dflow, unsigned int index,
void *bb_info)
{
gcc_assert (dflow->block_info);
@@ -1310,12 +1310,12 @@ df_set_bb_info (struct dataflow *dflow, unsigned int index,
/* Mark the solutions as being out of date. */
-void
+void
df_mark_solutions_dirty (void)
{
if (df)
{
- int p;
+ int p;
for (p = 1; p < df->num_problems_defined; p++)
df->problems_in_order[p]->solutions_dirty = true;
}
@@ -1324,12 +1324,12 @@ df_mark_solutions_dirty (void)
/* Return true if BB needs it's transfer functions recomputed. */
-bool
+bool
df_get_bb_dirty (basic_block bb)
{
if (df && df_live)
return bitmap_bit_p (df_live->out_of_date_transfer_functions, bb->index);
- else
+ else
return false;
}
@@ -1337,12 +1337,12 @@ df_get_bb_dirty (basic_block bb)
/* Mark BB as needing it's transfer functions as being out of
date. */
-void
+void
df_set_bb_dirty (basic_block bb)
{
if (df)
{
- int p;
+ int p;
for (p = 1; p < df->num_problems_defined; p++)
{
struct dataflow *dflow = df->problems_in_order[p];
@@ -1359,7 +1359,7 @@ df_set_bb_dirty (basic_block bb)
static void
df_clear_bb_dirty (basic_block bb)
{
- int p;
+ int p;
for (p = 1; p < df->num_problems_defined; p++)
{
struct dataflow *dflow = df->problems_in_order[p];
@@ -1370,7 +1370,7 @@ df_clear_bb_dirty (basic_block bb)
/* Called from the rtl_compact_blocks to reorganize the problems basic
block info. */
-void
+void
df_compact_blocks (void)
{
int i, p;
@@ -1396,7 +1396,7 @@ df_compact_blocks (void)
bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB (bb)
{
if (bitmap_bit_p (tmp, bb->index))
bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
@@ -1414,20 +1414,20 @@ df_compact_blocks (void)
place in the block_info vector. Null out the copied
item. The entry and exit blocks never move. */
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB (bb)
{
df_set_bb_info (dflow, i, problem_temps[bb->index]);
problem_temps[bb->index] = NULL;
i++;
}
- memset (dflow->block_info + i, 0,
+ memset (dflow->block_info + i, 0,
(last_basic_block - i) *sizeof (void *));
/* Free any block infos that were not copied (and NULLed).
These are from orphaned blocks. */
for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK (i);
if (problem_temps[i] && bb)
dflow->problem->free_bb_fun
(bb, problem_temps[i]);
@@ -1446,7 +1446,7 @@ df_compact_blocks (void)
bitmap_copy (tmp, df->blocks_to_analyze);
bitmap_clear (df->blocks_to_analyze);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB (bb)
{
if (bitmap_bit_p (tmp, bb->index))
bitmap_set_bit (df->blocks_to_analyze, i);
@@ -1459,7 +1459,7 @@ df_compact_blocks (void)
free (problem_temps);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB (bb)
{
SET_BASIC_BLOCK (i, bb);
bb->index = i;
@@ -1481,7 +1481,7 @@ df_compact_blocks (void)
/* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
block. There is no excuse for people to do this kind of thing. */
-void
+void
df_bb_replace (int old_index, basic_block new_block)
{
int new_block_index = new_block->index;
@@ -1500,7 +1500,7 @@ df_bb_replace (int old_index, basic_block new_block)
{
df_grow_bb_info (dflow);
gcc_assert (df_get_bb_info (dflow, old_index) == NULL);
- df_set_bb_info (dflow, old_index,
+ df_set_bb_info (dflow, old_index,
df_get_bb_info (dflow, new_block_index));
}
}
@@ -1525,7 +1525,7 @@ df_bb_delete (int bb_index)
if (!df)
return;
-
+
for (i = 0; i < df->num_problems_defined; i++)
{
struct dataflow *dflow = df->problems_in_order[i];
@@ -1534,7 +1534,7 @@ df_bb_delete (int bb_index)
void *bb_info = df_get_bb_info (dflow, bb_index);
if (bb_info)
{
- dflow->problem->free_bb_fun (bb, bb_info);
+ dflow->problem->free_bb_fun (bb, bb_info);
df_set_bb_info (dflow, bb_index, NULL);
}
}
@@ -1620,7 +1620,7 @@ df_check_cfg_clean (void)
if (df_lr->solutions_dirty)
return;
- if (saved_cfg == NULL)
+ if (saved_cfg == NULL)
return;
new_map = df_compute_cfg_image ();
@@ -1648,7 +1648,7 @@ df_set_clean_cfg (void)
/* Return first def of REGNO within BB. */
-df_ref
+df_ref
df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
{
rtx insn;
@@ -1674,7 +1674,7 @@ df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
/* Return last def of REGNO within BB. */
-df_ref
+df_ref
df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
{
rtx insn;
@@ -1701,7 +1701,7 @@ df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
/* Finds the reference corresponding to the definition of REG in INSN.
DF is the dataflow object. */
-df_ref
+df_ref
df_find_def (rtx insn, rtx reg)
{
unsigned int uid;
@@ -1723,19 +1723,19 @@ df_find_def (rtx insn, rtx reg)
}
-/* Return true if REG is defined in INSN, zero otherwise. */
+/* Return true if REG is defined in INSN, zero otherwise. */
bool
df_reg_defined (rtx insn, rtx reg)
{
return df_find_def (insn, reg) != NULL;
}
-
+
/* Finds the reference corresponding to the use of REG in INSN.
DF is the dataflow object. */
-
-df_ref
+
+df_ref
df_find_use (rtx insn, rtx reg)
{
unsigned int uid;
@@ -1751,26 +1751,26 @@ df_find_use (rtx insn, rtx reg)
df_ref use = *use_rec;
if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
return use;
- }
+ }
if (df->changeable_flags & DF_EQ_NOTES)
for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
{
df_ref use = *use_rec;
if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
- return use;
+ return use;
}
return NULL;
}
-/* Return true if REG is referenced in INSN, zero otherwise. */
+/* Return true if REG is referenced in INSN, zero otherwise. */
bool
df_reg_used (rtx insn, rtx reg)
{
return df_find_use (insn, reg) != NULL;
}
-
+
/*----------------------------------------------------------------------------
Debugging and printing functions.
@@ -1895,18 +1895,18 @@ df_dump_region (FILE *file)
fprintf (file, "\n\nstarting region dump\n");
df_dump_start (file);
-
- EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
+
+ EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
basic_block bb = BASIC_BLOCK (bb_index);
-
+
df_print_bb_index (bb, file);
df_dump_top (bb, file);
df_dump_bottom (bb, file);
}
fprintf (file, "\n");
}
- else
+ else
df_dump (file);
}
@@ -1934,13 +1934,13 @@ df_dump_start (FILE *file)
{
df_dump_problem_function fun = dflow->problem->dump_start_fun;
if (fun)
- fun(file);
+ fun(file);
}
}
}
-/* Dump the top of the block information for BB. */
+/* Dump the top of the block information for BB. */
void
df_dump_top (basic_block bb, FILE *file)
@@ -1957,13 +1957,13 @@ df_dump_top (basic_block bb, FILE *file)
{
df_dump_bb_problem_function bbfun = dflow->problem->dump_top_fun;
if (bbfun)
- bbfun (bb, file);
+ bbfun (bb, file);
}
}
}
-/* Dump the bottom of the block information for BB. */
+/* Dump the bottom of the block information for BB. */
void
df_dump_bottom (basic_block bb, FILE *file)
@@ -1980,7 +1980,7 @@ df_dump_bottom (basic_block bb, FILE *file)
{
df_dump_bb_problem_function bbfun = dflow->problem->dump_bottom_fun;
if (bbfun)
- bbfun (bb, file);
+ bbfun (bb, file);
}
}
}
@@ -2028,7 +2028,7 @@ df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
{
while (*mws)
{
- fprintf (file, "mw %c r[%d..%d]\n",
+ fprintf (file, "mw %c r[%d..%d]\n",
(DF_MWS_REG_DEF_P (*mws)) ? 'd' : 'u',
(*mws)->start_regno, (*mws)->end_regno);
mws++;
@@ -2036,8 +2036,8 @@ df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
}
-static void
-df_insn_uid_debug (unsigned int uid,
+static void
+df_insn_uid_debug (unsigned int uid,
bool follow_chain, FILE *file)
{
fprintf (file, "insn %d luid %d",
@@ -2085,7 +2085,7 @@ df_insn_debug_regno (rtx insn, FILE *file)
INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
DF_INSN_INFO_LUID (insn_info));
df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
-
+
fprintf (file, " uses ");
df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index 5aad97afeaf..cc7ab882a36 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -1,7 +1,7 @@
/* Standard problems for dataflow support routines.
Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
2008, 2009 Free Software Foundation, Inc.
- Originally contributed by Michael P. Hayes
+ Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
and Kenneth Zadeck (zadeck@naturalbridge.com).
@@ -47,7 +47,7 @@ along with GCC; see the file COPYING3. If not see
/* Note that turning REG_DEAD_DEBUGGING on will cause
gcc.c-torture/unsorted/dump-noaddr.c to fail because it prints
- addresses in the dumps. */
+ addresses in the dumps. */
#if 0
#define REG_DEAD_DEBUGGING
#endif
@@ -73,7 +73,7 @@ df_get_live_out (basic_block bb)
if (df_live)
return DF_LIVE_OUT (bb);
- else
+ else
return DF_LR_OUT (bb);
}
@@ -89,7 +89,7 @@ df_get_live_in (basic_block bb)
if (df_live)
return DF_LIVE_IN (bb);
- else
+ else
return DF_LR_IN (bb);
}
@@ -136,7 +136,7 @@ df_chain_dump (struct df_link *link, FILE *file)
/* Print some basic block info as part of df_dump. */
-void
+void
df_print_bb_index (basic_block bb, FILE *file)
{
edge e;
@@ -147,13 +147,13 @@ df_print_bb_index (basic_block bb, FILE *file)
{
basic_block pred = e->src;
fprintf (file, "%d%s ", pred->index, e->flags & EDGE_EH ? "(EH)" : "");
- }
+ }
fprintf (file, ")->[%d]->( ", bb->index);
FOR_EACH_EDGE (e, ei, bb->succs)
{
basic_block succ = e->dest;
fprintf (file, "%d%s ", succ->index, e->flags & EDGE_EH ? "(EH)" : "");
- }
+ }
fprintf (file, ")\n");
}
@@ -168,12 +168,12 @@ df_print_bb_index (basic_block bb, FILE *file)
----------------------------------------------------------------------------*/
/* This problem plays a large number of games for the sake of
- efficiency.
-
+ efficiency.
+
1) The order of the bits in the bitvectors. After the scanning
phase, all of the defs are sorted. All of the defs for the reg 0
are first, followed by all defs for reg 1 and so on.
-
+
2) There are two kill sets, one if the number of defs is less or
equal to DF_SPARSE_THRESHOLD and another if the number of defs is
greater.
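
[Editor's aside, not part of the patch: the reaching-definitions comment above distinguishes dense kill sets (one bit per def id) from sparse ones (one bit per register, expanded later). A small sketch of that split on toy bitsets; the def-id layout and SPARSE_THRESHOLD value are invented for the example:

#include <stdio.h>

#define SPARSE_THRESHOLD 4

/* Toy def-id layout: reg 0 owns def ids [0, 2), reg 1 owns [2, 8).  */
static const unsigned begin[2] = { 0, 2 };
static const unsigned count[2] = { 2, 6 };

static void
kill_reg (unsigned regno, unsigned *dense_kill, unsigned *sparse_kill)
{
  if (count[regno] <= SPARSE_THRESHOLD)
    /* Dense: mark every def id of the register individually.  */
    *dense_kill |= ((1u << count[regno]) - 1) << begin[regno];
  else
    /* Sparse: just remember the register number; its whole range of
       def ids is cleared when the transfer function runs.  */
    *sparse_kill |= 1u << regno;
}

int
main (void)
{
  unsigned dense = 0, sparse = 0;
  kill_reg (0, &dense, &sparse);   /* 2 defs: goes to the dense set  */
  kill_reg (1, &dense, &sparse);   /* 6 defs: goes to the sparse set */
  printf ("dense kill = 0x%x, sparse kill = 0x%x\n", dense, sparse);
  /* prints: dense kill = 0x3, sparse kill = 0x2 */
  return 0;
}

End of aside.]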
@@ -195,8 +195,8 @@ df_print_bb_index (basic_block bb, FILE *file)
struct df_rd_problem_data
{
/* The set of defs to regs invalidated by call. */
- bitmap sparse_invalidated_by_call;
- /* The set of defs to regs invalidate by call for rd. */
+ bitmap sparse_invalidated_by_call;
+ /* The set of defs to regs invalidate by call for rd. */
bitmap dense_invalidated_by_call;
/* An obstack for the bitmaps we need for this problem. */
bitmap_obstack rd_bitmaps;
@@ -205,7 +205,7 @@ struct df_rd_problem_data
/* Set basic block info. */
static void
-df_rd_set_bb_info (unsigned int index,
+df_rd_set_bb_info (unsigned int index,
struct df_rd_bb_info *bb_info)
{
gcc_assert (df_rd);
@@ -217,7 +217,7 @@ df_rd_set_bb_info (unsigned int index,
/* Free basic block info. */
static void
-df_rd_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
+df_rd_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
struct df_rd_bb_info *bb_info = (struct df_rd_bb_info *) vbb_info;
@@ -236,7 +236,7 @@ df_rd_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
/* Allocate or reset bitmaps for DF_RD blocks. The solution bits are
not touched unless the block is new. */
-static void
+static void
df_rd_alloc (bitmap all_blocks)
{
unsigned int bb_index;
@@ -244,7 +244,7 @@ df_rd_alloc (bitmap all_blocks)
struct df_rd_problem_data *problem_data;
if (!df_rd->block_pool)
- df_rd->block_pool = create_alloc_pool ("df_rd_block pool",
+ df_rd->block_pool = create_alloc_pool ("df_rd_block pool",
sizeof (struct df_rd_bb_info), 50);
if (df_rd->problem_data)
@@ -253,7 +253,7 @@ df_rd_alloc (bitmap all_blocks)
bitmap_clear (problem_data->sparse_invalidated_by_call);
bitmap_clear (problem_data->dense_invalidated_by_call);
}
- else
+ else
{
problem_data = XNEW (struct df_rd_problem_data);
df_rd->problem_data = problem_data;
@@ -275,13 +275,13 @@ df_rd_alloc (bitmap all_blocks)
{
struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
if (bb_info)
- {
+ {
bitmap_clear (bb_info->kill);
bitmap_clear (bb_info->sparse_kill);
bitmap_clear (bb_info->gen);
}
else
- {
+ {
bb_info = (struct df_rd_bb_info *) pool_alloc (df_rd->block_pool);
df_rd_set_bb_info (bb_index, bb_info);
bb_info->kill = BITMAP_ALLOC (&problem_data->rd_bitmaps);
@@ -310,8 +310,8 @@ df_rd_simulate_artificial_defs_at_top (basic_block bb, bitmap local_rd)
{
unsigned int dregno = DF_REF_REGNO (def);
if (!(DF_REF_FLAGS (def) & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
- bitmap_clear_range (local_rd,
- DF_DEFS_BEGIN (dregno),
+ bitmap_clear_range (local_rd,
+ DF_DEFS_BEGIN (dregno),
DF_DEFS_COUNT (dregno));
bitmap_set_bit (local_rd, DF_REF_ID (def));
}
@@ -336,10 +336,10 @@ df_rd_simulate_one_insn (basic_block bb ATTRIBUTE_UNUSED, rtx insn,
|| (dregno >= FIRST_PSEUDO_REGISTER))
{
if (!(DF_REF_FLAGS (def) & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
- bitmap_clear_range (local_rd,
- DF_DEFS_BEGIN (dregno),
+ bitmap_clear_range (local_rd,
+ DF_DEFS_BEGIN (dregno),
DF_DEFS_COUNT (dregno));
- if (!(DF_REF_FLAGS (def)
+ if (!(DF_REF_FLAGS (def)
& (DF_REF_MUST_CLOBBER | DF_REF_MAY_CLOBBER)))
bitmap_set_bit (local_rd, DF_REF_ID (def));
}
@@ -352,7 +352,7 @@ df_rd_simulate_one_insn (basic_block bb ATTRIBUTE_UNUSED, rtx insn,
of kill sets. */
static void
-df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
+df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
df_ref *def_rec,
int top_flag)
{
@@ -364,12 +364,12 @@ df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
unsigned int regno = DF_REF_REGNO (def);
unsigned int begin = DF_DEFS_BEGIN (regno);
unsigned int n_defs = DF_DEFS_COUNT (regno);
-
+
if ((!(df->changeable_flags & DF_NO_HARD_REGS))
|| (regno >= FIRST_PSEUDO_REGISTER))
{
/* Only the last def(s) for a regno in the block has any
- effect. */
+ effect. */
if (!bitmap_bit_p (seen_in_block, regno))
{
/* The first def for regno in insn gets to knock out the
@@ -377,7 +377,7 @@ df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
if ((!bitmap_bit_p (seen_in_insn, regno))
/* If the def is to only part of the reg, it does
not kill the other defs that reach here. */
- && (!(DF_REF_FLAGS (def) &
+ && (!(DF_REF_FLAGS (def) &
(DF_REF_PARTIAL | DF_REF_CONDITIONAL | DF_REF_MAY_CLOBBER))))
{
if (n_defs > DF_SPARSE_THRESHOLD)
@@ -391,11 +391,11 @@ df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
bitmap_clear_range (bb_info->gen, begin, n_defs);
}
}
-
+
bitmap_set_bit (seen_in_insn, regno);
/* All defs for regno in the instruction may be put into
the gen set. */
- if (!(DF_REF_FLAGS (def)
+ if (!(DF_REF_FLAGS (def)
& (DF_REF_MUST_CLOBBER | DF_REF_MAY_CLOBBER)))
bitmap_set_bit (bb_info->gen, DF_REF_ID (def));
}
@@ -419,7 +419,7 @@ df_rd_bb_local_compute (unsigned int bb_index)
/* Artificials are only hard regs. */
if (!(df->changeable_flags & DF_NO_HARD_REGS))
- df_rd_bb_local_compute_process_def (bb_info,
+ df_rd_bb_local_compute_process_def (bb_info,
df_get_artificial_defs (bb_index),
0);
@@ -430,7 +430,7 @@ df_rd_bb_local_compute (unsigned int bb_index)
if (!INSN_P (insn))
continue;
- df_rd_bb_local_compute_process_def (bb_info,
+ df_rd_bb_local_compute_process_def (bb_info,
DF_INSN_UID_DEFS (uid), 0);
/* This complex dance with the two bitmaps is required because
@@ -447,7 +447,7 @@ df_rd_bb_local_compute (unsigned int bb_index)
are going backwards through the block and these are logically at
the start. */
if (!(df->changeable_flags & DF_NO_HARD_REGS))
- df_rd_bb_local_compute_process_def (bb_info,
+ df_rd_bb_local_compute_process_def (bb_info,
df_get_artificial_defs (bb_index),
DF_REF_AT_TOP);
}
@@ -475,15 +475,15 @@ df_rd_local_compute (bitmap all_blocks)
{
df_rd_bb_local_compute (bb_index);
}
-
+
/* Set up the knockout bit vectors to be applied across EH_EDGES. */
EXECUTE_IF_SET_IN_BITMAP (regs_invalidated_by_call_regset, 0, regno, bi)
{
if (DF_DEFS_COUNT (regno) > DF_SPARSE_THRESHOLD)
bitmap_set_bit (sparse_invalidated, regno);
else
- bitmap_set_range (dense_invalidated,
- DF_DEFS_BEGIN (regno),
+ bitmap_set_range (dense_invalidated,
+ DF_DEFS_BEGIN (regno),
DF_DEFS_COUNT (regno));
}
@@ -494,7 +494,7 @@ df_rd_local_compute (bitmap all_blocks)
/* Initialize the solution bit vectors for problem. */
-static void
+static void
df_rd_init_solution (bitmap all_blocks)
{
unsigned int bb_index;
@@ -503,7 +503,7 @@ df_rd_init_solution (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
-
+
bitmap_copy (bb_info->out, bb_info->gen);
bitmap_clear (bb_info->in);
}
@@ -517,7 +517,7 @@ df_rd_confluence_n (edge e)
bitmap op1 = df_rd_get_bb_info (e->dest->index)->in;
bitmap op2 = df_rd_get_bb_info (e->src->index)->out;
- if (e->flags & EDGE_FAKE)
+ if (e->flags & EDGE_FAKE)
return;
if (e->flags & EDGE_EH)
@@ -535,8 +535,8 @@ df_rd_confluence_n (edge e)
EXECUTE_IF_SET_IN_BITMAP (sparse_invalidated, 0, regno, bi)
{
- bitmap_clear_range (tmp,
- DF_DEFS_BEGIN (regno),
+ bitmap_clear_range (tmp,
+ DF_DEFS_BEGIN (regno),
DF_DEFS_COUNT (regno));
}
bitmap_ior_into (op1, tmp);
@@ -563,7 +563,7 @@ df_rd_transfer_function (int bb_index)
if (bitmap_empty_p (sparse_kill))
return bitmap_ior_and_compl (out, gen, in, kill);
- else
+ else
{
struct df_rd_problem_data *problem_data;
bool changed = false;
@@ -577,8 +577,8 @@ df_rd_transfer_function (int bb_index)
bitmap_copy (tmp, in);
EXECUTE_IF_SET_IN_BITMAP (sparse_kill, 0, regno, bi)
{
- bitmap_clear_range (tmp,
- DF_DEFS_BEGIN (regno),
+ bitmap_clear_range (tmp,
+ DF_DEFS_BEGIN (regno),
DF_DEFS_COUNT (regno));
}
bitmap_and_compl_into (tmp, kill);
@@ -589,7 +589,7 @@ df_rd_transfer_function (int bb_index)
BITMAP_FREE (out);
bb_info->out = tmp;
}
- else
+ else
BITMAP_FREE (tmp);
return changed;
}
@@ -608,7 +608,7 @@ df_rd_free (void)
{
free_alloc_pool (df_rd->block_pool);
bitmap_obstack_release (&problem_data->rd_bitmaps);
-
+
df_rd->block_info_size = 0;
free (df_rd->block_info);
free (df_rd->problem_data);
@@ -626,8 +626,8 @@ df_rd_start_dump (FILE *file)
= (struct df_rd_problem_data *) df_rd->problem_data;
unsigned int m = DF_REG_SIZE(df);
unsigned int regno;
-
- if (!df_rd->block_info)
+
+ if (!df_rd->block_info)
return;
fprintf (file, ";; Reaching defs:\n\n");
@@ -639,8 +639,8 @@ df_rd_start_dump (FILE *file)
for (regno = 0; regno < m; regno++)
if (DF_DEFS_COUNT (regno))
- fprintf (file, "%d[%d,%d] ", regno,
- DF_DEFS_BEGIN (regno),
+ fprintf (file, "%d[%d,%d] ", regno,
+ DF_DEFS_BEGIN (regno),
DF_DEFS_COUNT (regno));
fprintf (file, "\n");
@@ -655,7 +655,7 @@ df_rd_top_dump (basic_block bb, FILE *file)
struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
if (!bb_info || !bb_info->in)
return;
-
+
fprintf (file, ";; rd in \t(%d)\n", (int) bitmap_count_bits (bb_info->in));
dump_bitmap (file, bb_info->in);
fprintf (file, ";; rd gen \t(%d)\n", (int) bitmap_count_bits (bb_info->gen));
@@ -673,7 +673,7 @@ df_rd_bottom_dump (basic_block bb, FILE *file)
struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
if (!bb_info || !bb_info->out)
return;
-
+
fprintf (file, ";; rd out \t(%d)\n", (int) bitmap_count_bits (bb_info->out));
dump_bitmap (file, bb_info->out);
}
@@ -690,8 +690,8 @@ static struct df_problem problem_RD =
df_rd_local_compute, /* Local compute function. */
df_rd_init_solution, /* Init the solution specific data. */
df_worklist_dataflow, /* Worklist solver. */
- NULL, /* Confluence operator 0. */
- df_rd_confluence_n, /* Confluence operator n. */
+ NULL, /* Confluence operator 0. */
+ df_rd_confluence_n, /* Confluence operator n. */
df_rd_transfer_function, /* Transfer function. */
NULL, /* Finalize function. */
df_rd_free, /* Free all of the problem information. */
@@ -702,7 +702,7 @@ static struct df_problem problem_RD =
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- TV_DF_RD, /* Timing variable. */
+ TV_DF_RD, /* Timing variable. */
true /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -739,7 +739,7 @@ struct df_lr_problem_data
/* Set basic block info. */
static void
-df_lr_set_bb_info (unsigned int index,
+df_lr_set_bb_info (unsigned int index,
struct df_lr_bb_info *bb_info)
{
gcc_assert (df_lr);
@@ -747,11 +747,11 @@ df_lr_set_bb_info (unsigned int index,
df_lr->block_info[index] = bb_info;
}
-
+
/* Free basic block info. */
static void
-df_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
+df_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
struct df_lr_bb_info *bb_info = (struct df_lr_bb_info *) vbb_info;
@@ -769,14 +769,14 @@ df_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
/* Allocate or reset bitmaps for DF_LR blocks. The solution bits are
not touched unless the block is new. */
-static void
+static void
df_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
unsigned int bb_index;
bitmap_iterator bi;
if (!df_lr->block_pool)
- df_lr->block_pool = create_alloc_pool ("df_lr_block pool",
+ df_lr->block_pool = create_alloc_pool ("df_lr_block pool",
sizeof (struct df_lr_bb_info), 50);
df_grow_bb_info (df_lr);
@@ -785,12 +785,12 @@ df_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
if (bb_info)
- {
+ {
bitmap_clear (bb_info->def);
bitmap_clear (bb_info->use);
}
else
- {
+ {
bb_info = (struct df_lr_bb_info *) pool_alloc (df_lr->block_pool);
df_lr_set_bb_info (bb_index, bb_info);
bb_info->use = BITMAP_ALLOC (NULL);
@@ -806,7 +806,7 @@ df_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
/* Reset the global solution for recalculation. */
-static void
+static void
df_lr_reset (bitmap all_blocks)
{
unsigned int bb_index;
@@ -859,7 +859,7 @@ df_lr_bb_local_compute (unsigned int bb_index)
unsigned int uid = INSN_UID (insn);
if (!NONDEBUG_INSN_P (insn))
- continue;
+ continue;
for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
{
@@ -895,7 +895,7 @@ df_lr_bb_local_compute (unsigned int bb_index)
bitmap_clear_bit (bb_info->use, dregno);
}
}
-
+
#ifdef EH_USES
/* Process the uses that are live into an exception handler. */
for (use_rec = df_get_artificial_uses (bb_index); *use_rec; use_rec++)
@@ -923,12 +923,12 @@ df_lr_local_compute (bitmap all_blocks ATTRIBUTE_UNUSED)
{
unsigned int bb_index;
bitmap_iterator bi;
-
+
bitmap_clear (df->hardware_regs_used);
-
+
/* The all-important stack pointer must always be live. */
bitmap_set_bit (df->hardware_regs_used, STACK_POINTER_REGNUM);
-
+
/* Before reload, there are a few registers that must be forced
live everywhere -- which might not already be the case for
blocks within infinite loops. */
@@ -937,21 +937,21 @@ df_lr_local_compute (bitmap all_blocks ATTRIBUTE_UNUSED)
/* Any reference to any pseudo before reload is a potential
reference of the frame pointer. */
bitmap_set_bit (df->hardware_regs_used, FRAME_POINTER_REGNUM);
-
+
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
/* Pseudos with argument area equivalences may require
reloading via the argument pointer. */
if (fixed_regs[ARG_POINTER_REGNUM])
bitmap_set_bit (df->hardware_regs_used, ARG_POINTER_REGNUM);
#endif
-
+
/* Any constant, or pseudo with constant equivalences, may
require reloading from memory using the pic register. */
if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
&& fixed_regs[PIC_OFFSET_TABLE_REGNUM])
bitmap_set_bit (df->hardware_regs_used, PIC_OFFSET_TABLE_REGNUM);
}
-
+
EXECUTE_IF_SET_IN_BITMAP (df_lr->out_of_date_transfer_functions, 0, bb_index, bi)
{
if (bb_index == EXIT_BLOCK)
@@ -971,7 +971,7 @@ df_lr_local_compute (bitmap all_blocks ATTRIBUTE_UNUSED)
/* Initialize the solution vectors. */
-static void
+static void
df_lr_init (bitmap all_blocks)
{
unsigned int bb_index;
@@ -995,7 +995,7 @@ df_lr_confluence_0 (basic_block bb)
bitmap op1 = df_lr_get_bb_info (bb->index)->out;
if (bb != EXIT_BLOCK_PTR)
bitmap_copy (op1, df->hardware_regs_used);
-}
+}
/* Confluence function that ignores fake edges. */
@@ -1005,7 +1005,7 @@ df_lr_confluence_n (edge e)
{
bitmap op1 = df_lr_get_bb_info (e->src->index)->out;
bitmap op2 = df_lr_get_bb_info (e->dest->index)->in;
-
+
/* Call-clobbered registers die across exception and call edges. */
/* ??? Abnormal call edges ignored for the moment, as this gets
confused by sibling call edges, which crashes reg-stack. */
@@ -1015,7 +1015,7 @@ df_lr_confluence_n (edge e)
bitmap_ior_into (op1, op2);
bitmap_ior_into (op1, df->hardware_regs_used);
-}
+}
/* Transfer function. */
@@ -1087,7 +1087,7 @@ df_lr_free (void)
}
}
free_alloc_pool (df_lr->block_pool);
-
+
df_lr->block_info_size = 0;
free (df_lr->block_info);
}
@@ -1106,7 +1106,7 @@ df_lr_top_dump (basic_block bb, FILE *file)
struct df_lr_problem_data *problem_data;
if (!bb_info || !bb_info->in)
return;
-
+
fprintf (file, ";; lr in \t");
df_print_regset (file, bb_info->in);
if (df_lr->problem_data)
@@ -1119,7 +1119,7 @@ df_lr_top_dump (basic_block bb, FILE *file)
df_print_regset (file, bb_info->use);
fprintf (file, ";; lr def \t");
df_print_regset (file, bb_info->def);
-}
+}
/* Debugging info at bottom of bb. */
@@ -1131,7 +1131,7 @@ df_lr_bottom_dump (basic_block bb, FILE *file)
struct df_lr_problem_data *problem_data;
if (!bb_info || !bb_info->out)
return;
-
+
fprintf (file, ";; lr out \t");
df_print_regset (file, bb_info->out);
if (df_lr->problem_data)
@@ -1140,7 +1140,7 @@ df_lr_bottom_dump (basic_block bb, FILE *file)
fprintf (file, ";; old out \t");
df_print_regset (file, problem_data->out[bb->index]);
}
-}
+}
/* Build the datastructure to verify that the solution to the dataflow
@@ -1157,7 +1157,7 @@ df_lr_verify_solution_start (void)
return;
}
- /* Set it true so that the solution is recomputed. */
+ /* Set it true so that the solution is recomputed. */
df_lr->solutions_dirty = true;
problem_data = XNEW (struct df_lr_problem_data);
@@ -1231,8 +1231,8 @@ static struct df_problem problem_LR =
df_lr_local_compute, /* Local compute function. */
df_lr_init, /* Init the solution specific data. */
df_worklist_dataflow, /* Worklist solver. */
- df_lr_confluence_0, /* Confluence operator 0. */
- df_lr_confluence_n, /* Confluence operator n. */
+ df_lr_confluence_0, /* Confluence operator 0. */
+ df_lr_confluence_n, /* Confluence operator n. */
df_lr_transfer_function, /* Transfer function. */
df_lr_finalize, /* Finalize function. */
df_lr_free, /* Free all of the problem information. */
@@ -1243,7 +1243,7 @@ static struct df_problem problem_LR =
df_lr_verify_solution_start,/* Incremental solution verify start. */
df_lr_verify_solution_end, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- TV_DF_LR, /* Timing variable. */
+ TV_DF_LR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -1294,7 +1294,7 @@ df_lr_verify_transfer_functions (void)
/* Make a copy of the transfer functions and then compute
new ones to see if the transfer functions have
changed. */
- if (!bitmap_bit_p (df_lr->out_of_date_transfer_functions,
+ if (!bitmap_bit_p (df_lr->out_of_date_transfer_functions,
bb->index))
{
bitmap_copy (saved_def, bb_info->def);
@@ -1312,7 +1312,7 @@ df_lr_verify_transfer_functions (void)
/* If we do not have basic block info, the block must be in
the list of dirty blocks or else some one has added a
block behind our backs. */
- gcc_assert (bitmap_bit_p (df_lr->out_of_date_transfer_functions,
+ gcc_assert (bitmap_bit_p (df_lr->out_of_date_transfer_functions,
bb->index));
}
/* Make sure no one created a block without following
@@ -1321,8 +1321,8 @@ df_lr_verify_transfer_functions (void)
}
/* Make sure there are no dirty bits in blocks that have been deleted. */
- gcc_assert (!bitmap_intersect_compl_p (df_lr->out_of_date_transfer_functions,
- all_blocks));
+ gcc_assert (!bitmap_intersect_compl_p (df_lr->out_of_date_transfer_functions,
+ all_blocks));
BITMAP_FREE (saved_def);
BITMAP_FREE (saved_use);
@@ -1349,7 +1349,7 @@ df_lr_verify_transfer_functions (void)
Then, the in and out sets for the LIVE problem itself are computed.
These are the logical AND of the IN and OUT sets from the LR problem
- and the must-initialized problem.
+ and the must-initialized problem.
----------------------------------------------------------------------------*/
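Concretely, the AND described above is a per-block bitmap intersection (see df_live_finalize below, which does exactly this with bitmap_and_into). A worked example with invented register numbers: if LR says registers {1, 2, 3} may be live into a block while only {2, 3, 5} are must-initialized there, the LIVE in-set keeps the intersection {2, 3}:

/* Register numbers invented; bitmap API as used throughout this file.  */
static void
live_trim_example (void)
{
  bitmap lr_in = BITMAP_ALLOC (NULL);   /* Will hold {1, 2, 3}.  */
  bitmap mi_in = BITMAP_ALLOC (NULL);   /* Will hold {2, 3, 5}.  */

  bitmap_set_bit (lr_in, 1); bitmap_set_bit (lr_in, 2); bitmap_set_bit (lr_in, 3);
  bitmap_set_bit (mi_in, 2); bitmap_set_bit (mi_in, 3); bitmap_set_bit (mi_in, 5);

  bitmap_and_into (mi_in, lr_in);       /* mi_in is now {2, 3}.  */

  BITMAP_FREE (lr_in);
  BITMAP_FREE (mi_in);
}
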
/* Private data used to verify the solution for this problem. */
@@ -1367,7 +1367,7 @@ static bitmap df_live_scratch;
/* Set basic block info. */
static void
-df_live_set_bb_info (unsigned int index,
+df_live_set_bb_info (unsigned int index,
struct df_live_bb_info *bb_info)
{
gcc_assert (df_live);
@@ -1379,7 +1379,7 @@ df_live_set_bb_info (unsigned int index,
/* Free basic block info. */
static void
-df_live_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
+df_live_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
struct df_live_bb_info *bb_info = (struct df_live_bb_info *) vbb_info;
@@ -1397,14 +1397,14 @@ df_live_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
/* Allocate or reset bitmaps for DF_LIVE blocks. The solution bits are
not touched unless the block is new. */
-static void
+static void
df_live_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
unsigned int bb_index;
bitmap_iterator bi;
if (!df_live->block_pool)
- df_live->block_pool = create_alloc_pool ("df_live_block pool",
+ df_live->block_pool = create_alloc_pool ("df_live_block pool",
sizeof (struct df_live_bb_info), 100);
if (!df_live_scratch)
df_live_scratch = BITMAP_ALLOC (NULL);
@@ -1415,12 +1415,12 @@ df_live_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
if (bb_info)
- {
+ {
bitmap_clear (bb_info->kill);
bitmap_clear (bb_info->gen);
}
else
- {
+ {
bb_info = (struct df_live_bb_info *) pool_alloc (df_live->block_pool);
df_live_set_bb_info (bb_index, bb_info);
bb_info->kill = BITMAP_ALLOC (NULL);
@@ -1435,7 +1435,7 @@ df_live_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
/* Reset the global solution for recalculation. */
-static void
+static void
df_live_reset (bitmap all_blocks)
{
unsigned int bb_index;
@@ -1517,7 +1517,7 @@ df_live_local_compute (bitmap all_blocks ATTRIBUTE_UNUSED)
df_grow_insn_info ();
- EXECUTE_IF_SET_IN_BITMAP (df_live->out_of_date_transfer_functions,
+ EXECUTE_IF_SET_IN_BITMAP (df_live->out_of_date_transfer_functions,
0, bb_index, bi)
{
df_live_bb_local_compute (bb_index);
@@ -1529,7 +1529,7 @@ df_live_local_compute (bitmap all_blocks ATTRIBUTE_UNUSED)
/* Initialize the solution vectors. */
-static void
+static void
df_live_init (bitmap all_blocks)
{
unsigned int bb_index;
@@ -1554,12 +1554,12 @@ df_live_confluence_n (edge e)
{
bitmap op1 = df_live_get_bb_info (e->dest->index)->in;
bitmap op2 = df_live_get_bb_info (e->src->index)->out;
-
- if (e->flags & EDGE_FAKE)
+
+ if (e->flags & EDGE_FAKE)
return;
bitmap_ior_into (op1, op2);
-}
+}
/* Transfer function for the forwards must-initialized problem. */
@@ -1602,13 +1602,13 @@ df_live_finalize (bitmap all_blocks)
{
struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
struct df_live_bb_info *bb_live_info = df_live_get_bb_info (bb_index);
-
+
/* No register may reach a location where it is not used. Thus
we trim the rr result to the places where it is used. */
bitmap_and_into (bb_live_info->in, bb_lr_info->in);
bitmap_and_into (bb_live_info->out, bb_lr_info->out);
}
-
+
df_live->solutions_dirty = false;
}
}
@@ -1622,7 +1622,7 @@ df_live_free (void)
if (df_live->block_info)
{
unsigned int i;
-
+
for (i = 0; i < df_live->block_info_size; i++)
{
struct df_live_bb_info *bb_info = df_live_get_bb_info (i);
@@ -1634,7 +1634,7 @@ df_live_free (void)
BITMAP_FREE (bb_info->out);
}
}
-
+
free_alloc_pool (df_live->block_pool);
df_live->block_info_size = 0;
free (df_live->block_info);
@@ -1657,7 +1657,7 @@ df_live_top_dump (basic_block bb, FILE *file)
if (!bb_info || !bb_info->in)
return;
-
+
fprintf (file, ";; live in \t");
df_print_regset (file, bb_info->in);
if (df_live->problem_data)
@@ -1683,7 +1683,7 @@ df_live_bottom_dump (basic_block bb, FILE *file)
if (!bb_info || !bb_info->out)
return;
-
+
fprintf (file, ";; live out \t");
df_print_regset (file, bb_info->out);
if (df_live->problem_data)
@@ -1709,7 +1709,7 @@ df_live_verify_solution_start (void)
return;
}
- /* Set it true so that the solution is recomputed. */
+ /* Set it true so that the solution is recomputed. */
df_live->solutions_dirty = true;
problem_data = XNEW (struct df_live_problem_data);
@@ -1778,8 +1778,8 @@ static struct df_problem problem_LIVE =
df_live_local_compute, /* Local compute function. */
df_live_init, /* Init the solution specific data. */
df_worklist_dataflow, /* Worklist solver. */
- NULL, /* Confluence operator 0. */
- df_live_confluence_n, /* Confluence operator n. */
+ NULL, /* Confluence operator 0. */
+ df_live_confluence_n, /* Confluence operator n. */
df_live_transfer_function, /* Transfer function. */
df_live_finalize, /* Finalize function. */
df_live_free, /* Free all of the problem information. */
@@ -1817,7 +1817,7 @@ df_live_set_all_dirty (void)
{
basic_block bb;
FOR_ALL_BB (bb)
- bitmap_set_bit (df_live->out_of_date_transfer_functions,
+ bitmap_set_bit (df_live->out_of_date_transfer_functions,
bb->index);
}
@@ -1852,7 +1852,7 @@ df_live_verify_transfer_functions (void)
/* Make a copy of the transfer functions and then compute
new ones to see if the transfer functions have
changed. */
- if (!bitmap_bit_p (df_live->out_of_date_transfer_functions,
+ if (!bitmap_bit_p (df_live->out_of_date_transfer_functions,
bb->index))
{
bitmap_copy (saved_gen, bb_info->gen);
@@ -1870,7 +1870,7 @@ df_live_verify_transfer_functions (void)
/* If we do not have basic block info, the block must be in
the list of dirty blocks or else some one has added a
block behind our backs. */
- gcc_assert (bitmap_bit_p (df_live->out_of_date_transfer_functions,
+ gcc_assert (bitmap_bit_p (df_live->out_of_date_transfer_functions,
bb->index));
}
/* Make sure no one created a block without following
@@ -1879,8 +1879,8 @@ df_live_verify_transfer_functions (void)
}
/* Make sure there are no dirty bits in blocks that have been deleted. */
- gcc_assert (!bitmap_intersect_compl_p (df_live->out_of_date_transfer_functions,
- all_blocks));
+ gcc_assert (!bitmap_intersect_compl_p (df_live->out_of_date_transfer_functions,
+ all_blocks));
BITMAP_FREE (saved_gen);
BITMAP_FREE (saved_kill);
BITMAP_FREE (all_blocks);
@@ -1906,7 +1906,7 @@ df_chain_create (df_ref src, df_ref dst)
{
struct df_link *head = DF_REF_CHAIN (src);
struct df_link *link = (struct df_link *) pool_alloc (df_chain->block_pool);
-
+
DF_REF_CHAIN (src) = link;
link->next = head;
link->ref = dst;
@@ -1915,7 +1915,7 @@ df_chain_create (df_ref src, df_ref dst)
/* Delete any du or ud chains that start at REF and point to
- TARGET. */
+ TARGET. */
static void
df_chain_unlink_1 (df_ref ref, df_ref target)
{
@@ -1958,10 +1958,10 @@ df_chain_unlink (df_ref ref)
/* Copy the du or ud chain starting at FROM_REF and attach it to
- TO_REF. */
+ TO_REF. */
-void
-df_chain_copy (df_ref to_ref,
+void
+df_chain_copy (df_ref to_ref,
struct df_link *from_ref)
{
while (from_ref)
@@ -1980,7 +1980,7 @@ df_chain_remove_problem (void)
bitmap_iterator bi;
unsigned int bb_index;
- /* Wholesale destruction of the old chains. */
+ /* Wholesale destruction of the old chains. */
if (df_chain->block_pool)
free_alloc_pool (df_chain->block_pool);
@@ -1997,11 +1997,11 @@ df_chain_remove_problem (void)
if (df_chain_problem_p (DF_UD_CHAIN))
for (use_rec = df_get_artificial_uses (bb->index); *use_rec; use_rec++)
DF_REF_CHAIN (*use_rec) = NULL;
-
+
FOR_BB_INSNS (bb, insn)
{
unsigned int uid = INSN_UID (insn);
-
+
if (INSN_P (insn))
{
if (df_chain_problem_p (DF_DU_CHAIN))
@@ -2036,11 +2036,11 @@ df_chain_fully_remove_problem (void)
/* Create def-use or use-def chains. */
-static void
+static void
df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
df_chain_remove_problem ();
- df_chain->block_pool = create_alloc_pool ("df_chain_block pool",
+ df_chain->block_pool = create_alloc_pool ("df_chain_block pool",
sizeof (struct df_link), 50);
df_chain->optional_p = true;
}
@@ -2064,7 +2064,7 @@ df_chain_create_bb_process_use (bitmap local_rd,
{
bitmap_iterator bi;
unsigned int def_index;
-
+
while (*use_rec)
{
df_ref use = *use_rec;
@@ -2080,13 +2080,13 @@ df_chain_create_bb_process_use (bitmap local_rd,
{
unsigned int first_index = DF_DEFS_BEGIN (uregno);
unsigned int last_index = first_index + count - 1;
-
+
EXECUTE_IF_SET_IN_BITMAP (local_rd, first_index, def_index, bi)
{
df_ref def;
- if (def_index > last_index)
+ if (def_index > last_index)
break;
-
+
def = DF_DEFS_GET (def_index);
if (df_chain_problem_p (DF_DU_CHAIN))
df_chain_create (def, use);
@@ -2121,16 +2121,16 @@ df_chain_create_bb (unsigned int bb_index)
#ifdef EH_USES
/* Create the chains for the artificial uses from the EH_USES at the
beginning of the block. */
-
+
/* Artificials are only hard regs. */
if (!(df->changeable_flags & DF_NO_HARD_REGS))
df_chain_create_bb_process_use (cpy,
- df_get_artificial_uses (bb->index),
+ df_get_artificial_uses (bb->index),
DF_REF_AT_TOP);
#endif
df_rd_simulate_artificial_defs_at_top (bb, cpy);
-
+
/* Process the regular instructions next. */
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
@@ -2151,7 +2151,7 @@ df_chain_create_bb (unsigned int bb_index)
at the end of the block. */
if (!(df->changeable_flags & DF_NO_HARD_REGS))
df_chain_create_bb_process_use (cpy,
- df_get_artificial_uses (bb->index),
+ df_get_artificial_uses (bb->index),
0);
BITMAP_FREE (cpy);
@@ -2165,7 +2165,7 @@ df_chain_finalize (bitmap all_blocks)
{
unsigned int bb_index;
bitmap_iterator bi;
-
+
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
df_chain_create_bb (bb_index);
@@ -2195,7 +2195,7 @@ df_chain_top_dump (basic_block bb, FILE *file)
df_ref *def_rec = df_get_artificial_defs (bb->index);
if (*def_rec)
{
-
+
fprintf (file, ";; DU chains for artificial defs\n");
while (*def_rec)
{
@@ -2205,7 +2205,7 @@ df_chain_top_dump (basic_block bb, FILE *file)
fprintf (file, "\n");
def_rec++;
}
- }
+ }
FOR_BB_INSNS (bb, insn)
{
@@ -2215,9 +2215,9 @@ df_chain_top_dump (basic_block bb, FILE *file)
def_rec = DF_INSN_INFO_DEFS (insn_info);
if (*def_rec)
{
- fprintf (file, ";; DU chains for insn luid %d uid %d\n",
+ fprintf (file, ";; DU chains for insn luid %d uid %d\n",
DF_INSN_INFO_LUID (insn_info), INSN_UID (insn));
-
+
while (*def_rec)
{
df_ref def = *def_rec;
@@ -2254,7 +2254,7 @@ df_chain_bottom_dump (basic_block bb, FILE *file)
fprintf (file, "\n");
use_rec++;
}
- }
+ }
FOR_BB_INSNS (bb, insn)
{
@@ -2265,9 +2265,9 @@ df_chain_bottom_dump (basic_block bb, FILE *file)
use_rec = DF_INSN_INFO_USES (insn_info);
if (*use_rec || *eq_use_rec)
{
- fprintf (file, ";; UD chains for insn luid %d uid %d\n",
+ fprintf (file, ";; UD chains for insn luid %d uid %d\n",
DF_INSN_INFO_LUID (insn_info), INSN_UID (insn));
-
+
while (*use_rec)
{
df_ref use = *use_rec;
@@ -2303,8 +2303,8 @@ static struct df_problem problem_CHAIN =
NULL, /* Local compute function. */
NULL, /* Init the solution specific data. */
NULL, /* Iterative solver. */
- NULL, /* Confluence operator 0. */
- NULL, /* Confluence operator n. */
+ NULL, /* Confluence operator 0. */
+ NULL, /* Confluence operator n. */
NULL, /* Transfer function. */
df_chain_finalize, /* Finalize function. */
df_chain_free, /* Free all of the problem information. */
@@ -2377,8 +2377,8 @@ struct df_byte_lr_problem_data
bitmap needs_expansion;
/* The start position and len for each regno in the various bit
- vectors. */
- unsigned int* regno_start;
+ vectors. */
+ unsigned int* regno_start;
unsigned int* regno_len;
/* An obstack for the bitmaps we need for this problem. */
bitmap_obstack byte_lr_bitmaps;
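For concreteness (numbers invented): if only register 5 is flagged in needs_expansion and its mode is 8 bytes wide, the allocation loop in df_byte_lr_alloc below lays the slots out as regno_start[4] = 4 with len 1, regno_start[5] = 5 with len 8, and regno_start[6] = 13 with len 1, so bits 5..12 of every DF_BYTE_LR bitvector track the individual bytes of register 5 while every other register keeps a single bit.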
@@ -2387,10 +2387,10 @@ struct df_byte_lr_problem_data
/* Get the starting location for REGNO in the df_byte_lr bitmaps. */
-int
+int
df_byte_lr_get_regno_start (unsigned int regno)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;;
return problem_data->regno_start[regno];
}
@@ -2398,10 +2398,10 @@ df_byte_lr_get_regno_start (unsigned int regno)
/* Get the len for REGNO in the df_byte_lr bitmaps. */
-int
+int
df_byte_lr_get_regno_len (unsigned int regno)
-{
- struct df_byte_lr_problem_data *problem_data
+{
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;;
return problem_data->regno_len[regno];
}
@@ -2410,7 +2410,7 @@ df_byte_lr_get_regno_len (unsigned int regno)
/* Set basic block info. */
static void
-df_byte_lr_set_bb_info (unsigned int index,
+df_byte_lr_set_bb_info (unsigned int index,
struct df_byte_lr_bb_info *bb_info)
{
gcc_assert (df_byte_lr);
@@ -2418,11 +2418,11 @@ df_byte_lr_set_bb_info (unsigned int index,
df_byte_lr->block_info[index] = bb_info;
}
-
+
/* Free basic block info. */
static void
-df_byte_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
+df_byte_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
struct df_byte_lr_bb_info *bb_info = (struct df_byte_lr_bb_info *) vbb_info;
@@ -2443,14 +2443,14 @@ df_byte_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
static void
df_byte_lr_check_regs (df_ref *ref_rec)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
for (; *ref_rec; ref_rec++)
{
df_ref ref = *ref_rec;
- if (DF_REF_FLAGS_IS_SET (ref, DF_REF_SIGN_EXTRACT
- | DF_REF_ZERO_EXTRACT
+ if (DF_REF_FLAGS_IS_SET (ref, DF_REF_SIGN_EXTRACT
+ | DF_REF_ZERO_EXTRACT
| DF_REF_STRICT_LOW_PART)
|| GET_CODE (DF_REF_REG (ref)) == SUBREG)
bitmap_set_bit (problem_data->needs_expansion, DF_REF_REGNO (ref));
@@ -2458,13 +2458,13 @@ df_byte_lr_check_regs (df_ref *ref_rec)
}
-/* Expand bitmap SRC which is indexed by regno to DEST which is indexed by
+/* Expand bitmap SRC which is indexed by regno to DEST which is indexed by
regno_start and regno_len. */
static void
df_byte_lr_expand_bitmap (bitmap dest, bitmap src)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
bitmap_iterator bi;
unsigned int i;
@@ -2472,7 +2472,7 @@ df_byte_lr_expand_bitmap (bitmap dest, bitmap src)
bitmap_clear (dest);
EXECUTE_IF_SET_IN_BITMAP (src, 0, i, bi)
{
- bitmap_set_range (dest, problem_data->regno_start[i],
+ bitmap_set_range (dest, problem_data->regno_start[i],
problem_data->regno_len[i]);
}
}
@@ -2481,7 +2481,7 @@ df_byte_lr_expand_bitmap (bitmap dest, bitmap src)
/* Allocate or reset bitmaps for DF_BYTE_LR blocks. The solution bits are
not touched unless the block is new. */
-static void
+static void
df_byte_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
unsigned int bb_index;
@@ -2496,7 +2496,7 @@ df_byte_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
df_byte_lr->problem_data = problem_data;
if (!df_byte_lr->block_pool)
- df_byte_lr->block_pool = create_alloc_pool ("df_byte_lr_block pool",
+ df_byte_lr->block_pool = create_alloc_pool ("df_byte_lr_block pool",
sizeof (struct df_byte_lr_bb_info), 50);
df_grow_bb_info (df_byte_lr);
@@ -2513,7 +2513,7 @@ df_byte_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
problem_data->hardware_regs_used = BITMAP_ALLOC (&problem_data->byte_lr_bitmaps);
problem_data->invalidated_by_call = BITMAP_ALLOC (&problem_data->byte_lr_bitmaps);
problem_data->needs_expansion = BITMAP_ALLOC (&problem_data->byte_lr_bitmaps);
-
+
/* Discover which regno's use subregs, extracts or
strict_low_parts. */
FOR_EACH_BB (bb)
@@ -2533,7 +2533,7 @@ df_byte_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
bitmap_set_bit (df_byte_lr->out_of_date_transfer_functions, ENTRY_BLOCK);
bitmap_set_bit (df_byte_lr->out_of_date_transfer_functions, EXIT_BLOCK);
-
+
/* Allocate the slots for each regno. */
for (regno = 0; regno < max_reg; regno++)
{
@@ -2541,28 +2541,28 @@ df_byte_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
problem_data->regno_start[regno] = index;
if (bitmap_bit_p (problem_data->needs_expansion, regno))
len = GET_MODE_SIZE (GET_MODE (regno_reg_rtx[regno]));
- else
+ else
len = 1;
-
+
problem_data->regno_len[regno] = len;
index += len;
}
- df_byte_lr_expand_bitmap (problem_data->hardware_regs_used,
+ df_byte_lr_expand_bitmap (problem_data->hardware_regs_used,
df->hardware_regs_used);
- df_byte_lr_expand_bitmap (problem_data->invalidated_by_call,
+ df_byte_lr_expand_bitmap (problem_data->invalidated_by_call,
regs_invalidated_by_call_regset);
EXECUTE_IF_SET_IN_BITMAP (df_byte_lr->out_of_date_transfer_functions, 0, bb_index, bi)
{
struct df_byte_lr_bb_info *bb_info = df_byte_lr_get_bb_info (bb_index);
if (bb_info)
- {
+ {
bitmap_clear (bb_info->def);
bitmap_clear (bb_info->use);
}
else
- {
+ {
bb_info = (struct df_byte_lr_bb_info *) pool_alloc (df_byte_lr->block_pool);
df_byte_lr_set_bb_info (bb_index, bb_info);
bb_info->use = BITMAP_ALLOC (&problem_data->byte_lr_bitmaps);
@@ -2571,14 +2571,14 @@ df_byte_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
bb_info->out = BITMAP_ALLOC (&problem_data->byte_lr_bitmaps);
}
}
-
+
df_byte_lr->optional_p = true;
}
/* Reset the global solution for recalculation. */
-static void
+static void
df_byte_lr_reset (bitmap all_blocks)
{
unsigned int bb_index;
@@ -2599,7 +2599,7 @@ df_byte_lr_reset (bitmap all_blocks)
static void
df_byte_lr_bb_local_compute (unsigned int bb_index)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
basic_block bb = BASIC_BLOCK (bb_index);
struct df_byte_lr_bb_info *bb_info = df_byte_lr_get_bb_info (bb_index);
@@ -2640,7 +2640,7 @@ df_byte_lr_bb_local_compute (unsigned int bb_index)
unsigned int uid = INSN_UID (insn);
if (!INSN_P (insn))
- continue;
+ continue;
for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
{
@@ -2701,7 +2701,7 @@ df_byte_lr_bb_local_compute (unsigned int bb_index)
bitmap_clear_range (bb_info->use, start, len);
}
}
-
+
#ifdef EH_USES
/* Process the uses that are live into an exception handler. */
for (use_rec = df_get_artificial_uses (bb_index); *use_rec; use_rec++)
@@ -2747,7 +2747,7 @@ df_byte_lr_local_compute (bitmap all_blocks ATTRIBUTE_UNUSED)
/* Initialize the solution vectors. */
-static void
+static void
df_byte_lr_init (bitmap all_blocks)
{
unsigned int bb_index;
@@ -2768,12 +2768,12 @@ df_byte_lr_init (bitmap all_blocks)
static void
df_byte_lr_confluence_0 (basic_block bb)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
bitmap op1 = df_byte_lr_get_bb_info (bb->index)->out;
if (bb != EXIT_BLOCK_PTR)
bitmap_copy (op1, problem_data->hardware_regs_used);
-}
+}
/* Confluence function that ignores fake edges. */
@@ -2781,11 +2781,11 @@ df_byte_lr_confluence_0 (basic_block bb)
static void
df_byte_lr_confluence_n (edge e)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
bitmap op1 = df_byte_lr_get_bb_info (e->src->index)->out;
bitmap op2 = df_byte_lr_get_bb_info (e->dest->index)->in;
-
+
/* Call-clobbered registers die across exception and call edges. */
/* ??? Abnormal call edges ignored for the moment, as this gets
confused by sibling call edges, which crashes reg-stack. */
@@ -2795,7 +2795,7 @@ df_byte_lr_confluence_n (edge e)
bitmap_ior_into (op1, op2);
bitmap_ior_into (op1, problem_data->hardware_regs_used);
-}
+}
/* Transfer function. */
@@ -2846,14 +2846,14 @@ df_byte_lr_top_dump (basic_block bb, FILE *file)
struct df_byte_lr_bb_info *bb_info = df_byte_lr_get_bb_info (bb->index);
if (!bb_info || !bb_info->in)
return;
-
+
fprintf (file, ";; blr in \t");
df_print_byte_regset (file, bb_info->in);
fprintf (file, ";; blr use \t");
df_print_byte_regset (file, bb_info->use);
fprintf (file, ";; blr def \t");
df_print_byte_regset (file, bb_info->def);
-}
+}
/* Debugging info at bottom of bb. */
@@ -2864,10 +2864,10 @@ df_byte_lr_bottom_dump (basic_block bb, FILE *file)
struct df_byte_lr_bb_info *bb_info = df_byte_lr_get_bb_info (bb->index);
if (!bb_info || !bb_info->out)
return;
-
+
fprintf (file, ";; blr out \t");
df_print_byte_regset (file, bb_info->out);
-}
+}
/* All of the information associated with every instance of the problem. */
@@ -2882,8 +2882,8 @@ static struct df_problem problem_BYTE_LR =
df_byte_lr_local_compute, /* Local compute function. */
df_byte_lr_init, /* Init the solution specific data. */
df_worklist_dataflow, /* Worklist solver. */
- df_byte_lr_confluence_0, /* Confluence operator 0. */
- df_byte_lr_confluence_n, /* Confluence operator n. */
+ df_byte_lr_confluence_0, /* Confluence operator 0. */
+ df_byte_lr_confluence_n, /* Confluence operator n. */
df_byte_lr_transfer_function, /* Transfer function. */
NULL, /* Finalize function. */
df_byte_lr_free, /* Free all of the problem information. */
@@ -2894,7 +2894,7 @@ static struct df_problem problem_BYTE_LR =
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- TV_DF_BYTE_LR, /* Timing variable. */
+ TV_DF_BYTE_LR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -2918,7 +2918,7 @@ df_byte_lr_add_problem (void)
void
df_byte_lr_simulate_defs (rtx insn, bitmap live)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
df_ref *def_rec;
unsigned int uid = INSN_UID (insn);
@@ -2946,15 +2946,15 @@ df_byte_lr_simulate_defs (rtx insn, bitmap live)
bitmap_clear_range (live, start, len);
}
}
-}
+}
/* Simulate the effects of the uses of INSN on LIVE. */
-void
+void
df_byte_lr_simulate_uses (rtx insn, bitmap live)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
df_ref *use_rec;
unsigned int uid = INSN_UID (insn);
@@ -2967,13 +2967,13 @@ df_byte_lr_simulate_uses (rtx insn, bitmap live)
unsigned int len = problem_data->regno_len[uregno];
unsigned int sb;
unsigned int lb;
-
+
if (!df_compute_accessed_bytes (use, DF_MM_MAY, &sb, &lb))
{
start += sb;
len = lb - sb;
}
-
+
/* Add use to set of uses in this BB. */
if (len)
bitmap_set_range (live, start, len);
@@ -2984,17 +2984,17 @@ df_byte_lr_simulate_uses (rtx insn, bitmap live)
/* Apply the artificial uses and defs at the top of BB in a forwards
direction. */
-void
+void
df_byte_lr_simulate_artificial_refs_at_top (basic_block bb, bitmap live)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
df_ref *def_rec;
#ifdef EH_USES
df_ref *use_rec;
#endif
int bb_index = bb->index;
-
+
#ifdef EH_USES
for (use_rec = df_get_artificial_uses (bb_index); *use_rec; use_rec++)
{
@@ -3013,7 +3013,7 @@ df_byte_lr_simulate_artificial_refs_at_top (basic_block bb, bitmap live)
{
df_ref def = *def_rec;
if (DF_REF_FLAGS (def) & DF_REF_AT_TOP)
- {
+ {
unsigned int dregno = DF_REF_REGNO (def);
unsigned int start = problem_data->regno_start[dregno];
unsigned int len = problem_data->regno_len[dregno];
@@ -3026,15 +3026,15 @@ df_byte_lr_simulate_artificial_refs_at_top (basic_block bb, bitmap live)
/* Apply the artificial uses and defs at the end of BB in a backwards
direction. */
-void
+void
df_byte_lr_simulate_artificial_refs_at_end (basic_block bb, bitmap live)
{
- struct df_byte_lr_problem_data *problem_data
+ struct df_byte_lr_problem_data *problem_data
= (struct df_byte_lr_problem_data *)df_byte_lr->problem_data;
df_ref *def_rec;
df_ref *use_rec;
int bb_index = bb->index;
-
+
for (def_rec = df_get_artificial_defs (bb_index); *def_rec; def_rec++)
{
df_ref def = *def_rec;
@@ -3066,14 +3066,14 @@ df_byte_lr_simulate_artificial_refs_at_end (basic_block bb, bitmap live)
This problem computes REG_DEAD and REG_UNUSED notes.
----------------------------------------------------------------------------*/
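As a quick illustration (insn and register numbers invented): for an insn that sets register 100 from register 101 plus a constant, this pass attaches a REG_DEAD note for register 101 when no later insn reads it, and a REG_UNUSED note for register 100 when the value just computed is never read at all; later RTL passes consult these notes rather than recomputing liveness.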
-static void
+static void
df_note_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
df_note->optional_p = true;
}
#ifdef REG_DEAD_DEBUGGING
-static void
+static void
df_print_note (const char *prefix, rtx insn, rtx note)
{
if (dump_file)
@@ -3091,14 +3091,14 @@ df_print_note (const char *prefix, rtx insn, rtx note)
just leave the notes alone. */
#ifdef STACK_REGS
-static inline bool
+static inline bool
df_ignore_stack_reg (int regno)
{
return regstack_completed
&& IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG);
}
#else
-static inline bool
+static inline bool
df_ignore_stack_reg (int regno ATTRIBUTE_UNUSED)
{
return false;
@@ -3122,7 +3122,7 @@ df_kill_notes (rtx insn, rtx *old_dead_notes, rtx *old_unused_notes)
switch (REG_NOTE_KIND (link))
{
case REG_DEAD:
- /* After reg-stack, we need to ignore any unused notes
+ /* After reg-stack, we need to ignore any unused notes
for the stack registers. */
if (df_ignore_stack_reg (REGNO (XEXP (link, 0))))
{
@@ -3142,7 +3142,7 @@ df_kill_notes (rtx insn, rtx *old_dead_notes, rtx *old_unused_notes)
break;
case REG_UNUSED:
- /* After reg-stack, we need to ignore any unused notes
+ /* After reg-stack, we need to ignore any unused notes
for the stack registers. */
if (df_ignore_stack_reg (REGNO (XEXP (link, 0))))
{
@@ -3160,7 +3160,7 @@ df_kill_notes (rtx insn, rtx *old_dead_notes, rtx *old_unused_notes)
*pprev = link = next;
}
break;
-
+
default:
pprev = &XEXP (link, 1);
link = *pprev;
@@ -3200,7 +3200,7 @@ df_set_note (enum reg_note note_type, rtx insn, rtx old, rtx reg)
prev = curr;
curr = XEXP (curr, 1);
}
-
+
/* Did not find the note. */
add_reg_note (insn, note_type, reg);
return old;
@@ -3241,14 +3241,14 @@ df_whole_mw_reg_unused_p (struct df_mw_hardreg *mws,
static rtx
df_set_unused_notes_for_mw (rtx insn, rtx old, struct df_mw_hardreg *mws,
- bitmap live, bitmap do_not_gen,
+ bitmap live, bitmap do_not_gen,
bitmap artificial_uses)
{
unsigned int r;
-
+
#ifdef REG_DEAD_DEBUGGING
if (dump_file)
- fprintf (dump_file, "mw_set_unused looking at mws[%d..%d]\n",
+ fprintf (dump_file, "mw_set_unused looking at mws[%d..%d]\n",
mws->start_regno, mws->end_regno);
#endif
@@ -3322,11 +3322,11 @@ df_set_dead_notes_for_mw (rtx insn, rtx old, struct df_mw_hardreg *mws,
bool is_debug = *added_notes_p;
*added_notes_p = false;
-
+
#ifdef REG_DEAD_DEBUGGING
if (dump_file)
{
- fprintf (dump_file, "mw_set_dead looking at mws[%d..%d]\n do_not_gen =",
+ fprintf (dump_file, "mw_set_dead looking at mws[%d..%d]\n do_not_gen =",
mws->start_regno, mws->end_regno);
df_print_regset (dump_file, do_not_gen);
fprintf (dump_file, " live =");
@@ -3375,11 +3375,11 @@ df_set_dead_notes_for_mw (rtx insn, rtx old, struct df_mw_hardreg *mws,
LIVE. Do not generate notes for registers in ARTIFICIAL_USES. */
static rtx
-df_create_unused_note (rtx insn, rtx old, df_ref def,
+df_create_unused_note (rtx insn, rtx old, df_ref def,
bitmap live, bitmap artificial_uses)
{
unsigned int dregno = DF_REF_REGNO (def);
-
+
#ifdef REG_DEAD_DEBUGGING
if (dump_file)
{
@@ -3393,14 +3393,14 @@ df_create_unused_note (rtx insn, rtx old, df_ref def,
|| bitmap_bit_p (artificial_uses, dregno)
|| df_ignore_stack_reg (dregno)))
{
- rtx reg = (DF_REF_LOC (def))
+ rtx reg = (DF_REF_LOC (def))
? *DF_REF_REAL_LOC (def): DF_REF_REG (def);
old = df_set_note (REG_UNUSED, insn, old, reg);
#ifdef REG_DEAD_DEBUGGING
df_print_note ("adding 3: ", insn, REG_NOTES (insn));
#endif
}
-
+
return old;
}
@@ -3410,7 +3410,7 @@ df_create_unused_note (rtx insn, rtx old, df_ref def,
BB. The three bitvectors are scratch regs used here. */
static void
-df_note_bb_compute (unsigned int bb_index,
+df_note_bb_compute (unsigned int bb_index,
bitmap live, bitmap do_not_gen, bitmap artificial_uses)
{
basic_block bb = BASIC_BLOCK (bb_index);
@@ -3450,13 +3450,13 @@ df_note_bb_compute (unsigned int bb_index,
{
unsigned int regno = DF_REF_REGNO (use);
bitmap_set_bit (live, regno);
-
+
/* Notes are not generated for any of the artificial registers
at the bottom of the block. */
bitmap_set_bit (artificial_uses, regno);
}
}
-
+
#ifdef REG_DEAD_DEBUGGING
if (dump_file)
{
@@ -3472,7 +3472,7 @@ df_note_bb_compute (unsigned int bb_index,
rtx old_dead_notes;
rtx old_unused_notes;
int debug_insn;
-
+
if (!INSN_P (insn))
continue;
@@ -3496,12 +3496,12 @@ df_note_bb_compute (unsigned int bb_index,
mws_rec = DF_INSN_UID_MWS (uid);
while (*mws_rec)
{
- struct df_mw_hardreg *mws = *mws_rec;
- if ((DF_MWS_REG_DEF_P (mws))
+ struct df_mw_hardreg *mws = *mws_rec;
+ if ((DF_MWS_REG_DEF_P (mws))
&& !df_ignore_stack_reg (mws->start_regno))
- old_unused_notes
- = df_set_unused_notes_for_mw (insn, old_unused_notes,
- mws, live, do_not_gen,
+ old_unused_notes
+ = df_set_unused_notes_for_mw (insn, old_unused_notes,
+ mws, live, do_not_gen,
artificial_uses);
mws_rec++;
}
@@ -3515,7 +3515,7 @@ df_note_bb_compute (unsigned int bb_index,
if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MUST_CLOBBER | DF_REF_MAY_CLOBBER))
{
old_unused_notes
- = df_create_unused_note (insn, old_unused_notes,
+ = df_create_unused_note (insn, old_unused_notes,
def, live, artificial_uses);
bitmap_set_bit (do_not_gen, dregno);
}
@@ -3530,11 +3530,11 @@ df_note_bb_compute (unsigned int bb_index,
mws_rec = DF_INSN_UID_MWS (uid);
while (*mws_rec)
{
- struct df_mw_hardreg *mws = *mws_rec;
+ struct df_mw_hardreg *mws = *mws_rec;
if (DF_MWS_REG_DEF_P (mws))
old_unused_notes
- = df_set_unused_notes_for_mw (insn, old_unused_notes,
- mws, live, do_not_gen,
+ = df_set_unused_notes_for_mw (insn, old_unused_notes,
+ mws, live, do_not_gen,
artificial_uses);
mws_rec++;
}
@@ -3544,7 +3544,7 @@ df_note_bb_compute (unsigned int bb_index,
df_ref def = *def_rec;
unsigned int dregno = DF_REF_REGNO (def);
old_unused_notes
- = df_create_unused_note (insn, old_unused_notes,
+ = df_create_unused_note (insn, old_unused_notes,
def, live, artificial_uses);
if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MUST_CLOBBER | DF_REF_MAY_CLOBBER))
@@ -3554,13 +3554,13 @@ df_note_bb_compute (unsigned int bb_index,
bitmap_clear_bit (live, dregno);
}
}
-
+
/* Process the uses. */
mws_rec = DF_INSN_UID_MWS (uid);
while (*mws_rec)
{
- struct df_mw_hardreg *mws = *mws_rec;
- if ((DF_MWS_REG_DEF_P (mws))
+ struct df_mw_hardreg *mws = *mws_rec;
+ if ((DF_MWS_REG_DEF_P (mws))
&& !df_ignore_stack_reg (mws->start_regno))
{
bool really_add_notes = debug_insn != 0;
@@ -3603,7 +3603,7 @@ df_note_bb_compute (unsigned int bb_index,
&& (!(DF_REF_FLAGS (use) & DF_REF_READ_WRITE))
&& (!df_ignore_stack_reg (uregno)))
{
- rtx reg = (DF_REF_LOC (use))
+ rtx reg = (DF_REF_LOC (use))
? *DF_REF_REAL_LOC (use) : DF_REF_REG (use);
old_dead_notes = df_set_note (REG_DEAD, insn, old_dead_notes, reg);
@@ -3687,8 +3687,8 @@ static struct df_problem problem_NOTE =
df_note_compute, /* Local compute function. */
NULL, /* Init the solution specific data. */
NULL, /* Iterative solver. */
- NULL, /* Confluence operator 0. */
- NULL, /* Confluence operator n. */
+ NULL, /* Confluence operator 0. */
+ NULL, /* Confluence operator n. */
NULL, /* Transfer function. */
NULL, /* Finalize function. */
df_note_free, /* Free all of the problem information. */
@@ -3718,7 +3718,7 @@ df_note_add_problem (void)
/*----------------------------------------------------------------------------
- Functions for simulating the effects of single insns.
+ Functions for simulating the effects of single insns.
You can either simulate in the forwards direction, starting from
the top of a block or the backwards direction from the end of the
@@ -3772,12 +3772,12 @@ df_simulate_defs (rtx insn, bitmap live)
if (!(DF_REF_FLAGS (def) & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
bitmap_clear_bit (live, dregno);
}
-}
+}
/* Simulate the effects of the uses of INSN on LIVE. */
-void
+void
df_simulate_uses (rtx insn, bitmap live)
{
df_ref *use_rec;
@@ -3824,13 +3824,13 @@ df_simulate_fixup_sets (basic_block bb, bitmap live)
/* Apply the artificial uses and defs at the end of BB in a backwards
direction. */
-void
+void
df_simulate_initialize_backwards (basic_block bb, bitmap live)
{
df_ref *def_rec;
df_ref *use_rec;
int bb_index = bb->index;
-
+
for (def_rec = df_get_artificial_defs (bb_index); *def_rec; def_rec++)
{
df_ref def = *def_rec;
@@ -3849,12 +3849,12 @@ df_simulate_initialize_backwards (basic_block bb, bitmap live)
/* Simulate the backwards effects of INSN on the bitmap LIVE. */
-void
+void
df_simulate_one_insn_backwards (basic_block bb, rtx insn, bitmap live)
{
if (!NONDEBUG_INSN_P (insn))
- return;
-
+ return;
+
df_simulate_defs (insn, live);
df_simulate_uses (insn, live);
df_simulate_fixup_sets (bb, live);
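The simulate functions above and below are meant to be driven from a backwards walk over a single block. A minimal usage sketch for a pass that already has the basic_block bb in hand; df_get_live_out and FOR_BB_INSNS_REVERSE are assumed to be available to the caller and are not part of this patch:

  bitmap live = BITMAP_ALLOC (NULL);
  rtx insn;

  bitmap_copy (live, df_get_live_out (bb));    /* Seed with live-out.  */
  df_simulate_initialize_backwards (bb, live); /* Artificial refs at the end.  */
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (INSN_P (insn))
      df_simulate_one_insn_backwards (bb, insn, live);
  df_simulate_finalize_backwards (bb, live);   /* Artificial refs at the top.  */
  BITMAP_FREE (live);
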
@@ -3864,7 +3864,7 @@ df_simulate_one_insn_backwards (basic_block bb, rtx insn, bitmap live)
/* Apply the artificial uses and defs at the top of BB in a backwards
direction. */
-void
+void
df_simulate_finalize_backwards (basic_block bb, bitmap live)
{
df_ref *def_rec;
@@ -3872,7 +3872,7 @@ df_simulate_finalize_backwards (basic_block bb, bitmap live)
df_ref *use_rec;
#endif
int bb_index = bb->index;
-
+
for (def_rec = df_get_artificial_defs (bb_index); *def_rec; def_rec++)
{
df_ref def = *def_rec;
@@ -3892,7 +3892,7 @@ df_simulate_finalize_backwards (basic_block bb, bitmap live)
/*----------------------------------------------------------------------------
The following three functions are used only for FORWARDS scanning:
i.e. they process the defs and the REG_DEAD and REG_UNUSED notes.
- Thus it is important to add the DF_NOTES problem to the stack of
+ Thus it is important to add the DF_NOTES problem to the stack of
problems computed before using these functions.
df_simulate_initialize_forwards should be called first with a
@@ -3909,12 +3909,12 @@ df_simulate_finalize_backwards (basic_block bb, bitmap live)
unless (as is the case for fwprop) they are correct when liveness
bitmaps are *under*estimated. */
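A matching sketch for the forwards direction, again assuming the caller supplies bb and a df_get_live_in helper; per the comment above, the DF_NOTE problem must already be computed, since these functions work from the defs and the REG_DEAD and REG_UNUSED notes:

  bitmap live = BITMAP_ALLOC (NULL);
  rtx insn;

  bitmap_copy (live, df_get_live_in (bb));     /* Seed with live-in.  */
  df_simulate_initialize_forwards (bb, live);  /* Artificial defs at the top.  */
  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      df_simulate_one_insn_forwards (bb, insn, live);
  BITMAP_FREE (live);
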
-void
+void
df_simulate_initialize_forwards (basic_block bb, bitmap live)
{
df_ref *def_rec;
int bb_index = bb->index;
-
+
for (def_rec = df_get_artificial_defs (bb_index); *def_rec; def_rec++)
{
df_ref def = *def_rec;
@@ -3925,14 +3925,14 @@ df_simulate_initialize_forwards (basic_block bb, bitmap live)
/* Simulate the forwards effects of INSN on the bitmap LIVE. */
-void
+void
df_simulate_one_insn_forwards (basic_block bb, rtx insn, bitmap live)
{
rtx link;
if (! INSN_P (insn))
- return;
+ return;
- /* Make sure that DF_NOTE really is an active df problem. */
+ /* Make sure that DF_NOTE really is an active df problem. */
gcc_assert (df_note);
/* Note that this is the opposite as how the problem is defined, because
@@ -3958,7 +3958,7 @@ df_simulate_one_insn_forwards (basic_block bb, rtx insn, bitmap live)
while (--n >= 0)
bitmap_clear_bit (live, regno + n);
}
- else
+ else
bitmap_clear_bit (live, regno);
}
break;
@@ -4015,13 +4015,13 @@ df_simulate_one_insn_forwards (basic_block bb, rtx insn, bitmap live)
init-set of BB3 includes r10 and r12, but not r11. Note that we do
not need to iterate the dominance frontier, because we do not insert
anything like PHI functions there! Instead, dataflow will take care of
- propagating the information to BB3's successors.
+ propagating the information to BB3's successors.
---------------------------------------------------------------------------*/
/* Set basic block info. */
static void
-df_md_set_bb_info (unsigned int index,
+df_md_set_bb_info (unsigned int index,
struct df_md_bb_info *bb_info)
{
gcc_assert (df_md);
@@ -4031,7 +4031,7 @@ df_md_set_bb_info (unsigned int index,
static void
-df_md_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
+df_md_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
struct df_md_bb_info *bb_info = (struct df_md_bb_info *) vbb_info;
@@ -4050,14 +4050,14 @@ df_md_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
/* Allocate or reset bitmaps for DF_MD. The solution bits are
not touched unless the block is new. */
-static void
+static void
df_md_alloc (bitmap all_blocks)
{
unsigned int bb_index;
bitmap_iterator bi;
if (!df_md->block_pool)
- df_md->block_pool = create_alloc_pool ("df_md_block pool",
+ df_md->block_pool = create_alloc_pool ("df_md_block pool",
sizeof (struct df_md_bb_info), 50);
df_grow_bb_info (df_md);
@@ -4066,7 +4066,7 @@ df_md_alloc (bitmap all_blocks)
{
struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
if (bb_info)
- {
+ {
bitmap_clear (bb_info->init);
bitmap_clear (bb_info->gen);
bitmap_clear (bb_info->kill);
@@ -4074,7 +4074,7 @@ df_md_alloc (bitmap all_blocks)
bitmap_clear (bb_info->out);
}
else
- {
+ {
bb_info = (struct df_md_bb_info *) pool_alloc (df_md->block_pool);
df_md_set_bb_info (bb_index, bb_info);
bb_info->init = BITMAP_ALLOC (NULL);
@@ -4139,7 +4139,7 @@ df_md_simulate_one_insn (basic_block bb ATTRIBUTE_UNUSED, rtx insn,
}
static void
-df_md_bb_local_compute_process_def (struct df_md_bb_info *bb_info,
+df_md_bb_local_compute_process_def (struct df_md_bb_info *bb_info,
df_ref *def_rec,
int top_flag)
{
@@ -4186,7 +4186,7 @@ df_md_bb_local_compute (unsigned int bb_index)
/* Artificials are only hard regs. */
if (!(df->changeable_flags & DF_NO_HARD_REGS))
- df_md_bb_local_compute_process_def (bb_info,
+ df_md_bb_local_compute_process_def (bb_info,
df_get_artificial_defs (bb_index),
DF_REF_AT_TOP);
@@ -4200,7 +4200,7 @@ df_md_bb_local_compute (unsigned int bb_index)
}
if (!(df->changeable_flags & DF_NO_HARD_REGS))
- df_md_bb_local_compute_process_def (bb_info,
+ df_md_bb_local_compute_process_def (bb_info,
df_get_artificial_defs (bb_index),
0);
}
@@ -4221,7 +4221,7 @@ df_md_local_compute (bitmap all_blocks)
{
df_md_bb_local_compute (bb_index);
}
-
+
BITMAP_FREE (seen_in_insn);
frontiers = XNEWVEC (bitmap, last_basic_block);
@@ -4249,7 +4249,7 @@ df_md_local_compute (bitmap all_blocks)
/* Reset the global solution for recalculation. */
-static void
+static void
df_md_reset (bitmap all_blocks)
{
unsigned int bb_index;
@@ -4278,7 +4278,7 @@ df_md_transfer_function (int bb_index)
/* Initialize the solution bit vectors for problem. */
-static void
+static void
df_md_init (bitmap all_blocks)
{
unsigned int bb_index;
@@ -4287,7 +4287,7 @@ df_md_init (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
-
+
bitmap_copy (bb_info->in, bb_info->init);
df_md_transfer_function (bb_index);
}
@@ -4298,7 +4298,7 @@ df_md_confluence_0 (basic_block bb)
{
struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
bitmap_copy (bb_info->in, bb_info->init);
-}
+}
/* In of target gets or of out of source. */
@@ -4308,7 +4308,7 @@ df_md_confluence_n (edge e)
bitmap op1 = df_md_get_bb_info (e->dest->index)->in;
bitmap op2 = df_md_get_bb_info (e->src->index)->out;
- if (e->flags & EDGE_FAKE)
+ if (e->flags & EDGE_FAKE)
return;
if (e->flags & EDGE_EH)
@@ -4352,7 +4352,7 @@ df_md_top_dump (basic_block bb, FILE *file)
struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
if (!bb_info || !bb_info->in)
return;
-
+
fprintf (file, ";; md in \t");
df_print_regset (file, bb_info->in);
fprintf (file, ";; md init \t");
@@ -4371,10 +4371,10 @@ df_md_bottom_dump (basic_block bb, FILE *file)
struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
if (!bb_info || !bb_info->out)
return;
-
+
fprintf (file, ";; md out \t");
df_print_regset (file, bb_info->out);
-}
+}
static struct df_problem problem_MD =
{
@@ -4386,8 +4386,8 @@ static struct df_problem problem_MD =
df_md_local_compute, /* Local compute function. */
df_md_init, /* Init the solution specific data. */
df_worklist_dataflow, /* Worklist solver. */
- df_md_confluence_0, /* Confluence operator 0. */
- df_md_confluence_n, /* Confluence operator n. */
+ df_md_confluence_0, /* Confluence operator 0. */
+ df_md_confluence_n, /* Confluence operator n. */
df_md_transfer_function, /* Transfer function. */
NULL, /* Finalize function. */
df_md_free, /* Free all of the problem information. */
@@ -4398,7 +4398,7 @@ static struct df_problem problem_MD =
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- TV_DF_MD, /* Timing variable. */
+ TV_DF_MD, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index c12c00e8a4e..4428b8820e9 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -1,7 +1,7 @@
/* Scanning of rtl for dataflow analysis.
Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
2008, 2009 Free Software Foundation, Inc.
- Originally contributed by Michael P. Hayes
+ Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
and Kenneth Zadeck (zadeck@naturalbridge.com).
@@ -75,7 +75,7 @@ DEF_VEC_ALLOC_P_STACK(df_mw_hardreg_ptr);
/* The following two macros free the vecs that hold either the refs or
the mw refs. They are a little tricky because the vec has 0
- elements is special and is not to be freed. */
+ elements is special and is not to be freed. */
#define df_scan_free_ref_vec(V) \
do { \
if (V && *V) \
@@ -107,7 +107,7 @@ static df_ref df_null_ref_rec[1];
static struct df_mw_hardreg * df_null_mw_rec[1];
static void df_ref_record (enum df_ref_class, struct df_collection_rec *,
- rtx, rtx *,
+ rtx, rtx *,
basic_block, struct df_insn_info *,
enum df_ref_type, int ref_flags,
int, int, enum machine_mode);
@@ -120,17 +120,17 @@ static void df_defs_record (struct df_collection_rec *, rtx,
static void df_uses_record (enum df_ref_class, struct df_collection_rec *,
rtx *, enum df_ref_type,
basic_block, struct df_insn_info *,
- int ref_flags,
+ int ref_flags,
int, int, enum machine_mode);
-static df_ref df_ref_create_structure (enum df_ref_class,
- struct df_collection_rec *, rtx, rtx *,
+static df_ref df_ref_create_structure (enum df_ref_class,
+ struct df_collection_rec *, rtx, rtx *,
basic_block, struct df_insn_info *,
enum df_ref_type, int ref_flags,
int, int, enum machine_mode);
-static void df_insn_refs_collect (struct df_collection_rec*,
- basic_block, struct df_insn_info *);
+static void df_insn_refs_collect (struct df_collection_rec*,
+ basic_block, struct df_insn_info *);
static void df_canonize_collection_rec (struct df_collection_rec *);
static void df_get_regular_block_artificial_uses (bitmap);
@@ -144,13 +144,13 @@ static void df_grow_ref_info (struct df_ref_info *, unsigned int);
static void df_ref_chain_delete_du_chain (df_ref *);
static void df_ref_chain_delete (df_ref *);
-static void df_refs_add_to_chains (struct df_collection_rec *,
+static void df_refs_add_to_chains (struct df_collection_rec *,
basic_block, rtx);
static bool df_insn_refs_verify (struct df_collection_rec *, basic_block, rtx, bool);
static void df_entry_block_defs_collect (struct df_collection_rec *, bitmap);
static void df_exit_block_uses_collect (struct df_collection_rec *, bitmap);
-static void df_install_ref (df_ref, struct df_reg_info *,
+static void df_install_ref (df_ref, struct df_reg_info *,
struct df_ref_info *, bool);
static int df_ref_compare (const void *, const void *);
@@ -193,7 +193,7 @@ typedef struct df_scan_bb_info *df_scan_bb_info_t;
/* Internal function to shut down the scanning problem. */
-static void
+static void
df_scan_free_internal (void)
{
struct df_scan_problem_data *problem_data
@@ -282,7 +282,7 @@ df_scan_free_internal (void)
/* Set basic block info. */
static void
-df_scan_set_bb_info (unsigned int index,
+df_scan_set_bb_info (unsigned int index,
struct df_scan_bb_info *bb_info)
{
gcc_assert (df_scan);
@@ -307,10 +307,10 @@ df_scan_free_bb_info (basic_block bb, void *vbb_info)
/* Record defs within INSN. */
df_insn_delete (bb, INSN_UID (insn));
}
-
+
if (bb_index < df_scan->block_info_size)
bb_info = df_scan_get_bb_info (bb_index);
-
+
/* Get rid of any artificial uses or defs. */
df_ref_chain_delete_du_chain (bb_info->artificial_defs);
df_ref_chain_delete_du_chain (bb_info->artificial_uses);
@@ -326,7 +326,7 @@ df_scan_free_bb_info (basic_block bb, void *vbb_info)
/* Allocate the problem data for the scanning problem. This should be
called when the problem is created or when the entire function is to
be rescanned. */
-void
+void
df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
struct df_scan_problem_data *problem_data;
@@ -339,41 +339,41 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
if (df_scan->problem_data)
df_scan_free_internal ();
- df_scan->block_pool
- = create_alloc_pool ("df_scan_block pool",
- sizeof (struct df_scan_bb_info),
+ df_scan->block_pool
+ = create_alloc_pool ("df_scan_block pool",
+ sizeof (struct df_scan_bb_info),
block_size);
problem_data = XNEW (struct df_scan_problem_data);
df_scan->problem_data = problem_data;
df_scan->computed = true;
- problem_data->ref_base_pool
- = create_alloc_pool ("df_scan ref base",
+ problem_data->ref_base_pool
+ = create_alloc_pool ("df_scan ref base",
sizeof (struct df_base_ref), block_size);
- problem_data->ref_artificial_pool
- = create_alloc_pool ("df_scan ref artificial",
+ problem_data->ref_artificial_pool
+ = create_alloc_pool ("df_scan ref artificial",
sizeof (struct df_artificial_ref), block_size);
- problem_data->ref_regular_pool
- = create_alloc_pool ("df_scan ref regular",
+ problem_data->ref_regular_pool
+ = create_alloc_pool ("df_scan ref regular",
sizeof (struct df_regular_ref), block_size);
- problem_data->ref_extract_pool
- = create_alloc_pool ("df_scan ref extract",
+ problem_data->ref_extract_pool
+ = create_alloc_pool ("df_scan ref extract",
sizeof (struct df_extract_ref), block_size);
- problem_data->insn_pool
- = create_alloc_pool ("df_scan insn",
+ problem_data->insn_pool
+ = create_alloc_pool ("df_scan insn",
sizeof (struct df_insn_info), block_size);
- problem_data->reg_pool
- = create_alloc_pool ("df_scan reg",
+ problem_data->reg_pool
+ = create_alloc_pool ("df_scan reg",
sizeof (struct df_reg_info), block_size);
- problem_data->mw_reg_pool
- = create_alloc_pool ("df_scan mw_reg",
+ problem_data->mw_reg_pool
+ = create_alloc_pool ("df_scan mw_reg",
sizeof (struct df_mw_hardreg), block_size);
bitmap_obstack_initialize (&problem_data->reg_bitmaps);
bitmap_obstack_initialize (&problem_data->insn_bitmaps);
- insn_num += insn_num / 4;
+ insn_num += insn_num / 4;
df_grow_reg_info ();
df_grow_insn_info ();
@@ -406,7 +406,7 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
/* Free all of the data associated with the scan problem. */
-static void
+static void
df_scan_free (void)
{
if (df_scan->problem_data)
@@ -422,7 +422,7 @@ df_scan_free (void)
}
/* Dump the preamble for DF_SCAN dump. */
-static void
+static void
df_scan_start_dump (FILE *file ATTRIBUTE_UNUSED)
{
int i;
@@ -451,7 +451,7 @@ df_scan_start_dump (FILE *file ATTRIBUTE_UNUSED)
if (df_regs_ever_live_p (i))
fprintf (file, " %d[%s]", i, reg_names[i]);
fprintf (file, "\n;; ref usage \t");
-
+
for (i = 0; i < (int)df->regs_inited; i++)
if (DF_REG_DEF_COUNT (i) || DF_REG_USE_COUNT (i) || DF_REG_EQ_USE_COUNT (i))
{
@@ -488,12 +488,12 @@ df_scan_start_dump (FILE *file ATTRIBUTE_UNUSED)
icount++;
}
- fprintf (file, "\n;; total ref usage %d{%dd,%du,%de} in %d{%d regular + %d call} insns.\n",
+ fprintf (file, "\n;; total ref usage %d{%dd,%du,%de} in %d{%d regular + %d call} insns.\n",
dcount + ucount + ecount, dcount, ucount, ecount, icount + ccount, icount, ccount);
}
/* Dump the bb_info for a given basic block. */
-static void
+static void
df_scan_start_block (basic_block bb, FILE *file)
{
struct df_scan_bb_info *bb_info
@@ -527,8 +527,8 @@ static struct df_problem problem_SCAN =
NULL, /* Local compute function. */
NULL, /* Init the solution specific data. */
NULL, /* Iterative solver. */
- NULL, /* Confluence operator 0. */
- NULL, /* Confluence operator n. */
+ NULL, /* Confluence operator 0. */
+ NULL, /* Confluence operator n. */
NULL, /* Transfer function. */
NULL, /* Finalize function. */
df_scan_free, /* Free all of the problem information. */
@@ -562,12 +562,12 @@ df_scan_add_problem (void)
/* First, grow the reg_info information. If the current size is less than
the number of pseudos, grow to 25% more than the number of
- pseudos.
+ pseudos.
Second, assure that all of the slots up to max_reg_num have been
filled with reg_info structures. */
-void
+void
df_grow_reg_info (void)
{
unsigned int max_reg = max_reg_num ();
@@ -608,14 +608,14 @@ df_grow_reg_info (void)
df->use_info.begin[i] = 0;
df->use_info.count[i] = 0;
}
-
+
df->regs_inited = max_reg;
}
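In other words, with (say) 1,000 pseudos the register tables end up sized for 1,250 entries; the same 25%-headroom idiom appears earlier in df_scan_alloc, where insn_num += insn_num / 4 before the insn tables are grown.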
/* Grow the ref information. */
-static void
+static void
df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size)
{
if (ref_info->refs_size < new_size)
@@ -634,7 +634,7 @@ df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size)
ref_info->total_size. */
static void
-df_check_and_grow_ref_info (struct df_ref_info *ref_info,
+df_check_and_grow_ref_info (struct df_ref_info *ref_info,
unsigned bitmap_addend)
{
if (ref_info->refs_size < ref_info->total_size + bitmap_addend)
@@ -650,7 +650,7 @@ df_check_and_grow_ref_info (struct df_ref_info *ref_info,
number of instructions, grow to 25% more than the number of
instructions. */
-void
+void
df_grow_insn_info (void)
{
unsigned int new_size = get_max_uid () + 1;
@@ -685,7 +685,7 @@ df_scan_blocks (void)
df_get_regular_block_artificial_uses (df->regular_block_artificial_uses);
df_get_eh_block_artificial_uses (df->eh_block_artificial_uses);
- bitmap_ior_into (df->eh_block_artificial_uses,
+ bitmap_ior_into (df->eh_block_artificial_uses,
df->regular_block_artificial_uses);
/* ENTRY and EXIT blocks have special defs/uses. */
@@ -706,17 +706,17 @@ df_scan_blocks (void)
/* Create a new ref of type DF_REF_TYPE for register REG at address
- LOC within INSN of BB. This function is only used externally.
+ LOC within INSN of BB. This function is only used externally.
If the REF_FLAGS field contain DF_REF_SIGN_EXTRACT or
DF_REF_ZERO_EXTRACT. WIDTH, OFFSET and MODE are used to access the
fields if they were constants. Otherwise they should be -1 if
those flags were set. */
-df_ref
-df_ref_create (rtx reg, rtx *loc, rtx insn,
+df_ref
+df_ref_create (rtx reg, rtx *loc, rtx insn,
basic_block bb,
- enum df_ref_type ref_type,
+ enum df_ref_type ref_type,
int ref_flags,
int width, int offset, enum machine_mode mode)
{
@@ -741,7 +741,7 @@ df_ref_create (rtx reg, rtx *loc, rtx insn,
else
cl = DF_REF_BASE;
ref = df_ref_create_structure (cl, NULL, reg, loc, bb, DF_INSN_INFO_GET (insn),
- ref_type, ref_flags,
+ ref_type, ref_flags,
width, offset, mode);
if (DF_REF_REG_DEF_P (ref))
@@ -781,7 +781,7 @@ df_ref_create (rtx reg, rtx *loc, rtx insn,
add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);
df_install_ref (ref, reg_info[DF_REF_REGNO (ref)], ref_info, add_to_table);
-
+
if (add_to_table)
switch (ref_info->ref_order)
{
@@ -828,7 +828,7 @@ df_ref_create (rtx reg, rtx *loc, rtx insn,
#endif
/* By adding the ref directly, df_insn_rescan my not find any
differences even though the block will have changed. So we need
- to mark the block dirty ourselves. */
+ to mark the block dirty ourselves. */
if (!DEBUG_INSN_P (DF_REF_INSN (ref)))
df_set_bb_dirty (bb);
@@ -872,9 +872,9 @@ df_free_ref (df_ref ref)
Also delete the def-use or use-def chain if it exists. */
static void
-df_reg_chain_unlink (df_ref ref)
+df_reg_chain_unlink (df_ref ref)
{
- df_ref next = DF_REF_NEXT_REG (ref);
+ df_ref next = DF_REF_NEXT_REG (ref);
df_ref prev = DF_REF_PREV_REG (ref);
int id = DF_REF_ID (ref);
struct df_reg_info *reg_info;
@@ -886,7 +886,7 @@ df_reg_chain_unlink (df_ref ref)
reg_info = DF_REG_DEF_GET (regno);
refs = df->def_info.refs;
}
- else
+ else
{
if (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE)
{
@@ -919,7 +919,7 @@ df_reg_chain_unlink (df_ref ref)
else
refs[id] = NULL;
}
-
+
/* Delete any def-use or use-def chains that start here. It is
possible that there is trash in this field. This happens for
insns that have been deleted when rescanning has been deferred
@@ -927,7 +927,7 @@ df_reg_chain_unlink (df_ref ref)
code skips deleted insns. */
if (df_chain && DF_REF_CHAIN (ref))
df_chain_unlink (ref);
-
+
reg_info->n_refs--;
if (DF_REF_FLAGS_IS_SET (ref, DF_HARD_REG_LIVE))
{
@@ -962,7 +962,7 @@ df_ref_compress_rec (df_ref **vec_ptr, df_ref ref)
{
while (*vec && *vec != ref)
vec++;
-
+
while (*vec)
{
*vec = *(vec+1);
@@ -994,7 +994,7 @@ df_ref_remove (df_ref ref)
{
if (DF_REF_IS_ARTIFICIAL (ref))
{
- struct df_scan_bb_info *bb_info
+ struct df_scan_bb_info *bb_info
= df_scan_get_bb_info (DF_REF_BBNO (ref));
df_ref_compress_rec (&bb_info->artificial_defs, ref);
}
@@ -1009,11 +1009,11 @@ df_ref_remove (df_ref ref)
{
if (DF_REF_IS_ARTIFICIAL (ref))
{
- struct df_scan_bb_info *bb_info
+ struct df_scan_bb_info *bb_info
= df_scan_get_bb_info (DF_REF_BBNO (ref));
df_ref_compress_rec (&bb_info->artificial_uses, ref);
}
- else
+ else
{
unsigned int uid = DF_REF_INSN_UID (ref);
struct df_insn_info *insn_rec = DF_INSN_UID_GET (uid);
@@ -1027,7 +1027,7 @@ df_ref_remove (df_ref ref)
/* By deleting the ref directly, df_insn_rescan my not find any
differences even though the block will have changed. So we need
- to mark the block dirty ourselves. */
+ to mark the block dirty ourselves. */
if (!DEBUG_INSN_P (DF_REF_INSN (ref)))
df_set_bb_dirty (DF_REF_BB (ref));
df_reg_chain_unlink (ref);
@@ -1065,7 +1065,7 @@ df_ref_chain_delete_du_chain (df_ref *ref_rec)
while (*ref_rec)
{
df_ref ref = *ref_rec;
- /* CHAIN is allocated by DF_CHAIN. So make sure to
+ /* CHAIN is allocated by DF_CHAIN. So make sure to
pass df_scan instance for the problem. */
if (DF_REF_CHAIN (ref))
df_chain_unlink (ref);
@@ -1117,7 +1117,7 @@ df_mw_hardreg_chain_delete (struct df_mw_hardreg **hardregs)
except when called from df_process_deferred_rescans to mark the block
as dirty. */
-void
+void
df_insn_delete (basic_block bb, unsigned int uid)
{
struct df_insn_info *insn_info = NULL;
@@ -1157,7 +1157,7 @@ df_insn_delete (basic_block bb, unsigned int uid)
bitmap_clear_bit (df->insns_to_notes_rescan, uid);
if (insn_info)
{
- struct df_scan_problem_data *problem_data
+ struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
/* In general, notes do not have the insn_info fields
@@ -1168,14 +1168,14 @@ df_insn_delete (basic_block bb, unsigned int uid)
if (insn_info->defs)
{
df_mw_hardreg_chain_delete (insn_info->mw_hardregs);
-
+
if (df_chain)
{
df_ref_chain_delete_du_chain (insn_info->defs);
- df_ref_chain_delete_du_chain (insn_info->uses);
+ df_ref_chain_delete_du_chain (insn_info->uses);
df_ref_chain_delete_du_chain (insn_info->eq_uses);
}
-
+
df_ref_chain_delete (insn_info->defs);
df_ref_chain_delete (insn_info->uses);
df_ref_chain_delete (insn_info->eq_uses);
@@ -1192,7 +1192,7 @@ static void
df_free_collection_rec (struct df_collection_rec *collection_rec)
{
unsigned int ix;
- struct df_scan_problem_data *problem_data
+ struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
df_ref ref;
struct df_mw_hardreg *mw;
@@ -1216,7 +1216,7 @@ df_free_collection_rec (struct df_collection_rec *collection_rec)
/* Rescan INSN. Return TRUE if the rescanning produced any changes. */
-bool
+bool
df_insn_rescan (rtx insn)
{
unsigned int uid = INSN_UID (insn);
@@ -1256,7 +1256,7 @@ df_insn_rescan (rtx insn)
}
if (dump_file)
fprintf (dump_file, "deferring rescan insn with uid = %d.\n", uid);
-
+
bitmap_clear_bit (df->insns_to_delete, uid);
bitmap_clear_bit (df->insns_to_notes_rescan, uid);
bitmap_set_bit (df->insns_to_rescan, INSN_UID (insn));
@@ -1382,13 +1382,13 @@ df_insn_rescan_all (void)
bitmap_iterator bi;
unsigned int uid;
bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);
-
+
if (df->changeable_flags & DF_NO_INSN_RESCAN)
{
df_clear_flags (DF_NO_INSN_RESCAN);
no_insn_rescan = true;
}
-
+
if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
{
df_clear_flags (DF_DEFER_INSN_RESCAN);
@@ -1408,7 +1408,7 @@ df_insn_rescan_all (void)
bitmap_clear (df->insns_to_rescan);
bitmap_clear (df->insns_to_notes_rescan);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB (bb)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
@@ -1434,13 +1434,13 @@ df_process_deferred_rescans (void)
bitmap_iterator bi;
unsigned int uid;
bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);
-
+
if (df->changeable_flags & DF_NO_INSN_RESCAN)
{
df_clear_flags (DF_NO_INSN_RESCAN);
no_insn_rescan = true;
}
-
+
if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
{
df_clear_flags (DF_DEFER_INSN_RESCAN);
@@ -1502,13 +1502,13 @@ df_process_deferred_rescans (void)
INCLUDE_EQ_USES. */
static unsigned int
-df_count_refs (bool include_defs, bool include_uses,
+df_count_refs (bool include_defs, bool include_uses,
bool include_eq_uses)
{
unsigned int regno;
int size = 0;
unsigned int m = df->regs_inited;
-
+
for (regno = 0; regno < m; regno++)
{
if (include_defs)
@@ -1526,10 +1526,10 @@ df_count_refs (bool include_defs, bool include_uses,
or reg-def chains. This version processes the refs in reg order
which is likely to be best if processing the whole function. */
-static void
+static void
df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
- bool include_defs,
- bool include_uses,
+ bool include_defs,
+ bool include_uses,
bool include_eq_uses)
{
unsigned int m = df->regs_inited;
@@ -1546,7 +1546,7 @@ df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
else
start = 0;
- ref_info->total_size
+ ref_info->total_size
= df_count_refs (include_defs, include_uses, include_eq_uses);
df_check_and_grow_ref_info (ref_info, 1);
@@ -1558,7 +1558,7 @@ df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
if (include_defs)
{
df_ref ref = DF_REG_DEF_CHAIN (regno);
- while (ref)
+ while (ref)
{
ref_info->refs[offset] = ref;
DF_REF_ID (ref) = offset++;
@@ -1570,7 +1570,7 @@ df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
if (include_uses)
{
df_ref ref = DF_REG_USE_CHAIN (regno);
- while (ref)
+ while (ref)
{
ref_info->refs[offset] = ref;
DF_REF_ID (ref) = offset++;
@@ -1582,7 +1582,7 @@ df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
if (include_eq_uses)
{
df_ref ref = DF_REG_EQ_USE_CHAIN (regno);
- while (ref)
+ while (ref)
{
ref_info->refs[offset] = ref;
DF_REF_ID (ref) = offset++;
@@ -1593,7 +1593,7 @@ df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
}
ref_info->count[regno] = count;
}
-
+
/* The bitmap size is not decremented when refs are deleted. So
reset it now that we have squished out all of the empty
slots. */
@@ -1606,10 +1606,10 @@ df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
which is likely to be best if processing some segment of the
function. */
-static void
+static void
df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
- bool include_defs,
- bool include_uses,
+ bool include_defs,
+ bool include_uses,
bool include_eq_uses)
{
bitmap_iterator bi;
@@ -1617,7 +1617,7 @@ df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
unsigned int m = df->regs_inited;
unsigned int offset = 0;
unsigned int r;
- unsigned int start
+ unsigned int start
= (df->changeable_flags & DF_NO_HARD_REGS) ? FIRST_PSEUDO_REGISTER : 0;
memset (ref_info->begin, 0, sizeof (int) * df->regs_inited);
@@ -1650,7 +1650,7 @@ df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
if (INSN_P (insn))
{
unsigned int uid = INSN_UID (insn);
-
+
if (include_defs)
for (ref_rec = DF_INSN_UID_DEFS (uid); *ref_rec; ref_rec++)
{
@@ -1679,7 +1679,7 @@ df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
offset += ref_info->count[r];
ref_info->count[r] = 0;
}
-
+
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
basic_block bb = BASIC_BLOCK (bb_index);
@@ -1718,7 +1718,7 @@ df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
if (INSN_P (insn))
{
unsigned int uid = INSN_UID (insn);
-
+
if (include_defs)
for (ref_rec = DF_INSN_UID_DEFS (uid); *ref_rec; ref_rec++)
{
@@ -1772,25 +1772,25 @@ df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
/* Take build ref table for either the uses or defs from the reg-use
or reg-def chains. */
-static void
+static void
df_reorganize_refs_by_reg (struct df_ref_info *ref_info,
- bool include_defs,
- bool include_uses,
+ bool include_defs,
+ bool include_uses,
bool include_eq_uses)
{
if (df->analyze_subset)
- df_reorganize_refs_by_reg_by_insn (ref_info, include_defs,
+ df_reorganize_refs_by_reg_by_insn (ref_info, include_defs,
include_uses, include_eq_uses);
else
- df_reorganize_refs_by_reg_by_reg (ref_info, include_defs,
+ df_reorganize_refs_by_reg_by_reg (ref_info, include_defs,
include_uses, include_eq_uses);
}
/* Add the refs in REF_VEC to the table in REF_INFO starting at OFFSET. */
-static unsigned int
-df_add_refs_to_table (unsigned int offset,
- struct df_ref_info *ref_info,
+static unsigned int
+df_add_refs_to_table (unsigned int offset,
+ struct df_ref_info *ref_info,
df_ref *ref_vec)
{
while (*ref_vec)
@@ -1813,18 +1813,18 @@ df_add_refs_to_table (unsigned int offset,
eq_uses if INCLUDE_EQ_USES. */
static unsigned int
-df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
+df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
struct df_ref_info *ref_info,
- bool include_defs, bool include_uses,
+ bool include_defs, bool include_uses,
bool include_eq_uses)
{
rtx insn;
if (include_defs)
- offset = df_add_refs_to_table (offset, ref_info,
+ offset = df_add_refs_to_table (offset, ref_info,
df_get_artificial_defs (bb->index));
if (include_uses)
- offset = df_add_refs_to_table (offset, ref_info,
+ offset = df_add_refs_to_table (offset, ref_info,
df_get_artificial_uses (bb->index));
FOR_BB_INSNS (bb, insn)
@@ -1832,13 +1832,13 @@ df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
{
unsigned int uid = INSN_UID (insn);
if (include_defs)
- offset = df_add_refs_to_table (offset, ref_info,
+ offset = df_add_refs_to_table (offset, ref_info,
DF_INSN_UID_DEFS (uid));
if (include_uses)
- offset = df_add_refs_to_table (offset, ref_info,
+ offset = df_add_refs_to_table (offset, ref_info,
DF_INSN_UID_USES (uid));
if (include_eq_uses)
- offset = df_add_refs_to_table (offset, ref_info,
+ offset = df_add_refs_to_table (offset, ref_info,
DF_INSN_UID_EQ_USES (uid));
}
return offset;
@@ -1852,7 +1852,7 @@ df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
static void
df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
- bool include_defs, bool include_uses,
+ bool include_defs, bool include_uses,
bool include_eq_uses)
{
basic_block bb;
@@ -1867,8 +1867,8 @@ df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, index, bi)
{
- offset = df_reorganize_refs_by_insn_bb (BASIC_BLOCK (index), offset, ref_info,
- include_defs, include_uses,
+ offset = df_reorganize_refs_by_insn_bb (BASIC_BLOCK (index), offset, ref_info,
+ include_defs, include_uses,
include_eq_uses);
}
@@ -1877,8 +1877,8 @@ df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
else
{
FOR_ALL_BB (bb)
- offset = df_reorganize_refs_by_insn_bb (bb, offset, ref_info,
- include_defs, include_uses,
+ offset = df_reorganize_refs_by_insn_bb (bb, offset, ref_info,
+ include_defs, include_uses,
include_eq_uses);
ref_info->table_size = offset;
}
@@ -1887,7 +1887,7 @@ df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
/* If the use refs in DF are not organized, reorganize them. */
-void
+void
df_maybe_reorganize_use_refs (enum df_ref_order order)
{
if (order == df->use_info.ref_order)
@@ -1922,14 +1922,14 @@ df_maybe_reorganize_use_refs (enum df_ref_order order)
gcc_unreachable ();
break;
}
-
+
df->use_info.ref_order = order;
}
/* If the def refs in DF are not organized, reorganize them. */
-void
+void
df_maybe_reorganize_def_refs (enum df_ref_order order)
{
if (order == df->def_info.ref_order)
@@ -1958,14 +1958,14 @@ df_maybe_reorganize_def_refs (enum df_ref_order order)
gcc_unreachable ();
break;
}
-
+
df->def_info.ref_order = order;
}
/* Change all of the basic block references in INSN to use the insn's
- current basic block. This function is called from routines that move
- instructions from one block to another. */
+ current basic block. This function is called from routines that move
+ instructions from one block to another. */
void
df_insn_change_bb (rtx insn, basic_block new_bb)
@@ -2001,7 +2001,7 @@ df_insn_change_bb (rtx insn, basic_block new_bb)
if (old_bb)
{
if (dump_file)
- fprintf (dump_file, " from %d to %d\n",
+ fprintf (dump_file, " from %d to %d\n",
old_bb->index, new_bb->index);
df_set_bb_dirty (old_bb);
}
@@ -2014,7 +2014,7 @@ df_insn_change_bb (rtx insn, basic_block new_bb)
/* Helper function for df_ref_change_reg_with_loc. */
static void
-df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df,
+df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df,
struct df_reg_info *new_df,
int new_regno, rtx loc)
{
@@ -2062,11 +2062,11 @@ df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df,
else
ref_vec = insn_info->uses;
if (dump_file)
- fprintf (dump_file, "changing reg in insn %d\n",
- DF_REF_INSN_UID (the_ref));
-
+ fprintf (dump_file, "changing reg in insn %d\n",
+ DF_REF_INSN_UID (the_ref));
+
ref_vec_t = ref_vec;
-
+
/* Find the length. */
while (*ref_vec_t)
{
@@ -2096,11 +2096,11 @@ df_ref_change_reg_with_loc (int old_regno, int new_regno, rtx loc)
df_grow_reg_info ();
- df_ref_change_reg_with_loc_1 (DF_REG_DEF_GET (old_regno),
+ df_ref_change_reg_with_loc_1 (DF_REG_DEF_GET (old_regno),
DF_REG_DEF_GET (new_regno), new_regno, loc);
- df_ref_change_reg_with_loc_1 (DF_REG_USE_GET (old_regno),
+ df_ref_change_reg_with_loc_1 (DF_REG_USE_GET (old_regno),
DF_REG_USE_GET (new_regno), new_regno, loc);
- df_ref_change_reg_with_loc_1 (DF_REG_EQ_USE_GET (old_regno),
+ df_ref_change_reg_with_loc_1 (DF_REG_EQ_USE_GET (old_regno),
DF_REG_EQ_USE_GET (new_regno), new_regno, loc);
}
@@ -2113,7 +2113,7 @@ df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
struct df_mw_hardreg **mw_vec = insn_info->mw_hardregs;
unsigned int deleted = 0;
unsigned int count = 0;
- struct df_scan_problem_data *problem_data
+ struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
if (!*mw_vec)
@@ -2130,7 +2130,7 @@ df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
/* Shove the remaining ones down one to fill the gap. While
this looks n**2, it is highly unusual to have any mw regs
in eq_notes and the chances of more than one are almost
- non existent. */
+ non existent. */
while (*temp_vec)
{
*temp_vec = *(temp_vec + 1);
@@ -2190,7 +2190,7 @@ df_notes_rescan (rtx insn)
insn_info->eq_uses = df_null_ref_rec;
insn_info->mw_hardregs = df_null_mw_rec;
}
-
+
bitmap_clear_bit (df->insns_to_delete, uid);
/* If the insn is set to be rescanned, it does not need to also
be notes rescanned. */
@@ -2253,22 +2253,22 @@ df_notes_rescan (rtx insn)
expanding it if necessary. */
if (mw_len > num_deleted)
{
- insn_info->mw_hardregs =
+ insn_info->mw_hardregs =
XRESIZEVEC (struct df_mw_hardreg *,
insn_info->mw_hardregs,
count + 1 + mw_len);
}
memcpy (&insn_info->mw_hardregs[count],
- VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
+ VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
mw_len * sizeof (struct df_mw_hardreg *));
insn_info->mw_hardregs[count + mw_len] = NULL;
- qsort (insn_info->mw_hardregs, count + mw_len,
+ qsort (insn_info->mw_hardregs, count + mw_len,
sizeof (struct df_mw_hardreg *), df_mw_compare);
}
else
{
- /* No vector there. */
- insn_info->mw_hardregs
+ /* No vector there. */
+ insn_info->mw_hardregs
= XNEWVEC (struct df_mw_hardreg*, 1 + mw_len);
memcpy (insn_info->mw_hardregs,
VEC_address (df_mw_hardreg_ptr, collection_rec.mw_vec),
@@ -2294,7 +2294,7 @@ df_notes_rescan (rtx insn)
----------------------------------------------------------------------------*/
-/* Return true if the contents of two df_ref's are identical.
+/* Return true if the contents of two df_ref's are identical.
It ignores DF_REF_MARKER. */
static bool
@@ -2302,7 +2302,7 @@ df_ref_equal_p (df_ref ref1, df_ref ref2)
{
if (!ref2)
return false;
-
+
if (ref1 == ref2)
return true;
@@ -2310,12 +2310,12 @@ df_ref_equal_p (df_ref ref1, df_ref ref2)
|| DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2)
|| DF_REF_REG (ref1) != DF_REF_REG (ref2)
|| DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2)
- || ((DF_REF_FLAGS (ref1) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG))
+ || ((DF_REF_FLAGS (ref1) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG))
!= (DF_REF_FLAGS (ref2) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG)))
|| DF_REF_BB (ref1) != DF_REF_BB (ref2)
|| DF_REF_INSN_INFO (ref1) != DF_REF_INSN_INFO (ref2))
return false;
-
+
switch (DF_REF_CLASS (ref1))
{
case DF_REF_ARTIFICIAL:
@@ -2357,7 +2357,7 @@ df_ref_compare (const void *r1, const void *r2)
if (DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2))
return (int)DF_REF_REGNO (ref1) - (int)DF_REF_REGNO (ref2);
-
+
if (DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2))
return (int)DF_REF_TYPE (ref1) - (int)DF_REF_TYPE (ref2);
@@ -2439,7 +2439,7 @@ df_sort_and_compress_refs (VEC(df_ref,stack) **ref_vec)
which is the most common case for large COUNT case
(which happens for CALL INSNs),
no need to sort and filter out duplicate.
- Simply return the count.
+ Simply return the count.
Make sure DF_GET_ADD_REFS adds refs in the increasing order
of DF_REF_COMPARE. */
if (i == count - 1)
@@ -2469,7 +2469,7 @@ df_sort_and_compress_refs (VEC(df_ref,stack) **ref_vec)
}
-/* Return true if the contents of two df_ref's are identical.
+/* Return true if the contents of two df_ref's are identical.
It ignores DF_REF_MARKER. */
static bool
@@ -2522,7 +2522,7 @@ static void
df_sort_and_compress_mws (VEC(df_mw_hardreg_ptr,stack) **mw_vec)
{
unsigned int count;
- struct df_scan_problem_data *problem_data
+ struct df_scan_problem_data *problem_data
= (struct df_scan_problem_data *) df_scan->problem_data;
unsigned int i;
unsigned int dist = 0;
@@ -2585,8 +2585,8 @@ df_canonize_collection_rec (struct df_collection_rec *collection_rec)
/* Add the new df_ref to appropriate reg_info/ref_info chains. */
static void
-df_install_ref (df_ref this_ref,
- struct df_reg_info *reg_info,
+df_install_ref (df_ref this_ref,
+ struct df_reg_info *reg_info,
struct df_ref_info *ref_info,
bool add_to_table)
{
@@ -2613,7 +2613,7 @@ df_install_ref (df_ref this_ref,
if (head)
DF_REF_PREV_REG (head) = this_ref;
-
+
if (add_to_table)
{
gcc_assert (ref_info->ref_order != DF_REF_ORDER_NO_TABLE);
@@ -2622,10 +2622,10 @@ df_install_ref (df_ref this_ref,
/* Add the ref to the big array of defs. */
ref_info->refs[ref_info->table_size] = this_ref;
ref_info->table_size++;
- }
+ }
else
DF_REF_ID (this_ref) = -1;
-
+
ref_info->total_size++;
}
@@ -2637,7 +2637,7 @@ df_install_ref (df_ref this_ref,
static df_ref *
df_install_refs (basic_block bb,
VEC(df_ref,stack)* old_vec,
- struct df_reg_info **reg_info,
+ struct df_reg_info **reg_info,
struct df_ref_info *ref_info,
bool is_notes)
{
@@ -2677,10 +2677,10 @@ df_install_refs (basic_block bb,
for (ix = 0; VEC_iterate (df_ref, old_vec, ix, this_ref); ++ix)
{
new_vec[ix] = this_ref;
- df_install_ref (this_ref, reg_info[DF_REF_REGNO (this_ref)],
+ df_install_ref (this_ref, reg_info[DF_REF_REGNO (this_ref)],
ref_info, add_to_table);
}
-
+
new_vec[count] = NULL;
return new_vec;
}
@@ -2700,9 +2700,9 @@ df_install_mws (VEC(df_mw_hardreg_ptr,stack) *old_vec)
count = VEC_length (df_mw_hardreg_ptr, old_vec);
if (count)
{
- struct df_mw_hardreg **new_vec
+ struct df_mw_hardreg **new_vec
= XNEWVEC (struct df_mw_hardreg*, count + 1);
- memcpy (new_vec, VEC_address (df_mw_hardreg_ptr, old_vec),
+ memcpy (new_vec, VEC_address (df_mw_hardreg_ptr, old_vec),
sizeof (struct df_mw_hardreg*) * count);
new_vec[count] = NULL;
return new_vec;
@@ -2716,7 +2716,7 @@ df_install_mws (VEC(df_mw_hardreg_ptr,stack) *old_vec)
chains and update other necessary information. */
static void
-df_refs_add_to_chains (struct df_collection_rec *collection_rec,
+df_refs_add_to_chains (struct df_collection_rec *collection_rec,
basic_block bb, rtx insn)
{
if (insn)
@@ -2728,7 +2728,7 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
if (collection_rec->def_vec)
{
df_scan_free_ref_vec (insn_rec->defs);
- insn_rec->defs
+ insn_rec->defs
= df_install_refs (bb, collection_rec->def_vec,
df->def_regs,
&df->def_info, false);
@@ -2736,23 +2736,23 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
if (collection_rec->use_vec)
{
df_scan_free_ref_vec (insn_rec->uses);
- insn_rec->uses
- = df_install_refs (bb, collection_rec->use_vec,
+ insn_rec->uses
+ = df_install_refs (bb, collection_rec->use_vec,
df->use_regs,
&df->use_info, false);
}
if (collection_rec->eq_use_vec)
{
df_scan_free_ref_vec (insn_rec->eq_uses);
- insn_rec->eq_uses
- = df_install_refs (bb, collection_rec->eq_use_vec,
+ insn_rec->eq_uses
+ = df_install_refs (bb, collection_rec->eq_use_vec,
df->eq_use_regs,
&df->use_info, true);
}
if (collection_rec->mw_vec)
{
df_scan_free_mws_vec (insn_rec->mw_hardregs);
- insn_rec->mw_hardregs
+ insn_rec->mw_hardregs
= df_install_mws (collection_rec->mw_vec);
}
}
@@ -2761,32 +2761,32 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
df_scan_free_ref_vec (bb_info->artificial_defs);
- bb_info->artificial_defs
+ bb_info->artificial_defs
= df_install_refs (bb, collection_rec->def_vec,
df->def_regs,
&df->def_info, false);
df_scan_free_ref_vec (bb_info->artificial_uses);
- bb_info->artificial_uses
- = df_install_refs (bb, collection_rec->use_vec,
+ bb_info->artificial_uses
+ = df_install_refs (bb, collection_rec->use_vec,
df->use_regs,
&df->use_info, false);
}
}
-/* Allocate a ref and initialize its fields.
+/* Allocate a ref and initialize its fields.
If the REF_FLAGS field contain DF_REF_SIGN_EXTRACT or
DF_REF_ZERO_EXTRACT. WIDTH, OFFSET and MODE are used to access the fields
if they were constants. Otherwise they should be -1 if those flags
were set. */
-static df_ref
-df_ref_create_structure (enum df_ref_class cl,
+static df_ref
+df_ref_create_structure (enum df_ref_class cl,
struct df_collection_rec *collection_rec,
- rtx reg, rtx *loc,
+ rtx reg, rtx *loc,
basic_block bb, struct df_insn_info *info,
- enum df_ref_type ref_type,
+ enum df_ref_type ref_type,
int ref_flags,
int width, int offset, enum machine_mode mode)
{
@@ -2842,7 +2842,7 @@ df_ref_create_structure (enum df_ref_class cl,
DF_REF_FLAGS_CLEAR (this_ref, DF_HARD_REG_LIVE);
/* See if this ref needs to have DF_HARD_REG_LIVE bit set. */
- if ((regno < FIRST_PSEUDO_REGISTER)
+ if ((regno < FIRST_PSEUDO_REGISTER)
&& (!DF_REF_IS_ARTIFICIAL (this_ref)))
{
if (DF_REF_REG_DEF_P (this_ref))
@@ -2871,7 +2871,7 @@ df_ref_create_structure (enum df_ref_class cl,
/* Create new references of type DF_REF_TYPE for each part of register REG
- at address LOC within INSN of BB.
+ at address LOC within INSN of BB.
If the REF_FLAGS field contain DF_REF_SIGN_EXTRACT or
DF_REF_ZERO_EXTRACT. WIDTH, OFFSET and MODE are used to access the
@@ -2880,13 +2880,13 @@ df_ref_create_structure (enum df_ref_class cl,
static void
-df_ref_record (enum df_ref_class cl,
+df_ref_record (enum df_ref_class cl,
struct df_collection_rec *collection_rec,
- rtx reg, rtx *loc,
+ rtx reg, rtx *loc,
basic_block bb, struct df_insn_info *insn_info,
- enum df_ref_type ref_type,
+ enum df_ref_type ref_type,
int ref_flags,
- int width, int offset, enum machine_mode mode)
+ int width, int offset, enum machine_mode mode)
{
unsigned int regno;
@@ -2916,7 +2916,7 @@ df_ref_record (enum df_ref_class cl,
and REG_UNUSED notes. */
if ((endregno != regno + 1) && insn_info)
{
- /* Sets to a subreg of a multiword register are partial.
+ /* Sets to a subreg of a multiword register are partial.
Sets to a non-subreg of a multiword register are not. */
if (GET_CODE (reg) == SUBREG)
ref_flags |= DF_REF_PARTIAL;
@@ -2935,8 +2935,8 @@ df_ref_record (enum df_ref_class cl,
for (i = regno; i < endregno; i++)
{
- ref = df_ref_create_structure (cl, collection_rec, regno_reg_rtx[i], loc,
- bb, insn_info, ref_type, ref_flags,
+ ref = df_ref_create_structure (cl, collection_rec, regno_reg_rtx[i], loc,
+ bb, insn_info, ref_type, ref_flags,
width, offset, mode);
gcc_assert (ORIGINAL_REGNO (DF_REF_REG (ref)) == i);
@@ -2944,7 +2944,7 @@ df_ref_record (enum df_ref_class cl,
}
else
{
- df_ref_create_structure (cl, collection_rec, reg, loc, bb, insn_info,
+ df_ref_create_structure (cl, collection_rec, reg, loc, bb, insn_info,
ref_type, ref_flags, width, offset, mode);
}
}
@@ -3003,8 +3003,8 @@ df_def_record_1 (struct df_collection_rec *collection_rec,
if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
|| GET_CODE (temp) == SET)
df_def_record_1 (collection_rec,
- temp, bb, insn_info,
- GET_CODE (temp) == CLOBBER
+ temp, bb, insn_info,
+ GET_CODE (temp) == CLOBBER
? flags | DF_REF_MUST_CLOBBER : flags);
}
return;
@@ -3021,7 +3021,7 @@ df_def_record_1 (struct df_collection_rec *collection_rec,
if (GET_CODE (dst) == ZERO_EXTRACT)
{
flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL | DF_REF_ZERO_EXTRACT;
-
+
if (CONST_INT_P (XEXP (dst, 1))
&& CONST_INT_P (XEXP (dst, 2)))
{
@@ -3038,15 +3038,15 @@ df_def_record_1 (struct df_collection_rec *collection_rec,
/* At this point if we do not have a reg or a subreg, just return. */
if (REG_P (dst))
{
- df_ref_record (cl, collection_rec,
- dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
+ df_ref_record (cl, collection_rec,
+ dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
width, offset, mode);
/* We want to keep sp alive everywhere - by making all
writes to sp also use of sp. */
if (REGNO (dst) == STACK_POINTER_REGNUM)
df_ref_record (DF_REF_BASE, collection_rec,
- dst, NULL, bb, insn_info, DF_REF_REG_USE, flags,
+ dst, NULL, bb, insn_info, DF_REF_REG_USE, flags,
width, offset, mode);
}
else if (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst)))
@@ -3056,8 +3056,8 @@ df_def_record_1 (struct df_collection_rec *collection_rec,
flags |= DF_REF_SUBREG;
- df_ref_record (cl, collection_rec,
- dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
+ df_ref_record (cl, collection_rec,
+ dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
width, offset, mode);
}
}
@@ -3066,7 +3066,7 @@ df_def_record_1 (struct df_collection_rec *collection_rec,
/* Process all the registers defined in the pattern rtx, X. */
static void
-df_defs_record (struct df_collection_rec *collection_rec,
+df_defs_record (struct df_collection_rec *collection_rec,
rtx x, basic_block bb, struct df_insn_info *insn_info,
int flags)
{
@@ -3081,7 +3081,7 @@ df_defs_record (struct df_collection_rec *collection_rec,
}
else if (code == COND_EXEC)
{
- df_defs_record (collection_rec, COND_EXEC_CODE (x),
+ df_defs_record (collection_rec, COND_EXEC_CODE (x),
bb, insn_info, DF_REF_CONDITIONAL);
}
else if (code == PARALLEL)
@@ -3095,7 +3095,7 @@ df_defs_record (struct df_collection_rec *collection_rec,
}
-/* Process all the registers used in the rtx at address LOC.
+/* Process all the registers used in the rtx at address LOC.
If the REF_FLAGS field contain DF_REF_SIGN_EXTRACT or
DF_REF_ZERO_EXTRACT. WIDTH, OFFSET and MODE are used to access the
@@ -3147,8 +3147,8 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
case MEM:
df_uses_record (cl, collection_rec,
- &XEXP (x, 0), DF_REF_REG_MEM_LOAD,
- bb, insn_info, flags & DF_REF_IN_NOTE,
+ &XEXP (x, 0), DF_REF_REG_MEM_LOAD,
+ bb, insn_info, flags & DF_REF_IN_NOTE,
width, offset, mode);
return;
@@ -3159,16 +3159,16 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
if (!REG_P (SUBREG_REG (x)))
{
loc = &SUBREG_REG (x);
- df_uses_record (cl, collection_rec, loc, ref_type, bb, insn_info, flags,
+ df_uses_record (cl, collection_rec, loc, ref_type, bb, insn_info, flags,
width, offset, mode);
return;
}
/* ... Fall through ... */
case REG:
- df_ref_record (cl, collection_rec,
+ df_ref_record (cl, collection_rec,
x, loc, bb, insn_info,
- ref_type, flags,
+ ref_type, flags,
width, offset, mode);
return;
@@ -3191,7 +3191,7 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
flags |= DF_REF_SIGN_EXTRACT;
df_uses_record (DF_REF_EXTRACT, collection_rec,
- &XEXP (x, 0), ref_type, bb, insn_info, flags,
+ &XEXP (x, 0), ref_type, bb, insn_info, flags,
width, offset, mode);
return;
}
@@ -3203,7 +3203,7 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
rtx dst = SET_DEST (x);
gcc_assert (!(flags & DF_REF_IN_NOTE));
df_uses_record (cl, collection_rec,
- &SET_SRC (x), DF_REF_REG_USE, bb, insn_info, flags,
+ &SET_SRC (x), DF_REF_REG_USE, bb, insn_info, flags,
width, offset, mode);
switch (GET_CODE (dst))
@@ -3211,9 +3211,9 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
case SUBREG:
if (df_read_modify_subreg_p (dst))
{
- df_uses_record (cl, collection_rec, &SUBREG_REG (dst),
- DF_REF_REG_USE, bb, insn_info,
- flags | DF_REF_READ_WRITE | DF_REF_SUBREG,
+ df_uses_record (cl, collection_rec, &SUBREG_REG (dst),
+ DF_REF_REG_USE, bb, insn_info,
+ flags | DF_REF_READ_WRITE | DF_REF_SUBREG,
width, offset, mode);
break;
}
@@ -3226,7 +3226,7 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
break;
case MEM:
df_uses_record (cl, collection_rec, &XEXP (dst, 0),
- DF_REF_REG_MEM_STORE, bb, insn_info, flags,
+ DF_REF_REG_MEM_STORE, bb, insn_info, flags,
width, offset, mode);
break;
case STRICT_LOW_PART:
@@ -3235,10 +3235,10 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
/* A strict_low_part uses the whole REG and not just the
SUBREG. */
dst = XEXP (dst, 0);
- df_uses_record (cl, collection_rec,
- (GET_CODE (dst) == SUBREG) ? &SUBREG_REG (dst) : temp,
+ df_uses_record (cl, collection_rec,
+ (GET_CODE (dst) == SUBREG) ? &SUBREG_REG (dst) : temp,
DF_REF_REG_USE, bb, insn_info,
- DF_REF_READ_WRITE | DF_REF_STRICT_LOW_PART,
+ DF_REF_READ_WRITE | DF_REF_STRICT_LOW_PART,
width, offset, mode);
}
break;
@@ -3253,7 +3253,7 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
if (GET_CODE (XEXP (dst,0)) == MEM)
{
/* Handle the case of zero_extract(mem(...)) in the set dest.
- This special case is allowed only if the mem is a single byte and
+ This special case is allowed only if the mem is a single byte and
is useful to set a bitfield in memory. */
df_uses_record (DF_REF_EXTRACT, collection_rec, &XEXP (XEXP (dst,0), 0),
DF_REF_REG_MEM_STORE, bb, insn_info,
@@ -3262,23 +3262,23 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
}
else
{
- df_uses_record (DF_REF_EXTRACT, collection_rec, &XEXP (dst, 0),
- DF_REF_REG_USE, bb, insn_info,
- DF_REF_READ_WRITE | DF_REF_ZERO_EXTRACT,
+ df_uses_record (DF_REF_EXTRACT, collection_rec, &XEXP (dst, 0),
+ DF_REF_REG_USE, bb, insn_info,
+ DF_REF_READ_WRITE | DF_REF_ZERO_EXTRACT,
width, offset, mode);
}
}
- else
+ else
{
- df_uses_record (cl, collection_rec, &XEXP (dst, 1),
- DF_REF_REG_USE, bb, insn_info, flags,
+ df_uses_record (cl, collection_rec, &XEXP (dst, 1),
+ DF_REF_REG_USE, bb, insn_info, flags,
width, offset, mode);
- df_uses_record (cl, collection_rec, &XEXP (dst, 2),
- DF_REF_REG_USE, bb, insn_info, flags,
+ df_uses_record (cl, collection_rec, &XEXP (dst, 2),
+ DF_REF_REG_USE, bb, insn_info, flags,
width, offset, mode);
- df_uses_record (cl, collection_rec, &XEXP (dst, 0),
- DF_REF_REG_USE, bb, insn_info,
- DF_REF_READ_WRITE | DF_REF_ZERO_EXTRACT,
+ df_uses_record (cl, collection_rec, &XEXP (dst, 0),
+ DF_REF_REG_USE, bb, insn_info,
+ DF_REF_READ_WRITE | DF_REF_ZERO_EXTRACT,
width, offset, mode);
}
@@ -3331,7 +3331,7 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
df_uses_record (cl, collection_rec, &ASM_OPERANDS_INPUT (x, j),
- DF_REF_REG_USE, bb, insn_info, flags,
+ DF_REF_REG_USE, bb, insn_info, flags,
width, offset, mode);
return;
}
@@ -3354,9 +3354,9 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
gcc_assert (!DEBUG_INSN_P (insn_info->insn));
/* Catch the def of the register being modified. */
df_ref_record (cl, collection_rec, XEXP (x, 0), &XEXP (x, 0),
- bb, insn_info,
+ bb, insn_info,
DF_REF_REG_DEF,
- flags | DF_REF_READ_WRITE | DF_REF_PRE_POST_MODIFY,
+ flags | DF_REF_READ_WRITE | DF_REF_PRE_POST_MODIFY,
width, offset, mode);
/* ... Fall through to handle uses ... */
@@ -3380,8 +3380,8 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
loc = &XEXP (x, 0);
goto retry;
}
- df_uses_record (cl, collection_rec, &XEXP (x, i), ref_type,
- bb, insn_info, flags,
+ df_uses_record (cl, collection_rec, &XEXP (x, i), ref_type,
+ bb, insn_info, flags,
width, offset, mode);
}
else if (fmt[i] == 'E')
@@ -3389,8 +3389,8 @@ df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
int j;
for (j = 0; j < XVECLEN (x, i); j++)
df_uses_record (cl, collection_rec,
- &XVECEXP (x, i, j), ref_type,
- bb, insn_info, flags,
+ &XVECEXP (x, i, j), ref_type,
+ bb, insn_info, flags,
width, offset, mode);
}
}
@@ -3439,7 +3439,7 @@ df_get_conditional_uses (struct df_collection_rec *collection_rec)
static void
df_get_call_refs (struct df_collection_rec * collection_rec,
- basic_block bb,
+ basic_block bb,
struct df_insn_info *insn_info,
int flags)
{
@@ -3485,7 +3485,7 @@ df_get_call_refs (struct df_collection_rec * collection_rec,
/* The stack ptr is used (honorarily) by a CALL insn. */
df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[STACK_POINTER_REGNUM],
NULL, bb, insn_info, DF_REF_REG_USE,
- DF_REF_CALL_STACK_USAGE | flags,
+ DF_REF_CALL_STACK_USAGE | flags,
-1, -1, VOIDmode);
/* Calls may also reference any of the global registers,
@@ -3508,11 +3508,11 @@ df_get_call_refs (struct df_collection_rec * collection_rec,
&& (!bitmap_bit_p (defs_generated, ui))
&& (!is_sibling_call
|| !bitmap_bit_p (df->exit_block_uses, ui)
- || refers_to_regno_p (ui, ui+1,
+ || refers_to_regno_p (ui, ui+1,
crtl->return_rtx, NULL)))
- df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[ui],
+ df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[ui],
NULL, bb, insn_info, DF_REF_REG_DEF,
- DF_REF_MAY_CLOBBER | flags,
+ DF_REF_MAY_CLOBBER | flags,
-1, -1, VOIDmode);
}
@@ -3526,8 +3526,8 @@ df_get_call_refs (struct df_collection_rec * collection_rec,
and reg chains. */
static void
-df_insn_refs_collect (struct df_collection_rec* collection_rec,
- basic_block bb, struct df_insn_info *insn_info)
+df_insn_refs_collect (struct df_collection_rec* collection_rec,
+ basic_block bb, struct df_insn_info *insn_info)
{
rtx note;
bool is_cond_exec = (GET_CODE (PATTERN (insn_info->insn)) == COND_EXEC);
@@ -3572,12 +3572,12 @@ df_insn_refs_collect (struct df_collection_rec* collection_rec,
}
if (CALL_P (insn_info->insn))
- df_get_call_refs (collection_rec, bb, insn_info,
+ df_get_call_refs (collection_rec, bb, insn_info,
(is_cond_exec) ? DF_REF_CONDITIONAL : 0);
/* Record the register uses. */
df_uses_record (DF_REF_REGULAR, collection_rec,
- &PATTERN (insn_info->insn), DF_REF_REG_USE, bb, insn_info, 0,
+ &PATTERN (insn_info->insn), DF_REF_REG_USE, bb, insn_info, 0,
-1, -1, VOIDmode);
/* DF_REF_CONDITIONAL needs corresponding USES. */
@@ -3660,14 +3660,14 @@ df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
if (bb->flags & BB_NON_LOCAL_GOTO_TARGET)
df_ref_record (DF_REF_ARTIFICIAL, collection_rec, hard_frame_pointer_rtx, NULL,
bb, NULL, DF_REF_REG_DEF, DF_REF_AT_TOP, -1, -1, VOIDmode);
-
+
/* Add the artificial uses. */
if (bb->index >= NUM_FIXED_BLOCKS)
{
bitmap_iterator bi;
unsigned int regno;
- bitmap au = bb_has_eh_pred (bb)
- ? df->eh_block_artificial_uses
+ bitmap au = bb_has_eh_pred (bb)
+ ? df->eh_block_artificial_uses
: df->regular_block_artificial_uses;
EXECUTE_IF_SET_IN_BITMAP (au, 0, regno, bi)
@@ -3697,7 +3697,7 @@ df_bb_refs_record (int bb_index, bool scan_insns)
bb_info = df_scan_get_bb_info (bb_index);
- /* Need to make sure that there is a record in the basic block info. */
+ /* Need to make sure that there is a record in the basic block info. */
if (!bb_info)
{
bb_info = (struct df_scan_bb_info *) pool_alloc (df_scan->block_pool);
@@ -3769,7 +3769,7 @@ df_get_regular_block_artificial_uses (bitmap regular_block_artificial_uses)
/* Any reference to any pseudo before reload is a potential
reference of the frame pointer. */
bitmap_set_bit (regular_block_artificial_uses, FRAME_POINTER_REGNUM);
-
+
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
bitmap_set_bit (regular_block_artificial_uses, HARD_FRAME_POINTER_REGNUM);
#endif
@@ -3780,7 +3780,7 @@ df_get_regular_block_artificial_uses (bitmap regular_block_artificial_uses)
if (fixed_regs[ARG_POINTER_REGNUM])
bitmap_set_bit (regular_block_artificial_uses, ARG_POINTER_REGNUM);
#endif
-
+
/* Any constant, or pseudo with constant equivalences, may
require reloading from memory using the pic register. */
if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
@@ -3880,7 +3880,7 @@ df_get_entry_block_def_set (bitmap entry_block_defs)
bitmap_set_bit (entry_block_defs, i);
#endif
}
-
+
/* The always important stack pointer. */
bitmap_set_bit (entry_block_defs, STACK_POINTER_REGNUM);
@@ -3926,7 +3926,7 @@ df_get_entry_block_def_set (bitmap entry_block_defs)
if (fixed_regs[ARG_POINTER_REGNUM])
bitmap_set_bit (entry_block_defs, ARG_POINTER_REGNUM);
#endif
-
+
#ifdef PIC_OFFSET_TABLE_REGNUM
/* Any constant, or pseudo with constant equivalences, may
require reloading from memory using the pic register. */
@@ -3940,26 +3940,26 @@ df_get_entry_block_def_set (bitmap entry_block_defs)
if (REG_P (INCOMING_RETURN_ADDR_RTX))
bitmap_set_bit (entry_block_defs, REGNO (INCOMING_RETURN_ADDR_RTX));
#endif
-
+
targetm.live_on_entry (entry_block_defs);
}
/* Return the (conservative) set of hard registers that are defined on
- entry to the function.
- It uses df->entry_block_defs to determine which register
+ entry to the function.
+ It uses df->entry_block_defs to determine which register
reference to include. */
static void
-df_entry_block_defs_collect (struct df_collection_rec *collection_rec,
+df_entry_block_defs_collect (struct df_collection_rec *collection_rec,
bitmap entry_block_defs)
{
- unsigned int i;
+ unsigned int i;
bitmap_iterator bi;
EXECUTE_IF_SET_IN_BITMAP (entry_block_defs, 0, i, bi)
{
- df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
+ df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
ENTRY_BLOCK_PTR, NULL, DF_REF_REG_DEF, 0, -1, -1,
VOIDmode);
}
@@ -4028,17 +4028,17 @@ df_update_entry_block_defs (void)
static void
df_get_exit_block_use_set (bitmap exit_block_uses)
{
- unsigned int i;
+ unsigned int i;
bitmap_clear (exit_block_uses);
/* Stack pointer is always live at the exit. */
bitmap_set_bit (exit_block_uses, STACK_POINTER_REGNUM);
-
+
/* Mark the frame pointer if needed at the end of the function.
If we end up eliminating it, it will be removed from the live
list of each basic block by reload. */
-
+
if ((!reload_completed) || frame_pointer_needed)
{
bitmap_set_bit (exit_block_uses, FRAME_POINTER_REGNUM);
@@ -4057,14 +4057,14 @@ df_get_exit_block_use_set (bitmap exit_block_uses)
&& fixed_regs[PIC_OFFSET_TABLE_REGNUM])
bitmap_set_bit (exit_block_uses, PIC_OFFSET_TABLE_REGNUM);
#endif
-
+
/* Mark all global registers, and all registers used by the
epilogue as being live at the end of the function since they
may be referenced by our caller. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (global_regs[i] || EPILOGUE_USES (i))
bitmap_set_bit (exit_block_uses, i);
-
+
if (HAVE_epilogue && epilogue_completed)
{
/* Mark all call-saved registers that we actually used. */
@@ -4073,7 +4073,7 @@ df_get_exit_block_use_set (bitmap exit_block_uses)
&& !TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
bitmap_set_bit (exit_block_uses, i);
}
-
+
#ifdef EH_RETURN_DATA_REGNO
/* Mark the registers that will contain data for the handler. */
if (reload_completed && crtl->calls_eh_return)
@@ -4104,20 +4104,20 @@ df_get_exit_block_use_set (bitmap exit_block_uses)
if (tmp && REG_P (tmp))
df_mark_reg (tmp, exit_block_uses);
}
-#endif
+#endif
/* Mark function return value. */
diddle_return_value (df_mark_reg, (void*) exit_block_uses);
}
-/* Return the refs of hard registers that are used in the exit block.
+/* Return the refs of hard registers that are used in the exit block.
It uses df->exit_block_uses to determine register to include. */
static void
df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exit_block_uses)
{
- unsigned int i;
+ unsigned int i;
bitmap_iterator bi;
EXECUTE_IF_SET_IN_BITMAP (exit_block_uses, 0, i, bi)
@@ -4127,7 +4127,7 @@ df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exi
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
/* It is deliberate that this is not put in the exit block uses but
I do not know why. */
- if (reload_completed
+ if (reload_completed
&& !bitmap_bit_p (exit_block_uses, ARG_POINTER_REGNUM)
&& bb_has_eh_pred (EXIT_BLOCK_PTR)
&& fixed_regs[ARG_POINTER_REGNUM])
@@ -4139,7 +4139,7 @@ df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exi
}
-/* Record the set of hard registers that are used in the exit block.
+/* Record the set of hard registers that are used in the exit block.
It uses df->exit_block_uses to determine which bit to include. */
static void
@@ -4199,7 +4199,7 @@ static bool initialized = false;
/* Initialize some platform specific structures. */
-void
+void
df_hard_reg_init (void)
{
#ifdef ELIMINABLE_REGS
@@ -4212,22 +4212,22 @@ df_hard_reg_init (void)
/* Record which registers will be eliminated. We use this in
mark_used_regs. */
CLEAR_HARD_REG_SET (elim_reg_set);
-
+
#ifdef ELIMINABLE_REGS
for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from);
#else
SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM);
#endif
-
+
initialized = true;
}
/* Recompute the parts of scanning that are based on regs_ever_live
- because something changed in that array. */
+ because something changed in that array. */
-void
+void
df_update_entry_exit_and_calls (void)
{
basic_block bb;
@@ -4237,7 +4237,7 @@ df_update_entry_exit_and_calls (void)
/* The call insns need to be rescanned because there may be changes
in the set of registers clobbered across the call. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB (bb)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
@@ -4254,7 +4254,7 @@ df_update_entry_exit_and_calls (void)
this array. See the comment in df.h for df->hard_regs_live_count
for the conditions that this array is set. */
-bool
+bool
df_hard_reg_used_p (unsigned int reg)
{
gcc_assert (df);
@@ -4279,7 +4279,7 @@ df_hard_reg_used_count (unsigned int reg)
/* Get the value of regs_ever_live[REGNO]. */
-bool
+bool
df_regs_ever_live_p (unsigned int regno)
{
return regs_ever_live[regno];
@@ -4289,7 +4289,7 @@ df_regs_ever_live_p (unsigned int regno)
/* Set regs_ever_live[REGNO] to VALUE. If this cause regs_ever_live
to change, schedule that change for the next update. */
-void
+void
df_set_regs_ever_live (unsigned int regno, bool value)
{
if (regs_ever_live[regno] == value)
@@ -4309,7 +4309,7 @@ df_compute_regs_ever_live (bool reset)
{
unsigned int i;
bool changed = df->redo_entry_and_exit;
-
+
if (reset)
memset (regs_ever_live, 0, sizeof (regs_ever_live));
@@ -4342,10 +4342,10 @@ df_compute_regs_ever_live (bool reset)
/* Mark all refs in the reg chain. Verify that all of the registers
-are in the correct chain. */
+are in the correct chain. */
static unsigned int
-df_reg_chain_mark (df_ref refs, unsigned int regno,
+df_reg_chain_mark (df_ref refs, unsigned int regno,
bool is_def, bool is_eq_use)
{
unsigned int count = 0;
@@ -4380,7 +4380,7 @@ df_reg_chain_mark (df_ref refs, unsigned int regno,
}
-/* Verify that all of the registers in the chain are unmarked. */
+/* Verify that all of the registers in the chain are unmarked. */
static void
df_reg_chain_verify_unmarked (df_ref refs)
@@ -4461,7 +4461,7 @@ df_mws_verify (VEC(df_mw_hardreg_ptr,stack) *new_rec,
/* Return true if the existing insn refs information is complete and
correct. Otherwise (i.e. if there's any missing or extra refs),
- return the correct df_ref chain in REFS_RETURN.
+ return the correct df_ref chain in REFS_RETURN.
If ABORT_IF_FAIL, leave the refs that are verified (already in the
ref chain) as DF_REF_MARKED(). If it's false, then it's a per-insn
@@ -4472,7 +4472,7 @@ df_mws_verify (VEC(df_mw_hardreg_ptr,stack) *new_rec,
static bool
df_insn_refs_verify (struct df_collection_rec *collection_rec,
- basic_block bb,
+ basic_block bb,
rtx insn,
bool abort_if_fail)
{
@@ -4487,19 +4487,19 @@ df_insn_refs_verify (struct df_collection_rec *collection_rec,
/* The insn_rec was created but it was never filled out. */
if (abort_if_fail)
gcc_assert (0);
- else
+ else
return false;
}
/* Unfortunately we cannot opt out early if one of these is not
right because the marks will not get cleared. */
- ret1 = df_refs_verify (collection_rec->def_vec, DF_INSN_UID_DEFS (uid),
+ ret1 = df_refs_verify (collection_rec->def_vec, DF_INSN_UID_DEFS (uid),
abort_if_fail);
- ret2 = df_refs_verify (collection_rec->use_vec, DF_INSN_UID_USES (uid),
+ ret2 = df_refs_verify (collection_rec->use_vec, DF_INSN_UID_USES (uid),
abort_if_fail);
- ret3 = df_refs_verify (collection_rec->eq_use_vec, DF_INSN_UID_EQ_USES (uid),
+ ret3 = df_refs_verify (collection_rec->eq_use_vec, DF_INSN_UID_EQ_USES (uid),
abort_if_fail);
- ret4 = df_mws_verify (collection_rec->mw_vec, DF_INSN_UID_MWS (uid),
+ ret4 = df_mws_verify (collection_rec->mw_vec, DF_INSN_UID_MWS (uid),
abort_if_fail);
return (ret1 && ret2 && ret3 && ret4);
}
@@ -4515,7 +4515,7 @@ df_bb_verify (basic_block bb)
rtx insn;
struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
struct df_collection_rec collection_rec;
-
+
memset (&collection_rec, 0, sizeof (struct df_collection_rec));
collection_rec.def_vec = VEC_alloc (df_ref, stack, 128);
collection_rec.use_vec = VEC_alloc (df_ref, stack, 32);
@@ -4538,12 +4538,12 @@ df_bb_verify (basic_block bb)
df_refs_verify (collection_rec.def_vec, df_get_artificial_defs (bb->index), true);
df_refs_verify (collection_rec.use_vec, df_get_artificial_uses (bb->index), true);
df_free_collection_rec (&collection_rec);
-
+
return true;
}
-/* Returns true if the entry block has correct and complete df_ref set.
+/* Returns true if the entry block has correct and complete df_ref set.
If not it either aborts if ABORT_IF_FAIL is true or returns false. */
static bool
@@ -4572,7 +4572,7 @@ df_entry_block_bitmap_verify (bool abort_if_fail)
}
-/* Returns true if the exit block has correct and complete df_ref set.
+/* Returns true if the exit block has correct and complete df_ref set.
If not it either aborts if ABORT_IF_FAIL is true or returns false. */
static bool
@@ -4620,11 +4620,11 @@ df_scan_verify (void)
/* (1) All of the refs are marked by going thru the reg chains. */
for (i = 0; i < DF_REG_SIZE (df); i++)
{
- gcc_assert (df_reg_chain_mark (DF_REG_DEF_CHAIN (i), i, true, false)
+ gcc_assert (df_reg_chain_mark (DF_REG_DEF_CHAIN (i), i, true, false)
== DF_REG_DEF_COUNT(i));
- gcc_assert (df_reg_chain_mark (DF_REG_USE_CHAIN (i), i, false, false)
+ gcc_assert (df_reg_chain_mark (DF_REG_USE_CHAIN (i), i, false, false)
== DF_REG_USE_COUNT(i));
- gcc_assert (df_reg_chain_mark (DF_REG_EQ_USE_CHAIN (i), i, false, true)
+ gcc_assert (df_reg_chain_mark (DF_REG_EQ_USE_CHAIN (i), i, false, true)
== DF_REG_EQ_USE_COUNT(i));
}
@@ -4637,13 +4637,13 @@ df_scan_verify (void)
df_get_regular_block_artificial_uses (regular_block_artificial_uses);
df_get_eh_block_artificial_uses (eh_block_artificial_uses);
- bitmap_ior_into (eh_block_artificial_uses,
+ bitmap_ior_into (eh_block_artificial_uses,
regular_block_artificial_uses);
/* Check artificial_uses bitmaps didn't change. */
- gcc_assert (bitmap_equal_p (regular_block_artificial_uses,
+ gcc_assert (bitmap_equal_p (regular_block_artificial_uses,
df->regular_block_artificial_uses));
- gcc_assert (bitmap_equal_p (eh_block_artificial_uses,
+ gcc_assert (bitmap_equal_p (eh_block_artificial_uses,
df->eh_block_artificial_uses));
BITMAP_FREE (regular_block_artificial_uses);
@@ -4653,7 +4653,7 @@ df_scan_verify (void)
the refs are verified in df_bb_verify. */
df_entry_block_bitmap_verify (true);
df_exit_block_bitmap_verify (true);
-
+
/* (3) All of the insns in all of the blocks are traversed and the
marks are cleared both in the artificial refs attached to the
blocks and the real refs inside the insns. It is a failure to
diff --git a/gcc/df.h b/gcc/df.h
index 0614983bc7d..be37fa412a8 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -2,7 +2,7 @@
for GNU compiler. This is part of flow optimization.
Copyright (C) 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
- Originally contributed by Michael P. Hayes
+ Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
and Kenneth Zadeck (zadeck@naturalbridge.com).
@@ -38,14 +38,14 @@ struct df_link;
struct df_insn_info;
union df_ref_d;
-/* Data flow problems. All problems must have a unique id here. */
+/* Data flow problems. All problems must have a unique id here. */
/* Scanning is not really a dataflow problem, but it is useful to have
the basic block functions in the vector so that things get done in
a uniform manner. The last four problems can be added or deleted
at any time are always defined (though LIVE is always there at -O2
or higher); the others are always there. */
-#define DF_SCAN 0
+#define DF_SCAN 0
#define DF_LR 1 /* Live Registers backward. */
#define DF_LIVE 2 /* Live Registers & Uninitialized Registers */
#define DF_RD 3 /* Reaching Defs. */
@@ -105,7 +105,7 @@ enum df_ref_flags
/* This flag is set if this ref is a partial use or def of the
associated register. */
DF_REF_PARTIAL = 1 << 4,
-
+
/* Read-modify-write refs generate both a use and a def and
these are marked with this flag to show that they are not
independent. */
@@ -120,7 +120,7 @@ enum df_ref_flags
/* This flag is set if this ref, generally a def, is a real
clobber. This is not currently set for registers live across a
- call because that clobbering may or may not happen.
+ call because that clobbering may or may not happen.
Most of the uses of this are with sets that have a
GET_CODE(..)==CLOBBER. Note that this is set even if the
@@ -133,7 +133,7 @@ enum df_ref_flags
/* If the ref has one of the following two flags set, then the
struct df_ref can be cast to struct df_ref_extract to access
the width and offset fields. */
-
+
/* This flag is set if the ref contains a SIGN_EXTRACT. */
DF_REF_SIGN_EXTRACT = 1 << 8,
@@ -165,14 +165,14 @@ enum df_ref_flags
/* The possible ordering of refs within the df_ref_info. */
enum df_ref_order
{
- /* There is not table. */
+ /* There is not table. */
DF_REF_ORDER_NO_TABLE,
/* There is a table of refs but it is not (or no longer) organized
by one of the following methods. */
DF_REF_ORDER_UNORDERED,
DF_REF_ORDER_UNORDERED_WITH_NOTES,
-
+
/* Organize the table by reg order, all of the refs with regno 0
followed by all of the refs with regno 1 ... . Within all of
the regs for a particular regno, the refs are unordered. */
@@ -183,7 +183,7 @@ enum df_ref_order
DF_REF_ORDER_BY_REG_WITH_NOTES,
/* Organize the refs in insn order. The insns are ordered within a
- block, and the blocks are ordered by FOR_ALL_BB. */
+ block, and the blocks are ordered by FOR_ALL_BB. */
DF_REF_ORDER_BY_INSN,
/* For uses, the refs within eq notes may be added for
@@ -255,7 +255,7 @@ typedef void (*df_verify_solution_end) (void);
struct df_problem {
/* The unique id of the problem. This is used it index into
df->defined_problems to make accessing the problem data easy. */
- unsigned int id;
+ unsigned int id;
enum df_flow_dir dir; /* Dataflow direction. */
df_alloc_function alloc_fun;
df_reset_function reset_fun;
@@ -296,7 +296,7 @@ struct dataflow
unsigned int block_info_size;
/* The pool to allocate the block_info from. */
- alloc_pool block_pool;
+ alloc_pool block_pool;
/* The lr and live problems have their transfer functions recomputed
only if necessary. This is possible for them because, the
@@ -313,7 +313,7 @@ struct dataflow
/* Local flags for some of the problems. */
unsigned int local_flags;
-
+
/* True if this problem of this instance has been initialized. This
is used by the dumpers to keep garbage out of the dumps if, for
debugging a dump is produced before the first call to
@@ -321,7 +321,7 @@ struct dataflow
bool computed;
/* True if the something has changed which invalidates the dataflow
- solutions. Note that this bit is always true for all problems except
+ solutions. Note that this bit is always true for all problems except
lr and live. */
bool solutions_dirty;
@@ -338,7 +338,7 @@ struct dataflow
REG_UNUSED notes. */
struct df_mw_hardreg
{
- rtx mw_reg; /* The multiword hardreg. */
+ rtx mw_reg; /* The multiword hardreg. */
/* These two bitfields are intentionally oversized, in the hope that
accesses to 16-bit fields will usually be quicker. */
ENUM_BITFIELD(df_ref_type) type : 16;
@@ -348,7 +348,7 @@ struct df_mw_hardreg
unsigned int end_regno; /* Last word of the multi word subreg. */
unsigned int mw_order; /* Same as df_ref.ref_order. */
};
-
+
/* Define a register reference structure. One of these is allocated
for every register reference (use or def). Note some register
@@ -364,9 +364,9 @@ struct df_base_ref
int flags : 16; /* Various df_ref_flags. */
rtx reg; /* The register referenced. */
struct df_link *chain; /* Head of def-use, use-def. */
- /* Pointer to the insn info of the containing instruction. FIXME!
+ /* Pointer to the insn info of the containing instruction. FIXME!
Currently this is NULL for artificial refs but this will be used
- when FUDs are added. */
+ when FUDs are added. */
struct df_insn_info *insn_info;
/* For each regno, there are three chains of refs, one for the uses,
the eq_uses and the defs. These chains go thru the refs
@@ -374,7 +374,7 @@ struct df_base_ref
union df_ref_d *next_reg; /* Next ref with same regno and type. */
union df_ref_d *prev_reg; /* Prev ref with same regno and type. */
unsigned int regno; /* The register number referenced. */
- /* Location in the ref table. This is only valid after a call to
+ /* Location in the ref table. This is only valid after a call to
df_maybe_reorganize_[use,def]_refs which is an expensive operation. */
int id;
/* The index at which the operand was scanned in the insn. This is
@@ -384,7 +384,7 @@ struct df_base_ref
/* The three types of df_refs. Note that the df_ref_extract is an
- extension of the df_regular_ref, not the df_base_ref. */
+ extension of the df_regular_ref, not the df_base_ref. */
struct df_artificial_ref
{
struct df_base_ref base;
@@ -436,14 +436,14 @@ struct df_insn_info
df_ref *defs; /* Head of insn-def chain. */
df_ref *uses; /* Head of insn-use chain. */
/* Head of insn-use chain for uses in REG_EQUAL/EQUIV notes. */
- df_ref *eq_uses;
+ df_ref *eq_uses;
struct df_mw_hardreg **mw_hardregs;
/* The logical uid of the insn in the basic block. This is valid
after any call to df_analyze but may rot after insns are added,
deleted or moved. */
- int luid;
+ int luid;
};
-
+
/* These links are used for ref-ref chains. Currently only DEF-USE and
USE-DEF chains can be built by DF. */
struct df_link
@@ -456,11 +456,11 @@ struct df_link
enum df_chain_flags
{
/* Flags that control the building of chains. */
- DF_DU_CHAIN = 1, /* Build DU chains. */
+ DF_DU_CHAIN = 1, /* Build DU chains. */
DF_UD_CHAIN = 2 /* Build UD chains. */
};
-enum df_changeable_flags
+enum df_changeable_flags
{
/* Scanning flags. */
/* Flag to control the running of dce as a side effect of building LR. */
@@ -533,13 +533,13 @@ struct df
the problem local data without having to search the first
array. */
- struct dataflow *problems_in_order[DF_LAST_PROBLEM_PLUS1];
- struct dataflow *problems_by_index[DF_LAST_PROBLEM_PLUS1];
+ struct dataflow *problems_in_order[DF_LAST_PROBLEM_PLUS1];
+ struct dataflow *problems_by_index[DF_LAST_PROBLEM_PLUS1];
/* If not NULL, this subset of blocks of the program to be
considered for analysis. At certain times, this will contain all
the blocks in the function so it cannot be used as an indicator
- of if we are analyzing a subset. See analyze_subset. */
+ of if we are analyzing a subset. See analyze_subset. */
bitmap blocks_to_analyze;
/* The following information is really the problem data for the
@@ -578,12 +578,12 @@ struct df
bitmap insns_to_delete;
bitmap insns_to_rescan;
bitmap insns_to_notes_rescan;
- int *postorder; /* The current set of basic blocks
+ int *postorder; /* The current set of basic blocks
in reverse postorder. */
- int *postorder_inverted; /* The current set of basic blocks
+ int *postorder_inverted; /* The current set of basic blocks
in reverse postorder of inverted CFG. */
int n_blocks; /* The number of blocks in reverse postorder. */
- int n_blocks_inverted; /* The number of blocks
+ int n_blocks_inverted; /* The number of blocks
in reverse postorder of inverted CFG. */
/* An array [FIRST_PSEUDO_REGISTER], indexed by regno, of the number
@@ -624,20 +624,20 @@ struct df
/* Most transformations that wish to use live register analysis will
    use these macros.  This info is the AND of the lr and live sets.  */
-#define DF_LIVE_IN(BB) (DF_LIVE_BB_INFO(BB)->in)
-#define DF_LIVE_OUT(BB) (DF_LIVE_BB_INFO(BB)->out)
+#define DF_LIVE_IN(BB) (DF_LIVE_BB_INFO(BB)->in)
+#define DF_LIVE_OUT(BB) (DF_LIVE_BB_INFO(BB)->out)
/* These macros are used by passes that are not tolerant of
uninitialized variables. This intolerance should eventually
be fixed. */
-#define DF_LR_IN(BB) (DF_LR_BB_INFO(BB)->in)
-#define DF_LR_OUT(BB) (DF_LR_BB_INFO(BB)->out)
+#define DF_LR_IN(BB) (DF_LR_BB_INFO(BB)->in)
+#define DF_LR_OUT(BB) (DF_LR_BB_INFO(BB)->out)
/* These macros are used by passes that are not tolerant of
uninitialized variables. This intolerance should eventually
be fixed. */
-#define DF_BYTE_LR_IN(BB) (DF_BYTE_LR_BB_INFO(BB)->in)
-#define DF_BYTE_LR_OUT(BB) (DF_BYTE_LR_BB_INFO(BB)->out)
+#define DF_BYTE_LR_IN(BB) (DF_BYTE_LR_BB_INFO(BB)->in)
+#define DF_BYTE_LR_OUT(BB) (DF_BYTE_LR_BB_INFO(BB)->out)
/* Macros to access the elements within the ref structure. */
@@ -674,8 +674,8 @@ struct df
#define DF_REF_IS_REG_MARKED(REF) (DF_REF_FLAGS_IS_SET ((REF),DF_REF_REG_MARKER))
#define DF_REF_NEXT_REG(REF) ((REF)->base.next_reg)
#define DF_REF_PREV_REG(REF) ((REF)->base.prev_reg)
-/* The following two macros may only be applied if one of
- DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT is true. */
+/* The following two macros may only be applied if one of
+ DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT is true. */
#define DF_REF_EXTRACT_WIDTH(REF) ((REF)->extract_ref.width)
#define DF_REF_EXTRACT_OFFSET(REF) ((REF)->extract_ref.offset)
#define DF_REF_EXTRACT_MODE(REF) ((REF)->extract_ref.mode)
@@ -695,7 +695,7 @@ struct df
/* Macros to get the refs out of def_info or use_info refs table. If
the focus of the dataflow has been set to some subset of blocks
with df_set_blocks, these macros will only find the uses and defs
- in that subset of blocks.
+ in that subset of blocks.
These macros should be used with care. The def macros are only
usable after a call to df_maybe_reorganize_def_refs and the use
@@ -791,10 +791,10 @@ struct df_scan_bb_info
/* Reaching definitions. All bitmaps are indexed by the id field of
the ref except sparse_kill which is indexed by regno. */
-struct df_rd_bb_info
+struct df_rd_bb_info
{
/* Local sets to describe the basic blocks. */
- bitmap kill;
+ bitmap kill;
bitmap sparse_kill;
bitmap gen; /* The set of defs generated in this block. */
@@ -807,7 +807,7 @@ struct df_rd_bb_info
/* Multiple reaching definitions. All bitmaps are referenced by the
register number. */
-struct df_md_bb_info
+struct df_md_bb_info
{
/* Local sets to describe the basic blocks. */
bitmap gen; /* Partial/conditional definitions live at BB out. */
@@ -823,10 +823,10 @@ struct df_md_bb_info
/* Live registers, a backwards dataflow problem. All bitmaps are
referenced by the register number. */
-struct df_lr_bb_info
+struct df_lr_bb_info
{
/* Local sets to describe the basic blocks. */
- bitmap def; /* The set of registers set in this block
+ bitmap def; /* The set of registers set in this block
- except artificial defs at the top. */
bitmap use; /* The set of registers used in this block. */
@@ -840,7 +840,7 @@ struct df_lr_bb_info
register number. Anded results of the forwards and backward live
info. Note that the forwards live information is not available
separately. */
-struct df_live_bb_info
+struct df_live_bb_info
{
/* Local sets to describe the basic blocks. */
bitmap kill; /* The set of registers unset in this block. Calls,
@@ -856,10 +856,10 @@ struct df_live_bb_info
/* Live registers, a backwards dataflow problem. These bitmaps are
indexed by the df_byte_lr_offset array which is indexed by pseudo. */
-struct df_byte_lr_bb_info
+struct df_byte_lr_bb_info
{
/* Local sets to describe the basic blocks. */
- bitmap def; /* The set of registers set in this block
+ bitmap def; /* The set of registers set in this block
- except artificial defs at the top. */
bitmap use; /* The set of registers used in this block. */
@@ -871,7 +871,7 @@ struct df_byte_lr_bb_info
/* This is used for debugging and for the dumpers to find the latest
instance so that the df info can be added to the dumps. This
- should not be used by regular code. */
+ should not be used by regular code. */
extern struct df *df;
#define df_scan (df->problems_by_index[DF_SCAN])
#define df_rd (df->problems_by_index[DF_RD])
@@ -993,7 +993,7 @@ extern void df_scan_add_problem (void);
extern void df_grow_reg_info (void);
extern void df_grow_insn_info (void);
extern void df_scan_blocks (void);
-extern df_ref df_ref_create (rtx, rtx *, rtx,basic_block,
+extern df_ref df_ref_create (rtx, rtx *, rtx,basic_block,
enum df_ref_type, int ref_flags,
int, int, enum machine_mode);
extern void df_ref_remove (df_ref);
@@ -1023,7 +1023,7 @@ extern bool df_read_modify_subreg_p (rtx);
extern void df_scan_verify (void);
/* Functions defined in df-byte-scan.c. */
-extern bool df_compute_accessed_bytes (df_ref, enum df_mm,
+extern bool df_compute_accessed_bytes (df_ref, enum df_mm,
unsigned int *, unsigned int *);
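
The df.h hunks above are whitespace-only, but they touch the DF_LIVE_IN/OUT and
DF_LR_IN/OUT accessors for per-block register liveness.  As a point of reference
only, and not part of this patch, a pass that has already called df_analyze could
read those sets roughly as follows (bitmap_print is the same dumper used elsewhere
in this change; dump_block_liveness is a hypothetical helper):

/* Sketch only: assumes the df framework is initialized with the LR
   problem and df_analyze has been run.  */
static void
dump_block_liveness (FILE *file)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      fprintf (file, "block %d\n", bb->index);
      /* Registers (hard and pseudo) live on entry to and exit from BB.  */
      bitmap_print (file, DF_LR_IN (bb), "  live-in:  ", "\n");
      bitmap_print (file, DF_LR_OUT (bb), "  live-out: ", "\n");
    }
}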
diff --git a/gcc/dfp.c b/gcc/dfp.c
index 5e1dbcc41eb..90ca2a2a4f2 100644
--- a/gcc/dfp.c
+++ b/gcc/dfp.c
@@ -138,7 +138,7 @@ encode_decimal32 (const struct real_format *fmt ATTRIBUTE_UNUSED,
decContextDefault (&set, DEC_INIT_DECIMAL128);
set.traps = 0;
- decimal_to_decnumber (r, &dn);
+ decimal_to_decnumber (r, &dn);
decimal32FromNumber (&d32, &dn, &set);
memcpy (&image, d32.bytes, sizeof (int32_t));
@@ -163,7 +163,7 @@ decode_decimal32 (const struct real_format *fmt ATTRIBUTE_UNUSED,
memcpy (&d32.bytes, &image, sizeof (int32_t));
decimal32ToNumber (&d32, &dn);
- decimal_from_decnumber (r, &dn, &set);
+ decimal_from_decnumber (r, &dn, &set);
}
/* Encode a real into an IEEE 754 decimal64 type. */
@@ -204,7 +204,7 @@ encode_decimal64 (const struct real_format *fmt ATTRIBUTE_UNUSED,
void
decode_decimal64 (const struct real_format *fmt ATTRIBUTE_UNUSED,
REAL_VALUE_TYPE *r, const long *buf)
-{
+{
decNumber dn;
decimal64 d64;
decContext set;
@@ -229,7 +229,7 @@ decode_decimal64 (const struct real_format *fmt ATTRIBUTE_UNUSED,
}
decimal64ToNumber (&d64, &dn);
- decimal_from_decnumber (r, &dn, &set);
+ decimal_from_decnumber (r, &dn, &set);
}
/* Encode a real into an IEEE 754 decimal128 type. */
@@ -311,7 +311,7 @@ decode_decimal128 (const struct real_format *fmt ATTRIBUTE_UNUSED,
}
decimal128ToNumber (&d128, &dn);
- decimal_from_decnumber (r, &dn, &set);
+ decimal_from_decnumber (r, &dn, &set);
}
/* Helper function to convert from a binary real internal
@@ -365,10 +365,10 @@ decimal_do_compare (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b,
decimal_from_binary (&b1, b);
b = &b1;
}
-
+
/* Convert into decNumber form for comparison operation. */
decContextDefault (&set, DEC_INIT_DECIMAL128);
- set.traps = 0;
+ set.traps = 0;
decimal128ToNumber ((const decimal128 *) a->sig, &dn2);
decimal128ToNumber ((const decimal128 *) b->sig, &dn3);
@@ -382,7 +382,7 @@ decimal_do_compare (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b,
return 0;
else if (decNumberIsNegative (&dn))
return -1;
- else
+ else
return 1;
}
@@ -435,7 +435,7 @@ decimal_round_for_format (const struct real_format *fmt, REAL_VALUE_TYPE *r)
binary and decimal types. */
void
-decimal_real_convert (REAL_VALUE_TYPE *r, enum machine_mode mode,
+decimal_real_convert (REAL_VALUE_TYPE *r, enum machine_mode mode,
const REAL_VALUE_TYPE *a)
{
const struct real_format *fmt = REAL_MODE_FORMAT (mode);
@@ -484,7 +484,7 @@ decimal_do_add (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *op0,
if (subtract_p)
decNumberSubtract (&dn, &dn2, &dn3, &set);
- else
+ else
decNumberAdd (&dn, &dn2, &dn3, &set);
decimal_from_decnumber (r, &dn, &set);
@@ -697,7 +697,7 @@ decimal_real_arithmetic (REAL_VALUE_TYPE *r, enum tree_code code,
void
decimal_real_maxval (REAL_VALUE_TYPE *r, int sign, enum machine_mode mode)
-{
+{
const char *max;
switch (mode)
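
The dfp.c hunks are likewise whitespace-only, but they expose the decNumber-based
encode/decode pattern used for the IEEE 754 decimal formats.  A minimal sketch
assembled from the calls visible above, not part of the patch, and assuming it
lives inside dfp.c where decimal_to_decnumber/decimal_from_decnumber are visible:

/* Round-trip a REAL_VALUE_TYPE through the 32-bit decimal encoding.  */
static void
round_trip_decimal32 (REAL_VALUE_TYPE *r)
{
  decNumber dn;
  decimal32 d32;
  decContext set;

  decContextDefault (&set, DEC_INIT_DECIMAL128);
  set.traps = 0;                            /* No traps, as in encode_decimal32.  */

  decimal_to_decnumber (r, &dn);            /* REAL_VALUE_TYPE -> decNumber.  */
  decimal32FromNumber (&d32, &dn, &set);    /* decNumber -> packed decimal32.  */

  decimal32ToNumber (&d32, &dn);            /* ...and back again.  */
  decimal_from_decnumber (r, &dn, &set);
}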
diff --git a/gcc/diagnostic.c b/gcc/diagnostic.c
index bca74783291..28217a7cc78 100644
--- a/gcc/diagnostic.c
+++ b/gcc/diagnostic.c
@@ -300,7 +300,7 @@ diagnostic_classify_diagnostic (diagnostic_context *context,
DC. This function is *the* subroutine in terms of which front-ends
should implement their specific diagnostic handling modules. The
front-end independent format specifiers are exactly those described
- in the documentation of output_format.
+ in the documentation of output_format.
Return true if a diagnostic was printed, false otherwise. */
bool
@@ -320,9 +320,9 @@ diagnostic_report_diagnostic (diagnostic_context *context,
if (diagnostic->kind == DK_NOTE && flag_compare_debug)
return false;
- if (diagnostic->kind == DK_PEDWARN)
+ if (diagnostic->kind == DK_PEDWARN)
diagnostic->kind = pedantic_warning_kind ();
-
+
if (context->lock > 0)
{
/* If we're reporting an ICE in the middle of some other error,
@@ -344,7 +344,7 @@ diagnostic_report_diagnostic (diagnostic_context *context,
diagnostic->kind = DK_ERROR;
maybe_print_warnings_as_errors_message = true;
}
-
+
if (diagnostic->option_index)
{
/* This tests if the user provided the appropriate -Wfoo or
@@ -384,7 +384,7 @@ diagnostic_report_diagnostic (diagnostic_context *context,
dump_active_plugins (stderr);
}
- if (diagnostic->kind == DK_ICE)
+ if (diagnostic->kind == DK_ICE)
{
#ifndef ENABLE_CHECKING
/* When not checking, ICEs are converted to fatal errors when an
@@ -405,13 +405,13 @@ diagnostic_report_diagnostic (diagnostic_context *context,
diagnostic->message.args_ptr);
}
++diagnostic_kind_count (context, diagnostic->kind);
-
+
saved_format_spec = diagnostic->message.format_spec;
if (context->show_option_requested && diagnostic->option_index)
diagnostic->message.format_spec
= ACONCAT ((diagnostic->message.format_spec,
" [", cl_options[diagnostic->option_index].opt_text, "]", NULL));
-
+
diagnostic->message.locus = &diagnostic->location;
diagnostic->message.abstract_origin = &diagnostic->abstract_origin;
diagnostic->abstract_origin = NULL;
@@ -482,7 +482,7 @@ verbatim (const char *gmsgid, ...)
}
bool
-emit_diagnostic (diagnostic_t kind, location_t location, int opt,
+emit_diagnostic (diagnostic_t kind, location_t location, int opt,
const char *gmsgid, ...)
{
diagnostic_info diagnostic;
@@ -520,7 +520,7 @@ inform (location_t location, const char *gmsgid, ...)
}
/* A warning at INPUT_LOCATION. Use this for code which is correct according
- to the relevant language specification but is likely to be buggy anyway.
+ to the relevant language specification but is likely to be buggy anyway.
Returns true if the warning was printed, false if it was inhibited. */
bool
warning (int opt, const char *gmsgid, ...)
diff --git a/gcc/diagnostic.h b/gcc/diagnostic.h
index c2c08bc8496..0cfadb6fe51 100644
--- a/gcc/diagnostic.h
+++ b/gcc/diagnostic.h
@@ -28,7 +28,7 @@ along with GCC; see the file COPYING3. If not see
/* Constants used to discriminate diagnostics. */
typedef enum
{
-#define DEFINE_DIAGNOSTIC_KIND(K, msgid) K,
+#define DEFINE_DIAGNOSTIC_KIND(K, msgid) K,
#include "diagnostic.def"
#undef DEFINE_DIAGNOSTIC_KIND
DK_LAST_DIAGNOSTIC_KIND
@@ -70,7 +70,7 @@ struct diagnostic_context
   /* True if we should display the "warnings are being treated as errors"
message, usually displayed once per compiler run. */
bool issue_warnings_are_errors_message;
-
+
/* True if it has been requested that warnings be treated as errors. */
bool warning_as_error_requested;
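
The two diagnostic files above change only whitespace; the declarations they touch
are the reporting entry points.  A hedged usage sketch, not part of the patch, with
OPT_Wfoo standing in for a real warning option index:

static void
report_examples (location_t loc)
{
  /* Warning at INPUT_LOCATION, keyed to a -W option so the user can
     disable or promote it; returns true if it was actually printed.  */
  warning (OPT_Wfoo, "suspicious construct");

  /* The general entry point: explicit kind, location and option.  */
  emit_diagnostic (DK_WARNING, loc, OPT_Wfoo,
                   "suspicious construct at this location");
}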
diff --git a/gcc/dominance.c b/gcc/dominance.c
index 78ad48c2543..f93c4dcb2d2 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -717,7 +717,7 @@ set_immediate_dominator (enum cdi_direction dir, basic_block bb,
{
unsigned int dir_index = dom_convert_dir_to_idx (dir);
struct et_node *node = bb->dom[dir_index];
-
+
gcc_assert (dom_computed[dir_index]);
if (node->father)
@@ -758,7 +758,7 @@ get_dominated_by (enum cdi_direction dir, basic_block bb)
/* Returns the list of basic blocks that are immediately dominated (in
direction DIR) by some block between N_REGION ones stored in REGION,
except for blocks in the REGION itself. */
-
+
VEC (basic_block, heap) *
get_dominated_by_region (enum cdi_direction dir, basic_block *region,
unsigned n_region)
@@ -815,7 +815,7 @@ redirect_immediate_dominators (enum cdi_direction dir, basic_block bb,
{
unsigned int dir_index = dom_convert_dir_to_idx (dir);
struct et_node *bb_node, *to_node, *son;
-
+
bb_node = bb->dom[dir_index];
to_node = to->dom[dir_index];
@@ -862,7 +862,7 @@ nearest_common_dominator_for_set (enum cdi_direction dir, bitmap blocks)
unsigned i, first;
bitmap_iterator bi;
basic_block dom;
-
+
first = bitmap_first_set_bit (blocks);
dom = BASIC_BLOCK (first);
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
@@ -881,11 +881,11 @@ nearest_common_dominator_for_set (enum cdi_direction dir, bitmap blocks)
You can view these as bounds for the range of dfs numbers the
nodes in the subtree of the dominator tree rooted at that node
will contain.
-
+
The dominator tree is always a simple acyclic tree, so there are
only three possible relations two nodes in the dominator tree have
to each other:
-
+
1. Node A is above Node B (and thus, Node A dominates node B)
A
@@ -899,10 +899,10 @@ nearest_common_dominator_for_set (enum cdi_direction dir, bitmap blocks)
B, and DFS_Number_Out of A will be >= DFS_Number_Out of B. This is
because we must hit A in the dominator tree *before* B on the walk
down, and we will hit A *after* B on the walk back up
-
+
2. Node A is below node B (and thus, node B dominates node A)
-
-
+
+
B
|
A
@@ -911,10 +911,10 @@ nearest_common_dominator_for_set (enum cdi_direction dir, bitmap blocks)
In the above case, DFS_Number_In of A will be >= DFS_Number_In of
B, and DFS_Number_Out of A will be <= DFS_Number_Out of B.
-
+
This is because we must hit A in the dominator tree *after* B on
the walk down, and we will hit A *before* B on the walk back up
-
+
3. Node A and B are siblings (and thus, neither dominates the other)
C
@@ -937,7 +937,7 @@ nearest_common_dominator_for_set (enum cdi_direction dir, bitmap blocks)
A_Dominates_B (node A, node B)
{
- return DFS_Number_In(A) <= DFS_Number_In(B)
+ return DFS_Number_In(A) <= DFS_Number_In(B)
&& DFS_Number_Out (A) >= DFS_Number_Out(B);
}
@@ -950,10 +950,10 @@ nearest_common_dominator_for_set (enum cdi_direction dir, bitmap blocks)
/* Return TRUE in case BB1 is dominated by BB2. */
bool
dominated_by_p (enum cdi_direction dir, const_basic_block bb1, const_basic_block bb2)
-{
+{
unsigned int dir_index = dom_convert_dir_to_idx (dir);
struct et_node *n1 = bb1->dom[dir_index], *n2 = bb2->dom[dir_index];
-
+
gcc_assert (dom_computed[dir_index]);
if (dom_computed[dir_index] == DOM_OK)
@@ -1389,7 +1389,7 @@ add_to_dominance_info (enum cdi_direction dir, basic_block bb)
gcc_assert (!bb->dom[dir_index]);
n_bbs_in_dom_tree[dir_index]++;
-
+
bb->dom[dir_index] = et_new_tree (bb);
if (dom_computed[dir_index] == DOM_OK)
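
The dominance.c comment above explains the DFS_Number_In/DFS_Number_Out intervals
and gives A_Dominates_B in pseudocode.  The same test written out in C, purely as
an illustration (the real check is dominated_by_p, working on the et-forest nodes):

struct dfs_interval
{
  unsigned dfs_in;   /* DFS_Number_In: assigned when the walk enters the node.  */
  unsigned dfs_out;  /* DFS_Number_Out: assigned when the walk leaves it.  */
};

/* A dominates B exactly when A's interval encloses B's.  */
static bool
interval_dominates_p (const struct dfs_interval *a, const struct dfs_interval *b)
{
  return a->dfs_in <= b->dfs_in && a->dfs_out >= b->dfs_out;
}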
diff --git a/gcc/domwalk.c b/gcc/domwalk.c
index b70a807e7a4..07764eb8047 100644
--- a/gcc/domwalk.c
+++ b/gcc/domwalk.c
@@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "domwalk.h"
#include "ggc.h"
-/* This file implements a generic walker for dominator trees.
+/* This file implements a generic walker for dominator trees.
To understand the dominator walker one must first have a grasp of dominators,
immediate dominators and the dominator tree.
@@ -69,8 +69,8 @@ along with GCC; see the file COPYING3. If not see
| +--9 11
| / |
+--- 10 ---> 12
-
-
+
+
We have a dominator tree which looks like
1
@@ -88,34 +88,34 @@ along with GCC; see the file COPYING3. If not see
9
|
10
-
-
-
+
+
+
The dominator tree is the basis for a number of analysis, transformation
and optimization algorithms that operate on a semi-global basis.
-
+
The dominator walker is a generic routine which visits blocks in the CFG
via a depth first search of the dominator tree. In the example above
the dominator walker might visit blocks in the following order
1, 2, 3, 4, 5, 8, 9, 10, 6, 7, 11, 12.
-
+
The dominator walker has a number of callbacks to perform actions
during the walk of the dominator tree. There are two callbacks
which walk statements, one before visiting the dominator children,
- one after visiting the dominator children. There is a callback
+ one after visiting the dominator children. There is a callback
before and after each statement walk callback. In addition, the
dominator walker manages allocation/deallocation of data structures
which are local to each block visited.
-
+
The dominator walker is meant to provide a generic means to build a pass
which can analyze or transform/optimize a function based on walking
the dominator tree. One simply fills in the dominator walker data
structure with the appropriate callbacks and calls the walker.
-
+
We currently use the dominator walker to prune the set of variables
which might need PHI nodes (which can greatly improve compile-time
performance in some cases).
-
+
We also use the dominator walker to rewrite the function into SSA form
which reduces code duplication since the rewriting phase is inherently
a walk of the dominator tree.
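
The domwalk.c comment describes the walker's shape: a callback before a block's
dominator children are visited, recursion into those children, then a callback
afterwards.  A minimal sketch of that shape, not the real domwalk.h interface
(the callback parameters are placeholders), using the dominator-tree child
iterators first_dom_son/next_dom_son:

static void
walk_dominator_subtree (basic_block bb,
                        void (*before_children) (basic_block),
                        void (*after_children) (basic_block))
{
  basic_block son;

  before_children (bb);
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    walk_dominator_subtree (son, before_children, after_children);
  after_children (bb);
}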
diff --git a/gcc/double-int.c b/gcc/double-int.c
index 93c7ca45c58..a49ce473a7e 100644
--- a/gcc/double-int.c
+++ b/gcc/double-int.c
@@ -1,18 +1,18 @@
/* Operations with long integers.
Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -50,7 +50,7 @@ double_int_mask (unsigned prec)
/* Clears the bits of CST over the precision PREC. If UNS is false, the bits
outside of the precision are set to the sign bit (i.e., the PREC-th one),
otherwise they are set to zero.
-
+
This corresponds to returning the value represented by PREC lowermost bits
of CST, with the given signedness. */
@@ -102,7 +102,7 @@ double_int_sext (double_int cst, unsigned prec)
{
r.low = cst.low & mask.low;
r.high = cst.high & mask.high;
- }
+ }
return r;
}
@@ -450,7 +450,7 @@ mpz_get_double_int (const_tree type, mpz_t val, bool wrap)
double_int res;
if (!wrap)
- {
+ {
mpz_t min, max;
mpz_init (min);
diff --git a/gcc/double-int.h b/gcc/double-int.h
index d35a52cb083..84185890e29 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -1,18 +1,18 @@
/* Operations with long integers.
Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -72,7 +72,7 @@ static inline double_int
shwi_to_double_int (HOST_WIDE_INT cst)
{
double_int r;
-
+
r.low = (unsigned HOST_WIDE_INT) cst;
r.high = cst < 0 ? -1 : 0;
@@ -94,7 +94,7 @@ static inline double_int
uhwi_to_double_int (unsigned HOST_WIDE_INT cst)
{
double_int r;
-
+
r.low = cst;
r.high = 0;
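
The double-int.h hunks include the full bodies of shwi_to_double_int and
uhwi_to_double_int.  A small usage sketch, not part of the patch, showing the
high word each one produces:

static void
double_int_construction_examples (void)
{
  double_int a = shwi_to_double_int (-1);  /* low = all ones, high = -1 (sign-extended).  */
  double_int b = uhwi_to_double_int (42);  /* low = 42, high = 0 (zero-extended).  */

  gcc_assert (a.high == -1);
  gcc_assert (b.high == 0 && b.low == 42);
}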
diff --git a/gcc/dse.c b/gcc/dse.c
index a883bcd3d69..9569a6176e1 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -48,13 +48,13 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
/* This file contains three techniques for performing Dead Store
- Elimination (dse).
+ Elimination (dse).
* The first technique performs dse locally on any base address. It
is based on the cselib which is a local value numbering technique.
This technique is local to a basic block but deals with a fairly
general addresses.
-
+
* The second technique performs dse globally but is restricted to
base addresses that are either constant or are relative to the
frame_pointer.
@@ -84,7 +84,7 @@ along with GCC; see the file COPYING3. If not see
stores, only one store to an address is necessary because those
stores die at the end of the function.
- 3) Set up the global dataflow equations based on processing the
+ 3) Set up the global dataflow equations based on processing the
info parsed in the first step.
4) Solve the dataflow equations.
@@ -138,10 +138,10 @@ along with GCC; see the file COPYING3. If not see
These are expensive and cumbersome in our bitmap formulation so
care has been taken to avoid large vectors filled with 1s. See
the comments in bb_info and in the dataflow confluence functions
- for details.
+ for details.
There are two places for further enhancements to this algorithm:
-
+
1) The original dse which was embedded in a pass called flow also
did local address forwarding. For example in
@@ -183,7 +183,7 @@ along with GCC; see the file COPYING3. If not see
with the same mode and alias set. The backout in this case is a
little more graceful than (a). In this case the slot is unmarked
as being a spill slot and if final address comes out to be based
- off the frame pointer, the global algorithm handles this slot.
+ off the frame pointer, the global algorithm handles this slot.
c) For any pass that may prespill, there is currently no
mechanism to tell the dse pass that the slot being used has the
@@ -203,7 +203,7 @@ static bitmap scratch = NULL;
struct insn_info;
/* This structure holds information about a candidate store. */
-struct store_info
+struct store_info
{
/* False means this is a clobber. */
@@ -216,7 +216,7 @@ struct store_info
true, this is -1. Otherwise, it is the index into the group
table. */
int group_id;
-
+
/* This is the cselib value. */
cselib_val *cse_base;
@@ -285,7 +285,7 @@ static alloc_pool rtx_store_info_pool;
/* This structure holds information about a load. These are only
built for rtx bases. */
-struct read_info
+struct read_info
{
/* The id of the mem group of the base address. */
int group_id;
@@ -310,7 +310,7 @@ static alloc_pool read_info_pool;
/* One of these records is created for each insn. */
-struct insn_info
+struct insn_info
{
/* Set true if the insn contains a store but the insn itself cannot
be deleted. This is set if the insn is a parallel and there is
@@ -384,10 +384,10 @@ typedef struct insn_info *insn_info_t;
static alloc_pool insn_info_pool;
/* The linked list of stores that are under consideration in this
- basic block. */
+ basic block. */
static insn_info_t active_local_stores;
-struct bb_info
+struct bb_info
{
/* Pointer to the insn info for the last insn in the block. These
@@ -410,7 +410,7 @@ struct bb_info
/* The set of store positions that exist in this block before a wild read. */
bitmap gen;
-
+
/* The set of load positions that exist in this block above the
same position of a store. */
bitmap kill;
@@ -455,7 +455,7 @@ static bb_info_t *bb_table;
memory. There are also not many of the rtx bases because they are
very limited in scope. */
-struct group_info
+struct group_info
{
/* The actual base of the address. */
rtx rtx_base;
@@ -475,7 +475,7 @@ struct group_info
/* A mem wrapped around the base pointer for the group in order to
do read dependency. */
rtx base_mem;
-
+
/* Canonized version of base_mem's address. */
rtx canon_base_addr;
@@ -506,8 +506,8 @@ struct group_info
positions in the global bitmaps. It is only created after all of
      the stores have been scanned and we know which ones we
care about. */
- int *offset_map_n, *offset_map_p;
- int offset_map_size_n, offset_map_size_p;
+ int *offset_map_n, *offset_map_p;
+ int offset_map_size_n, offset_map_size_p;
};
typedef struct group_info *group_info_t;
typedef const struct group_info *const_group_info_t;
@@ -527,7 +527,7 @@ static VEC(group_info_t,heap) *rtx_group_vec;
/* This structure holds the set of changes that are being deferred
when removing read operation. See replace_read. */
-struct deferred_change
+struct deferred_change
{
/* The mem that is being replaced. */
@@ -555,7 +555,7 @@ static bitmap clear_alias_sets = NULL;
/* The set of clear_alias_sets that have been disqualified because
there are loads or stores using a different mode than the alias set
- was registered with. */
+ was registered with. */
static bitmap disqualified_clear_alias_sets = NULL;
/* The group that holds all of the clear_alias_sets. */
@@ -578,10 +578,10 @@ static alloc_pool clear_alias_mode_pool;
static bool stores_off_frame_dead_at_return;
/* Counter for stats. */
-static int globally_deleted;
-static int locally_deleted;
-static int spill_deleted;
-
+static int globally_deleted;
+static int locally_deleted;
+static int spill_deleted;
+
static bitmap all_blocks;
/* The number of bits used in the global bitmaps. */
@@ -596,7 +596,7 @@ static bool gate_dse2 (void);
/*----------------------------------------------------------------------------
Zeroth step.
- Initialization.
+ Initialization.
----------------------------------------------------------------------------*/
/* Hashtable callbacks for maintaining the "bases" field of
@@ -605,9 +605,9 @@ static bool gate_dse2 (void);
static int
clear_alias_mode_eq (const void *p1, const void *p2)
{
- const struct clear_alias_mode_holder * h1
+ const struct clear_alias_mode_holder * h1
= (const struct clear_alias_mode_holder *) p1;
- const struct clear_alias_mode_holder * h2
+ const struct clear_alias_mode_holder * h2
= (const struct clear_alias_mode_holder *) p2;
return h1->alias_set == h2->alias_set;
}
@@ -616,7 +616,7 @@ clear_alias_mode_eq (const void *p1, const void *p2)
static hashval_t
clear_alias_mode_hash (const void *p)
{
- const struct clear_alias_mode_holder *holder
+ const struct clear_alias_mode_holder *holder
= (const struct clear_alias_mode_holder *) p;
return holder->alias_set;
}
@@ -629,11 +629,11 @@ clear_alias_set_lookup (alias_set_type alias_set)
{
struct clear_alias_mode_holder tmp_holder;
void **slot;
-
+
tmp_holder.alias_set = alias_set;
slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);
gcc_assert (*slot);
-
+
return (struct clear_alias_mode_holder *) *slot;
}
@@ -664,8 +664,8 @@ invariant_group_base_hash (const void *p)
static group_info_t
get_group_info (rtx base)
{
- struct group_info tmp_gi;
- group_info_t gi;
+ struct group_info tmp_gi;
+ group_info_t gi;
void **slot;
if (base)
@@ -712,7 +712,7 @@ get_group_info (rtx base)
gi->store2_p = BITMAP_ALLOC (NULL);
gi->group_kill = BITMAP_ALLOC (NULL);
gi->process_globally = false;
- gi->frame_related =
+ gi->frame_related =
(base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
gi->offset_map_size_n = 0;
gi->offset_map_size_p = 0;
@@ -737,22 +737,22 @@ dse_step0 (void)
scratch = BITMAP_ALLOC (NULL);
rtx_store_info_pool
- = create_alloc_pool ("rtx_store_info_pool",
+ = create_alloc_pool ("rtx_store_info_pool",
sizeof (struct store_info), 100);
read_info_pool
- = create_alloc_pool ("read_info_pool",
+ = create_alloc_pool ("read_info_pool",
sizeof (struct read_info), 100);
insn_info_pool
- = create_alloc_pool ("insn_info_pool",
+ = create_alloc_pool ("insn_info_pool",
sizeof (struct insn_info), 100);
bb_info_pool
- = create_alloc_pool ("bb_info_pool",
+ = create_alloc_pool ("bb_info_pool",
sizeof (struct bb_info), 100);
rtx_group_info_pool
- = create_alloc_pool ("rtx_group_info_pool",
+ = create_alloc_pool ("rtx_group_info_pool",
sizeof (struct group_info), 100);
deferred_change_pool
- = create_alloc_pool ("deferred_change_pool",
+ = create_alloc_pool ("deferred_change_pool",
sizeof (struct deferred_change), 10);
rtx_group_table = htab_create (11, invariant_group_base_hash,
@@ -764,7 +764,7 @@ dse_step0 (void)
stores_off_frame_dead_at_return = !cfun->stdarg;
init_alias_analysis ();
-
+
if (clear_alias_sets)
clear_alias_group = get_group_info (NULL);
else
@@ -783,7 +783,7 @@ dse_step0 (void)
/* Delete all of the store_info recs from INSN_INFO. */
-static void
+static void
free_store_info (insn_info_t insn_info)
{
store_info_t store_info = insn_info->store_rec;
@@ -832,7 +832,7 @@ replace_inc_dec (rtx *r, void *d)
data->insn);
return -1;
}
-
+
case PRE_DEC:
case POST_DEC:
{
@@ -843,7 +843,7 @@ replace_inc_dec (rtx *r, void *d)
data->insn);
return -1;
}
-
+
case PRE_MODIFY:
case POST_MODIFY:
{
@@ -859,7 +859,7 @@ replace_inc_dec (rtx *r, void *d)
return 0;
}
}
-
+
/* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
and generate an add to replace that. */
@@ -876,7 +876,7 @@ replace_inc_dec_mem (rtx *r, void *d)
data.insn = (rtx) d;
for_each_rtx (&XEXP (x, 0), replace_inc_dec, &data);
-
+
return -1;
}
return 0;
@@ -894,7 +894,7 @@ check_for_inc_dec (rtx insn)
}
-/* Delete the insn and free all of the fields inside INSN_INFO. */
+/* Delete the insn and free all of the fields inside INSN_INFO. */
static void
delete_dead_store_insn (insn_info_t insn_info)
@@ -907,10 +907,10 @@ delete_dead_store_insn (insn_info_t insn_info)
check_for_inc_dec (insn_info->insn);
if (dump_file)
{
- fprintf (dump_file, "Locally deleting insn %d ",
+ fprintf (dump_file, "Locally deleting insn %d ",
INSN_UID (insn_info->insn));
if (insn_info->store_rec->alias_set)
- fprintf (dump_file, "alias set %d\n",
+ fprintf (dump_file, "alias set %d\n",
(int) insn_info->store_rec->alias_set);
else
fprintf (dump_file, "\n");
@@ -918,7 +918,7 @@ delete_dead_store_insn (insn_info_t insn_info)
free_store_info (insn_info);
read_info = insn_info->read_rec;
-
+
while (read_info)
{
read_info_t next = read_info->next;
@@ -961,10 +961,10 @@ set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
store2 = group->store2_p;
ai = i;
}
-
+
if (bitmap_bit_p (store1, ai))
bitmap_set_bit (store2, ai);
- else
+ else
{
bitmap_set_bit (store1, ai);
if (i < 0)
@@ -998,7 +998,7 @@ add_wild_read (bb_info_t bb_info)
pool_free (read_info_pool, *ptr);
*ptr = next;
}
- else
+ else
ptr = &(*ptr)->next;
}
insn_info->wild_read = true;
@@ -1043,8 +1043,8 @@ const_or_frame_p (rtx x)
}
}
-/* Take all reasonable action to put the address of MEM into the form
- that we can do analysis on.
+/* Take all reasonable action to put the address of MEM into the form
+ that we can do analysis on.
The gold standard is to get the address into the form: address +
OFFSET where address is something that rtx_varies_p considers a
@@ -1054,8 +1054,8 @@ const_or_frame_p (rtx x)
obtained from that.
If that fails, we try cselib to get a value we can at least use
- locally. If that fails we return false.
-
+ locally. If that fails we return false.
+
The GROUP_ID is set to -1 for cselib bases and the index of the
group for non_varying bases.
@@ -1065,7 +1065,7 @@ static bool
canon_address (rtx mem,
alias_set_type *alias_set_out,
int *group_id,
- HOST_WIDE_INT *offset,
+ HOST_WIDE_INT *offset,
cselib_val **base)
{
enum machine_mode address_mode
@@ -1085,18 +1085,18 @@ canon_address (rtx mem,
fprintf (dump_file, "found alias set %d\n", (int) alias_set);
if (bitmap_bit_p (clear_alias_sets, alias_set))
{
- struct clear_alias_mode_holder *entry
+ struct clear_alias_mode_holder *entry
= clear_alias_set_lookup (alias_set);
/* If the modes do not match, we cannot process this set. */
if (entry->mode != GET_MODE (mem))
{
if (dump_file)
- fprintf (dump_file,
- "disqualifying alias set %d, (%s) != (%s)\n",
- (int) alias_set, GET_MODE_NAME (entry->mode),
+ fprintf (dump_file,
+ "disqualifying alias set %d, (%s) != (%s)\n",
+ (int) alias_set, GET_MODE_NAME (entry->mode),
GET_MODE_NAME (GET_MODE (mem)));
-
+
bitmap_set_bit (disqualified_clear_alias_sets, alias_set);
return false;
}
@@ -1126,12 +1126,12 @@ canon_address (rtx mem,
if (expanded)
{
/* Use cselib to replace all of the reg references with the full
- expression. This will take care of the case where we have
+ expression. This will take care of the case where we have
r_x = base + offset;
val = *r_x;
-
- by making it into
+
+ by making it into
val = *(base + offset); */
@@ -1199,7 +1199,7 @@ canon_address (rtx mem,
return false;
}
if (dump_file)
- fprintf (dump_file, " varying cselib base=%d offset = %d\n",
+ fprintf (dump_file, " varying cselib base=%d offset = %d\n",
(*base)->value, (int)*offset);
return true;
}
@@ -1344,7 +1344,7 @@ record_store (rtx body, bb_info_t bb_info)
{
if (GET_CODE (XEXP (mem, 0)) == SCRATCH)
{
- if (dump_file)
+ if (dump_file)
fprintf (dump_file, " adding wild read for (clobber (mem:BLK (scratch))\n");
add_wild_read (bb_info);
insn_info->cannot_delete = true;
@@ -1394,15 +1394,15 @@ record_store (rtx body, bb_info_t bb_info)
bitmap store2 = clear_alias_group->store2_p;
gcc_assert (GET_MODE (mem) != BLKmode);
-
+
if (bitmap_bit_p (store1, spill_alias_set))
bitmap_set_bit (store2, spill_alias_set);
- else
+ else
bitmap_set_bit (store1, spill_alias_set);
-
+
if (clear_alias_group->offset_map_size_p < spill_alias_set)
clear_alias_group->offset_map_size_p = spill_alias_set;
-
+
store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
if (dump_file)
@@ -1413,10 +1413,10 @@ record_store (rtx body, bb_info_t bb_info)
{
/* In the restrictive case where the base is a constant or the
frame pointer we can do global analysis. */
-
- group_info_t group
+
+ group_info_t group
= VEC_index (group_info_t, rtx_group_vec, group_id);
-
+
store_info = (store_info_t) pool_alloc (rtx_store_info_pool);
set_usage_bits (group, offset, width);
@@ -1511,7 +1511,7 @@ record_store (rtx body, bb_info_t bb_info)
del = false;
else if (s_info->alias_set)
{
- struct clear_alias_mode_holder *entry
+ struct clear_alias_mode_holder *entry
= clear_alias_set_lookup (s_info->alias_set);
 	      /* Generally, spills cannot be processed if any of the
references to the slot have a different mode. But if
@@ -1528,13 +1528,13 @@ record_store (rtx body, bb_info_t bb_info)
fprintf (dump_file, " trying spill store in insn=%d alias_set=%d\n",
INSN_UID (ptr->insn), (int) s_info->alias_set);
}
- else if ((s_info->group_id == group_id)
+ else if ((s_info->group_id == group_id)
&& (s_info->cse_base == base))
{
HOST_WIDE_INT i;
if (dump_file)
fprintf (dump_file, " trying store in insn=%d gid=%d[%d..%d)\n",
- INSN_UID (ptr->insn), s_info->group_id,
+ INSN_UID (ptr->insn), s_info->group_id,
(int)s_info->begin, (int)s_info->end);
/* Even if PTR won't be eliminated as unneeded, if both
@@ -1582,7 +1582,7 @@ record_store (rtx body, bb_info_t bb_info)
the value of store_info. If it is, set the rhs to NULL to
keep it from being used to remove a load. */
{
- if (canon_true_dependence (s_info->mem,
+ if (canon_true_dependence (s_info->mem,
GET_MODE (s_info->mem),
s_info->mem_addr,
mem, mem_addr, rtx_varies_p))
@@ -1601,20 +1601,20 @@ record_store (rtx body, bb_info_t bb_info)
if (del)
{
insn_info_t insn_to_delete = ptr;
-
+
if (last)
last->next_local_store = ptr->next_local_store;
else
active_local_stores = ptr->next_local_store;
-
+
delete_dead_store_insn (insn_to_delete);
}
else
last = ptr;
-
+
ptr = next;
}
-
+
/* Finish filling in the store_info. */
store_info->next = insn_info->store_rec;
insn_info->store_rec = store_info;
@@ -1651,7 +1651,7 @@ record_store (rtx body, bb_info_t bb_info)
static void
dump_insn_info (const char * start, insn_info_t insn_info)
{
- fprintf (dump_file, "%s insn=%d %s\n", start,
+ fprintf (dump_file, "%s insn=%d %s\n", start,
INSN_UID (insn_info->insn),
insn_info->store_rec ? "has store" : "naked");
}
@@ -1876,7 +1876,7 @@ get_stored_val (store_info_t store_info, enum machine_mode read_mode,
...
... <- A
- and change it into
+ and change it into
r2 <- r1
A <- r1
...
@@ -1903,7 +1903,7 @@ get_stored_val (store_info_t store_info, enum machine_mode read_mode,
went ok. */
static bool
-replace_read (store_info_t store_info, insn_info_t store_insn,
+replace_read (store_info_t store_info, insn_info_t store_insn,
read_info_t read_info, insn_info_t read_insn, rtx *loc,
bitmap regs_live)
{
@@ -1958,17 +1958,17 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
for (this_insn = insns; this_insn != NULL_RTX; this_insn = NEXT_INSN (this_insn))
note_stores (PATTERN (this_insn), look_for_hardregs, regs_set);
-
+
bitmap_and_into (regs_set, regs_live);
if (!bitmap_empty_p (regs_set))
{
if (dump_file)
{
- fprintf (dump_file,
+ fprintf (dump_file,
"abandoning replacement because sequence clobbers live hardregs:");
df_print_regset (dump_file, regs_set);
}
-
+
BITMAP_FREE (regs_set);
return false;
}
@@ -1979,25 +1979,25 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
{
deferred_change_t deferred_change =
(deferred_change_t) pool_alloc (deferred_change_pool);
-
+
/* Insert this right before the store insn where it will be safe
from later insns that might change it before the read. */
emit_insn_before (insns, store_insn->insn);
-
+
/* And now for the kludge part: cselib croaks if you just
return at this point. There are two reasons for this:
-
+
1) Cselib has an idea of how many pseudos there are and
that does not include the new ones we just added.
-
+
2) Cselib does not know about the move insn we added
above the store_info, and there is no way to tell it
about it, because it has "moved on".
-
+
Problem (1) is fixable with a certain amount of engineering.
Problem (2) is requires starting the bb from scratch. This
could be expensive.
-
+
So we are just going to have to lie. The move/extraction
insns are not really an issue, cselib did not see them. But
the use of the new pseudo read_insn is a real problem because
@@ -2006,13 +2006,13 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
and when we are finished with the block, we undo this. We
keep a table of mems to get rid of. At the end of the basic
block we can put them back. */
-
+
*loc = read_info->mem;
deferred_change->next = deferred_change_list;
deferred_change_list = deferred_change;
deferred_change->loc = loc;
deferred_change->reg = read_reg;
-
+
/* Get rid of the read_info, from the point of view of the
rest of dse, play like this read never happened. */
read_insn->read_rec = read_info->next;
@@ -2025,7 +2025,7 @@ replace_read (store_info_t store_info, insn_info_t store_insn,
}
return true;
}
- else
+ else
{
if (dump_file)
{
@@ -2050,7 +2050,7 @@ check_mem_read_rtx (rtx *loc, void *data)
HOST_WIDE_INT offset = 0;
HOST_WIDE_INT width = 0;
alias_set_type spill_alias_set = 0;
- cselib_val *base = NULL;
+ cselib_val *base = NULL;
int group_id;
read_info_t read_info;
@@ -2132,7 +2132,7 @@ check_mem_read_rtx (rtx *loc, void *data)
/* Skip the clobbers. */
while (!store_info->is_set)
store_info = store_info->next;
-
+
if (store_info->alias_set == spill_alias_set)
{
if (dump_file)
@@ -2154,7 +2154,7 @@ check_mem_read_rtx (rtx *loc, void *data)
the frame pointer and offset is a constant. */
insn_info_t i_ptr = active_local_stores;
insn_info_t last = NULL;
-
+
if (dump_file)
{
if (width == -1)
@@ -2169,35 +2169,35 @@ check_mem_read_rtx (rtx *loc, void *data)
{
bool remove = false;
store_info_t store_info = i_ptr->store_rec;
-
+
/* Skip the clobbers. */
while (!store_info->is_set)
store_info = store_info->next;
-
+
/* There are three cases here. */
if (store_info->group_id < 0)
/* We have a cselib store followed by a read from a
const base. */
- remove
- = canon_true_dependence (store_info->mem,
+ remove
+ = canon_true_dependence (store_info->mem,
GET_MODE (store_info->mem),
store_info->mem_addr,
mem, mem_addr, rtx_varies_p);
-
+
else if (group_id == store_info->group_id)
{
/* This is a block mode load. We may get lucky and
canon_true_dependence may save the day. */
if (width == -1)
- remove
- = canon_true_dependence (store_info->mem,
+ remove
+ = canon_true_dependence (store_info->mem,
GET_MODE (store_info->mem),
store_info->mem_addr,
mem, mem_addr, rtx_varies_p);
-
+
/* If this read is just reading back something that we just
stored, rewrite the read. */
- else
+ else
{
if (store_info->rhs
&& offset >= store_info->begin
@@ -2211,17 +2211,17 @@ check_mem_read_rtx (rtx *loc, void *data)
/* The bases are the same, just see if the offsets
overlap. */
- if ((offset < store_info->end)
+ if ((offset < store_info->end)
&& (offset + width > store_info->begin))
remove = true;
}
}
-
- /* else
+
+ /* else
The else case that is missing here is that the
bases are constant but different. There is nothing
to do here because there is no overlap. */
-
+
if (remove)
{
if (dump_file)
@@ -2237,7 +2237,7 @@ check_mem_read_rtx (rtx *loc, void *data)
i_ptr = i_ptr->next_local_store;
}
}
- else
+ else
{
insn_info_t i_ptr = active_local_stores;
insn_info_t last = NULL;
@@ -2252,7 +2252,7 @@ check_mem_read_rtx (rtx *loc, void *data)
{
bool remove = false;
store_info_t store_info = i_ptr->store_rec;
-
+
if (dump_file)
fprintf (dump_file, " processing cselib load against insn %d\n",
INSN_UID (i_ptr->insn));
@@ -2276,16 +2276,16 @@ check_mem_read_rtx (rtx *loc, void *data)
return 0;
if (!store_info->alias_set)
- remove = canon_true_dependence (store_info->mem,
+ remove = canon_true_dependence (store_info->mem,
GET_MODE (store_info->mem),
store_info->mem_addr,
mem, mem_addr, rtx_varies_p);
-
+
if (remove)
{
if (dump_file)
dump_insn_info ("removing from active", i_ptr);
-
+
if (last)
last->next_local_store = i_ptr->next_local_store;
else
@@ -2299,7 +2299,7 @@ check_mem_read_rtx (rtx *loc, void *data)
return 0;
}
-/* A for_each_rtx callback in which DATA points the INSN_INFO for
+/* A for_each_rtx callback in which DATA points the INSN_INFO for
as check_mem_read_rtx. Nullify the pointer if i_m_r_m_r returns
true for any part of *LOC. */
@@ -2389,7 +2389,7 @@ scan_insn (bb_info_t bb_info, rtx insn)
insn_info->prev_insn = bb_info->last_insn;
insn_info->insn = insn;
bb_info->last_insn = insn_info;
-
+
if (DEBUG_INSN_P (insn))
{
insn_info->cannot_delete = true;
@@ -2488,7 +2488,7 @@ scan_insn (bb_info_t bb_info, rtx insn)
{
if (dump_file)
dump_insn_info ("removing from active", i_ptr);
-
+
if (last)
last->next_local_store = i_ptr->next_local_store;
else
@@ -2538,7 +2538,7 @@ scan_insn (bb_info_t bb_info, rtx insn)
|| (RTX_FRAME_RELATED_P (insn))
|| find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX))
insn_info->cannot_delete = true;
-
+
body = PATTERN (insn);
if (GET_CODE (body) == PARALLEL)
{
@@ -2550,7 +2550,7 @@ scan_insn (bb_info_t bb_info, rtx insn)
mems_found += record_store (body, bb_info);
if (dump_file)
- fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
+ fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
mems_found, insn_info->cannot_delete ? "true" : "false");
/* If we found some sets of mems, add it into the active_local_stores so
@@ -2586,7 +2586,7 @@ remove_useless_values (cselib_val *base)
being deleted, then the insn can not be deleted. */
while (store_info)
{
- if ((store_info->group_id == -1)
+ if ((store_info->group_id == -1)
&& (store_info->cse_base == base))
{
del = true;
@@ -2605,7 +2605,7 @@ remove_useless_values (cselib_val *base)
}
else
last = insn_info;
-
+
insn_info = insn_info->next_local_store;
}
}
@@ -2618,7 +2618,7 @@ dse_step1 (void)
{
basic_block bb;
bitmap regs_live = BITMAP_ALLOC (NULL);
-
+
cselib_init (false);
all_blocks = BITMAP_ALLOC (NULL);
bitmap_set_bit (all_blocks, ENTRY_BLOCK);
@@ -2644,11 +2644,11 @@ dse_step1 (void)
rtx insn;
cse_store_info_pool
- = create_alloc_pool ("cse_store_info_pool",
+ = create_alloc_pool ("cse_store_info_pool",
sizeof (struct store_info), 100);
active_local_stores = NULL;
cselib_clear_table ();
-
+
/* Scan the insns. */
FOR_BB_INSNS (bb, insn)
{
@@ -2658,7 +2658,7 @@ dse_step1 (void)
if (INSN_P (insn))
df_simulate_one_insn_forwards (bb, insn, regs_live);
}
-
+
/* This is something of a hack, because the global algorithm
is supposed to take care of the case where stores go dead
at the end of the function. However, the global
@@ -2684,10 +2684,10 @@ dse_step1 (void)
store_info = store_info->next;
if (store_info->alias_set && !i_ptr->cannot_delete)
delete_dead_store_insn (i_ptr);
- else
+ else
if (store_info->group_id >= 0)
{
- group_info_t group
+ group_info_t group
= VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
if (group->frame_related && !i_ptr->cannot_delete)
delete_dead_store_insn (i_ptr);
@@ -2789,7 +2789,7 @@ dse_step2_init (void)
for the position. We do this because the stack related
 	     stores can be deleted if there is no read between them and
the end of the function.
-
+
To make this work in the current framework, we take the stack
related bases add all of the bits from store1 into store2.
 	     This has the effect of making them eligible even if there is
@@ -2800,7 +2800,7 @@ dse_step2_init (void)
bitmap_ior_into (group->store2_n, group->store1_n);
bitmap_ior_into (group->store2_p, group->store1_p);
if (dump_file)
- fprintf (dump_file, "group %d is frame related ", i);
+ fprintf (dump_file, "group %d is frame related ", i);
}
group->offset_map_size_n++;
@@ -2810,7 +2810,7 @@ dse_step2_init (void)
group->process_globally = false;
if (dump_file)
{
- fprintf (dump_file, "group %d(%d+%d): ", i,
+ fprintf (dump_file, "group %d(%d+%d): ", i,
(int)bitmap_count_bits (group->store2_n),
(int)bitmap_count_bits (group->store2_p));
bitmap_print (dump_file, group->store2_n, "n ", " ");
@@ -2851,7 +2851,7 @@ dse_step2_nospill (void)
}
EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
{
- bitmap_set_bit (group->group_kill, current_position);
+ bitmap_set_bit (group->group_kill, current_position);
group->offset_map_p[j] = current_position++;
group->process_globally = true;
}
@@ -2875,24 +2875,24 @@ dse_step2_spill (void)
if (dump_file)
{
- bitmap_print (dump_file, clear_alias_sets,
+ bitmap_print (dump_file, clear_alias_sets,
"clear alias sets ", "\n");
- bitmap_print (dump_file, disqualified_clear_alias_sets,
+ bitmap_print (dump_file, disqualified_clear_alias_sets,
"disqualified clear alias sets ", "\n");
}
memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
bitmap_clear (group->group_kill);
-
+
/* Remove the disqualified positions from the store2_p set. */
bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);
-
+
/* We do not need to process the store2_n set because
alias_sets are always positive. */
EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
{
- bitmap_set_bit (group->group_kill, current_position);
+ bitmap_set_bit (group->group_kill, current_position);
group->offset_map_p[j] = current_position++;
group->process_globally = true;
}
@@ -2904,25 +2904,25 @@ dse_step2_spill (void)
/*----------------------------------------------------------------------------
Third step.
-
+
Build the bit vectors for the transfer functions.
----------------------------------------------------------------------------*/
/* Note that this is NOT a general purpose function. Any mem that has
    an alias set registered here is expected to be COMPLETELY unaliased:
- i.e it's addresses are not and need not be examined.
+ i.e it's addresses are not and need not be examined.
It is known that all references to this address will have this
alias set and there are NO other references to this address in the
- function.
+ function.
Currently the only place that is known to be clean enough to use
- this interface is the code that assigns the spill locations.
+ this interface is the code that assigns the spill locations.
All of the mems that have alias_sets registered are subjected to a
very powerful form of dse where function calls, volatile reads and
- writes, and reads from random location are not taken into account.
+ writes, and reads from random location are not taken into account.
It is also assumed that these locations go dead when the function
returns. This assumption could be relaxed if there were found to
@@ -2939,8 +2939,8 @@ dse_step2_spill (void)
elements. So when we see a mode mismatch, we just bail. */
-void
-dse_record_singleton_alias_set (alias_set_type alias_set,
+void
+dse_record_singleton_alias_set (alias_set_type alias_set,
enum machine_mode mode)
{
struct clear_alias_mode_holder tmp_holder;
@@ -2958,7 +2958,7 @@ dse_record_singleton_alias_set (alias_set_type alias_set,
disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
clear_alias_mode_eq, NULL);
- clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
+ clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
sizeof (struct clear_alias_mode_holder), 100);
}
@@ -2978,7 +2978,7 @@ dse_record_singleton_alias_set (alias_set_type alias_set,
/* Remove ALIAS_SET from the sets of stack slots being considered. */
-void
+void
dse_invalidate_singleton_alias_set (alias_set_type alias_set)
{
if ((!gate_dse()) || !alias_set)
@@ -3013,13 +3013,13 @@ get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
/* Process the STORE_INFOs into the bitmaps into GEN and KILL. KILL
may be NULL. */
-static void
+static void
scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
{
while (store_info)
{
HOST_WIDE_INT i;
- group_info_t group_info
+ group_info_t group_info
= VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
if (group_info->process_globally)
for (i = store_info->begin; i < store_info->end; i++)
@@ -3040,14 +3040,14 @@ scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
/* Process the STORE_INFOs into the bitmaps into GEN and KILL. KILL
may be NULL. */
-static void
+static void
scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
{
while (store_info)
{
if (store_info->alias_set)
{
- int index = get_bitmap_index (clear_alias_group,
+ int index = get_bitmap_index (clear_alias_group,
store_info->alias_set);
if (index != 0)
{
@@ -3079,7 +3079,7 @@ scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
{
if (kill)
bitmap_ior_into (kill, group->group_kill);
- bitmap_and_compl_into (gen, group->group_kill);
+ bitmap_and_compl_into (gen, group->group_kill);
}
}
@@ -3124,7 +3124,7 @@ scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
something else with a different constant
base. */
if ((read_info->group_id < 0)
- && canon_true_dependence (group->base_mem,
+ && canon_true_dependence (group->base_mem,
QImode,
group->canon_base_addr,
read_info->mem, NULL_RTX,
@@ -3137,7 +3137,7 @@ scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
}
}
}
-
+
read_info = read_info->next;
}
}
@@ -3152,7 +3152,7 @@ scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
{
if (read_info->alias_set)
{
- int index = get_bitmap_index (clear_alias_group,
+ int index = get_bitmap_index (clear_alias_group,
read_info->alias_set);
if (index != 0)
{
@@ -3161,7 +3161,7 @@ scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
bitmap_clear_bit (gen, index);
}
}
-
+
read_info = read_info->next;
}
}
@@ -3214,7 +3214,7 @@ dse_step3_scan (bool for_spills, basic_block bb)
insn_info = bb_info->last_insn;
else
insn_info = find_insn_before_first_wild_read (bb_info);
-
+
/* In the spill case or in the no_spill case if there is no wild
read in the block, we will need a kill set. */
if (insn_info == bb_info->last_insn)
@@ -3224,7 +3224,7 @@ dse_step3_scan (bool for_spills, basic_block bb)
else
bb_info->kill = BITMAP_ALLOC (NULL);
}
- else
+ else
if (bb_info->kill)
BITMAP_FREE (bb_info->kill);
@@ -3234,7 +3234,7 @@ dse_step3_scan (bool for_spills, basic_block bb)
this phase. */
if (insn_info->insn && INSN_P (insn_info->insn))
{
- /* Process the read(s) last. */
+ /* Process the read(s) last. */
if (for_spills)
{
scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
@@ -3245,7 +3245,7 @@ dse_step3_scan (bool for_spills, basic_block bb)
scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
}
- }
+ }
insn_info = insn_info->prev_insn;
}
@@ -3260,12 +3260,12 @@ dse_step3_exit_block_scan (bb_info_t bb_info)
{
/* The gen set is all 0's for the exit block except for the
frame_pointer_group. */
-
+
if (stores_off_frame_dead_at_return)
{
unsigned int i;
group_info_t group;
-
+
for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
{
if (group->process_globally && group->frame_related)
@@ -3290,9 +3290,9 @@ mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
{
RESET_BIT (unreachable_blocks, bb->index);
FOR_EACH_EDGE (e, ei, bb->preds)
- {
+ {
mark_reachable_blocks (unreachable_blocks, e->src);
- }
+ }
}
}
@@ -3306,7 +3306,7 @@ dse_step3 (bool for_spills)
sbitmap_iterator sbi;
bitmap all_ones = NULL;
unsigned int i;
-
+
sbitmap_ones (unreachable_blocks);
FOR_ALL_BB (bb)
@@ -3418,7 +3418,7 @@ dse_confluence_n (edge e)
/* Propagate the info from the out to the in set of BB_INDEX's basic
- block. There are three cases:
+ block. There are three cases:
1) The block has no kill set. In this case the kill set is all
ones. It does not matter what the out set of the block is, none of
@@ -3444,12 +3444,12 @@ dse_transfer_function (int bb_index)
{
/* Case 3 above. */
if (bb_info->in)
- return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
+ return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
bb_info->out, bb_info->kill);
else
{
bb_info->in = BITMAP_ALLOC (NULL);
- bitmap_ior_and_compl (bb_info->in, bb_info->gen,
+ bitmap_ior_and_compl (bb_info->in, bb_info->gen,
bb_info->out, bb_info->kill);
return true;
}
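
The hunk above is whitespace-only, but dse_transfer_function is the core of step 4:
a store position reaches the top of a block if it is generated locally or if it
reaches the bottom and is not killed, i.e. in = gen | (out & ~kill).  The whole
transfer reduces to the one bitmap primitive already used above; a sketch, not
part of the patch:

/* Returns true when IN changed, which tells the iterative solver to
   keep propagating.  */
static bool
dse_transfer_sketch (bitmap in, bitmap gen, bitmap out, bitmap kill)
{
  return bitmap_ior_and_compl (in, gen, out, kill);
}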
@@ -3478,9 +3478,9 @@ dse_transfer_function (int bb_index)
static void
dse_step4 (void)
{
- df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
- dse_confluence_n, dse_transfer_function,
- all_blocks, df_get_postorder (DF_BACKWARD),
+ df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
+ dse_confluence_n, dse_transfer_function,
+ all_blocks, df_get_postorder (DF_BACKWARD),
df_get_n_blocks (DF_BACKWARD));
if (dump_file)
{
@@ -3543,7 +3543,7 @@ dse_step5_nospill (void)
/* There may have been code deleted by the dce pass run before
this phase. */
- if (insn_info->insn
+ if (insn_info->insn
&& INSN_P (insn_info->insn)
&& (!insn_info->cannot_delete)
&& (!bitmap_empty_p (v)))
@@ -3552,7 +3552,7 @@ dse_step5_nospill (void)
/* Try to delete the current insn. */
deleted = true;
-
+
/* Skip the clobbers. */
while (!store_info->is_set)
store_info = store_info->next;
@@ -3562,19 +3562,19 @@ dse_step5_nospill (void)
else
{
HOST_WIDE_INT i;
- group_info_t group_info
+ group_info_t group_info
= VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
-
+
for (i = store_info->begin; i < store_info->end; i++)
{
int index = get_bitmap_index (group_info, i);
-
+
if (dump_file)
- fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
+ fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
if (index == 0 || !bitmap_bit_p (v, index))
{
if (dump_file)
- fprintf (dump_file, "failing at i = %d\n", (int)i);
+ fprintf (dump_file, "failing at i = %d\n", (int)i);
deleted = false;
break;
}
@@ -3594,7 +3594,7 @@ dse_step5_nospill (void)
/* We do want to process the local info if the insn was
deleted. For instance, if the insn did a wild read, we
no longer need to trash the info. */
- if (insn_info->insn
+ if (insn_info->insn
&& INSN_P (insn_info->insn)
&& (!deleted))
{
@@ -3612,7 +3612,7 @@ dse_step5_nospill (void)
scan_reads_nospill (insn_info, v, NULL);
}
}
-
+
insn_info = insn_info->prev_insn;
}
}
@@ -3634,7 +3634,7 @@ dse_step5_spill (void)
bool deleted = false;
/* There may have been code deleted by the dce pass run before
this phase. */
- if (insn_info->insn
+ if (insn_info->insn
&& INSN_P (insn_info->insn)
&& (!insn_info->cannot_delete)
&& (!bitmap_empty_p (v)))
@@ -3642,12 +3642,12 @@ dse_step5_spill (void)
/* Try to delete the current insn. */
store_info_t store_info = insn_info->store_rec;
deleted = true;
-
+
while (store_info)
{
if (store_info->alias_set)
{
- int index = get_bitmap_index (clear_alias_group,
+ int index = get_bitmap_index (clear_alias_group,
store_info->alias_set);
if (index == 0 || !bitmap_bit_p (v, index))
{
@@ -3655,14 +3655,14 @@ dse_step5_spill (void)
break;
}
}
- else
+ else
deleted = false;
store_info = store_info->next;
}
if (deleted && dbg_cnt (dse))
{
if (dump_file)
- fprintf (dump_file, "Spill deleting insn %d\n",
+ fprintf (dump_file, "Spill deleting insn %d\n",
INSN_UID (insn_info->insn));
check_for_inc_dec (insn_info->insn);
delete_insn (insn_info->insn);
@@ -3670,15 +3670,15 @@ dse_step5_spill (void)
insn_info->insn = NULL;
}
}
-
- if (insn_info->insn
+
+ if (insn_info->insn
&& INSN_P (insn_info->insn)
&& (!deleted))
{
scan_stores_spill (insn_info->store_rec, v, NULL);
scan_reads_spill (insn_info->read_rec, v, NULL);
}
-
+
insn_info = insn_info->prev_insn;
}
}
@@ -3739,16 +3739,16 @@ dse_step6 (void)
/*----------------------------------------------------------------------------
Seventh step.
- Destroy everything left standing.
+ Destroy everything left standing.
----------------------------------------------------------------------------*/
-static void
+static void
dse_step7 (bool global_done)
{
unsigned int i;
group_info_t group;
basic_block bb;
-
+
for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
{
free (group->offset_map_n);
@@ -3834,7 +3834,7 @@ rest_of_handle_dse (void)
pass to process the spills. These are special in that they are
totally transparent, i.e, there is no aliasing issues that need
to be considered. This means that the wild reads that kill
- everything else do not apply here. */
+ everything else do not apply here. */
if (clear_alias_sets && dse_step2_spill ())
{
if (!did_global)
diff --git a/gcc/dwarf2asm.c b/gcc/dwarf2asm.c
index c779e07609e..3657d0be89b 100644
--- a/gcc/dwarf2asm.c
+++ b/gcc/dwarf2asm.c
@@ -268,7 +268,7 @@ dw2_asm_output_addr_rtx (int size, rtx addr,
If COMMENT is not NULL and comments in the debug information
have been requested by the user, append the given COMMENT
to the generated output. */
-
+
void
dw2_asm_output_nstring (const char *str, size_t orig_len,
const char *comment, ...)
@@ -528,7 +528,7 @@ eh_data_format_name (int format)
};
gcc_assert (format >= 0 && format < 0x100 && format_names[format]);
-
+
return format_names[format];
#else
}
diff --git a/gcc/dwarf2asm.h b/gcc/dwarf2asm.h
index 04c4320c67a..e7be9bdbaee 100644
--- a/gcc/dwarf2asm.h
+++ b/gcc/dwarf2asm.h
@@ -30,7 +30,7 @@ extern void dw2_asm_output_delta (int, const char *, const char *,
const char *, ...)
ATTRIBUTE_NULL_PRINTF_4;
-extern void dw2_asm_output_offset (int, const char *, section *,
+extern void dw2_asm_output_offset (int, const char *, section *,
const char *, ...)
ATTRIBUTE_NULL_PRINTF_4;
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index 780117fbd83..3a6053e529d 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -804,7 +804,7 @@ add_fde_cfi (const char *label, dw_cfi_ref cfi)
/* Emit the state save. */
emit_cfa_remember = false;
- cfi_remember = new_cfi ();
+ cfi_remember = new_cfi ();
cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
add_fde_cfi (label, cfi_remember);
}
@@ -1042,7 +1042,7 @@ def_cfa_1 (const char *label, dw_cfa_location *loc_p)
if (loc.reg == old_cfa.reg && !loc.indirect)
{
/* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
- the CFA register did not change but the offset did. The data
+ the CFA register did not change but the offset did. The data
factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
in the assembler via the .cfi_def_cfa_offset directive. */
if (loc.offset < 0)
@@ -1882,7 +1882,7 @@ dwarf2out_frame_debug_cfa_offset (rtx set, const char *label)
addr = XEXP (set, 0);
gcc_assert (MEM_P (addr));
addr = XEXP (addr, 0);
-
+
/* As documented, only consider extremely simple addresses. */
switch (GET_CODE (addr))
{
@@ -2870,7 +2870,7 @@ dwarf2out_begin_epilogue (rtx insn)
void
dwarf2out_frame_debug_restore_state (void)
{
- dw_cfi_ref cfi = new_cfi ();
+ dw_cfi_ref cfi = new_cfi ();
const char *label = dwarf2out_cfi_label (false);
cfi->dw_cfi_opc = DW_CFA_restore_state;
@@ -9605,7 +9605,7 @@ htab_decl_del (void *what)
free (entry);
}
-/* Copy DIE and its ancestors, up to, but not including, the compile unit
+/* Copy DIE and its ancestors, up to, but not including, the compile unit
or type unit entry, to a new tree. Adds the new tree to UNIT and returns
a pointer to the copy of DIE. If DECL_TABLE is provided, it is used
to check if the ancestor has already been copied into UNIT. */
@@ -14044,7 +14044,7 @@ add_loc_descr_to_each (dw_loc_list_ref list, dw_loc_descr_ref ref)
TODO: We handle only simple cases of RET or LIST having at most one
   element. General case would involve sorting the lists in program order
- and merging them that will need some additional work.
+ and merging them that will need some additional work.
Adding that will improve quality of debug info especially for SRA-ed
structures. */
@@ -14432,7 +14432,7 @@ loc_list_from_tree (tree loc, int want_address)
if (bytepos > 0)
add_loc_descr_to_each (list_ret, new_loc_descr (DW_OP_plus_uconst, bytepos, 0));
else if (bytepos < 0)
- loc_list_plus_const (list_ret, bytepos);
+ loc_list_plus_const (list_ret, bytepos);
have_address = 1;
break;
@@ -15041,11 +15041,11 @@ add_data_member_location_attribute (dw_die_ref die, tree decl)
else
{
enum dwarf_location_atom op;
-
+
/* The DWARF2 standard says that we should assume that the structure
address is already on the stack, so we can specify a structure
field address by using DW_OP_plus_uconst. */
-
+
#ifdef MIPS_DEBUGGING_INFO
/* ??? The SGI dwarf reader does not handle the DW_OP_plus_uconst
operator correctly. It works only if we leave the offset on the
@@ -15054,7 +15054,7 @@ add_data_member_location_attribute (dw_die_ref die, tree decl)
#else
op = DW_OP_plus_uconst;
#endif
-
+
loc_descr = new_loc_descr (op, offset, 0);
}
}
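
To make the DW_OP_plus_uconst comment above concrete: with the structure address already on the DWARF expression stack, a member at byte offset N is reached by the single operation DW_OP_plus_uconst N, encoded as the opcode byte followed by N as a ULEB128. A hedged standalone sketch of that encoding (opcode value taken from the DWARF standard; this is not GCC's emitter):

#include <stdio.h>

#define DW_OP_plus_uconst 0x23   /* opcode from the DWARF standard */

/* Encode VALUE as a ULEB128 into BUF; return the number of bytes used.  */
static int
encode_uleb128 (unsigned long value, unsigned char *buf)
{
  int n = 0;
  do
    {
      unsigned char byte = value & 0x7f;
      value >>= 7;
      if (value)
        byte |= 0x80;   /* more bytes follow */
      buf[n++] = byte;
    }
  while (value);
  return n;
}

int
main (void)
{
  unsigned char expr[16];
  int len = 0, i;

  expr[len++] = DW_OP_plus_uconst;
  len += encode_uleb128 (200, expr + len);   /* member at byte offset 200 */

  for (i = 0; i < len; i++)
    printf ("%02x ", expr[i]);
  printf ("\n");   /* prints: 23 c8 01 */
  return 0;
}
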
@@ -16889,7 +16889,7 @@ gen_array_type_die (tree type, dw_die_ref context_die)
add_subscript_info (array_die, type, collapse_nested_arrays);
/* Add representation of the type of the elements of this array type and
- emit the corresponding DIE if we haven't done it already. */
+ emit the corresponding DIE if we haven't done it already. */
element_type = TREE_TYPE (type);
if (collapse_nested_arrays)
while (TREE_CODE (element_type) == ARRAY_TYPE)
@@ -17834,7 +17834,7 @@ gen_subprogram_die (tree decl, dw_die_ref context_die)
of the pack. Note that the set of pack arguments can be empty.
In that case, the DW_TAG_GNU_formal_parameter_pack DIE will not have any
children DIE.
-
+
Otherwise, we just consider the parameters of DECL. */
while (generic_decl_parm || parm)
{
@@ -19922,7 +19922,7 @@ gen_remaining_tmpl_value_param_die_attribute (void)
/* Replace DW_AT_name for the decl with name. */
-
+
static void
dwarf2out_set_name (tree decl, tree name)
{
@@ -20577,7 +20577,7 @@ prune_unused_types_mark (dw_die_ref die, int dokids)
breaking out types into comdat sections, do this
for all type definitions. */
if (die->die_tag == DW_TAG_array_type
- || (dwarf_version >= 4
+ || (dwarf_version >= 4
&& is_type_die (die) && ! is_declaration_die (die)))
FOR_EACH_CHILD (die, c, prune_unused_types_mark (c, 1));
else
diff --git a/gcc/ebitmap.c b/gcc/ebitmap.c
index a18a21efba8..472ff2e57ae 100644
--- a/gcc/ebitmap.c
+++ b/gcc/ebitmap.c
@@ -88,7 +88,7 @@ ebitmap_last_set_bit (ebitmap map)
unsigned int i = 0;
ebitmap_iterator ebi;
bool foundbit = false;
-
+
/* This is not the fastest way to do this, we could simply look for
the popcount, and start there, but this function is not used
anywhere speed critical. */
@@ -96,7 +96,7 @@ ebitmap_last_set_bit (ebitmap map)
{
foundbit = true;
}
-
+
if (foundbit)
return i;
@@ -176,7 +176,7 @@ ebitmap_array_init (ebitmap map, unsigned int size)
static inline void
ebitmap_array_clear (ebitmap map)
{
- if (map->elts)
+ if (map->elts)
{
free (map->elts);
map->elts = NULL;
@@ -225,7 +225,7 @@ ebitmap_clear_bit (ebitmap map, unsigned int bit)
unsigned int bitindex, shift;
bool have_eltwordindex = false;
EBITMAP_ELT_TYPE *elt_ptr;
-
+
/* If the bit can't exist in our bitmap, just return. */
if (map->numwords == 0)
return;
@@ -233,7 +233,7 @@ ebitmap_clear_bit (ebitmap map, unsigned int bit)
if (wordindex >= map->wordmask->n_bits
|| !TEST_BIT (map->wordmask, wordindex))
return;
-
+
if (map->cache != NULL && map->cacheindex == wordindex)
elt_ptr = map->cache;
else
@@ -242,10 +242,10 @@ ebitmap_clear_bit (ebitmap map, unsigned int bit)
elt_ptr = &map->elts[eltwordindex];
have_eltwordindex = true;
}
-
+
bitindex = bit % EBITMAP_ELT_BITS;
shift = bitindex;
-
+
*(elt_ptr) &= ~(((EBITMAP_ELT_TYPE)1) << shift);
/* Clear out the empty words. */
@@ -253,7 +253,7 @@ ebitmap_clear_bit (ebitmap map, unsigned int bit)
{
if (!have_eltwordindex)
eltwordindex = sbitmap_popcount (map->wordmask, wordindex);
-
+
if (map->cache != NULL && map->cacheindex == eltwordindex)
map->cache = NULL;
diff --git a/gcc/ebitmap.h b/gcc/ebitmap.h
index 4f9fd44ceb3..b067ddb892a 100644
--- a/gcc/ebitmap.h
+++ b/gcc/ebitmap.h
@@ -26,7 +26,7 @@ along with GCC; see the file COPYING3. If not see
#define EBITMAP_ELT_TYPE unsigned HOST_WIDEST_FAST_INT
typedef struct ebitmap_def
-{
+{
unsigned int n_elts; /* number of elements in the array. */
sbitmap wordmask; /* wordmask saying which words are
nonzero. */
@@ -86,11 +86,11 @@ typedef struct {
/* The word mask iterator. */
sbitmap_iterator maskiter;
} ebitmap_iterator;
-
+
static inline void
ebitmap_iter_init (ebitmap_iterator *i, ebitmap bmp, unsigned int min)
{
- sbitmap_iter_init (&i->maskiter, bmp->wordmask,
+ sbitmap_iter_init (&i->maskiter, bmp->wordmask,
min / EBITMAP_ELT_BITS);
i->size = bmp->numwords;
if (i->size == 0)
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 6ef490e6442..91212adadbc 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -73,7 +73,7 @@ enum machine_mode ptr_mode; /* Mode whose width is POINTER_SIZE. */
struct rtl_data x_rtl;
/* Indexed by pseudo register number, gives the rtx for that pseudo.
- Allocated in parallel with regno_pointer_align.
+ Allocated in parallel with regno_pointer_align.
FIXME: We could put it into emit_status struct, but gengtype is not able to deal
with length attribute nested in top level structures. */
@@ -535,7 +535,7 @@ immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, enum machine_mode mode)
gen_int_mode.
2) GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT, but the value of
the integer fits into HOST_WIDE_INT anyway (i.e., i1 consists only
- from copies of the sign bit, and sign of i0 and i1 are the same), then
+ from copies of the sign bit, and sign of i0 and i1 are the same), then
we return a CONST_INT for i0.
3) Otherwise, we create a CONST_DOUBLE for i0 and i1. */
if (mode != VOIDmode)
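
Case 2 in the comment above comes down to one test: the high word I1 must consist entirely of copies of I0's sign bit. A minimal sketch of that check, with a stand-in typedef for HOST_WIDE_INT (the names here are illustrative, not GCC's):

#include <stdio.h>

typedef long long hwi;   /* stand-in for HOST_WIDE_INT */

/* True when the two-word pair (i0, i1) is just a sign-extended single word,
   i.e. when a CONST_INT is enough to represent it.  */
static int
fits_in_one_word (hwi i0, hwi i1)
{
  return i1 == (i0 < 0 ? (hwi) -1 : 0);
}

int
main (void)
{
  printf ("%d\n", fits_in_one_word (42, 0));    /* 1: plain positive value   */
  printf ("%d\n", fits_in_one_word (-7, -1));   /* 1: sign-extended negative */
  printf ("%d\n", fits_in_one_word (42, 1));    /* 0: genuinely double-word  */
  return 0;
}
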
@@ -865,7 +865,7 @@ gen_reg_rtx (enum machine_mode mode)
/* If a virtual register with bigger mode alignment is generated,
increase stack alignment estimation because it might be spilled
to stack later. */
- if (SUPPORTS_STACK_ALIGNMENT
+ if (SUPPORTS_STACK_ALIGNMENT
&& crtl->stack_alignment_estimated < align
&& !crtl->stack_realign_processed)
{
@@ -1189,7 +1189,7 @@ gen_lowpart_common (enum machine_mode mode, rtx x)
innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
else if (innermode == VOIDmode)
innermode = mode_for_size (HOST_BITS_PER_WIDE_INT * 2, MODE_INT, 0);
-
+
xsize = GET_MODE_SIZE (innermode);
gcc_assert (innermode != VOIDmode && innermode != BLKmode);
@@ -1250,7 +1250,7 @@ gen_highpart (enum machine_mode mode, rtx x)
result = simplify_gen_subreg (mode, x, GET_MODE (x),
subreg_highpart_offset (mode, GET_MODE (x)));
gcc_assert (result);
-
+
/* simplify_gen_subreg is not guaranteed to return a valid operand for
the target if we have a MEM. gen_highpart must return a valid operand,
emitting code if necessary to do so. */
@@ -1259,7 +1259,7 @@ gen_highpart (enum machine_mode mode, rtx x)
result = validize_mem (result);
gcc_assert (result);
}
-
+
return result;
}
@@ -1583,11 +1583,11 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
/* We can set the alignment from the type if we are making an object,
this is an INDIRECT_REF, or if TYPE_ALIGN_OK. */
- if (objectp || TREE_CODE (t) == INDIRECT_REF
- || TREE_CODE (t) == ALIGN_INDIRECT_REF
+ if (objectp || TREE_CODE (t) == INDIRECT_REF
+ || TREE_CODE (t) == ALIGN_INDIRECT_REF
|| TYPE_ALIGN_OK (type))
align = MAX (align, TYPE_ALIGN (type));
- else
+ else
if (TREE_CODE (t) == MISALIGNED_INDIRECT_REF)
{
if (integer_zerop (TREE_OPERAND (t, 1)))
@@ -2465,7 +2465,7 @@ verify_rtx_sharing (rtx orig, rtx insn)
}
#endif
gcc_assert (!RTX_FLAG (x, used));
-
+
RTX_FLAG (x, used) = 1;
/* Now scan the subexpressions recursively. */
@@ -2673,7 +2673,7 @@ repeat:
format_ptr = GET_RTX_FORMAT (code);
length = GET_RTX_LENGTH (code);
last_ptr = NULL;
-
+
for (i = 0; i < length; i++)
{
switch (*format_ptr++)
@@ -2689,12 +2689,12 @@ repeat:
{
int j;
int len = XVECLEN (x, i);
-
+
/* Copy the vector iff I copied the rtx and the length
is nonzero. */
if (copied && len > 0)
XVEC (x, i) = gen_rtvec_v (len, XVEC (x, i)->elem);
-
+
/* Call recursively on all inside the vector. */
for (j = 0; j < len; j++)
{
@@ -2769,7 +2769,7 @@ repeat:
format_ptr = GET_RTX_FORMAT (code);
length = GET_RTX_LENGTH (code);
-
+
for (i = 0; i < length; i++)
{
switch (*format_ptr++)
@@ -3812,7 +3812,7 @@ add_insn_before (rtx insn, rtx before, basic_block bb)
gcc_assert (stack);
}
- if (!bb
+ if (!bb
&& !BARRIER_P (before)
&& !BARRIER_P (insn))
bb = BLOCK_FOR_INSN (before);
@@ -5009,15 +5009,15 @@ rtx
emit_note_copy (rtx orig)
{
rtx note;
-
+
note = rtx_alloc (NOTE);
-
+
INSN_UID (note) = cur_insn_uid++;
NOTE_DATA (note) = NOTE_DATA (orig);
NOTE_KIND (note) = NOTE_KIND (orig);
BLOCK_FOR_INSN (note) = NULL;
add_insn (note);
-
+
return note;
}
@@ -5996,7 +5996,7 @@ emit_copy_of_insn_after (rtx insn, rtx after)
SIBLING_CALL_P (new_rtx) = SIBLING_CALL_P (insn);
RTL_CONST_CALL_P (new_rtx) = RTL_CONST_CALL_P (insn);
RTL_PURE_CALL_P (new_rtx) = RTL_PURE_CALL_P (insn);
- RTL_LOOPING_CONST_OR_PURE_CALL_P (new_rtx)
+ RTL_LOOPING_CONST_OR_PURE_CALL_P (new_rtx)
= RTL_LOOPING_CONST_OR_PURE_CALL_P (insn);
break;
diff --git a/gcc/et-forest.c b/gcc/et-forest.c
index e87322c6428..c15b6d8f2c3 100644
--- a/gcc/et-forest.c
+++ b/gcc/et-forest.c
@@ -210,7 +210,7 @@ record_path_before_1 (struct et_occ *occ, int depth)
if (occ->prev)
{
- m = record_path_before_1 (occ->prev, depth);
+ m = record_path_before_1 (occ->prev, depth);
if (m < mn)
mn = m;
}
@@ -261,7 +261,7 @@ check_path_after_1 (struct et_occ *occ, int depth)
if (occ->next)
{
- m = check_path_after_1 (occ->next, depth);
+ m = check_path_after_1 (occ->next, depth);
if (m < mn)
mn = m;
}
@@ -308,7 +308,7 @@ et_splay (struct et_occ *occ)
record_path_before (occ);
et_check_tree_sanity (occ);
#endif
-
+
while (occ->parent)
{
occ_depth = occ->depth;
@@ -444,7 +444,7 @@ static struct et_occ *
et_new_occ (struct et_node *node)
{
struct et_occ *nw;
-
+
if (!et_occurrences)
et_occurrences = create_alloc_pool ("et_occ pool", sizeof (struct et_occ), 300);
nw = (struct et_occ *) pool_alloc (et_occurrences);
@@ -467,7 +467,7 @@ struct et_node *
et_new_tree (void *data)
{
struct et_node *nw;
-
+
if (!et_nodes)
et_nodes = create_alloc_pool ("et_node pool", sizeof (struct et_node), 300);
nw = (struct et_node *) pool_alloc (et_nodes);
@@ -590,7 +590,7 @@ et_split (struct et_node *t)
for (r = rmost->next; r->prev; r = r->prev)
continue;
- et_splay (r);
+ et_splay (r);
r->prev->parent = NULL;
p_occ = t->parent_occ;
diff --git a/gcc/except.c b/gcc/except.c
index 6c7618339e5..572aad0f842 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
a given statement does throw. During this lowering process,
we create an EH_LANDING_PAD node for each EH_REGION that has
some code within the function that needs to be executed if a
- throw does happen. We also create RESX statements that are
+ throw does happen. We also create RESX statements that are
used to transfer control from an inner EH_REGION to an outer
EH_REGION. We also create EH_DISPATCH statements as placeholders
for a runtime type comparison that should be made in order to
@@ -75,7 +75,7 @@ along with GCC; see the file COPYING3. If not see
handler for the exception must be within a function somewhere
up the call chain, so we call back into the exception runtime
(__builtin_unwind_resume).
-
+
During pass_expand (cfgexpand.c), we generate REG_EH_REGION notes
that create an rtl to eh_region mapping that corresponds to the
gimple to eh_region mapping that had been recorded in the
@@ -93,7 +93,7 @@ along with GCC; see the file COPYING3. If not see
frame are emitted at this time.
During pass_convert_to_eh_region_ranges (except.c), we transform
- the REG_EH_REGION notes attached to individual insns into
+ the REG_EH_REGION notes attached to individual insns into
non-overlapping ranges of insns bounded by NOTE_INSN_EH_REGION_BEG
and NOTE_INSN_EH_REGION_END. Each insn within such ranges has the
same associated action within the exception region tree, meaning
@@ -611,7 +611,7 @@ duplicate_eh_regions (struct function *ifun,
data.eh_map = pointer_map_create ();
outer_region = get_eh_region_from_lp_number (outer_lp);
-
+
/* Copy all the regions in the subtree. */
if (copy_region)
duplicate_eh_regions_1 (&data, copy_region, outer_region);
@@ -1490,7 +1490,7 @@ remove_eh_landing_pad (eh_landing_pad lp)
for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
continue;
*pp = lp->next_lp;
-
+
if (lp->post_landing_pad)
EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
VEC_replace (eh_landing_pad, cfun->eh->lp_array, lp->index, NULL);
@@ -1555,7 +1555,7 @@ for_each_eh_label (void (*callback) (rtx))
}
/* Create the REG_EH_REGION note for INSN, given its ECF_FLAGS for a
- call insn.
+ call insn.
At the gimple level, we use LP_NR
> 0 : The statement transfers to landing pad LP_NR
diff --git a/gcc/except.h b/gcc/except.h
index f1dbf90c2a9..13aadd79ccb 100644
--- a/gcc/except.h
+++ b/gcc/except.h
@@ -156,7 +156,7 @@ struct GTY(()) eh_region_d
The normal way for this to happen is for there to be a CLEANUP region
contained within this MUST_NOT_THROW region. Note that if the
runtime handles the MUST_NOT_THROW region, we have no control over
- what termination function is called; it will be decided by the
+ what termination function is called; it will be decided by the
personality function in effect for this CIE. */
tree failure_decl;
/* The location assigned to the call of FAILURE_DECL, if expanded. */
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 8a5cf5054e7..d51b4542677 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -390,7 +390,7 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
always get higher addresses. */
int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
-
+
byte_offset = 0;
/* Paradoxical subregs need special handling on big endian machines. */
@@ -2365,7 +2365,7 @@ struct alg_hash_entry {
Otherwise, the cost within which multiplication by T is
impossible. */
struct mult_cost cost;
-
+
  /* Optimized for speed? */
bool speed;
};
@@ -3198,7 +3198,7 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
target, unsignedp);
}
}
-
+
/* We used to test optimize here, on the grounds that it's better to
produce a smaller program when -O is not used. But this causes
such a terrible slowdown sometimes that it seems better to always
@@ -3577,8 +3577,8 @@ expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
- /* We can't optimize modes wider than BITS_PER_WORD.
- ??? We might be able to perform double-word arithmetic if
+ /* We can't optimize modes wider than BITS_PER_WORD.
+ ??? We might be able to perform double-word arithmetic if
mode == word_mode, however all the cost calculations in
synth_mult etc. assume single-word operations. */
if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
@@ -4944,7 +4944,7 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
if (!remainder)
{
remainder = gen_reg_rtx (compute_mode);
- if (!expand_twoval_binop_libfunc
+ if (!expand_twoval_binop_libfunc
(unsignedp ? udivmod_optab : sdivmod_optab,
op0, op1,
NULL_RTX, remainder,
@@ -4987,12 +4987,12 @@ make_tree (tree type, rtx x)
&& (GET_MODE_BITSIZE (TYPE_MODE (type))
< HOST_BITS_PER_WIDE_INT)))
hi = -1;
-
+
t = build_int_cst_wide (type, INTVAL (x), hi);
-
+
return t;
}
-
+
case CONST_DOUBLE:
if (GET_MODE (x) == VOIDmode)
t = build_int_cst_wide (type,
@@ -5154,7 +5154,7 @@ emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
target_mode = result_mode;
if (!target)
target = gen_reg_rtx (target_mode);
-
+
if (optimize
|| !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
subtarget = gen_reg_rtx (result_mode);
@@ -5530,7 +5530,7 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
/* Cannot split ORDERED and UNORDERED, only try the above trick. */
if (code == ORDERED || code == UNORDERED)
return 0;
-
+
and_them = split_comparison (code, mode, &first_code, &code);
/* If there are no NaNs, the first comparison should always fall through.
@@ -5783,7 +5783,7 @@ emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
/* If this failed, we have to do this with set/compare/jump/set code.
For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
- if (code == NE
+ if (code == NE
&& GET_MODE_CLASS (mode) == MODE_INT
&& REG_P (target)
&& op0 == target
diff --git a/gcc/expr.c b/gcc/expr.c
index 923db8f49c8..75c17923cd0 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -2311,7 +2311,7 @@ can_store_by_pieces (unsigned HOST_WIDE_INT len,
if (len == 0)
return 1;
- if (! (memsetp
+ if (! (memsetp
? SET_BY_PIECES_P (len, align)
: STORE_BY_PIECES_P (len, align)))
return 0;
@@ -4511,7 +4511,7 @@ emit_storent_insn (rtx to, rtx from)
If CALL_PARAM_P is nonzero, this is a store into a call param on the
stack, and block moves may need to be treated specially.
-
+
If NONTEMPORAL is true, try using a nontemporal store instruction. */
rtx
@@ -5753,7 +5753,7 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size)
ALIAS_SET is the alias set for the destination. This value will
(in general) be different from that for TARGET, since TARGET is a
reference to the containing structure.
-
+
If NONTEMPORAL is true, try generating a nontemporal store. */
static rtx
@@ -6143,7 +6143,7 @@ contains_packed_reference (const_tree exp)
case COMPONENT_REF:
{
tree field = TREE_OPERAND (exp, 1);
- packed_p = DECL_PACKED (field)
+ packed_p = DECL_PACKED (field)
|| TYPE_PACKED (TREE_TYPE (field))
|| TYPE_PACKED (TREE_TYPE (exp));
if (packed_p)
@@ -6750,7 +6750,7 @@ highest_pow2_factor_for_target (const_tree target, const_tree exp)
{
unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);
-
+
return MAX (factor, talign);
}
@@ -6940,7 +6940,7 @@ expand_expr_addr_expr_1 (tree exp, rtx target, enum machine_mode tmode,
if (modifier != EXPAND_NORMAL)
result = force_operand (result, NULL);
- tmp = expand_expr (offset, NULL_RTX, tmode,
+ tmp = expand_expr (offset, NULL_RTX, tmode,
modifier == EXPAND_INITIALIZER
? EXPAND_INITIALIZER : EXPAND_NORMAL);
@@ -7390,9 +7390,9 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
return CONST0_RTX (mode);
}
- case POINTER_PLUS_EXPR:
+ case POINTER_PLUS_EXPR:
/* Even though the sizetype mode and the pointer's mode can be different
- expand is able to handle this correctly and get the correct result out
+ expand is able to handle this correctly and get the correct result out
of the PLUS_EXPR code. */
/* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
if sizetype precision is smaller than pointer precision. */
@@ -9330,7 +9330,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
/* If both modes are integral, then we can convert from one to the
other. */
else if (SCALAR_INT_MODE_P (GET_MODE (op0)) && SCALAR_INT_MODE_P (mode))
- op0 = convert_modes (mode, GET_MODE (op0), op0,
+ op0 = convert_modes (mode, GET_MODE (op0), op0,
TYPE_UNSIGNED (TREE_TYPE (treeop0)));
/* As a last resort, spill op0 to memory, and reload it in a
different mode. */
diff --git a/gcc/expr.h b/gcc/expr.h
index 88d72daad4f..93c66a8d474 100644
--- a/gcc/expr.h
+++ b/gcc/expr.h
@@ -395,7 +395,7 @@ extern rtx builtin_strncpy_read_str (void *, HOST_WIDE_INT, enum machine_mode);
/* Functions from expr.c: */
-/* This is run during target initialization to set up which modes can be
+/* This is run during target initialization to set up which modes can be
used directly in memory and to initialize the block move optab. */
extern void init_expr_target (void);
@@ -484,7 +484,7 @@ extern rtx clear_storage_hints (rtx, rtx, enum block_op_methods,
rtx set_storage_via_libcall (rtx, rtx, rtx, bool);
/* Expand a setmem pattern; return true if successful. */
-extern bool set_storage_via_setmem (rtx, rtx, rtx, unsigned int,
+extern bool set_storage_via_setmem (rtx, rtx, rtx, unsigned int,
unsigned int, HOST_WIDE_INT);
/* Determine whether the LEN bytes can be moved by using several move
@@ -546,7 +546,7 @@ extern rtx store_expr (tree, rtx, int, bool);
extern rtx force_operand (rtx, rtx);
/* Work horses for expand_expr. */
-extern rtx expand_expr_real (tree, rtx, enum machine_mode,
+extern rtx expand_expr_real (tree, rtx, enum machine_mode,
enum expand_modifier, rtx *);
extern rtx expand_expr_real_1 (tree, rtx, enum machine_mode,
enum expand_modifier, rtx *);
diff --git a/gcc/final.c b/gcc/final.c
index ef450d2fe3e..5d037f53933 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -1413,7 +1413,7 @@ int
asm_str_count (const char *templ)
{
int count = 1;
-
+
if (!*templ)
return 0;
diff --git a/gcc/flags.h b/gcc/flags.h
index bd8b82d4372..519aa920cd8 100644
--- a/gcc/flags.h
+++ b/gcc/flags.h
@@ -114,7 +114,7 @@ extern int optimize_size;
/* True if this is the LTO front end (lto1). This is used to disable
gimple generation and lowering passes that are normally run on the
output of a front end. These passes must be bypassed for lto since
- they have already been done before the gimple was written. */
+ they have already been done before the gimple was written. */
extern bool in_lto_p;
@@ -122,9 +122,9 @@ extern bool in_lto_p;
extern int flag_generate_lto;
-/* Used to set the level of -Wstrict-aliasing, when no level is specified.
+/* Used to set the level of -Wstrict-aliasing, when no level is specified.
The external way to set the default level is to use
- -Wstrict-aliasing=level.
+ -Wstrict-aliasing=level.
ONOFF is assumed to take value 1 when -Wstrict-aliasing is specified,
and 0 otherwise. After calling this function, wstrict_aliasing will be
set to the default value of -Wstrict_aliasing=level. */
@@ -264,8 +264,8 @@ extern bool sel_sched_switch_set;
/* Whether to run the warn_unused_result attribute pass. */
extern bool flag_warn_unused_result;
-/* Values of the -falign-* flags: how much to align labels in code.
- 0 means `use default', 1 means `don't align'.
+/* Values of the -falign-* flags: how much to align labels in code.
+ 0 means `use default', 1 means `don't align'.
For each variable, there is an _log variant which is the power
of two not less than the variable, for .align output. */
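
The "_log variant" mentioned above is the smallest exponent whose power of two is not less than the alignment value. A quick hedged sketch of that relationship (illustration only, not the option-handling code):

#include <stdio.h>

/* Smallest LOG such that (1 << LOG) >= ALIGN.  */
static int
align_to_log (int align)
{
  int log = 0;
  while ((1 << log) < align)
    log++;
  return log;
}

int
main (void)
{
  printf ("align 1  -> log %d\n", align_to_log (1));    /* 0 */
  printf ("align 8  -> log %d\n", align_to_log (8));    /* 3 */
  printf ("align 10 -> log %d\n", align_to_log (10));   /* 4: next power of two is 16 */
  return 0;
}
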
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index c6b420bfe88..40a580e77a6 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -1064,7 +1064,7 @@ negate_mathfn_p (enum built_in_function code)
CASE_FLT_FN (BUILT_IN_NEARBYINT):
CASE_FLT_FN (BUILT_IN_RINT):
return !flag_rounding_math;
-
+
default:
break;
}
@@ -1244,7 +1244,7 @@ fold_negate_expr (location_t loc, tree t)
return fold_build2_loc (loc, PLUS_EXPR, type, TREE_OPERAND (t, 0),
build_int_cst (type, 1));
break;
-
+
case INTEGER_CST:
tem = fold_negate_const (t, type);
if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
@@ -1282,7 +1282,7 @@ fold_negate_expr (location_t loc, tree t)
fold_negate_expr (loc, TREE_OPERAND (t, 0)),
fold_negate_expr (loc, TREE_OPERAND (t, 1)));
break;
-
+
case CONJ_EXPR:
if (negate_expr_p (t))
return fold_build1_loc (loc, CONJ_EXPR, type,
@@ -2034,7 +2034,7 @@ const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
tree compare = fold_build2 (LT_EXPR, boolean_type_node,
fold_abs_const (r2, TREE_TYPE (type)),
fold_abs_const (i2, TREE_TYPE (type)));
-
+
if (integer_nonzerop (compare))
{
/* In the TRUE branch, we compute
@@ -2096,17 +2096,17 @@ const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
tree type = TREE_TYPE(arg1);
int count = TYPE_VECTOR_SUBPARTS (type), i;
tree elements1, elements2, list = NULL_TREE;
-
+
if(TREE_CODE(arg2) != VECTOR_CST)
return NULL_TREE;
-
+
elements1 = TREE_VECTOR_CST_ELTS (arg1);
elements2 = TREE_VECTOR_CST_ELTS (arg2);
for (i = 0; i < count; i++)
{
tree elem1, elem2, elem;
-
+
/* The trailing elements can be empty and should be treated as 0 */
if(!elements1)
elem1 = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
@@ -2114,8 +2114,8 @@ const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
elem1 = TREE_VALUE(elements1);
elements1 = TREE_CHAIN (elements1);
- }
-
+ }
+
if(!elements2)
elem2 = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
else
@@ -2123,17 +2123,17 @@ const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
elem2 = TREE_VALUE(elements2);
elements2 = TREE_CHAIN (elements2);
}
-
+
elem = const_binop (code, elem1, elem2, notrunc);
-
+
/* It is possible that const_binop cannot handle the given
code and return NULL_TREE */
if(elem == NULL_TREE)
return NULL_TREE;
-
+
list = tree_cons (NULL_TREE, elem, list);
}
- return build_vector(type, nreverse(list));
+ return build_vector(type, nreverse(list));
}
return NULL_TREE;
}
@@ -2573,7 +2573,7 @@ build_zero_vector (tree type)
elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
units = TYPE_VECTOR_SUBPARTS (type);
-
+
list = NULL_TREE;
for (i = 0; i < units; i++)
list = tree_cons (NULL_TREE, elem, list);
@@ -3250,7 +3250,7 @@ operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
TREE_REAL_CST (arg1)))
return 1;
-
+
if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))))
{
/* If we do not distinguish between signed and unsigned zero,
@@ -3409,7 +3409,7 @@ operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
case COND_EXPR:
return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
-
+
default:
return 0;
}
@@ -4126,7 +4126,7 @@ make_bit_field_ref (location_t loc, tree inner, tree type,
tree size = TYPE_SIZE (TREE_TYPE (inner));
if ((INTEGRAL_TYPE_P (TREE_TYPE (inner))
|| POINTER_TYPE_P (TREE_TYPE (inner)))
- && host_integerp (size, 0)
+ && host_integerp (size, 0)
&& tree_low_cst (size, 0) == bitsize)
return fold_convert_loc (loc, type, inner);
}
@@ -5536,7 +5536,7 @@ fold_cond_expr_with_comparison (location_t loc, tree type,
tem = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (arg00), arg00,
fold_convert_loc (loc, TREE_TYPE (arg00),
arg2));
- return pedantic_non_lvalue_loc (loc,
+ return pedantic_non_lvalue_loc (loc,
fold_convert_loc (loc, type, tem));
}
break;
@@ -7271,7 +7271,7 @@ fold_single_bit_test (location_t loc, enum tree_code code,
operations as unsigned. If we must use the AND, we have a choice.
Normally unsigned is faster, but for some machines signed is. */
#ifdef LOAD_EXTEND_OP
- ops_unsigned = (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND
+ ops_unsigned = (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND
&& !flag_syntax_only) ? 0 : 1;
#else
ops_unsigned = 1;
@@ -7556,7 +7556,7 @@ try_move_mult_to_index (location_t loc, tree addr, tree op1)
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
-
+
if (TREE_CODE (arg0) == INTEGER_CST)
{
s = arg0;
@@ -8445,7 +8445,7 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
case FIX_TRUNC_EXPR:
if (TREE_TYPE (op0) == type)
return op0;
-
+
/* If we have (type) (a CMP b) and type is an integral type, return
new expression involving the new type. */
if (COMPARISON_CLASS_P (op0) && INTEGRAL_TYPE_P (type))
@@ -9377,7 +9377,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type,
&& (code == EQ_EXPR
|| code == NE_EXPR
|| POINTER_TYPE_OVERFLOW_UNDEFINED))
-
+
{
if (code != EQ_EXPR
&& code != NE_EXPR
@@ -9652,7 +9652,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type,
/* Likewise, we can simplify a comparison of a real constant with
a MINUS_EXPR whose first operand is also a real constant, i.e.
- (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
+ (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
floating-point types only if -fassociative-math is set. */
if (flag_associative_math
&& TREE_CODE (arg1) == REAL_CST
@@ -9976,7 +9976,7 @@ get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue,
tree op0, op1;
unsigned HOST_WIDE_INT modulus;
enum tree_code inner_code;
-
+
op0 = TREE_OPERAND (expr, 0);
STRIP_NOPS (op0);
modulus = get_pointer_modulus_and_residue (op0, residue,
@@ -9996,7 +9996,7 @@ get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue,
if (TREE_CODE (op1) == INTEGER_CST)
{
unsigned HOST_WIDE_INT align;
-
+
/* Compute the greatest power-of-2 divisor of op1. */
align = TREE_INT_CST_LOW (op1);
align &= -align;
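
The align &= -align step in the hunk above relies on the two's-complement identity that x & -x isolates the lowest set bit of x, which is exactly the greatest power of two dividing x. A tiny standalone demonstration (not GCC code):

#include <stdio.h>

int
main (void)
{
  unsigned long values[] = { 24, 40, 1, 4096, 7 };
  unsigned int i;

  for (i = 0; i < sizeof values / sizeof values[0]; i++)
    {
      unsigned long x = values[i];
      unsigned long align = x & -x;   /* greatest power-of-2 divisor of x */
      printf ("%lu -> %lu\n", x, align);
    }
  return 0;
}
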
@@ -10165,7 +10165,7 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (arg0) == COND_EXPR || COMPARISON_CLASS_P (arg0))
{
tem = fold_binary_op_with_conditional_arg (loc, code, type, op0, op1,
- arg0, arg1,
+ arg0, arg1,
/*cond_first_p=*/1);
if (tem != NULL_TREE)
return tem;
@@ -10174,7 +10174,7 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (arg1) == COND_EXPR || COMPARISON_CLASS_P (arg1))
{
tem = fold_binary_op_with_conditional_arg (loc, code, type, op0, op1,
- arg1, arg0,
+ arg1, arg0,
/*cond_first_p=*/0);
if (tem != NULL_TREE)
return tem;
@@ -10456,7 +10456,7 @@ fold_binary_loc (location_t loc,
return fold_build2_loc (loc, MULT_EXPR, type, arg0,
build_real (type, dconst2));
- /* Convert a + (b*c + d*e) into (a + b*c) + d*e.
+ /* Convert a + (b*c + d*e) into (a + b*c) + d*e.
We associate floats only if the user has specified
-fassociative-math. */
if (flag_associative_math
@@ -10473,7 +10473,7 @@ fold_binary_loc (location_t loc,
return fold_build2_loc (loc, PLUS_EXPR, type, tree0, tree11);
}
}
- /* Convert (b*c + d*e) + a into b*c + (d*e +a).
+ /* Convert (b*c + d*e) + a into b*c + (d*e +a).
We associate floats only if the user has specified
-fassociative-math. */
if (flag_associative_math
@@ -10909,7 +10909,7 @@ fold_binary_loc (location_t loc,
tree diff = build2 (MINUS_EXPR, type, op0, op1);
return fold_build2_loc (loc, MULT_EXPR, type, diff,
fold_convert_loc (loc, type, esz));
-
+
}
}
@@ -11288,7 +11288,7 @@ fold_binary_loc (location_t loc,
if (width > HOST_BITS_PER_WIDE_INT)
{
- mhi = (unsigned HOST_WIDE_INT) -1
+ mhi = (unsigned HOST_WIDE_INT) -1
>> (2 * HOST_BITS_PER_WIDE_INT - width);
mlo = -1;
}
@@ -11475,7 +11475,7 @@ fold_binary_loc (location_t loc,
fold_convert_loc (loc, type, t1));
return t1;
}
-
+
/* Convert ~X ^ ~Y to X ^ Y. */
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& TREE_CODE (arg1) == BIT_NOT_EXPR)
@@ -11505,7 +11505,7 @@ fold_binary_loc (location_t loc,
&& operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
{
tem = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
- return fold_build2_loc (loc, BIT_AND_EXPR, type,
+ return fold_build2_loc (loc, BIT_AND_EXPR, type,
fold_build1_loc (loc, BIT_NOT_EXPR, type, tem),
fold_convert_loc (loc, type, arg1));
}
@@ -11624,7 +11624,7 @@ fold_binary_loc (location_t loc,
&& operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
{
tem = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
- return fold_build2_loc (loc, BIT_AND_EXPR, type,
+ return fold_build2_loc (loc, BIT_AND_EXPR, type,
fold_build1_loc (loc, BIT_NOT_EXPR, type, tem),
fold_convert_loc (loc, type, arg1));
}
@@ -11889,7 +11889,7 @@ fold_binary_loc (location_t loc,
}
}
}
- /* Convert A/B/C to A/(B*C). */
+ /* Convert A/B/C to A/(B*C). */
if (flag_reciprocal_math
&& TREE_CODE (arg0) == RDIV_EXPR)
return fold_build2_loc (loc, RDIV_EXPR, type, TREE_OPERAND (arg0, 0),
@@ -14222,7 +14222,7 @@ fold_checksum_tree (const_tree expr, struct md5_ctx *ctx, htab_t ht)
enum tree_code code;
union tree_node buf;
int i, len;
-
+
recursive_label:
gcc_assert ((sizeof (struct tree_exp) + 5 * sizeof (tree)
@@ -14332,7 +14332,7 @@ recursive_label:
}
if (CODE_CONTAINS_STRUCT (TREE_CODE (expr), TS_DECL_WITH_VIS))
fold_checksum_tree (DECL_SECTION_NAME (expr), ctx, ht);
-
+
if (CODE_CONTAINS_STRUCT (TREE_CODE (expr), TS_DECL_NON_COMMON))
{
fold_checksum_tree (DECL_VINDEX (expr), ctx, ht);
@@ -14377,7 +14377,7 @@ debug_fold_checksum (const_tree t)
unsigned char checksum[16];
struct md5_ctx ctx;
htab_t ht = htab_create (32, htab_hash_pointer, htab_eq_pointer, NULL);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (t, &ctx, ht);
md5_finish_ctx (&ctx, checksum);
@@ -14412,14 +14412,14 @@ fold_build1_stat_loc (location_t loc,
md5_finish_ctx (&ctx, checksum_before);
htab_empty (ht);
#endif
-
+
tem = fold_unary_loc (loc, code, type, op0);
if (!tem)
{
tem = build1_stat (code, type, op0 PASS_MEM_STAT);
SET_EXPR_LOCATION (tem, loc);
}
-
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (op0, &ctx, ht);
@@ -14470,7 +14470,7 @@ fold_build2_stat_loc (location_t loc,
tem = build2_stat (code, type, op0, op1 PASS_MEM_STAT);
SET_EXPR_LOCATION (tem, loc);
}
-
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (op0, &ctx, ht);
@@ -14479,7 +14479,7 @@ fold_build2_stat_loc (location_t loc,
if (memcmp (checksum_before_op0, checksum_after_op0, 16))
fold_check_failed (op0, tem);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (op1, &ctx, ht);
md5_finish_ctx (&ctx, checksum_after_op1);
@@ -14535,7 +14535,7 @@ fold_build3_stat_loc (location_t loc, enum tree_code code, tree type,
tem = build3_stat (code, type, op0, op1, op2 PASS_MEM_STAT);
SET_EXPR_LOCATION (tem, loc);
}
-
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (op0, &ctx, ht);
@@ -14544,7 +14544,7 @@ fold_build3_stat_loc (location_t loc, enum tree_code code, tree type,
if (memcmp (checksum_before_op0, checksum_after_op0, 16))
fold_check_failed (op0, tem);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (op1, &ctx, ht);
md5_finish_ctx (&ctx, checksum_after_op1);
@@ -14552,7 +14552,7 @@ fold_build3_stat_loc (location_t loc, enum tree_code code, tree type,
if (memcmp (checksum_before_op1, checksum_after_op1, 16))
fold_check_failed (op1, tem);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (op2, &ctx, ht);
md5_finish_ctx (&ctx, checksum_after_op2);
@@ -14597,7 +14597,7 @@ fold_build_call_array_loc (location_t loc, tree type, tree fn,
#endif
tem = fold_builtin_call_array (loc, type, fn, nargs, argarray);
-
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (fn, &ctx, ht);
@@ -14606,7 +14606,7 @@ fold_build_call_array_loc (location_t loc, tree type, tree fn,
if (memcmp (checksum_before_fn, checksum_after_fn, 16))
fold_check_failed (fn, tem);
-
+
md5_init_ctx (&ctx);
for (i = 0; i < nargs; i++)
fold_checksum_tree (argarray[i], &ctx, ht);
@@ -14952,10 +14952,10 @@ tree_binary_nonnegative_warnv_p (enum tree_code code, tree type, tree op0,
&& (TREE_CODE (op0) == NOP_EXPR || TREE_CODE (op0) == INTEGER_CST)
&& (TREE_CODE (op1) == NOP_EXPR || TREE_CODE (op1) == INTEGER_CST))
{
- tree inner0 = (TREE_CODE (op0) == NOP_EXPR)
+ tree inner0 = (TREE_CODE (op0) == NOP_EXPR)
? TREE_TYPE (TREE_OPERAND (op0, 0))
: TREE_TYPE (op0);
- tree inner1 = (TREE_CODE (op1) == NOP_EXPR)
+ tree inner1 = (TREE_CODE (op1) == NOP_EXPR)
? TREE_TYPE (TREE_OPERAND (op1, 0))
: TREE_TYPE (op1);
@@ -16003,7 +16003,7 @@ fold_build_cleanup_point_expr (tree type, tree expr)
if (!TREE_SIDE_EFFECTS (op))
return expr;
}
-
+
return build1 (CLEANUP_POINT_EXPR, type, expr);
}
@@ -16067,17 +16067,17 @@ fold_indirect_ref_1 (location_t loc, tree type, tree op0)
/* ((foo*)&vectorfoo)[1] => BIT_FIELD_REF<vectorfoo,...> */
if (TREE_CODE (sub) == POINTER_PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST)
- {
+ {
tree op00 = TREE_OPERAND (sub, 0);
tree op01 = TREE_OPERAND (sub, 1);
tree op00type;
-
+
STRIP_NOPS (op00);
op00type = TREE_TYPE (op00);
if (TREE_CODE (op00) == ADDR_EXPR
&& TREE_CODE (TREE_TYPE (op00type)) == VECTOR_TYPE
&& type == TREE_TYPE (TREE_TYPE (op00type)))
- {
+ {
HOST_WIDE_INT offset = tree_low_cst (op01, 0);
tree part_width = TYPE_SIZE (type);
unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT;
@@ -16088,7 +16088,7 @@ fold_indirect_ref_1 (location_t loc, tree type, tree op0)
return fold_build3_loc (loc,
BIT_FIELD_REF, type, TREE_OPERAND (op00, 0),
part_width, index);
-
+
}
}
@@ -16113,7 +16113,7 @@ fold_indirect_ref_1 (location_t loc, tree type, tree op0)
TREE_OPERAND (op00, 0));
}
}
-
+
/* *(foo *)fooarrptr => (*fooarrptr)[0] */
if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
&& type == TREE_TYPE (TREE_TYPE (subtype)))
@@ -16437,7 +16437,7 @@ fold_strip_sign_ops (tree exp)
if (arg1)
return fold_build2_loc (loc, COMPOUND_EXPR, TREE_TYPE (exp), arg0, arg1);
break;
-
+
case COND_EXPR:
arg0 = fold_strip_sign_ops (TREE_OPERAND (exp, 1));
arg1 = fold_strip_sign_ops (TREE_OPERAND (exp, 2));
@@ -16447,7 +16447,7 @@ fold_strip_sign_ops (tree exp)
arg0 ? arg0 : TREE_OPERAND (exp, 1),
arg1 ? arg1 : TREE_OPERAND (exp, 2));
break;
-
+
case CALL_EXPR:
{
const enum built_in_function fcode = builtin_mathfn_code (exp);
diff --git a/gcc/function.c b/gcc/function.c
index 60c199f4298..88e036c0857 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -243,7 +243,7 @@ get_frame_size (void)
bool
frame_offset_overflow (HOST_WIDE_INT offset, tree func)
-{
+{
unsigned HOST_WIDE_INT size = FRAME_GROWS_DOWNWARD ? -offset : offset;
if (size > ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (Pmode) - 1))
@@ -710,7 +710,7 @@ assign_stack_temp_for_type (enum machine_mode mode, HOST_WIDE_INT size,
/* Try to find an available, already-allocated temporary of the proper
mode which meets the size and alignment requirements. Choose the
smallest one with the closest alignment.
-
+
If assign_stack_temp is called outside of the tree->rtl expansion,
we cannot reuse the stack slots (that may still refer to
VIRTUAL_STACK_VARS_REGNUM). */
@@ -1844,7 +1844,7 @@ aggregate_value_p (const_tree exp, const_tree fntype)
check for by-invisible-reference returns, typically for CALL_EXPR input
EXPressions. */
const_tree fndecl = NULL_TREE;
-
+
if (fntype)
switch (TREE_CODE (fntype))
{
@@ -1887,7 +1887,7 @@ aggregate_value_p (const_tree exp, const_tree fntype)
if (TREE_CODE (exp) == CALL_EXPR && fndecl && DECL_RESULT (fndecl)
&& DECL_BY_REFERENCE (DECL_RESULT (fndecl)))
return 1;
-
+
if (targetm.calls.return_in_memory (type, fntype))
return 1;
/* Types that are TREE_ADDRESSABLE must be constructed in memory,
@@ -1921,7 +1921,7 @@ use_register_for_decl (const_tree decl)
{
if (!targetm.calls.allocate_stack_slots_for_args())
return true;
-
+
/* Honor volatile. */
if (TREE_SIDE_EFFECTS (decl))
return false;
@@ -2501,7 +2501,7 @@ assign_parm_adjust_entry_rtl (struct assign_parm_data_one *data)
locations. The Irix 6 ABI has examples of this. */
if (GET_CODE (entry_parm) == PARALLEL)
emit_group_store (validize_mem (stack_parm), entry_parm,
- data->passed_type,
+ data->passed_type,
int_size_in_bytes (data->passed_type));
else
{
@@ -2626,7 +2626,7 @@ assign_parm_setup_block_p (struct assign_parm_data_one *data)
return false;
}
-/* A subroutine of assign_parms. Arrange for the parameter to be
+/* A subroutine of assign_parms. Arrange for the parameter to be
present and valid in DATA->STACK_RTL. */
static void
@@ -3167,7 +3167,7 @@ assign_parms (tree fndecl)
crtl->stack_alignment_estimated = align;
}
}
-
+
if (cfun->stdarg && !TREE_CHAIN (parm))
assign_parms_setup_varargs (&all, &data, false);
@@ -3224,7 +3224,7 @@ assign_parms (tree fndecl)
crtl->stack_alignment_estimated = align;
}
}
- }
+ }
}
/* If we are receiving a struct value address as the first argument, set up
@@ -3747,7 +3747,7 @@ setjmp_vars_warning (bitmap setjmp_crosses, tree block)
&& DECL_RTL_SET_P (decl)
&& REG_P (DECL_RTL (decl))
&& regno_clobbered_at_setjmp (setjmp_crosses, REGNO (DECL_RTL (decl))))
- warning (OPT_Wclobbered, "variable %q+D might be clobbered by"
+ warning (OPT_Wclobbered, "variable %q+D might be clobbered by"
" %<longjmp%> or %<vfork%>", decl);
}
@@ -3767,14 +3767,14 @@ setjmp_args_warning (bitmap setjmp_crosses)
if (DECL_RTL (decl) != 0
&& REG_P (DECL_RTL (decl))
&& regno_clobbered_at_setjmp (setjmp_crosses, REGNO (DECL_RTL (decl))))
- warning (OPT_Wclobbered,
+ warning (OPT_Wclobbered,
"argument %q+D might be clobbered by %<longjmp%> or %<vfork%>",
decl);
}
/* Generate warning messages for variables live across setjmp. */
-void
+void
generate_setjmp_warnings (void)
{
bitmap setjmp_crosses = regstat_get_setjmp_crosses ();
@@ -4076,7 +4076,7 @@ pop_cfun (void)
/* Return value of funcdef and increase it. */
int
-get_next_funcdef_no (void)
+get_next_funcdef_no (void)
{
return funcdef_no++;
}
@@ -4135,7 +4135,7 @@ allocate_struct_function (tree fndecl, bool abstract_p)
&& TYPE_ARG_TYPES (fntype) != 0
&& (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
!= void_type_node));
-
+
/* Assume all registers in stdarg functions need to be saved. */
cfun->va_list_gpr_size = VA_LIST_MAX_GPR_SIZE;
cfun->va_list_fpr_size = VA_LIST_MAX_FPR_SIZE;
@@ -4229,8 +4229,8 @@ struct rtl_opt_pass pass_init_function =
{
RTL_PASS,
"*init_function", /* name */
- NULL, /* gate */
- init_function_for_compilation, /* execute */
+ NULL, /* gate */
+ init_function_for_compilation, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
@@ -5005,7 +5005,7 @@ thread_prologue_and_epilogue_insns (void)
seq = gen_prologue ();
emit_insn (seq);
- /* Insert an explicit USE for the frame pointer
+ /* Insert an explicit USE for the frame pointer
if the profiling is on and the frame pointer is required. */
if (crtl->profile && frame_pointer_needed)
emit_use (hard_frame_pointer_rtx);
@@ -5013,7 +5013,7 @@ thread_prologue_and_epilogue_insns (void)
/* Retain a map of the prologue insns. */
record_insns (seq, NULL, &prologue_insn_hash);
emit_note (NOTE_INSN_PROLOGUE_END);
-
+
#ifndef PROFILE_BEFORE_PROLOGUE
/* Ensure that instructions are not moved into the prologue when
profiling is on. The call to the profiling routine can be
@@ -5286,7 +5286,7 @@ epilogue_done:
for (insn = epilogue_end; insn; insn = next)
{
next = NEXT_INSN (insn);
- if (NOTE_P (insn)
+ if (NOTE_P (insn)
&& (NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG))
reorder_insns (insn, insn, PREV_INSN (epilogue_end));
}
@@ -5389,7 +5389,7 @@ reposition_prologue_and_epilogue_notes (void)
if (note)
{
/* If the function has a single basic block, and no real
- epilogue insns (e.g. sibcall with no cleanup), the
+ epilogue insns (e.g. sibcall with no cleanup), the
epilogue note can get scheduled before the prologue
note. If we have frame related prologue insns, having
them scanned during the epilogue will result in a crash.
@@ -5586,7 +5586,7 @@ struct rtl_opt_pass pass_thread_prologue_and_epilogue =
/* This mini-pass fixes fall-out from SSA in asm statements that have
- in-out constraints. Say you start with
+ in-out constraints. Say you start with
orig = inout;
asm ("": "+mr" (inout));
diff --git a/gcc/function.h b/gcc/function.h
index ad203ba195e..fb2965a2c6b 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -86,7 +86,7 @@ struct GTY(()) emit_status {
/* Indexed by pseudo register number, gives the rtx for that pseudo.
- Allocated in parallel with regno_pointer_align.
+ Allocated in parallel with regno_pointer_align.
FIXME: We could put it into emit_status struct, but gengtype is not able to deal
with length attribute nested in top level structures. */
@@ -335,7 +335,7 @@ struct GTY(()) rtl_data {
/* The stack alignment estimated before reload, with consideration of
following factors:
1. Alignment of local stack variables (max_used_stack_slot_alignment)
- 2. Alignment requirement to call other functions
+ 2. Alignment requirement to call other functions
(preferred_stack_boundary)
3. Alignment of non-local stack variables but might be spilled in
local stack. */
@@ -361,7 +361,7 @@ struct GTY(()) rtl_data {
/* Nonzero if function being compiled has nonlocal gotos to parent
function. */
bool has_nonlocal_goto;
-
+
/* Nonzero if function being compiled has an asm statement. */
bool has_asm_statement;
@@ -488,7 +488,7 @@ struct GTY(()) function {
tree static_chain_decl;
/* An expression that contains the non-local goto save area. The first
- word is the saved frame pointer and the second is the saved stack
+ word is the saved frame pointer and the second is the saved stack
pointer. */
tree nonlocal_goto_save_area;
@@ -553,7 +553,7 @@ struct GTY(()) function {
from nested functions. */
unsigned int has_nonlocal_label : 1;
- /* Nonzero if we've set cannot_be_copied_reason. I.e. if
+ /* Nonzero if we've set cannot_be_copied_reason. I.e. if
(cannot_be_copied_set && !cannot_be_copied_reason), the function
can in fact be copied. */
unsigned int cannot_be_copied_set : 1;
diff --git a/gcc/fwprop.c b/gcc/fwprop.c
index 75a354ea54d..1159211ae98 100644
--- a/gcc/fwprop.c
+++ b/gcc/fwprop.c
@@ -905,7 +905,7 @@ update_df (rtx insn, rtx *loc, df_ref *use_rec, enum df_ref_type type,
use was within a MEM. */
new_use = df_ref_create (DF_REF_REG (orig_use), new_loc,
insn, BLOCK_FOR_INSN (insn),
- type, DF_REF_FLAGS (orig_use) | new_flags,
+ type, DF_REF_FLAGS (orig_use) | new_flags,
width, offset, mode);
/* Set up the use-def chain. */
diff --git a/gcc/gcc.c b/gcc/gcc.c
index 6bc8e150a67..543cb5a74b3 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -2960,7 +2960,7 @@ execute (void)
commands[0].prog = argbuf[0]; /* first command. */
commands[0].argv = &argbuf[0];
-
+
if (!wrapper_string)
{
string = find_a_file (&exec_prefixes, commands[0].prog, X_OK, false);
@@ -4309,7 +4309,7 @@ process_command (int argc, const char **argv)
/* Set up the search paths. We add directories that we expect to
contain GNU Toolchain components before directories specified by
the machine description so that we will find GNU components (like
- the GNU assembler) before those of the host system. */
+ the GNU assembler) before those of the host system. */
/* If we don't know where the toolchain has been installed, use the
configured-in locations. */
@@ -6265,7 +6265,7 @@ handle_braces (const char *p)
if (atom == end_atom)
{
if (!n_way_choice || disj_matched || *p == '|'
- || a_is_negated || a_is_suffix || a_is_spectype
+ || a_is_negated || a_is_suffix || a_is_spectype
|| a_is_starred)
goto invalid;
@@ -7048,9 +7048,9 @@ main (int argc, char **argv)
else if (*cross_compile == '0')
{
add_prefix (&startfile_prefixes,
- concat (gcc_exec_prefix
- ? gcc_exec_prefix : standard_exec_prefix,
- machine_suffix,
+ concat (gcc_exec_prefix
+ ? gcc_exec_prefix : standard_exec_prefix,
+ machine_suffix,
standard_startfile_prefix, NULL),
NULL, PREFIX_PRIORITY_LAST, 0, 1);
}
@@ -8564,9 +8564,9 @@ getenv_spec_function (int argc, const char **argv)
ptr[0] = '\\';
ptr[1] = *value++;
}
-
+
strcpy (ptr, argv[1]);
-
+
return result;
}
diff --git a/gcc/gcov-dump.c b/gcc/gcov-dump.c
index 174a02dafac..c090c986784 100644
--- a/gcc/gcov-dump.c
+++ b/gcc/gcov-dump.c
@@ -157,7 +157,7 @@ dump_file (const char *filename)
const char *type = NULL;
int endianness = 0;
char m[4], v[4];
-
+
if ((endianness = gcov_magic (magic, GCOV_DATA_MAGIC)))
type = "data";
else if ((endianness = gcov_magic (magic, GCOV_NOTE_MAGIC)))
@@ -171,13 +171,13 @@ dump_file (const char *filename)
version = gcov_read_unsigned ();
GCOV_UNSIGNED2STRING (v, version);
GCOV_UNSIGNED2STRING (m, magic);
-
+
printf ("%s:%s:magic `%.4s':version `%.4s'%s\n", filename, type,
m, v, endianness < 0 ? " (swapped endianness)" : "");
if (version != GCOV_VERSION)
{
char e[4];
-
+
GCOV_UNSIGNED2STRING (e, GCOV_VERSION);
printf ("%s:warning:current version is `%.4s'\n", filename, e);
}
@@ -189,7 +189,7 @@ dump_file (const char *filename)
printf ("%s:stamp %lu\n", filename, (unsigned long)stamp);
}
-
+
while (1)
{
gcov_position_t base, position = gcov_position ();
diff --git a/gcc/gcov-io.c b/gcc/gcov-io.c
index d736cf853d4..6d371cdcddf 100644
--- a/gcc/gcov-io.c
+++ b/gcc/gcov-io.c
@@ -73,7 +73,7 @@ gcov_open (const char *name, int mode)
s_flock.l_len = 0; /* Until EOF. */
s_flock.l_pid = getpid ();
#endif
-
+
gcc_assert (!gcov_var.file);
gcov_var.start = 0;
gcov_var.offset = gcov_var.length = 0;
@@ -145,7 +145,7 @@ gcov_open (const char *name, int mode)
#endif
setbuf (gcov_var.file, (char *)0);
-
+
return 1;
}
@@ -200,12 +200,12 @@ static void
gcov_allocate (unsigned length)
{
size_t new_size = gcov_var.alloc;
-
+
if (!new_size)
new_size = GCOV_BLOCK_SIZE;
new_size += length;
new_size *= 2;
-
+
gcov_var.alloc = new_size;
gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
}
@@ -248,7 +248,7 @@ gcov_write_words (unsigned words)
#endif
result = &gcov_var.buffer[gcov_var.offset];
gcov_var.offset += words;
-
+
return result;
}
@@ -296,7 +296,7 @@ gcov_write_string (const char *string)
length = strlen (string);
alloc = (length + 4) >> 2;
}
-
+
buffer = gcov_write_words (1 + alloc);
buffer[0] = alloc;
@@ -317,7 +317,7 @@ gcov_write_tag (gcov_unsigned_t tag)
buffer[0] = tag;
buffer[1] = 0;
-
+
return result;
}
@@ -389,7 +389,7 @@ gcov_read_words (unsigned words)
{
const gcov_unsigned_t *result;
unsigned excess = gcov_var.length - gcov_var.offset;
-
+
gcc_assert (gcov_var.mode > 0);
if (excess < words)
{
@@ -472,7 +472,7 @@ GCOV_LINKAGE const char *
gcov_read_string (void)
{
unsigned length = gcov_read_unsigned ();
-
+
if (!length)
return 0;
@@ -485,7 +485,7 @@ gcov_read_summary (struct gcov_summary *summary)
{
unsigned ix;
struct gcov_ctr_summary *csum;
-
+
summary->checksum = gcov_read_unsigned ();
for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
{
@@ -538,7 +538,7 @@ GCOV_LINKAGE time_t
gcov_time (void)
{
struct stat status;
-
+
if (fstat (fileno (gcov_var.file), &status))
return 0;
else
diff --git a/gcc/gcov-io.h b/gcc/gcov-io.h
index f4fe279a809..ffc62ca7f69 100644
--- a/gcc/gcov-io.h
+++ b/gcc/gcov-io.h
@@ -61,7 +61,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
file. It need not be an absolute time stamp, merely a ticker that
increments fast enough and cycles slow enough to distinguish
different compile/run/compile cycles.
-
+
Although the ident and version are formally 32 bit numbers, they
are derived from 4 character ASCII strings. The version number
consists of the single character major version number, a two
@@ -338,11 +338,11 @@ typedef HOST_WIDEST_INT gcov_type;
/* Number of counters used for value profiling. */
#define GCOV_N_VALUE_COUNTERS \
(GCOV_LAST_VALUE_COUNTER - GCOV_FIRST_VALUE_COUNTER + 1)
-
+
/* A list of human readable names of the counters */
#define GCOV_COUNTER_NAMES {"arcs", "interval", "pow2", "single", \
"delta","indirect_call", "average", "ior"}
-
+
/* Names of merge functions for counters. */
#define GCOV_MERGE_FUNCTIONS {"__gcov_merge_add", \
"__gcov_merge_add", \
@@ -352,7 +352,7 @@ typedef HOST_WIDEST_INT gcov_type;
"__gcov_merge_single", \
"__gcov_merge_add", \
"__gcov_merge_ior"}
-
+
/* Convert a counter index to a tag. */
#define GCOV_TAG_FOR_COUNTER(COUNT) \
(GCOV_TAG_COUNTER_BASE + ((gcov_unsigned_t)(COUNT) << 17))
@@ -438,7 +438,7 @@ struct gcov_info
gcov_unsigned_t stamp; /* uniquifying time stamp */
const char *filename; /* output file name */
-
+
unsigned n_functions; /* number of functions */
const struct gcov_fn_info *functions; /* table of functions */
@@ -469,7 +469,7 @@ extern void __gcov_merge_delta (gcov_type *, unsigned) ATTRIBUTE_HIDDEN;
extern void __gcov_merge_ior (gcov_type *, unsigned) ATTRIBUTE_HIDDEN;
/* The profiler functions. */
-extern void __gcov_interval_profiler (gcov_type *, gcov_type, int, unsigned);
+extern void __gcov_interval_profiler (gcov_type *, gcov_type, int, unsigned);
extern void __gcov_pow2_profiler (gcov_type *, gcov_type);
extern void __gcov_one_value_profiler (gcov_type *, gcov_type);
extern void __gcov_indirect_call_profiler (gcov_type *, gcov_type, void *, void *);
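As an aside to the comment above: since the ident and version words are
packed from four ASCII characters, unpacking one for a diagnostic is a
matter of shifting the bytes back out, much as gcov.c does below via
GCOV_UNSIGNED2STRING.  A minimal sketch (the helper name is hypothetical):

/* Unpack a gcov ident/version word into its four ASCII characters.  */
static void
gcov_word_to_string (char buf[5], unsigned value)
{
  buf[0] = (char) (value >> 24);
  buf[1] = (char) (value >> 16);
  buf[2] = (char) (value >> 8);
  buf[3] = (char) (value >> 0);
  buf[4] = '\0';
}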
diff --git a/gcc/gcov.c b/gcc/gcov.c
index 4f5c3d4ebba..1d1fc0bc358 100644
--- a/gcc/gcov.c
+++ b/gcc/gcov.c
@@ -684,7 +684,7 @@ create_file_names (const char *file_name)
*cptr = 0;
length = strlen (name);
-
+
bbg_file_name = XNEWVEC (char, length + strlen (GCOV_NOTE_SUFFIX) + 1);
strcpy (bbg_file_name, name);
strcpy (bbg_file_name + length, GCOV_NOTE_SUFFIX);
@@ -721,7 +721,7 @@ find_source (const char *file_name)
src->index = source_index++;
src->next = sources;
sources = src;
-
+
if (!stat (file_name, &status))
src->file_time = status.st_mtime;
}
@@ -1043,7 +1043,7 @@ read_count_file (void)
GCOV_UNSIGNED2STRING (v, version);
GCOV_UNSIGNED2STRING (e, GCOV_VERSION);
-
+
fnotice (stderr, "%s:version '%.4s', prefer version '%.4s'\n",
da_file_name, v, e);
}
@@ -1896,11 +1896,11 @@ output_lines (FILE *gcov_file, const source_t *src)
{
arc_t *arc = fn->blocks[fn->num_blocks - 1].pred;
gcov_type return_count = fn->blocks[fn->num_blocks - 1].count;
-
+
for (; arc; arc = arc->pred_next)
if (arc->fake)
return_count -= arc->count;
-
+
fprintf (gcov_file, "function %s", fn->name);
fprintf (gcov_file, " called %s",
format_gcov (fn->blocks[0].count, 0, -1));
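A concrete example for the create_file_names hunk earlier in this file:
assuming the usual ".gcno" value of GCOV_NOTE_SUFFIX, an input of
"foo.c" has its extension stripped, so

  bbg_file_name = "foo" GCOV_NOTE_SUFFIX   /* i.e. "foo.gcno" */

and the da_file_name used below is presumably built the same way from
the data suffix, i.e. "foo.gcda".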
diff --git a/gcc/gcse.c b/gcc/gcse.c
index 0da075f2585..8be10cdbb3a 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -214,7 +214,7 @@ along with GCC; see the file COPYING3. If not see
In addition, expressions in REG_EQUAL notes are candidates for GXSE-ing.
This allows PRE to hoist expressions that are expressed in multiple insns,
- such as comprex address calculations (e.g. for PIC code, or loads with a
+ such as comprex address calculations (e.g. for PIC code, or loads with a
high part and as lowe part).
PRE handles moving invariant expressions out of loops (by treating them as
@@ -843,17 +843,17 @@ can_assign_to_reg_without_clobbers_p (rtx x)
valid. */
PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
SET_SRC (PATTERN (test_insn)) = x;
-
+
icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
if (icode < 0)
return false;
-
+
if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
return false;
-
+
if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
return false;
-
+
return true;
}
@@ -2094,7 +2094,7 @@ compute_transp (const_rtx x, int indx, sbitmap *bmap, int set_p)
/* Now iterate over the blocks which have memory modifications
but which do not have any calls. */
- EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
+ EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
blocks_with_calls,
0, bb_index, bi)
{
@@ -2993,7 +2993,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
{
removed_p = 0;
-
+
if (e->flags & EDGE_COMPLEX)
{
ei_next (&ei);
@@ -3328,7 +3328,7 @@ pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_bloc
{
edge pred;
edge_iterator ei;
-
+
FOR_EACH_EDGE (pred, ei, bb->preds)
{
basic_block pred_bb = pred->src;
@@ -3410,7 +3410,7 @@ process_insert_insn (struct expr *expr)
if (insn_invalid_p (insn))
gcc_unreachable ();
}
-
+
pat = get_insns ();
end_sequence ();
@@ -4980,7 +4980,7 @@ one_cprop_pass (void)
FIXME: This local pass should not be necessary after CSE (but for
some reason it still is). It is also (proven) not necessary
to run the local pass right after FWPWOP.
-
+
FIXME: The global analysis would not get into infinite loops if it
would use the DF solver (via df_simple_dataflow) instead of
the solver implemented in this file. */
@@ -5127,8 +5127,8 @@ struct rtl_opt_pass pass_rtl_cprop =
{
RTL_PASS,
"cprop", /* name */
- gate_rtl_cprop, /* gate */
- execute_rtl_cprop, /* execute */
+ gate_rtl_cprop, /* gate */
+ execute_rtl_cprop, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
@@ -5148,8 +5148,8 @@ struct rtl_opt_pass pass_rtl_pre =
{
RTL_PASS,
"rtl pre", /* name */
- gate_rtl_pre, /* gate */
- execute_rtl_pre, /* execute */
+ gate_rtl_pre, /* gate */
+ execute_rtl_pre, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
@@ -5169,8 +5169,8 @@ struct rtl_opt_pass pass_rtl_hoist =
{
RTL_PASS,
"hoist", /* name */
- gate_rtl_hoist, /* gate */
- execute_rtl_hoist, /* execute */
+ gate_rtl_hoist, /* gate */
+ execute_rtl_hoist, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
diff --git a/gcc/genattr.c b/gcc/genattr.c
index ac3426a2ae9..18bba53034d 100644
--- a/gcc/genattr.c
+++ b/gcc/genattr.c
@@ -256,7 +256,7 @@ main (int argc, char **argv)
printf (" define_insn_reservation will be changed after\n");
printf (" last call of dfa_start. */\n");
printf ("extern void dfa_clean_insn_cache (void);\n\n");
- printf ("extern void dfa_clear_single_insn_cache (rtx);\n\n");
+ printf ("extern void dfa_clear_single_insn_cache (rtx);\n\n");
printf ("/* Initiate and finish work with DFA. They should be\n");
printf (" called as the first and the last interface\n");
printf (" functions. */\n");
diff --git a/gcc/genattrtab.c b/gcc/genattrtab.c
index 4d6295af604..1e5198a3dad 100644
--- a/gcc/genattrtab.c
+++ b/gcc/genattrtab.c
@@ -1941,22 +1941,22 @@ evaluate_eq_attr (rtx exp, rtx value, int insn_code, int insn_index)
else
newexp = false_rtx;
break;
-
+
case SYMBOL_REF:
{
char *p;
char string[256];
-
+
gcc_assert (GET_CODE (exp) == EQ_ATTR);
gcc_assert (strlen (XSTR (exp, 0)) + strlen (XSTR (exp, 1)) + 2
<= 256);
-
+
strcpy (string, XSTR (exp, 0));
strcat (string, "_");
strcat (string, XSTR (exp, 1));
for (p = string; *p; p++)
*p = TOUPPER (*p);
-
+
newexp = attr_rtx (EQ, value,
attr_rtx (SYMBOL_REF,
DEF_ATTR_STRING (string)));
@@ -1967,12 +1967,12 @@ evaluate_eq_attr (rtx exp, rtx value, int insn_code, int insn_index)
/* We construct an IOR of all the cases for which the
requested attribute value is present. Since we start with
FALSE, if it is not present, FALSE will be returned.
-
+
Each case is the AND of the NOT's of the previous conditions with the
current condition; in the default case the current condition is TRUE.
-
+
For each possible COND value, call ourselves recursively.
-
+
The extra TRUE and FALSE expressions will be eliminated by another
call to the simplification routine. */
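A small worked example of the construction described above, with
hypothetical tests test1 and test2: for

  (cond [(test1) (const_string "v1")
         (test2) (const_string "v2")]
        (const_string "v1"))

asked whether the attribute equals "v1", case 1 contributes (test1),
case 2 contributes nothing ("v2" is not the requested value), and the
default contributes (and (not (test1)) (not (test2))), so the result is

  (ior (test1) (and (not (test1)) (not (test2))))

which the extra simplification call then cleans up.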
@@ -2108,7 +2108,7 @@ simplify_and_tree (rtx exp, rtx *pterm, int insn_code, int insn_index)
if (attr_alt_subset_p (exp, *pterm))
*pterm = true_rtx;
-
+
return exp;
}
@@ -4307,7 +4307,7 @@ gen_insn_reserv (rtx def)
decl->insn_num = n_insn_reservs;
decl->bypassed = false;
decl->next = 0;
-
+
*last_insn_reserv_p = decl;
last_insn_reserv_p = &decl->next;
n_insn_reservs++;
@@ -4392,7 +4392,7 @@ make_automaton_attrs (void)
code_exp = rtx_alloc (COND);
lats_exp = rtx_alloc (COND);
-
+
XVEC (code_exp, 0) = rtvec_alloc (n_insn_reservs * 2);
XVEC (lats_exp, 0) = rtvec_alloc (n_insn_reservs * 2);
@@ -4405,7 +4405,7 @@ make_automaton_attrs (void)
{
XVECEXP (code_exp, 0, i) = decl->condexp;
XVECEXP (lats_exp, 0, i) = decl->condexp;
-
+
XVECEXP (code_exp, 0, i+1) = make_numeric_value (decl->insn_num);
XVECEXP (lats_exp, 0, i+1) = make_numeric_value (decl->default_latency);
}
diff --git a/gcc/genautomata.c b/gcc/genautomata.c
index 77dea231ed5..f3321410da5 100644
--- a/gcc/genautomata.c
+++ b/gcc/genautomata.c
@@ -2382,7 +2382,7 @@ insert_bypass (struct bypass_decl *bypass)
struct bypass_decl *curr, *last;
struct insn_reserv_decl *out_insn_reserv = bypass->out_insn_reserv;
struct insn_reserv_decl *in_insn_reserv = bypass->in_insn_reserv;
-
+
for (curr = out_insn_reserv->bypass_list, last = NULL;
curr != NULL;
last = curr, curr = curr->next)
@@ -2420,7 +2420,7 @@ insert_bypass (struct bypass_decl *bypass)
last = curr;
break;
}
-
+
}
if (last == NULL)
{
@@ -2936,7 +2936,7 @@ process_regexp_cycles (regexp_t regexp, int max_start_cycle,
{
int max_cycle = 0;
int min_cycle = 0;
-
+
for (i = 0; i < REGEXP_ALLOF (regexp)->regexps_num; i++)
{
process_regexp_cycles (REGEXP_ALLOF (regexp)->regexps [i],
@@ -2956,7 +2956,7 @@ process_regexp_cycles (regexp_t regexp, int max_start_cycle,
{
int max_cycle = 0;
int min_cycle = 0;
-
+
for (i = 0; i < REGEXP_ONEOF (regexp)->regexps_num; i++)
{
process_regexp_cycles (REGEXP_ONEOF (regexp)->regexps [i],
@@ -4779,7 +4779,7 @@ transform_3 (regexp_t regexp)
default:
break;
}
-
+
if (allof_length == 1)
REGEXP_SEQUENCE (result)->regexps [i] = allof_op;
else
@@ -4963,7 +4963,7 @@ store_alt_unit_usage (regexp_t regexp, regexp_t unit, int cycle,
length = (cycle + 1) * REGEXP_ONEOF (regexp)->regexps_num;
while (VEC_length (unit_usage_t, cycle_alt_unit_usages) < length)
VEC_safe_push (unit_usage_t, heap, cycle_alt_unit_usages, 0);
-
+
index = cycle * REGEXP_ONEOF (regexp)->regexps_num + alt_num;
prev = NULL;
for (curr = VEC_index (unit_usage_t, cycle_alt_unit_usages, index);
@@ -5078,14 +5078,14 @@ check_regexp_units_distribution (const char *insn_reserv_name,
gcc_assert (unit->mode == rm_nothing);
}
break;
-
+
case rm_unit:
store_alt_unit_usage (regexp, allof, j, i);
break;
-
+
case rm_nothing:
break;
-
+
default:
gcc_unreachable ();
}
@@ -5101,10 +5101,10 @@ check_regexp_units_distribution (const char *insn_reserv_name,
case rm_unit:
store_alt_unit_usage (regexp, unit, 0, i);
break;
-
+
case rm_nothing:
break;
-
+
default:
gcc_unreachable ();
}
@@ -5271,7 +5271,7 @@ process_seq_for_forming_states (regexp_t regexp, automaton_t automaton,
set_state_reserv (state_being_formed, curr_cycle,
REGEXP_UNIT (regexp)->unit_decl->unit_num);
return curr_cycle;
-
+
case rm_sequence:
for (i = 0; i < REGEXP_SEQUENCE (regexp)->regexps_num; i++)
curr_cycle
@@ -5283,7 +5283,7 @@ process_seq_for_forming_states (regexp_t regexp, automaton_t automaton,
{
int finish_cycle = 0;
int cycle;
-
+
for (i = 0; i < REGEXP_ALLOF (regexp)->regexps_num; i++)
{
cycle = process_seq_for_forming_states (REGEXP_ALLOF (regexp)
@@ -5819,7 +5819,7 @@ cache_presence (state_t state)
unsigned int sz;
sz = (description->query_units_num + sizeof (int) * CHAR_BIT - 1)
/ (sizeof (int) * CHAR_BIT);
-
+
state->presence_signature = XCREATENODEVEC (unsigned int, sz);
for (i = 0; i < description->units_num; i++)
if (units_array [i]->query_p)
@@ -5965,7 +5965,7 @@ partition_equiv_class (state_t first_state, int odd_iteration_flag,
curr_state = next_state)
{
next_state = curr_state->next_equiv_class_state;
- if (state_is_differed (curr_state, first_state,
+ if (state_is_differed (curr_state, first_state,
odd_iteration_flag))
{
/* Remove curr state from the class equivalence. */
@@ -6705,11 +6705,11 @@ form_regexp (regexp_t regexp)
const char *name = (regexp->mode == rm_unit
? REGEXP_UNIT (regexp)->name
: REGEXP_RESERV (regexp)->name);
-
+
obstack_grow (&irp, name, strlen (name));
break;
}
-
+
case rm_sequence:
for (i = 0; i < REGEXP_SEQUENCE (regexp)->regexps_num; i++)
{
@@ -6735,7 +6735,7 @@ form_regexp (regexp_t regexp)
}
obstack_1grow (&irp, ')');
break;
-
+
case rm_oneof:
for (i = 0; i < REGEXP_ONEOF (regexp)->regexps_num; i++)
{
@@ -6748,11 +6748,11 @@ form_regexp (regexp_t regexp)
obstack_1grow (&irp, ')');
}
break;
-
+
case rm_repeat:
{
char digits [30];
-
+
if (REGEXP_REPEAT (regexp)->regexp->mode == rm_sequence
|| REGEXP_REPEAT (regexp)->regexp->mode == rm_allof
|| REGEXP_REPEAT (regexp)->regexp->mode == rm_oneof)
@@ -7654,7 +7654,7 @@ output_dead_lock_vect (automaton_t automaton)
output_states_vect = 0;
pass_states (automaton, add_states_vect_el);
- VEC_safe_grow (vect_el_t, heap, dead_lock_vect,
+ VEC_safe_grow (vect_el_t, heap, dead_lock_vect,
VEC_length (state_t, output_states_vect));
for (i = 0; i < VEC_length (state_t, output_states_vect); i++)
{
@@ -7709,7 +7709,7 @@ output_reserved_units_table (automaton_t automaton)
* state_byte_size);
reserved_units_table = VEC_alloc (vect_el_t, heap, reserved_units_size);
-
+
for (i = 0; i < reserved_units_size; i++)
VEC_quick_push (vect_el_t, reserved_units_table, 0);
for (n = 0; n < VEC_length (state_t, output_states_vect); n++)
@@ -8218,7 +8218,7 @@ output_min_insn_conflict_delay_func (void)
fprintf (output_file, "}\n\n");
}
-/* Output the array holding default latency values. These are used in
+/* Output the array holding default latency values. These are used in
insn_latency and maximal_insn_latency function implementations. */
static void
output_default_latencies (void)
@@ -8442,7 +8442,7 @@ output_print_reservation_func (void)
{
gcc_assert (j == DECL_INSN_RESERV (decl)->insn_num);
j++;
-
+
fprintf (output_file, "\n \"%s\",",
regexp_representation (DECL_INSN_RESERV (decl)->regexp));
finish_regexp_representation ();
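As an aside to the cache_presence hunk above: the presence signature is
a bit vector with one bit per query unit, stored in an array of ints,
which is what the rounding-up division is sizing.  A minimal sketch of
that layout (function names are illustrative, not code genautomata.c
actually contains):

#include <limits.h>            /* for CHAR_BIT */

/* Number of ints needed for a signature covering N_UNITS query units;
   the same rounding-up division as in cache_presence.  */
static unsigned
signature_size (unsigned n_units)
{
  return (n_units + sizeof (int) * CHAR_BIT - 1) / (sizeof (int) * CHAR_BIT);
}

/* Mark query unit I as present in SIG.  */
static void
set_presence_bit (unsigned int *sig, unsigned i)
{
  sig[i / (sizeof (int) * CHAR_BIT)] |= 1u << (i % (sizeof (int) * CHAR_BIT));
}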
diff --git a/gcc/genchecksum.c b/gcc/genchecksum.c
index fa00d0e3225..38487a05103 100644
--- a/gcc/genchecksum.c
+++ b/gcc/genchecksum.c
@@ -34,21 +34,21 @@ dosum (const char *file)
FILE *f;
unsigned char result[16];
int i;
-
+
f = fopen (file, "rb");
if (!f)
{
fprintf (stderr, "opening %s: %s\n", file, xstrerror (errno));
exit (1);
}
-
+
/* Some executable formats have timestamps in the first 16 bytes, yuck. */
if (fseek (f, 16, SEEK_SET) != 0)
{
fprintf (stderr, "seeking in %s: %s\n", file, xstrerror (errno));
exit (1);
}
-
+
if (md5_stream (f, result) != 0
|| fclose (f) != 0)
{
diff --git a/gcc/genconfig.c b/gcc/genconfig.c
index 7498f6b5bc5..a0a023dce47 100644
--- a/gcc/genconfig.c
+++ b/gcc/genconfig.c
@@ -57,7 +57,7 @@ static void gen_peephole (rtx);
static void gen_peephole2 (rtx);
/* RECOG_P will be nonzero if this pattern was seen in a context where it will
- be used to recognize, rather than just generate an insn.
+ be used to recognize, rather than just generate an insn.
NON_PC_SET_SRC will be nonzero if this pattern was seen in a SET_SRC
of a SET whose destination is not (pc). */
@@ -287,13 +287,13 @@ main (int argc, char **argv)
desc = read_md_rtx (&line_no, &insn_code_number);
if (desc == NULL)
break;
-
- switch (GET_CODE (desc))
+
+ switch (GET_CODE (desc))
{
case DEFINE_INSN:
gen_insn (desc);
break;
-
+
case DEFINE_EXPAND:
gen_expand (desc);
break;
diff --git a/gcc/genflags.c b/gcc/genflags.c
index 53641010935..fcfd2f1bd89 100644
--- a/gcc/genflags.c
+++ b/gcc/genflags.c
@@ -195,7 +195,7 @@ gen_insn (int line_no, rtx insn)
int len;
int truth = maybe_eval_c_test (XSTR (insn, 2));
- lt = strchr (name, '<');
+ lt = strchr (name, '<');
if (lt && strchr (lt + 1, '>'))
{
message_with_line (line_no, "unresolved iterator");
@@ -203,7 +203,7 @@ gen_insn (int line_no, rtx insn)
return;
}
- gt = strchr (name, '>');
+ gt = strchr (name, '>');
if (lt || gt)
{
message_with_line (line_no,
diff --git a/gcc/gengtype-parse.c b/gcc/gengtype-parse.c
index 6977ff26d81..c6b6e933cc5 100644
--- a/gcc/gengtype-parse.c
+++ b/gcc/gengtype-parse.c
@@ -695,7 +695,7 @@ type (options_p *optsp, bool nested)
case UNION:
{
options_p opts = 0;
- /* GTY annotations follow attribute syntax
+ /* GTY annotations follow attribute syntax
GTY_BEFORE_ID is for union/struct declarations
GTY_AFTER_ID is for variable declarations. */
enum {
@@ -729,22 +729,22 @@ type (options_p *optsp, bool nested)
is_gty = GTY_AFTER_ID;
opts = gtymarker_opt ();
}
-
- if (is_gty)
+
+ if (is_gty)
{
if (token () == '{')
{
pair_p fields;
- if (is_gty == GTY_AFTER_ID)
+ if (is_gty == GTY_AFTER_ID)
parse_error ("GTY must be specified before identifier");
-
+
advance ();
fields = struct_field_seq ();
require ('}');
return new_structure (s, is_union, &lexer_line, fields, opts);
}
- }
+ }
else if (token () == '{')
consume_balanced ('{', '}');
if (opts)
diff --git a/gcc/gengtype.c b/gcc/gengtype.c
index 32abf6eff12..91f3f57d7c9 100644
--- a/gcc/gengtype.c
+++ b/gcc/gengtype.c
@@ -463,7 +463,7 @@ read_input_list (const char *listname)
things there are. (We do not bother resizing the arrays down.) */
num_lang_dirs = langno;
/* Add the plugin files if provided. */
- if (plugin_files)
+ if (plugin_files)
{
size_t i;
for (i = 0; i < nb_plugin_files; i++)
@@ -976,7 +976,7 @@ write_rtx_next (void)
{
outf_p f = get_output_file_with_visibility (NULL);
int i;
- if (!f)
+ if (!f)
return;
oprintf (f, "\n/* Used to implement the RTX_NEXT macro. */\n");
@@ -1393,7 +1393,7 @@ set_gc_used_type (type_p t, enum gc_used_enum level, type_p param[NUM_PARAM])
&length, &skip, &nested_ptr);
if (nested_ptr && f->type->kind == TYPE_POINTER)
- set_gc_used_type (nested_ptr, GC_POINTED_TO,
+ set_gc_used_type (nested_ptr, GC_POINTED_TO,
pass_param ? param : NULL);
else if (length && f->type->kind == TYPE_POINTER)
set_gc_used_type (f->type->u.p, GC_USED, NULL);
@@ -1503,7 +1503,7 @@ create_file (const char *name, const char *oname)
return f;
}
-/* Print, like fprintf, to O.
+/* Print, like fprintf, to O.
N.B. You might think this could be implemented more efficiently
with vsnprintf(). Unfortunately, there are C libraries that
provide that function but without the C99 semantics for its return
@@ -1517,7 +1517,7 @@ oprintf (outf_p o, const char *format, ...)
/* In plugin mode, the O could be a NULL pointer, so avoid crashing
in that case. */
- if (!o)
+ if (!o)
return;
va_start (ap, format);
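For contrast with the oprintf comment above, which deliberately avoids
relying on C99 vsnprintf semantics: a minimal sketch of the two-pass
idiom those semantics would permit (this is what gengtype.c does not
do; the helper name is hypothetical):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* With C99 semantics, vsnprintf (NULL, 0, ...) returns the length the
   output would need, so the buffer can be sized exactly.  Pre-C99
   libraries may return -1 instead, which is why oprintf cannot depend
   on this.  */
static char *
format_alloc (const char *fmt, ...)
{
  va_list ap, ap2;
  int needed;
  char *buf;

  va_start (ap, fmt);
  va_copy (ap2, ap);
  needed = vsnprintf (NULL, 0, fmt, ap);
  buf = (char *) malloc (needed + 1);
  vsnprintf (buf, needed + 1, fmt, ap2);
  va_end (ap2);
  va_end (ap);
  return buf;
}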
@@ -1564,7 +1564,7 @@ open_base_files (void)
{
/* The order of files here matters very much. */
static const char *const ifiles [] = {
- "config.h", "system.h", "coretypes.h", "tm.h", "varray.h",
+ "config.h", "system.h", "coretypes.h", "tm.h", "varray.h",
"hashtab.h", "splay-tree.h", "obstack.h", "bitmap.h", "input.h",
"tree.h", "rtl.h", "function.h", "insn-config.h", "expr.h",
"hard-reg-set.h", "basic-block.h", "cselib.h", "insn-addr.h",
@@ -1593,7 +1593,7 @@ static const char *
get_file_realbasename (const char *f)
{
const char * lastslash = strrchr (f, '/');
-
+
return (lastslash != NULL) ? lastslash + 1 : f;
}
@@ -1635,7 +1635,7 @@ get_prefix_langdir_index (const char *f)
{
const char * langdir = lang_dir_names [lang_index];
size_t langdir_len = strlen (langdir);
-
+
if (f_len > langdir_len
&& IS_DIR_SEPARATOR (f[langdir_len])
&& memcmp (f, langdir, langdir_len) == 0)
@@ -1676,7 +1676,7 @@ get_file_gtfilename (const char *f)
const char *basename = get_file_realbasename (f);
const char *langdir = get_file_langdir (f);
-
+
char * result =
(langdir ? xasprintf ("gt-%s-%s", langdir, basename)
: xasprintf ("gt-%s", basename));
@@ -1735,7 +1735,7 @@ get_output_file_with_visibility (const char *input_file)
|| (len > 2 && memcmp (basename+len-2, ".y", 2) == 0)
|| (len > 3 && memcmp (basename+len-3, ".in", 3) == 0))
{
- output_name = get_file_gtfilename (input_file);
+ output_name = get_file_gtfilename (input_file);
for_name = basename;
}
/* Some headers get used by more than one front-end; hence, it
@@ -1761,7 +1761,7 @@ get_output_file_with_visibility (const char *input_file)
else if (strncmp (basename, "objc", 4) == 0 && IS_DIR_SEPARATOR (basename[4])
&& strcmp (basename + 5, "objc-act.h") == 0)
output_name = "gt-objc-objc-act.h", for_name = "objc/objc-act.c";
- else
+ else
{
int lang_index = get_prefix_langdir_index (basename);
@@ -2172,9 +2172,9 @@ walk_type (type_p t, struct walk_type_data *d)
d->indent += 2;
d->val = xasprintf ("x%d", d->counter++);
oprintf (d->of, "%*s%s %s * %s%s =\n", d->indent, "",
- (nested_ptr_d->type->kind == TYPE_UNION
- ? "union" : "struct"),
- nested_ptr_d->type->u.s.tag,
+ (nested_ptr_d->type->kind == TYPE_UNION
+ ? "union" : "struct"),
+ nested_ptr_d->type->u.s.tag,
d->fn_wants_lvalue ? "" : "const ",
d->val);
oprintf (d->of, "%*s", d->indent + 2, "");
@@ -2262,7 +2262,7 @@ walk_type (type_p t, struct walk_type_data *d)
else
oprintf (d->of, "%s", t->u.a.len);
oprintf (d->of, ");\n");
-
+
oprintf (d->of, "%*sfor (i%d = 0; i%d != l%d; i%d++) {\n",
d->indent, "",
loopcounter, loopcounter, loopcounter, loopcounter);
@@ -2790,8 +2790,8 @@ write_types (outf_p output_header, type_p structures, type_p param_structs,
continue;
}
}
-
- /* At last we emit the functions code. */
+
+ /* At last we emit the functions code. */
oprintf (output_header, "\n/* functions code */\n");
for (s = structures; s; s = s->next)
if (s->gc_used == GC_POINTED_TO
@@ -2807,7 +2807,7 @@ write_types (outf_p output_header, type_p structures, type_p param_structs,
break;
if (opt)
continue;
-
+
if (s->kind == TYPE_LANG_STRUCT)
{
type_p ss;
@@ -2934,7 +2934,7 @@ write_local (outf_p output_header, type_p structures, type_p param_structs)
{
type_p s;
- if (!output_header)
+ if (!output_header)
return;
oprintf (output_header, "\n/* Local pointer-walking routines. */\n");
for (s = structures; s; s = s->next)
@@ -3021,7 +3021,7 @@ write_enum_defn (type_p structures, type_p param_structs)
{
type_p s;
- if (!header_file)
+ if (!header_file)
return;
oprintf (header_file, "\n/* Enumeration of types known. */\n");
oprintf (header_file, "enum gt_types_enum {\n");
@@ -3073,7 +3073,7 @@ static void
put_mangled_filename (outf_p f, const char *fn)
{
const char *name = get_output_file_name (fn);
- if (!f || !name)
+ if (!f || !name)
return;
for (; *name != 0; name++)
if (ISALNUM (*name))
@@ -3719,11 +3719,11 @@ main (int argc, char **argv)
strcpy (plugin_files[i], name);
}
}
- else if (argc == 3)
+ else if (argc == 3)
{
srcdir = argv[1];
inputlist = argv[2];
- }
+ }
else
fatal ("usage: gengtype [-P pluginout.h] srcdir input-list "
"[file1 file2 ... fileN]");
diff --git a/gcc/gengtype.h b/gcc/gengtype.h
index 533b508b556..cb58fa7af81 100644
--- a/gcc/gengtype.h
+++ b/gcc/gengtype.h
@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_GENGTYPE_H
#define GCC_GENGTYPE_H
-/* A file position, mostly for error messages.
+/* A file position, mostly for error messages.
The FILE element may be compared using pointer equality. */
struct fileloc {
const char *file;
@@ -38,7 +38,7 @@ extern int lexer_toplevel_done;
extern struct fileloc lexer_line;
/* Print an error message. */
-extern void error_at_line
+extern void error_at_line
(struct fileloc *pos, const char *msg, ...) ATTRIBUTE_PRINTF_2;
/* Like asprintf, but calls fatal() on out of memory. */
diff --git a/gcc/genmddeps.c b/gcc/genmddeps.c
index 7e5ec80e306..fb5c3113e54 100644
--- a/gcc/genmddeps.c
+++ b/gcc/genmddeps.c
@@ -48,7 +48,7 @@ main (int argc, char **argv)
progname = "genmddeps";
include_callback = add_filedep;
-
+
if (init_md_reader_args (argc, argv) != SUCCESS_EXIT_CODE)
return (FATAL_EXIT_CODE);
diff --git a/gcc/genmodes.c b/gcc/genmodes.c
index 3851aff11e4..2aa559f3d25 100644
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -179,7 +179,7 @@ new_mode (enum mode_class cl, const char *name,
n_modes[cl]++;
*htab_find_slot (modes_by_name, m, INSERT) = m;
-
+
return m;
}
@@ -230,7 +230,7 @@ new_adjust (const char *name,
mode_class_names[required_class_to] + 5);
return;
}
-
+
for (a = *category; a; a = a->next)
if (a->mode == mode)
{
@@ -1147,7 +1147,7 @@ emit_class_narrowest_mode (void)
? modes[c]->next->name
: void_mode->name))
: void_mode->name);
-
+
print_closer ();
}
@@ -1295,7 +1295,7 @@ emit_mode_adjustments (void)
a->file, a->line, a->adjustment);
printf (" mode_fbit[%smode] = s;\n", a->mode->name);
}
-
+
/* Real mode formats don't have to propagate anywhere. */
for (a = adj_format; a; a = a->next)
printf ("\n /* %s:%d */\n REAL_MODE_FORMAT (%smode) = %s;\n",
@@ -1397,7 +1397,7 @@ main (int argc, char **argv)
if (have_error)
return FATAL_EXIT_CODE;
-
+
calc_wider_mode ();
if (gen_header)
diff --git a/gcc/genopinit.c b/gcc/genopinit.c
index 52e0dd9462a..d28baabba7f 100644
--- a/gcc/genopinit.c
+++ b/gcc/genopinit.c
@@ -182,9 +182,9 @@ static const char * const optabs[] =
"optab_handler (logb_optab, $A)->insn_code = CODE_FOR_$(logb$a2$)",
"optab_handler (ilogb_optab, $A)->insn_code = CODE_FOR_$(ilogb$a2$)",
"optab_handler (log_optab, $A)->insn_code = CODE_FOR_$(log$a2$)",
- "optab_handler (log10_optab, $A)->insn_code = CODE_FOR_$(log10$a2$)",
- "optab_handler (log2_optab, $A)->insn_code = CODE_FOR_$(log2$a2$)",
- "optab_handler (log1p_optab, $A)->insn_code = CODE_FOR_$(log1p$a2$)",
+ "optab_handler (log10_optab, $A)->insn_code = CODE_FOR_$(log10$a2$)",
+ "optab_handler (log2_optab, $A)->insn_code = CODE_FOR_$(log2$a2$)",
+ "optab_handler (log1p_optab, $A)->insn_code = CODE_FOR_$(log1p$a2$)",
"optab_handler (tan_optab, $A)->insn_code = CODE_FOR_$(tan$a2$)",
"optab_handler (atan_optab, $A)->insn_code = CODE_FOR_$(atan$a2$)",
"optab_handler (strlen_optab, $A)->insn_code = CODE_FOR_$(strlen$a$)",
@@ -366,14 +366,14 @@ gen_insn (rtx insn)
break;
if (*p == 0
- && (! force_int || mode_class[i] == MODE_INT
+ && (! force_int || mode_class[i] == MODE_INT
|| mode_class[i] == MODE_VECTOR_INT)
&& (! force_partial_int
|| mode_class[i] == MODE_INT
|| mode_class[i] == MODE_PARTIAL_INT
|| mode_class[i] == MODE_VECTOR_INT)
&& (! force_float
- || mode_class[i] == MODE_FLOAT
+ || mode_class[i] == MODE_FLOAT
|| mode_class[i] == MODE_DECIMAL_FLOAT
|| mode_class[i] == MODE_COMPLEX_FLOAT
|| mode_class[i] == MODE_VECTOR_FLOAT)
diff --git a/gcc/genpreds.c b/gcc/genpreds.c
index 7f76270c66c..db56c0a4948 100644
--- a/gcc/genpreds.c
+++ b/gcc/genpreds.c
@@ -102,7 +102,7 @@ process_define_predicate (rtx defn, int lineno)
for (p = XSTR (defn, 0) + 1; *p; p++)
if (!ISALNUM (*p) && *p != '_')
goto bad_name;
-
+
if (validate_exp (XEXP (defn, 1), XSTR (defn, 0), lineno))
return;
@@ -153,7 +153,7 @@ process_define_predicate (rtx defn, int lineno)
The only wart is that there's no way to insist on a { } string in
an RTL template, so we have to handle "" strings. */
-
+
static void
write_predicate_subfunction (struct pred_data *p)
{
@@ -290,7 +290,7 @@ mark_mode_tests (rtx exp)
NO_MODE_TEST (exp) = (NO_MODE_TEST (XEXP (exp, 0))
&& NO_MODE_TEST (XEXP (exp, 1)));
break;
-
+
case IOR:
mark_mode_tests (XEXP (exp, 0));
mark_mode_tests (XEXP (exp, 1));
@@ -383,23 +383,23 @@ add_mode_tests (struct pred_data *p)
{
int test0 = NO_MODE_TEST (XEXP (subexp, 0));
int test1 = NO_MODE_TEST (XEXP (subexp, 1));
-
+
gcc_assert (test0 || test1);
-
+
if (test0 && test1)
goto break_loop;
pos = test0 ? &XEXP (subexp, 0) : &XEXP (subexp, 1);
}
break;
-
+
case IF_THEN_ELSE:
{
int test0 = NO_MODE_TEST (XEXP (subexp, 0));
int test1 = NO_MODE_TEST (XEXP (subexp, 1));
int test2 = NO_MODE_TEST (XEXP (subexp, 2));
-
+
gcc_assert ((test0 && test1) || test2);
-
+
if (test0 && test1 && test2)
goto break_loop;
if (test0 && test1)
@@ -411,7 +411,7 @@ add_mode_tests (struct pred_data *p)
pos = &XEXP (subexp, 2);
}
break;
-
+
default:
goto break_loop;
}
@@ -473,7 +473,7 @@ write_match_code (const char *path, const char *codes)
putchar (TOUPPER (*code));
code++;
}
-
+
if (*codes == ',')
fputs (" || ", stdout);
}
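As a reading aid for the write_match_code hunk above: for a top-level
(match_code "const_int,const_double") in a predicate body, the emitted
test is roughly

  GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE

with the codes upcased and joined by " || " exactly as the loop above
shows (the spelling of the operand expression depends on the path
argument).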
@@ -493,7 +493,7 @@ write_predicate_expr (rtx exp)
write_predicate_expr (XEXP (exp, 1));
putchar (')');
break;
-
+
case IOR:
putchar ('(');
write_predicate_expr (XEXP (exp, 0));
@@ -801,7 +801,7 @@ add_constraint (const char *name, const char *regclass,
return;
}
-
+
namelen = strlen (name);
slot = &constraints_by_letter_table[(unsigned int)name[0]];
for (iter = slot; *iter; iter = &(*iter)->next_this_letter)
@@ -861,7 +861,7 @@ add_constraint (const char *name, const char *regclass,
else
message_with_line (lineno, "constraint names beginning with '%c' "
"(%s) are reserved for %s constraints",
- name[0], name,
+ name[0], name,
GET_RTX_NAME (appropriate_code));
have_error = 1;
@@ -896,7 +896,7 @@ add_constraint (const char *name, const char *regclass,
}
}
-
+
c = XOBNEW (rtl_obstack, struct constraint_data);
c->name = name;
c->c_name = need_mangled_name ? mangle (name) : name;
@@ -1048,7 +1048,7 @@ write_insn_constraint_len (void)
" return 1;\n"
"}\n");
}
-
+
/* Write out the function which computes the register class corresponding
to a register constraint. */
static void
@@ -1320,7 +1320,7 @@ write_tm_preds_h (void)
puts ("#endif /* tm-preds.h */");
}
-/* Write insn-preds.c.
+/* Write insn-preds.c.
N.B. the list of headers to include was copied from genrecog; it
may not be ideal.
@@ -1367,7 +1367,7 @@ write_insn_preds_c (void)
if (have_register_constraints)
write_regclass_for_constraint ();
write_constraint_satisfied_p ();
-
+
if (have_const_int_constraints)
write_insn_const_int_ok_for_constraint ();
diff --git a/gcc/gensupport.c b/gcc/gensupport.c
index b913ee23b41..f0b5613e050 100644
--- a/gcc/gensupport.c
+++ b/gcc/gensupport.c
@@ -336,7 +336,7 @@ process_rtx (rtx desc, int lineno)
/* Queue them. */
insn_elem
- = queue_pattern (desc, &define_insn_tail, read_rtx_filename,
+ = queue_pattern (desc, &define_insn_tail, read_rtx_filename,
lineno);
split_elem
= queue_pattern (split, &other_tail, read_rtx_filename, lineno);
@@ -875,7 +875,7 @@ process_one_cond_exec (struct queue_elem *ce_elem)
XVECEXP (split, 2, i) = pattern;
}
/* Add the new split to the queue. */
- queue_pattern (split, &other_tail, read_rtx_filename,
+ queue_pattern (split, &other_tail, read_rtx_filename,
insn_elem->split->lineno);
}
}
@@ -927,7 +927,7 @@ init_md_reader_args_cb (int argc, char **argv, bool (*parse_opt)(const char *))
{
if (argv[i][0] != '-')
continue;
-
+
c = argv[i][1];
switch (c)
{
@@ -996,7 +996,7 @@ init_md_reader_args_cb (int argc, char **argv, bool (*parse_opt)(const char *))
/* Read stdin. */
if (already_read_stdin)
fatal ("cannot read standard input twice");
-
+
base_dir = NULL;
read_rtx_filename = in_fname = "<stdin>";
read_rtx_lineno = 1;
@@ -1405,7 +1405,7 @@ init_predicate_table (void)
for (j = 0; j < NUM_RTX_CODE; j++)
if (GET_RTX_CLASS (j) == RTX_CONST_OBJ)
add_predicate_code (pred, (enum rtx_code) j);
-
+
add_predicate (pred);
}
}
diff --git a/gcc/ggc-common.c b/gcc/ggc-common.c
index 76a35f7e029..fc42f45d59c 100644
--- a/gcc/ggc-common.c
+++ b/gcc/ggc-common.c
@@ -41,7 +41,7 @@ along with GCC; see the file COPYING3. If not see
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris. */
-# include <sys/types.h>
+# include <sys/types.h>
# endif
#endif
@@ -101,7 +101,7 @@ static VEC(const_ggc_root_tab_t, heap) *extra_root_vec;
/* Dynamically register a new GGC root table RT. This is useful for
plugins. */
-void
+void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
if (rt)
@@ -523,11 +523,11 @@ gt_pch_save (FILE *f)
/* Try to arrange things so that no relocation is necessary, but
don't try very hard. On most platforms, this will always work,
- and on the rest it's a lot of work to do better.
+ and on the rest it's a lot of work to do better.
(The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
HOST_HOOKS_GT_PCH_USE_ADDRESS.) */
mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
-
+
ggc_pch_this_base (state.d, mmi.preferred_base);
state.ptrs = XNEWVEC (struct ptr_data *, state.count);
@@ -710,7 +710,7 @@ mmap_gt_pch_get_address (size_t size, int fd)
}
/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
- Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
+ Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
mapping the data at BASE, -1 if we couldn't.
This version assumes that the kernel honors the START operand of mmap
@@ -802,7 +802,7 @@ ggc_min_heapsize_heuristic (void)
phys_kbytes /= 8;
#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
- /* Try not to overrun the RSS limit while doing garbage collection.
+ /* Try not to overrun the RSS limit while doing garbage collection.
The RSS limit is only advisory, so no margin is subtracted. */
{
struct rlimit rlim;
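The PCH hunks above describe asking the host hook for a preferred base
address and relocating only when the data cannot be mapped there.  A
minimal sketch of that contract for the mmap-based hook (function and
argument names are illustrative, and the real hook's return convention
may differ):

#include <sys/types.h>
#include <sys/mman.h>

/* Map SIZE bytes of FD at OFFSET at address BASE.  Return 1 if the
   kernel honored BASE, 0 if it mapped elsewhere, -1 on failure.  */
static int
map_pch_at (void *base, size_t size, int fd, off_t offset)
{
  void *addr = mmap (base, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE, fd, offset);
  if (addr == MAP_FAILED)
    return -1;
  return addr == base;
}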
diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index adb32880cfa..84b5c110c23 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -439,16 +439,16 @@ static struct globals
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
sizes. */
-
+
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
-
+
unsigned long long total_allocated_under64;
unsigned long long total_overhead_under64;
-
+
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
-
+
/* The allocations for each of the allocation orders. */
unsigned long long total_allocated_per_order[NUM_ORDERS];
@@ -945,7 +945,7 @@ free_page (page_entry *entry)
/* We cannot free a page from a context deeper than the current
one. */
gcc_assert (entry->context_depth == top->context_depth);
-
+
/* Put top element into freed slot. */
G.by_depth[i] = top;
G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
@@ -1413,7 +1413,7 @@ ggc_free (void *p)
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* In the completely-anal-checking mode, we do *not* immediately free
- the data, but instead verify that the data is *actually* not
+ the data, but instead verify that the data is *actually* not
reachable the next time we collect. */
{
struct free_object *fo = XNEW (struct free_object);
@@ -1440,7 +1440,7 @@ ggc_free (void *p)
/* If the page is completely full, then it's supposed to
be after all pages that aren't. Since we've freed one
object from a page that was full, we need to move the
- page to the head of the list.
+ page to the head of the list.
PE is the node we want to move. Q is the previous node
and P is the next node in the list. */
@@ -1484,7 +1484,7 @@ ggc_free (void *p)
static void
compute_inverse (unsigned order)
{
- size_t size, inv;
+ size_t size, inv;
unsigned int e;
size = OBJECT_SIZE (order);
@@ -1744,7 +1744,7 @@ sweep_pages (void)
G.pages[order] = next;
else
previous->next = next;
-
+
/* Splice P out of the back pointers too. */
if (next)
next->prev = previous;
@@ -2044,7 +2044,7 @@ ggc_print_statistics (void)
SCALE (G.allocated), STAT_LABEL(G.allocated),
SCALE (total_overhead), STAT_LABEL (total_overhead));
-#ifdef GATHER_STATISTICS
+#ifdef GATHER_STATISTICS
{
fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
@@ -2065,7 +2065,7 @@ ggc_print_statistics (void)
G.stats.total_overhead_under128);
fprintf (stderr, "Total Allocated under 128B: %10lld\n",
G.stats.total_allocated_under128);
-
+
for (i = 0; i < NUM_ORDERS; i++)
if (G.stats.total_allocated_per_order[i])
{
diff --git a/gcc/ggc-zone.c b/gcc/ggc-zone.c
index 442c80e6713..cae23e128de 100644
--- a/gcc/ggc-zone.c
+++ b/gcc/ggc-zone.c
@@ -426,13 +426,13 @@ struct alloc_zone
/* Total allocations and overhead for sizes less than 32, 64 and 128.
These sizes are interesting because they are typical cache line
sizes. */
-
+
unsigned long long total_allocated_under32;
unsigned long long total_overhead_under32;
-
+
unsigned long long total_allocated_under64;
unsigned long long total_overhead_under64;
-
+
unsigned long long total_allocated_under128;
unsigned long long total_overhead_under128;
} stats;
@@ -1315,7 +1315,7 @@ ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
zone->allocated += size;
-
+
timevar_ggc_mem_total += size;
#ifdef GATHER_STATISTICS
@@ -1521,7 +1521,7 @@ ggc_set_mark (const void *p)
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
-
+
if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
return 1;
pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
@@ -1571,7 +1571,7 @@ ggc_marked_p (const void *p)
offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
mark_word = offset / (8 * sizeof (mark_type));
mark_bit = offset % (8 * sizeof (mark_type));
-
+
return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
}
@@ -1963,7 +1963,7 @@ ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
ggc_prune_overhead_list ();
#endif
}
-
+
sweep_pages (zone);
zone->was_collected = true;
zone->allocated_last_gc = zone->allocated;
@@ -2178,7 +2178,7 @@ ggc_print_statistics (void)
chunk = chunk->next_free;
}
}
-
+
fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
zone->name,
SCALE (allocated), LABEL (allocated),
@@ -2221,7 +2221,7 @@ ggc_print_statistics (void)
SCALE (total_allocated), LABEL(total_allocated),
SCALE (total_overhead), LABEL (total_overhead));
-#ifdef GATHER_STATISTICS
+#ifdef GATHER_STATISTICS
{
unsigned long long all_overhead = 0, all_allocated = 0;
unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
@@ -2240,7 +2240,7 @@ ggc_print_statistics (void)
all_allocated_under64 += zone->stats.total_allocated_under64;
all_overhead_under64 += zone->stats.total_overhead_under64;
-
+
all_allocated_under128 += zone->stats.total_allocated_under128;
all_overhead_under128 += zone->stats.total_overhead_under128;
diff --git a/gcc/ggc.h b/gcc/ggc.h
index c373de32cb7..bc94d6c0c1b 100644
--- a/gcc/ggc.h
+++ b/gcc/ggc.h
@@ -226,7 +226,7 @@ extern void *ggc_realloc_stat (void *, size_t MEM_STAT_DECL);
extern void *ggc_calloc (size_t, size_t);
/* Free a block. To be used when known for certain it's not reachable. */
extern void ggc_free (void *);
-
+
extern void ggc_record_overhead (size_t, size_t, void * MEM_STAT_DECL);
extern void ggc_free_overhead (void *);
extern void ggc_prune_overhead_list (void);
diff --git a/gcc/gimple-iterator.c b/gcc/gimple-iterator.c
index c3ca0e37501..9a3c382606e 100644
--- a/gcc/gimple-iterator.c
+++ b/gcc/gimple-iterator.c
@@ -45,9 +45,9 @@ static void
update_modified_stmts (gimple_seq seq)
{
gimple_stmt_iterator gsi;
-
+
if (!ssa_operands_active ())
- return;
+ return;
for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
update_stmt_if_modified (gsi_stmt (gsi));
}
@@ -60,7 +60,7 @@ static void
update_bb_for_stmts (gimple_seq_node first, basic_block bb)
{
gimple_seq_node n;
-
+
for (n = first; n; n = n->next)
gimple_set_bb (n->stmt, bb);
}
diff --git a/gcc/gimple-low.c b/gcc/gimple-low.c
index cce31e946ff..63d501deb50 100644
--- a/gcc/gimple-low.c
+++ b/gcc/gimple-low.c
@@ -202,7 +202,7 @@ lower_function_body (void)
return 0;
}
-struct gimple_opt_pass pass_lower_cf =
+struct gimple_opt_pass pass_lower_cf =
{
{
GIMPLE_PASS,
@@ -309,7 +309,7 @@ static void
lower_omp_directive (gimple_stmt_iterator *gsi, struct lower_data *data)
{
gimple stmt;
-
+
stmt = gsi_stmt (*gsi);
lower_sequence (gimple_omp_body (stmt), data);
@@ -616,7 +616,7 @@ block_may_fallthru (const_tree block)
{
case GOTO_EXPR:
case RETURN_EXPR:
- /* Easy cases. If the last statement of the block implies
+ /* Easy cases. If the last statement of the block implies
control transfer, then we can't fall through. */
return false;
@@ -659,7 +659,7 @@ block_may_fallthru (const_tree block)
case CALL_EXPR:
/* Functions that do not return do not fall through. */
return (call_expr_flags (stmt) & ECF_NORETURN) == 0;
-
+
case CLEANUP_POINT_EXPR:
return block_may_fallthru (TREE_OPERAND (stmt, 0));
@@ -686,7 +686,7 @@ gimple_stmt_may_fallthru (gimple stmt)
case GIMPLE_GOTO:
case GIMPLE_RETURN:
case GIMPLE_RESX:
- /* Easy cases. If the last statement of the seq implies
+ /* Easy cases. If the last statement of the seq implies
control transfer, then we can't fall through. */
return false;
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index 4f6c4470c9d..6329d51f2da 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -228,7 +228,7 @@ dump_gimple_fmt (pretty_printer *buffer, int spc, int flags,
default:
gcc_unreachable ();
}
- }
+ }
else
pp_character (buffer, *c);
}
@@ -270,13 +270,13 @@ dump_unary_rhs (pretty_printer *buffer, gimple gs, int spc, int flags)
else
dump_generic_node (buffer, rhs, spc, flags, false);
break;
-
+
case PAREN_EXPR:
pp_string (buffer, "((");
dump_generic_node (buffer, rhs, spc, flags, false);
pp_string (buffer, "))");
break;
-
+
case ABS_EXPR:
pp_string (buffer, "ABS_EXPR <");
dump_generic_node (buffer, rhs, spc, flags, false);
diff --git a/gcc/gimple.c b/gcc/gimple.c
index f84a20cd6c1..a04683a87ac 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -171,7 +171,7 @@ gimple_set_subcode (gimple g, unsigned subcode)
/* Build a tuple with operands. CODE is the statement to build (which
must be one of the GIMPLE_WITH_OPS tuples). SUBCODE is the sub-code
- for the new tuple. NUM_OPS is the number of operands to allocate. */
+ for the new tuple. NUM_OPS is the number of operands to allocate. */
#define gimple_build_with_ops(c, s, n) \
gimple_build_with_ops_stat (c, s, n MEM_STAT_INFO)
@@ -350,7 +350,7 @@ gimple_build_assign_with_ops_stat (enum tree_code subcode, tree lhs, tree op1,
/* Need 1 operand for LHS and 1 or 2 for the RHS (depending on the
code). */
num_ops = get_gimple_rhs_num_ops (subcode) + 1;
-
+
p = gimple_build_with_ops_stat (GIMPLE_ASSIGN, (unsigned)subcode, num_ops
PASS_MEM_STAT);
gimple_assign_set_lhs (p, lhs);
@@ -375,7 +375,7 @@ gimple_build_assign_with_ops_stat (enum tree_code subcode, tree lhs, tree op1,
gimple
gimplify_assign (tree dst, tree src, gimple_seq *seq_p)
-{
+{
tree t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
gimplify_and_add (t, seq_p);
ggc_free (t);
@@ -485,7 +485,7 @@ gimple_build_goto (tree dest)
/* Build a GIMPLE_NOP statement. */
-gimple
+gimple
gimple_build_nop (void)
{
return gimple_alloc (GIMPLE_NOP, 0);
@@ -517,7 +517,7 @@ gimple_build_bind (tree vars, gimple_seq body, tree block)
*/
static inline gimple
-gimple_build_asm_1 (const char *string, unsigned ninputs, unsigned noutputs,
+gimple_build_asm_1 (const char *string, unsigned ninputs, unsigned noutputs,
unsigned nclobbers, unsigned nlabels)
{
gimple p;
@@ -539,7 +539,7 @@ gimple_build_asm_1 (const char *string, unsigned ninputs, unsigned noutputs,
#ifdef GATHER_STATISTICS
gimple_alloc_sizes[(int) gimple_alloc_kind (GIMPLE_ASM)] += size;
#endif
-
+
return p;
}
@@ -555,7 +555,7 @@ gimple_build_asm_1 (const char *string, unsigned ninputs, unsigned noutputs,
LABELS is a vector of destination labels. */
gimple
-gimple_build_asm_vec (const char *string, VEC(tree,gc)* inputs,
+gimple_build_asm_vec (const char *string, VEC(tree,gc)* inputs,
VEC(tree,gc)* outputs, VEC(tree,gc)* clobbers,
VEC(tree,gc)* labels)
{
@@ -564,10 +564,10 @@ gimple_build_asm_vec (const char *string, VEC(tree,gc)* inputs,
p = gimple_build_asm_1 (string,
VEC_length (tree, inputs),
- VEC_length (tree, outputs),
+ VEC_length (tree, outputs),
VEC_length (tree, clobbers),
VEC_length (tree, labels));
-
+
for (i = 0; i < VEC_length (tree, inputs); i++)
gimple_asm_set_input_op (p, i, VEC_index (tree, inputs, i));
@@ -576,10 +576,10 @@ gimple_build_asm_vec (const char *string, VEC(tree,gc)* inputs,
for (i = 0; i < VEC_length (tree, clobbers); i++)
gimple_asm_set_clobber_op (p, i, VEC_index (tree, clobbers, i));
-
+
for (i = 0; i < VEC_length (tree, labels); i++)
gimple_asm_set_label_op (p, i, VEC_index (tree, labels, i));
-
+
return p;
}
@@ -684,7 +684,7 @@ gimple_build_resx (int region)
NLABELS is the number of labels in the switch excluding the default.
DEFAULT_LABEL is the default label for the switch statement. */
-gimple
+gimple
gimple_build_switch_nlabels (unsigned nlabels, tree index, tree default_label)
{
/* nlabels + 1 default label + 1 index. */
@@ -700,10 +700,10 @@ gimple_build_switch_nlabels (unsigned nlabels, tree index, tree default_label)
/* Build a GIMPLE_SWITCH statement.
INDEX is the switch's index.
- NLABELS is the number of labels in the switch excluding the DEFAULT_LABEL.
+ NLABELS is the number of labels in the switch excluding the DEFAULT_LABEL.
... are the labels excluding the default. */
-gimple
+gimple
gimple_build_switch (unsigned nlabels, tree index, tree default_label, ...)
{
va_list al;
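A usage sketch for the two switch builders documented above (index,
default_label, case_a and case_b are hypothetical trees, and the
1-based non-default label slots are an assumption about the accessors):

  /* Two non-default cases, passed directly to the varargs form.  */
  gimple s1 = gimple_build_switch (2, index, default_label, case_a, case_b);

  /* Same shape, but the non-default label slots are filled in later.  */
  gimple s2 = gimple_build_switch_nlabels (2, index, default_label);
  gimple_switch_set_label (s2, 1, case_a);
  gimple_switch_set_label (s2, 2, case_b);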
@@ -779,7 +779,7 @@ gimple_build_debug_bind_stat (tree var, tree value, gimple stmt MEM_STAT_DECL)
BODY is the sequence of statements for which only one thread can execute.
NAME is optional identifier for this critical block. */
-gimple
+gimple
gimple_build_omp_critical (gimple_seq body, tree name)
{
gimple p = gimple_alloc (GIMPLE_OMP_CRITICAL, 0);
@@ -793,7 +793,7 @@ gimple_build_omp_critical (gimple_seq body, tree name)
/* Build a GIMPLE_OMP_FOR statement.
BODY is sequence of statements inside the for loop.
- CLAUSES, are any of the OMP loop construct's clauses: private, firstprivate,
+ CLAUSES, are any of the OMP loop construct's clauses: private, firstprivate,
lastprivate, reductions, ordered, schedule, and nowait.
COLLAPSE is the collapse count.
PRE_BODY is the sequence of statements that are loop invariant. */
@@ -822,8 +822,8 @@ gimple_build_omp_for (gimple_seq body, tree clauses, size_t collapse,
CHILD_FN is the function created for the parallel threads to execute.
DATA_ARG are the shared data argument(s). */
-gimple
-gimple_build_omp_parallel (gimple_seq body, tree clauses, tree child_fn,
+gimple
+gimple_build_omp_parallel (gimple_seq body, tree clauses, tree child_fn,
tree data_arg)
{
gimple p = gimple_alloc (GIMPLE_OMP_PARALLEL, 0);
@@ -846,7 +846,7 @@ gimple_build_omp_parallel (gimple_seq body, tree clauses, tree child_fn,
COPY_FN is the optional function for firstprivate initialization.
ARG_SIZE and ARG_ALIGN are size and alignment of the data block. */
-gimple
+gimple
gimple_build_omp_task (gimple_seq body, tree clauses, tree child_fn,
tree data_arg, tree copy_fn, tree arg_size,
tree arg_align)
@@ -884,7 +884,7 @@ gimple_build_omp_section (gimple_seq body)
BODY is the sequence of statements to be executed by just the master. */
-gimple
+gimple
gimple_build_omp_master (gimple_seq body)
{
gimple p = gimple_alloc (GIMPLE_OMP_MASTER, 0);
@@ -900,7 +900,7 @@ gimple_build_omp_master (gimple_seq body)
CONTROL_DEF is the definition of the control variable.
CONTROL_USE is the use of the control variable. */
-gimple
+gimple
gimple_build_omp_continue (tree control_def, tree control_use)
{
gimple p = gimple_alloc (GIMPLE_OMP_CONTINUE, 0);
@@ -914,7 +914,7 @@ gimple_build_omp_continue (tree control_def, tree control_use)
BODY is the sequence of statements inside a loop that will executed in
sequence. */
-gimple
+gimple
gimple_build_omp_ordered (gimple_seq body)
{
gimple p = gimple_alloc (GIMPLE_OMP_ORDERED, 0);
@@ -928,7 +928,7 @@ gimple_build_omp_ordered (gimple_seq body)
/* Build a GIMPLE_OMP_RETURN statement.
WAIT_P is true if this is a non-waiting return. */
-gimple
+gimple
gimple_build_omp_return (bool wait_p)
{
gimple p = gimple_alloc (GIMPLE_OMP_RETURN, 0);
@@ -945,7 +945,7 @@ gimple_build_omp_return (bool wait_p)
CLAUSES are any of the OMP sections contsruct's clauses: private,
firstprivate, lastprivate, reduction, and nowait. */
-gimple
+gimple
gimple_build_omp_sections (gimple_seq body, tree clauses)
{
gimple p = gimple_alloc (GIMPLE_OMP_SECTIONS, 0);
@@ -972,7 +972,7 @@ gimple_build_omp_sections_switch (void)
CLAUSES are any of the OMP single construct's clauses: private, firstprivate,
copyprivate, nowait. */
-gimple
+gimple
gimple_build_omp_single (gimple_seq body, tree clauses)
{
gimple p = gimple_alloc (GIMPLE_OMP_SINGLE, 0);
@@ -1081,7 +1081,7 @@ gimple_seq_free (gimple_seq seq)
/* If this triggers, it's a sign that the same list is being freed
twice. */
gcc_assert (seq != gimple_seq_cache || gimple_seq_cache == NULL);
-
+
/* Add SEQ to the pool of free sequences. */
seq->next_free = gimple_seq_cache;
gimple_seq_cache = seq;
@@ -1179,7 +1179,7 @@ gimple_seq_copy (gimple_seq src)
/* Walk all the statements in the sequence SEQ calling walk_gimple_stmt
on each one. WI is as in walk_gimple_stmt.
-
+
If walk_gimple_stmt returns non-NULL, the walk is stopped, the
value is stored in WI->CALLBACK_RESULT and the statement that
produced the value is returned.
@@ -1789,7 +1789,7 @@ gimple_assign_single_p (gimple gs)
assignment. I suspect there may be cases where gimple_assign_copy_p,
gimple_assign_single_p, or equivalent logic is used where a similar
treatment of unary NOPs is appropriate. */
-
+
bool
gimple_assign_unary_nop_p (gimple gs)
{
@@ -2902,7 +2902,7 @@ get_base_address (tree t)
{
while (handled_component_p (t))
t = TREE_OPERAND (t, 0);
-
+
if (SSA_VAR_P (t)
|| TREE_CODE (t) == STRING_CST
|| TREE_CODE (t) == CONSTRUCTOR
diff --git a/gcc/gimple.def b/gcc/gimple.def
index d736dd719cb..7a1503c9270 100644
--- a/gcc/gimple.def
+++ b/gcc/gimple.def
@@ -32,7 +32,7 @@ DEFGSCODE(GIMPLE_ERROR_MARK, "gimple_error_mark", GSS_BASE)
/* GIMPLE_COND <COND_CODE, OP1, OP2, TRUE_LABEL, FALSE_LABEL>
represents the conditional jump:
-
+
if (OP1 COND_CODE OP2) goto TRUE_LABEL else goto FALSE_LABEL
COND_CODE is the tree code used as the comparison predicate. It
@@ -74,7 +74,7 @@ DEFGSCODE(GIMPLE_LABEL, "gimple_label", GSS_WITH_OPS)
DEFGSCODE(GIMPLE_SWITCH, "gimple_switch", GSS_WITH_OPS)
/* IMPORTANT.
-
+
Do not rearrange the codes between GIMPLE_ASSIGN and GIMPLE_RETURN.
It's exposed by GIMPLE_RANGE_CHECK calls. These are all the GIMPLE
statements with memory and register operands. */
@@ -132,7 +132,7 @@ DEFGSCODE(GIMPLE_RETURN, "gimple_return", GSS_WITH_MEM_OPS)
/* GIMPLE_BIND <VARS, BLOCK, BODY> represents a lexical scope.
VARS is the set of variables declared in that scope.
- BLOCK is the symbol binding block used for debug information.
+ BLOCK is the symbol binding block used for debug information.
BODY is the sequence of statements in the scope. */
DEFGSCODE(GIMPLE_BIND, "gimple_bind", GSS_BIND)
@@ -190,7 +190,7 @@ DEFGSCODE(GIMPLE_NOP, "gimple_nop", GSS_BASE)
/* IMPORTANT.
-
+
Do not rearrange any of the GIMPLE_OMP_* codes. This ordering is
exposed by the range check in gimple_omp_subcode(). */
@@ -345,6 +345,6 @@ DEFGSCODE(GIMPLE_PREDICT, "gimple_predict", GSS_BASE)
/* This node represents a cleanup expression. It is ONLY USED INTERNALLY
by the gimplifier as a placeholder for cleanups, and its uses will be
cleaned up by the time gimplification is done.
-
+
This tuple should not exist outside of the gimplifier proper. */
DEFGSCODE(GIMPLE_WITH_CLEANUP_EXPR, "gimple_with_cleanup_expr", GSS_WCE)
diff --git a/gcc/gimple.h b/gcc/gimple.h
index 9a0160c861a..7872d9e30e5 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -584,13 +584,13 @@ struct GTY(()) gimple_omp_for_iter {
/* Index variable. */
tree index;
-
+
/* Initial value. */
tree initial;
/* Final value. */
tree final;
-
+
/* Increment. */
tree incr;
};
@@ -700,7 +700,7 @@ struct GTY(()) gimple_statement_omp_single {
};
-/* GIMPLE_OMP_ATOMIC_LOAD.
+/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
@@ -986,7 +986,7 @@ struct gimplify_ctx
gimple_seq conditional_cleanups;
tree exit_label;
tree return_temp;
-
+
VEC(tree,heap) *case_labels;
/* The formal temporary table. Should this be persistent? */
htab_t temp_htab;
@@ -1102,7 +1102,7 @@ gimple_has_substatements (gimple g)
return false;
}
}
-
+
/* Return the basic block holding statement G. */
@@ -2493,7 +2493,7 @@ gimple_goto_dest (const_gimple gs)
/* Set DEST to be the destination of the unconditonal jump GS. */
-static inline void
+static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
@@ -3368,7 +3368,7 @@ gimple_debug_bind_has_value_p (gimple dbg)
/* Return the body for the OMP statement GS. */
-static inline gimple_seq
+static inline gimple_seq
gimple_omp_body (gimple gs)
{
return gs->omp.body;
@@ -4434,7 +4434,7 @@ gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
-
+
seq = bb_seq (bb);
i.ptr = gimple_seq_first (seq);
i.seq = seq;
@@ -4585,7 +4585,7 @@ gsi_last_nondebug_bb (basic_block bb)
}
/* Return a pointer to the current stmt.
-
+
NOTE: You may want to use gsi_replace on the iterator itself,
as this performs additional bookkeeping that will not be done
if you simply assign through a pointer returned by gsi_stmt_ptr. */
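The iterator accessors touched above are normally consumed with the
standard walk idiom, the same shape already visible in the
gimple-iterator.c hunks earlier in this patch.  A minimal sketch:

  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      /* ... inspect or transform STMT here ...  */
    }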
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 8404e0fd13a..a3c6834beff 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -803,7 +803,7 @@ gimple_set_do_not_emit_location (gimple g)
static void
annotate_one_with_location (gimple gs, location_t location)
{
- if (!gimple_has_location (gs)
+ if (!gimple_has_location (gs)
&& !gimple_do_not_emit_location_p (gs)
&& should_carry_location_p (gs))
gimple_set_location (gs, location);
@@ -1445,12 +1445,12 @@ gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
tree default_case = NULL_TREE;
size_t i, len;
gimple gimple_switch;
-
+
/* If someone can be bothered to fill in the labels, they can
be bothered to null out the body too. */
gcc_assert (!SWITCH_LABELS (switch_expr));
- /* save old labels, get new ones from body, then restore the old
+ /* save old labels, get new ones from body, then restore the old
labels. Save all the things from the switch body to append after. */
saved_labels = gimplify_ctxp->case_labels;
gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);
@@ -1458,7 +1458,7 @@ gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);
labels = gimplify_ctxp->case_labels;
gimplify_ctxp->case_labels = saved_labels;
-
+
i = 0;
while (i < VEC_length (tree, labels))
{
@@ -1547,7 +1547,7 @@ gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
}
}
- gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr),
+ gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr),
default_case, labels);
gimplify_seq_add_stmt (pre_p, gimple_switch);
gimplify_seq_add_seq (pre_p, switch_body_seq);
@@ -1793,7 +1793,7 @@ gimplify_conversion (tree *expr_p)
/* Nonlocal VLAs seen in the current function. */
static struct pointer_set_t *nonlocal_vlas;
-/* Gimplify a VAR_DECL or PARM_DECL. Returns GS_OK if we expanded a
+/* Gimplify a VAR_DECL or PARM_DECL. Returns GS_OK if we expanded a
DECL_VALUE_EXPR, and it's worth re-examining things. */
static enum gimplify_status
@@ -1913,7 +1913,7 @@ gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
goto restart;
else
break;
-
+
VEC_safe_push (tree, heap, stack, *p);
}
@@ -2272,7 +2272,7 @@ gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
*expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
return GS_OK;
}
-
+
if (fold_builtin_next_arg (*expr_p, true))
{
*expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
@@ -3125,7 +3125,7 @@ gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
/* Assert our assumptions, to abort instead of producing wrong code
silently if they are not met. Beware that the RHS CONSTRUCTOR might
not be immediately exposed. */
- from = TREE_OPERAND (*expr_p, 1);
+ from = TREE_OPERAND (*expr_p, 1);
if (TREE_CODE (from) == WITH_SIZE_EXPR)
from = TREE_OPERAND (from, 0);
@@ -3358,18 +3358,18 @@ gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
}
/* Return true if FDECL is accessing a field that is zero sized. */
-
+
static bool
zero_sized_field_decl (const_tree fdecl)
{
- if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
+ if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
&& integer_zerop (DECL_SIZE (fdecl)))
return true;
return false;
}
/* Return true if TYPE is zero sized. */
-
+
static bool
zero_sized_type (const_tree type)
{
@@ -3616,7 +3616,7 @@ gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
return GS_OK;
break;
}
-
+
/* Fetch information about the constructor to direct later processing.
We might want to make static versions of it in various cases, and
can only do so if it known to be a valid constant initializer. */
@@ -4025,7 +4025,7 @@ gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
break;
case INDIRECT_REF:
{
- /* If we have code like
+ /* If we have code like
*(const A*)(A*)&x
@@ -4196,7 +4196,7 @@ gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
*expr_p = wrap;
return GS_OK;
}
-
+
case COMPOUND_LITERAL_EXPR:
{
tree complit = TREE_OPERAND (*expr_p, 1);
@@ -4717,7 +4717,7 @@ gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
VEC(tree, gc) *clobbers;
VEC(tree, gc) *labels;
tree link_next;
-
+
expr = *expr_p;
noutputs = list_length (ASM_OUTPUTS (expr));
oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));
@@ -4902,7 +4902,7 @@ gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
TREE_CHAIN (link) = NULL_TREE;
VEC_safe_push (tree, gc, inputs, link);
}
-
+
for (link = ASM_CLOBBERS (expr); link; ++i, link = TREE_CHAIN (link))
VEC_safe_push (tree, gc, clobbers, link);
@@ -5277,7 +5277,7 @@ omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
}
/* When adding a variable-sized variable, we have to handle all sorts
- of additional bits of data: the pointer replacement variable, and
+ of additional bits of data: the pointer replacement variable, and
the parameters of the type. */
if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
@@ -5309,7 +5309,7 @@ omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
| (flags & (GOVD_SEEN | GOVD_EXPLICIT));
- /* We're going to make use of the TYPE_SIZE_UNIT at least in the
+ /* We're going to make use of the TYPE_SIZE_UNIT at least in the
alloca statement we generate for the variable, so make sure it
is available. This isn't automatically needed for the SHARED
case, since we won't be allocating local storage then.
@@ -5628,7 +5628,7 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
push_gimplify_context (&gctx);
gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
- pop_gimplify_context
+ pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c)));
OMP_CLAUSE_REDUCTION_INIT (c) = NULL_TREE;
OMP_CLAUSE_REDUCTION_MERGE (c) = NULL_TREE;
@@ -5825,7 +5825,7 @@ gimplify_adjust_omp_clauses (tree *list_p)
OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
= (n->value & GOVD_FIRSTPRIVATE) != 0;
break;
-
+
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
@@ -5851,7 +5851,7 @@ gimplify_adjust_omp_clauses (tree *list_p)
/* Add in any implicit data sharing. */
splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);
-
+
gimplify_omp_ctxp = ctx->outer_context;
delete_omp_context (ctx);
}
@@ -5975,7 +5975,7 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
{
var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
TREE_OPERAND (t, 0) = var;
-
+
gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var));
omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
@@ -6125,14 +6125,14 @@ gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
}
/* A subroutine of gimplify_omp_atomic. The front end is supposed to have
- stabilized the lhs of the atomic operation as *ADDR. Return true if
+ stabilized the lhs of the atomic operation as *ADDR. Return true if
EXPR is this stabilized form. */
static bool
goa_lhs_expr_p (tree expr, tree addr)
{
/* Also include casts to other type variants. The C front end is fond
- of adding these for e.g. volatile variables. This is like
+ of adding these for e.g. volatile variables. This is like
STRIP_TYPE_NOPS but includes the main variant lookup. */
STRIP_USELESS_TYPE_CONVERSION (expr);
@@ -6178,7 +6178,7 @@ goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
}
if (is_gimple_val (expr))
return 0;
-
+
saw_lhs = 0;
switch (TREE_CODE_CLASS (TREE_CODE (expr)))
{
@@ -7458,7 +7458,7 @@ gimplify_body (tree *body_p, tree fndecl, bool do_parms)
/* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL
node for the function we want to gimplify.
-
+
Returns the sequence of GIMPLE statements corresponding to the body
of FNDECL. */
diff --git a/gcc/graphds.c b/gcc/graphds.c
index 7dcb04c139b..4ee71dff904 100644
--- a/gcc/graphds.c
+++ b/gcc/graphds.c
@@ -266,7 +266,7 @@ graphds_dfs (struct graph *g, int *qs, int nq, VEC (int, heap) **qt,
numbers assigned by the previous pass. If SUBGRAPH is not NULL, it
specifies the subgraph of G whose strongly connected components we want
to determine.
-
+
After running this function, v->component is the number of the strongly
connected component for each vertex of G. Returns the number of the
sccs of G. */
@@ -409,7 +409,7 @@ graphds_domtree (struct graph *g, int entry,
/* We use a slight modification of the standard iterative algorithm, as
described in
-
+
K. D. Cooper, T. J. Harvey and K. Kennedy: A Simple, Fast Dominance
Algorithm
diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c
index 2df86fa2ac9..3f3bb3bb434 100644
--- a/gcc/graphite-clast-to-gimple.c
+++ b/gcc/graphite-clast-to-gimple.c
@@ -789,7 +789,7 @@ compute_cloog_iv_types_1 (poly_bb_p pbb, struct clast_user_stmt *user_stmt)
{
PTR *slot;
struct ivtype_map_elt_s tmp;
- struct clast_expr *expr = (struct clast_expr *)
+ struct clast_expr *expr = (struct clast_expr *)
((struct clast_assignment *)t)->RHS;
/* Create an entry (clast_var, type). */
diff --git a/gcc/gthr-nks.h b/gcc/gthr-nks.h
index 5c2ad1d4677..311c6904b1a 100644
--- a/gcc/gthr-nks.h
+++ b/gcc/gthr-nks.h
@@ -100,7 +100,7 @@ __gthread_objc_thread_detach (void (*func)(void *), void *arg)
NXContextFree (context);
thread_id = NULL;
}
-
+
return thread_id;
}
diff --git a/gcc/gthr-posix.c b/gcc/gthr-posix.c
index 429c3f463a6..1987ba738c2 100644
--- a/gcc/gthr-posix.c
+++ b/gcc/gthr-posix.c
@@ -72,8 +72,8 @@ pthread_create (pthread_t *thread ATTRIBUTE_UNUSED,
return 0;
}
-int
-pthread_join (pthread_t thread ATTRIBUTE_UNUSED,
+int
+pthread_join (pthread_t thread ATTRIBUTE_UNUSED,
void **value_ptr ATTRIBUTE_UNUSED)
{
return 0;
@@ -84,7 +84,7 @@ pthread_exit (void *value_ptr ATTRIBUTE_UNUSED)
{
}
-int
+int
pthread_detach (pthread_t thread ATTRIBUTE_UNUSED)
{
return 0;
@@ -178,7 +178,7 @@ pthread_cond_wait (pthread_cond_t *cond ATTRIBUTE_UNUSED,
int
pthread_cond_timedwait (pthread_cond_t *cond ATTRIBUTE_UNUSED,
- pthread_mutex_t *mutex ATTRIBUTE_UNUSED,
+ pthread_mutex_t *mutex ATTRIBUTE_UNUSED,
const struct timespec *abstime ATTRIBUTE_UNUSED)
{
return 0;
diff --git a/gcc/gthr-posix.h b/gcc/gthr-posix.h
index 82a3c581c0a..61b4dda4271 100644
--- a/gcc/gthr-posix.h
+++ b/gcc/gthr-posix.h
@@ -51,7 +51,7 @@ typedef struct timespec __gthread_time_t;
/* POSIX like conditional variables are supported. Please look at comments
in gthr.h for details. */
-#define __GTHREAD_HAS_COND 1
+#define __GTHREAD_HAS_COND 1
#define __GTHREAD_MUTEX_INIT PTHREAD_MUTEX_INITIALIZER
#define __GTHREAD_ONCE_INIT PTHREAD_ONCE_INIT
@@ -237,7 +237,7 @@ __gthread_active_p (void)
static inline int
__gthread_active_p (void)
{
- static void *const __gthread_active_ptr
+ static void *const __gthread_active_ptr
= __extension__ (void *) &__gthrw_(pthread_cancel);
return __gthread_active_ptr != 0;
}
diff --git a/gcc/gthr-posix95.h b/gcc/gthr-posix95.h
index 8772c977a28..55e254eb21f 100644
--- a/gcc/gthr-posix95.h
+++ b/gcc/gthr-posix95.h
@@ -171,7 +171,7 @@ __gthread_active_p (void)
static inline int
__gthread_active_p (void)
{
- static void *const __gthread_active_ptr
+ static void *const __gthread_active_ptr
= __extension__ (void *) &__gthrw_(pthread_cancel);
return __gthread_active_ptr != 0;
}
diff --git a/gcc/gthr-single.h b/gcc/gthr-single.h
index e267ba640ad..357528ad1f1 100644
--- a/gcc/gthr-single.h
+++ b/gcc/gthr-single.h
@@ -213,12 +213,12 @@ __gthread_active_p (void)
return 0;
}
-static inline int
+static inline int
__gthread_once (__gthread_once_t *__once UNUSED, void (*__func) (void) UNUSED)
{
return 0;
}
-
+
static inline int UNUSED
__gthread_key_create (__gthread_key_t *__key UNUSED, void (*__func) (void *) UNUSED)
{
@@ -230,14 +230,14 @@ __gthread_key_delete (__gthread_key_t __key UNUSED)
{
return 0;
}
-
+
static inline void *
__gthread_getspecific (__gthread_key_t __key UNUSED)
{
return 0;
}
-static inline int
+static inline int
__gthread_setspecific (__gthread_key_t __key UNUSED, const void *__v UNUSED)
{
return 0;
diff --git a/gcc/gthr-tpf.h b/gcc/gthr-tpf.h
index 4bc7e63642f..fb23e91cfcd 100644
--- a/gcc/gthr-tpf.h
+++ b/gcc/gthr-tpf.h
@@ -23,7 +23,7 @@ a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-/* TPF needs its own version of gthr-*.h because TPF always links to
+/* TPF needs its own version of gthr-*.h because TPF always links to
the thread library. However, for performance reasons we still do not
want to issue thread api calls unless a check is made to see that we
are running as a thread. */
@@ -206,7 +206,7 @@ __gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *__mutex)
static inline int
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *__mutex)
-{
+{
if (__tpf_pthread_active ())
{
pthread_mutexattr_t __attr;
diff --git a/gcc/gthr-vxworks.h b/gcc/gthr-vxworks.h
index dee15731f0a..d4da14ef492 100644
--- a/gcc/gthr-vxworks.h
+++ b/gcc/gthr-vxworks.h
@@ -137,7 +137,7 @@ __gthread_once_t;
#if defined (__RTP__)
# define __GTHREAD_ONCE_INIT { 0 }
-#elif defined (__PPC__)
+#elif defined (__PPC__)
# define __GTHREAD_ONCE_INIT { 0, 0, 0, 0 }
#else
# define __GTHREAD_ONCE_INIT { 0, 0 }
diff --git a/gcc/gthr.h b/gcc/gthr.h
index cd3cc23c615..6edfbcd69c3 100644
--- a/gcc/gthr.h
+++ b/gcc/gthr.h
@@ -101,16 +101,16 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
All functions returning int should return zero on success or the error
number. If the operation is not supported, -1 is returned.
- If the following are also defined, you should
+ If the following are also defined, you should
#define __GTHREADS_CXX0X 1
- to enable the c++0x thread library.
-
+ to enable the c++0x thread library.
+
Types:
__gthread_t
__gthread_time_t
Interface:
- int __gthread_create (__gthread_t *thread, void *(*func) (void*),
+ int __gthread_create (__gthread_t *thread, void *(*func) (void*),
void *args);
int __gthread_join (__gthread_t thread, void **value_ptr);
int __gthread_detach (__gthread_t thread);
@@ -122,9 +122,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
const __gthread_time_t *abs_timeout);
int __gthread_recursive_mutex_timedlock (__gthread_recursive_mutex_t *m,
const __gthread_time_t *abs_time);
-
+
int __gthread_cond_signal (__gthread_cond_t *cond);
- int __gthread_cond_timedwait (__gthread_cond_t *cond,
+ int __gthread_cond_timedwait (__gthread_cond_t *cond,
__gthread_mutex_t *mutex,
const __gthread_time_t *abs_timeout);
int __gthread_cond_timedwait_recursive (__gthread_cond_t *cond,
@@ -137,7 +137,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
POSIX/Unix95 threads with -D_PTHREADS95
DCE threads with -D_DCE_THREADS
Solaris/UI threads with -D_SOLARIS_THREADS
-
+
*/
/* Check first for thread specific defines. */
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 1f05a7231f6..204fab6aff9 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -185,7 +185,7 @@ fix_sched_param (const char *param, const char *val)
warning (0, "fix_sched_param: unknown param: %s", param);
}
-/* This is a placeholder for the scheduler parameters common
+/* This is a placeholder for the scheduler parameters common
to all schedulers. */
struct common_sched_info_def *common_sched_info;
@@ -295,7 +295,7 @@ static int q_size = 0;
queue or ready list.
QUEUE_READY - INSN is in ready list.
N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
-
+
#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
/* The following variable value refers for all current and future
@@ -336,7 +336,7 @@ static int may_trap_exp (const_rtx, int);
static int haifa_luid_for_non_insn (rtx x);
/* Haifa version of sched_info hooks common to all headers. */
-const struct common_sched_info_def haifa_common_sched_info =
+const struct common_sched_info_def haifa_common_sched_info =
{
NULL, /* fix_recovery_cfg */
NULL, /* add_block */
@@ -726,7 +726,7 @@ initiate_bb_reg_pressure_info (basic_block bb)
for (i = 0; ; ++i)
{
unsigned int regno = EH_RETURN_DATA_REGNO (i);
-
+
if (regno == INVALID_REGNUM)
break;
if (! bitmap_bit_p (df_get_live_in (bb), regno))
@@ -740,7 +740,7 @@ static void
save_reg_pressure (void)
{
int i;
-
+
for (i = 0; i < ira_reg_class_cover_size; i++)
saved_reg_pressure[ira_reg_class_cover[i]]
= curr_reg_pressure[ira_reg_class_cover[i]];
@@ -752,7 +752,7 @@ static void
restore_reg_pressure (void)
{
int i;
-
+
for (i = 0; i < ira_reg_class_cover_size; i++)
curr_reg_pressure[ira_reg_class_cover[i]]
= saved_reg_pressure[ira_reg_class_cover[i]];
@@ -887,7 +887,7 @@ dep_cost_1 (dep_t link, dw_t dw)
else if (bypass_p (insn))
cost = insn_latency (insn, used);
}
-
+
if (targetm.sched.adjust_cost_2)
cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
@@ -940,7 +940,7 @@ increase_insn_priority (rtx insn, int amount)
}
else
{
- /* In sel-sched.c INSN_PRIORITY is not kept up to date.
+ /* In sel-sched.c INSN_PRIORITY is not kept up to date.
Use EXPR_PRIORITY instead. */
sel_add_to_insn_priority (insn, amount);
}
@@ -1027,7 +1027,7 @@ priority (rtx insn)
different than that of normal instructions. Instead of walking
through INSN_FORW_DEPS (check) list, we walk through
INSN_FORW_DEPS list of each instruction in the corresponding
- recovery block. */
+ recovery block. */
/* Selective scheduling does not define RECOVERY_BLOCK macro. */
rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
@@ -1078,7 +1078,7 @@ priority (rtx insn)
this_priority = next_priority;
}
}
-
+
twin = PREV_INSN (twin);
}
while (twin != prev_first);
@@ -1178,7 +1178,7 @@ rank_for_schedule (const void *x, const void *y)
}
/* The insn in a schedule group should be issued the first. */
- if (flag_sched_group_heuristic &&
+ if (flag_sched_group_heuristic &&
SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
return SCHED_GROUP_P (tmp2) ? 1 : -1;
@@ -1216,7 +1216,7 @@ rank_for_schedule (const void *x, const void *y)
if (flag_sched_critical_path_heuristic && priority_val)
return priority_val;
-
+
/* Prefer speculative insn with greater dependencies weakness. */
if (flag_sched_spec_insn_heuristic && spec_info)
{
@@ -1229,7 +1229,7 @@ rank_for_schedule (const void *x, const void *y)
dw1 = ds_weak (ds1);
else
dw1 = NO_DEP_WEAK;
-
+
ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
if (ds2)
dw2 = ds_weak (ds2);
@@ -1416,7 +1416,7 @@ HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
rtx t;
-
+
gcc_assert (ready->n_ready);
t = ready->vec[ready->first--];
ready->n_ready--;
@@ -1444,7 +1444,7 @@ rtx
ready_element (struct ready_list *ready, int index)
{
gcc_assert (ready->n_ready && index < ready->n_ready);
-
+
return ready->vec[ready->first - index];
}
@@ -1534,7 +1534,7 @@ advance_state (state_t state)
targetm.sched.dfa_pre_cycle_insn ());
state_transition (state, NULL);
-
+
if (targetm.sched.dfa_post_cycle_insn)
state_transition (state,
targetm.sched.dfa_post_cycle_insn ());
@@ -1728,7 +1728,7 @@ schedule_insn (rtx insn)
if (INSN_TICK (insn) > clock_var)
/* INSN has been prematurely moved from the queue to the ready list.
This is possible only if following flag is set. */
- gcc_assert (flag_sched_stalled_insns);
+ gcc_assert (flag_sched_stalled_insns);
/* ??? Probably, if INSN is scheduled prematurely, we should leave
INSN_TICK untouched. This is a machine-dependent issue, actually. */
@@ -1753,10 +1753,10 @@ schedule_insn (rtx insn)
if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
{
- int effective_cost;
-
+ int effective_cost;
+
effective_cost = try_ready (next);
-
+
if (effective_cost >= 0
&& SCHED_GROUP_P (next)
&& advance < effective_cost)
@@ -1804,7 +1804,7 @@ schedule_insn (rtx insn)
/* Functions for handling of notes. */
/* Insert the INSN note at the end of the notes list. */
-static void
+static void
add_to_note_list (rtx insn, rtx *note_list_end_p)
{
PREV_INSN (insn) = *note_list_end_p;
@@ -1832,7 +1832,7 @@ concat_note_lists (rtx from_end, rtx *to_endp)
from_start = from_end;
/* A note list should be traversed via PREV_INSN. */
- while (PREV_INSN (from_start) != NULL)
+ while (PREV_INSN (from_start) != NULL)
from_start = PREV_INSN (from_start);
add_to_note_list (from_start, to_endp);
@@ -2123,17 +2123,17 @@ queue_to_ready (struct ready_list *ready)
}
/* Used by early_queue_to_ready. Determines whether it is "ok" to
- prematurely move INSN from the queue to the ready list. Currently,
- if a target defines the hook 'is_costly_dependence', this function
+ prematurely move INSN from the queue to the ready list. Currently,
+ if a target defines the hook 'is_costly_dependence', this function
uses the hook to check whether there exist any dependences which are
- considered costly by the target, between INSN and other insns that
+ considered costly by the target, between INSN and other insns that
have already been scheduled. Dependences are checked up to Y cycles
back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
- controlling this value.
- (Other considerations could be taken into account instead (or in
+ controlling this value.
+ (Other considerations could be taken into account instead (or in
addition) depending on user flags and target hooks. */
-static bool
+static bool
ok_for_early_queue_removal (rtx insn)
{
int n_cycles;
@@ -2173,9 +2173,9 @@ ok_for_early_queue_removal (rtx insn)
break;
}
- if (!prev_insn)
+ if (!prev_insn)
break;
- prev_insn = PREV_INSN (prev_insn);
+ prev_insn = PREV_INSN (prev_insn);
}
}
@@ -2186,7 +2186,7 @@ ok_for_early_queue_removal (rtx insn)
/* Remove insns from the queue, before they become "ready" with respect
to FU latency considerations. */
-static int
+static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
rtx insn;
@@ -2200,20 +2200,20 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
int insns_removed = 0;
/*
- Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
- function:
+ Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
+ function:
- X == 0: There is no limit on how many queued insns can be removed
+ X == 0: There is no limit on how many queued insns can be removed
prematurely. (flag_sched_stalled_insns = -1).
- X >= 1: Only X queued insns can be removed prematurely in each
+ X >= 1: Only X queued insns can be removed prematurely in each
invocation. (flag_sched_stalled_insns = X).
Otherwise: Early queue removal is disabled.
(flag_sched_stalled_insns = 0)
*/
- if (! flag_sched_stalled_insns)
+ if (! flag_sched_stalled_insns)
return 0;
for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
@@ -2232,7 +2232,7 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
print_rtl_single (sched_dump, insn);
memcpy (temp_state, state, dfa_state_size);
- if (recog_memoized (insn) < 0)
+ if (recog_memoized (insn) < 0)
/* non-negative to indicate that it's not ready
to avoid infinite Q->R->Q->R... */
cost = 0;
@@ -2243,7 +2243,7 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
fprintf (sched_dump, "transition cost = %d\n", cost);
move_to_ready = false;
- if (cost < 0)
+ if (cost < 0)
{
move_to_ready = ok_for_early_queue_removal (insn);
if (move_to_ready == true)
@@ -2252,7 +2252,7 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
q_size -= 1;
ready_add (ready, insn, false);
- if (prev_link)
+ if (prev_link)
XEXP (prev_link, 1) = next_link;
else
insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
@@ -2276,11 +2276,11 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
link = next_link;
} /* while link */
- } /* if link */
+ } /* if link */
} /* for stalls.. */
- return insns_removed;
+ return insns_removed;
}
@@ -2348,9 +2348,9 @@ move_insn (rtx insn, rtx last, rtx nt)
int jump_p = 0;
bb = BLOCK_FOR_INSN (insn);
-
+
/* BB_HEAD is either LABEL or NOTE. */
- gcc_assert (BB_HEAD (bb) != insn);
+ gcc_assert (BB_HEAD (bb) != insn);
if (BB_END (bb) == insn)
/* If this is last instruction in BB, move end marker one
@@ -2364,7 +2364,7 @@ move_insn (rtx insn, rtx last, rtx nt)
&& IS_SPECULATION_BRANCHY_CHECK_P (insn))
|| (common_sched_info->sched_pass_id
== SCHED_EBB_PASS));
-
+
gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
BB_END (bb) = PREV_INSN (insn);
@@ -2385,7 +2385,7 @@ move_insn (rtx insn, rtx last, rtx nt)
&& (LABEL_P (note)
|| BARRIER_P (note)))
note = NEXT_INSN (note);
-
+
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
}
else
@@ -2413,13 +2413,13 @@ move_insn (rtx insn, rtx last, rtx nt)
}
df_insn_change_bb (insn, bb);
-
+
/* Update BB_END, if needed. */
if (BB_END (bb) == last)
- BB_END (bb) = insn;
+ BB_END (bb) = insn;
}
- SCHED_GROUP_P (insn) = 0;
+ SCHED_GROUP_P (insn) = 0;
}
/* Return true if scheduling INSN will finish current clock cycle. */
@@ -2528,8 +2528,8 @@ max_issue (struct ready_list *ready, int privileged_n, state_t state,
/* ??? We used to assert here that we never issue more insns than issue_rate.
However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
achieved to get better performance. Until these targets are fixed to use
- scheduler hooks to manipulate insns priority instead, the assert should
- be disabled.
+ scheduler hooks to manipulate insns priority instead, the assert should
+ be disabled.
gcc_assert (more_issue >= 0); */
@@ -2646,7 +2646,7 @@ max_issue (struct ready_list *ready, int privileged_n, state_t state,
}
/* Restore the original state of the DFA. */
- memcpy (state, choice_stack->state, dfa_state_size);
+ memcpy (state, choice_stack->state, dfa_state_size);
return best;
}
@@ -2698,7 +2698,7 @@ choose_ready (struct ready_list *ready, rtx *insn_ptr)
rtx insn;
int try_data = 1, try_control = 1;
ds_t ts;
-
+
insn = ready_element (ready, 0);
if (INSN_CODE (insn) < 0)
{
@@ -2717,16 +2717,16 @@ choose_ready (struct ready_list *ready, rtx *insn_ptr)
x = ready_element (ready, i);
s = TODO_SPEC (x);
-
+
if (spec_info->flags & PREFER_NON_DATA_SPEC
&& !(s & DATA_SPEC))
- {
+ {
try_data = 0;
if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
|| !try_control)
break;
}
-
+
if (spec_info->flags & PREFER_NON_CONTROL_SPEC
&& !(s & CONTROL_SPEC))
{
@@ -2790,17 +2790,17 @@ choose_ready (struct ready_list *ready, rtx *insn_ptr)
{
*insn_ptr = ready_remove_first (ready);
if (sched_verbose >= 4)
- fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
+ fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
(*current_sched_info->print_insn) (*insn_ptr, 0));
return 0;
}
else
{
- if (sched_verbose >= 4)
+ if (sched_verbose >= 4)
fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
(*current_sched_info->print_insn)
(ready_element (ready, index), 0));
-
+
*insn_ptr = ready_remove (ready, index);
return 0;
}
@@ -2871,7 +2871,7 @@ schedule_block (basic_block *target_bb)
/* Start just before the beginning of time. */
clock_var = -1;
- /* We need queue and ready lists and clock_var be initialized
+ /* We need queue and ready lists and clock_var be initialized
in try_ready () (which is called through init_ready_list ()). */
(*current_sched_info->init_ready_list) ();
@@ -3037,9 +3037,9 @@ schedule_block (basic_block *target_bb)
print_curr_reg_pressure ();
}
- if (ready.n_ready == 0
- && can_issue_more
- && reload_completed)
+ if (ready.n_ready == 0
+ && can_issue_more
+ && reload_completed)
{
/* Allow scheduling insns directly from the queue in case
there's nothing better to do (ready list is empty) but
@@ -3095,10 +3095,10 @@ schedule_block (basic_block *target_bb)
to have the highest priority (so it will be returned by
the ready_remove_first call above), we invoke
ready_add (&ready, insn, true).
- But, still, there is one issue: INSN can be later
- discarded by scheduler's front end through
+ But, still, there is one issue: INSN can be later
+ discarded by scheduler's front end through
current_sched_info->can_schedule_ready_p, hence, won't
- be issued next. */
+ be issued next. */
{
ready_add (&ready, insn, true);
break;
@@ -3140,7 +3140,7 @@ schedule_block (basic_block *target_bb)
advance = cost;
break;
}
-
+
continue;
}
@@ -3153,12 +3153,12 @@ schedule_block (basic_block *target_bb)
continue;
}
- /* DECISION is made. */
-
+ /* DECISION is made. */
+
if (TODO_SPEC (insn) & SPECULATIVE)
generate_recovery_code (insn);
- if (control_flow_insn_p (last_scheduled_insn)
+ if (control_flow_insn_p (last_scheduled_insn)
/* This is used to switch basic blocks by request
from scheduler front-end (actually, sched-ebb.c only).
This is used to process blocks with single fallthru
@@ -3168,7 +3168,7 @@ schedule_block (basic_block *target_bb)
{
*target_bb = current_sched_info->advance_target_bb
(*target_bb, 0);
-
+
if (sched_verbose)
{
rtx x;
@@ -3180,15 +3180,15 @@ schedule_block (basic_block *target_bb)
last_scheduled_insn = bb_note (*target_bb);
}
-
+
/* Update counters, etc in the scheduler's front end. */
(*current_sched_info->begin_schedule_ready) (insn,
last_scheduled_insn);
-
+
move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
reemit_notes (insn);
last_scheduled_insn = insn;
-
+
if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
{
cycle_issued_insns++;
@@ -3282,19 +3282,19 @@ schedule_block (basic_block *target_bb)
/* Sanity check -- queue must be empty now. Meaningless if region has
multiple bbs. */
gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
- else
+ else
{
/* We must maintain QUEUE_INDEX between blocks in region. */
for (i = ready.n_ready - 1; i >= 0; i--)
{
rtx x;
-
+
x = ready_element (&ready, i);
QUEUE_INDEX (x) = QUEUE_NOWHERE;
TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
}
- if (q_size)
+ if (q_size)
for (i = 0; i <= max_insn_queue_index; i++)
{
rtx link;
@@ -3356,7 +3356,7 @@ set_priorities (rtx head, rtx tail)
{
rtx insn;
int n_insn;
- int sched_max_insns_priority =
+ int sched_max_insns_priority =
current_sched_info->sched_max_insns_priority;
rtx prev_head;
@@ -3398,7 +3398,7 @@ setup_sched_dump (void)
? stderr : dump_file);
}
-/* Initialize some global state for the scheduler. This function works
+/* Initialize some global state for the scheduler. This function works
with the common data shared between all the schedulers. It is called
from the scheduler specific initialization routine. */
@@ -3478,8 +3478,8 @@ sched_init (void)
}
df_analyze ();
-
- /* Do not run DCE after reload, as this can kill nops inserted
+
+ /* Do not run DCE after reload, as this can kill nops inserted
by bundling. */
if (reload_completed)
df_clear_flags (DF_LR_RUN_DCE);
@@ -3506,7 +3506,7 @@ sched_init (void)
saved_reg_live = BITMAP_ALLOC (NULL);
region_ref_regs = BITMAP_ALLOC (NULL);
}
-
+
curr_state = xmalloc (dfa_state_size);
}
@@ -3597,7 +3597,7 @@ haifa_sched_finish (void)
sched_finish ();
}
-/* Free global data used during insn scheduling. This function works with
+/* Free global data used during insn scheduling. This function works with
the common data shared between the schedulers. */
void
@@ -3644,7 +3644,7 @@ fix_inter_tick (rtx head, rtx tail)
int next_clock = clock_var + 1;
bitmap_initialize (&processed, 0);
-
+
/* Iterates over scheduled instructions and fix their INSN_TICKs and
INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
across different blocks. */
@@ -3655,26 +3655,26 @@ fix_inter_tick (rtx head, rtx tail)
int tick;
sd_iterator_def sd_it;
dep_t dep;
-
+
tick = INSN_TICK (head);
gcc_assert (tick >= MIN_TICK);
-
+
/* Fix INSN_TICK of instruction from just scheduled block. */
if (!bitmap_bit_p (&processed, INSN_LUID (head)))
{
bitmap_set_bit (&processed, INSN_LUID (head));
tick -= next_clock;
-
+
if (tick < MIN_TICK)
tick = MIN_TICK;
-
- INSN_TICK (head) = tick;
+
+ INSN_TICK (head) = tick;
}
-
+
FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
{
rtx next;
-
+
next = DEP_CON (dep);
tick = INSN_TICK (next);
@@ -3686,10 +3686,10 @@ fix_inter_tick (rtx head, rtx tail)
{
bitmap_set_bit (&processed, INSN_LUID (next));
tick -= next_clock;
-
+
if (tick < MIN_TICK)
tick = MIN_TICK;
-
+
if (tick > INTER_TICK (next))
INTER_TICK (next) = tick;
else
@@ -3704,7 +3704,7 @@ fix_inter_tick (rtx head, rtx tail)
}
static int haifa_speculate_insn (rtx, ds_t, rtx *);
-
+
/* Check if NEXT is ready to be added to the ready or queue list.
If "yes", add it to the proper list.
Returns:
@@ -3713,7 +3713,7 @@ static int haifa_speculate_insn (rtx, ds_t, rtx *);
0 < N - queued for N cycles. */
int
try_ready (rtx next)
-{
+{
ds_t old_ts, *ts;
ts = &TODO_SPEC (next);
@@ -3722,7 +3722,7 @@ try_ready (rtx next)
gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
&& ((old_ts & HARD_DEP)
|| (old_ts & SPECULATIVE)));
-
+
if (sd_lists_empty_p (next, SD_LIST_BACK))
/* NEXT has all its dependencies resolved. */
{
@@ -3798,11 +3798,11 @@ try_ready (rtx next)
{
int res;
rtx new_pat;
-
+
gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
-
+
res = haifa_speculate_insn (next, *ts, &new_pat);
-
+
switch (res)
{
case -1:
@@ -3811,47 +3811,47 @@ try_ready (rtx next)
so we won't reanalyze anything. */
*ts = (*ts & ~SPECULATIVE) | HARD_DEP;
break;
-
+
case 0:
/* We follow the rule, that every speculative insn
has non-null ORIG_PAT. */
if (!ORIG_PAT (next))
ORIG_PAT (next) = PATTERN (next);
break;
-
- case 1:
+
+ case 1:
if (!ORIG_PAT (next))
/* If we gonna to overwrite the original pattern of insn,
save it. */
ORIG_PAT (next) = PATTERN (next);
-
+
haifa_change_pattern (next, new_pat);
break;
-
+
default:
gcc_unreachable ();
}
}
-
+
/* We need to restore pattern only if (*ts == 0), because otherwise it is
either correct (*ts & SPECULATIVE),
or we simply don't care (*ts & HARD_DEP). */
-
+
gcc_assert (!ORIG_PAT (next)
|| !IS_SPECULATION_BRANCHY_CHECK_P (next));
-
+
if (*ts & HARD_DEP)
{
/* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
control-speculative NEXT could have been discarded by sched-rgn.c
(the same case as when discarded by can_schedule_ready_p ()). */
/*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
-
+
change_queue_index (next, QUEUE_NOWHERE);
return -1;
}
else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
- /* We should change pattern of every previously speculative
+ /* We should change pattern of every previously speculative
instruction - and we determine if NEXT was speculative by using
ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
pat too, so skip them. */
@@ -3861,12 +3861,12 @@ try_ready (rtx next)
}
if (sched_verbose >= 2)
- {
+ {
int s = TODO_SPEC (next);
-
+
fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
(*current_sched_info->print_insn) (next, 0));
-
+
if (spec_info && spec_info->dump)
{
if (s & BEGIN_DATA)
@@ -3878,10 +3878,10 @@ try_ready (rtx next)
}
fprintf (sched_dump, "\n");
- }
-
+ }
+
adjust_priority (next);
-
+
return fix_tick_ready (next);
}
@@ -3904,10 +3904,10 @@ fix_tick_ready (rtx next)
full_p = (tick == INVALID_TICK);
FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
- {
+ {
rtx pro = DEP_PRO (dep);
int tick1;
-
+
gcc_assert (INSN_TICK (pro) >= MIN_TICK);
tick1 = INSN_TICK (pro) + dep_cost (dep);
@@ -3940,10 +3940,10 @@ change_queue_index (rtx next, int delay)
{
int i = QUEUE_INDEX (next);
- gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
+ gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
&& delay != 0);
gcc_assert (i != QUEUE_SCHEDULED);
-
+
if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
|| (delay < 0 && delay == i))
/* We have nothing to do. */
@@ -3954,18 +3954,18 @@ change_queue_index (rtx next, int delay)
ready_remove_insn (next);
else if (i >= 0)
queue_remove (next);
-
+
/* Add it to the proper place. */
if (delay == QUEUE_READY)
ready_add (readyp, next, false);
else if (delay >= 1)
queue_insn (next, delay);
-
+
if (sched_verbose >= 2)
- {
+ {
fprintf (sched_dump, ";;\t\ttick updated: insn %s",
(*current_sched_info->print_insn) (next, 0));
-
+
if (delay == QUEUE_READY)
fprintf (sched_dump, " into ready\n");
else if (delay >= 1)
@@ -4047,10 +4047,10 @@ generate_recovery_code (rtx insn)
{
if (TODO_SPEC (insn) & BEGIN_SPEC)
begin_speculative_block (insn);
-
+
/* Here we have insn with no dependencies to
instructions other then CHECK_SPEC ones. */
-
+
if (TODO_SPEC (insn) & BE_IN_SPEC)
add_to_speculative_block (insn);
}
@@ -4094,7 +4094,7 @@ process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
ds_t new_ds;
new_ds = (ds & ~BEGIN_SPEC) | fs;
-
+
if (/* consumer can 'be in speculative'. */
sched_insn_is_legitimate_for_speculation_p (consumer,
new_ds))
@@ -4121,7 +4121,7 @@ static void
begin_speculative_block (rtx insn)
{
if (TODO_SPEC (insn) & BEGIN_DATA)
- nr_begin_data++;
+ nr_begin_data++;
if (TODO_SPEC (insn) & BEGIN_CONTROL)
nr_begin_control++;
@@ -4152,7 +4152,7 @@ add_to_speculative_block (rtx insn)
TODO_SPEC (insn) &= ~BE_IN_SPEC;
gcc_assert (!TODO_SPEC (insn));
-
+
DONE_SPEC (insn) |= ts;
/* First we convert all simple checks to branchy. */
@@ -4263,7 +4263,7 @@ add_to_speculative_block (rtx insn)
twin = XEXP (twins, 1);
free_INSN_LIST_node (twins);
- twins = twin;
+ twins = twin;
}
calc_priorities (priorities_roots);
@@ -4354,16 +4354,16 @@ init_before_recovery (basic_block *before_recovery_ptr)
if (e)
{
- /* We create two basic blocks:
+ /* We create two basic blocks:
1. Single instruction block is inserted right after E->SRC
- and has jump to
+ and has jump to
2. Empty block right before EXIT_BLOCK.
Between these two blocks recovery blocks will be emitted. */
basic_block single, empty;
rtx x, label;
- /* If the fallthrough edge to exit we've found is from the block we've
+ /* If the fallthrough edge to exit we've found is from the block we've
created before, don't do anything more. */
if (last == after_recovery)
return;
@@ -4397,7 +4397,7 @@ init_before_recovery (basic_block *before_recovery_ptr)
JUMP_LABEL (x) = label;
LABEL_NUSES (label)++;
haifa_init_insn (x);
-
+
emit_barrier_after (x);
sched_init_only_bb (empty, NULL);
@@ -4413,8 +4413,8 @@ init_before_recovery (basic_block *before_recovery_ptr)
if (sched_verbose >= 2 && spec_info->dump)
fprintf (spec_info->dump,
- ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
- last->index, single->index, empty->index);
+ ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
+ last->index, single->index, empty->index);
}
else
before_recovery = last;
@@ -4427,7 +4427,7 @@ sched_create_recovery_block (basic_block *before_recovery_ptr)
rtx label;
rtx barrier;
basic_block rec;
-
+
haifa_recovery_bb_recently_added_p = true;
haifa_recovery_bb_ever_added_p = true;
@@ -4445,8 +4445,8 @@ sched_create_recovery_block (basic_block *before_recovery_ptr)
if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
BB_SET_PARTITION (rec, BB_COLD_PARTITION);
-
- if (sched_verbose && spec_info->dump)
+
+ if (sched_verbose && spec_info->dump)
fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
rec->index);
@@ -4464,13 +4464,13 @@ sched_create_recovery_edges (basic_block first_bb, basic_block rec,
int edge_flags;
/* This is fixing of incoming edge. */
- /* ??? Which other flags should be specified? */
+ /* ??? Which other flags should be specified? */
if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
/* Partition type is the same, if it is "unpartitioned". */
edge_flags = EDGE_CROSSING;
else
edge_flags = 0;
-
+
make_edge (first_bb, rec, edge_flags);
label = block_label (second_bb);
jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
@@ -4491,9 +4491,9 @@ sched_create_recovery_edges (basic_block first_bb, basic_block rec,
edge_flags = EDGE_CROSSING;
}
else
- edge_flags = 0;
+ edge_flags = 0;
- make_single_succ_edge (rec, second_bb, edge_flags);
+ make_single_succ_edge (rec, second_bb, edge_flags);
}
/* This function creates recovery code for INSN. If MUTATE_P is nonzero,
@@ -4541,7 +4541,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
if (rec != EXIT_BLOCK_PTR)
{
/* To have mem_reg alive at the beginning of second_bb,
- we emit check BEFORE insn, so insn after splitting
+ we emit check BEFORE insn, so insn after splitting
insn will be at the beginning of second_bb, which will
provide us with the correct life information. */
check = emit_jump_insn_before (check, insn);
@@ -4619,14 +4619,14 @@ create_check_block_twin (rtx insn, bool mutate_p)
sched_create_recovery_edges (first_bb, rec, second_bb);
- sched_init_only_bb (second_bb, first_bb);
+ sched_init_only_bb (second_bb, first_bb);
sched_init_only_bb (rec, EXIT_BLOCK_PTR);
jump = BB_END (rec);
haifa_init_insn (jump);
}
- /* Move backward dependences from INSN to CHECK and
+ /* Move backward dependences from INSN to CHECK and
move forward dependences from INSN to TWIN. */
/* First, create dependencies between INSN's producers and CHECK & TWIN. */
@@ -4639,7 +4639,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
check --TRUE--> producer ??? or ANTI ???
twin --TRUE--> producer
twin --ANTI--> check
-
+
If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
check --ANTI--> producer
twin --ANTI--> producer
@@ -4648,7 +4648,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
check ~~TRUE~~> producer
twin ~~TRUE~~> producer
- twin --ANTI--> check */
+ twin --ANTI--> check */
ds = DEP_STATUS (dep);
@@ -4665,7 +4665,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
{
DEP_CON (new_dep) = twin;
sd_add_dep (new_dep, false);
- }
+ }
}
/* Second, remove backward dependencies of INSN. */
@@ -4686,11 +4686,11 @@ create_check_block_twin (rtx insn, bool mutate_p)
/* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
here. */
-
+
gcc_assert (!DONE_SPEC (insn));
-
+
if (!mutate_p)
- {
+ {
ds_t ts = TODO_SPEC (insn);
DONE_SPEC (insn) = ts & BEGIN_SPEC;
@@ -4726,7 +4726,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
}
else
{
- if (spec_info->dump)
+ if (spec_info->dump)
fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
(*current_sched_info->print_insn) (insn, 0));
@@ -4781,7 +4781,7 @@ fix_recovery_deps (basic_block rec)
rtx link;
bitmap_initialize (&in_ready, 0);
-
+
/* NOTE - a basic block note. */
note = NEXT_INSN (BB_HEAD (rec));
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
@@ -4816,7 +4816,7 @@ fix_recovery_deps (basic_block rec)
sd_iterator_next (&sd_it);
}
}
-
+
insn = PREV_INSN (insn);
}
while (insn != note);
@@ -4831,10 +4831,10 @@ fix_recovery_deps (basic_block rec)
/* Fixing jump's dependences. */
insn = BB_HEAD (rec);
jump = BB_END (rec);
-
+
gcc_assert (LABEL_P (insn));
insn = NEXT_INSN (insn);
-
+
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
add_jump_dependencies (insn, jump);
}
@@ -4948,7 +4948,7 @@ unlink_bb_notes (basic_block first, basic_block last)
if (LABEL_P (label))
note = NEXT_INSN (label);
else
- note = label;
+ note = label;
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
prev = PREV_INSN (label);
@@ -4962,7 +4962,7 @@ unlink_bb_notes (basic_block first, basic_block last)
if (last == first)
break;
-
+
last = last->prev_bb;
}
while (1);
@@ -4977,14 +4977,14 @@ restore_bb_notes (basic_block first)
return;
/* We DON'T unlink basic block notes of the first block in the ebb. */
- first = first->next_bb;
+ first = first->next_bb;
/* Remember: FIRST is actually a second basic block in the ebb. */
while (first != EXIT_BLOCK_PTR
&& bb_header[first->index])
{
rtx prev, label, note, next;
-
+
label = bb_header[first->index];
prev = PREV_INSN (label);
next = NEXT_INSN (prev);
@@ -4992,7 +4992,7 @@ restore_bb_notes (basic_block first)
if (LABEL_P (label))
note = NEXT_INSN (label);
else
- note = label;
+ note = label;
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
bb_header[first->index] = 0;
@@ -5000,7 +5000,7 @@ restore_bb_notes (basic_block first)
NEXT_INSN (prev) = label;
NEXT_INSN (note) = next;
PREV_INSN (next) = note;
-
+
first = first->next_bb;
}
@@ -5022,7 +5022,7 @@ fix_jump_move (rtx jump)
gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
|| IS_SPECULATION_BRANCHY_CHECK_P (jump));
-
+
if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
/* if jump_bb_next is not empty. */
BB_END (jump_bb) = BB_END (jump_bb_next);
@@ -5051,9 +5051,9 @@ move_block_after_check (rtx jump)
bb = BLOCK_FOR_INSN (PREV_INSN (jump));
jump_bb = BLOCK_FOR_INSN (jump);
jump_bb_next = jump_bb->next_bb;
-
+
update_bb_for_insn (jump_bb);
-
+
gcc_assert (IS_SPECULATION_CHECK_P (jump)
|| IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
@@ -5067,7 +5067,7 @@ move_block_after_check (rtx jump)
move_succs (&t, jump_bb_next);
df_mark_solutions_dirty ();
-
+
common_sched_info->fix_recovery_cfg
(bb->index, jump_bb->index, jump_bb_next->index);
}
@@ -5160,7 +5160,7 @@ add_jump_dependencies (rtx insn, rtx jump)
insn = NEXT_INSN (insn);
if (insn == jump)
break;
-
+
if (dep_list_size (insn) == 0)
{
dep_def _new_dep, *new_dep = &_new_dep;
@@ -5234,23 +5234,23 @@ check_cfg (rtx head, rtx tail)
next_tail = NEXT_INSN (tail);
do
- {
- not_last = head != tail;
+ {
+ not_last = head != tail;
if (not_first)
gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
if (not_last)
gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
- if (LABEL_P (head)
+ if (LABEL_P (head)
|| (NOTE_INSN_BASIC_BLOCK_P (head)
&& (!not_first
|| (not_first && !LABEL_P (PREV_INSN (head))))))
{
- gcc_assert (bb == 0);
+ gcc_assert (bb == 0);
bb = BLOCK_FOR_INSN (head);
if (bb != 0)
- gcc_assert (BB_HEAD (bb) == head);
+ gcc_assert (BB_HEAD (bb) == head);
else
/* This is the case of jump table. See inside_basic_block_p (). */
gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
@@ -5266,7 +5266,7 @@ check_cfg (rtx head, rtx tail)
gcc_assert (inside_basic_block_p (head)
|| NOTE_P (head));
gcc_assert (BLOCK_FOR_INSN (head) == bb);
-
+
if (LABEL_P (head))
{
head = NEXT_INSN (head);
@@ -5384,7 +5384,7 @@ sched_scan (const struct sched_scan_info_def *ssi,
extend_insn ();
if (bbs != NULL)
- {
+ {
unsigned i;
basic_block x;
@@ -5481,12 +5481,12 @@ sched_extend_target (void)
static void
extend_h_i_d (void)
{
- int reserve = (get_max_uid () + 1
+ int reserve = (get_max_uid () + 1
- VEC_length (haifa_insn_data_def, h_i_d));
- if (reserve > 0
+ if (reserve > 0
&& ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
{
- VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
+ VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
3 * get_max_uid () / 2);
sched_extend_target ();
}
@@ -5585,7 +5585,7 @@ sched_split_block_1 (basic_block first_bb, rtx after)
e = split_block (first_bb, after);
gcc_assert (e->src == first_bb);
- /* sched_split_block emits note if *check == BB_END. Probably it
+ /* sched_split_block emits note if *check == BB_END. Probably it
is better to rip that note off. */
return e->dest;
diff --git a/gcc/hard-reg-set.h b/gcc/hard-reg-set.h
index 1f2c1aea911..272a2394d5d 100644
--- a/gcc/hard-reg-set.h
+++ b/gcc/hard-reg-set.h
@@ -19,7 +19,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_HARD_REG_SET_H
-#define GCC_HARD_REG_SET_H
+#define GCC_HARD_REG_SET_H
/* Define the type of a set of hard registers. */
@@ -500,10 +500,10 @@ typedef struct
#define HARD_REG_ELT_BITS UHOST_BITS_PER_WIDE_INT
-/* The implementation of the iterator functions is fully analogous to
+/* The implementation of the iterator functions is fully analogous to
the bitmap iterators. */
static inline void
-hard_reg_set_iter_init (hard_reg_set_iterator *iter, HARD_REG_SET set,
+hard_reg_set_iter_init (hard_reg_set_iterator *iter, HARD_REG_SET set,
unsigned min, unsigned *regno)
{
#ifdef HARD_REG_SET_LONGS
@@ -525,7 +525,7 @@ hard_reg_set_iter_init (hard_reg_set_iterator *iter, HARD_REG_SET set,
*regno = min;
}
-static inline bool
+static inline bool
hard_reg_set_iter_set (hard_reg_set_iterator *iter, unsigned *regno)
{
while (1)
@@ -544,7 +544,7 @@ hard_reg_set_iter_set (hard_reg_set_iterator *iter, unsigned *regno)
}
return (*regno < FIRST_PSEUDO_REGISTER);
}
-
+
/* Round to the beginning of the next word. */
*regno = (*regno + HARD_REG_ELT_BITS - 1);
*regno -= *regno % HARD_REG_ELT_BITS;
diff --git a/gcc/hooks.c b/gcc/hooks.c
index 0547ca81bb6..ccbce01ac85 100644
--- a/gcc/hooks.c
+++ b/gcc/hooks.c
@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>.
+ <http://www.gnu.org/licenses/>.
In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
diff --git a/gcc/hooks.h b/gcc/hooks.h
index 2704e25234f..e0430de19f3 100644
--- a/gcc/hooks.h
+++ b/gcc/hooks.h
@@ -14,7 +14,7 @@
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>.
+ <http://www.gnu.org/licenses/>.
In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
diff --git a/gcc/hosthooks.h b/gcc/hosthooks.h
index e313ffd0831..ce0e382c03e 100644
--- a/gcc/hosthooks.h
+++ b/gcc/hosthooks.h
@@ -30,7 +30,7 @@ struct host_hooks
void * (*gt_pch_get_address) (size_t size, int fd);
/* ADDR is an address returned by gt_pch_get_address. Attempt to allocate
- SIZE bytes at the same address and load it with the data from FD at
+ SIZE bytes at the same address and load it with the data from FD at
OFFSET. Return -1 if we couldn't allocate memory at ADDR, return 0
if the memory is allocated but the data not loaded, return 1 if done. */
int (*gt_pch_use_address) (void *addr, size_t size, int fd, size_t offset);
diff --git a/gcc/hwint.h b/gcc/hwint.h
index 32f88062df3..7dec86080dc 100644
--- a/gcc/hwint.h
+++ b/gcc/hwint.h
@@ -17,13 +17,13 @@
/* The string that should be inserted into a printf style format to
indicate a "long" operand. */
-#ifndef HOST_LONG_FORMAT
+#ifndef HOST_LONG_FORMAT
#define HOST_LONG_FORMAT "l"
#endif
/* The string that should be inserted into a printf style format to
indicate a "long long" operand. */
-#ifndef HOST_LONG_LONG_FORMAT
+#ifndef HOST_LONG_LONG_FORMAT
#define HOST_LONG_LONG_FORMAT "ll"
#endif
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 4417e6e6b87..348c0cca8bf 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -3765,7 +3765,7 @@ find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge)
test_bb->index, else_bb->index);
/* ELSE is small. */
- if (! cheap_bb_rtx_cost_p (else_bb,
+ if (! cheap_bb_rtx_cost_p (else_bb,
COSTS_N_INSNS (BRANCH_COST (optimize_bb_for_speed_p (else_edge->src),
predictable_edge_p (else_edge)))))
return FALSE;
@@ -3987,11 +3987,11 @@ dead_or_predicable (basic_block test_bb, basic_block merge_bb,
fail = 1;
}
}
-
+
/* For TEST, we're interested in a range of insns, not a whole block.
Moreover, we're interested in the insns live from OTHER_BB. */
-
- /* The loop below takes the set of live registers
+
+ /* The loop below takes the set of live registers
after JUMP, and calculates the live set before EARLIEST. */
bitmap_copy (test_live, df_get_live_in (other_bb));
df_simulate_initialize_backwards (test_bb, test_live);
@@ -4157,7 +4157,7 @@ if_convert (void)
FOR_EACH_BB (bb)
{
basic_block new_bb;
- while (!df_get_bb_dirty (bb)
+ while (!df_get_bb_dirty (bb)
&& (new_bb = find_if_header (bb, pass)) != NULL)
bb = new_bb;
}
diff --git a/gcc/incpath.c b/gcc/incpath.c
index a5619781b10..9cc783f6132 100644
--- a/gcc/incpath.c
+++ b/gcc/incpath.c
@@ -171,8 +171,8 @@ add_standard_paths (const char *sysroot, const char *iprefix,
&& strncmp (p->fname, cpp_PREFIX, cpp_PREFIX_len) == 0)
{
static const char *relocated_prefix;
- /* If this path starts with the configure-time prefix,
- but the compiler has been relocated, replace it
+ /* If this path starts with the configure-time prefix,
+ but the compiler has been relocated, replace it
with the run-time prefix. The run-time exec prefix
is GCC_EXEC_PREFIX. Compute the path from there back
to the toplevel prefix. */
@@ -182,13 +182,13 @@ add_standard_paths (const char *sysroot, const char *iprefix,
/* Make relative prefix expects the first argument
to be a program, not a directory. */
dummy = concat (gcc_exec_prefix, "dummy", NULL);
- relocated_prefix
+ relocated_prefix
= make_relative_prefix (dummy,
cpp_EXEC_PREFIX,
cpp_PREFIX);
}
str = concat (relocated_prefix,
- p->fname + cpp_PREFIX_len,
+ p->fname + cpp_PREFIX_len,
NULL);
str = update_path (str, p->component);
}
@@ -399,7 +399,7 @@ add_path (char *path, int chain, int cxx_aware, bool user_supplied_p)
char* end = path + pathlen - 1;
/* Preserve the lead '/' or lead "c:/". */
char* start = path + (pathlen > 2 && path[1] == ':' ? 3 : 1);
-
+
for (; end > start && IS_DIR_SEPARATOR (*end); end--)
*end = 0;
#endif
diff --git a/gcc/init-regs.c b/gcc/init-regs.c
index f667797b8df..b4dd5e9a1fb 100644
--- a/gcc/init-regs.c
+++ b/gcc/init-regs.c
@@ -1,4 +1,4 @@
-/* Initialization of uninitialized regs.
+/* Initialization of uninitialized regs.
Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GCC.
@@ -99,7 +99,7 @@ initialize_uninitialized_regs (void)
rtx move_insn;
rtx reg = DF_REF_REAL_REG (use);
- bitmap_set_bit (already_genned, regno);
+ bitmap_set_bit (already_genned, regno);
start_sequence ();
emit_move_insn (reg, CONST0_RTX (GET_MODE (reg)));
@@ -107,8 +107,8 @@ initialize_uninitialized_regs (void)
end_sequence ();
emit_insn_before (move_insn, insn);
if (dump_file)
- fprintf (dump_file,
- "adding initialization in %s of reg %d at in block %d for insn %d.\n",
+ fprintf (dump_file,
+ "adding initialization in %s of reg %d at in block %d for insn %d.\n",
current_function_name (), regno, bb->index, uid);
}
}
@@ -117,7 +117,7 @@ initialize_uninitialized_regs (void)
if (optimize == 1)
{
- if (dump_file)
+ if (dump_file)
df_dump (dump_file);
df_remove_problem (df_live);
}
diff --git a/gcc/integrate.c b/gcc/integrate.c
index d92cec5f67e..0884017e391 100644
--- a/gcc/integrate.c
+++ b/gcc/integrate.c
@@ -343,7 +343,7 @@ allocate_initial_values (rtx *reg_equiv_memory_loc)
{
int regno = REGNO (ivs->entries[i].pseudo);
rtx x = targetm.allocate_initial_value (ivs->entries[i].hard_reg);
-
+
if (x && REG_N_SETS (REGNO (ivs->entries[i].pseudo)) <= 1)
{
if (MEM_P (x))
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 2af2c7f2d6f..b6e471230fa 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -1,19 +1,19 @@
/* Interprocedural constant propagation
Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
Contributed by Razya Ladelsky <RAZYA@il.ibm.com>
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see
{
printf ("value is %d",y);
}
-
+
int f (int x)
{
g (x);
@@ -43,32 +43,32 @@ along with GCC; see the file COPYING3. If not see
f (3);
h (3);
}
-
-
+
+
The IPCP algorithm will find that g's formal argument y is always called
with the value 3.
The algorithm used is based on "Interprocedural Constant Propagation", by
Challahan David, Keith D Cooper, Ken Kennedy, Linda Torczon, Comp86, pg
152-161
-
+
The optimization is divided into three stages:
First stage - intraprocedural analysis
=======================================
This phase computes jump_function and modification flags.
-
+
A jump function for a callsite represents the values passed as an actual
arguments of a given callsite. There are three types of values:
Pass through - the caller's formal parameter is passed as an actual argument.
Constant - a constant is passed as an actual argument.
Unknown - neither of the above.
-
+
The jump function info, ipa_jump_func, is stored in ipa_edge_args
structure (defined in ipa_prop.h and pointed to by cgraph_node->aux)
modified_flags are defined in ipa_node_params structure
(defined in ipa_prop.h and pointed to by cgraph_edge->aux).
-
+
-ipcp_init_stage() is the first stage driver.
Second stage - interprocedural analysis
@@ -79,10 +79,10 @@ along with GCC; see the file COPYING3. If not see
TOP - unknown.
BOTTOM - non constant.
CONSTANT - constant value.
-
+
Lattice describing a formal parameter p will have a constant value if all
callsites invoking this function have the same constant value passed to p.
-
+
The lattices are stored in ipcp_lattice which is itself in ipa_node_params
structure (defined in ipa_prop.h and pointed to by cgraph_edge->aux).
@@ -115,7 +115,7 @@ along with GCC; see the file COPYING3. If not see
and many calls redirected back to fit the description above.
-ipcp_insert_stage() is the third phase driver.
-
+
*/
#include "config.h"
@@ -473,7 +473,7 @@ ipcp_cloning_candidate_p (struct cgraph_node *node)
if (cgraph_maybe_hot_edge_p (e))
n_hot_calls ++;
}
-
+
if (!n_calls)
{
if (dump_file)
@@ -487,7 +487,7 @@ ipcp_cloning_candidate_p (struct cgraph_node *node)
fprintf (dump_file, "Considering %s for cloning; code would shrink.\n",
cgraph_node_name (node));
return true;
- }
+ }
if (!flag_ipa_cp_clone)
{
@@ -634,7 +634,7 @@ ipcp_init_stage (void)
if (ipa_get_cs_argument_count (IPA_EDGE_REF (cs))
!= ipa_get_param_count (IPA_NODE_REF (cs->callee)))
{
- /* Handle cases of functions with
+ /* Handle cases of functions with
a variable number of parameters. */
ipa_set_called_with_variable_arg (IPA_NODE_REF (cs->callee));
if (flag_indirect_inlining)
@@ -1291,7 +1291,7 @@ ipcp_generate_summary (void)
ipa_check_create_node_params ();
ipa_check_create_edge_args ();
ipa_register_cgraph_hooks ();
- /* 1. Call the init stage to initialize
+ /* 1. Call the init stage to initialize
the ipa_node_params and ipa_edge_args structures. */
ipcp_init_stage ();
}
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 495d8851247..120c234f89b 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -262,7 +262,7 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
else
{
struct cgraph_node *n;
- n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
+ n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
update_original, NULL);
cgraph_redirect_edge_callee (e, n);
}
@@ -402,7 +402,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
}
/* Return false when inlining WHAT into TO is not good idea
- as it would cause too large growth of function bodies.
+ as it would cause too large growth of function bodies.
When ONE_ONLY is true, assume that only one call site is going
to be inlined, otherwise figure out how many call sites in
TO calls WHAT and verify that all can be inlined.
@@ -571,7 +571,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
badness = growth * 10000;
div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
/ (edge->callee->global.time + 1) + 1, 100);
-
+
/* Decrease badness if call is nested. */
/* Compress the range so we don't overflow. */
@@ -595,7 +595,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
badness = cgraph_estimate_growth (edge->callee) * 256;
/* Decrease badness if call is nested. */
- if (badness > 0)
+ if (badness > 0)
badness >>= nest;
else
{
@@ -744,7 +744,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
}
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
" Performing recursive inlining on %s\n",
cgraph_node_name (node));
@@ -773,7 +773,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
if (depth > max_depth)
{
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
" maximal depth reached\n");
continue;
}
@@ -789,7 +789,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
if (curr->count * 100 / node->count < probability)
{
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
" Probability of edge is too small\n");
continue;
}
@@ -797,7 +797,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
if (dump_file)
{
- fprintf (dump_file,
+ fprintf (dump_file,
" Inlining call of depth %i", depth);
if (node->count)
{
@@ -816,7 +816,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
fibheap_delete (heap);
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
"\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
master_clone->global.size, node->global.size,
master_clone->global.time, node->global.time);
@@ -947,11 +947,11 @@ cgraph_decide_inlining_of_small_functions (void)
if (dump_file)
{
- fprintf (dump_file,
+ fprintf (dump_file,
"\nConsidering %s with %i size\n",
cgraph_node_name (edge->callee),
edge->callee->global.size);
- fprintf (dump_file,
+ fprintf (dump_file,
" to be inlined into %s in %s:%i\n"
" Estimated growth after inlined into all callees is %+i insns.\n"
" Estimated badness is %i, frequency %.2f.\n",
@@ -1089,7 +1089,7 @@ cgraph_decide_inlining_of_small_functions (void)
if (dump_file)
{
- fprintf (dump_file,
+ fprintf (dump_file,
" Inlined into %s which now has size %i and self time %i,"
"net change of %+i.\n",
cgraph_node_name (edge->caller),
@@ -1228,7 +1228,7 @@ cgraph_decide_inlining (void)
if (e->inline_failed)
e->inline_failed = CIF_RECURSIVE_INLINING;
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
" Inlined for a net change of %+i size.\n",
overall_size - old_size);
}
@@ -1360,7 +1360,7 @@ try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
return false;
}
}
-
+
callee->aux = (void *)(size_t) mode;
if (dump_file)
{
@@ -1375,7 +1375,7 @@ try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
/* In order to fully inline always_inline functions, we need to
recurse here, since the inlined functions might not be processed by
- incremental inlining at all yet.
+ incremental inlining at all yet.
Also flattening needs to be done recursively. */
@@ -1402,7 +1402,7 @@ leaf_node_p (struct cgraph_node *n)
}
/* Decide on the inlining. We do so in the topological order to avoid
- expenses on updating data structures.
+ expenses on updating data structures.
DEPTH is depth of recursion, used only for debug output. */
static bool
@@ -1638,7 +1638,7 @@ cgraph_gate_early_inlining (void)
return flag_early_inlining;
}
-struct gimple_opt_pass pass_early_inline =
+struct gimple_opt_pass pass_early_inline =
{
{
GIMPLE_PASS,
@@ -1669,7 +1669,7 @@ cgraph_gate_ipa_early_inlining (void)
/* IPA pass wrapper for early inlining pass. We need to run early inlining
before tree profiling so we have stand alone IPA pass for doing so. */
-struct simple_ipa_opt_pass pass_ipa_early_inline =
+struct simple_ipa_opt_pass pass_ipa_early_inline =
{
{
SIMPLE_IPA_PASS,
@@ -1723,7 +1723,7 @@ likely_eliminated_by_inlining_p (gimple stmt)
while (handled_component_p (inner_rhs)
|| TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
inner_rhs = TREE_OPERAND (inner_rhs, 0);
-
+
if (TREE_CODE (inner_rhs) == PARM_DECL
|| (TREE_CODE (inner_rhs) == SSA_NAME
@@ -1875,7 +1875,7 @@ compute_inline_parameters_for_current (void)
return 0;
}
-struct gimple_opt_pass pass_inline_parameters =
+struct gimple_opt_pass pass_inline_parameters =
{
{
GIMPLE_PASS,
@@ -1963,7 +1963,7 @@ inline_generate_summary (void)
for (node = cgraph_nodes; node; node = node->next)
if (node->analyzed)
analyze_function (node);
-
+
return;
}
@@ -2003,7 +2003,7 @@ inline_transform (struct cgraph_node *node)
and inliner, so when ipa-cp is active, we don't need to write them
twice. */
-static void
+static void
inline_read_summary (void)
{
if (flag_indirect_inlining)
@@ -2020,7 +2020,7 @@ inline_read_summary (void)
Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
active, we don't need to write them twice. */
-static void
+static void
inline_write_summary (cgraph_node_set set)
{
if (flag_indirect_inlining && !flag_ipa_cp)
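
The ipa-inline.c hunks above are whitespace-only, but they sit in the code that keys inline candidates by "badness": cgraph_edge_badness scales the estimated growth by 256 and shifts positive badness right by the call's nesting depth, so the fibheap hands back the cheapest edge first. Below is a minimal, standalone sketch of that keying idea; the toy_edge type and toy_edge_badness function are invented for illustration and are not GCC code.

#include <stdio.h>

/* Toy model of the ordering key used for inline candidates:
   a smaller badness value means the edge is inlined earlier.  */
struct toy_edge
{
  int growth;  /* estimated size growth from inlining the callee */
  int nest;    /* nesting depth of the call site */
};

static int
toy_edge_badness (const struct toy_edge *e)
{
  int badness = e->growth * 256;

  /* Decrease badness for nested calls, as in the hunk above; negative
     badness (the call shrinks the code) stays the most attractive.  */
  if (badness > 0)
    badness >>= e->nest;

  return badness;
}

int
main (void)
{
  struct toy_edge flat   = { 40, 0 };
  struct toy_edge nested = { 40, 3 };

  printf ("flat badness %d, nested badness %d\n",
          toy_edge_badness (&flat), toy_edge_badness (&nested));
  return 0;
}
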
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 7394f911771..6a018f456ea 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -2190,7 +2190,7 @@ ipa_prop_read_jump_functions (void)
}
}
-/* After merging units, we can get mismatch in argument counts.
+/* After merging units, we can get mismatch in argument counts.
   Also decl merging might've rendered parameter lists obsolete.
Also compute called_with_variable_arg info. */
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index bc03bfff61a..9efcb8d0524 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -69,13 +69,13 @@ enum pure_const_state_e
/* Holder for the const_state. There is one of these per function
decl. */
-struct funct_state_d
+struct funct_state_d
{
/* See above. */
enum pure_const_state_e pure_const_state;
/* What user set here; we can be always sure about this. */
- enum pure_const_state_e state_previously_known;
- bool looping_previously_known;
+ enum pure_const_state_e state_previously_known;
+ bool looping_previously_known;
/* True if the function could possibly infinite loop. There are a
lot of ways that this could be determined. We are pretty
@@ -92,7 +92,7 @@ typedef struct funct_state_d * funct_state;
/* The storage of the funct_state is abstracted because there is the
possibility that it may be desirable to move this to the cgraph
- local info. */
+ local info. */
/* Array, indexed by cgraph node uid, of function states. */
@@ -114,7 +114,7 @@ finish_state (void)
}
-/* Return the function state from NODE. */
+/* Return the function state from NODE. */
static inline funct_state
get_function_state (struct cgraph_node *node)
@@ -139,14 +139,14 @@ set_function_state (struct cgraph_node *node, funct_state s)
/* Check to see if the use (or definition when CHECKING_WRITE is true)
variable T is legal in a function that is either pure or const. */
-static inline void
-check_decl (funct_state local,
+static inline void
+check_decl (funct_state local,
tree t, bool checking_write)
{
/* Do not want to do anything with volatile except mark any
function that uses one to be not const or pure. */
- if (TREE_THIS_VOLATILE (t))
- {
+ if (TREE_THIS_VOLATILE (t))
+ {
local->pure_const_state = IPA_NEITHER;
if (dump_file)
fprintf (dump_file, " Volatile operand is not const/pure");
@@ -170,7 +170,7 @@ check_decl (funct_state local,
/* Since we have dealt with the locals and params cases above, if we
are CHECKING_WRITE, this cannot be a pure or constant
function. */
- if (checking_write)
+ if (checking_write)
{
local->pure_const_state = IPA_NEITHER;
if (dump_file)
@@ -183,7 +183,7 @@ check_decl (funct_state local,
/* Readonly reads are safe. */
if (TREE_READONLY (t) && !TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (t)))
return; /* Read of a constant, do not change the function state. */
- else
+ else
{
if (dump_file)
fprintf (dump_file, " global memory read is not const\n");
@@ -211,7 +211,7 @@ check_decl (funct_state local,
/* Check to see if the use (or definition when CHECKING_WRITE is true)
variable T is legal in a function that is either pure or const. */
-static inline void
+static inline void
check_op (funct_state local, tree t, bool checking_write)
{
t = get_base_address (t);
@@ -286,17 +286,17 @@ check_call (funct_state local, gimple call, bool ipa)
}
}
}
-
+
/* The const and pure flags are set by a variety of places in the
compiler (including here). If someone has already set the flags
for the callee, (such as for some of the builtins) we will use
- them, otherwise we will compute our own information.
-
+ them, otherwise we will compute our own information.
+
Const and pure functions have less clobber effects than other
functions so we process these first. Otherwise if it is a call
outside the compilation unit or an indirect call we punt. This
leaves local calls which will be processed by following the call
- graph. */
+ graph. */
if (callee_t)
{
callee = cgraph_node(callee_t);
@@ -354,12 +354,12 @@ check_call (funct_state local, gimple call, bool ipa)
}
local->can_throw = true;
}
- if (flags & ECF_CONST)
+ if (flags & ECF_CONST)
{
if (callee_t && DECL_LOOPING_CONST_OR_PURE_P (callee_t))
local->looping = true;
}
- else if (flags & ECF_PURE)
+ else if (flags & ECF_PURE)
{
if (callee_t && DECL_LOOPING_CONST_OR_PURE_P (callee_t))
local->looping = true;
@@ -368,7 +368,7 @@ check_call (funct_state local, gimple call, bool ipa)
if (local->pure_const_state == IPA_CONST)
local->pure_const_state = IPA_PURE;
}
- else
+ else
{
if (dump_file)
    fprintf (dump_file, " unknown function call is not const/pure\n");
@@ -457,7 +457,7 @@ check_stmt (gimple_stmt_iterator *gsip, funct_state local, bool ipa)
for (i = 0; i < gimple_asm_nclobbers (stmt); i++)
{
tree op = gimple_asm_clobber_op (stmt, i);
- if (simple_cst_equal(TREE_VALUE (op), memory_identifier_string) == 1)
+ if (simple_cst_equal(TREE_VALUE (op), memory_identifier_string) == 1)
{
if (dump_file)
fprintf (dump_file, " memory asm clobber is not const/pure");
@@ -500,13 +500,13 @@ analyze_function (struct cgraph_node *fn, bool ipa)
if (dump_file)
{
- fprintf (dump_file, "\n\n local analysis of %s\n ",
+ fprintf (dump_file, "\n\n local analysis of %s\n ",
cgraph_node_name (fn));
}
-
+
push_cfun (DECL_STRUCT_FUNCTION (decl));
current_function_decl = decl;
-
+
FOR_EACH_BB (this_block)
{
gimple_stmt_iterator gsi;
@@ -544,7 +544,7 @@ end:
fprintf (dump_file, " has irreducible loops\n");
l->looping = true;
}
- else
+ else
{
loop_iterator li;
struct loop *loop;
@@ -663,7 +663,7 @@ register_hooks (void)
/* Analyze each function in the cgraph to see if it is locally PURE or
CONST. */
-static void
+static void
generate_summary (void)
{
struct cgraph_node *node;
@@ -676,7 +676,7 @@ generate_summary (void)
operations. */
visited_nodes = pointer_set_create ();
- /* Process all of the functions.
+ /* Process all of the functions.
We process AVAIL_OVERWRITABLE functions. We can not use the results
by default, but the info can be used at LTO with -fwhole-program or
@@ -708,9 +708,9 @@ pure_const_write_summary (cgraph_node_set set)
if (node->analyzed && get_function_state (node) != NULL)
count++;
}
-
+
lto_output_uleb128_stream (ob->main_stream, count);
-
+
/* Process all of the functions. */
for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
{
@@ -721,13 +721,13 @@ pure_const_write_summary (cgraph_node_set set)
funct_state fs;
int node_ref;
lto_cgraph_encoder_t encoder;
-
+
fs = get_function_state (node);
encoder = ob->decl_state->cgraph_node_encoder;
node_ref = lto_cgraph_encoder_encode (encoder, node);
lto_output_uleb128_stream (ob->main_stream, node_ref);
-
+
/* Note that flags will need to be read in the opposite
order as we are pushing the bitflags into FLAGS. */
bp = bitpack_create ();
@@ -747,7 +747,7 @@ pure_const_write_summary (cgraph_node_set set)
/* Deserialize the ipa info for lto. */
-static void
+static void
pure_const_read_summary (void)
{
struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
@@ -760,8 +760,8 @@ pure_const_read_summary (void)
const char *data;
size_t len;
struct lto_input_block *ib
- = lto_create_simple_input_block (file_data,
- LTO_section_ipa_pure_const,
+ = lto_create_simple_input_block (file_data,
+ LTO_section_ipa_pure_const,
&data, &len);
if (ib)
{
@@ -796,8 +796,8 @@ pure_const_read_summary (void)
bitpack_delete (bp);
}
- lto_destroy_simple_input_block (file_data,
- LTO_section_ipa_pure_const,
+ lto_destroy_simple_input_block (file_data,
+ LTO_section_ipa_pure_const,
ib, data, len);
}
}
@@ -884,8 +884,8 @@ propagate (void)
if (count > 1)
looping = true;
-
- for (e = w->callees; e; e = e->next_callee)
+
+ for (e = w->callees; e; e = e->next_callee)
{
struct cgraph_node *y = e->callee;
@@ -943,22 +943,22 @@ propagate (void)
{
case IPA_CONST:
if (!TREE_READONLY (w->decl) && dump_file)
- fprintf (dump_file, "Function found to be %sconst: %s\n",
+ fprintf (dump_file, "Function found to be %sconst: %s\n",
this_looping ? "looping " : "",
- cgraph_node_name (w));
+ cgraph_node_name (w));
TREE_READONLY (w->decl) = 1;
DECL_LOOPING_CONST_OR_PURE_P (w->decl) = this_looping;
break;
-
+
case IPA_PURE:
if (!DECL_PURE_P (w->decl) && dump_file)
- fprintf (dump_file, "Function found to be %spure: %s\n",
+ fprintf (dump_file, "Function found to be %spure: %s\n",
this_looping ? "looping " : "",
- cgraph_node_name (w));
+ cgraph_node_name (w));
DECL_PURE_P (w->decl) = 1;
DECL_LOOPING_CONST_OR_PURE_P (w->decl) = this_looping;
break;
-
+
default:
break;
}
@@ -1006,8 +1006,8 @@ propagate (void)
if (can_throw)
break;
-
- for (e = w->callees; e; e = e->next_callee)
+
+ for (e = w->callees; e; e = e->next_callee)
{
struct cgraph_node *y = e->callee;
@@ -1015,7 +1015,7 @@ propagate (void)
{
funct_state y_l = get_function_state (y);
- if (can_throw)
+ if (can_throw)
break;
if (y_l->can_throw && !TREE_NOTHROW (w->decl)
&& e->can_throw_external)
@@ -1041,7 +1041,7 @@ propagate (void)
for (e = w->callers; e; e = e->next_caller)
e->can_throw_external = false;
if (dump_file)
- fprintf (dump_file, "Function found to be nothrow: %s\n",
+ fprintf (dump_file, "Function found to be nothrow: %s\n",
cgraph_node_name (w));
}
else if (can_throw && !TREE_NOTHROW (w->decl))
@@ -1064,7 +1064,7 @@ propagate (void)
if (cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
free (get_function_state (node));
}
-
+
free (order);
VEC_free (funct_state, heap, funct_state_vec);
finish_state ();
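
ipa-pure-const keeps a three-point lattice per function (IPA_CONST, IPA_PURE, IPA_NEITHER, from best to worst) in funct_state_d, and propagate() folds every callee's state into its caller while walking strongly connected components. The sketch below shows only that meet operation with a plain enum; it is an illustration under that simplification, not the pass itself.

#include <stdio.h>

/* Three-point lattice, mirroring pure_const_state_e: CONST is the
   most optimistic value, NEITHER the most pessimistic.  */
enum toy_state { TOY_CONST, TOY_PURE, TOY_NEITHER };

/* Folding a callee's state into the caller keeps the worse (larger)
   of the two, which is how a caller that reaches an impure callee
   gets demoted during propagation.  */
static enum toy_state
toy_merge (enum toy_state caller, enum toy_state callee)
{
  return caller > callee ? caller : callee;
}

int
main (void)
{
  enum toy_state fn = TOY_CONST;

  fn = toy_merge (fn, TOY_CONST);    /* const callee: still const   */
  fn = toy_merge (fn, TOY_PURE);     /* pure callee: demote to pure */
  printf ("after pure callee: %d\n", (int) fn);
  fn = toy_merge (fn, TOY_NEITHER);  /* unknown call: demote fully  */
  printf ("after unknown call: %d\n", (int) fn);
  return 0;
}
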
diff --git a/gcc/ipa-reference.c b/gcc/ipa-reference.c
index 074aea6655e..032bef278eb 100644
--- a/gcc/ipa-reference.c
+++ b/gcc/ipa-reference.c
@@ -19,7 +19,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This file gathers information about how variables whose scope is
- confined to the compilation unit are used.
+ confined to the compilation unit are used.
There are two categories of information produced by this pass:
@@ -41,7 +41,7 @@ along with GCC; see the file COPYING3. If not see
local and global sets are examined to make the call clobbering less
traumatic, promote some statics to registers, and improve aliasing
information.
-
+
Currently must be run after inlining decisions have been made since
otherwise, the local sets will not contain information that is
consistent with post inlined state. The global sets are not prone
@@ -79,9 +79,9 @@ static void duplicate_node_data (struct cgraph_node *src,
void *data ATTRIBUTE_UNUSED);
/* The static variables defined within the compilation unit that are
- loaded or stored directly by function that owns this structure. */
+ loaded or stored directly by function that owns this structure. */
-struct ipa_reference_local_vars_info_d
+struct ipa_reference_local_vars_info_d
{
bitmap statics_read;
bitmap statics_written;
@@ -104,7 +104,7 @@ struct ipa_reference_local_vars_info_d
strongly connected component will have the same information. This
sharing saves both time and space in the computation of the vectors
as well as their translation from decl_uid form to ann_uid
- form. */
+ form. */
struct ipa_reference_global_vars_info_d
{
@@ -116,7 +116,7 @@ struct ipa_reference_global_vars_info_d
typedef struct ipa_reference_local_vars_info_d *ipa_reference_local_vars_info_t;
typedef struct ipa_reference_global_vars_info_d *ipa_reference_global_vars_info_t;
-struct ipa_reference_vars_info_d
+struct ipa_reference_vars_info_d
{
ipa_reference_local_vars_info_t local;
ipa_reference_global_vars_info_t global;
@@ -196,29 +196,29 @@ set_reference_vars_info (struct cgraph_node *node, ipa_reference_vars_info_t inf
/* Get a bitmap that contains all of the locally referenced static
variables for function FN. */
static ipa_reference_local_vars_info_t
-get_local_reference_vars_info (struct cgraph_node *fn)
+get_local_reference_vars_info (struct cgraph_node *fn)
{
ipa_reference_vars_info_t info = get_reference_vars_info (fn);
if (info)
return info->local;
else
- /* This phase was not run. */
+ /* This phase was not run. */
return NULL;
}
/* Get a bitmap that contains all of the globally referenced static
variables for function FN. */
-
+
static ipa_reference_global_vars_info_t
-get_global_reference_vars_info (struct cgraph_node *fn)
+get_global_reference_vars_info (struct cgraph_node *fn)
{
ipa_reference_vars_info_t info = get_reference_vars_info (fn);
if (info)
return info->global;
else
- /* This phase was not run. */
+ /* This phase was not run. */
return NULL;
}
@@ -226,11 +226,11 @@ get_global_reference_vars_info (struct cgraph_node *fn)
that are read during the execution of the function FN. Returns
NULL if no data is available. */
-bitmap
-ipa_reference_get_read_global (struct cgraph_node *fn)
+bitmap
+ipa_reference_get_read_global (struct cgraph_node *fn)
{
ipa_reference_global_vars_info_t g = get_global_reference_vars_info (fn);
- if (g)
+ if (g)
return g->statics_read;
else
return NULL;
@@ -241,11 +241,11 @@ ipa_reference_get_read_global (struct cgraph_node *fn)
that variables written may or may not be read during the function
call. Returns NULL if no data is available. */
-bitmap
-ipa_reference_get_written_global (struct cgraph_node *fn)
+bitmap
+ipa_reference_get_written_global (struct cgraph_node *fn)
{
ipa_reference_global_vars_info_t g = get_global_reference_vars_info (fn);
- if (g)
+ if (g)
return g->statics_written;
else
return NULL;
@@ -255,11 +255,11 @@ ipa_reference_get_written_global (struct cgraph_node *fn)
that are not read during the execution of the function FN. Returns
NULL if no data is available. */
-bitmap
-ipa_reference_get_not_read_global (struct cgraph_node *fn)
+bitmap
+ipa_reference_get_not_read_global (struct cgraph_node *fn)
{
ipa_reference_global_vars_info_t g = get_global_reference_vars_info (fn);
- if (g)
+ if (g)
return g->statics_not_read;
else
return NULL;
@@ -270,11 +270,11 @@ ipa_reference_get_not_read_global (struct cgraph_node *fn)
that variables written may or may not be read during the function
call. Returns NULL if no data is available. */
-bitmap
-ipa_reference_get_not_written_global (struct cgraph_node *fn)
+bitmap
+ipa_reference_get_not_written_global (struct cgraph_node *fn)
{
ipa_reference_global_vars_info_t g = get_global_reference_vars_info (fn);
- if (g)
+ if (g)
return g->statics_not_written;
else
return NULL;
@@ -285,8 +285,8 @@ ipa_reference_get_not_written_global (struct cgraph_node *fn)
/* Add VAR to all_module_statics and the two
reference_vars_to_consider* sets. */
-static inline void
-add_static_var (tree var)
+static inline void
+add_static_var (tree var)
{
int uid = DECL_UID (var);
gcc_assert (TREE_CODE (var) == VAR_DECL);
@@ -301,7 +301,7 @@ add_static_var (tree var)
/* Return true if the variable T is the right kind of static variable to
perform compilation unit scope escape analysis. */
-static inline bool
+static inline bool
has_proper_scope_for_analysis (tree t)
{
/* If the variable has the "used" attribute, treat it as if it had a
@@ -311,7 +311,7 @@ has_proper_scope_for_analysis (tree t)
/* Do not want to do anything with volatile except mark any
function that uses one to be not const or pure. */
- if (TREE_THIS_VOLATILE (t))
+ if (TREE_THIS_VOLATILE (t))
return false;
/* Do not care about a local automatic that is not static. */
@@ -395,16 +395,16 @@ check_asm_memory_clobber (ipa_reference_local_vars_info_t local, gimple stmt)
{
size_t i;
tree op;
-
+
for (i = 0; i < gimple_asm_nclobbers (stmt); i++)
{
op = gimple_asm_clobber_op (stmt, i);
- if (simple_cst_equal(TREE_VALUE (op), memory_identifier_string) == 1)
+ if (simple_cst_equal(TREE_VALUE (op), memory_identifier_string) == 1)
{
/* Abandon all hope, ye who enter here. */
local->calls_read_all = true;
local->calls_write_all = true;
- }
+ }
}
}
@@ -420,11 +420,11 @@ check_call (ipa_reference_local_vars_info_t local, gimple stmt)
time. */
if (!callee_t)
{
- if (flags & ECF_CONST)
+ if (flags & ECF_CONST)
;
else if (flags & ECF_PURE)
local->calls_read_all = true;
- else
+ else
{
local->calls_read_all = true;
     /* When function does not return, it is safe to ignore anything it writes
@@ -461,11 +461,11 @@ scan_stmt_for_static_refs (gimple_stmt_iterator *gsip,
check_call (local, stmt);
else if (gimple_code (stmt) == GIMPLE_ASM)
check_asm_memory_clobber (local, stmt);
-
+
return NULL;
}
-/* Call-back to scan variable initializers for static references.
+/* Call-back to scan variable initializers for static references.
Called using walk_tree. */
static tree
@@ -483,7 +483,7 @@ scan_initializer_for_static_refs (tree *tp, int *walk_subtrees,
won't find anything useful there anyway. */
else if (IS_TYPE_OR_DECL_P (*tp))
*walk_subtrees = 0;
-
+
return NULL;
}
@@ -491,7 +491,7 @@ scan_initializer_for_static_refs (tree *tp, int *walk_subtrees,
static tree
get_static_decl (int index)
{
- splay_tree_node stn =
+ splay_tree_node stn =
splay_tree_lookup (reference_vars_to_consider, index);
if (stn)
return (tree)stn->value;
@@ -504,7 +504,7 @@ get_static_decl (int index)
static const char *
get_static_name (int index)
{
- splay_tree_node stn =
+ splay_tree_node stn =
splay_tree_lookup (reference_vars_to_consider, index);
if (stn)
return lang_hooks.decl_printable_name ((tree)(stn->value), 2);
@@ -519,7 +519,7 @@ static void
propagate_bits (ipa_reference_global_vars_info_t x_global, struct cgraph_node *x)
{
struct cgraph_edge *e;
- for (e = x->callees; e; e = e->next_callee)
+ for (e = x->callees; e; e = e->next_callee)
{
struct cgraph_node *y = e->callee;
@@ -528,51 +528,51 @@ propagate_bits (ipa_reference_global_vars_info_t x_global, struct cgraph_node *x
{
if (get_reference_vars_info (y))
{
- ipa_reference_vars_info_t y_info
+ ipa_reference_vars_info_t y_info
= get_reference_vars_info (y);
ipa_reference_global_vars_info_t y_global = y_info->global;
/* Calls in current cycle do not have global computed yet. */
if (!y_info->global)
continue;
-
+
if (x_global->statics_read
!= all_module_statics)
{
- if (y_global->statics_read
+ if (y_global->statics_read
== all_module_statics)
{
BITMAP_FREE (x_global->statics_read);
- x_global->statics_read
+ x_global->statics_read
= all_module_statics;
}
/* Skip bitmaps that are pointer equal to node's bitmap
(no reason to spin within the cycle). */
- else if (x_global->statics_read
+ else if (x_global->statics_read
!= y_global->statics_read)
bitmap_ior_into (x_global->statics_read,
y_global->statics_read);
}
-
- if (x_global->statics_written
+
+ if (x_global->statics_written
!= all_module_statics)
{
- if (y_global->statics_written
+ if (y_global->statics_written
== all_module_statics)
{
BITMAP_FREE (x_global->statics_written);
- x_global->statics_written
+ x_global->statics_written
= all_module_statics;
}
/* Skip bitmaps that are pointer equal to node's bitmap
(no reason to spin within the cycle). */
- else if (x_global->statics_written
+ else if (x_global->statics_written
!= y_global->statics_written)
bitmap_ior_into (x_global->statics_written,
y_global->statics_written);
}
}
- else
+ else
gcc_unreachable ();
}
}
@@ -580,8 +580,8 @@ propagate_bits (ipa_reference_global_vars_info_t x_global, struct cgraph_node *x
/* The init routine for analyzing global static variable usage. See
comments at top for description. */
-static void
-ipa_init (void)
+static void
+ipa_init (void)
{
static bool init_p = false;
@@ -621,7 +621,7 @@ ipa_init (void)
compilation unit but their right hand sides may contain references
to variables defined within this unit. */
-static void
+static void
analyze_variable (struct varpool_node *vnode)
{
struct walk_stmt_info wi;
@@ -639,7 +639,7 @@ analyze_variable (struct varpool_node *vnode)
static ipa_reference_local_vars_info_t
init_function_info (struct cgraph_node *fn)
{
- ipa_reference_vars_info_t info
+ ipa_reference_vars_info_t info
= XCNEW (struct ipa_reference_vars_info_d);
ipa_reference_local_vars_info_t l
= XCNEW (struct ipa_reference_local_vars_info_d);
@@ -674,7 +674,7 @@ analyze_function (struct cgraph_node *fn)
push_cfun (DECL_STRUCT_FUNCTION (decl));
current_function_decl = decl;
-
+
init_function_info (fn);
FOR_EACH_BB_FN (this_block, this_cfun)
{
@@ -724,7 +724,7 @@ analyze_function (struct cgraph_node *fn)
step = TREE_CHAIN (step))
{
tree var = TREE_VALUE (step);
- if (TREE_CODE (var) == VAR_DECL
+ if (TREE_CODE (var) == VAR_DECL
&& DECL_INITIAL (var)
&& !TREE_STATIC (var))
gcc_unreachable ();
@@ -760,29 +760,29 @@ clean_function (struct cgraph_node *fn)
{
ipa_reference_vars_info_t info = get_reference_vars_info (fn);
ipa_reference_global_vars_info_t g = info->global;
-
+
clean_function_local_data (fn);
if (g)
{
if (g->statics_read
&& g->statics_read != all_module_statics)
BITMAP_FREE (g->statics_read);
-
+
if (g->statics_written
&& g->statics_written != all_module_statics)
BITMAP_FREE (g->statics_written);
-
+
if (g->statics_not_read
&& g->statics_not_read != all_module_statics)
BITMAP_FREE (g->statics_not_read);
-
+
if (g->statics_not_written
&& g->statics_not_written != all_module_statics)
BITMAP_FREE (g->statics_not_written);
free (g);
info->global = NULL;
}
-
+
free (get_reference_vars_info (fn));
set_reference_vars_info (fn, NULL);
}
@@ -872,7 +872,7 @@ remove_node_data (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
/* Analyze each function in the cgraph to see which global or statics
are read or written. */
-static void
+static void
generate_summary (void)
{
struct cgraph_node *node;
@@ -881,7 +881,7 @@ generate_summary (void)
bitmap_iterator bi;
bitmap module_statics_readonly;
bitmap bm_temp;
-
+
ipa_init ();
module_statics_readonly = BITMAP_ALLOC (&local_info_obstack);
bm_temp = BITMAP_ALLOC (&local_info_obstack);
@@ -890,7 +890,7 @@ generate_summary (void)
FOR_EACH_STATIC_INITIALIZER (vnode)
analyze_variable (vnode);
- /* Process all of the functions next.
+ /* Process all of the functions next.
We do not want to process any of the clones so we check that this
is a master clone. However, we do need to process any
@@ -899,7 +899,7 @@ generate_summary (void)
overwrite such a function cannot access the statics because it
would not be in the same compilation unit. When the analysis is
finished, the computed information of these AVAIL_OVERWRITABLE is
- replaced with worst case info.
+ replaced with worst case info.
*/
for (node = cgraph_nodes; node; node = node->next)
if (cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
@@ -914,34 +914,34 @@ generate_summary (void)
{
splay_tree_remove (reference_vars_to_consider, index);
}
-
- bitmap_and_compl_into (all_module_statics,
+
+ bitmap_and_compl_into (all_module_statics,
module_statics_escape);
-
+
bitmap_and_compl (module_statics_readonly, all_module_statics,
module_statics_written);
-
+
/* If the address is not taken, we can unset the addressable bit
on this variable. */
EXECUTE_IF_SET_IN_BITMAP (all_module_statics, 0, index, bi)
{
tree var = get_static_decl (index);
TREE_ADDRESSABLE (var) = 0;
- if (dump_file)
+ if (dump_file)
fprintf (dump_file, "Not TREE_ADDRESSABLE var %s\n",
get_static_name (index));
}
-
+
/* If the variable is never written, we can set the TREE_READONLY
flag. Additionally if it has a DECL_INITIAL that is made up of
constants we can treat the entire global as a constant. */
-
+
bitmap_and_compl (module_statics_readonly, all_module_statics,
module_statics_written);
EXECUTE_IF_SET_IN_BITMAP (module_statics_readonly, 0, index, bi)
{
tree var = get_static_decl (index);
-
+
/* Ignore variables in named sections - changing TREE_READONLY
changes the section flags, potentially causing conflicts with
other variables in the same named section. */
@@ -949,44 +949,44 @@ generate_summary (void)
{
TREE_READONLY (var) = 1;
if (dump_file)
- fprintf (dump_file, "read-only var %s\n",
+ fprintf (dump_file, "read-only var %s\n",
get_static_name (index));
}
}
-
+
BITMAP_FREE(module_statics_escape);
BITMAP_FREE(module_statics_written);
module_statics_escape = NULL;
module_statics_written = NULL;
-
+
if (dump_file)
EXECUTE_IF_SET_IN_BITMAP (all_module_statics, 0, index, bi)
{
fprintf (dump_file, "\nPromotable global:%s",
get_static_name (index));
}
-
+
for (node = cgraph_nodes; node; node = node->next)
if (cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
ipa_reference_local_vars_info_t l;
l = get_reference_vars_info (node)->local;
-
+
/* Any variables that are not in all_module_statics are
removed from the local maps. This will include all of the
variables that were found to escape in the function
scanning. */
if (l->statics_read)
- bitmap_and_into (l->statics_read,
+ bitmap_and_into (l->statics_read,
all_module_statics);
if (l->statics_written)
- bitmap_and_into (l->statics_written,
+ bitmap_and_into (l->statics_written,
all_module_statics);
}
-
+
BITMAP_FREE(module_statics_readonly);
BITMAP_FREE(bm_temp);
-
+
if (dump_file)
for (node = cgraph_nodes; node; node = node->next)
if (cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
@@ -994,10 +994,10 @@ generate_summary (void)
ipa_reference_local_vars_info_t l;
unsigned int index;
bitmap_iterator bi;
-
+
l = get_reference_vars_info (node)->local;
- fprintf (dump_file,
- "\nFunction name:%s/%i:",
+ fprintf (dump_file,
+ "\nFunction name:%s/%i:",
cgraph_node_name (node), node->uid);
fprintf (dump_file, "\n locals read: ");
if (l->statics_read)
@@ -1029,14 +1029,14 @@ static bool
write_node_summary_p (struct cgraph_node *node)
{
gcc_assert (node->global.inlined_to == NULL);
- return (node->analyzed
+ return (node->analyzed
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE
&& get_reference_vars_info (node) != NULL);
}
/* Serialize the ipa info for lto. */
-static void
+static void
ipa_reference_write_summary (cgraph_node_set set)
{
struct cgraph_node *node;
@@ -1048,9 +1048,9 @@ ipa_reference_write_summary (cgraph_node_set set)
for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
if (write_node_summary_p (csi_node (csi)))
count++;
-
+
lto_output_uleb128_stream (ob->main_stream, count);
-
+
/* Process all of the functions. */
for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
{
@@ -1099,10 +1099,10 @@ ipa_reference_write_summary (cgraph_node_set set)
/* Deserialize the ipa info for lto. */
-static void
+static void
ipa_reference_read_summary (void)
{
- struct lto_file_decl_data ** file_data_vec
+ struct lto_file_decl_data ** file_data_vec
= lto_get_file_decl_data ();
struct lto_file_decl_data * file_data;
unsigned int j = 0;
@@ -1114,11 +1114,11 @@ ipa_reference_read_summary (void)
const char *data;
size_t len;
struct lto_input_block *ib
- = lto_create_simple_input_block (file_data,
- LTO_section_ipa_reference,
+ = lto_create_simple_input_block (file_data,
+ LTO_section_ipa_reference,
&data, &len);
if (ib)
- {
+ {
unsigned int i;
unsigned int f_count = lto_input_uleb128 (ib);
@@ -1147,7 +1147,7 @@ ipa_reference_read_summary (void)
var_index);
add_static_var (v_decl);
bitmap_set_bit (l->statics_read, DECL_UID (v_decl));
- }
+ }
/* Set the statics written. */
v_count = lto_input_sleb128 (ib);
@@ -1161,11 +1161,11 @@ ipa_reference_read_summary (void)
var_index);
add_static_var (v_decl);
bitmap_set_bit (l->statics_written, DECL_UID (v_decl));
- }
+ }
}
- lto_destroy_simple_input_block (file_data,
- LTO_section_ipa_reference,
+ lto_destroy_simple_input_block (file_data,
+ LTO_section_ipa_reference,
ib, data, len);
}
}
@@ -1212,7 +1212,7 @@ propagate (void)
int i;
cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
- if (dump_file)
+ if (dump_file)
dump_cgraph (dump_file);
/* Propagate the local information thru the call graph to produce
@@ -1226,18 +1226,18 @@ propagate (void)
for (i = 0; i < order_pos; i++ )
{
ipa_reference_vars_info_t node_info;
- ipa_reference_global_vars_info_t node_g =
+ ipa_reference_global_vars_info_t node_g =
XCNEW (struct ipa_reference_global_vars_info_d);
ipa_reference_local_vars_info_t node_l;
struct cgraph_edge *e;
-
+
bool read_all;
bool write_all;
struct ipa_dfs_info * w_info;
node = order[i];
node_info = get_reference_vars_info (node);
- if (!node_info)
+ if (!node_info)
{
dump_cgraph_node (stderr, node);
dump_cgraph (stderr);
@@ -1254,7 +1254,7 @@ propagate (void)
if (cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE)
read_write_all_from_decl (node->decl, &read_all, &write_all);
- for (e = node->callees; e; e = e->next_callee)
+ for (e = node->callees; e; e = e->next_callee)
if (cgraph_function_body_availability (e->callee) <= AVAIL_OVERWRITABLE)
read_write_all_from_decl (e->callee->decl, &read_all, &write_all);
@@ -1265,14 +1265,14 @@ propagate (void)
w = w_info->next_cycle;
while (w)
{
- ipa_reference_local_vars_info_t w_l =
+ ipa_reference_local_vars_info_t w_l =
get_reference_vars_info (w)->local;
/* When function is overwrittable, we can not assume anything. */
if (cgraph_function_body_availability (w) <= AVAIL_OVERWRITABLE)
read_write_all_from_decl (w->decl, &read_all, &write_all);
- for (e = w->callees; e; e = e->next_callee)
+ for (e = w->callees; e; e = e->next_callee)
if (cgraph_function_body_availability (e->callee) <= AVAIL_OVERWRITABLE)
read_write_all_from_decl (e->callee->decl, &read_all, &write_all);
@@ -1285,20 +1285,20 @@ propagate (void)
/* Initialized the bitmaps for the reduced nodes */
- if (read_all)
+ if (read_all)
node_g->statics_read = all_module_statics;
- else
+ else
{
node_g->statics_read = BITMAP_ALLOC (&global_info_obstack);
- bitmap_copy (node_g->statics_read,
+ bitmap_copy (node_g->statics_read,
node_l->statics_read);
}
- if (write_all)
+ if (write_all)
node_g->statics_written = all_module_statics;
else
{
node_g->statics_written = BITMAP_ALLOC (&global_info_obstack);
- bitmap_copy (node_g->statics_written,
+ bitmap_copy (node_g->statics_written,
node_l->statics_written);
}
@@ -1307,10 +1307,10 @@ propagate (void)
w = w_info->next_cycle;
while (w)
{
- ipa_reference_vars_info_t w_ri =
+ ipa_reference_vars_info_t w_ri =
get_reference_vars_info (w);
ipa_reference_local_vars_info_t w_l = w_ri->local;
-
+
/* These global bitmaps are initialized from the local info
of all of the nodes in the region. However there is no
need to do any work if the bitmaps were set to
@@ -1332,7 +1332,7 @@ propagate (void)
w = w_info->next_cycle;
while (w)
{
- ipa_reference_vars_info_t w_ri =
+ ipa_reference_vars_info_t w_ri =
get_reference_vars_info (w);
gcc_assert (!w_ri->global);
@@ -1360,8 +1360,8 @@ propagate (void)
node_info = get_reference_vars_info (node);
node_g = node_info->global;
node_l = node_info->local;
- fprintf (dump_file,
- "\nFunction name:%s/%i:",
+ fprintf (dump_file,
+ "\nFunction name:%s/%i:",
cgraph_node_name (node), node->uid);
fprintf (dump_file, "\n locals read: ");
if (node_l->statics_read)
@@ -1382,9 +1382,9 @@ propagate (void)
w_info = (struct ipa_dfs_info *) node->aux;
w = w_info->next_cycle;
- while (w)
+ while (w)
{
- ipa_reference_vars_info_t w_ri =
+ ipa_reference_vars_info_t w_ri =
get_reference_vars_info (w);
ipa_reference_local_vars_info_t w_l = w_ri->local;
fprintf (dump_file, "\n next cycle: %s/%i ",
@@ -1404,7 +1404,7 @@ propagate (void)
fprintf(dump_file, "%s ",
get_static_name (index));
}
-
+
w_info = (struct ipa_dfs_info *) w->aux;
w = w_info->next_cycle;
@@ -1440,20 +1440,20 @@ propagate (void)
node = order[i];
node_info = get_reference_vars_info (node);
node_g = node_info->global;
-
+
      /* Create the complementary sets. These are more useful for
certain apis. */
node_g->statics_not_read = BITMAP_ALLOC (&global_info_obstack);
node_g->statics_not_written = BITMAP_ALLOC (&global_info_obstack);
- if (node_g->statics_read != all_module_statics)
- bitmap_and_compl (node_g->statics_not_read,
+ if (node_g->statics_read != all_module_statics)
+ bitmap_and_compl (node_g->statics_not_read,
all_module_statics,
node_g->statics_read);
- if (node_g->statics_written
- != all_module_statics)
- bitmap_and_compl (node_g->statics_not_written,
+ if (node_g->statics_written
+ != all_module_statics)
+ bitmap_and_compl (node_g->statics_not_written,
all_module_statics,
node_g->statics_written);
}
@@ -1465,13 +1465,13 @@ propagate (void)
ipa_reference_vars_info_t node_info;
node_info = get_reference_vars_info (node);
/* Get rid of the aux information. */
-
+
if (node->aux)
{
free (node->aux);
node->aux = NULL;
}
-
+
if (cgraph_function_body_availability (node) == AVAIL_OVERWRITABLE)
clean_function (node);
else if (node_info)
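
For each function, ipa-reference records bitmaps of the module-local statics it reads and writes; after propagation the pass derives statics_not_read and statics_not_written by subtracting those sets from all_module_statics, which is what the bitmap_and_compl calls in the hunk above do. A minimal sketch of that complement step, using plain word-sized masks instead of GCC's bitmap type:

#include <stdio.h>

/* Each bit stands for one static variable in the module.  */
typedef unsigned int toy_bitmap;

int
main (void)
{
  toy_bitmap all_module_statics = 0x0fu;  /* statics 0..3 exist       */
  toy_bitmap statics_read       = 0x05u;  /* function reads 0 and 2   */
  toy_bitmap statics_written    = 0x01u;  /* and writes only 0        */

  /* The complementary sets: everything in the module the function
     provably does not touch (the analogue of bitmap_and_compl).  */
  toy_bitmap statics_not_read    = all_module_statics & ~statics_read;
  toy_bitmap statics_not_written = all_module_statics & ~statics_written;

  printf ("not read:    0x%02x\n", statics_not_read);     /* 0x0a */
  printf ("not written: 0x%02x\n", statics_not_written);  /* 0x0e */
  return 0;
}
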
diff --git a/gcc/ipa-struct-reorg.c b/gcc/ipa-struct-reorg.c
index d2187b82b99..43ed0a53c94 100644
--- a/gcc/ipa-struct-reorg.c
+++ b/gcc/ipa-struct-reorg.c
@@ -223,14 +223,14 @@ get_type_of_var (tree var)
{
if (!var)
return NULL;
-
+
if (TREE_CODE (var) == PARM_DECL)
return DECL_ARG_TYPE (var);
- else
+ else
return TREE_TYPE (var);
}
-/* Set of actions we do for each newly generated STMT. */
+/* Set of actions we do for each newly generated STMT. */
static inline void
finalize_stmt (gimple stmt)
@@ -248,8 +248,8 @@ finalize_stmt_and_append (gimple_seq *stmts, gimple stmt)
finalize_stmt (stmt);
}
-/* Given structure type SRT_TYPE and field FIELD,
- this function is looking for a field with the same name
+/* Given structure type SRT_TYPE and field FIELD,
+ this function is looking for a field with the same name
and type as FIELD in STR_TYPE. It returns it if found,
or NULL_TREE otherwise. */
@@ -261,7 +261,7 @@ find_field_in_struct_1 (tree str_type, tree field)
if (!DECL_NAME (field))
return NULL;
- for (str_field = TYPE_FIELDS (str_type); str_field;
+ for (str_field = TYPE_FIELDS (str_type); str_field;
str_field = TREE_CHAIN (str_field))
{
const char *str_field_name;
@@ -278,7 +278,7 @@ find_field_in_struct_1 (tree str_type, tree field)
if (!strcmp (str_field_name, field_name))
{
- /* Check field types. */
+ /* Check field types. */
if (is_equal_types (TREE_TYPE (str_field), TREE_TYPE (field)))
return str_field;
}
@@ -287,14 +287,14 @@ find_field_in_struct_1 (tree str_type, tree field)
return NULL_TREE;
}
-/* Given a field declaration FIELD_DECL, this function
+/* Given a field declaration FIELD_DECL, this function
returns corresponding field entry in structure STR. */
static struct field_entry *
find_field_in_struct (d_str str, tree field_decl)
{
int i;
-
+
tree field = find_field_in_struct_1 (str->decl, field_decl);
for (i = 0; i < str->num_fields; i++)
@@ -304,8 +304,8 @@ find_field_in_struct (d_str str, tree field_decl)
return NULL;
}
-/* This function checks whether ARG is a result of multiplication
- of some number by STRUCT_SIZE. If yes, the function returns true
+/* This function checks whether ARG is a result of multiplication
+ of some number by STRUCT_SIZE. If yes, the function returns true
and this number is filled into NUM. */
static bool
@@ -322,7 +322,7 @@ is_result_of_mult (tree arg, tree *num, tree struct_size)
tree lhs = gimple_assign_lhs (size_def_stmt);
/* We expect temporary here. */
- if (!is_gimple_reg (lhs))
+ if (!is_gimple_reg (lhs))
return false;
if (gimple_assign_rhs_code (size_def_stmt) == MULT_EXPR)
@@ -350,8 +350,8 @@ is_result_of_mult (tree arg, tree *num, tree struct_size)
/* This function returns true if access ACC corresponds to the pattern
- generated by compiler when an address of element i of an array
- of structures STR_DECL (pointed by p) is calculated (p[i]). If this
+ generated by compiler when an address of element i of an array
+ of structures STR_DECL (pointed by p) is calculated (p[i]). If this
pattern is recognized correctly, this function returns true
and fills missing fields in ACC. Otherwise it returns false. */
@@ -362,7 +362,7 @@ decompose_indirect_ref_acc (tree str_decl, struct field_access_site *acc)
tree struct_size, op0, op1;
tree before_cast;
enum tree_code rhs_code;
-
+
ref_var = TREE_OPERAND (acc->ref, 0);
if (TREE_CODE (ref_var) != SSA_NAME)
@@ -383,8 +383,8 @@ decompose_indirect_ref_acc (tree str_decl, struct field_access_site *acc)
op0 = gimple_assign_rhs1 (acc->ref_def_stmt);
op1 = gimple_assign_rhs2 (acc->ref_def_stmt);
- if (!is_array_access_through_pointer_and_index (rhs_code, op0, op1,
- &acc->base, &acc->offset,
+ if (!is_array_access_through_pointer_and_index (rhs_code, op0, op1,
+ &acc->base, &acc->offset,
&acc->cast_stmt))
return false;
@@ -398,7 +398,7 @@ decompose_indirect_ref_acc (tree str_decl, struct field_access_site *acc)
if (SSA_NAME_IS_DEFAULT_DEF (before_cast))
- return false;
+ return false;
struct_size = TYPE_SIZE_UNIT (str_decl);
@@ -409,7 +409,7 @@ decompose_indirect_ref_acc (tree str_decl, struct field_access_site *acc)
}
-/* This function checks whether the access ACC of structure type STR
+/* This function checks whether the access ACC of structure type STR
is of the form suitable for transformation. If yes, it returns true.
False otherwise. */
@@ -444,37 +444,37 @@ make_field_acc_node (void)
static struct field_access_site *
is_in_field_accs (gimple stmt, htab_t f_accs)
{
- return (struct field_access_site *)
+ return (struct field_access_site *)
htab_find_with_hash (f_accs, stmt, htab_hash_pointer (stmt));
}
-/* This function adds an access ACC to the hashtable
+/* This function adds an access ACC to the hashtable
F_ACCS of field accesses. */
static void
-add_field_acc_to_acc_sites (struct field_access_site *acc,
+add_field_acc_to_acc_sites (struct field_access_site *acc,
htab_t f_accs)
{
void **slot;
-
+
gcc_assert (!is_in_field_accs (acc->stmt, f_accs));
slot = htab_find_slot_with_hash (f_accs, acc->stmt,
- htab_hash_pointer (acc->stmt),
+ htab_hash_pointer (acc->stmt),
INSERT);
- *slot = acc;
+ *slot = acc;
}
-/* This function adds the VAR to vector of variables of
- an access site defined by statement STMT. If access entry
- with statement STMT does not exist in hashtable of
- accesses ACCS, this function creates it. */
+/* This function adds the VAR to vector of variables of
+ an access site defined by statement STMT. If access entry
+ with statement STMT does not exist in hashtable of
+ accesses ACCS, this function creates it. */
static void
add_access_to_acc_sites (gimple stmt, tree var, htab_t accs)
{
struct access_site *acc;
- acc = (struct access_site *)
+ acc = (struct access_site *)
htab_find_with_hash (accs, stmt, htab_hash_pointer (stmt));
if (!acc)
@@ -488,18 +488,18 @@ add_access_to_acc_sites (gimple stmt, tree var, htab_t accs)
htab_hash_pointer (stmt), INSERT);
*slot = acc;
- }
+ }
VEC_safe_push (tree, heap, acc->vars, var);
}
-/* This function adds NEW_DECL to function
+/* This function adds NEW_DECL to function
referenced vars, and marks it for renaming. */
static void
finalize_var_creation (tree new_decl)
{
- add_referenced_var (new_decl);
- mark_sym_for_renaming (new_decl);
+ add_referenced_var (new_decl);
+ mark_sym_for_renaming (new_decl);
}
/* This function finalizes VAR creation if it is a global VAR_DECL. */
@@ -525,8 +525,8 @@ insert_global_to_varpool (tree new_decl)
varpool_finalize_decl (new_decl);
}
-/* This function finalizes the creation of new variables,
- defined by *SLOT->new_vars. */
+/* This function finalizes the creation of new variables,
+ defined by *SLOT->new_vars. */
static int
finalize_new_vars_creation (void **slot, void *data ATTRIBUTE_UNUSED)
@@ -553,7 +553,7 @@ find_var_in_new_vars_vec (new_var var, tree new_type)
{
tree type = strip_type(get_type_of_var (n_var));
gcc_assert (type);
-
+
if (type == new_type)
return n_var;
}
@@ -562,7 +562,7 @@ find_var_in_new_vars_vec (new_var var, tree new_type)
}
/* This function returns new_var node, the orig_var of which is DECL.
- It looks for new_var's in NEW_VARS_HTAB. If not found,
+ It looks for new_var's in NEW_VARS_HTAB. If not found,
the function returns NULL. */
static new_var
@@ -617,7 +617,7 @@ gen_size (tree num, tree type, tree *res)
else
{
tree C = build_int_cst (TREE_TYPE (num), exact_log2 (struct_size_int));
-
+
new_stmt = gimple_build_assign (*res, fold_build2 (LSHIFT_EXPR,
TREE_TYPE (num),
num, C));
@@ -627,8 +627,8 @@ gen_size (tree num, tree type, tree *res)
return new_stmt;
}
-/* This function generates and returns a statement, that cast variable
- BEFORE_CAST to NEW_TYPE. The cast result variable is stored
+/* This function generates and returns a statement, that cast variable
+ BEFORE_CAST to NEW_TYPE. The cast result variable is stored
into RES_P. ORIG_CAST_STMT is the original cast statement. */
static gimple
@@ -657,7 +657,7 @@ make_edge_and_fix_phis_of_dest (basic_block bb, edge e)
edge new_e;
tree arg;
gimple_stmt_iterator si;
-
+
new_e = make_edge (bb, e->dest, e->flags);
for (si = gsi_start_phis (new_e->dest); !gsi_end_p (si); gsi_next (&si))
@@ -680,8 +680,8 @@ insert_before_stmt (gimple stmt, gimple new_stmt)
if (!stmt || !new_stmt)
return;
- bsi = gsi_for_stmt (stmt);
- gsi_insert_before (&bsi, new_stmt, GSI_SAME_STMT);
+ bsi = gsi_for_stmt (stmt);
+ gsi_insert_before (&bsi, new_stmt, GSI_SAME_STMT);
}
/* Insert NEW_STMTS after STMT. */
@@ -694,8 +694,8 @@ insert_seq_after_stmt (gimple stmt, gimple_seq new_stmts)
if (!stmt || !new_stmts)
return;
- bsi = gsi_for_stmt (stmt);
- gsi_insert_seq_after (&bsi, new_stmts, GSI_SAME_STMT);
+ bsi = gsi_for_stmt (stmt);
+ gsi_insert_seq_after (&bsi, new_stmts, GSI_SAME_STMT);
}
/* Insert NEW_STMT after STMT. */
@@ -708,8 +708,8 @@ insert_after_stmt (gimple stmt, gimple new_stmt)
if (!stmt || !new_stmt)
return;
- bsi = gsi_for_stmt (stmt);
- gsi_insert_after (&bsi, new_stmt, GSI_SAME_STMT);
+ bsi = gsi_for_stmt (stmt);
+ gsi_insert_after (&bsi, new_stmt, GSI_SAME_STMT);
}
/* This function returns vector of allocation sites
@@ -717,14 +717,14 @@ insert_after_stmt (gimple stmt, gimple new_stmt)
static fallocs_t
get_fallocs (tree fn_decl)
-{
+{
return (fallocs_t) htab_find_with_hash (alloc_sites, fn_decl,
htab_hash_pointer (fn_decl));
}
/* If ALLOC_STMT is D.2225_7 = <alloc_func> (D.2224_6);
and it is a part of allocation of a structure,
- then it is usually followed by a cast stmt
+ then it is usually followed by a cast stmt
p_8 = (struct str_t *) D.2225_7;
which is returned by this function. */
@@ -737,7 +737,7 @@ get_final_alloc_stmt (gimple alloc_stmt)
if (!alloc_stmt)
return NULL;
-
+
if (!is_gimple_call (alloc_stmt))
return NULL;
@@ -752,14 +752,14 @@ get_final_alloc_stmt (gimple alloc_stmt)
return final_stmt;
}
-/* This function returns true if STMT is one of allocation
+/* This function returns true if STMT is one of allocation
sites of function FN_DECL. It returns false otherwise. */
static bool
is_part_of_malloc (gimple stmt, tree fn_decl)
{
fallocs_t fallocs = get_fallocs (fn_decl);
-
+
if (fallocs)
{
alloc_site_t *call;
@@ -780,8 +780,8 @@ struct find_stmt_data
gimple stmt;
};
-/* This function looks for DATA->stmt among
- the statements involved in the field access,
+/* This function looks for DATA->stmt among
+ the statements involved in the field access,
defined by SLOT. It stops when it's found. */
static int
@@ -834,7 +834,7 @@ struct exclude_data
d_str str;
};
-/* This function returns component_ref with the BASE and
+/* This function returns component_ref with the BASE and
field named FIELD_ID from structure TYPE. */
static inline tree
@@ -842,7 +842,7 @@ build_comp_ref (tree base, tree field_id, tree type)
{
tree field;
bool found = false;
-
+
/* Find field of structure type with the same name as field_id. */
for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
@@ -860,9 +860,9 @@ build_comp_ref (tree base, tree field_id, tree type)
}
-/* This struct represent data used for walk_tree
+/* This struct represent data used for walk_tree
called from function find_pos_in_stmt.
- - ref is a tree to be found,
+ - ref is a tree to be found,
- and pos is a pointer that points to ref in stmt. */
struct ref_pos
{
@@ -872,7 +872,7 @@ struct ref_pos
};
-/* This is a callback function for walk_tree, called from
+/* This is a callback function for walk_tree, called from
collect_accesses_in_bb function. DATA is a pointer to ref_pos structure.
When *TP is equal to DATA->ref, the walk_tree stops,
and found position, equal to TP, is assigned to DATA->pos. */
@@ -892,7 +892,7 @@ find_pos_in_stmt_1 (tree *tp, int *walk_subtrees, void * data)
}
r_pos->container = t;
- *walk_subtrees = 1;
+ *walk_subtrees = 1;
return NULL_TREE;
}
@@ -915,13 +915,13 @@ find_pos_in_stmt (gimple stmt, tree ref, struct ref_pos * r_pos)
return r_pos->pos;
}
-/* This structure is used to represent array
+/* This structure is used to represent array
or pointer-to wrappers of structure type.
- For example, if type1 is structure type,
- then for type1 ** we generate two type_wrapper
- structures with wrap = 0 each one.
- It's used to unwind the original type up to
- structure type, replace it with the new structure type
+ For example, if type1 is structure type,
+ then for type1 ** we generate two type_wrapper
+ structures with wrap = 0 each one.
+ It's used to unwind the original type up to
+ structure type, replace it with the new structure type
and wrap it back in the opposite order. */
typedef struct type_wrapper
@@ -930,13 +930,13 @@ typedef struct type_wrapper
bool wrap;
/* Relevant for arrays as domain or index. */
- tree domain;
+ tree domain;
}type_wrapper_t;
DEF_VEC_O (type_wrapper_t);
DEF_VEC_ALLOC_O (type_wrapper_t, heap);
-/* This function replace field access ACC by the new
+/* This function replace field access ACC by the new
field access of structure type NEW_TYPE. */
static void
@@ -951,7 +951,7 @@ replace_field_acc (struct field_access_site *acc, tree new_type)
VEC (type_wrapper_t, heap) *wrapper = VEC_alloc (type_wrapper_t, heap, 10);
type_wrapper_t *wr_p = NULL;
struct ref_pos r_pos;
-
+
while (TREE_CODE (ref_var) == INDIRECT_REF
|| TREE_CODE (ref_var) == ARRAY_REF)
{
@@ -979,9 +979,9 @@ replace_field_acc (struct field_access_site *acc, tree new_type)
{
tree type = TREE_TYPE (TREE_TYPE (new_ref));
- wr_p = VEC_last (type_wrapper_t, wrapper);
+ wr_p = VEC_last (type_wrapper_t, wrapper);
if (wr_p->wrap) /* Array. */
- new_ref = build4 (ARRAY_REF, type, new_ref,
+ new_ref = build4 (ARRAY_REF, type, new_ref,
wr_p->domain, NULL_TREE, NULL_TREE);
else /* Pointer. */
new_ref = build1 (INDIRECT_REF, type, new_ref);
@@ -989,10 +989,10 @@ replace_field_acc (struct field_access_site *acc, tree new_type)
}
new_acc = build_comp_ref (new_ref, field_id, new_type);
- VEC_free (type_wrapper_t, heap, wrapper);
+ VEC_free (type_wrapper_t, heap, wrapper);
if (is_gimple_assign (acc->stmt))
- {
+ {
lhs = gimple_assign_lhs (acc->stmt);
rhs = gimple_assign_rhs1 (acc->stmt);
@@ -1013,11 +1013,11 @@ replace_field_acc (struct field_access_site *acc, tree new_type)
gcc_assert (pos);
*pos = new_acc;
}
-
+
finalize_stmt (acc->stmt);
}
-/* This function replace field access ACC by a new field access
+/* This function replace field access ACC by a new field access
of structure type NEW_TYPE. */
static void
@@ -1032,10 +1032,10 @@ replace_field_access_stmt (struct field_access_site *acc, tree new_type)
gcc_unreachable ();
}
-/* This function looks for d_str, represented by TYPE, in the structures
- vector. If found, it returns an index of found structure. Otherwise
+/* This function looks for d_str, represented by TYPE, in the structures
+ vector. If found, it returns an index of found structure. Otherwise
it returns a length of the structures vector. */
-
+
static unsigned
find_structure (tree type)
{
@@ -1052,9 +1052,9 @@ find_structure (tree type)
}
/* In this function we create new statements that have the same
- form as ORIG_STMT, but of type NEW_TYPE. The statements
- treated by this function are simple assignments,
- like assignments: p.8_7 = p; or statements with rhs of
+ form as ORIG_STMT, but of type NEW_TYPE. The statements
+ treated by this function are simple assignments,
+ like assignments: p.8_7 = p; or statements with rhs of
tree codes PLUS_EXPR and MINUS_EXPR. */
static gimple
@@ -1069,7 +1069,7 @@ create_base_plus_offset (gimple orig_stmt, tree new_type, tree offset)
gcc_assert (TREE_CODE (lhs) == VAR_DECL
|| TREE_CODE (lhs) == SSA_NAME);
-
+
new_lhs = find_new_var_of_type (lhs, new_type);
gcc_assert (new_lhs);
finalize_var_creation (new_lhs);
@@ -1084,12 +1084,12 @@ create_base_plus_offset (gimple orig_stmt, tree new_type, tree offset)
tree op1 = gimple_assign_rhs2 (orig_stmt);
unsigned str0, str1;
unsigned length = VEC_length (structure, structures);
-
- str0 = find_structure (strip_type (get_type_of_var (op0)));
+
+ str0 = find_structure (strip_type (get_type_of_var (op0)));
str1 = find_structure (strip_type (get_type_of_var (op1)));
gcc_assert ((str0 != length) || (str1 != length));
-
+
if (str0 != length)
new_op0 = find_new_var_of_type (op0, new_type);
if (str1 != length)
@@ -1105,7 +1105,7 @@ create_base_plus_offset (gimple orig_stmt, tree new_type, tree offset)
default:
gcc_unreachable();
}
-
+
new_stmt = gimple_build_assign_with_ops (gimple_assign_rhs_code (orig_stmt),
new_lhs, new_op0, new_op1);
finalize_stmt (new_stmt);
@@ -1113,7 +1113,7 @@ create_base_plus_offset (gimple orig_stmt, tree new_type, tree offset)
return new_stmt;
}
-/* Given a field access F_ACC of the FIELD, this function
+/* Given a field access F_ACC of the FIELD, this function
replaces it by the new field access. */
static void
@@ -1126,7 +1126,7 @@ create_new_field_access (struct field_access_site *f_acc,
gimple mult_stmt;
gimple cast_stmt;
tree cast_res = NULL;
-
+
if (f_acc->num)
{
mult_stmt = gen_size (f_acc->num, new_type, &size_res);
@@ -1135,7 +1135,7 @@ create_new_field_access (struct field_access_site *f_acc,
if (f_acc->cast_stmt)
{
- cast_stmt = gen_cast_stmt (size_res, new_type,
+ cast_stmt = gen_cast_stmt (size_res, new_type,
f_acc->cast_stmt, &cast_res);
insert_after_stmt (f_acc->cast_stmt, cast_stmt);
}
@@ -1148,7 +1148,7 @@ create_new_field_access (struct field_access_site *f_acc,
else
offset = size_res;
- new_stmt = create_base_plus_offset (f_acc->ref_def_stmt,
+ new_stmt = create_base_plus_offset (f_acc->ref_def_stmt,
new_type, offset);
insert_after_stmt (f_acc->ref_def_stmt, new_stmt);
}
@@ -1158,9 +1158,9 @@ create_new_field_access (struct field_access_site *f_acc,
replace_field_access_stmt (f_acc, new_type);
}
-/* This function creates a new condition statement
+/* This function creates a new condition statement
corresponding to the original COND_STMT, adds new basic block
- and redirects condition edges. NEW_VAR is a new condition
+ and redirects condition edges. NEW_VAR is a new condition
variable located in the condition statement at the position POS. */
static void
@@ -1192,7 +1192,7 @@ create_new_stmts_for_cond_expr_1 (tree new_var, gimple cond_stmt, unsigned pos)
/* Create false and true edges from new_bb. */
make_edge_and_fix_phis_of_dest (new_bb, true_e);
make_edge_and_fix_phis_of_dest (new_bb, false_e);
-
+
/* Redirect one of original edges to point to new_bb. */
if (gimple_cond_code (cond_stmt) == NE_EXPR)
redirect_edge_succ (true_e, new_bb);
@@ -1200,8 +1200,8 @@ create_new_stmts_for_cond_expr_1 (tree new_var, gimple cond_stmt, unsigned pos)
redirect_edge_succ (false_e, new_bb);
}
-/* This function creates new condition statements corresponding
- to original condition STMT, one for each new type, and
+/* This function creates new condition statements corresponding
+ to original condition STMT, one for each new type, and
recursively redirect edges to newly generated basic blocks. */
static void
@@ -1231,13 +1231,13 @@ create_new_stmts_for_cond_expr (gimple stmt)
gcc_assert (s0 || s1);
/* For now we allow only comparison with 0 or NULL. */
gcc_assert (integer_zerop (arg0) || integer_zerop (arg1));
-
+
str = integer_zerop (arg0) ?
- VEC_index (structure, structures, str1):
+ VEC_index (structure, structures, str1):
VEC_index (structure, structures, str0);
arg = integer_zerop (arg0) ? arg1 : arg0;
pos = integer_zerop (arg0) ? 1 : 0;
-
+
for (i = 0; VEC_iterate (tree, str->new_types, i, type); i++)
{
tree new_arg;
@@ -1248,7 +1248,7 @@ create_new_stmts_for_cond_expr (gimple stmt)
}
/* This function looks for VAR in STMT, and replace it with NEW_VAR.
- If needed, it wraps NEW_VAR in pointers and indirect references
+ If needed, it wraps NEW_VAR in pointers and indirect references
before insertion. */
static void
@@ -1265,13 +1265,13 @@ insert_new_var_in_stmt (gimple stmt, tree var, tree new_var)
{
tree type = TREE_TYPE (TREE_TYPE (new_var));
- if (TREE_CODE(r_pos.container) == INDIRECT_REF)
+ if (TREE_CODE(r_pos.container) == INDIRECT_REF)
new_var = build1 (INDIRECT_REF, type, new_var);
else
new_var = build_fold_addr_expr (new_var);
pos = find_pos_in_stmt (stmt, r_pos.container, &r_pos);
}
-
+
*pos = new_var;
}
@@ -1305,7 +1305,7 @@ create_general_new_stmt (struct access_site *acc, tree new_type)
if (is_gimple_assign (new_stmt))
{
lhs = gimple_assign_lhs (new_stmt);
-
+
if (TREE_CODE (lhs) == SSA_NAME)
lhs = SSA_NAME_VAR (lhs);
if (gimple_assign_rhs_code (new_stmt) == SSA_NAME)
@@ -1316,12 +1316,12 @@ create_general_new_stmt (struct access_site *acc, tree new_type)
if (gimple_assign_rhs_code (new_stmt) == CONSTRUCTOR)
{
/* Dealing only with empty constructors right now. */
- gcc_assert (VEC_empty (constructor_elt,
+ gcc_assert (VEC_empty (constructor_elt,
CONSTRUCTOR_ELTS (rhs)));
rhs = build_constructor (new_type, 0);
gimple_assign_set_rhs1 (new_stmt, rhs);
}
-
+
if (lhs == var)
gimple_assign_set_lhs (new_stmt, new_var);
else if (rhs == var)
@@ -1356,7 +1356,7 @@ create_new_stmts_for_general_acc (struct access_site *acc, d_str str)
}
}
-/* This function creates a new general access of structure STR
+/* This function creates a new general access of structure STR
to replace the access ACC. */
static void
@@ -1413,7 +1413,7 @@ create_new_field_acc (void **slot, void *data)
return 1;
}
-/* This function creates new accesses for the structure
+/* This function creates new accesses for the structure
type STR in basic block BB. */
static void
@@ -1425,21 +1425,21 @@ create_new_accs_for_struct (d_str str, basic_block bb)
dt.str = str;
dt.bb = bb;
dt.field_index = -1;
-
+
for (i = 0; i < str->num_fields; i++)
{
dt.field_index = i;
if (str->fields[i].acc_sites)
- htab_traverse (str->fields[i].acc_sites,
+ htab_traverse (str->fields[i].acc_sites,
create_new_field_acc, &dt);
- }
+ }
if (str->accs)
htab_traverse (str->accs, create_new_acc, &dt);
}
-/* This function inserts new variables from new_var,
- defined by SLOT, into varpool. */
+/* This function inserts new variables from new_var,
+ defined by SLOT, into varpool. */
static int
update_varpool_with_new_var (void **slot, void *data ATTRIBUTE_UNUSED)
@@ -1453,7 +1453,7 @@ update_varpool_with_new_var (void **slot, void *data ATTRIBUTE_UNUSED)
return 1;
}
-/* This function prints a field access site, defined by SLOT. */
+/* This function prints a field access site, defined by SLOT. */
static int
dump_field_acc (void **slot, void *data ATTRIBUTE_UNUSED)
@@ -1491,7 +1491,7 @@ malloc_hash (const void *x)
return htab_hash_pointer (((const_fallocs_t)x)->func);
}
-/* This function returns nonzero if function of func_alloc_sites' X
+/* This function returns nonzero if function of func_alloc_sites' X
is equal to Y. */
static int
@@ -1513,7 +1513,7 @@ free_accs (void **slot, void *data ATTRIBUTE_UNUSED)
return 1;
}
-/* This is a callback function for traversal over field accesses.
+/* This is a callback function for traversal over field accesses.
It frees a field access represented by SLOT. */
static int
@@ -1525,7 +1525,7 @@ free_field_accs (void **slot, void *data ATTRIBUTE_UNUSED)
return 1;
}
-/* This function inserts TYPE into vector of UNSUITABLE_TYPES,
+/* This function inserts TYPE into vector of UNSUITABLE_TYPES,
if it is not there yet. */
static void
@@ -1542,7 +1542,7 @@ add_unsuitable_type (VEC (tree, heap) **unsuitable_types, tree type)
for (i = 0; VEC_iterate (tree, *unsuitable_types, i, t); i++)
if (is_equal_types (t, type))
break;
-
+
if (i == VEC_length (tree, *unsuitable_types))
VEC_safe_push (tree, heap, *unsuitable_types, type);
}
@@ -1566,10 +1566,10 @@ get_type_name (tree type)
/* This function is a temporary hack to overcome the types problem.
When several compilation units are compiled together
- with -combine, the TYPE_MAIN_VARIANT of the same type
+ with -combine, the TYPE_MAIN_VARIANT of the same type
can appear differently in different compilation units.
Therefore this function first compares type names.
- If there are no names, structure bodies are recursively
+ If there are no names, structure bodies are recursively
compared. */
static bool
@@ -1595,13 +1595,13 @@ is_equal_types (tree type1, tree type2)
name1 = get_type_name (type1);
name2 = get_type_name (type2);
-
+
if (name1 && name2 && !strcmp (name1, name2))
return true;
if (name1 && name2 && strcmp (name1, name2))
return false;
-
+
switch (TREE_CODE (type1))
{
case POINTER_TYPE:
@@ -1618,7 +1618,7 @@ is_equal_types (tree type1, tree type2)
{
tree field1;
/* Compare fields of structure. */
- for (field1 = TYPE_FIELDS (type1); field1;
+ for (field1 = TYPE_FIELDS (type1); field1;
field1 = TREE_CHAIN (field1))
{
tree field2 = find_field_in_struct_1 (type2, field1);
@@ -1680,7 +1680,7 @@ static void
free_accesses (htab_t accs)
{
if (accs)
- htab_traverse (accs, free_accs, NULL);
+ htab_traverse (accs, free_accs, NULL);
htab_delete (accs);
}
@@ -1690,7 +1690,7 @@ static void
free_field_accesses (htab_t f_accs)
{
if (f_accs)
- htab_traverse (f_accs, free_field_accs, NULL);
+ htab_traverse (f_accs, free_field_accs, NULL);
htab_delete (f_accs);
}
@@ -1707,17 +1707,17 @@ update_cgraph_with_malloc_call (gimple malloc_stmt, tree context)
return;
malloc_fn_decl = gimple_call_fndecl (malloc_stmt);
-
+
src = cgraph_node (context);
dest = cgraph_node (malloc_fn_decl);
- cgraph_create_edge (src, dest, malloc_stmt,
+ cgraph_create_edge (src, dest, malloc_stmt,
gimple_bb (malloc_stmt)->count,
compute_call_stmt_bb_frequency
(context, gimple_bb (malloc_stmt)),
gimple_bb (malloc_stmt)->loop_depth);
}
-/* This function generates set of statements required
+/* This function generates set of statements required
to allocate number NUM of structures of type NEW_TYPE.
The statements are stored in NEW_STMTS. The statement that contain
call to malloc is returned. MALLOC_STMT is an original call to malloc. */
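/* Illustrative sketch (not part of the patch): at the source level, the
   rewrite create_new_malloc performs for one peeled type corresponds
   roughly to the following; the *_sub_0 names are hypothetical.  */
#include <stdlib.h>

struct str_t       { int a; int b; };   /* original structure           */
struct str_t_sub_0 { int a; };          /* one peeled cluster of str_t  */

void *orig_alloc (size_t num)
{
  /* Original allocation of NUM structures of the original type ...  */
  return malloc (num * sizeof (struct str_t));
}

void *new_alloc (size_t num)
{
  /* ... becomes an allocation whose size argument is rebuilt as
     num * sizeof of the new type, followed by a cast of the malloc
     result to a pointer to the new type.  */
  return (struct str_t_sub_0 *) malloc (num * sizeof (struct str_t_sub_0));
}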
@@ -1736,7 +1736,7 @@ create_new_malloc (gimple malloc_stmt, tree new_type, gimple_seq *new_stmts,
gcc_assert (num && malloc_stmt && new_type);
*new_stmts = gimple_seq_alloc ();
- /* Generate argument to malloc as multiplication of num
+ /* Generate argument to malloc as multiplication of num
and size of new_type. */
new_stmt = gen_size (num, new_type, &new_malloc_size);
gimple_seq_add_stmt (new_stmts, new_stmt);
@@ -1746,7 +1746,7 @@ create_new_malloc (gimple malloc_stmt, tree new_type, gimple_seq *new_stmts,
add_referenced_var (malloc_res);
malloc_fn_decl = gimple_call_fndecl (malloc_stmt);
- call_stmt = gimple_build_call (malloc_fn_decl, 1, new_malloc_size);
+ call_stmt = gimple_build_call (malloc_fn_decl, 1, new_malloc_size);
gimple_call_set_lhs (call_stmt, malloc_res);
finalize_stmt_and_append (new_stmts, call_stmt);
@@ -1755,16 +1755,16 @@ create_new_malloc (gimple malloc_stmt, tree new_type, gimple_seq *new_stmts,
gcc_assert (final_stmt);
new_stmt = gen_cast_stmt (malloc_res, new_type, final_stmt, &cast_res);
gimple_seq_add_stmt (new_stmts, new_stmt);
-
- return call_stmt;
+
+ return call_stmt;
}
-/* This function returns a tree representing
- the number of instances of structure STR_DECL allocated
+/* This function returns a tree representing
+ the number of instances of structure STR_DECL allocated
by allocation STMT. If new statements are generated,
they are filled into NEW_STMTS_P. */
-static tree
+static tree
gen_num_of_structs_in_malloc (gimple stmt, tree str_decl,
gimple_seq *new_stmts_p)
{
@@ -1784,10 +1784,10 @@ gen_num_of_structs_in_malloc (gimple stmt, tree str_decl,
if (TREE_CODE (arg) != SSA_NAME
&& !TREE_CONSTANT (arg))
return NULL_TREE;
-
+
struct_size = TYPE_SIZE_UNIT (str_decl);
struct_size_int = TREE_INT_CST_LOW (struct_size);
-
+
gcc_assert (struct_size);
if (TREE_CODE (arg) == SSA_NAME)
@@ -1796,7 +1796,7 @@ gen_num_of_structs_in_malloc (gimple stmt, tree str_decl,
gimple div_stmt;
if (is_result_of_mult (arg, &num, struct_size))
- return num;
+ return num;
num = create_tmp_var (integer_type_node, NULL);
@@ -1809,9 +1809,9 @@ gen_num_of_structs_in_malloc (gimple stmt, tree str_decl,
else
{
tree C = build_int_cst (integer_type_node,
- exact_log2 (struct_size_int));
+ exact_log2 (struct_size_int));
- div_stmt = gimple_build_assign_with_ops (RSHIFT_EXPR, num, arg, C);
+ div_stmt = gimple_build_assign_with_ops (RSHIFT_EXPR, num, arg, C);
}
gimple_seq_add_stmt (new_stmts_p, div_stmt);
finalize_stmt (div_stmt);
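/* Illustrative sketch (not part of the patch): the arithmetic emitted above
   for a non-constant malloc argument ARG, with SIZE the structure size in
   bytes and LOG2_SIZE the result of exact_log2 (SIZE).  */

unsigned long num_of_structs (unsigned long arg, unsigned long size,
                              int log2_size)
{
  if (log2_size >= 0)           /* struct size is a power of two ...        */
    return arg >> log2_size;    /* ... so a right shift replaces the divide */
  else
    return arg / size;          /* otherwise a plain division is emitted    */
}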
@@ -1819,16 +1819,16 @@ gen_num_of_structs_in_malloc (gimple stmt, tree str_decl,
}
if (CONSTANT_CLASS_P (arg)
- && multiple_of_p (TREE_TYPE (struct_size), arg, struct_size))
+ && multiple_of_p (TREE_TYPE (struct_size), arg, struct_size))
return int_const_binop (TRUNC_DIV_EXPR, arg, struct_size, 0);
- return NULL_TREE;
+ return NULL_TREE;
}
/* This function is a callback for traversal on new_var's hashtable.
- SLOT is a pointer to new_var. This function prints to dump_file
- an original variable and all new variables from the new_var
- pointed by *SLOT. */
+ SLOT is a pointer to new_var. This function prints to dump_file
+ an original variable and all new variables from the new_var
+ pointed by *SLOT. */
static int
dump_new_var (void **slot, void *data ATTRIBUTE_UNUSED)
@@ -1848,9 +1848,9 @@ dump_new_var (void **slot, void *data ATTRIBUTE_UNUSED)
for (i = 0;
VEC_iterate (tree, n_var->new_vars, i, var); i++)
- {
+ {
var_type = get_type_of_var (var);
-
+
fprintf (dump_file, " ");
print_generic_expr (dump_file, var, 0);
fprintf (dump_file, " of type ");
@@ -1862,7 +1862,7 @@ dump_new_var (void **slot, void *data ATTRIBUTE_UNUSED)
/* This function copies attributes form ORIG_DECL to NEW_DECL. */
-static inline void
+static inline void
copy_decl_attributes (tree new_decl, tree orig_decl)
{
@@ -1874,7 +1874,7 @@ copy_decl_attributes (tree new_decl, tree orig_decl)
DECL_CONTEXT (new_decl) = DECL_CONTEXT (orig_decl);
TREE_THIS_VOLATILE (new_decl) = TREE_THIS_VOLATILE (orig_decl);
TREE_ADDRESSABLE (new_decl) = TREE_ADDRESSABLE (orig_decl);
-
+
if (TREE_CODE (orig_decl) == VAR_DECL)
{
TREE_READONLY (new_decl) = TREE_READONLY (orig_decl);
@@ -1882,8 +1882,8 @@ copy_decl_attributes (tree new_decl, tree orig_decl)
}
}
-/* This function wraps NEW_STR_TYPE in pointers or arrays wrapper
- the same way as a structure type is wrapped in DECL.
+/* This function wraps NEW_STR_TYPE in the same pointer and array
+ wrappers that wrap the structure type in DECL.
It returns the generated type. */
static inline tree
@@ -1894,10 +1894,10 @@ gen_struct_type (tree decl, tree new_str_type)
VEC (type_wrapper_t, heap) *wrapper = VEC_alloc (type_wrapper_t, heap, 10);
type_wrapper_t wr;
type_wrapper_t *wr_p;
-
+
while (POINTER_TYPE_P (type_orig)
|| TREE_CODE (type_orig) == ARRAY_TYPE)
- {
+ {
if (POINTER_TYPE_P (type_orig))
{
wr.wrap = 0;
@@ -1915,17 +1915,17 @@ gen_struct_type (tree decl, tree new_str_type)
while (VEC_length (type_wrapper_t, wrapper) != 0)
{
- wr_p = VEC_last (type_wrapper_t, wrapper);
+ wr_p = VEC_last (type_wrapper_t, wrapper);
if (wr_p->wrap) /* Array. */
new_type = build_array_type (new_type, wr_p->domain);
else /* Pointer. */
new_type = build_pointer_type (new_type);
-
+
VEC_pop (type_wrapper_t, wrapper);
}
- VEC_free (type_wrapper_t, heap, wrapper);
+ VEC_free (type_wrapper_t, heap, wrapper);
return new_type;
}
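/* Illustrative sketch (not part of the patch): the effect of gen_struct_type
   on a wrapped declaration; the type names are hypothetical.  */

struct str_t       { int a; int b; };
struct str_t_sub_0 { int a; };

/* Original declaration: an array of pointers to the original structure.  */
struct str_t       *old_var[16];

/* The array-of and pointer-to wrappers are peeled off, remembered on the
   wrapper stack, and re-applied around the new structure type, so the new
   variable keeps the same nesting.  */
struct str_t_sub_0 *new_var_0[16];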
@@ -1944,7 +1944,7 @@ gen_var_name (tree orig_decl, unsigned HOST_WIDE_INT i)
|| !IDENTIFIER_POINTER (DECL_NAME (orig_decl)))
return NULL;
- /* If the original variable has a name, create an
+ /* If the original variable has a name, create an
appropriate new name for the new variable. */
old_name = IDENTIFIER_POINTER (DECL_NAME (orig_decl));
@@ -1962,12 +1962,12 @@ add_to_new_vars_htab (new_var new_node, htab_t new_vars_htab)
void **slot;
slot = htab_find_slot_with_hash (new_vars_htab, new_node->orig_var,
- htab_hash_pointer (new_node->orig_var),
+ htab_hash_pointer (new_node->orig_var),
INSERT);
*slot = new_node;
}
-/* This function creates and returns new_var_data node
+/* This function creates and returns new_var_data node
with empty new_vars and orig_var equal to VAR. */
static new_var
@@ -2001,14 +2001,14 @@ is_candidate (tree var, tree *type_p, VEC (tree, heap) **unsuitable_types)
if (TREE_CODE (var) == VAR_DECL
&& DECL_INITIAL (var) != NULL_TREE)
initialized = true;
-
+
type = get_type_of_var (var);
if (type)
{
type = TYPE_MAIN_VARIANT (strip_type (type));
if (TREE_CODE (type) != RECORD_TYPE)
- return false;
+ return false;
else
{
if (initialized && unsuitable_types && *unsuitable_types)
@@ -2017,7 +2017,7 @@ is_candidate (tree var, tree *type_p, VEC (tree, heap) **unsuitable_types)
{
fprintf (dump_file, "The type ");
print_generic_expr (dump_file, type, 0);
- fprintf (dump_file, " is initialized...Excluded.");
+ fprintf (dump_file, " is initialized...Excluded.");
}
add_unsuitable_type (unsuitable_types, type);
}
@@ -2037,7 +2037,7 @@ field_acc_hash (const void *x)
return htab_hash_pointer (((const struct field_access_site *)x)->stmt);
}
-/* This function returns nonzero if stmt of field_access_site X
+/* This function returns nonzero if stmt of field_access_site X
is equal to Y. */
static int
@@ -2046,7 +2046,7 @@ field_acc_eq (const void *x, const void *y)
return ((const struct field_access_site *)x)->stmt == (const_gimple)y;
}
-/* This function prints an access site, defined by SLOT. */
+/* This function prints an access site, defined by SLOT. */
static int
dump_acc (void **slot, void *data ATTRIBUTE_UNUSED)
@@ -2063,7 +2063,7 @@ dump_acc (void **slot, void *data ATTRIBUTE_UNUSED)
for (i = 0; VEC_iterate (tree, acc->vars, i, var); i++)
{
print_generic_expr (dump_file, var, 0);
- fprintf(dump_file, ", ");
+ fprintf(dump_file, ", ");
}
return 1;
}
@@ -2077,7 +2077,7 @@ free_struct_cluster (struct field_cluster* cluster)
if (cluster)
{
if (cluster->fields_in_cluster)
- sbitmap_free (cluster->fields_in_cluster);
+ sbitmap_free (cluster->fields_in_cluster);
if (cluster->sibling)
free_struct_cluster (cluster->sibling);
free (cluster);
@@ -2093,11 +2093,11 @@ free_data_struct (d_str d_node)
if (!d_node)
return;
-
+
if (dump_file)
{
fprintf (dump_file, "\nRemoving data structure \"");
- print_generic_expr (dump_file, d_node->decl, 0);
+ print_generic_expr (dump_file, d_node->decl, 0);
fprintf (dump_file, "\" from data_struct_list.");
}
@@ -2139,7 +2139,7 @@ create_new_alloc_sites (fallocs_t m_data, tree context)
{
alloc_site_t *call;
unsigned j;
-
+
for (j = 0; VEC_iterate (alloc_site_t, m_data->allocs, j, call); j++)
{
gimple stmt = call->stmt;
@@ -2157,9 +2157,9 @@ create_new_alloc_sites (fallocs_t m_data, tree context)
insert_seq_after_stmt (last_stmt, new_stmts);
last_stmt = last_stmt_tmp;
}
-
- /* Generate an allocation sites for each new structure type. */
- for (i = 0; VEC_iterate (tree, str->new_types, i, type); i++)
+
+ /* Generate an allocation site for each new structure type. */
+ for (i = 0; VEC_iterate (tree, str->new_types, i, type); i++)
{
gimple new_malloc_stmt = NULL;
gimple last_stmt_tmp = NULL;
@@ -2174,7 +2174,7 @@ create_new_alloc_sites (fallocs_t m_data, tree context)
}
}
-/* This function prints new variables from hashtable
+/* This function prints new variables from hashtable
NEW_VARS_HTAB to dump_file. */
static void
@@ -2188,7 +2188,7 @@ dump_new_vars (htab_t new_vars_htab)
}
/* Given an original variable ORIG_DECL of structure type STR,
- this function generates new variables of the types defined
+ this function generates new variables of the types defined
by STR->new_type. Generated types are saved in new_var node NODE.
ORIG_DECL should has VAR_DECL tree_code. */
@@ -2198,31 +2198,31 @@ create_new_var_1 (tree orig_decl, d_str str, new_var node)
unsigned i;
tree type;
- for (i = 0;
+ for (i = 0;
VEC_iterate (tree, str->new_types, i, type); i++)
{
tree new_decl = NULL;
tree new_name;
new_name = gen_var_name (orig_decl, i);
- type = gen_struct_type (orig_decl, type);
+ type = gen_struct_type (orig_decl, type);
if (is_global_var (orig_decl))
new_decl = build_decl (DECL_SOURCE_LOCATION (orig_decl),
- VAR_DECL, new_name, type);
+ VAR_DECL, new_name, type);
else
{
const char *name = new_name ? IDENTIFIER_POINTER (new_name) : NULL;
- new_decl = create_tmp_var (type, name);
+ new_decl = create_tmp_var (type, name);
}
-
+
copy_decl_attributes (new_decl, orig_decl);
VEC_safe_push (tree, heap, node->new_vars, new_decl);
}
}
-/* This function creates new variables to
- substitute the original variable VAR_DECL and adds
+/* This function creates new variables to
+ substitute the original variable VAR_DECL and adds
them to the new_var's hashtable NEW_VARS_HTAB. */
static void
@@ -2238,7 +2238,7 @@ create_new_var (tree var_decl, htab_t new_vars_htab)
if (!is_candidate (var_decl, &type, NULL))
return;
-
+
i = find_structure (type);
if (i == VEC_length (structure, structures))
return;
@@ -2265,9 +2265,9 @@ new_var_eq (const void *x, const void *y)
return ((const_new_var)x)->orig_var == (const_tree)y;
}
-/* This function check whether a structure type represented by STR
- escapes due to ipa-type-escape analysis. If yes, this type is added
- to UNSUITABLE_TYPES vector. */
+/* This function checks whether a structure type represented by STR
+ escapes due to ipa-type-escape analysis. If yes, this type is added
+ to UNSUITABLE_TYPES vector. */
static void
check_type_escape (d_str str, VEC (tree, heap) **unsuitable_types)
@@ -2301,8 +2301,8 @@ acc_eq (const void *x, const void *y)
return ((const struct access_site *)x)->stmt == (const_gimple)y;
}
-/* Given a structure declaration STRUCT_DECL, and number of fields
- in the structure NUM_FIELDS, this function creates and returns
+/* Given a structure declaration STRUCT_DECL, and number of fields
+ in the structure NUM_FIELDS, this function creates and returns
corresponding field_entry's. */
static struct field_entry *
@@ -2312,7 +2312,7 @@ get_fields (tree struct_decl, int num_fields)
tree t = TYPE_FIELDS (struct_decl);
int idx = 0;
- list =
+ list =
(struct field_entry *) xmalloc (num_fields * sizeof (struct field_entry));
for (idx = 0 ; t; t = TREE_CHAIN (t), idx++)
@@ -2320,7 +2320,7 @@ get_fields (tree struct_decl, int num_fields)
{
list[idx].index = idx;
list[idx].decl = t;
- list[idx].acc_sites =
+ list[idx].acc_sites =
htab_create (32, field_acc_hash, field_acc_eq, NULL);
list[idx].count = 0;
list[idx].field_mapping = NULL_TREE;
@@ -2341,7 +2341,7 @@ dump_access_sites (htab_t accs)
htab_traverse (accs, dump_acc, NULL);
}
-/* This function is a callback for alloc_sites hashtable
+/* This function is a callback for alloc_sites hashtable
traversal. SLOT is a pointer to fallocs_t. This function
removes all allocations of the structure defined by DATA. */
@@ -2378,16 +2378,16 @@ remove_str_allocs (d_str str)
/* This function removes the structure with index I from structures vector. */
-static void
+static void
remove_structure (unsigned i)
-{
+{
d_str str;
if (i >= VEC_length (structure, structures))
return;
str = VEC_index (structure, structures, i);
-
+
/* Before removing the structure str, we have to remove its
allocations from alloc_sites hashtable. */
remove_str_allocs (str);
@@ -2409,7 +2409,7 @@ is_safe_cond_expr (gimple cond_stmt)
if (gimple_cond_code (cond_stmt) != EQ_EXPR
&& gimple_cond_code (cond_stmt) != NE_EXPR)
return false;
-
+
arg0 = gimple_cond_lhs (cond_stmt);
arg1 = gimple_cond_rhs (cond_stmt);
@@ -2418,7 +2418,7 @@ is_safe_cond_expr (gimple cond_stmt)
s0 = (str0 != length) ? true : false;
s1 = (str1 != length) ? true : false;
-
+
if (!s0 && !s1)
return false;
@@ -2429,10 +2429,10 @@ is_safe_cond_expr (gimple cond_stmt)
return true;
}
-/* This function excludes statements, that are
+/* This function excludes statements that are
part of allocation sites or field accesses, from the
- hashtable of general accesses. SLOT represents general
- access that will be checked. DATA is a pointer to
+ hashtable of general accesses. SLOT represents general
+ access that will be checked. DATA is a pointer to
exclude_data structure. */
static int
@@ -2452,7 +2452,7 @@ exclude_from_accs (void **slot, void *data)
return 1;
}
-/* Callback function for walk_tree called from collect_accesses_in_bb
+/* Callback function for walk_tree called from collect_accesses_in_bb
function. DATA is the statement which is analyzed. */
static tree
@@ -2480,7 +2480,7 @@ get_stmt_accesses (tree *tp, int *walk_subtrees, void *data)
fprintf (dump_file, "\nThe type ");
print_generic_expr (dump_file, type, 0);
fprintf (dump_file, " has bitfield.");
- }
+ }
remove_structure (i);
}
}
@@ -2503,7 +2503,7 @@ get_stmt_accesses (tree *tp, int *walk_subtrees, void *data)
if (i != VEC_length (structure, structures))
{
d_str str = VEC_index (structure, structures, i);
- struct field_entry * field =
+ struct field_entry * field =
find_field_in_struct (str, field_decl);
if (field)
@@ -2517,7 +2517,7 @@ get_stmt_accesses (tree *tp, int *walk_subtrees, void *data)
acc->ref = ref;
acc->field_decl = field_decl;
- /* Check whether the access is of the form
+ /* Check whether the access is of the form
we can deal with. */
if (!decompose_access (str->decl, acc))
{
@@ -2525,11 +2525,11 @@ get_stmt_accesses (tree *tp, int *walk_subtrees, void *data)
{
fprintf (dump_file, "\nThe type ");
print_generic_expr (dump_file, type, 0);
- fprintf (dump_file,
+ fprintf (dump_file,
" has complicate access in statement ");
print_gimple_stmt (dump_file, stmt, 0, 0);
}
-
+
remove_structure (i);
free (acc);
}
@@ -2544,7 +2544,7 @@ get_stmt_accesses (tree *tp, int *walk_subtrees, void *data)
}
*walk_subtrees = 0;
}
- }
+ }
}
}
break;
@@ -2560,7 +2560,7 @@ get_stmt_accesses (tree *tp, int *walk_subtrees, void *data)
*walk_subtrees = 1;
walk_tree (&t, get_stmt_accesses, data, NULL);
}
- *walk_subtrees = 0;
+ *walk_subtrees = 0;
}
break;
@@ -2607,7 +2607,7 @@ free_structures (void)
}
/* This function is a callback for traversal over new_var's hashtable.
- SLOT is a pointer to new_var. This function frees memory allocated
+ SLOT is a pointer to new_var. This function frees memory allocated
for new_var and pointed by *SLOT. */
static int
@@ -2627,7 +2627,7 @@ static void
free_new_vars_htab (htab_t new_vars_htab)
{
if (new_vars_htab)
- htab_traverse (new_vars_htab, free_new_var, NULL);
+ htab_traverse (new_vars_htab, free_new_var, NULL);
htab_delete (new_vars_htab);
new_vars_htab = NULL;
}
@@ -2662,8 +2662,8 @@ create_new_local_vars (void)
{
tree var;
referenced_var_iterator rvi;
-
- new_local_vars = htab_create (num_referenced_vars,
+
+ new_local_vars = htab_create (num_referenced_vars,
new_var_hash, new_var_eq, NULL);
FOR_EACH_REFERENCED_VAR (var, rvi)
@@ -2673,7 +2673,7 @@ create_new_local_vars (void)
}
if (new_local_vars)
- htab_traverse (new_local_vars, finalize_new_vars_creation, NULL);
+ htab_traverse (new_local_vars, finalize_new_vars_creation, NULL);
dump_new_vars (new_local_vars);
}
@@ -2685,7 +2685,7 @@ print_shift (unsigned HOST_WIDE_INT shift)
unsigned HOST_WIDE_INT sh = shift;
while (sh--)
- fprintf (dump_file, " ");
+ fprintf (dump_file, " ");
}
/* This function updates field_mapping of FIELDS in CLUSTER with NEW_TYPE. */
@@ -2698,11 +2698,11 @@ update_fields_mapping (struct field_cluster *cluster, tree new_type,
for (i = 0; i < num_fields; i++)
if (TEST_BIT (cluster->fields_in_cluster, i))
- fields[i].field_mapping = new_type;
+ fields[i].field_mapping = new_type;
}
-/* This functions builds structure with FIELDS,
- NAME and attributes similar to ORIG_STRUCT.
+/* This function builds a structure with FIELDS,
+ NAME and attributes similar to ORIG_STRUCT.
It returns the newly created structure. */
static tree
@@ -2716,7 +2716,7 @@ build_basic_struct (tree fields, tree name, tree orig_struct)
attributes = unshare_expr (TYPE_ATTRIBUTES (orig_struct));
ref = make_node (RECORD_TYPE);
TYPE_SIZE (ref) = 0;
- decl_attributes (&ref, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
+ decl_attributes (&ref, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
TYPE_PACKED (ref) = TYPE_PACKED (orig_struct);
for (x = fields; x; x = TREE_CHAIN (x))
{
@@ -2730,13 +2730,13 @@ build_basic_struct (tree fields, tree name, tree orig_struct)
return ref;
}
-/* This function copies FIELDS from CLUSTER into TREE_CHAIN as part
- of preparation for new structure building. NUM_FIELDS is a total
- number of fields in the structure. The function returns newly
+/* This function copies FIELDS from CLUSTER into TREE_CHAIN as part
+ of preparation for new structure building. NUM_FIELDS is a total
+ number of fields in the structure. The function returns newly
generated fields. */
static tree
-create_fields (struct field_cluster * cluster,
+create_fields (struct field_cluster * cluster,
struct field_entry * fields, int num_fields)
{
int i;
@@ -2760,9 +2760,9 @@ create_fields (struct field_cluster * cluster,
}
-/* This function creates a cluster name. The name is based on
+/* This function creates a cluster name. The name is based on
the original structure name, if it is present. It has a form:
-
+
<original_struct_name>_sub.<CLUST_NUM>
The original structure name is taken from the type of DECL.
@@ -2780,16 +2780,16 @@ gen_cluster_name (tree decl, int clust_num, int str_num)
char * prefix;
char * new_name;
size_t len;
-
+
if (!orig_name)
ASM_FORMAT_PRIVATE_NAME(tmp_name, "struct", str_num);
len = strlen (tmp_name ? tmp_name : orig_name) + strlen ("_sub");
prefix = XALLOCAVEC (char, len + 1);
- memcpy (prefix, tmp_name ? tmp_name : orig_name,
+ memcpy (prefix, tmp_name ? tmp_name : orig_name,
strlen (tmp_name ? tmp_name : orig_name));
- strcpy (prefix + strlen (tmp_name ? tmp_name : orig_name), "_sub");
-
+ strcpy (prefix + strlen (tmp_name ? tmp_name : orig_name), "_sub");
+
ASM_FORMAT_PRIVATE_NAME (new_name, prefix, clust_num);
return get_identifier (new_name);
}
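/* Illustrative sketch (not part of the patch): the naming scheme described
   above, rebuilt with snprintf for clarity; the real code goes through
   ASM_FORMAT_PRIVATE_NAME and get_identifier.  */
#include <stdio.h>

/* A structure named "pixel" split into two clusters gets names along the
   lines of "pixel_sub.0" and "pixel_sub.1".  */
static void cluster_name (char *buf, size_t len,
                          const char *orig_name, int clust_num)
{
  snprintf (buf, len, "%s_sub.%d", orig_name, clust_num);
}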
@@ -2818,7 +2818,7 @@ check_bitfields (d_str str, VEC (tree, heap) **unsuitable_types)
}
}
-/* This function adds to UNSUITABLE_TYPES those types that escape
+/* This function adds to UNSUITABLE_TYPES those types that escape
due to results of ipa-type-escape analysis. See ipa-type-escape.[c,h]. */
static void
@@ -2861,8 +2861,8 @@ exclude_returned_types (VEC (tree, heap) **unsuitable_types)
}
}
-/* This function looks for parameters of local functions
- which are of structure types, or derived from them (arrays
+/* This function looks for parameters of local functions
+ which are of structure types, or derived from them (arrays
of structures, pointers to structures, or their combinations).
We are not handling peeling of such structures right now.
The found structures types are added to UNSUITABLE_TYPES vector. */
@@ -2877,7 +2877,7 @@ exclude_types_passed_to_local_func (VEC (tree, heap) **unsuitable_types)
{
tree fn = c_node->decl;
tree arg;
-
+
for (arg = DECL_ARGUMENTS (fn); arg; arg = TREE_CHAIN (arg))
{
tree type = TREE_TYPE (arg);
@@ -2885,30 +2885,30 @@ exclude_types_passed_to_local_func (VEC (tree, heap) **unsuitable_types)
type = strip_type (type);
if (TREE_CODE (type) == RECORD_TYPE)
{
- add_unsuitable_type (unsuitable_types,
+ add_unsuitable_type (unsuitable_types,
TYPE_MAIN_VARIANT (type));
if (dump_file)
{
fprintf (dump_file, "\nPointer to type \"");
print_generic_expr (dump_file, type, 0);
- fprintf (dump_file,
- "\" is passed to local function...Excluded.");
+ fprintf (dump_file,
+ "\" is passed to local function...Excluded.");
}
}
}
}
}
-/* This function analyzes structure form of structures
+/* This function analyzes structure form of structures
potential for transformation. If we are not capable to transform
structure of some form, we remove it from the structures hashtable.
- Right now we cannot handle nested structs, when nesting is
- through any level of pointers or arrays.
+ Right now we cannot handle nested structs when nesting is
+ through any level of pointers or arrays.
TBD: release these constrains in future.
- Note, that in this function we suppose that all structures
- in the program are members of the structures hashtable right now,
+ Note that in this function we assume that all structures
+ in the program are members of the structures hashtable right now,
without excluding escaping types. */
static void
@@ -2919,7 +2919,7 @@ check_struct_form (d_str str, VEC (tree, heap) **unsuitable_types)
for (i = 0; i < str->num_fields; i++)
{
tree f_type = strip_type(TREE_TYPE (str->fields[i].decl));
-
+
if (TREE_CODE (f_type) == RECORD_TYPE)
{
add_unsuitable_type (unsuitable_types, TYPE_MAIN_VARIANT (f_type));
@@ -2967,13 +2967,13 @@ add_structure (tree type)
if (dump_file)
{
fprintf (dump_file, "\nAdding data structure \"");
- print_generic_expr (dump_file, type, 0);
+ print_generic_expr (dump_file, type, 0);
fprintf (dump_file, "\" to data_struct_list.");
}
}
/* This function adds an allocation site to alloc_sites hashtable.
- The allocation site appears in STMT of function FN_DECL and
+ The allocation site appears in STMT of function FN_DECL and
allocates the structure represented by STR. */
static void
@@ -2985,7 +2985,7 @@ add_alloc_site (tree fn_decl, gimple stmt, d_str str)
m_call.stmt = stmt;
m_call.str = str;
- fallocs =
+ fallocs =
(fallocs_t) htab_find_with_hash (alloc_sites,
fn_decl, htab_hash_pointer (fn_decl));
@@ -2993,7 +2993,7 @@ add_alloc_site (tree fn_decl, gimple stmt, d_str str)
{
void **slot;
- fallocs = (fallocs_t)
+ fallocs = (fallocs_t)
xmalloc (sizeof (struct func_alloc_sites));
fallocs->func = fn_decl;
fallocs->allocs = VEC_alloc (alloc_site_t, heap, 1);
@@ -3001,9 +3001,9 @@ add_alloc_site (tree fn_decl, gimple stmt, d_str str)
htab_hash_pointer (fn_decl), INSERT);
*slot = fallocs;
}
- VEC_safe_push (alloc_site_t, heap,
+ VEC_safe_push (alloc_site_t, heap,
fallocs->allocs, &m_call);
-
+
if (dump_file)
{
fprintf (dump_file, "\nAdding stmt ");
@@ -3015,7 +3015,7 @@ add_alloc_site (tree fn_decl, gimple stmt, d_str str)
/* This function returns true if the result of STMT, that contains a call
to an allocation function, is cast to one of the structure types.
STMT should be of the form: T.2 = <alloc_func> (T.1);
- If true, I_P contains an index of an allocated structure.
+ If true, I_P contains an index of an allocated structure.
Otherwise I_P contains the length of the vector of structures. */
static bool
@@ -3039,7 +3039,7 @@ is_alloc_of_struct (gimple stmt, unsigned *i_p)
lhs = gimple_assign_lhs (final_stmt);
type = get_type_of_var (lhs);
-
+
if (!type)
return false;
@@ -3055,8 +3055,8 @@ is_alloc_of_struct (gimple stmt, unsigned *i_p)
return true;
}
-/* This function prints non-field and field accesses
- of the structure STR. */
+/* This function prints non-field and field accesses
+ of the structure STR. */
static void
dump_accs (d_str str)
@@ -3070,17 +3070,17 @@ dump_accs (d_str str)
{
fprintf (dump_file, "\nAccess site of field ");
print_generic_expr (dump_file, str->fields[i].decl, 0);
- dump_field_acc_sites (str->fields[i].acc_sites);
+ dump_field_acc_sites (str->fields[i].acc_sites);
fprintf (dump_file, ":\n");
}
fprintf (dump_file, "\nGeneral access sites\n");
- dump_access_sites (str->accs);
+ dump_access_sites (str->accs);
}
/* This function checks whether an access statement, pointed by SLOT,
is a condition we are capable to transform. It returns false if not,
setting bool *DATA to false. */
-
+
static int
safe_cond_expr_check (void **slot, void *data)
{
@@ -3113,7 +3113,7 @@ exclude_alloc_and_field_accs_1 (d_str str, struct cgraph_node *node)
dt.fn_decl = node->decl;
if (dt.str->accs)
- htab_traverse (dt.str->accs, exclude_from_accs, &dt);
+ htab_traverse (dt.str->accs, exclude_from_accs, &dt);
}
/* Collect accesses to the structure types that appear in basic block BB. */
@@ -3151,7 +3151,7 @@ gen_cluster (sbitmap fields, d_str str)
{
struct field_cluster *crr_cluster = NULL;
- crr_cluster =
+ crr_cluster =
(struct field_cluster *) xcalloc (1, sizeof (struct field_cluster));
crr_cluster->sibling = str->struct_clustering;
str->struct_clustering = crr_cluster;
@@ -3165,17 +3165,17 @@ peel_field (int i, d_str ds)
{
struct field_cluster *crr_cluster = NULL;
- crr_cluster =
+ crr_cluster =
(struct field_cluster *) xcalloc (1, sizeof (struct field_cluster));
crr_cluster->sibling = ds->struct_clustering;
ds->struct_clustering = crr_cluster;
crr_cluster->fields_in_cluster =
sbitmap_alloc ((unsigned int) ds->num_fields);
sbitmap_zero (crr_cluster->fields_in_cluster);
- SET_BIT (crr_cluster->fields_in_cluster, i);
+ SET_BIT (crr_cluster->fields_in_cluster, i);
}
-/* This function calculates maximum field count in
+/* This function calculates maximum field count in
the structure STR. */
static gcov_type
@@ -3186,32 +3186,32 @@ get_max_field_count (d_str str)
for (i = 0; i < str->num_fields; i++)
if (str->fields[i].count > max)
- max = str->fields[i].count;
+ max = str->fields[i].count;
return max;
}
-/* Do struct-reorg transformation for individual function
- represented by NODE. All structure types relevant
+/* Do struct-reorg transformation for an individual function
+ represented by NODE. All structure types relevant
for this function are transformed. */
static void
do_reorg_for_func (struct cgraph_node *node)
{
- create_new_local_vars ();
+ create_new_local_vars ();
create_new_alloc_sites_for_func (node);
create_new_accesses_for_func ();
update_ssa (TODO_update_ssa);
cleanup_tree_cfg ();
/* Free auxiliary data representing local variables. */
- free_new_vars_htab (new_local_vars);
+ free_new_vars_htab (new_local_vars);
}
/* Print structure TYPE, its name, if it exists, and body.
- INDENT defines the level of indentation (similar
- to the option -i of indent command). SHIFT parameter
- defines a number of spaces by which a structure will
+ INDENT defines the level of indentation (similar
+ to the option -i of indent command). SHIFT parameter
+ defines a number of spaces by which a structure will
be shifted right. */
static void
@@ -3229,21 +3229,21 @@ dump_struct_type (tree type, unsigned HOST_WIDE_INT indent,
print_generic_expr (dump_file, type, 0);
return;
}
-
+
print_shift (shift);
- struct_name = get_type_name (type);
+ struct_name = get_type_name (type);
fprintf (dump_file, "struct ");
- if (struct_name)
+ if (struct_name)
fprintf (dump_file, "%s\n",struct_name);
print_shift (shift);
fprintf (dump_file, "{\n");
-
- for (field = TYPE_FIELDS (type); field;
+
+ for (field = TYPE_FIELDS (type); field;
field = TREE_CHAIN (field))
{
unsigned HOST_WIDE_INT s = indent;
tree f_type = TREE_TYPE (field);
-
+
print_shift (shift);
while (s--)
fprintf (dump_file, " ");
@@ -3256,9 +3256,9 @@ dump_struct_type (tree type, unsigned HOST_WIDE_INT indent,
fprintf (dump_file, "}\n");
}
-/* This function creates new structure types to replace original type,
- indicated by STR->decl. The names of the new structure types are
- derived from the original structure type. If the original structure
+/* This function creates new structure types to replace original type,
+ indicated by STR->decl. The names of the new structure types are
+ derived from the original structure type. If the original structure
type has no name, we assume that its name is 'struct.<STR_NUM>'. */
static void
@@ -3268,28 +3268,28 @@ create_new_type (d_str str, int *str_num)
struct field_cluster *cluster = str->struct_clustering;
while (cluster)
- {
- tree name = gen_cluster_name (str->decl, cluster_num,
+ {
+ tree name = gen_cluster_name (str->decl, cluster_num,
*str_num);
tree fields;
tree new_type;
cluster_num++;
-
- fields = create_fields (cluster, str->fields,
+
+ fields = create_fields (cluster, str->fields,
str->num_fields);
new_type = build_basic_struct (fields, name, str->decl);
-
- update_fields_mapping (cluster, new_type,
+
+ update_fields_mapping (cluster, new_type,
str->fields, str->num_fields);
VEC_safe_push (tree, heap, str->new_types, new_type);
- cluster = cluster->sibling;
+ cluster = cluster->sibling;
}
(*str_num)++;
}
-/* This function is a callback for alloc_sites hashtable
- traversal. SLOT is a pointer to fallocs_t.
+/* This function is a callback for alloc_sites hashtable
+ traversal. SLOT is a pointer to fallocs_t.
This function frees memory pointed by *SLOT. */
static int
@@ -3322,8 +3322,8 @@ remove_unsuitable_types (VEC (tree, heap) *unsuitable_types)
}
/* Exclude structure types with bitfields.
- We would not want to interfere with other optimizations
- that can be done in this case. The structure types with
+ We would not want to interfere with other optimizations
+ that can be done in this case. The structure types with
bitfields are added to UNSUITABLE_TYPES vector. */
static void
@@ -3340,7 +3340,7 @@ exclude_types_with_bit_fields (VEC (tree, heap) **unsuitable_types)
1. if it's a type of parameter of a local function.
2. if it's a type of function return value.
- 3. if it escapes as a result of ipa-type-escape analysis.
+ 3. if it escapes as a result of ipa-type-escape analysis.
The escaping structure types are added to UNSUITABLE_TYPES vector. */
@@ -3352,8 +3352,8 @@ exclude_escaping_types (VEC (tree, heap) **unsuitable_types)
exclude_escaping_types_1 (unsuitable_types);
}
-/* This function analyzes whether the form of
- structure is such that we are capable to transform it.
+/* This function analyzes whether the form of
+ structure is such that we are capable to transform it.
Nested structures are checked here. Unsuitable structure
types are added to UNSUITABLE_TYPE vector. */
@@ -3367,8 +3367,8 @@ analyze_struct_form (VEC (tree, heap) **unsuitable_types)
check_struct_form (str, unsuitable_types);
}
-/* This function looks for structure types instantiated in the program.
- The candidate types are added to the structures vector.
+/* This function looks for structure types instantiated in the program.
+ The candidate types are added to the structures vector.
Unsuitable types are collected into UNSUITABLE_TYPES vector. */
static void
@@ -3379,7 +3379,7 @@ build_data_structure (VEC (tree, heap) **unsuitable_types)
struct varpool_node *current_varpool;
struct cgraph_node *c_node;
- /* Check global variables. */
+ /* Check global variables. */
FOR_EACH_STATIC_VARIABLE (current_varpool)
{
var = current_varpool->decl;
@@ -3387,11 +3387,11 @@ build_data_structure (VEC (tree, heap) **unsuitable_types)
add_structure (type);
}
- /* Now add structures that are types of function parameters and
+ /* Now add structures that are types of function parameters and
local variables. */
for (c_node = cgraph_nodes; c_node; c_node = c_node->next)
{
- enum availability avail =
+ enum availability avail =
cgraph_function_body_availability (c_node);
/* We need AVAIL_AVAILABLE for main function. */
@@ -3399,7 +3399,7 @@ build_data_structure (VEC (tree, heap) **unsuitable_types)
{
struct function *fn = DECL_STRUCT_FUNCTION (c_node->decl);
- for (var = DECL_ARGUMENTS (c_node->decl); var;
+ for (var = DECL_ARGUMENTS (c_node->decl); var;
var = TREE_CHAIN (var))
if (is_candidate (var, &type, unsuitable_types))
add_structure (type);
@@ -3413,7 +3413,7 @@ build_data_structure (VEC (tree, heap) **unsuitable_types)
}
/* Check function local variables. */
- for (var_list = fn->local_decls; var_list;
+ for (var_list = fn->local_decls; var_list;
var_list = TREE_CHAIN (var_list))
{
var = TREE_VALUE (var_list);
@@ -3425,7 +3425,7 @@ build_data_structure (VEC (tree, heap) **unsuitable_types)
}
}
-/* This function returns true if the program contains
+/* This function returns true if the program contains
a call to user defined allocation function, or other
functions that can interfere with struct-reorg optimizations. */
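/* Illustrative sketch (not part of the patch): based on the checks visible
   in the hunks below, calls such as the ones in this example make
   program_redefines_malloc_p return true and so disable the whole pass.  */
#include <stdlib.h>

void disqualifying_calls (void *p, size_t n)
{
  void *q = realloc (p, n);                  /* "realloc" is rejected         */
  size_t s = __builtin_object_size (q, 0);   /* BUILT_IN_OBJECT_SIZE is, too  */
  (void) s;
}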
@@ -3437,7 +3437,7 @@ program_redefines_malloc_p (void)
struct cgraph_edge *c_edge;
tree fndecl;
tree fndecl2;
-
+
for (c_node = cgraph_nodes; c_node; c_node = c_node->next)
{
fndecl = c_node->decl;
@@ -3461,18 +3461,18 @@ program_redefines_malloc_p (void)
if (DECL_FUNCTION_CODE (fndecl2) == BUILT_IN_OBJECT_SIZE
|| !strcmp (fname, "__builtin_offsetof")
|| !strcmp (fname, "realloc"))
- return true;
+ return true;
}
}
}
-
+
return false;
}
-/* In this function we assume that an allocation statement
+/* In this function we assume that an allocation statement
var = (type_cast) malloc (size);
-
+
is converted into the following set of statements:
T.1 = size;
@@ -3480,8 +3480,8 @@ program_redefines_malloc_p (void)
T.3 = (type_cast) T.2;
var = T.3;
- In this function we collect into alloc_sites the allocation
- sites of variables of structure types that are present
+ In this function we collect into alloc_sites the allocation
+ sites of variables of structure types that are present
in structures vector. */
static void
@@ -3502,7 +3502,7 @@ collect_alloc_sites (void)
tree decl;
if (is_gimple_call (stmt)
- && (decl = gimple_call_fndecl (stmt))
+ && (decl = gimple_call_fndecl (stmt))
&& gimple_call_lhs (stmt))
{
unsigned i;
@@ -3513,7 +3513,7 @@ collect_alloc_sites (void)
if (DECL_FUNCTION_CODE (decl) == BUILT_IN_MALLOC)
{
d_str str;
-
+
str = VEC_index (structure, structures, i);
add_alloc_site (node->decl, stmt, str);
}
@@ -3521,15 +3521,15 @@ collect_alloc_sites (void)
{
if (dump_file)
{
- fprintf (dump_file,
+ fprintf (dump_file,
"\nUnsupported allocation function ");
print_gimple_stmt (dump_file, stmt, 0, 0);
}
- remove_structure (i);
+ remove_structure (i);
}
}
}
- }
+ }
}
}
}
@@ -3549,8 +3549,8 @@ dump_accesses (void)
dump_accs (str);
}
-/* This function checks whether the accesses of structures in condition
- expressions are of the kind we are capable to transform.
+/* This function checks whether the accesses of structures in condition
+ expressions are of the kind we are capable to transform.
If not, such structures are removed from the vector of structures. */
static void
@@ -3573,8 +3573,8 @@ check_cond_exprs (void)
}
}
-/* We exclude from non-field accesses of the structure
- all statements that will be treated as part of the structure
+/* We exclude from non-field accesses of the structure
+ all statements that will be treated as part of the structure
allocation sites or field accesses. */
static void
@@ -3587,7 +3587,7 @@ exclude_alloc_and_field_accs (struct cgraph_node *node)
exclude_alloc_and_field_accs_1 (str, node);
}
-/* This function collects accesses of the fields of the structures
+/* This function collects accesses of the fields of the structures
that appear at function FN. */
static void
@@ -3609,7 +3609,7 @@ static void
sum_counts (d_str str, gcov_type *hottest)
{
int i;
-
+
str->count = 0;
for (i = 0; i < str->num_fields; i++)
{
@@ -3617,7 +3617,7 @@ sum_counts (d_str str, gcov_type *hottest)
{
fprintf (dump_file, "\nCounter of field \"");
print_generic_expr (dump_file, str->fields[i].decl, 0);
- fprintf (dump_file, "\" is " HOST_WIDEST_INT_PRINT_DEC,
+ fprintf (dump_file, "\" is " HOST_WIDEST_INT_PRINT_DEC,
str->fields[i].count);
}
str->count += str->fields[i].count;
@@ -3635,7 +3635,7 @@ sum_counts (d_str str, gcov_type *hottest)
}
/* This function peels the field into separate structure if it's
- sufficiently hot, i.e. if its count provides at least 90% of
+ sufficiently hot, i.e. if its count provides at least 90% of
the maximum field count in the structure. */
static void
@@ -3646,7 +3646,7 @@ peel_hot_fields (d_str str)
int i;
sbitmap_ones (fields_left);
- max_field_count =
+ max_field_count =
(gcov_type) (get_max_field_count (str)/100)*90;
str->struct_clustering = NULL;
@@ -3655,7 +3655,7 @@ peel_hot_fields (d_str str)
{
if (str->count >= max_field_count)
{
- RESET_BIT (fields_left, i);
+ RESET_BIT (fields_left, i);
peel_field (i, str);
}
}
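/* Illustrative sketch (not part of the patch): the hotness test applied per
   field in peel_hot_fields, written out as plain C; gcov_type is
   approximated by long long here.  */

static int field_is_hot (long long field_count, long long max_field_count)
{
  /* The threshold is 90% of the hottest field's count, computed with
     truncating integer arithmetic as in the hunk above.  */
  long long threshold = (max_field_count / 100) * 90;
  return field_count >= threshold;
}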
@@ -3665,10 +3665,10 @@ peel_hot_fields (d_str str)
gen_cluster (fields_left, str);
else
sbitmap_free (fields_left);
-}
+}
-/* This function is a helper for do_reorg. It goes over
- functions in call graph and performs actual transformation
+/* This function is a helper for do_reorg. It goes over
+ functions in call graph and performs actual transformation
on them. */
static void
@@ -3699,12 +3699,12 @@ do_reorg_1 (void)
}
/* This function creates new global struct variables.
- For each original variable, the set of new variables
- is created with the new structure types corresponding
- to the structure type of original variable.
+ For each original variable, the set of new variables
+ is created with the new structure types corresponding
+ to the structure type of original variable.
Only VAR_DECL variables are treated by this function. */
-static void
+static void
create_new_global_vars (void)
{
struct varpool_node *current_varpool;
@@ -3714,7 +3714,7 @@ create_new_global_vars (void)
for (i = 0; i < 2; i++)
{
if (i)
- new_global_vars = htab_create (varpool_size,
+ new_global_vars = htab_create (varpool_size,
new_var_hash, new_var_eq, NULL);
FOR_EACH_STATIC_VARIABLE(current_varpool)
{
@@ -3756,9 +3756,9 @@ dump_new_types (void)
dump_struct_type (str->decl, 2, 0);
fprintf (dump_file, "\nthe number of new types is %d\n",
VEC_length (tree, str->new_types));
- }
+ }
for (j = 0; VEC_iterate (tree, str->new_types, j, type); j++)
- dump_struct_type (type, 2, 0);
+ dump_struct_type (type, 2, 0);
}
}
@@ -3781,36 +3781,36 @@ static void
free_alloc_sites (void)
{
if (alloc_sites)
- htab_traverse (alloc_sites, free_falloc_sites, NULL);
+ htab_traverse (alloc_sites, free_falloc_sites, NULL);
htab_delete (alloc_sites);
alloc_sites = NULL;
}
-/* This function collects structures potential
- for peeling transformation, and inserts
+/* This function collects structures that are candidates
+ for the peeling transformation, and inserts
them into structures hashtable. */
-static void
+static void
collect_structures (void)
{
VEC (tree, heap) *unsuitable_types = VEC_alloc (tree, heap, 32);
structures = VEC_alloc (structure, heap, 32);
-
+
/* If program contains user defined mallocs, we give up. */
if (program_redefines_malloc_p ())
- return;
+ return;
- /* Build data structures hashtable of all data structures
+ /* Build data structures hashtable of all data structures
in the program. */
build_data_structure (&unsuitable_types);
- /* This function analyzes whether the form of
- structure is such that we are capable to transform it.
+ /* This function analyzes whether the form of
+ structure is such that we are capable to transform it.
Nested structures are checked here. */
analyze_struct_form (&unsuitable_types);
- /* This function excludes those structure types
+ /* This function excludes those structure types
that escape compilation unit. */
exclude_escaping_types (&unsuitable_types);
@@ -3831,11 +3831,11 @@ collect_allocation_sites (void)
collect_alloc_sites ();
}
-/* This function collects data accesses for the
- structures to be transformed. For each structure
+/* This function collects data accesses for the
+ structures to be transformed. For each structure
field it updates the count field in field_entry. */
-static void
+static void
collect_data_accesses (void)
{
struct cgraph_node *c_node;
@@ -3859,8 +3859,8 @@ collect_data_accesses (void)
}
/* We do not bother to transform cold structures.
- Coldness of the structure is defined relatively
- to the highest structure count among the structures
+ Coldness of the structure is defined relative
+ to the highest structure count among the structures
to be transformed. It's triggered by the compiler parameter
--param struct-reorg-cold-struct-ratio=<value>
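/* Illustrative usage (not part of the patch): the pass is gated on the
   -fipa-struct-reorg and -fwhole-program flags (see struct_reorg_gate near
   the end of this file), and the coldness threshold is the parameter named
   above, e.g.

       gcc -O2 -fwhole-program -fipa-struct-reorg \
           --param struct-reorg-cold-struct-ratio=30 file.c                  */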
@@ -3896,7 +3896,7 @@ exclude_cold_structs (void)
i++;
}
-/* This function decomposes original structure into substructures,
+/* This function decomposes original structure into substructures,
i.e.clusters. */
static void
@@ -3938,13 +3938,13 @@ do_reorg (void)
/* Create new global variables. */
create_new_global_vars ();
- dump_new_vars (new_global_vars);
+ dump_new_vars (new_global_vars);
/* Decompose structures for each function separately. */
do_reorg_1 ();
/* Free auxiliary data collected for global variables. */
- free_new_vars_htab (new_global_vars);
+ free_new_vars_htab (new_global_vars);
}
/* Free all auxiliary data used by this optimization. */
@@ -3953,7 +3953,7 @@ static void
free_data_structs (void)
{
free_structures ();
- free_alloc_sites ();
+ free_alloc_sites ();
}
/* Perform structure decomposition (peeling). */
@@ -3962,15 +3962,15 @@ static void
reorg_structs (void)
{
- /* Stage1. */
+ /* Stage1. */
/* Collect structure types. */
collect_structures ();
/* Collect structure allocation sites. */
- collect_allocation_sites ();
+ collect_allocation_sites ();
/* Collect structure accesses. */
- collect_data_accesses ();
+ collect_data_accesses ();
/* We transform only hot structures. */
exclude_cold_structs ();
@@ -3979,13 +3979,13 @@ reorg_structs (void)
/* Decompose structures into substructures, i.e. clusters. */
peel_structs ();
- /* Stage3. */
+ /* Stage3. */
/* Do the actual transformation for each structure
from the structures hashtable. */
do_reorg ();
/* Free all auxiliary data used by this optimization. */
- free_data_structs ();
+ free_data_structs ();
}
/* Struct-reorg optimization entry point function. */
@@ -4003,11 +4003,11 @@ static bool
struct_reorg_gate (void)
{
return flag_ipa_struct_reorg
- && flag_whole_program
+ && flag_whole_program
&& (optimize > 0);
}
-struct simple_ipa_opt_pass pass_ipa_struct_reorg =
+struct simple_ipa_opt_pass pass_ipa_struct_reorg =
{
{
SIMPLE_IPA_PASS,
diff --git a/gcc/ipa-struct-reorg.h b/gcc/ipa-struct-reorg.h
index 20176f268c7..e6df1cf9052 100644
--- a/gcc/ipa-struct-reorg.h
+++ b/gcc/ipa-struct-reorg.h
@@ -74,7 +74,7 @@ struct field_entry
The original structure is decomposed into substructures, or clusters. */
struct field_cluster
{
- /* A bitmap of field indices. The set bit indicates that the field
+ /* A bitmap of field indices. The set bit indicates that the field
corresponding to it is a part of this cluster. */
sbitmap fields_in_cluster;
struct field_cluster *sibling;
diff --git a/gcc/ipa-type-escape.c b/gcc/ipa-type-escape.c
index edfaab0a0f8..0c8d229f3cf 100644
--- a/gcc/ipa-type-escape.c
+++ b/gcc/ipa-type-escape.c
@@ -90,7 +90,7 @@ enum escape_t
previous cases above. During the analysis phase, a bit is set in
one of these vectors if an operation of the offending class is
discovered to happen on the associated type. */
-
+
static bitmap global_types_exposed_parameter;
static bitmap global_types_full_escape;
@@ -134,7 +134,7 @@ static struct pointer_set_t *visited_stmts;
static bitmap_obstack ipa_obstack;
-/* Static functions from this file that are used
+/* Static functions from this file that are used
before being defined. */
static unsigned int look_for_casts (tree);
static bool is_cast_from_non_pointer (tree, gimple, void *);
@@ -144,11 +144,11 @@ static const char*
get_name_of_type (tree type)
{
tree name = TYPE_NAME (type);
-
+
if (!name)
/* Unnamed type, do what you like here. */
return "<UNNAMED>";
-
+
/* It will be a TYPE_DECL in the case of a typedef, otherwise, an
identifier_node */
if (TREE_CODE (name) == TYPE_DECL)
@@ -164,11 +164,11 @@ get_name_of_type (tree type)
}
else if (TREE_CODE (name) == IDENTIFIER_NODE)
return IDENTIFIER_POINTER (name);
- else
+ else
return "<UNNAMED>";
}
-struct type_brand_s
+struct type_brand_s
{
const char* name;
int seq;
@@ -176,7 +176,7 @@ struct type_brand_s
/* Splay tree comparison function on type_brand_s structures. */
-static int
+static int
compare_type_brand (splay_tree_key sk1, splay_tree_key sk2)
{
struct type_brand_s * k1 = (struct type_brand_s *) sk1;
@@ -185,7 +185,7 @@ compare_type_brand (splay_tree_key sk1, splay_tree_key sk2)
int value = strcmp(k1->name, k2->name);
if (value == 0)
return k2->seq - k1->seq;
- else
+ else
return value;
}
@@ -281,7 +281,7 @@ type_to_consider (tree type)
case VECTOR_TYPE:
case VOID_TYPE:
return true;
-
+
default:
return false;
}
@@ -291,7 +291,7 @@ type_to_consider (tree type)
the POINTER_TOs and if SEE_THRU_ARRAYS is true, remove all of the
ARRAY_OFs and POINTER_TOs. */
-static tree
+static tree
get_canon_type (tree type, bool see_thru_ptrs, bool see_thru_arrays)
{
splay_tree_node result;
@@ -300,16 +300,16 @@ get_canon_type (tree type, bool see_thru_ptrs, bool see_thru_arrays)
return NULL;
type = TYPE_MAIN_VARIANT (type);
- if (see_thru_arrays)
+ if (see_thru_arrays)
while (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
type = TYPE_MAIN_VARIANT (TREE_TYPE (type));
- else if (see_thru_ptrs)
+ else if (see_thru_ptrs)
while (POINTER_TYPE_P (type))
type = TYPE_MAIN_VARIANT (TREE_TYPE (type));
result = splay_tree_lookup (type_to_canon_type, (splay_tree_key) type);
-
+
if (result == NULL)
return discover_unique_type (type);
else return (tree) result->value;
@@ -331,9 +331,9 @@ get_canon_type_uid (tree type, bool see_thru_ptrs, bool see_thru_arrays)
number if TYPE is a pointer to a record or union. The number is
the number of pointer types stripped to get to the record or union
type. Return -1 if TYPE is none of the above. */
-
+
int
-ipa_type_escape_star_count_of_interesting_type (tree type)
+ipa_type_escape_star_count_of_interesting_type (tree type)
{
int count = 0;
/* Strip the *'s off. */
@@ -347,22 +347,22 @@ ipa_type_escape_star_count_of_interesting_type (tree type)
}
/* We are interested in records, and unions only. */
- if (TREE_CODE (type) == RECORD_TYPE
- || TREE_CODE (type) == QUAL_UNION_TYPE
+ if (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE
|| TREE_CODE (type) == UNION_TYPE)
return count;
- else
+ else
return -1;
-}
+}
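/* Illustrative example (not part of the patch): values the star-count
   function above is documented to return for a few representative types.

     struct rec      -> 0    a record itself
     struct rec *    -> 1    one pointer level stripped
     struct rec ***  -> 3    three pointer levels stripped
     int, int *      -> -1   never reaches a record or union               */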
/* Return 0 if TYPE is a record or union type. Return a positive
number if TYPE is a pointer to a record or union. The number is
the number of pointer types stripped to get to the record or union
type. Return -1 if TYPE is none of the above. */
-
+
int
-ipa_type_escape_star_count_of_interesting_or_array_type (tree type)
+ipa_type_escape_star_count_of_interesting_or_array_type (tree type)
{
int count = 0;
/* Strip the *'s off. */
@@ -376,15 +376,15 @@ ipa_type_escape_star_count_of_interesting_or_array_type (tree type)
}
/* We are interested in records, and unions only. */
- if (TREE_CODE (type) == RECORD_TYPE
- || TREE_CODE (type) == QUAL_UNION_TYPE
+ if (TREE_CODE (type) == RECORD_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE
|| TREE_CODE (type) == UNION_TYPE)
return count;
- else
+ else
return -1;
-}
-
-
+}
+
+
/* Return true if the record, or union TYPE passed in escapes this
compilation unit. Note that all of the pointer-to's are removed
before testing since these may not be correct. */
@@ -394,19 +394,19 @@ ipa_type_escape_type_contained_p (tree type)
{
if (!initialized)
return false;
- return !bitmap_bit_p (global_types_full_escape,
+ return !bitmap_bit_p (global_types_full_escape,
get_canon_type_uid (type, true, false));
}
/* Return true if a modification to a field of type FIELD_TYPE cannot
clobber a record of RECORD_TYPE. */
-bool
+bool
ipa_type_escape_field_does_not_clobber_p (tree record_type, tree field_type)
-{
+{
splay_tree_node result;
int uid;
-
+
if (!initialized)
return false;
@@ -418,25 +418,25 @@ ipa_type_escape_field_does_not_clobber_p (tree record_type, tree field_type)
while (POINTER_TYPE_P (record_type))
{
record_type = TYPE_MAIN_VARIANT (TREE_TYPE (record_type));
- if (POINTER_TYPE_P (field_type))
+ if (POINTER_TYPE_P (field_type))
field_type = TYPE_MAIN_VARIANT (TREE_TYPE (field_type));
- else
+ else
/* However, if field_type is a union, this quick test is not
correct since one of the variants of the union may be a
pointer to type and we cannot see across that here. So we
just strip the remaining pointer tos off the record type
and fall thru to the more precise code. */
- if (TREE_CODE (field_type) == QUAL_UNION_TYPE
+ if (TREE_CODE (field_type) == QUAL_UNION_TYPE
|| TREE_CODE (field_type) == UNION_TYPE)
{
while (POINTER_TYPE_P (record_type))
record_type = TYPE_MAIN_VARIANT (TREE_TYPE (record_type));
break;
- }
- else
+ }
+ else
return true;
}
-
+
record_type = get_canon_type (record_type, true, true);
/* The record type must be contained. The field type may
escape. */
@@ -445,8 +445,8 @@ ipa_type_escape_field_does_not_clobber_p (tree record_type, tree field_type)
uid = TYPE_UID (record_type);
result = splay_tree_lookup (uid_to_addressof_down_map, (splay_tree_key) uid);
-
- if (result)
+
+ if (result)
{
bitmap field_type_map = (bitmap) result->value;
uid = get_canon_type_uid (field_type, true, true);
@@ -470,10 +470,10 @@ mark_type (tree type, enum escape_t escape_status)
int uid;
type = get_canon_type (type, true, true);
- if (!type)
+ if (!type)
return NULL;
- switch (escape_status)
+ switch (escape_status)
{
case EXPOSED_PARAMETER:
map = global_types_exposed_parameter;
@@ -494,7 +494,7 @@ mark_type (tree type, enum escape_t escape_status)
/* Efficiency hack. When things are bad, do not mess around
with this type anymore. */
bitmap_set_bit (global_types_exposed_parameter, uid);
- }
+ }
}
return type;
}
@@ -503,7 +503,7 @@ mark_type (tree type, enum escape_t escape_status)
EXPOSED_PARAMETER and the TYPE is a pointer type, the set is
changed to FULL_ESCAPE. */
-static void
+static void
mark_interesting_type (tree type, enum escape_t escape_status)
{
if (!type) return;
@@ -522,24 +522,24 @@ mark_interesting_type (tree type, enum escape_t escape_status)
/* Return true if PARENT is supertype of CHILD. Both types must be
known to be structures or unions. */
-
+
static bool
parent_type_p (tree parent, tree child)
{
int i;
tree binfo, base_binfo;
- if (TYPE_BINFO (parent))
+ if (TYPE_BINFO (parent))
for (binfo = TYPE_BINFO (parent), i = 0;
BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
{
tree binfotype = BINFO_TYPE (base_binfo);
- if (binfotype == child)
+ if (binfotype == child)
return true;
else if (parent_type_p (binfotype, child))
return true;
}
if (TREE_CODE (parent) == UNION_TYPE
- || TREE_CODE (parent) == QUAL_UNION_TYPE)
+ || TREE_CODE (parent) == QUAL_UNION_TYPE)
{
tree field;
/* Search all of the variants in the union to see if one of them
@@ -551,9 +551,9 @@ parent_type_p (tree parent, tree child)
tree field_type;
if (TREE_CODE (field) != FIELD_DECL)
continue;
-
+
field_type = TREE_TYPE (field);
- if (field_type == child)
+ if (field_type == child)
return true;
}
@@ -566,16 +566,16 @@ parent_type_p (tree parent, tree child)
tree field_type;
if (TREE_CODE (field) != FIELD_DECL)
continue;
-
+
field_type = TREE_TYPE (field);
- if (TREE_CODE (field_type) == RECORD_TYPE
- || TREE_CODE (field_type) == QUAL_UNION_TYPE
+ if (TREE_CODE (field_type) == RECORD_TYPE
+ || TREE_CODE (field_type) == QUAL_UNION_TYPE
|| TREE_CODE (field_type) == UNION_TYPE)
- if (parent_type_p (field_type, child))
+ if (parent_type_p (field_type, child))
return true;
}
}
-
+
if (TREE_CODE (parent) == RECORD_TYPE)
{
tree field;
@@ -586,19 +586,19 @@ parent_type_p (tree parent, tree child)
tree field_type;
if (TREE_CODE (field) != FIELD_DECL)
continue;
-
+
field_type = TREE_TYPE (field);
- if (field_type == child)
+ if (field_type == child)
return true;
/* You can only cast to the first field so if it does not
match, quit. */
- if (TREE_CODE (field_type) == RECORD_TYPE
- || TREE_CODE (field_type) == QUAL_UNION_TYPE
+ if (TREE_CODE (field_type) == RECORD_TYPE
+ || TREE_CODE (field_type) == QUAL_UNION_TYPE
|| TREE_CODE (field_type) == UNION_TYPE)
{
- if (parent_type_p (field_type, child))
+ if (parent_type_p (field_type, child))
return true;
- else
+ else
break;
}
}
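/* Illustrative sketch (not part of the patch): the layout relationship
   parent_type_p recognizes; the type names are hypothetical.  */

struct child  { int x; };

struct parent { struct child first;   /* child is the first field, so       */
                int extra; };         /* parent_type_p (parent, child)
                                         holds; check_cast_type then treats
                                         a cast whose destination type is
                                         parent and whose source type is
                                         child as CT_UP, and the reverse
                                         direction as CT_DOWN.  */

union variants { struct child c;      /* a union member likewise makes the   */
                 double d; };         /* union a parent of that member.      */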
@@ -609,7 +609,7 @@ parent_type_p (tree parent, tree child)
/* Return the number of pointer tos for TYPE and return TYPE with all
of these stripped off. */
-static int
+static int
count_stars (tree* type_ptr)
{
tree type = *type_ptr;
@@ -646,7 +646,7 @@ check_cast_type (tree to_type, tree from_type)
{
int to_stars = count_stars (&to_type);
int from_stars = count_stars (&from_type);
- if (to_stars != from_stars)
+ if (to_stars != from_stars)
return CT_SIDEWAYS;
if (to_type == from_type)
@@ -655,9 +655,9 @@ check_cast_type (tree to_type, tree from_type)
if (parent_type_p (to_type, from_type)) return CT_UP;
if (parent_type_p (from_type, to_type)) return CT_DOWN;
return CT_SIDEWAYS;
-}
+}
-/* This function returns nonzero if VAR is result of call
+/* This function returns nonzero if VAR is result of call
to malloc function. */
static bool
@@ -667,12 +667,12 @@ is_malloc_result (tree var)
if (!var)
return false;
-
+
if (SSA_NAME_IS_DEFAULT_DEF (var))
return false;
def_stmt = SSA_NAME_DEF_STMT (var);
-
+
if (!is_gimple_call (def_stmt))
return false;
@@ -685,9 +685,9 @@ is_malloc_result (tree var)
/* Check a cast FROM this variable, TO_TYPE. Mark the escaping types
if appropriate. Returns cast_type as detected. */
-
+
static enum cast_type
-check_cast (tree to_type, tree from)
+check_cast (tree to_type, tree from)
{
tree from_type = get_canon_type (TREE_TYPE (from), false, false);
bool to_interesting_type, from_interesting_type;
@@ -697,12 +697,12 @@ check_cast (tree to_type, tree from)
if (!from_type || !to_type || from_type == to_type)
return cast;
- to_interesting_type =
+ to_interesting_type =
ipa_type_escape_star_count_of_interesting_type (to_type) >= 0;
- from_interesting_type =
+ from_interesting_type =
ipa_type_escape_star_count_of_interesting_type (from_type) >= 0;
- if (to_interesting_type)
+ if (to_interesting_type)
if (from_interesting_type)
{
/* Both types are interesting. This can be one of four types
@@ -712,7 +712,7 @@ check_cast (tree to_type, tree from)
interesting here because if type is marked as escaping, all
of its subtypes escape. */
cast = check_cast_type (to_type, from_type);
- switch (cast)
+ switch (cast)
{
case CT_UP:
case CT_USELESS:
@@ -731,18 +731,18 @@ check_cast (tree to_type, tree from)
else
{
/* This code excludes two cases from marking as escaped:
-
+
1. if this is a cast of index of array of structures/unions
- that happens before accessing array element, we should not
+ that happens before accessing array element, we should not
mark it as escaped.
2. if this is a cast from the local that is a result from a
- call to malloc, do not mark the cast as bad.
+ call to malloc, do not mark the cast as bad.
*/
-
+
if (POINTER_TYPE_P (to_type) && !POINTER_TYPE_P (from_type))
cast = CT_FROM_NON_P;
- else if (TREE_CODE (from) == SSA_NAME
+ else if (TREE_CODE (from) == SSA_NAME
&& is_malloc_result (from))
cast = CT_FROM_MALLOC;
else
@@ -786,16 +786,16 @@ look_for_casts_stmt (gimple s)
cast = CT_NO_CAST;
return cast;
-}
+}
-typedef struct cast
+typedef struct cast
{
int type;
gimple stmt;
} cast_t;
-/* This function is a callback for walk_use_def_chains function called
+/* This function is a callback for walk_use_def_chains function called
from is_array_access_through_pointer_and_index. */
static bool
@@ -803,7 +803,7 @@ is_cast_from_non_pointer (tree var, gimple def_stmt, void *data)
{
if (!def_stmt || !var)
return false;
-
+
if (gimple_code (def_stmt) == GIMPLE_PHI)
return false;
@@ -812,13 +812,13 @@ is_cast_from_non_pointer (tree var, gimple def_stmt, void *data)
if (is_gimple_assign (def_stmt))
{
- use_operand_p use_p;
+ use_operand_p use_p;
ssa_op_iter iter;
unsigned int cast = look_for_casts_stmt (def_stmt);
/* Check that only one cast happened, and it's of non-pointer
type. */
- if ((cast & CT_FROM_NON_P) == (CT_FROM_NON_P)
+ if ((cast & CT_FROM_NON_P) == (CT_FROM_NON_P)
&& (cast & ~(CT_FROM_NON_P)) == 0)
{
((cast_t *)data)->stmt = def_stmt;
@@ -845,20 +845,20 @@ is_cast_from_non_pointer (tree var, gimple def_stmt, void *data)
is_cast_from_non_pointer, data, false);
if (((cast_t*)data)->type == -1)
break;
- }
+ }
}
/* The cast is harmful. */
else
((cast_t *)data)->type = -1;
- }
+ }
if (((cast_t*)data)->type == -1)
return true;
-
+
return false;
}
-/* When array element a_p[i] is accessed through the pointer a_p
+/* When array element a_p[i] is accessed through the pointer a_p
and index i, it's translated into the following sequence
in gimple:
@@ -868,41 +868,41 @@ is_cast_from_non_pointer (tree var, gimple def_stmt, void *data)
a_p.2_8 = a_p;
D.1608_9 = D.1606_7 + a_p.2_8;
- OP0 and OP1 are of the same pointer types and stand for
+ OP0 and OP1 are of the same pointer types and stand for
D.1606_7 and a_p.2_8 or vice versa.
This function checks that:
- 1. one of OP0 and OP1 (D.1606_7) has passed only one cast from
+ 1. one of OP0 and OP1 (D.1606_7) has passed only one cast from
non-pointer type (D.1606_7 = (struct str_t *) D.1605_6;).
- 2. one of OP0 and OP1 which has passed the cast from
- non-pointer type (D.1606_7), is actually generated by multiplication of
+ 2. one of OP0 and OP1 which has passed the cast from
+ non-pointer type (D.1606_7), is actually generated by multiplication of
index by size of type to which both OP0 and OP1 point to
(in this case D.1605_6 = i.1_5 * 16; ).
- 3. an address of def of the var to which was made cast (D.1605_6)
+ 3. an address of def of the var to which was made cast (D.1605_6)
was not taken.(How can it happen?)
The following items are checked implicitly by the end of algorithm:
- 4. one of OP0 and OP1 (a_p.2_8) have never been cast
- (because if it was cast to pointer type, its type, that is also
- the type of OP0 and OP1, will be marked as escaped during
- analysis of casting stmt (when check_cast() is called
- from scan_for_refs for this stmt)).
+ 4. one of OP0 and OP1 (a_p.2_8) have never been cast
+ (because if it was cast to pointer type, its type, that is also
+ the type of OP0 and OP1, will be marked as escaped during
+ analysis of casting stmt (when check_cast() is called
+ from scan_for_refs for this stmt)).
5. defs of OP0 and OP1 are not passed into externally visible function
(because if they are passed then their type, that is also the type of OP0
- and OP1, will be marked and escaped during check_call function called from
+ and OP1, will be marked and escaped during check_call function called from
scan_for_refs with call stmt).
- In total, 1-5 guarantee that it's an access to an array by pointer and index. 
+ In total, 1-5 guarantee that it's an access to an array by pointer and index.
*/
bool
-is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
+is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
tree op1, tree *base, tree *offset,
gimple *offset_cast_stmt)
{
@@ -945,7 +945,7 @@ is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
false);
pointer_set_destroy (visited_stmts);
- visited_stmts = pointer_set_create ();
+ visited_stmts = pointer_set_create ();
walk_use_def_chains (op1, is_cast_from_non_pointer,(void *)(&op1_cast),
false);
pointer_set_destroy (visited_stmts);
@@ -959,15 +959,15 @@ is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
else if (op0_cast.type == 0 && op1_cast.type == 1)
{
*base = op0;
- *offset = op1;
+ *offset = op1;
*offset_cast_stmt = op1_cast.stmt;
}
else
return false;
}
-
- /* Check 2.
- offset_cast_stmt is of the form:
+
+ /* Check 2.
+ offset_cast_stmt is of the form:
D.1606_7 = (struct str_t *) D.1605_6; */
if (*offset_cast_stmt)
@@ -975,10 +975,10 @@ is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
before_cast = SINGLE_SSA_TREE_OPERAND (*offset_cast_stmt, SSA_OP_USE);
if (!before_cast)
return false;
-
+
if (SSA_NAME_IS_DEFAULT_DEF (before_cast))
return false;
-
+
before_cast_def_stmt = SSA_NAME_DEF_STMT (before_cast);
if (!before_cast_def_stmt)
return false;
@@ -988,7 +988,7 @@ is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
/* before_cast_def_stmt should be of the form:
D.1605_6 = i.1_5 * 16; */
-
+
if (is_gimple_assign (before_cast_def_stmt))
{
/* We expect temporary here. */
@@ -999,14 +999,14 @@ is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
{
tree arg0 = gimple_assign_rhs1 (before_cast_def_stmt);
tree arg1 = gimple_assign_rhs2 (before_cast_def_stmt);
- tree unit_size =
+ tree unit_size =
TYPE_SIZE_UNIT (TREE_TYPE (TYPE_MAIN_VARIANT (TREE_TYPE (op0))));
- if (!(CONSTANT_CLASS_P (arg0)
+ if (!(CONSTANT_CLASS_P (arg0)
&& simple_cst_equal (arg0, unit_size))
- && !(CONSTANT_CLASS_P (arg1)
+ && !(CONSTANT_CLASS_P (arg1)
&& simple_cst_equal (arg1, unit_size)))
- return false;
+ return false;
}
else
return false;
@@ -1024,11 +1024,11 @@ is_array_access_through_pointer_and_index (enum tree_code code, tree op0,
/* Register the parameter and return types of function FN. The type
ESCAPES if the function is visible outside of the compilation
unit. */
-static void
-check_function_parameter_and_return_types (tree fn, bool escapes)
+static void
+check_function_parameter_and_return_types (tree fn, bool escapes)
{
tree arg;
-
+
if (TYPE_ARG_TYPES (TREE_TYPE (fn)))
{
for (arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
@@ -1057,7 +1057,7 @@ check_function_parameter_and_return_types (tree fn, bool escapes)
if (escapes)
{
tree type = get_canon_type (TREE_TYPE (TREE_TYPE (fn)), false, false);
- mark_interesting_type (type, EXPOSED_PARAMETER);
+ mark_interesting_type (type, EXPOSED_PARAMETER);
}
}
@@ -1080,7 +1080,7 @@ has_proper_scope_for_analysis (tree t)
/* Do not want to do anything with volatile except mark any
function that uses one to be not const or pure. */
- if (TREE_THIS_VOLATILE (t))
+ if (TREE_THIS_VOLATILE (t))
return;
/* Do not care about a local automatic that is not static. */
@@ -1093,12 +1093,12 @@ has_proper_scope_for_analysis (tree t)
constant, we can allow this variable in pure or const
functions but the scope is too large for our analysis to set
these bits ourselves. */
-
+
if (TREE_READONLY (t)
&& DECL_INITIAL (t)
&& is_gimple_min_invariant (DECL_INITIAL (t)))
; /* Read of a constant, do not change the function state. */
- else
+ else
{
/* The type escapes for all public and externs. */
mark_interesting_type (type, FULL_ESCAPE);
@@ -1120,7 +1120,7 @@ check_operand (tree t)
check_function_parameter_and_return_types (t, true);
else if (TREE_CODE (t) == VAR_DECL)
- has_proper_scope_for_analysis (t);
+ has_proper_scope_for_analysis (t);
}
/* Examine tree T for references. */
@@ -1156,11 +1156,11 @@ mark_interesting_addressof (tree to_type, tree from_type)
int from_uid;
int to_uid;
bitmap type_map;
- splay_tree_node result;
+ splay_tree_node result;
from_type = get_canon_type (from_type, false, false);
to_type = get_canon_type (to_type, false, false);
-
+
if (!from_type || !to_type)
return;
@@ -1168,41 +1168,41 @@ mark_interesting_addressof (tree to_type, tree from_type)
to_uid = TYPE_UID (to_type);
gcc_assert (ipa_type_escape_star_count_of_interesting_type (from_type) == 0);
-
+
/* Process the Y into X map pointer. */
- result = splay_tree_lookup (uid_to_addressof_down_map,
+ result = splay_tree_lookup (uid_to_addressof_down_map,
(splay_tree_key) from_uid);
-
- if (result)
- type_map = (bitmap) result->value;
- else
+
+ if (result)
+ type_map = (bitmap) result->value;
+ else
{
type_map = BITMAP_ALLOC (&ipa_obstack);
splay_tree_insert (uid_to_addressof_down_map,
- from_uid,
+ from_uid,
(splay_tree_value)type_map);
}
bitmap_set_bit (type_map, TYPE_UID (to_type));
-
+
/* Process the X into Y reverse map pointer. */
- result =
+ result =
splay_tree_lookup (uid_to_addressof_up_map, (splay_tree_key) to_uid);
-
- if (result)
- type_map = (bitmap) result->value;
- else
+
+ if (result)
+ type_map = (bitmap) result->value;
+ else
{
type_map = BITMAP_ALLOC (&ipa_obstack);
splay_tree_insert (uid_to_addressof_up_map,
- to_uid,
+ to_uid,
(splay_tree_value)type_map);
}
- bitmap_set_bit (type_map, TYPE_UID (from_type));
+ bitmap_set_bit (type_map, TYPE_UID (from_type));
}
/* Scan tree T to see if there are any addresses taken in within T. */
-static void
+static void
look_for_address_of (tree t)
{
if (TREE_CODE (t) == ADDR_EXPR)
@@ -1211,14 +1211,14 @@ look_for_address_of (tree t)
tree cref = TREE_OPERAND (t, 0);
/* If we have an expression of the form "&a.b.c.d", mark a.b,
- b.c and c.d. as having its address taken. */
+ b.c and c.d. as having its address taken. */
tree fielddecl = NULL_TREE;
while (cref!= x)
{
if (TREE_CODE (cref) == COMPONENT_REF)
{
fielddecl = TREE_OPERAND (cref, 1);
- mark_interesting_addressof (TREE_TYPE (fielddecl),
+ mark_interesting_addressof (TREE_TYPE (fielddecl),
DECL_FIELD_CONTEXT (fielddecl));
}
else if (TREE_CODE (cref) == ARRAY_REF)
@@ -1227,7 +1227,7 @@ look_for_address_of (tree t)
cref = TREE_OPERAND (cref, 0);
}
- if (TREE_CODE (x) == VAR_DECL)
+ if (TREE_CODE (x) == VAR_DECL)
has_proper_scope_for_analysis (x);
}
}
@@ -1235,7 +1235,7 @@ look_for_address_of (tree t)
/* Scan tree T to see if there are any casts within it. */
-static unsigned int
+static unsigned int
look_for_casts (tree t)
{
unsigned int cast = 0;
@@ -1245,7 +1245,7 @@ look_for_casts (tree t)
tree castfromvar = TREE_OPERAND (t, 0);
cast = cast | check_cast (TREE_TYPE (t), castfromvar);
}
- else
+ else
while (handled_component_p (t))
{
t = TREE_OPERAND (t, 0);
@@ -1264,7 +1264,7 @@ look_for_casts (tree t)
if (!cast)
cast = CT_NO_CAST;
return cast;
-}
+}
/* Check to see if T is a read or address of operation on a static var
we are interested in analyzing. */
@@ -1302,7 +1302,7 @@ check_asm (gimple stmt)
for (i = 0; i < gimple_asm_ninputs (stmt); i++)
check_rhs_var (gimple_asm_input_op (stmt, i));
-
+
/* There is no code here to check for asm memory clobbers. The
casual maintainer might think that such code would be necessary,
but that appears to be wrong. In other parts of the compiler,
@@ -1327,14 +1327,14 @@ check_call (gimple call)
for (i = 0; i < gimple_call_num_args (call); i++)
check_rhs_var (gimple_call_arg (call, i));
-
+
if (callee_t)
{
tree arg_type;
tree last_arg_type = NULL;
callee = cgraph_node(callee_t);
avail = cgraph_function_body_availability (callee);
-
+
/* Check that there are no implicit casts in the passing of
parameters. */
if (TYPE_ARG_TYPES (TREE_TYPE (callee_t)))
@@ -1349,15 +1349,15 @@ check_call (gimple call)
last_arg_type = TREE_VALUE(arg_type);
check_cast (last_arg_type, operand);
}
- else
+ else
/* The code reaches here for some unfortunate
builtin functions that do not have a list of
argument types. */
- break;
+ break;
}
- }
- else
- {
+ }
+ else
+ {
/* FIXME - According to Geoff Keating, we should never
have to do this; the front ends should always process
the arg list from the TYPE_ARG_LIST. */
@@ -1370,15 +1370,15 @@ check_call (gimple call)
{
last_arg_type = TREE_TYPE (arg_type);
check_cast (last_arg_type, operand);
- }
- else
+ }
+ else
/* The code reaches here for some unfortunate
builtin functions that do not have a list of
argument types. */
- break;
+ break;
}
}
-
+
/* In the case where we have a var_args function, we need to
check the remaining parameters against the last argument. */
arg_type = last_arg_type;
@@ -1387,7 +1387,7 @@ check_call (gimple call)
tree operand = gimple_call_arg (call, i);
if (arg_type)
check_cast (arg_type, operand);
- else
+ else
{
/* The code reaches here for some unfortunate
builtin functions that do not have a list of
@@ -1415,10 +1415,10 @@ check_call (gimple call)
tree type = get_canon_type (TREE_TYPE (operand), false, false);
mark_interesting_type (type, EXPOSED_PARAMETER);
}
-
- if (callee_t)
+
+ if (callee_t)
{
- tree type =
+ tree type =
get_canon_type (TREE_TYPE (TREE_TYPE (callee_t)), false, false);
mark_interesting_type (type, EXPOSED_PARAMETER);
}
@@ -1427,7 +1427,7 @@ check_call (gimple call)
/* CODE is the operation on OP0 and OP1. OP0 is the operand that we
*know* is a pointer type. OP1 may be a pointer type. */
-static bool
+static bool
okay_pointer_operation (enum tree_code code, tree op0, tree op1)
{
tree op0type = TYPE_MAIN_VARIANT (TREE_TYPE (op0));
@@ -1446,28 +1446,28 @@ okay_pointer_operation (enum tree_code code, tree op0, tree op1)
gimple offset_cast_stmt;
if (POINTER_TYPE_P (op0type)
- && TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == SSA_NAME
- && is_array_access_through_pointer_and_index (code, op0, op1,
- &base,
- &offset,
+ && TREE_CODE (op0) == SSA_NAME
+ && TREE_CODE (op1) == SSA_NAME
+ && is_array_access_through_pointer_and_index (code, op0, op1,
+ &base,
+ &offset,
&offset_cast_stmt))
return true;
else
{
tree size_of_op0_points_to = TYPE_SIZE_UNIT (TREE_TYPE (op0type));
-
+
if (CONSTANT_CLASS_P (op1)
&& size_of_op0_points_to
- && multiple_of_p (TREE_TYPE (size_of_op0_points_to),
+ && multiple_of_p (TREE_TYPE (size_of_op0_points_to),
op1, size_of_op0_points_to))
return true;
- if (CONSTANT_CLASS_P (op0)
+ if (CONSTANT_CLASS_P (op0)
&& size_of_op0_points_to
- && multiple_of_p (TREE_TYPE (size_of_op0_points_to),
+ && multiple_of_p (TREE_TYPE (size_of_op0_points_to),
op0, size_of_op0_points_to))
- return true;
+ return true;
}
}
break;
@@ -1493,7 +1493,7 @@ check_assign (gimple t)
/* Next check the operands on the rhs to see if they are ok. */
switch (TREE_CODE_CLASS (gimple_assign_rhs_code (t)))
{
- case tcc_binary:
+ case tcc_binary:
{
tree op0 = gimple_assign_rhs1 (t);
tree type0 = get_canon_type (TREE_TYPE (op0), false, false);
@@ -1574,22 +1574,22 @@ check_assign (gimple t)
static void
scan_for_refs (gimple t)
{
- switch (gimple_code (t))
+ switch (gimple_code (t))
{
case GIMPLE_ASSIGN:
check_assign (t);
break;
- case GIMPLE_CALL:
+ case GIMPLE_CALL:
/* If this is a call to malloc, squirrel away the result so we
do mark the resulting cast as being bad. */
check_call (t);
break;
-
+
case GIMPLE_ASM:
check_asm (t);
break;
-
+
default:
break;
}
@@ -1600,8 +1600,8 @@ scan_for_refs (gimple t)
/* The init routine for analyzing global static variable usage. See
comments at top for description. */
-static void
-ipa_init (void)
+static void
+ipa_init (void)
{
bitmap_obstack_initialize (&ipa_obstack);
global_types_exposed_parameter = BITMAP_ALLOC (&ipa_obstack);
@@ -1629,7 +1629,7 @@ ipa_init (void)
compilation unit but their right hand sides may contain references
to variables defined within this unit. */
-static void
+static void
analyze_variable (struct varpool_node *vnode)
{
tree global = vnode->decl;
@@ -1654,11 +1654,11 @@ static void
analyze_function (struct cgraph_node *fn)
{
tree decl = fn->decl;
- check_function_parameter_and_return_types (decl,
+ check_function_parameter_and_return_types (decl,
fn->local.externally_visible);
if (dump_file)
fprintf (dump_file, "\n local analysis of %s", cgraph_node_name (fn));
-
+
{
struct function *this_cfun = DECL_STRUCT_FUNCTION (decl);
basic_block this_block;
@@ -1680,7 +1680,7 @@ analyze_function (struct cgraph_node *fn)
step = TREE_CHAIN (step))
{
tree var = TREE_VALUE (step);
- if (TREE_CODE (var) == VAR_DECL
+ if (TREE_CODE (var) == VAR_DECL
&& DECL_INITIAL (var)
&& !TREE_STATIC (var))
check_tree (DECL_INITIAL (var));
@@ -1695,31 +1695,31 @@ analyze_function (struct cgraph_node *fn)
static tree
type_for_uid (int uid)
{
- splay_tree_node result =
+ splay_tree_node result =
splay_tree_lookup (uid_to_canon_type, (splay_tree_key) uid);
-
+
if (result)
- return (tree) result->value;
+ return (tree) result->value;
else return NULL;
}
/* Return a bitmap with the subtypes of the type for UID. If it
does not exist, return either NULL or a new bitmap depending on the
- value of CREATE. */
+ value of CREATE. */
static bitmap
subtype_map_for_uid (int uid, bool create)
{
- splay_tree_node result = splay_tree_lookup (uid_to_subtype_map,
+ splay_tree_node result = splay_tree_lookup (uid_to_subtype_map,
(splay_tree_key) uid);
-
- if (result)
- return (bitmap) result->value;
+
+ if (result)
+ return (bitmap) result->value;
else if (create)
{
bitmap subtype_map = BITMAP_ALLOC (&ipa_obstack);
splay_tree_insert (uid_to_subtype_map,
- uid,
+ uid,
(splay_tree_value)subtype_map);
return subtype_map;
}
@@ -1751,17 +1751,17 @@ close_type_seen (tree type)
/* If we are doing a language with a type hierarchy, mark all of
the superclasses. */
- if (TYPE_BINFO (type))
+ if (TYPE_BINFO (type))
for (binfo = TYPE_BINFO (type), i = 0;
BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
{
tree binfo_type = BINFO_TYPE (base_binfo);
- bitmap subtype_map = subtype_map_for_uid
+ bitmap subtype_map = subtype_map_for_uid
(TYPE_UID (TYPE_MAIN_VARIANT (binfo_type)), true);
bitmap_set_bit (subtype_map, uid);
close_type_seen (get_canon_type (binfo_type, true, true));
}
-
+
/* If the field is a struct or union type, mark all of the
subfields. */
for (field = TYPE_FIELDS (type);
@@ -1781,7 +1781,7 @@ close_type_seen (tree type)
/* Take a TYPE that has been passed by value to an external function
and mark all of the fields that have pointer types as escaping. For
any of the non pointer types that are structures or unions,
- recurse. TYPE is never a pointer type. */
+ recurse. TYPE is never a pointer type. */
static void
close_type_exposed_parameter (tree type)
@@ -1814,13 +1814,13 @@ close_type_exposed_parameter (tree type)
mark_interesting_type (field_type, EXPOSED_PARAMETER);
/* Only recurse for non pointer types of structures and unions. */
- if (ipa_type_escape_star_count_of_interesting_type (field_type) == 0)
+ if (ipa_type_escape_star_count_of_interesting_type (field_type) == 0)
close_type_exposed_parameter (field_type);
}
}
/* The next function handles the case where a type fully escapes.
- This means that not only does the type itself escape,
+ This means that not only does the type itself escape,
a) the type of every field recursively escapes
b) the type of every subtype escapes as well as the super as well
@@ -1831,7 +1831,7 @@ close_type_exposed_parameter (tree type)
Take a TYPE that has had the address taken for an instance of it
and mark all of the types for its fields as having their addresses
- taken. */
+ taken. */
static void
close_type_full_escape (tree type)
@@ -1842,7 +1842,7 @@ close_type_full_escape (tree type)
tree binfo, base_binfo;
bitmap_iterator bi;
bitmap subtype_map;
- splay_tree_node address_result;
+ splay_tree_node address_result;
/* Strip off any pointer or array types. */
type = get_canon_type (type, true, true);
@@ -1858,7 +1858,7 @@ close_type_full_escape (tree type)
/* If we are doing a language with a type hierarchy, mark all of
the superclasses. */
- if (TYPE_BINFO (type))
+ if (TYPE_BINFO (type))
for (binfo = TYPE_BINFO (type), i = 0;
BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
{
@@ -1866,13 +1866,13 @@ close_type_full_escape (tree type)
binfotype = mark_type (binfotype, FULL_ESCAPE);
close_type_full_escape (binfotype);
}
-
+
/* Mark as escaped any types that have been down casted to
this type. */
if (subtype_map)
EXECUTE_IF_SET_IN_BITMAP (subtype_map, 0, i, bi)
{
- tree subtype = type_for_uid (i);
+ tree subtype = type_for_uid (i);
subtype = mark_type (subtype, FULL_ESCAPE);
close_type_full_escape (subtype);
}
@@ -1897,7 +1897,7 @@ close_type_full_escape (tree type)
/* For all of the types A that contain this type B and were part of
an expression like "&...A.B...", mark the A's as escaping. */
- address_result = splay_tree_lookup (uid_to_addressof_up_map,
+ address_result = splay_tree_lookup (uid_to_addressof_up_map,
(splay_tree_key) uid);
if (address_result)
{
@@ -1911,21 +1911,21 @@ close_type_full_escape (tree type)
/* Transitively close the addressof bitmap for the type with UID.
This means that if we had a.b and b.c, a would have both b and c in
- its maps. */
+ its maps. */
static bitmap
-close_addressof_down (int uid)
+close_addressof_down (int uid)
{
bitmap_iterator bi;
- splay_tree_node result =
+ splay_tree_node result =
splay_tree_lookup (uid_to_addressof_down_map, (splay_tree_key) uid);
bitmap map = NULL;
bitmap new_map;
unsigned int i;
-
- if (result)
+
+ if (result)
map = (bitmap) result->value;
- else
+ else
return NULL;
if (bitmap_bit_p (been_there_done_that, uid))
@@ -1949,9 +1949,9 @@ close_addressof_down (int uid)
{
bitmap submap = close_addressof_down (i);
bitmap_set_bit (new_map, i);
- if (submap)
+ if (submap)
bitmap_ior_into (new_map, submap);
- }
+ }
result->value = (splay_tree_value) new_map;
BITMAP_FREE (map);
@@ -1981,7 +1981,7 @@ type_escape_execute (void)
We do not want to process any of the clones so we check that this
is a master clone. However, we do need to process any
AVAIL_OVERWRITABLE functions (these are never clones) because
- they may cause a type variable to escape.
+ they may cause a type variable to escape.
*/
for (node = cgraph_nodes; node; node = node->next)
if (node->analyzed)
@@ -2032,7 +2032,7 @@ type_escape_execute (void)
contained an entry for Y if there had been an operation of the
form &X.Y. This step adds all of the fields contained within Y
(recursively) to X's map. */
-
+
result = splay_tree_min (uid_to_addressof_down_map);
while (result)
{
@@ -2050,7 +2050,7 @@ type_escape_execute (void)
{
tree type = (tree) result->value;
tree key = (tree) result->key;
- if (POINTER_TYPE_P (type)
+ if (POINTER_TYPE_P (type)
|| TREE_CODE (type) == ARRAY_TYPE)
{
splay_tree_remove (all_canon_types, (splay_tree_key) result->key);
@@ -2062,7 +2062,7 @@ type_escape_execute (void)
}
if (dump_file)
- {
+ {
EXECUTE_IF_SET_IN_BITMAP (global_types_seen, 0, i, bi)
{
/* The pointer types are in the global_types_full_escape
@@ -2073,7 +2073,7 @@ type_escape_execute (void)
print_generic_expr (dump_file, type, 0);
if (bitmap_bit_p (global_types_full_escape, i))
fprintf(dump_file, " escaped\n");
- else
+ else
fprintf(dump_file, " contained\n");
}
}
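The comment block above is_array_access_through_pointer_and_index describes the gimple shape of an a_p[i] access: the index is scaled by the element size, cast to the pointer's representation, and added to the base pointer. As a standalone illustration (not GCC internals; the struct, field names and the 16-byte size are invented for the example, and uintptr_t arithmetic stands in for the pointer-plus form), the same lowering written by hand in plain C looks like this:

#include <stdint.h>
#include <stdio.h>

struct str_t { int a; int b; int c; int d; };   /* 16 bytes on typical targets */

/* Hand-lowered equivalent of "return a_p[i].a;", mirroring the sequence
   D.1605_6 = i.1_5 * 16;
   D.1606_7 = (struct str_t *) D.1605_6;
   D.1608_9 = D.1606_7 + a_p.2_8;  */
static int
access_by_pointer_and_index (struct str_t *a_p, long i)
{
  uintptr_t scaled = (uintptr_t) i * sizeof (struct str_t);  /* index * element size */
  uintptr_t sum = (uintptr_t) a_p + scaled;                  /* offset + base */
  return ((struct str_t *) sum)->a;
}

int
main (void)
{
  struct str_t arr[4] = { { 1 }, { 2 }, { 3 }, { 4 } };
  printf ("%d\n", access_by_pointer_and_index (arr, 2));     /* prints 3 */
  return 0;
}

The analysis accepts only this shape: exactly one operand passed through a single cast from a non-pointer value, and the value before that cast is a multiple of the element size; any other pointer/non-pointer mix marks the pointed-to type as escaping.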
diff --git a/gcc/ipa-type-escape.h b/gcc/ipa-type-escape.h
index 1d0172ff028..0cb9a248bc3 100644
--- a/gcc/ipa-type-escape.h
+++ b/gcc/ipa-type-escape.h
@@ -24,7 +24,7 @@ along with GCC; see the file COPYING3. If not see
bool ipa_type_escape_type_contained_p (tree type);
bool ipa_type_escape_field_does_not_clobber_p (tree, tree);
-int ipa_type_escape_star_count_of_interesting_type (tree type);
+int ipa_type_escape_star_count_of_interesting_type (tree type);
int ipa_type_escape_star_count_of_interesting_or_array_type (tree type);
bool is_array_access_through_pointer_and_index (enum tree_code, tree, tree,
tree *, tree *, gimple *);
diff --git a/gcc/ipa-utils.c b/gcc/ipa-utils.c
index 0b7ec66ab2c..4b88f599d53 100644
--- a/gcc/ipa-utils.c
+++ b/gcc/ipa-utils.c
@@ -44,15 +44,15 @@ along with GCC; see the file COPYING3. If not see
that is printed before the nodes are printed. ORDER is an array of
cgraph_nodes that has COUNT useful nodes in it. */
-void
-ipa_utils_print_order (FILE* out,
- const char * note,
- struct cgraph_node** order,
- int count)
+void
+ipa_utils_print_order (FILE* out,
+ const char * note,
+ struct cgraph_node** order,
+ int count)
{
int i;
fprintf (out, "\n\n ordered call graph: %s\n", note);
-
+
for (i = count - 1; i >= 0; i--)
dump_cgraph_node(dump_file, order[i]);
fprintf (out, "\n");
@@ -86,17 +86,17 @@ searchc (struct searchc_env* env, struct cgraph_node *v,
{
struct cgraph_edge *edge;
struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;
-
+
/* mark node as old */
v_info->new_node = false;
splay_tree_remove (env->nodes_marked_new, v->uid);
-
+
v_info->dfn_number = env->count;
v_info->low_link = env->count;
env->count++;
env->stack[(env->stack_size)++] = v;
v_info->on_stack = true;
-
+
for (edge = v->callees; edge; edge = edge->next_callee)
{
struct ipa_dfs_info * w_info;
@@ -108,16 +108,16 @@ searchc (struct searchc_env* env, struct cgraph_node *v,
if (w->aux && cgraph_function_body_availability (edge->callee) > AVAIL_OVERWRITABLE)
{
w_info = (struct ipa_dfs_info *) w->aux;
- if (w_info->new_node)
+ if (w_info->new_node)
{
searchc (env, w, ignore_edge);
v_info->low_link =
(v_info->low_link < w_info->low_link) ?
v_info->low_link : w_info->low_link;
- }
- else
- if ((w_info->dfn_number < v_info->dfn_number)
- && (w_info->on_stack))
+ }
+ else
+ if ((w_info->dfn_number < v_info->dfn_number)
+ && (w_info->on_stack))
v_info->low_link =
(w_info->dfn_number < v_info->low_link) ?
w_info->dfn_number : v_info->low_link;
@@ -125,7 +125,7 @@ searchc (struct searchc_env* env, struct cgraph_node *v,
}
- if (v_info->low_link == v_info->dfn_number)
+ if (v_info->low_link == v_info->dfn_number)
{
struct cgraph_node *last = NULL;
struct cgraph_node *x;
@@ -134,17 +134,17 @@ searchc (struct searchc_env* env, struct cgraph_node *v,
x = env->stack[--(env->stack_size)];
x_info = (struct ipa_dfs_info *) x->aux;
x_info->on_stack = false;
-
- if (env->reduce)
+
+ if (env->reduce)
{
x_info->next_cycle = last;
last = x;
- }
- else
+ }
+ else
env->result[env->order_pos++] = x;
- }
+ }
while (v != x);
- if (env->reduce)
+ if (env->reduce)
env->result[env->order_pos++] = v;
}
}
@@ -155,7 +155,7 @@ searchc (struct searchc_env* env, struct cgraph_node *v,
nodes. Only consider nodes that have the output bit set. */
int
-ipa_utils_reduced_inorder (struct cgraph_node **order,
+ipa_utils_reduced_inorder (struct cgraph_node **order,
bool reduce, bool allow_overwritable,
bool (*ignore_edge) (struct cgraph_edge *))
{
@@ -169,13 +169,13 @@ ipa_utils_reduced_inorder (struct cgraph_node **order,
env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
env.count = 1;
env.reduce = reduce;
-
- for (node = cgraph_nodes; node; node = node->next)
+
+ for (node = cgraph_nodes; node; node = node->next)
{
enum availability avail = cgraph_function_body_availability (node);
if (avail > AVAIL_OVERWRITABLE
- || (allow_overwritable
+ || (allow_overwritable
&& (avail == AVAIL_OVERWRITABLE)))
{
/* Reuse the info if it is already there. */
@@ -186,12 +186,12 @@ ipa_utils_reduced_inorder (struct cgraph_node **order,
info->on_stack = false;
info->next_cycle = NULL;
node->aux = info;
-
+
splay_tree_insert (env.nodes_marked_new,
- (splay_tree_key)node->uid,
+ (splay_tree_key)node->uid,
(splay_tree_value)node);
- }
- else
+ }
+ else
node->aux = NULL;
}
result = splay_tree_min (env.nodes_marked_new);
@@ -215,7 +215,7 @@ ipa_utils_reduced_inorder (struct cgraph_node **order,
tree
get_base_var (tree t)
{
- while (!SSA_VAR_P (t)
+ while (!SSA_VAR_P (t)
&& (!CONSTANT_CLASS_P (t))
&& TREE_CODE (t) != LABEL_DECL
&& TREE_CODE (t) != FUNCTION_DECL
@@ -225,5 +225,5 @@ get_base_var (tree t)
t = TREE_OPERAND (t, 0);
}
return t;
-}
+}
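searchc above is Tarjan's strongly-connected-components algorithm applied to the callgraph: dfn_number, low_link and on_stack play their textbook roles. A minimal standalone sketch on an invented five-node graph (plain arrays instead of the cgraph and splay-tree bookkeeping) shows the same structure:

#include <stdio.h>

#define N 5

/* Edge i->j iff adj[i][j]; nodes 0, 1 and 2 form a cycle, 3 and 4 do not. */
static const int adj[N][N] = {
  { 0, 1, 0, 0, 0 },
  { 0, 0, 1, 0, 0 },
  { 1, 0, 0, 1, 0 },
  { 0, 0, 0, 0, 1 },
  { 0, 0, 0, 0, 0 },
};

static int dfn[N], low[N], on_stack[N];
static int stack[N], sp, counter;

static void
search (int v)
{
  int w;

  dfn[v] = low[v] = ++counter;   /* dfn_number / low_link in searchc */
  stack[sp++] = v;
  on_stack[v] = 1;

  for (w = 0; w < N; w++)
    if (adj[v][w])
      {
        if (!dfn[w])             /* a "new" node: recurse, then merge low_link */
          {
            search (w);
            if (low[w] < low[v])
              low[v] = low[w];
          }
        else if (dfn[w] < dfn[v] && on_stack[w] && dfn[w] < low[v])
          low[v] = dfn[w];       /* back edge to a node still on the stack */
      }

  if (low[v] == dfn[v])          /* v is the root of a component: pop it */
    {
      printf ("SCC:");
      do
        {
          w = stack[--sp];
          on_stack[w] = 0;
          printf (" %d", w);
        }
      while (w != v);
      printf ("\n");
    }
}

int
main (void)
{
  int v;

  for (v = 0; v < N; v++)
    if (!dfn[v])
      search (v);
  return 0;
}

Run on the graph above it prints the components {4}, {3} and {2 1 0}, the same bottom-up order ipa_utils_reduced_inorder relies on.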
diff --git a/gcc/ipa-utils.h b/gcc/ipa-utils.h
index e70a01688e2..fa18a4f0b98 100644
--- a/gcc/ipa-utils.h
+++ b/gcc/ipa-utils.h
@@ -40,7 +40,7 @@ int ipa_utils_reduced_inorder (struct cgraph_node **, bool, bool,
bool (*ignore_edge) (struct cgraph_edge *));
tree get_base_var (tree);
-
+
#endif /* GCC_IPA_UTILS_H */

diff --git a/gcc/ipa.c b/gcc/ipa.c
index 4d52ed404f3..1b68a7a4092 100644
--- a/gcc/ipa.c
+++ b/gcc/ipa.c
@@ -114,7 +114,7 @@ update_inlined_to_pointer (struct cgraph_node *node, struct cgraph_node *inlined
/* Perform reachability analysis and reclaim all unreachable nodes.
If BEFORE_INLINING_P is true this function is called before inlining
- decisions have been made. If BEFORE_INLINING_P is false this function also 
+ decisions have been made. If BEFORE_INLINING_P is false this function also
removes unneeded bodies of extern inline functions. */
bool
@@ -136,7 +136,7 @@ cgraph_remove_unreachable_nodes (bool before_inlining_p, FILE *file)
#endif
for (node = cgraph_nodes; node; node = node->next)
if (!cgraph_can_remove_if_no_direct_calls_p (node)
- && ((!DECL_EXTERNAL (node->decl))
+ && ((!DECL_EXTERNAL (node->decl))
|| !node->analyzed
|| before_inlining_p))
{
@@ -404,7 +404,7 @@ local_function_and_variable_visibility (void)
return function_and_variable_visibility (flag_whole_program && !flag_lto && !flag_whopr);
}
-struct simple_ipa_opt_pass pass_ipa_function_and_variable_visibility =
+struct simple_ipa_opt_pass pass_ipa_function_and_variable_visibility =
{
{
SIMPLE_IPA_PASS,
@@ -588,7 +588,7 @@ cgraph_node_set_remove (cgraph_node_set set, struct cgraph_node *node)
VEC_replace (cgraph_node_ptr, set->nodes, last_element->index,
last_node);
}
-
+
/* Remove element from hash table. */
htab_clear_slot (set->hashtab, slot);
ggc_free (element);
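cgraph_remove_unreachable_nodes is, at heart, a worklist reachability walk from the nodes that must be kept. A small self-contained sketch with an invented call matrix (not the real cgraph representation, which also tracks bodies, clones and availability):

#include <stdio.h>

#define N 6

/* calls[i][j] is nonzero when function i calls function j.  Functions 3 and 4
   call each other, but nothing reachable ever calls them. */
static const int calls[N][N] = {
  { 0, 1, 0, 0, 0, 0 },
  { 0, 0, 1, 0, 0, 0 },
  { 0, 0, 0, 0, 0, 0 },
  { 0, 0, 0, 0, 1, 0 },
  { 0, 0, 0, 1, 0, 0 },
  { 0, 0, 0, 0, 0, 0 },
};

/* Externally visible entry points: the nodes that must be kept. */
static const int root[N] = { 1, 0, 0, 0, 0, 1 };

int
main (void)
{
  int reachable[N] = { 0 }, worklist[N], n = 0, i, j;

  /* Seed the worklist with the roots, then propagate along call edges. */
  for (i = 0; i < N; i++)
    if (root[i])
      {
        reachable[i] = 1;
        worklist[n++] = i;
      }

  while (n > 0)
    {
      i = worklist[--n];
      for (j = 0; j < N; j++)
        if (calls[i][j] && !reachable[j])
          {
            reachable[j] = 1;
            worklist[n++] = j;
          }
    }

  for (i = 0; i < N; i++)
    if (!reachable[i])
      printf ("function %d is unreachable and would be removed\n", i);
  return 0;
}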
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index edb761b0d71..16909a9a42a 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -58,7 +58,7 @@ ira_loop_tree_node_t ira_bb_nodes;
array. */
ira_loop_tree_node_t ira_loop_nodes;
-/* Map regno -> allocnos with given regno (see comments for
+/* Map regno -> allocnos with given regno (see comments for
allocno member `next_regno_allocno'). */
ira_allocno_t *ira_regno_allocno_map;
@@ -848,7 +848,7 @@ ira_copy_allocno_live_range_list (allocno_live_range_t r)
/* Merge ranges R1 and R2 and returns the result. The function
maintains the order of ranges and tries to minimize number of the
result ranges. */
-allocno_live_range_t
+allocno_live_range_t
ira_merge_allocno_live_ranges (allocno_live_range_t r1,
allocno_live_range_t r2)
{
@@ -1379,7 +1379,7 @@ ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
if (preorder_func != NULL)
(*preorder_func) (loop_node);
-
+
if (bb_p)
for (subloop_node = loop_node->children;
subloop_node != NULL;
@@ -1388,11 +1388,11 @@ ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
{
if (preorder_func != NULL)
(*preorder_func) (subloop_node);
-
+
if (postorder_func != NULL)
(*postorder_func) (subloop_node);
}
-
+
for (subloop_node = loop_node->subloops;
subloop_node != NULL;
subloop_node = subloop_node->subloop_next)
@@ -1434,7 +1434,7 @@ create_insn_allocnos (rtx x, bool output_p)
if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
-
+
ALLOCNO_NREFS (a)++;
ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
if (output_p)
@@ -1458,7 +1458,7 @@ create_insn_allocnos (rtx x, bool output_p)
create_insn_allocnos (XEXP (x, 0), false);
return;
}
- else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
+ else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
{
create_insn_allocnos (XEXP (x, 0), true);
@@ -1549,7 +1549,7 @@ create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
if (e->src != loop_node->loop->latch)
create_loop_allocnos (e);
-
+
edges = get_loop_exit_edges (loop_node->loop);
for (i = 0; VEC_iterate (edge, edges, i, e); i++)
create_loop_allocnos (e);
@@ -1663,10 +1663,10 @@ low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
int i;
enum reg_class cover_class;
-
+
if (node->bb != NULL)
return false;
-
+
for (i = 0; i < ira_reg_class_cover_size; i++)
{
cover_class = ira_reg_class_cover[i];
@@ -1873,7 +1873,7 @@ ira_rebuild_regno_allocno_list (int regno)
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
regno_allocnos[n++] = a;
ira_assert (n > 0);
- qsort (regno_allocnos, n, sizeof (ira_allocno_t),
+ qsort (regno_allocnos, n, sizeof (ira_allocno_t),
regno_allocno_order_compare_func);
for (i = 1; i < n; i++)
ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
@@ -2302,7 +2302,7 @@ setup_min_max_conflict_allocno_ids (void)
[first_not_finished]))
first_not_finished++;
min = first_not_finished;
- }
+ }
if (min == i)
/* We could increase min further in this case but it is good
enough. */
@@ -2512,7 +2512,7 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
continue;
}
ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
-
+
if (ALLOCNO_MEM_OPTIMIZED_DEST (a) != NULL)
mem_dest_p = true;
if (REGNO (ALLOCNO_REG (a)) == REGNO (ALLOCNO_REG (parent_a)))
@@ -2623,7 +2623,7 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
ira_add_allocno_conflict (a, live_a);
}
}
-
+
for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (r->allocno));
}
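ira_merge_allocno_live_ranges keeps each allocno's live ranges ordered and as few as possible. A sketch of the same merge over plain arrays instead of the linked allocno_live_range_t chains (the interval values are invented):

#include <stdio.h>

struct range { int start, finish; };

/* Merge two start-ordered interval lists into DST, coalescing ranges that
   overlap or touch, and return how many merged ranges were produced. */
static int
merge_ranges (const struct range *a, int na,
              const struct range *b, int nb, struct range *dst)
{
  int i = 0, j = 0, n = 0;

  while (i < na || j < nb)
    {
      /* Take whichever remaining range starts first. */
      struct range r = (j >= nb || (i < na && a[i].start <= b[j].start))
                       ? a[i++] : b[j++];

      if (n > 0 && r.start <= dst[n - 1].finish + 1)
        {
          if (r.finish > dst[n - 1].finish)
            dst[n - 1].finish = r.finish;      /* extend the previous range */
        }
      else
        dst[n++] = r;                          /* start a new range */
    }
  return n;
}

int
main (void)
{
  static const struct range a[] = { { 1, 3 }, { 10, 12 } };
  static const struct range b[] = { { 4, 6 }, { 8, 9 }, { 20, 22 } };
  struct range out[5];
  int n = merge_ranges (a, 2, b, 3, out), i;

  for (i = 0; i < n; i++)
    printf ("[%d, %d]\n", out[i].start, out[i].finish);   /* [1,6] [8,12] [20,22] */
  return 0;
}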
diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 743ec3cd835..87237b5c90a 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -627,7 +627,7 @@ assign_hard_reg (ira_allocno_t allocno, bool retry_p)
if (a == allocno)
break;
}
- qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
+ qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
allocno_cost_compare_func);
for (i = 0; i < j; i++)
{
@@ -865,7 +865,7 @@ push_allocno_to_stack (ira_allocno_t allocno)
ira_allocno_t a, conflict_allocno;
enum reg_class cover_class;
ira_allocno_conflict_iterator aci;
-
+
ALLOCNO_IN_GRAPH_P (allocno) = false;
VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, allocno);
cover_class = ALLOCNO_COVER_CLASS (allocno);
@@ -1005,7 +1005,7 @@ push_allocno_to_spill (ira_allocno_t allocno)
}
/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
- loop given by its LOOP_NODE. */
+ loop given by its LOOP_NODE. */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
@@ -1084,7 +1084,7 @@ allocno_spill_priority_compare (splay_tree_key k1, splay_tree_key k2)
{
int pri1, pri2, diff;
ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;
-
+
pri1 = (ALLOCNO_TEMP (a1)
/ (ALLOCNO_LEFT_CONFLICTS_SIZE (a1)
* ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
@@ -1273,7 +1273,7 @@ push_allocnos_to_stack (void)
&& (allocno_pri > i_allocno_pri
|| (allocno_pri == i_allocno_pri
&& (allocno_cost > i_allocno_cost
- || (allocno_cost == i_allocno_cost
+ || (allocno_cost == i_allocno_cost
&& (ALLOCNO_NUM (allocno)
> ALLOCNO_NUM (i_allocno))))))))
{
@@ -1452,7 +1452,7 @@ setup_allocno_left_conflicts_size (ira_allocno_t allocno)
int last = (hard_regno
+ hard_regno_nregs
[hard_regno][ALLOCNO_MODE (conflict_allocno)]);
-
+
while (hard_regno < last)
{
if (! TEST_HARD_REG_BIT (temp_set, hard_regno))
@@ -1895,7 +1895,7 @@ print_loop_title (ira_loop_tree_node_t loop_tree_node)
for (j = 0; (int) j < ira_reg_class_cover_size; j++)
{
enum reg_class cover_class;
-
+
cover_class = ira_reg_class_cover[j];
if (loop_tree_node->reg_pressure[cover_class] == 0)
continue;
@@ -2078,7 +2078,7 @@ do_coloring (void)
100);
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
-
+
ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
@@ -2682,7 +2682,7 @@ ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
reg_max_ref_width[ALLOCNO_REGNO (a)]));
-
+
if (a == allocno)
break;
}
@@ -2990,7 +2990,7 @@ ira_reuse_stack_slot (int regno, unsigned int inherent_size,
if (slot->width < total_size
|| GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
continue;
-
+
EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
FIRST_PSEUDO_REGISTER, i, bi)
{
@@ -3173,7 +3173,7 @@ ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
int call_used_count, other_call_used_count;
int hard_regno, other_hard_regno;
- cost = calculate_spill_cost (regnos, in, out, insn,
+ cost = calculate_spill_cost (regnos, in, out, insn,
&length, &nrefs, &call_used_count, &hard_regno);
other_cost = calculate_spill_cost (other_regnos, in, out, insn,
&other_length, &other_nrefs,
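push_allocnos_to_stack and the later pop implement the usual push/pop scheme of Chaitin/Briggs-style coloring; IRA layers cover classes, bucket ordering and cost-based spill choices on top. A bare-bones sketch of just that skeleton, with an invented five-node conflict graph and two registers (the spill choice here is simply "first remaining node", far cruder than IRA's):

#include <stdio.h>

#define N 5
#define K 2                     /* available hard registers */

/* Symmetric conflict (interference) graph; 0, 1 and 2 form a triangle. */
static const int conflict[N][N] = {
  { 0, 1, 1, 0, 0 },
  { 1, 0, 1, 0, 0 },
  { 1, 1, 0, 1, 0 },
  { 0, 0, 1, 0, 1 },
  { 0, 0, 0, 1, 0 },
};

int
main (void)
{
  int removed[N] = { 0 }, color[N], stack[N], sp = 0, i, j, n;

  /* Push phase: prefer a node with fewer than K remaining neighbours
     (trivially colorable); otherwise push some node as a potential spill. */
  for (n = 0; n < N; n++)
    {
      int pick = -1;

      for (i = 0; i < N && pick < 0; i++)
        if (!removed[i])
          {
            int deg = 0;
            for (j = 0; j < N; j++)
              if (!removed[j] && conflict[i][j])
                deg++;
            if (deg < K)
              pick = i;
          }
      if (pick < 0)
        for (i = 0; i < N; i++)
          if (!removed[i])
            {
              pick = i;
              break;
            }
      removed[pick] = 1;
      stack[sp++] = pick;
    }

  /* Pop phase: give each node the first register not used by an
     already-colored neighbour, or spill it if none is free. */
  for (i = 0; i < N; i++)
    color[i] = -1;
  while (sp > 0)
    {
      int v = stack[--sp], c, used[K] = { 0 };

      for (j = 0; j < N; j++)
        if (conflict[v][j] && color[j] >= 0)
          used[color[j]] = 1;
      for (c = 0; c < K && used[c]; c++)
        ;
      color[v] = c < K ? c : -1;
      if (color[v] < 0)
        printf ("node %d spilled\n", v);
      else
        printf ("node %d -> reg %d\n", v, color[v]);
    }
  return 0;
}

With two registers the triangle 0-1-2 cannot be fully colored, so one of those nodes is genuinely spilled while 3 and 4 get registers.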
diff --git a/gcc/ira-conflicts.c b/gcc/ira-conflicts.c
index 6d84e5643b5..251b527661f 100644
--- a/gcc/ira-conflicts.c
+++ b/gcc/ira-conflicts.c
@@ -152,7 +152,7 @@ build_conflict_bit_table (void)
}
}
}
-
+
for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (r->allocno));
}
@@ -235,7 +235,7 @@ get_dup_num (int op_num, bool use_commut_op_p)
{
case 'X':
return -1;
-
+
case 'm':
case 'o':
/* Accept a register which might be placed in memory. */
@@ -254,7 +254,7 @@ get_dup_num (int op_num, bool use_commut_op_p)
case 'g':
return -1;
-
+
case 'r':
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
case 'h': case 'j': case 'k': case 'l':
@@ -276,7 +276,7 @@ get_dup_num (int op_num, bool use_commut_op_p)
#endif
break;
}
-
+
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
if (original != -1 && original != c)
@@ -389,7 +389,7 @@ process_regs_for_copy (rtx reg1, rtx reg2, bool constraint_p,
ira_curr_regno_allocno_map[REGNO (reg2)],
freq, constraint_p, insn,
ira_curr_loop_tree_node);
- bitmap_set_bit (ira_curr_loop_tree_node->local_copies, cp->num);
+ bitmap_set_bit (ira_curr_loop_tree_node->local_copies, cp->num);
return true;
}
else
@@ -446,11 +446,11 @@ process_reg_shuffles (rtx reg, int op_num, int freq)
for (i = 0; i < recog_data.n_operands; i++)
{
another_reg = recog_data.operand[i];
-
+
if (!REG_SUBREG_P (another_reg) || op_num == i
|| recog_data.operand_type[i] != OP_OUT)
continue;
-
+
process_regs_for_copy (reg, another_reg, false, NULL_RTX, freq);
}
}
@@ -465,7 +465,7 @@ add_insn_allocno_copies (rtx insn)
const char *str;
bool commut_p, bound_p;
int i, j, freq;
-
+
freq = REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn));
if (freq == 0)
freq = 1;
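build_conflict_bit_table records a conflict between two allocnos when their live ranges overlap, which it detects point by point via the start/finish range chains. A tiny standalone version of that overlap test over arrays of ranges (the range values are invented):

#include <stdio.h>

struct range { int start, finish; };

/* Two allocnos conflict when some pair of their live ranges overlaps. */
static int
ranges_conflict_p (const struct range *a, int na,
                   const struct range *b, int nb)
{
  int i, j;

  for (i = 0; i < na; i++)
    for (j = 0; j < nb; j++)
      if (a[i].start <= b[j].finish && b[j].start <= a[i].finish)
        return 1;
  return 0;
}

int
main (void)
{
  static const struct range r0[] = { { 0, 4 }, { 10, 12 } };
  static const struct range r1[] = { { 5, 9 } };
  static const struct range r2[] = { { 3, 6 } };

  printf ("r0/r1 conflict: %d\n", ranges_conflict_p (r0, 2, r1, 1));  /* 0 */
  printf ("r0/r2 conflict: %d\n", ranges_conflict_p (r0, 2, r2, 1));  /* 1 */
  return 0;
}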
diff --git a/gcc/ira-costs.c b/gcc/ira-costs.c
index f74a2cac433..9e11219ce01 100644
--- a/gcc/ira-costs.c
+++ b/gcc/ira-costs.c
@@ -367,7 +367,7 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
continue;
}
}
-
+
/* Scan all the constraint letters. See if the operand
matches any of the constraints. Collect the valid
register classes and see if this operand accepts
@@ -651,7 +651,7 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
{
ira_allocno_t a;
rtx op = ops[i];
-
+
if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
continue;
a = ira_curr_regno_allocno_map [REGNO (op)];
@@ -703,7 +703,7 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
regno + nr))
break;
-
+
if (nr == (unsigned) hard_regno_nregs[regno][mode])
op_costs[i]->cost[k] = -frequency;
}
@@ -1180,7 +1180,7 @@ find_costs_and_classes (FILE *dump_file)
{
ira_allocno_t a;
ira_allocno_iterator ai;
-
+
pref = pref_buffer;
FOR_EACH_ALLOCNO (a, ai)
pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
@@ -1375,7 +1375,7 @@ find_costs_and_classes (FILE *dump_file)
if (regno_cover_class[i] == NO_REGS)
best = NO_REGS;
else
- {
+ {
/* Finding best class which is subset of the common
class. */
best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
@@ -1435,7 +1435,7 @@ find_costs_and_classes (FILE *dump_file)
pref[a_num] = best;
}
}
-
+
if (internal_flag_ira_verbose > 4 && dump_file)
{
if (allocno_p)
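find_costs_and_classes ends by picking, for every allocno, the register class with the lowest accumulated cost. Stripped of the subset and cover-class filtering, the core is a plain argmin; the class names and costs below are invented:

#include <stdio.h>

#define NCLASSES 4

int
main (void)
{
  static const char *const name[NCLASSES] =
    { "NO_REGS", "GENERAL_REGS", "FLOAT_REGS", "ALL_REGS" };
  static const int cost[NCLASSES] = { 1000, 12, 40, 26 };   /* invented costs */
  int best = 0, i;

  /* Keep the lowest-cost class, preferring the earlier one on ties. */
  for (i = 1; i < NCLASSES; i++)
    if (cost[i] < cost[best])
      best = i;

  printf ("best class: %s (cost %d)\n", name[best], cost[best]);
  return 0;
}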
diff --git a/gcc/ira-emit.c b/gcc/ira-emit.c
index 1d4d8ea224f..9d66bfe48ff 100644
--- a/gcc/ira-emit.c
+++ b/gcc/ira-emit.c
@@ -109,7 +109,7 @@ static void
free_move_list (move_t head)
{
move_t next;
-
+
for (; head != NULL; head = next)
{
next = head->next;
@@ -450,7 +450,7 @@ change_loop (ira_loop_tree_node_t node)
if (node != ira_loop_tree_root)
{
-
+
if (node->bb != NULL)
{
FOR_BB_INSNS (node->bb, insn)
@@ -461,12 +461,12 @@ change_loop (ira_loop_tree_node_t node)
}
return;
}
-
+
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file,
" Changing RTL for loop %d (header bb%d)\n",
node->loop->num, node->loop->header->index);
-
+
parent = ira_curr_loop_tree_node->parent;
map = parent->regno_allocno_map;
EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
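free_move_list above walks a singly linked list and must read the next pointer before releasing the node it came from. A self-contained re-creation of that pattern (the structure here is reduced to two dummy fields; the real move structure carries allocnos and insns):

#include <stdlib.h>

/* Reduced stand-in for the emit-pass move structure. */
struct move
{
  struct move *next;
  int from, to;
};

/* Free a singly linked move list; NEXT is read before the node is released
   so the walk never touches freed memory. */
static void
free_moves (struct move *head)
{
  struct move *next;

  for (; head != NULL; head = next)
    {
      next = head->next;
      free (head);
    }
}

int
main (void)
{
  struct move *head = NULL;
  int i;

  for (i = 0; i < 3; i++)
    {
      struct move *m = (struct move *) malloc (sizeof *m);
      m->from = i;
      m->to = i + 1;
      m->next = head;
      head = m;
    }
  free_moves (head);
  return 0;
}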
diff --git a/gcc/ira-int.h b/gcc/ira-int.h
index 1327f945149..a32c837ac0f 100644
--- a/gcc/ira-int.h
+++ b/gcc/ira-int.h
@@ -482,7 +482,7 @@ struct ira_allocno
#define ALLOCNO_MAX(A) ((A)->max)
#define ALLOCNO_CONFLICT_ID(A) ((A)->conflict_id)
-/* Map regno -> allocnos with given regno (see comments for
+/* Map regno -> allocnos with given regno (see comments for
allocno member `next_regno_allocno'). */
extern ira_allocno_t *ira_regno_allocno_map;
@@ -590,7 +590,7 @@ extern int ira_max_nregs;
} \
((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
|= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
-
+
#define CLEAR_ALLOCNO_SET_BIT(R, I, MIN, MAX) __extension__ \
(({ int _min = (MIN), _max = (MAX), _i = (I); \
@@ -680,18 +680,18 @@ ira_allocno_set_iter_cond (ira_allocno_set_iterator *i, int *n)
{
i->word_num++;
i->bit_num = i->word_num * IRA_INT_BITS;
-
+
/* If we have reached the end, break. */
if (i->bit_num >= i->nel)
return false;
}
-
+
/* Skip bits that are zero. */
for (; (i->word & 1) == 0; i->word >>= 1)
i->bit_num++;
-
+
*n = (int) i->bit_num + i->start_val;
-
+
return true;
}
@@ -1086,20 +1086,20 @@ ira_allocno_conflict_iter_cond (ira_allocno_conflict_iterator *i,
for (; i->word == 0; i->word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
{
i->word_num++;
-
+
/* If we have reached the end, break. */
if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
return false;
-
+
i->bit_num = i->word_num * IRA_INT_BITS;
}
-
+
/* Skip bits that are zero. */
for (; (i->word & 1) == 0; i->word >>= 1)
i->bit_num++;
-
+
*a = ira_conflict_id_allocno_map[i->bit_num + i->base_conflict_id];
-
+
return true;
}
}
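The allocno-set and conflict iterators above advance by skipping whole zero words and then shifting through the bits of a non-zero word. A standalone sketch of the same bit walk, collapsed into one helper instead of the init/cond pair (the example assumes 32-bit unsigned int for the printed bit numbers):

#include <stdio.h>

#define BITS_PER_WORD (8 * (int) sizeof (unsigned int))

/* Call FN once for every set bit of the NWORDS-word vector VEC: skip zero
   words wholesale, then shift through the bits of each non-zero word. */
static void
for_each_set_bit (const unsigned int *vec, int nwords, void (*fn) (int))
{
  int w;

  for (w = 0; w < nwords; w++)
    {
      unsigned int word = vec[w];
      int bit = w * BITS_PER_WORD;

      for (; word != 0; word >>= 1, bit++)
        if (word & 1)
          fn (bit);
    }
}

static void
print_bit (int bit)
{
  printf ("%d ", bit);
}

int
main (void)
{
  /* With 32-bit unsigned int this is bits 1, 4, 32 and 63. */
  static const unsigned int vec[2] = { 0x12u, 0x80000001u };

  for_each_set_bit (vec, 2, print_bit);
  printf ("\n");
  return 0;
}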
diff --git a/gcc/ira-lives.c b/gcc/ira-lives.c
index ea241f4732c..4302598233a 100644
--- a/gcc/ira-lives.c
+++ b/gcc/ira-lives.c
@@ -234,7 +234,7 @@ clear_allocno_live (ira_allocno_t a)
if (high_pressure_start_point[cl] >= 0
&& curr_reg_pressure[cl] <= ira_available_class_regs[cl])
high_pressure_start_point[cl] = -1;
-
+
}
}
sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (a));
@@ -416,10 +416,10 @@ make_pseudo_conflict (rtx reg, enum reg_class cl, rtx dreg, bool advance_p)
if (GET_CODE (reg) == SUBREG)
reg = SUBREG_REG (reg);
-
+
if (! REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
return advance_p;
-
+
a = ira_curr_regno_allocno_map[REGNO (reg)];
if (! reg_classes_intersect_p (cl, ALLOCNO_COVER_CLASS (a)))
return advance_p;
@@ -447,7 +447,7 @@ check_and_make_def_use_conflict (rtx dreg, enum reg_class def_cl,
{
if (! reg_classes_intersect_p (def_cl, use_cl))
return advance_p;
-
+
advance_p = make_pseudo_conflict (recog_data.operand[use],
use_cl, dreg, advance_p);
/* Reload may end up swapping commutative operands, so you
@@ -480,41 +480,41 @@ check_and_make_def_conflict (int alt, int def, enum reg_class def_cl)
enum reg_class use_cl, acl;
bool advance_p;
rtx dreg = recog_data.operand[def];
-
+
if (def_cl == NO_REGS)
return;
-
+
if (GET_CODE (dreg) == SUBREG)
dreg = SUBREG_REG (dreg);
-
+
if (! REG_P (dreg) || REGNO (dreg) < FIRST_PSEUDO_REGISTER)
return;
-
+
a = ira_curr_regno_allocno_map[REGNO (dreg)];
acl = ALLOCNO_COVER_CLASS (a);
if (! reg_classes_intersect_p (acl, def_cl))
return;
-
+
advance_p = true;
-
+
for (use = 0; use < recog_data.n_operands; use++)
{
if (use == def || recog_data.operand_type[use] == OP_OUT)
continue;
-
+
if (recog_op_alt[use][alt].anything_ok)
use_cl = ALL_REGS;
else
use_cl = recog_op_alt[use][alt].cl;
-
+
advance_p = check_and_make_def_use_conflict (dreg, def_cl, use,
use_cl, advance_p);
-
+
if ((use_match = recog_op_alt[use][alt].matches) >= 0)
{
if (use_match == def)
continue;
-
+
if (recog_op_alt[use_match][alt].anything_ok)
use_cl = ALL_REGS;
else
@@ -573,7 +573,7 @@ mark_hard_reg_early_clobbers (rtx insn, bool live_p)
if (DF_REF_FLAGS_IS_SET (*def_rec, DF_REF_MUST_CLOBBER))
{
rtx dreg = DF_REF_REG (*def_rec);
-
+
if (GET_CODE (dreg) == SUBREG)
dreg = SUBREG_REG (dreg);
if (! REG_P (dreg) || REGNO (dreg) >= FIRST_PSEUDO_REGISTER)
@@ -581,7 +581,7 @@ mark_hard_reg_early_clobbers (rtx insn, bool live_p)
/* Hard register clobbers are believed to be early clobber
because there is no way to say that non-operand hard
- register clobbers are not early ones. */
+ register clobbers are not early ones. */
if (live_p)
mark_ref_live (*def_rec);
else
@@ -638,7 +638,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
&& GET_MODE (equiv_const) == VOIDmode))))
return NO_REGS;
break;
-
+
case 's':
if ((CONSTANT_P (op) && !CONST_INT_P (op)
&& (GET_CODE (op) != CONST_DOUBLE || GET_MODE (op) != VOIDmode))
@@ -649,7 +649,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
|| GET_MODE (equiv_const) != VOIDmode)))
return NO_REGS;
break;
-
+
case 'I':
case 'J':
case 'K':
@@ -666,7 +666,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
c, constraints)))
return NO_REGS;
break;
-
+
case 'E':
case 'F':
if (GET_CODE (op) == CONST_DOUBLE
@@ -679,7 +679,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
== MODE_VECTOR_FLOAT)))))
return NO_REGS;
break;
-
+
case 'G':
case 'H':
if ((GET_CODE (op) == CONST_DOUBLE
@@ -707,7 +707,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
return NO_REGS;
cl = next_cl;
break;
-
+
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
next_cl
@@ -720,7 +720,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
return NO_REGS;
cl = next_cl;
break;
-
+
default:
return NO_REGS;
}
@@ -758,7 +758,7 @@ ira_implicitly_set_insn_hard_regs (HARD_REG_SET *set)
if (GET_CODE (op) == SUBREG)
op = SUBREG_REG (op);
-
+
if (GET_CODE (op) == SCRATCH
|| (REG_P (op) && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER))
{
@@ -825,7 +825,7 @@ process_single_reg_class_operands (bool in_p, int freq)
if (GET_CODE (operand) == SUBREG)
operand = SUBREG_REG (operand);
-
+
if (REG_P (operand)
&& (regno = REGNO (operand)) >= FIRST_PSEUDO_REGISTER)
{
@@ -883,7 +883,7 @@ bb_has_abnormal_call_pred (basic_block bb)
{
edge e;
edge_iterator ei;
-
+
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
@@ -926,7 +926,7 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
if (TEST_HARD_REG_BIT (hard_regs_live, i))
{
enum reg_class cover_class, cl;
-
+
cover_class = ira_class_translate[REGNO_REG_CLASS (i)];
for (j = 0;
(cl = ira_reg_class_super_classes[cover_class][j])
@@ -943,14 +943,14 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
EXECUTE_IF_SET_IN_BITMAP (reg_live_out, FIRST_PSEUDO_REGISTER, j, bi)
{
ira_allocno_t a = ira_curr_regno_allocno_map[j];
-
+
if (a == NULL)
continue;
ira_assert (! sparseset_bit_p (allocnos_live, ALLOCNO_NUM (a)));
set_allocno_live (a);
make_regno_born (j);
}
-
+
freq = REG_FREQ_FROM_BB (bb);
if (freq == 0)
freq = 1;
@@ -971,10 +971,10 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
{
df_ref *def_rec, *use_rec;
bool call_p;
-
+
if (!NONDEBUG_INSN_P (insn))
continue;
-
+
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Insn %u(l%d): point = %d\n",
INSN_UID (insn), loop_tree_node->parent->loop->num,
@@ -1027,11 +1027,11 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
}
}
}
-
+
extract_insn (insn);
preprocess_constraints ();
process_single_reg_class_operands (false, freq);
-
+
/* See which defined values die here. */
for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
if (!call_p || !DF_REF_FLAGS_IS_SET (*def_rec, DF_REF_MAY_CLOBBER))
@@ -1044,7 +1044,7 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, i)
{
ira_allocno_t a = ira_allocnos[i];
-
+
if (allocno_saved_at_call[i] != last_call_num)
/* Here we are mimicking caller-save.c behaviour
which does not save hard register at a call if
@@ -1074,7 +1074,7 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
}
}
}
-
+
make_early_clobber_and_input_conflicts ();
curr_point++;
@@ -1084,7 +1084,7 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
mark_ref_live (*use_rec);
process_single_reg_class_operands (true, freq);
-
+
set_p = mark_hard_reg_early_clobbers (insn, true);
if (set_p)
@@ -1097,12 +1097,12 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
{
rtx ureg = DF_REF_REG (*use_rec);
-
+
if (GET_CODE (ureg) == SUBREG)
ureg = SUBREG_REG (ureg);
if (! REG_P (ureg) || REGNO (ureg) >= FIRST_PSEUDO_REGISTER)
continue;
-
+
mark_ref_live (*use_rec);
}
}
@@ -1223,7 +1223,7 @@ remove_some_program_points_and_update_live_ranges (void)
allocno_live_range_t r;
bitmap born_or_died;
bitmap_iterator bi;
-
+
born_or_died = ira_allocate_bitmap ();
FOR_EACH_ALLOCNO (a, ai)
{
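process_bb_node_lives numbers program points, opens a live range when an allocno becomes live, closes it at the last point it is seen, and checks register pressure against the available registers. A much-simplified sketch over a liveness matrix (the matrix and register count are invented, and real IRA keeps pressure per cover class rather than one global count):

#include <stdio.h>

#define NREGS 3
#define NPOINTS 6

int
main (void)
{
  /* live[p][r] is nonzero when pseudo R is live at program point P. */
  static const int live[NPOINTS][NREGS] = {
    { 1, 0, 0 }, { 1, 1, 0 }, { 1, 1, 1 }, { 0, 1, 1 }, { 0, 0, 1 }, { 0, 0, 0 },
  };
  int start[NREGS], finish[NREGS], r, p, peak = 0;

  for (r = 0; r < NREGS; r++)
    {
      start[r] = -1;
      finish[r] = -1;
    }

  for (p = 0; p < NPOINTS; p++)
    {
      int pressure = 0;

      for (r = 0; r < NREGS; r++)
        if (live[p][r])
          {
            pressure++;
            if (start[r] < 0)
              start[r] = p;     /* first point at which R is live */
            finish[r] = p;      /* last point seen so far */
          }
      if (pressure > peak)
        peak = pressure;
    }

  for (r = 0; r < NREGS; r++)
    printf ("pseudo %d: live range [%d, %d]\n", r, start[r], finish[r]);
  printf ("peak register pressure: %d\n", peak);
  return 0;
}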
diff --git a/gcc/ira.c b/gcc/ira.c
index a3e899f8313..7165caa1b18 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -167,7 +167,7 @@ along with GCC; see the file COPYING3. If not see
o Coloring. Now IRA has all necessary info to start graph coloring
process. It is done in each region on top-down traverse of the
region tree (file ira-color.c). There are following subpasses:
-
+
* Optional aggressive coalescing of allocnos in the region.
* Putting allocnos onto the coloring stack. IRA uses Briggs
@@ -447,7 +447,7 @@ setup_class_hard_regs (void)
hard_regno = reg_alloc_order[i];
#else
hard_regno = i;
-#endif
+#endif
if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
continue;
SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
@@ -843,7 +843,7 @@ setup_class_translate (void)
for (cl = 0; cl < N_REG_CLASSES; cl++)
ira_class_translate[cl] = NO_REGS;
-
+
if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
for (cl = 0; cl < LIM_REG_CLASSES; cl++)
{
@@ -852,7 +852,7 @@ setup_class_translate (void)
for (i = 0; i < ira_reg_class_cover_size; i++)
{
HARD_REG_SET temp_hard_regset2;
-
+
cover_class = ira_reg_class_cover[i];
COPY_HARD_REG_SET (temp_hard_regset2,
reg_class_contents[cover_class]);
@@ -921,7 +921,7 @@ setup_class_translate (void)
}
/* Order numbers of cover classes in original target cover class
- array, -1 for non-cover classes. */
+ array, -1 for non-cover classes. */
static int cover_class_order[N_REG_CLASSES];
/* The function used to sort the important classes. */
@@ -1072,7 +1072,7 @@ setup_reg_class_relations (void)
AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
if (ira_reg_class_union[cl1][cl2] == NO_REGS
|| (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
-
+
&& (! hard_reg_set_equal_p (temp_set2,
temp_hard_regset)
/* Ignore unavailable hard registers and
@@ -1151,7 +1151,7 @@ setup_hard_regno_cover_class (void)
break;
}
}
-
+
}
}
@@ -1392,7 +1392,7 @@ compute_regs_asm_clobbered (char *regs_asm_clobbered)
basic_block bb;
memset (regs_asm_clobbered, 0, sizeof (char) * FIRST_PSEUDO_REGISTER);
-
+
FOR_EACH_BB (bb)
{
rtx insn;
@@ -1409,7 +1409,7 @@ compute_regs_asm_clobbered (char *regs_asm_clobbered)
{
unsigned int i;
enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (def));
- unsigned int end = dregno
+ unsigned int end = dregno
+ hard_regno_nregs[dregno][mode] - 1;
for (i = dregno; i <= end; ++i)
@@ -1534,12 +1534,12 @@ find_reg_equiv_invariant_const (void)
{
insn = XEXP (list, 0);
note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
-
+
if (note == NULL_RTX)
continue;
x = XEXP (note, 0);
-
+
if (! function_invariant_p (x)
|| ! flag_pic
/* A function invariant is often CONSTANT_P but may
@@ -1663,7 +1663,7 @@ calculate_allocation_cost (void)
ira_assert (hard_regno < 0
|| ! ira_hard_reg_not_in_set_p
(hard_regno, ALLOCNO_MODE (a),
- reg_class_contents[ALLOCNO_COVER_CLASS (a)]));
+ reg_class_contents[ALLOCNO_COVER_CLASS (a)]));
if (hard_regno < 0)
{
cost = ALLOCNO_MEMORY_COST (a);
@@ -1742,7 +1742,7 @@ fix_reg_equiv_init (void)
int max_regno = max_reg_num ();
int i, new_regno;
rtx x, prev, next, insn, set;
-
+
if (reg_equiv_init_size < max_regno)
{
reg_equiv_init
@@ -1791,7 +1791,7 @@ print_redundant_copies (void)
ira_allocno_t a;
ira_copy_t cp, next_cp;
ira_allocno_iterator ai;
-
+
FOR_EACH_ALLOCNO (a, ai)
{
if (ALLOCNO_CAP_MEMBER (a) != NULL)
@@ -1828,7 +1828,7 @@ setup_preferred_alternate_classes_for_new_pseudos (int start)
for (i = start; i < max_regno; i++)
{
old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
- ira_assert (i != old_regno);
+ ira_assert (i != old_regno);
setup_reg_classes (i, reg_preferred_class (old_regno),
reg_alternate_class (old_regno),
reg_cover_class (old_regno));
@@ -1862,7 +1862,7 @@ too_high_register_pressure_p (void)
{
int i;
enum reg_class cover_class;
-
+
for (i = 0; i < ira_reg_class_cover_size; i++)
{
cover_class = ira_reg_class_cover[i];
@@ -2237,7 +2237,7 @@ memref_used_between_p (rtx memref, rtx start, rtx end)
{
if (!NONDEBUG_INSN_P (insn))
continue;
-
+
if (memref_referenced_p (memref, PATTERN (insn)))
return 1;
@@ -2303,7 +2303,7 @@ update_equiv_regs (void)
basic_block bb;
int loop_depth;
bitmap cleared_regs;
-
+
/* We need to keep track of whether or not we recorded a LABEL_REF so
that we know if the jump optimizer needs to be rerun. */
recorded_label_ref = 0;
@@ -2791,7 +2791,7 @@ init_live_subregs (bool init_value, sbitmap *live_subregs,
to init all of the subregs to ones else init to 0. */
if (init_value)
sbitmap_ones (live_subregs[allocnum]);
- else
+ else
sbitmap_zero (live_subregs[allocnum]);
/* Set the number of bits that we really want. */
@@ -2827,10 +2827,10 @@ build_insn_chain (void)
{
bitmap_iterator bi;
rtx insn;
-
+
CLEAR_REG_SET (live_relevant_regs);
memset (live_subregs_used, 0, max_regno * sizeof (int));
-
+
EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb), 0, i, bi)
{
if (i >= FIRST_PSEUDO_REGISTER)
@@ -2858,7 +2858,7 @@ build_insn_chain (void)
next = c;
*p = c;
p = &c->prev;
-
+
c->insn = insn;
c->block = bb->index;
@@ -2867,7 +2867,7 @@ build_insn_chain (void)
{
df_ref def = *def_rec;
unsigned int regno = DF_REF_REGNO (def);
-
+
/* Ignore may clobbers because these are generated
from calls. However, every other kind of def is
added to dead_or_set. */
@@ -2895,11 +2895,11 @@ build_insn_chain (void)
&& !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
{
unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start
+ unsigned int last = start
+ GET_MODE_SIZE (GET_MODE (reg));
init_live_subregs
- (bitmap_bit_p (live_relevant_regs, regno),
+ (bitmap_bit_p (live_relevant_regs, regno),
live_subregs, live_subregs_used, regno, reg);
if (!DF_REF_FLAGS_IS_SET
@@ -2922,7 +2922,7 @@ build_insn_chain (void)
RESET_BIT (live_subregs[regno], start);
start++;
}
-
+
if (sbitmap_empty_p (live_subregs[regno]))
{
live_subregs_used[regno] = 0;
@@ -2949,7 +2949,7 @@ build_insn_chain (void)
}
}
}
-
+
bitmap_and_compl_into (live_relevant_regs, elim_regset);
bitmap_copy (&c->live_throughout, live_relevant_regs);
@@ -2959,18 +2959,18 @@ build_insn_chain (void)
df_ref use = *use_rec;
unsigned int regno = DF_REF_REGNO (use);
rtx reg = DF_REF_REG (use);
-
+
/* DF_REF_READ_WRITE on a use means that this use
is fabricated from a def that is a partial set
to a multiword reg. Here, we only model the
subreg case that is not wrapped in ZERO_EXTRACT
precisely so we do not need to look at the
fabricated use. */
- if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
- && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
+ if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
+ && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
&& DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
continue;
-
+
/* Add the last use of each var to dead_or_set. */
if (!bitmap_bit_p (live_relevant_regs, regno))
{
@@ -2982,23 +2982,23 @@ build_insn_chain (void)
else if (pseudo_for_reload_consideration_p (regno))
bitmap_set_bit (&c->dead_or_set, regno);
}
-
+
if (regno < FIRST_PSEUDO_REGISTER
|| pseudo_for_reload_consideration_p (regno))
{
if (GET_CODE (reg) == SUBREG
&& !DF_REF_FLAGS_IS_SET (use,
DF_REF_SIGN_EXTRACT
- | DF_REF_ZERO_EXTRACT))
+ | DF_REF_ZERO_EXTRACT))
{
unsigned int start = SUBREG_BYTE (reg);
- unsigned int last = start
+ unsigned int last = start
+ GET_MODE_SIZE (GET_MODE (reg));
-
+
init_live_subregs
- (bitmap_bit_p (live_relevant_regs, regno),
+ (bitmap_bit_p (live_relevant_regs, regno),
live_subregs, live_subregs_used, regno, reg);
-
+
/* Ignore the paradoxical bits. */
if ((int)last > live_subregs_used[regno])
last = live_subregs_used[regno];
@@ -3025,12 +3025,12 @@ build_insn_chain (void)
labels and jump tables that are just hanging out in between
the basic blocks. See pr33676. */
insn = BB_HEAD (bb);
-
+
/* Skip over the barriers and cruft. */
- while (insn && (BARRIER_P (insn) || NOTE_P (insn)
+ while (insn && (BARRIER_P (insn) || NOTE_P (insn)
|| BLOCK_FOR_INSN (insn) == bb))
insn = PREV_INSN (insn);
-
+
/* While we add anything except barriers and notes, the focus is
to get the labels and jump tables into the
reload_insn_chain. */
@@ -3040,19 +3040,19 @@ build_insn_chain (void)
{
if (BLOCK_FOR_INSN (insn))
break;
-
+
c = new_insn_chain ();
c->next = next;
next = c;
*p = c;
p = &c->prev;
-
+
/* The block makes no sense here, but it is what the old
code did. */
c->block = bb->index;
c->insn = insn;
bitmap_copy (&c->live_throughout, live_relevant_regs);
- }
+ }
insn = PREV_INSN (insn);
}
}
@@ -3145,7 +3145,7 @@ ira (FILE *f)
#endif
bitmap_obstack_initialize (&ira_bitmap_obstack);
if (optimize)
- {
+ {
max_regno = max_reg_num ();
ira_reg_equiv_len = max_regno;
ira_reg_equiv_invariant_p
@@ -3165,39 +3165,39 @@ ira (FILE *f)
max_regno_before_ira = allocated_reg_info_size = max_reg_num ();
ira_setup_eliminable_regset ();
-
+
ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
ira_move_loops_num = ira_additional_jumps_num = 0;
-
+
ira_assert (current_loops == NULL);
flow_loops_find (&ira_loops);
current_loops = &ira_loops;
-
+
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "Building IRA IR\n");
loops_p = ira_build (optimize
&& (flag_ira_region == IRA_REGION_ALL
|| flag_ira_region == IRA_REGION_MIXED));
-
+
ira_assert (ira_conflicts_p || !loops_p);
saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
if (too_high_register_pressure_p ())
/* It is just wasting compiler's time to pack spilled pseudos into
- stack slots in this case -- prohibit it. */
+ stack slots in this case -- prohibit it. */
flag_ira_share_spill_slots = FALSE;
ira_color ();
-
+
ira_max_point_before_emit = ira_max_point;
-
+
ira_emit (loops_p);
-
+
if (ira_conflicts_p)
{
max_regno = max_reg_num ();
-
+
if (! loops_p)
ira_initiate_assign ();
else
@@ -3206,14 +3206,14 @@ ira (FILE *f)
setup_preferred_alternate_classes_for_new_pseudos
(allocated_reg_info_size);
allocated_reg_info_size = max_regno;
-
+
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
fprintf (ira_dump_file, "Flattening IR\n");
ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
/* New insns were generated: add notes and recalculate live
info. */
df_analyze ();
-
+
flow_loops_find (&ira_loops);
current_loops = &ira_loops;
@@ -3224,17 +3224,17 @@ ira (FILE *f)
}
setup_reg_renumber ();
-
+
calculate_allocation_cost ();
-
+
#ifdef ENABLE_IRA_CHECKING
if (ira_conflicts_p)
check_allocation ();
#endif
-
+
delete_trivially_dead_insns (get_insns (), max_reg_num ());
max_regno = max_reg_num ();
-
+
/* And the reg_equiv_memory_loc array. */
VEC_safe_grow (rtx, gc, reg_equiv_memory_loc_vec, max_regno);
memset (VEC_address (rtx, reg_equiv_memory_loc_vec), 0,
@@ -3255,7 +3255,7 @@ ira (FILE *f)
if (ira_conflicts_p)
{
fix_reg_equiv_init ();
-
+
#ifdef ENABLE_IRA_CHECKING
print_redundant_copies ();
#endif
@@ -3268,7 +3268,7 @@ ira (FILE *f)
memset (ira_spilled_reg_stack_slots, 0,
max_regno * sizeof (struct ira_spilled_reg_stack_slot));
}
-
+
timevar_pop (TV_IRA);
timevar_push (TV_RELOAD);
@@ -3286,15 +3286,15 @@ ira (FILE *f)
if (ira_conflicts_p)
{
ira_free (ira_spilled_reg_stack_slots);
-
+
ira_finish_assign ();
-
- }
+
+ }
if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
&& overall_cost_before != ira_overall_cost)
fprintf (ira_dump_file, "+++Overall after reload %d\n", ira_overall_cost);
ira_destroy ();
-
+
flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
flow_loops_free (&ira_loops);
@@ -3305,11 +3305,11 @@ ira (FILE *f)
regstat_free_ri ();
regstat_free_n_sets_and_refs ();
-
+
if (optimize)
{
cleanup_cfg (CLEANUP_EXPENSIVE);
-
+
ira_free (ira_reg_equiv_invariant_p);
ira_free (ira_reg_equiv_const);
}
diff --git a/gcc/jump.c b/gcc/jump.c
index 6ebc7ef1cd7..c95c2116ba6 100644
--- a/gcc/jump.c
+++ b/gcc/jump.c
@@ -233,7 +233,7 @@ mark_all_labels (rtx f)
&& (rtx_equal_p (label_dest, XEXP (pc_src, 1))
|| rtx_equal_p (label_dest,
XEXP (pc_src, 2))))))
-
+
{
/* The CODE_LABEL referred to in the note must be the
CODE_LABEL in the LABEL_REF of the "set". We can
@@ -1452,7 +1452,7 @@ redirect_jump (rtx jump, rtx nlabel, int delete_unused)
}
/* Fix up JUMP_LABEL and label ref counts after OLABEL has been replaced with
- NLABEL in JUMP.
+ NLABEL in JUMP.
If DELETE_UNUSED is positive, delete related insn to OLABEL if its ref
count has dropped to zero. */
void
@@ -1547,7 +1547,7 @@ invert_jump_1 (rtx jump, rtx nlabel)
return 0;
ok = invert_exp_1 (SET_SRC (x), jump);
gcc_assert (ok);
-
+
if (num_validated_changes () == ochanges)
return 0;
diff --git a/gcc/lambda-code.c b/gcc/lambda-code.c
index e7a49951a67..e5fe6299f28 100644
--- a/gcc/lambda-code.c
+++ b/gcc/lambda-code.c
@@ -4,17 +4,17 @@
Contributed by Daniel Berlin <dberlin@dberlin.org>
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -47,25 +47,25 @@
/* This loop nest code generation is based on non-singular matrix
math.
-
+
A little terminology and a general sketch of the algorithm. See "A singular
loop transformation framework based on non-singular matrices" by Wei Li and
Keshav Pingali for formal proofs that the various statements below are
- correct.
+ correct.
A loop iteration space represents the points traversed by the loop. A point in the
iteration space can be represented by a vector of size <loop depth>. You can
therefore represent the iteration space as an integral combinations of a set
- of basis vectors.
+ of basis vectors.
A loop iteration space is dense if every integer point between the loop
bounds is a point in the iteration space. Every loop with a step of 1
therefore has a dense iteration space.
for i = 1 to 3, step 1 is a dense iteration space.
-
+
A loop iteration space is sparse if it is not dense. That is, the iteration
- space skips integer points that are within the loop bounds.
+ space skips integer points that are within the loop bounds.
for i = 1 to 3, step 2 is a sparse iteration space, because the integer point
2 is skipped.
@@ -75,14 +75,14 @@
space using min/max and floor/ceil.
For a dense source space, we take the transformation matrix, decompose it
- into a lower triangular part (H) and a unimodular part (U).
+ into a lower triangular part (H) and a unimodular part (U).
We then compute the auxiliary space from the unimodular part (source loop
nest . U = auxiliary space) , which has two important properties:
1. It traverses the iterations in the same lexicographic order as the source
space.
2. It is a dense space when the source is a dense space (even if the target
space is going to be sparse).
-
+
Given the auxiliary space, we use the lower triangular part to compute the
bounds in the target space by simple matrix multiplication.
The gaps in the target space (IE the new loop step sizes) will be the
@@ -104,12 +104,12 @@
are closed under composition, this is okay). We can then use the base space
(which is dense) plus the composed transformation matrix, to compute the rest
of the transform using the dense space algorithm above.
-
+
In other words, our sparse source space (B) is decomposed into a dense base
space (A), and a matrix (L) that transforms A into B, such that A.L = B.
We then compute the composition of L and the user transformation matrix (T),
so that T is now a transform from A to the result, instead of from B to the
- result.
+ result.
IE A.(LT) = result instead of B.T = result
Since A is now a dense source space, we can use the dense source space
algorithm above to compute the result of applying transform (LT) to A.
@@ -117,7 +117,7 @@
Fourier-Motzkin elimination is used to compute the bounds of the base space
of the lattice. */
-static bool perfect_nestify (struct loop *, VEC(tree,heap) *,
+static bool perfect_nestify (struct loop *, VEC(tree,heap) *,
VEC(tree,heap) *, VEC(int,heap) *,
VEC(tree,heap) *);
/* Lattice stuff that is internal to the code generation algorithm. */
@@ -293,7 +293,7 @@ print_lambda_linear_expression (FILE * outfile,
}
/* Print a lambda loop structure LOOP to OUTFILE. The depth/number of
- coefficients is given by DEPTH, the number of invariants is
+ coefficients is given by DEPTH, the number of invariants is
given by INVARIANTS, and the character to start variable names with is given
by START. */
@@ -420,7 +420,7 @@ lambda_lattice_compute_base (lambda_loopnest nest,
/* Otherwise, we need the lower bound expression (which must
be an affine function) to determine the base. */
expression = LL_LOWER_BOUND (loop);
- gcc_assert (expression && !LLE_NEXT (expression)
+ gcc_assert (expression && !LLE_NEXT (expression)
&& LLE_DENOMINATOR (expression) == 1);
/* The lower triangular portion of the base is going to be the
@@ -467,23 +467,23 @@ least_common_multiple (int a, int b)
rewriting these as a <= b, x >= constant, and delete the x variable.
You can then repeat this for any remaining x variables, and then we have
an easy to use variable <= constant (or no variables at all) form that we
- can construct our bounds from.
-
+ can construct our bounds from.
+
In our case, each time we eliminate, we construct part of the bound from
- the ith variable, then delete the ith variable.
-
+ the ith variable, then delete the ith variable.
+
Remember the constant are in our vector a, our coefficient matrix is A,
and our invariant coefficient matrix is B.
-
+
SIZE is the size of the matrices being passed.
DEPTH is the loop nest depth.
INVARIANTS is the number of loop invariants.
A, B, and a are the coefficient matrix, invariant coefficient, and a
vector of constants, respectively. */
-static lambda_loopnest
+static lambda_loopnest
compute_nest_using_fourier_motzkin (int size,
- int depth,
+ int depth,
int invariants,
lambda_matrix A,
lambda_matrix B,
@@ -517,7 +517,7 @@ compute_nest_using_fourier_motzkin (int size,
if (A[j][i] < 0)
{
/* Any linear expression in the matrix with a coefficient less
- than 0 becomes part of the new lower bound. */
+ than 0 becomes part of the new lower bound. */
expression = lambda_linear_expression_new (depth, invariants,
lambda_obstack);
@@ -542,7 +542,7 @@ compute_nest_using_fourier_motzkin (int size,
else if (A[j][i] > 0)
{
/* Any linear expression with a coefficient greater than 0
- becomes part of the new upper bound. */
+ becomes part of the new upper bound. */
expression = lambda_linear_expression_new (depth, invariants,
lambda_obstack);
for (k = 0; k < i; k++)
@@ -620,14 +620,14 @@ compute_nest_using_fourier_motzkin (int size,
}
/* Compute the loop bounds for the auxiliary space NEST.
- Input system used is Ax <= b. TRANS is the unimodular transformation.
- Given the original nest, this function will
+ Input system used is Ax <= b. TRANS is the unimodular transformation.
+ Given the original nest, this function will
1. Convert the nest into matrix form, which consists of a matrix for the
- coefficients, a matrix for the
- invariant coefficients, and a vector for the constants.
+ coefficients, a matrix for the
+ invariant coefficients, and a vector for the constants.
2. Use the matrix form to calculate the lattice base for the nest (which is
- a dense space)
- 3. Compose the dense space transform with the user specified transform, to
+ a dense space)
+ 3. Compose the dense space transform with the user specified transform, to
get a transform we can easily calculate transformed bounds for.
4. Multiply the composed transformation matrix times the matrix form of the
loop.
@@ -700,7 +700,7 @@ lambda_compute_auxillary_space (lambda_loopnest nest,
size++;
/* Need to increase matrix sizes above. */
gcc_assert (size <= 127);
-
+
}
/* Then do the exact same thing for the upper bounds. */
@@ -768,7 +768,7 @@ lambda_compute_auxillary_space (lambda_loopnest nest,
}
/* Compute the loop bounds for the target space, using the bounds of
- the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.
+ the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.
The target space loop bounds are computed by multiplying the triangular
matrix H by the auxiliary nest, to get the new loop bounds. The sign of
the loop steps (positive or negative) is then used to swap the bounds if
@@ -1030,10 +1030,10 @@ lambda_compute_step_signs (lambda_trans_matrix trans, lambda_vector stepsigns)
1. Computing a lattice base for the transformation
2. Composing the dense base with the specified transformation (TRANS)
3. Decomposing the combined transformation into a lower triangular portion,
- and a unimodular portion.
+ and a unimodular portion.
4. Computing the auxiliary nest using the unimodular portion.
5. Computing the target nest using the auxiliary nest and the lower
- triangular portion. */
+ triangular portion. */
lambda_loopnest
lambda_loopnest_transform (lambda_loopnest nest, lambda_trans_matrix trans,
@@ -1187,7 +1187,7 @@ gcc_tree_to_linear_expression (int depth, tree expr,
/* Return the depth of the loopnest NEST */
-static int
+static int
depth_of_nest (struct loop *nest)
{
size_t depth = 0;
@@ -1362,7 +1362,7 @@ gcc_loop_to_lambda_loop (struct loop *loop, int depth,
outerinductionvars, *invariants,
0, lambda_obstack);
}
-
+
if (!lbound)
{
@@ -1383,20 +1383,20 @@ gcc_loop_to_lambda_loop (struct loop *loop, int depth,
else if (TREE_CODE (test_lhs) == SSA_NAME
&& invariant_in_loop_and_outer_loops (loop, test_lhs))
VEC_quick_push (tree, *invariants, test_lhs);
-
+
/* The non-induction variable part of the test is the upper bound variable.
*/
if (test_lhs == inductionvar)
uboundvar = test_rhs;
else
uboundvar = test_lhs;
-
+
/* We only size the vectors assuming we have, at max, 2 times as many
invariants as we do loops (one for each bound).
This is just an arbitrary number, but it has to be matched against the
code below. */
gcc_assert (VEC_length (tree, *invariants) <= (unsigned int) (2 * depth));
-
+
/* We might have some leftover. */
if (gimple_cond_code (exit_cond) == LT_EXPR)
@@ -1407,7 +1407,7 @@ gcc_loop_to_lambda_loop (struct loop *loop, int depth,
extra = -1 * stepint;
else if (gimple_cond_code (exit_cond) == EQ_EXPR)
extra = 1 * stepint;
-
+
ubound = gcc_tree_to_linear_expression (depth, uboundvar,
outerinductionvars,
*invariants, extra, lambda_obstack);
@@ -1449,7 +1449,7 @@ find_induction_var_from_exit_cond (struct loop *loop)
/* Find the side that is invariant in this loop. The ivar must be the other
side. */
-
+
if (expr_invariant_in_loop_p (loop, test_lhs))
ivarop = test_rhs;
else if (expr_invariant_in_loop_p (loop, test_rhs))
@@ -1466,7 +1466,7 @@ DEF_VEC_P(lambda_loop);
DEF_VEC_ALLOC_P(lambda_loop,heap);
/* Generate a lambda loopnest from a gcc loopnest LOOP_NEST.
- Return the new loop nest.
+ Return the new loop nest.
INDUCTIONVARS is a pointer to an array of induction variables for the
loopnest that will be filled in during this process.
INVARIANTS is a pointer to an array of invariants that will be filled in
@@ -1514,7 +1514,7 @@ gcc_loopnest_to_lambda_loopnest (struct loop *loop_nest,
{
if (dump_file)
fprintf (dump_file,
- "Not a perfect loop nest and couldn't convert to one.\n");
+ "Not a perfect loop nest and couldn't convert to one.\n");
goto fail;
}
else if (dump_file)
@@ -1532,19 +1532,19 @@ gcc_loopnest_to_lambda_loopnest (struct loop *loop_nest,
VEC_free (tree, heap, uboundvars);
VEC_free (tree, heap, lboundvars);
VEC_free (int, heap, steps);
-
+
return ret;
}
-/* Convert a lambda body vector LBV to a gcc tree, and return the new tree.
+/* Convert a lambda body vector LBV to a gcc tree, and return the new tree.
STMTS_TO_INSERT is a pointer to a tree where the statements we need to be
inserted for us are stored. INDUCTION_VARS is the array of induction
variables for the loop this LBV is from. TYPE is the tree type to use for
the variables and trees involved. */
static tree
-lbv_to_gcc_expression (lambda_body_vector lbv,
- tree type, VEC(tree,heap) *induction_vars,
+lbv_to_gcc_expression (lambda_body_vector lbv,
+ tree type, VEC(tree,heap) *induction_vars,
gimple_seq *stmts_to_insert)
{
int k;
@@ -1566,7 +1566,7 @@ lbv_to_gcc_expression (lambda_body_vector lbv,
Return the tree that represents the final value of the expression.
LLE is the linear expression to convert.
OFFSET is the linear offset to apply to the expression.
- TYPE is the tree type to use for the variables and math.
+ TYPE is the tree type to use for the variables and math.
INDUCTION_VARS is a vector of induction variables for the loops.
INVARIANTS is a vector of the loop nest invariants.
WRAP specifies what tree code to wrap the results in, if there is more than
@@ -1594,7 +1594,7 @@ lle_to_gcc_expression (lambda_linear_expression lle,
{
expr = build_linear_expr (type, LLE_COEFFICIENTS (lle), induction_vars);
expr = fold_build2 (PLUS_EXPR, type, expr,
- build_linear_expr (type,
+ build_linear_expr (type,
LLE_INVARIANT_COEFFICIENTS (lle),
invariants));
@@ -1669,20 +1669,20 @@ remove_iv (gimple iv_stmt)
else
{
gsi_remove (&si, true);
- release_defs (iv_stmt);
+ release_defs (iv_stmt);
}
}
/* Transform a lambda loopnest NEW_LOOPNEST, which had TRANSFORM applied to
it, back into gcc code. This changes the
loops, their induction variables, and their bodies, so that they
- match the transformed loopnest.
+ match the transformed loopnest.
OLD_LOOPNEST is the loopnest before we've replaced it with the new
loopnest.
OLD_IVS is a vector of induction variables from the old loopnest.
INVARIANTS is a vector of loop invariants from the old loopnest.
NEW_LOOPNEST is the new lambda loopnest to replace OLD_LOOPNEST with.
- TRANSFORM is the matrix transform that was applied to OLD_LOOPNEST to get
+ TRANSFORM is the matrix transform that was applied to OLD_LOOPNEST to get
NEW_LOOPNEST. */
void
@@ -1742,10 +1742,10 @@ lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest,
/* Linear offset is a bit tricky to handle. Punt on the unhandled
cases for now. */
offset = LL_LINEAR_OFFSET (newloop);
-
+
gcc_assert (LLE_DENOMINATOR (offset) == 1 &&
lambda_vector_zerop (LLE_COEFFICIENTS (offset), depth));
-
+
/* Now build the new lower bounds, and insert the statements
necessary to generate it on the loop preheader. */
stmts = NULL;
@@ -1798,9 +1798,9 @@ lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest,
/* Replace the exit condition with the new upper bound
comparison. */
-
+
testtype = LL_STEP (newloop) >= 0 ? LE_EXPR : GE_EXPR;
-
+
/* We want to build a conditional where true means exit the loop, and
false means continue the loop.
So swap the testtype if this isn't the way things are.*/
@@ -1844,7 +1844,7 @@ lambda_loopnest_to_gcc_loopnest (struct loop *old_loopnest,
depth = VEC_length (tree, new_ivs);
lbv = lambda_body_vector_new (depth, lambda_obstack);
LBV_COEFFICIENTS (lbv)[i] = 1;
-
+
newlbv = lambda_body_vector_compute_new (transform, lbv,
lambda_obstack);
@@ -1909,7 +1909,7 @@ static bool
stmt_uses_phi_result (gimple stmt, tree phi_result)
{
tree use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
-
+
/* This is conservatively true, because we only want SIMPLE bumpers
of the form x +- constant for our pass. */
return (use == phi_result);
@@ -1917,7 +1917,7 @@ stmt_uses_phi_result (gimple stmt, tree phi_result)
/* STMT is a bumper stmt for LOOP if the version it defines is used in the
in-loop-edge in a phi node, and the operand it uses is the result of that
- phi node.
+ phi node.
I.E. i_29 = i_3 + 1
i_3 = PHI (0, i_29); */
@@ -1928,7 +1928,7 @@ stmt_is_bumper_for_loop (struct loop *loop, gimple stmt)
tree def;
imm_use_iterator iter;
use_operand_p use_p;
-
+
def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF);
if (!def)
return false;
@@ -1941,7 +1941,7 @@ stmt_is_bumper_for_loop (struct loop *loop, gimple stmt)
if (phi_loop_edge_uses_def (loop, use, def))
if (stmt_uses_phi_result (stmt, PHI_RESULT (use)))
return true;
- }
+ }
}
return false;
}
@@ -1952,7 +1952,7 @@ stmt_is_bumper_for_loop (struct loop *loop, gimple stmt)
innermost loop body.
If S is a program statement, then
- i.e.
+ i.e.
DO I = 1, 20
S1
DO J = 1, 20
@@ -1960,14 +1960,14 @@ stmt_is_bumper_for_loop (struct loop *loop, gimple stmt)
END DO
END DO
is not a perfect loop nest because of S1.
-
+
DO I = 1, 20
DO J = 1, 20
S1
...
END DO
- END DO
- is a perfect loop nest.
+ END DO
+ is a perfect loop nest.
Since we don't have high level loops anymore, we basically have to walk our
statements and ignore those that are there because the loop needs them (IE
@@ -2025,7 +2025,7 @@ perfect_nest_p (struct loop *loop)
of body basic block. */
static void
-replace_uses_equiv_to_x_with_y (struct loop *loop, gimple stmt, tree x,
+replace_uses_equiv_to_x_with_y (struct loop *loop, gimple stmt, tree x,
int xstep, tree y, tree yinit,
htab_t replacements,
gimple_stmt_iterator *firstbsi)
@@ -2128,7 +2128,7 @@ exit_phi_for_loop_p (struct loop *loop, gimple stmt)
|| gimple_phi_num_args (stmt) != 1
|| gimple_bb (stmt) != single_exit (loop)->dest)
return false;
-
+
return true;
}
@@ -2140,12 +2140,12 @@ can_put_in_inner_loop (struct loop *inner, gimple stmt)
{
imm_use_iterator imm_iter;
use_operand_p use_p;
-
+
gcc_assert (is_gimple_assign (stmt));
if (gimple_vuse (stmt)
|| !stmt_invariant_in_loop_p (inner, stmt))
return false;
-
+
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_assign_lhs (stmt))
{
if (!exit_phi_for_loop_p (inner, USE_STMT (use_p)))
@@ -2156,7 +2156,7 @@ can_put_in_inner_loop (struct loop *inner, gimple stmt)
return false;
}
}
- return true;
+ return true;
}
/* Return true if STMT can be put *after* the inner loop of LOOP. */
@@ -2169,13 +2169,13 @@ can_put_after_inner_loop (struct loop *loop, gimple stmt)
if (gimple_vuse (stmt))
return false;
-
+
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_assign_lhs (stmt))
{
if (!exit_phi_for_loop_p (loop, USE_STMT (use_p)))
{
basic_block immbb = gimple_bb (USE_STMT (use_p));
-
+
if (!dominated_by_p (CDI_DOMINATORS,
immbb,
loop->inner->header)
@@ -2271,7 +2271,7 @@ cannot_convert_bb_to_perfect_nest (basic_block bb, struct loop *loop)
gimple exit_condition = get_loop_exit_condition (loop);
for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
- {
+ {
gimple stmt = gsi_stmt (bsi);
if (stmt == exit_condition
@@ -2297,7 +2297,7 @@ cannot_convert_bb_to_perfect_nest (basic_block bb, struct loop *loop)
right now. This test ensures that the statement comes
completely *after* the inner loop. */
if (!dominated_by_p (CDI_DOMINATORS,
- gimple_bb (stmt),
+ gimple_bb (stmt),
loop->inner->header))
return true;
}
@@ -2320,7 +2320,7 @@ can_convert_to_perfect_nest (struct loop *loop)
/* Can't handle triply nested+ loops yet. */
if (!loop->inner || loop->inner->inner)
return false;
-
+
bbs = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
if (bbs[i]->loop_father == loop
@@ -2334,10 +2334,10 @@ can_convert_to_perfect_nest (struct loop *loop)
gsi_next (&si))
if (gimple_phi_num_args (gsi_stmt (si)) != 1)
goto fail;
-
+
free (bbs);
return true;
-
+
fail:
free (bbs);
return false;
@@ -2353,7 +2353,7 @@ DEF_VEC_ALLOC_I(source_location,heap);
UBOUNDS are the upper bounds for the loops to transform
STEPS is the STEPS for the loops to transform.
LOOPIVS is the induction variables for the loops to transform.
-
+
Basically, for the case of
FOR (i = 0; i < 50; i++)
@@ -2375,7 +2375,7 @@ DEF_VEC_ALLOC_I(source_location,heap);
<whatever>
}
}
-
+
FOR (i = 0; i < 50; i ++)
{
<some code>
@@ -2411,7 +2411,7 @@ perfect_nestify (struct loop *loop,
olddest = single_exit (loop)->dest;
preheaderbb = split_edge (single_exit (loop));
headerbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
-
+
/* Push the exit phi nodes that we are moving. */
for (bsi = gsi_start_phis (olddest); !gsi_end_p (bsi); gsi_next (&bsi))
{
@@ -2420,7 +2420,7 @@ perfect_nestify (struct loop *loop,
VEC_reserve (source_location, heap, locations, 1);
VEC_quick_push (tree, phis, PHI_RESULT (phi));
VEC_quick_push (tree, phis, PHI_ARG_DEF (phi, 0));
- VEC_quick_push (source_location, locations,
+ VEC_quick_push (source_location, locations,
gimple_phi_arg_location (phi, 0));
}
e = redirect_edge_and_branch (single_succ_edge (preheaderbb), headerbb);
@@ -2436,7 +2436,7 @@ perfect_nestify (struct loop *loop,
tree phiname;
source_location locus;
def = VEC_pop (tree, phis);
- phiname = VEC_pop (tree, phis);
+ phiname = VEC_pop (tree, phis);
locus = VEC_pop (source_location, locations);
phi = create_phi_node (phiname, preheaderbb);
add_phi_arg (phi, def, single_pred_edge (preheaderbb), locus);
@@ -2446,7 +2446,7 @@ perfect_nestify (struct loop *loop,
bodybb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
latchbb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb);
- make_edge (headerbb, bodybb, EDGE_FALLTHRU);
+ make_edge (headerbb, bodybb, EDGE_FALLTHRU);
cond_stmt = gimple_build_cond (NE_EXPR, integer_one_node, integer_zero_node,
NULL_TREE, NULL_TREE);
bsi = gsi_start_bb (bodybb);
@@ -2456,7 +2456,7 @@ perfect_nestify (struct loop *loop,
make_edge (latchbb, headerbb, EDGE_FALLTHRU);
/* Update the loop structures. */
- newloop = duplicate_loop (loop, olddest->loop_father);
+ newloop = duplicate_loop (loop, olddest->loop_father);
newloop->header = headerbb;
newloop->latch = latchbb;
add_bb_to_loop (latchbb, newloop);
@@ -2464,7 +2464,7 @@ perfect_nestify (struct loop *loop,
add_bb_to_loop (headerbb, newloop);
set_immediate_dominator (CDI_DOMINATORS, bodybb, headerbb);
set_immediate_dominator (CDI_DOMINATORS, headerbb, preheaderbb);
- set_immediate_dominator (CDI_DOMINATORS, preheaderbb,
+ set_immediate_dominator (CDI_DOMINATORS, preheaderbb,
single_exit (loop)->src);
set_immediate_dominator (CDI_DOMINATORS, latchbb, bodybb);
set_immediate_dominator (CDI_DOMINATORS, olddest,
@@ -2476,7 +2476,7 @@ perfect_nestify (struct loop *loop,
standard_iv_increment_position (newloop, &bsi, &insert_after);
create_iv (VEC_index (tree, lbounds, 0),
build_int_cst (TREE_TYPE (oldivvar), VEC_index (int, steps, 0)),
- ivvar, newloop, &bsi, insert_after, &ivvar, &ivvarinced);
+ ivvar, newloop, &bsi, insert_after, &ivvar, &ivvarinced);
/* Create the new upper bound. This may be not just a variable, so we copy
it to one just in case. */
@@ -2498,7 +2498,7 @@ perfect_nestify (struct loop *loop,
update_stmt (exit_condition);
replacements = htab_create_ggc (20, tree_map_hash,
tree_map_eq, NULL);
- bbs = get_loop_body_in_dom_order (loop);
+ bbs = get_loop_body_in_dom_order (loop);
/* Now move the statements, and replace the induction variable in the moved
statements with the correct loop induction variable. */
oldivvar = VEC_index (tree, loopivs, 0);
@@ -2513,7 +2513,7 @@ perfect_nestify (struct loop *loop,
The only time can_convert_to_perfect_nest returns true when we
have statements before the inner loop is if they can be moved
- into the inner loop.
+ into the inner loop.
The only time can_convert_to_perfect_nest returns true when we
have statements after the inner loop is if they can be moved into
@@ -2521,11 +2521,11 @@ perfect_nestify (struct loop *loop,
if (dominated_by_p (CDI_DOMINATORS, loop->inner->header, bbs[i]))
{
- gimple_stmt_iterator header_bsi
+ gimple_stmt_iterator header_bsi
= gsi_after_labels (loop->inner->header);
for (bsi = gsi_start_bb (bbs[i]); !gsi_end_p (bsi);)
- {
+ {
gimple stmt = gsi_stmt (bsi);
if (stmt == exit_condition
@@ -2540,14 +2540,14 @@ perfect_nestify (struct loop *loop,
}
}
else
- {
+ {
/* Note that the bsi only needs to be explicitly incremented
when we don't move something, since it is automatically
incremented when we do. */
for (bsi = gsi_start_bb (bbs[i]); !gsi_end_p (bsi);)
- {
+ {
gimple stmt = gsi_stmt (bsi);
-
+
if (stmt == exit_condition
|| not_interesting_stmt (stmt)
|| stmt_is_bumper_for_loop (loop, stmt))
@@ -2555,8 +2555,8 @@ perfect_nestify (struct loop *loop,
gsi_next (&bsi);
continue;
}
-
- replace_uses_equiv_to_x_with_y
+
+ replace_uses_equiv_to_x_with_y
(loop, stmt, oldivvar, VEC_index (int, steps, 0), ivvar,
VEC_index (tree, lbounds, 0), replacements, &firstbsi);
@@ -2569,7 +2569,7 @@ perfect_nestify (struct loop *loop,
mark_sym_for_renaming (gimple_vop (cfun));
}
}
-
+
}
}
@@ -2592,7 +2592,7 @@ perfect_nestify (struct loop *loop,
the zero vector." S.Muchnick. */
bool
-lambda_transform_legal_p (lambda_trans_matrix trans,
+lambda_transform_legal_p (lambda_trans_matrix trans,
int nb_loops,
VEC (ddr_p, heap) *dependence_relations)
{
@@ -2631,7 +2631,7 @@ lambda_transform_legal_p (lambda_trans_matrix trans,
/* Conservatively answer: "this transformation is not valid". */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
return false;
-
+
/* If the dependence could not be captured by a distance vector,
conservatively answer that the transform is not valid. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
@@ -2640,7 +2640,7 @@ lambda_transform_legal_p (lambda_trans_matrix trans,
/* Compute trans.dist_vect */
for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
{
- lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
+ lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
DDR_DIST_VECT (ddr, j), distres);
if (!lambda_vector_lexico_pos (distres, nb_loops))
@@ -2736,7 +2736,7 @@ av_for_af_base (tree base_expr, lambda_vector cy, struct access_matrix *am,
case MULT_EXPR:
if (TREE_CODE (TREE_OPERAND (base_expr, 0)) == INTEGER_CST)
- result = av_for_af_base (TREE_OPERAND (base_expr, 1),
+ result = av_for_af_base (TREE_OPERAND (base_expr, 1),
cy, am, cst *
int_cst_value (TREE_OPERAND (base_expr, 0)));
else if (TREE_CODE (TREE_OPERAND (base_expr, 1)) == INTEGER_CST)
diff --git a/gcc/lambda-mat.c b/gcc/lambda-mat.c
index 85e80b14442..fb9098b20e1 100644
--- a/gcc/lambda-mat.c
+++ b/gcc/lambda-mat.c
@@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-flow.h"
#include "lambda.h"
-static void lambda_matrix_get_column (lambda_matrix, int, int,
+static void lambda_matrix_get_column (lambda_matrix, int, int,
lambda_vector);
/* Allocate a matrix of M rows x N cols. */
@@ -39,7 +39,7 @@ lambda_matrix_new (int m, int n)
int i;
mat = GGC_NEWVEC (lambda_vector, m);
-
+
for (i = 0; i < m; i++)
mat[i] = lambda_vector_new (n);
@@ -318,7 +318,7 @@ lambda_matrix_inverse (lambda_matrix mat, lambda_matrix inv, int n)
a = mat[0][0];
b = mat[1][0];
c = mat[0][1];
- d = mat[1][1];
+ d = mat[1][1];
inv[0][0] = d;
inv[0][1] = -c;
inv[1][0] = -b;
@@ -483,7 +483,7 @@ lambda_matrix_hermite (lambda_matrix mat, int n,
/* Given an M x N integer matrix A, this function determines an M x
M unimodular matrix U, and an M x N echelon matrix S such that
"U.A = S". This decomposition is also known as "right Hermite".
-
+
Ref: Algorithm 2.1 page 33 in "Loop Transformations for
Restructuring Compilers" Utpal Banerjee. */
@@ -528,7 +528,7 @@ lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
/* Given an M x N integer matrix A, this function determines an M x M
unimodular matrix V, and an M x N echelon matrix S such that "A =
V.S". This decomposition is also known as "left Hermite".
-
+
Ref: Algorithm 2.2 page 36 in "Loop Transformations for
Restructuring Compilers" Utpal Banerjee. */
diff --git a/gcc/lambda-trans.c b/gcc/lambda-trans.c
index 1d107cd0630..d34a63e2e86 100644
--- a/gcc/lambda-trans.c
+++ b/gcc/lambda-trans.c
@@ -34,7 +34,7 @@ lambda_trans_matrix
lambda_trans_matrix_new (int colsize, int rowsize)
{
lambda_trans_matrix ret;
-
+
ret = GGC_NEW (struct lambda_trans_matrix_s);
LTM_MATRIX (ret) = lambda_matrix_new (rowsize, colsize);
LTM_ROWSIZE (ret) = rowsize;
@@ -56,14 +56,14 @@ lambda_trans_matrix_id_p (lambda_trans_matrix mat)
/* Compute the inverse of the transformation matrix MAT. */
-lambda_trans_matrix
+lambda_trans_matrix
lambda_trans_matrix_inverse (lambda_trans_matrix mat)
{
lambda_trans_matrix inverse;
int determinant;
-
+
inverse = lambda_trans_matrix_new (LTM_ROWSIZE (mat), LTM_COLSIZE (mat));
- determinant = lambda_matrix_inverse (LTM_MATRIX (mat), LTM_MATRIX (inverse),
+ determinant = lambda_matrix_inverse (LTM_MATRIX (mat), LTM_MATRIX (inverse),
LTM_ROWSIZE (mat));
LTM_DENOMINATOR (inverse) = determinant;
return inverse;
@@ -75,6 +75,6 @@ lambda_trans_matrix_inverse (lambda_trans_matrix mat)
void
print_lambda_trans_matrix (FILE *outfile, lambda_trans_matrix mat)
{
- print_lambda_matrix (outfile, LTM_MATRIX (mat), LTM_ROWSIZE (mat),
+ print_lambda_matrix (outfile, LTM_MATRIX (mat), LTM_ROWSIZE (mat),
LTM_COLSIZE (mat));
}
diff --git a/gcc/lambda.h b/gcc/lambda.h
index 94ca90644e4..189c1fc50b3 100644
--- a/gcc/lambda.h
+++ b/gcc/lambda.h
@@ -78,16 +78,16 @@ typedef struct lambda_body_vector_s
#define LBV_SIZE(T) ((T)->size)
#define LBV_DENOMINATOR(T) ((T)->denominator)
-/* Piecewise linear expression.
+/* Piecewise linear expression.
This structure represents a linear expression with terms for the invariants
- and induction variables of a loop.
+ and induction variables of a loop.
COEFFICIENTS is a vector of coefficients for the induction variables, one
per loop in the loop nest.
CONSTANT is the constant portion of the linear expression
INVARIANT_COEFFICIENTS is a vector of coefficients for the loop invariants,
one per invariant.
DENOMINATOR is the denominator for all of the coefficients and constants in
- the expression.
+ the expression.
The linear expressions can be linked together using the NEXT field, in
order to represent MAX or MIN of a group of linear expressions. */
typedef struct lambda_linear_expression_s
@@ -131,7 +131,7 @@ typedef struct lambda_loop_s
#define LL_LINEAR_OFFSET(T) ((T)->linear_offset)
#define LL_STEP(T) ((T)->step)
-/* Loop nest structure.
+/* Loop nest structure.
The loop nest structure consists of a set of loop structures (defined
above) in LOOPS, along with an integer representing the DEPTH of the loop,
and an integer representing the number of INVARIANTS in the loop. Both of
@@ -187,7 +187,7 @@ void lambda_matrix_hermite (lambda_matrix, int, lambda_matrix, lambda_matrix);
void lambda_matrix_left_hermite (lambda_matrix, int, int, lambda_matrix, lambda_matrix);
void lambda_matrix_right_hermite (lambda_matrix, int, int, lambda_matrix, lambda_matrix);
int lambda_matrix_first_nz_vec (lambda_matrix, int, int, int);
-void lambda_matrix_project_to_null (lambda_matrix, int, int, int,
+void lambda_matrix_project_to_null (lambda_matrix, int, int, int,
lambda_vector);
void print_lambda_matrix (FILE *, lambda_matrix, int, int);
@@ -199,7 +199,7 @@ lambda_trans_matrix lambda_trans_matrix_basis (lambda_trans_matrix);
lambda_trans_matrix lambda_trans_matrix_padding (lambda_trans_matrix);
lambda_trans_matrix lambda_trans_matrix_inverse (lambda_trans_matrix);
void print_lambda_trans_matrix (FILE *, lambda_trans_matrix);
-void lambda_matrix_vector_mult (lambda_matrix, int, int, lambda_vector,
+void lambda_matrix_vector_mult (lambda_matrix, int, int, lambda_vector,
lambda_vector);
bool lambda_trans_matrix_id_p (lambda_trans_matrix);
@@ -262,7 +262,7 @@ lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
/* Negate vector VEC1 with length SIZE and store it in VEC2. */
-static inline void
+static inline void
lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
int size)
{
@@ -303,7 +303,7 @@ lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
/* Return true if vector VEC1 of length SIZE is the zero vector. */
-static inline bool
+static inline bool
lambda_vector_zerop (lambda_vector vec1, int size)
{
int i;
@@ -322,7 +322,7 @@ lambda_vector_clear (lambda_vector vec1, int size)
}
/* Return true if two vectors are equal. */
-
+
static inline bool
lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
{
@@ -370,7 +370,7 @@ lambda_vector_first_nz (lambda_vector vec1, int n, int start)
/* Multiply a vector by a matrix. */
static inline void
-lambda_vector_matrix_mult (lambda_vector vect, int m, lambda_matrix mat,
+lambda_vector_matrix_mult (lambda_vector vect, int m, lambda_matrix mat,
int n, lambda_vector dest)
{
int i, j;
@@ -382,7 +382,7 @@ lambda_vector_matrix_mult (lambda_vector vect, int m, lambda_matrix mat,
/* Compare two vectors returning an integer less than, equal to, or
greater than zero if the first argument is considered to be respectively
- less than, equal to, or greater than the second.
+ less than, equal to, or greater than the second.
We use the lexicographic order. */
static inline int
@@ -423,7 +423,7 @@ print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
/* Compute the greatest common divisor of two numbers using
Euclid's algorithm. */
-static inline int
+static inline int
gcd (int a, int b)
{
int x, y, z;
@@ -462,7 +462,7 @@ lambda_vector_gcd (lambda_vector vector, int size)
other words, when the first nonzero element is positive. */
static inline bool
-lambda_vector_lexico_pos (lambda_vector v,
+lambda_vector_lexico_pos (lambda_vector v,
unsigned n)
{
unsigned i;
diff --git a/gcc/langhooks.c b/gcc/langhooks.c
index 633caf58c53..8505ec40b98 100644
--- a/gcc/langhooks.c
+++ b/gcc/langhooks.c
@@ -162,7 +162,7 @@ lhd_set_decl_assembler_name (tree decl)
&& (TREE_STATIC (decl)
|| DECL_EXTERNAL (decl)
|| TREE_PUBLIC (decl))));
-
+
/* By default, assume the name to use in assembly code is the same
as that used in the source language. (That's correct for C, and
GCC used to set DECL_ASSEMBLER_NAME to the same value as
@@ -170,7 +170,7 @@ lhd_set_decl_assembler_name (tree decl)
compatibility with existing front-ends. This assumption is wrapped
in a target hook, to allow for target-specific modification of the
identifier.
-
+
Can't use just the variable's own name for a variable whose scope
is less than the whole compilation. Concatenate a distinguishing
number - we use the DECL_UID. */
@@ -181,7 +181,7 @@ lhd_set_decl_assembler_name (tree decl)
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
char *label;
-
+
ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
id = get_identifier (label);
}
diff --git a/gcc/lcm.c b/gcc/lcm.c
index 18579b9d782..2c0bc8445db 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -483,7 +483,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist =
+ qin = qout = worklist =
XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
/* We want a maximal solution. */
diff --git a/gcc/libgcov.c b/gcc/libgcov.c
index 6b13940e4a7..4076409e11c 100644
--- a/gcc/libgcov.c
+++ b/gcc/libgcov.c
@@ -88,8 +88,8 @@ static gcov_unsigned_t gcov_crc32;
static size_t gcov_max_filename = 0;
#ifdef TARGET_POSIX_IO
-/* Make sure path component of the given FILENAME exists, create
- missing directories. FILENAME must be writable.
+/* Make sure path component of the given FILENAME exists, create
+ missing directories. FILENAME must be writable.
Returns zero on success, or -1 if an error occurred. */
static int
@@ -114,7 +114,7 @@ create_file_directory (char *filename)
*s = sep;
return -1;
};
-
+
*s = sep;
};
return 0;
@@ -123,9 +123,9 @@ create_file_directory (char *filename)
/* Check if VERSION of the info block PTR matches libgcov one.
Return 1 on success, or zero in case of versions mismatch.
- If FILENAME is not NULL, its value used for reporting purposes
+ If FILENAME is not NULL, its value used for reporting purposes
instead of value from the info block. */
-
+
static int
gcov_version (struct gcov_info *ptr, gcov_unsigned_t version,
const char *filename)
@@ -136,7 +136,7 @@ gcov_version (struct gcov_info *ptr, gcov_unsigned_t version,
GCOV_UNSIGNED2STRING (v, version);
GCOV_UNSIGNED2STRING (e, GCOV_VERSION);
-
+
fprintf (stderr,
"profiling:%s:Version mismatch - expected %.4s got %.4s\n",
filename? filename : ptr->filename, e, v);
@@ -203,7 +203,7 @@ gcov_exit (void)
if (gcov_prefix_strip < 0)
gcov_prefix_strip = 0;
}
-
+
prefix_length = strlen(gcov_prefix);
/* Remove an unnecessary trailing '/' */
@@ -212,13 +212,13 @@ gcov_exit (void)
}
else
prefix_length = 0;
-
+
/* Allocate and initialize the filename scratch space. */
gi_filename = (char *) alloca (prefix_length + gcov_max_filename + 1);
if (prefix_length)
memcpy (gi_filename, gcov_prefix, prefix_length);
gi_filename_up = gi_filename + prefix_length;
-
+
/* Now merge each file. */
for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
{
@@ -236,8 +236,8 @@ gcov_exit (void)
memset (&this_object, 0, sizeof (this_object));
memset (&object, 0, sizeof (object));
-
- /* Build relocated filename, stripping off leading
+
+ /* Build relocated filename, stripping off leading
directories from the initial filename if requested. */
if (gcov_prefix_strip > 0)
{
@@ -294,7 +294,7 @@ gcov_exit (void)
fi_stride += __alignof__ (struct gcov_fn_info) - 1;
fi_stride &= ~(__alignof__ (struct gcov_fn_info) - 1);
}
-
+
if (!gcov_open (gi_filename))
{
#ifdef TARGET_POSIX_IO
@@ -331,7 +331,7 @@ gcov_exit (void)
if (length != gi_ptr->stamp)
/* Read from a different compilation. Overwrite the file. */
goto rewrite;
-
+
/* Merge execution counts for each function. */
for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++)
{
@@ -360,10 +360,10 @@ gcov_exit (void)
if (!((1 << t_ix) & gi_ptr->ctr_mask))
continue;
-
+
n_counts = fi_ptr->n_ctrs[c_ix];
merge = gi_ptr->counts[c_ix].merge;
-
+
tag = gcov_read_unsigned ();
length = gcov_read_unsigned ();
if (tag != GCOV_TAG_FOR_COUNTER (t_ix)
@@ -382,7 +382,7 @@ gcov_exit (void)
while (1)
{
int is_program;
-
+
eof_pos = gcov_position ();
tag = gcov_read_unsigned ();
if (!tag)
@@ -404,11 +404,11 @@ gcov_exit (void)
}
}
goto rewrite;
-
+
read_error:;
fprintf (stderr, error < 0 ? "profiling:%s:Overflow merging\n"
: "profiling:%s:Error merging\n", gi_filename);
-
+
read_fatal:;
gcov_close ();
continue;
@@ -438,7 +438,7 @@ gcov_exit (void)
if (cs_obj->run_max < cs_tobj->run_max)
cs_obj->run_max = cs_tobj->run_max;
cs_obj->sum_max += cs_tobj->run_max;
-
+
if (!cs_prg->runs++)
cs_prg->num = cs_tprg->num;
else if (cs_prg->num != cs_tprg->num)
@@ -450,7 +450,7 @@ gcov_exit (void)
}
else if (cs_obj->num || cs_prg->num)
goto read_mismatch;
-
+
if (!cs_all->runs && cs_prg->runs)
memcpy (cs_all, cs_prg, sizeof (*cs_all));
else if (!all.checksum
@@ -463,7 +463,7 @@ gcov_exit (void)
all.checksum = ~0u;
}
}
-
+
c_ix = 0;
for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++)
if ((1 << t_ix) & gi_ptr->ctr_mask)
@@ -473,11 +473,11 @@ gcov_exit (void)
}
program.checksum = gcov_crc32;
-
+
/* Write out the data. */
gcov_write_tag_length (GCOV_DATA_MAGIC, GCOV_VERSION);
gcov_write_unsigned (gi_ptr->stamp);
-
+
/* Write execution counts for each function. */
for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++)
{
@@ -498,7 +498,7 @@ gcov_exit (void)
continue;
n_counts = fi_ptr->n_ctrs[c_ix];
-
+
gcov_write_tag_length (GCOV_TAG_FOR_COUNTER (t_ix),
GCOV_TAG_COUNTER_LENGTH (n_counts));
c_ptr = values[c_ix];
@@ -544,7 +544,7 @@ __gcov_init (struct gcov_info *info)
/* Refresh the longest file name information */
if (filename_length > gcov_max_filename)
gcov_max_filename = filename_length;
-
+
do
{
unsigned ix;
@@ -560,12 +560,12 @@ __gcov_init (struct gcov_info *info)
}
}
while (*ptr++);
-
+
gcov_crc32 = crc32;
-
+
if (!gcov_list)
atexit (gcov_exit);
-
+
info->next = gcov_list;
gcov_list = info;
}
@@ -586,7 +586,7 @@ __gcov_flush (void)
{
unsigned t_ix;
const struct gcov_ctr_info *ci_ptr;
-
+
for (t_ix = 0, ci_ptr = gi_ptr->counts; t_ix != GCOV_COUNTERS; t_ix++)
if ((1 << t_ix) & gi_ptr->ctr_mask)
{
@@ -628,7 +628,7 @@ __gcov_merge_ior (gcov_type *counters, unsigned n_counters)
reads the same number of counters from the gcov file. The counters
are split into 3-tuples where the members of the tuple have
meanings:
-
+
-- the stored candidate on the most common value of the measured entity
-- counter
-- total number of evaluations of the value */
@@ -666,7 +666,7 @@ __gcov_merge_single (gcov_type *counters, unsigned n_counters)
given an array COUNTERS of N_COUNTERS old counters and it reads the
same number of counters from the gcov file. The counters are split
into 4-tuples where the members of the tuple have meanings:
-
+
-- the last value of the measured entity
-- the stored candidate on the most common difference
-- counter
@@ -770,7 +770,7 @@ __gcov_one_value_profiler (gcov_type *counters, gcov_type value)
#ifdef L_gcov_indirect_call_profiler
/* Tries to determine the most common value among its inputs. */
void
-__gcov_indirect_call_profiler (gcov_type* counter, gcov_type value,
+__gcov_indirect_call_profiler (gcov_type* counter, gcov_type value,
void* cur_func, void* callee_func)
{
/* If the C++ virtual tables contain function descriptors then one
diff --git a/gcc/lists.c b/gcc/lists.c
index 99846710305..5517e5f6a62 100644
--- a/gcc/lists.c
+++ b/gcc/lists.c
@@ -51,12 +51,12 @@ free_list (rtx *listp, rtx *unused_listp)
gcc_assert (unused_listp != &unused_insn_list
|| GET_CODE (prev_link) == INSN_LIST);
-
+
while (link)
{
gcc_assert (unused_listp != &unused_insn_list
|| GET_CODE (prev_link) == INSN_LIST);
-
+
prev_link = link;
link = XEXP (link, 1);
}
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index 1f5856f581b..0fa0fe21e51 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -317,7 +317,7 @@ add_test (rtx cond, edge *e, basic_block dest)
redirect_edge_and_branch_force (*e, dest);
return false;
}
-
+
JUMP_LABEL (jump) = label;
/* The jump is supposed to handle an unlikely special case. */
@@ -462,7 +462,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
set_zero->count = preheader->count;
set_zero->frequency = preheader->frequency;
}
-
+
if (EDGE_COUNT (set_zero->preds) == 0)
{
/* All the conditions were simplified to false, remove the
@@ -477,7 +477,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
sequence = get_insns ();
end_sequence ();
emit_insn_after (sequence, BB_END (set_zero));
-
+
set_immediate_dominator (CDI_DOMINATORS, set_zero,
recompute_dominator (CDI_DOMINATORS,
set_zero));
@@ -530,7 +530,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
if (true_prob_val)
{
/* Seems safer to use the branch probability. */
- add_reg_note (jump_insn, REG_BR_PROB,
+ add_reg_note (jump_insn, REG_BR_PROB,
GEN_INT (desc->in_edge->probability));
}
}
diff --git a/gcc/loop-init.c b/gcc/loop-init.c
index a1a91639dc4..da01f04879e 100644
--- a/gcc/loop-init.c
+++ b/gcc/loop-init.c
@@ -72,7 +72,7 @@ loop_optimizer_init (unsigned flags)
if (flags & LOOPS_HAVE_FALLTHRU_PREHEADERS)
cp_flags |= CP_FALLTHRU_PREHEADERS;
-
+
create_preheaders (cp_flags);
}
@@ -126,7 +126,7 @@ loop_optimizer_finalize (void)
/* Checking. */
#ifdef ENABLE_CHECKING
- /* FIXME: no point to verify flow info after bundling on ia64. Use this
+ /* FIXME: no point to verify flow info after bundling on ia64. Use this
hack for achieving this. */
if (!reload_completed)
verify_flow_info ();
@@ -177,7 +177,7 @@ static unsigned int
rtl_loop_init (void)
{
gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
-
+
if (dump_file)
dump_flow_info (dump_file, dump_flags);
@@ -269,7 +269,7 @@ struct rtl_opt_pass pass_rtl_move_loop_invariants =
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
- 0, /* todo_flags_start */
+ 0, /* todo_flags_start */
TODO_df_verify |
TODO_df_finish | TODO_verify_rtl_sharing |
TODO_dump_func /* todo_flags_finish */
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index baf249ba3a2..03f0a13baa0 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -179,14 +179,14 @@ static VEC(invariant_p,heap) *invariants;
/* Check the size of the invariant table and realloc if necessary. */
-static void
+static void
check_invariant_table_size (void)
{
if (invariant_table_size < DF_DEFS_TABLE_SIZE())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
invariant_table = XRESIZEVEC (struct invariant *, invariant_table, new_size);
- memset (&invariant_table[invariant_table_size], 0,
+ memset (&invariant_table[invariant_table_size], 0,
(new_size - invariant_table_size) * sizeof (struct rtx_iv *));
invariant_table_size = new_size;
}
@@ -776,26 +776,26 @@ check_dependency (basic_block bb, df_ref use, bitmap depends_on)
struct df_link *defs;
struct def *def_data;
struct invariant *inv;
-
+
if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
return false;
-
+
defs = DF_REF_CHAIN (use);
if (!defs)
return true;
-
+
if (defs->next)
return false;
-
+
def = defs->ref;
check_invariant_table_size ();
inv = invariant_table[DF_REF_ID(def)];
if (!inv)
return false;
-
+
def_data = inv->def;
gcc_assert (def_data != NULL);
-
+
def_bb = DF_REF_BB (def);
/* Note that in case bb == def_bb, we know that the definition
dominates insn, because def has invariant_table[DF_REF_ID(def)]
@@ -803,7 +803,7 @@ check_dependency (basic_block bb, df_ref use, bitmap depends_on)
sequentially. */
if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
return false;
-
+
bitmap_set_bit (depends_on, def_data->invno);
return true;
}
@@ -826,7 +826,7 @@ check_dependencies (rtx insn, bitmap depends_on)
for (use_rec = DF_INSN_INFO_EQ_USES (insn_info); *use_rec; use_rec++)
if (!check_dependency (bb, *use_rec, depends_on))
return false;
-
+
return true;
}
@@ -1020,7 +1020,7 @@ get_cover_class_and_nregs (rtx insn, int *nregs)
rtx reg;
enum reg_class cover_class;
rtx set = single_set (insn);
-
+
/* Considered invariant insns have only one set. */
gcc_assert (set != NULL_RTX);
reg = SET_DEST (set);
@@ -1324,7 +1324,7 @@ find_invariants_to_move (bool speed)
return;
if (flag_ira_loop_pressure)
- /* REGS_USED is actually never used when the flag is on. */
+ /* REGS_USED is actually never used when the flag is on. */
regs_used = 0;
else
/* We do not really do a good job in estimating number of
@@ -1334,7 +1334,7 @@ find_invariants_to_move (bool speed)
unsigned int n_regs = DF_REG_SIZE (df);
regs_used = 2;
-
+
for (i = 0; i < n_regs; i++)
{
if (!DF_REGNO_FIRST_DEF (i) && DF_REGNO_LAST_USE (i))
@@ -1457,7 +1457,7 @@ move_invariant_reg (struct loop *loop, unsigned invno)
{
*use->pos = reg;
df_insn_rescan (use->insn);
- }
+ }
}
return true;
@@ -1529,7 +1529,7 @@ free_inv_motion_data (void)
{
def = inv->def;
gcc_assert (def != NULL);
-
+
free_use_list (def->uses);
free (def);
invariant_table[i] = NULL;
@@ -1590,7 +1590,7 @@ static rtx regs_set[(FIRST_PSEUDO_REGISTER > MAX_RECOG_OPERANDS
static int n_regs_set;
/* Return cover class and number of needed hard registers (through
- *NREGS) of register REGNO. */
+ *NREGS) of register REGNO. */
static enum reg_class
get_regno_cover_class (int regno, int *nregs)
{
@@ -1735,7 +1735,7 @@ mark_ref_regs (rtx x)
if (code == REG)
{
struct loop *loop;
-
+
for (loop = curr_loop;
loop != current_loops->tree_root;
loop = loop_outer (loop))
@@ -1750,7 +1750,7 @@ mark_ref_regs (rtx x)
else if (fmt[i] == 'E')
{
int j;
-
+
for (j = 0; j < XVECLEN (x, i); j++)
mark_ref_regs (XVECEXP (x, i, j));
}
@@ -1802,20 +1802,20 @@ calculate_loop_reg_pressure (void)
mark_ref_regs (PATTERN (insn));
n_regs_set = 0;
note_stores (PATTERN (insn), mark_reg_clobber, NULL);
-
+
/* Mark any registers dead after INSN as dead now. */
-
+
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
mark_reg_death (XEXP (link, 0));
-
+
/* Mark any registers set in INSN as live,
and mark them as conflicting with all other live regs.
Clobbers are processed again, so they conflict with
the registers that are set. */
-
+
note_stores (PATTERN (insn), mark_reg_store, NULL);
-
+
#ifdef AUTO_INC_DEC
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_INC)
@@ -1827,7 +1827,7 @@ calculate_loop_reg_pressure (void)
REGNO (regs_set[n_regs_set]));
if (! note)
continue;
-
+
mark_reg_death (XEXP (note, 0));
}
}
@@ -1865,7 +1865,7 @@ calculate_loop_reg_pressure (void)
for (i = 0; (int) i < ira_reg_class_cover_size; i++)
{
enum reg_class cover_class;
-
+
cover_class = ira_reg_class_cover[i];
if (LOOP_DATA (loop)->max_reg_pressure[cover_class] == 0)
continue;
diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c
index af25d02ed61..16e9a52697d 100644
--- a/gcc/loop-iv.c
+++ b/gcc/loop-iv.c
@@ -1,19 +1,19 @@
/* Rtl-level induction variable analysis.
Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -35,7 +35,7 @@ along with GCC; see the file COPYING3. If not see
iv_analysis_done () to clean up the memory.
The available functions are:
-
+
iv_analyze (insn, reg, iv): Stores the description of the induction variable
corresponding to the use of register REG in INSN to IV. Returns true if
REG is an induction variable in INSN. false otherwise.
@@ -168,14 +168,14 @@ lowpart_subreg (enum machine_mode outer_mode, rtx expr,
subreg_lowpart_offset (outer_mode, inner_mode));
}
-static void
+static void
check_iv_ref_table_size (void)
{
if (iv_ref_table_size < DF_DEFS_TABLE_SIZE())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
- memset (&iv_ref_table[iv_ref_table_size], 0,
+ memset (&iv_ref_table[iv_ref_table_size], 0,
(new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
iv_ref_table_size = new_size;
}
@@ -330,7 +330,7 @@ iv_get_reaching_def (rtx insn, rtx reg, df_ref *def)
basic_block def_bb, use_bb;
rtx def_insn;
bool dom_p;
-
+
*def = NULL;
if (!simple_reg_p (reg))
return GRD_INVALID;
@@ -859,7 +859,7 @@ iv_analyze_biv (rtx def, struct rtx_iv *iv)
print_rtl (dump_file, def);
fprintf (dump_file, " for bivness.\n");
}
-
+
if (!REG_P (def))
{
if (!CONSTANT_P (def))
@@ -919,7 +919,7 @@ iv_analyze_biv (rtx def, struct rtx_iv *iv)
return iv->base != NULL_RTX;
}
-/* Analyzes expression RHS used at INSN and stores the result to *IV.
+/* Analyzes expression RHS used at INSN and stores the result to *IV.
The mode of the induction variable is MODE. */
bool
@@ -943,7 +943,7 @@ iv_analyze_expr (rtx insn, rtx rhs, enum machine_mode mode, struct rtx_iv *iv)
{
if (!iv_analyze_op (insn, rhs, iv))
return false;
-
+
if (iv->mode == VOIDmode)
{
iv->mode = mode;
@@ -1057,7 +1057,7 @@ iv_analyze_def (df_ref def, struct rtx_iv *iv)
fprintf (dump_file, " in insn ");
print_rtl_single (dump_file, insn);
}
-
+
check_iv_ref_table_size ();
if (DF_REF_IV (def))
{
@@ -1749,7 +1749,7 @@ simplify_using_condition (rtx cond, rtx *expr, regset altered)
*expr = const_true_rtx;
return;
}
-
+
if (reve && implies_p (cond, reve))
{
*expr = const0_rtx;
@@ -1855,7 +1855,7 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
default:
gcc_unreachable ();
}
-
+
simplify_using_initial_values (loop, UNKNOWN, &head);
if (head == aggr)
{
@@ -1876,7 +1876,7 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
*expr = tail;
return;
}
-
+
XEXP (*expr, 0) = head;
XEXP (*expr, 1) = tail;
return;
@@ -1942,7 +1942,7 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
if (CALL_P (insn))
{
int i;
-
+
/* Kill all call clobbered registers. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
@@ -2137,7 +2137,7 @@ canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1,
and iv0 and iv1 are both ivs iterating in SI mode, but calculated
in different modes. This does not seem impossible to handle, but
it hardly ever occurs in practice.
-
+
The only exception is the case when one of operands is invariant.
For example pentium 3 generates comparisons like
(lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we
@@ -2299,7 +2299,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
goto fail;
if (iv0.extend_mode == VOIDmode)
iv0.mode = iv0.extend_mode = mode;
-
+
op1 = XEXP (condition, 1);
if (!iv_analyze (insn, op1, &iv1))
goto fail;
@@ -2867,7 +2867,7 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc)
{
if (flow_bb_inside_loop_p (loop, e->dest))
continue;
-
+
check_simple_exit (loop, e, &act);
if (!act.simple_p)
continue;
@@ -2886,7 +2886,7 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc)
if (act.infinite && !desc->infinite)
continue;
}
-
+
*desc = act;
}
}
@@ -2953,15 +2953,15 @@ get_simple_loop_desc (struct loop *loop)
if (desc->simple_p && (desc->assumptions || desc->infinite))
{
- const char *wording;
+ const char *wording;
- /* Assume that no overflow happens and that the loop is finite.
+ /* Assume that no overflow happens and that the loop is finite.
We already warned at the tree level if we ran optimizations there. */
if (!flag_tree_loop_optimize && warn_unsafe_loop_optimizations)
{
if (desc->infinite)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop is not infinite")
: N_("cannot optimize possibly infinite loops");
@@ -2970,7 +2970,7 @@ get_simple_loop_desc (struct loop *loop)
}
if (desc->assumptions)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop counter does not overflow")
: N_("cannot optimize loop, the loop counter may overflow");
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index 8812e68805b..6b7fe8ad091 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -32,7 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "output.h"
#include "expr.h"
#include "hashtab.h"
-#include "recog.h"
+#include "recog.h"
/* This pass performs loop unrolling and peeling. We only perform these
optimizations on innermost loops (with single exception) because
@@ -82,7 +82,7 @@ struct iv_to_split
unsigned loc[3]; /* Location where the definition of the induction
variable occurs in the insn. For example if
N_LOC is 2, the expression is located at
- XEXP (XEXP (single_set, loc[0]), loc[1]). */
+ XEXP (XEXP (single_set, loc[0]), loc[1]). */
};
/* Information about accumulators to expand. */
@@ -91,14 +91,14 @@ struct var_to_expand
{
rtx insn; /* The insn in that the variable expansion occurs. */
rtx reg; /* The accumulator which is expanded. */
- VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */
+ VEC(rtx,heap) *var_expansions; /* The copies of the accumulator which is expanded. */
struct var_to_expand *next; /* Next entry in walking order. */
- enum rtx_code op; /* The type of the accumulation - addition, subtraction
+ enum rtx_code op; /* The type of the accumulation - addition, subtraction
or multiplication. */
int expansion_count; /* Count the number of expansions generated so far. */
int reuse_expansion; /* The expansion we intend to reuse to expand
- the accumulator. If REUSE_EXPANSION is 0 reuse
- the original accumulator. Else use
+ the accumulator. If REUSE_EXPANSION is 0 reuse
+ the original accumulator. Else use
var_expansions[REUSE_EXPANSION - 1]. */
unsigned accum_pos; /* The position in which the accumulator is placed in
the insn src. For example in x = x + something
@@ -462,13 +462,13 @@ peel_loop_completely (struct loop *loop)
edge ein;
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
-
+
npeel = desc->niter;
if (npeel)
{
bool ok;
-
+
wont_exit = sbitmap_alloc (npeel + 1);
sbitmap_ones (wont_exit);
RESET_BIT (wont_exit, 0);
@@ -479,7 +479,7 @@ peel_loop_completely (struct loop *loop)
if (flag_split_ivs_in_unroller)
opt_info = analyze_insns_in_loop (loop);
-
+
opt_info_start_duplication (opt_info);
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
npeel,
@@ -492,7 +492,7 @@ peel_loop_completely (struct loop *loop)
gcc_assert (ok);
free (wont_exit);
-
+
if (opt_info)
{
apply_opt_in_copies (opt_info, npeel, false, true);
@@ -609,7 +609,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags)
loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
loop->lpt_decision.times = best_unroll;
-
+
if (dump_file)
fprintf (dump_file,
";; Decided to unroll the constant times rolling loop, %d times.\n",
@@ -649,7 +649,7 @@ unroll_loop_constant_iterations (struct loop *loop)
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
-
+
niter = desc->niter;
/* Should not get here (such loop should be peeled instead). */
@@ -661,10 +661,10 @@ unroll_loop_constant_iterations (struct loop *loop)
sbitmap_ones (wont_exit);
remove_edges = NULL;
- if (flag_split_ivs_in_unroller
+ if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
-
+
if (!exit_at_end)
{
/* The exit is not at the end of the loop; leave exit test
@@ -693,8 +693,8 @@ unroll_loop_constant_iterations (struct loop *loop)
gcc_assert (ok);
if (opt_info && exit_mod > 1)
- apply_opt_in_copies (opt_info, exit_mod, false, false);
-
+ apply_opt_in_copies (opt_info, exit_mod, false, false);
+
desc->noloop_assumptions = NULL_RTX;
desc->niter -= exit_mod;
desc->niter_max -= exit_mod;
@@ -719,7 +719,7 @@ unroll_loop_constant_iterations (struct loop *loop)
RESET_BIT (wont_exit, 0);
if (desc->noloop_assumptions)
RESET_BIT (wont_exit, 1);
-
+
opt_info_start_duplication (opt_info);
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
exit_mod + 1,
@@ -730,7 +730,7 @@ unroll_loop_constant_iterations (struct loop *loop)
? DLTHE_RECORD_COPY_NUMBER
: 0));
gcc_assert (ok);
-
+
if (opt_info && exit_mod > 0)
apply_opt_in_copies (opt_info, exit_mod + 1, false, false);
@@ -746,7 +746,7 @@ unroll_loop_constant_iterations (struct loop *loop)
}
/* Now unroll the loop. */
-
+
opt_info_start_duplication (opt_info);
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
max_unroll,
@@ -770,7 +770,7 @@ unroll_loop_constant_iterations (struct loop *loop)
{
basic_block exit_block = get_bb_copy (desc->in_edge->src);
/* Find a new in and out edge; they are in the last copy we have made. */
-
+
if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
{
desc->out_edge = EDGE_SUCC (exit_block, 0);
@@ -869,7 +869,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags)
loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
loop->lpt_decision.times = i - 1;
-
+
if (dump_file)
fprintf (dump_file,
";; Decided to unroll the runtime computable "
@@ -888,7 +888,7 @@ split_edge_and_insert (edge e, rtx insns)
if (!insns)
return NULL;
- bb = split_edge (e);
+ bb = split_edge (e);
emit_insn_after (insns, BB_END (bb));
/* ??? We used to assume that INSNS can contain control flow insns, and
@@ -973,11 +973,11 @@ unroll_loop_runtime_iterations (struct loop *loop)
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
-
+
if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
-
+
/* Remember blocks whose dominators will have to be updated. */
dom_bbs = NULL;
@@ -1119,7 +1119,7 @@ unroll_loop_runtime_iterations (struct loop *loop)
sbitmap_ones (wont_exit);
RESET_BIT (wont_exit, may_exit_copy);
opt_info_start_duplication (opt_info);
-
+
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
max_unroll,
wont_exit, desc->out_edge,
@@ -1129,7 +1129,7 @@ unroll_loop_runtime_iterations (struct loop *loop)
? DLTHE_RECORD_COPY_NUMBER
: 0));
gcc_assert (ok);
-
+
if (opt_info)
{
apply_opt_in_copies (opt_info, max_unroll, true, true);
@@ -1143,7 +1143,7 @@ unroll_loop_runtime_iterations (struct loop *loop)
basic_block exit_block = get_bb_copy (desc->in_edge->src);
/* Find a new in and out edge; they are in the last copy we have
made. */
-
+
if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
{
desc->out_edge = EDGE_SUCC (exit_block, 0);
@@ -1266,7 +1266,7 @@ decide_peel_simple (struct loop *loop, int flags)
/* Success. */
loop->lpt_decision.decision = LPT_PEEL_SIMPLE;
loop->lpt_decision.times = npeel;
-
+
if (dump_file)
fprintf (dump_file, ";; Decided to simply peel the loop, %d times.\n",
loop->lpt_decision.times);
@@ -1294,15 +1294,15 @@ peel_loop_simple (struct loop *loop)
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
bool ok;
-
+
if (flag_split_ivs_in_unroller && npeel > 1)
opt_info = analyze_insns_in_loop (loop);
-
+
wont_exit = sbitmap_alloc (npeel + 1);
sbitmap_zero (wont_exit);
-
+
opt_info_start_duplication (opt_info);
-
+
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
npeel, wont_exit, NULL,
NULL, DLTHE_FLAG_UPDATE_FREQ
@@ -1312,7 +1312,7 @@ peel_loop_simple (struct loop *loop)
gcc_assert (ok);
free (wont_exit);
-
+
if (opt_info)
{
apply_opt_in_copies (opt_info, npeel, false, false);
@@ -1411,7 +1411,7 @@ decide_unroll_stupid (struct loop *loop, int flags)
loop->lpt_decision.decision = LPT_UNROLL_STUPID;
loop->lpt_decision.times = i - 1;
-
+
if (dump_file)
fprintf (dump_file,
";; Decided to unroll the loop stupidly, %d times.\n",
@@ -1443,16 +1443,16 @@ unroll_loop_stupid (struct loop *loop)
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
bool ok;
-
+
if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
-
-
+
+
wont_exit = sbitmap_alloc (nunroll + 1);
sbitmap_zero (wont_exit);
opt_info_start_duplication (opt_info);
-
+
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
nunroll, wont_exit,
NULL, NULL,
@@ -1461,7 +1461,7 @@ unroll_loop_stupid (struct loop *loop)
? DLTHE_RECORD_COPY_NUMBER
: 0));
gcc_assert (ok);
-
+
if (opt_info)
{
apply_opt_in_copies (opt_info, nunroll, true, true);
@@ -1513,7 +1513,7 @@ ve_info_hash (const void *ves)
return (hashval_t) INSN_UID (((const struct var_to_expand *) ves)->insn);
}
-/* Return true if IVTS1 and IVTS2 (which are really both of type
+/* Return true if IVTS1 and IVTS2 (which are really both of type
"var_to_expand *") refer to the same instruction. */
static int
@@ -1521,7 +1521,7 @@ ve_info_eq (const void *ivts1, const void *ivts2)
{
const struct var_to_expand *const i1 = (const struct var_to_expand *) ivts1;
const struct var_to_expand *const i2 = (const struct var_to_expand *) ivts2;
-
+
return i1->insn == i2->insn;
}
@@ -1534,12 +1534,12 @@ referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg)
unsigned i;
int count_ref = 0;
rtx insn;
-
- body = get_loop_body (loop);
+
+ body = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
{
bb = body[i];
-
+
FOR_BB_INSNS (bb, insn)
{
if (rtx_referenced_p (reg, insn))
@@ -1550,14 +1550,14 @@ referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg)
}
/* Determine whether INSN contains an accumulator
- which can be expanded into separate copies,
+ which can be expanded into separate copies,
one for each copy of the LOOP body.
-
+
for (i = 0 ; i < n; i++)
sum += a[i];
-
+
==>
-
+
sum += a[i]
....
i = i+1;
@@ -1567,8 +1567,8 @@ referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg)
sum2 += a[i];
....
- Return NULL if INSN contains no opportunity for expansion of accumulator.
- Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant
+ Return NULL if INSN contains no opportunity for expansion of accumulator.
+ Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant
information and return a pointer to it.
*/
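The accumulator expansion described in the comment above is easiest to see in plain C; the sketch below is hand-written (assuming an unroll factor of 2 and an even trip count), not output of the pass.

/* Hand-written illustration of accumulator expansion for an unroll factor
   of 2: each copy of the body gets its own partial sum, which removes the
   loop-carried dependence on the single accumulator, and the partials are
   combined at the loop exit.  Assumes n is even.  */
double
sum_expanded (const double *a, int n)
{
  double sum1 = 0.0, sum2 = 0.0;
  int i;

  for (i = 0; i < n; i += 2)
    {
      sum1 += a[i];        /* copy 0 of the unrolled body */
      sum2 += a[i + 1];    /* copy 1 uses its own accumulator */
    }

  return sum1 + sum2;      /* combined after the loop */
}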
@@ -1583,10 +1583,10 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn)
set = single_set (insn);
if (!set)
return NULL;
-
+
dest = SET_DEST (set);
src = SET_SRC (set);
-
+
if (GET_CODE (src) != PLUS
&& GET_CODE (src) != MINUS
&& GET_CODE (src) != MULT)
@@ -1606,12 +1606,12 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn)
op1 = XEXP (src, 0);
op2 = XEXP (src, 1);
-
+
if (!REG_P (dest)
&& !(GET_CODE (dest) == SUBREG
&& REG_P (SUBREG_REG (dest))))
return NULL;
-
+
if (rtx_equal_p (dest, op1))
accum_pos = 0;
else if (rtx_equal_p (dest, op2))
@@ -1623,7 +1623,7 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn)
the initialization of the expansions with zero and the summation of
the expansions at the end of the computation will yield wrong results
for (x = something - x) thus avoid using it in that case. */
- if (accum_pos == 1
+ if (accum_pos == 1
&& GET_CODE (src) == MINUS)
return NULL;
@@ -1631,15 +1631,15 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn)
if (!referenced_in_one_insn_in_loop_p (loop, dest))
return NULL;
-
+
if (rtx_referenced_p (dest, something))
return NULL;
-
- mode1 = GET_MODE (dest);
+
+ mode1 = GET_MODE (dest);
mode2 = GET_MODE (something);
- if ((FLOAT_MODE_P (mode1)
- || FLOAT_MODE_P (mode2))
- && !flag_associative_math)
+ if ((FLOAT_MODE_P (mode1)
+ || FLOAT_MODE_P (mode2))
+ && !flag_associative_math)
return NULL;
if (dump_file)
@@ -1660,11 +1660,11 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn)
ves->expansion_count = 0;
ves->reuse_expansion = 0;
ves->accum_pos = accum_pos;
- return ves;
+ return ves;
}
/* Determine whether there is an induction variable in INSN that
- we would like to split during unrolling.
+ we would like to split during unrolling.
I.e. replace
@@ -1684,7 +1684,7 @@ analyze_insn_to_expand_var (struct loop *loop, rtx insn)
i = i0 + 2
...
- Return NULL if INSN contains no interesting IVs. Otherwise, allocate
+ Return NULL if INSN contains no interesting IVs. Otherwise, allocate
an IV_TO_SPLIT structure, fill it with the relevant information and return a
pointer to it. */
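The splitting this comment describes can likewise be sketched at the source level (hand-written, unroll factor 3, names illustrative): each copy derives its value from the base i0 instead of from the previous copy.

/* Hand-written illustration of induction variable splitting for an unroll
   factor of 3: instead of three chained i = i + 1 updates, every copy is
   computed from the base i0, so the copies are independent of each other.  */
void
iv_split_example (int *a, int n)
{
  int i0;

  for (i0 = 0; i0 + 2 < n; i0 += 3)
    {
      a[i0]     = 0;       /* copy 0 uses i0     */
      a[i0 + 1] = 0;       /* copy 1 uses i0 + 1 */
      a[i0 + 2] = 0;       /* copy 2 uses i0 + 2 */
    }
}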
@@ -1734,7 +1734,7 @@ analyze_iv_to_split_insn (rtx insn)
ivts->next = NULL;
ivts->n_loc = 1;
ivts->loc[0] = 1;
-
+
return ivts;
}
@@ -1757,7 +1757,7 @@ analyze_insns_in_loop (struct loop *loop)
VEC (edge, heap) *edges = get_loop_exit_edges (loop);
edge exit;
bool can_apply = false;
-
+
iv_analysis_loop_init (loop);
body = get_loop_body (loop);
@@ -1769,10 +1769,10 @@ analyze_insns_in_loop (struct loop *loop)
opt_info->iv_to_split_head = NULL;
opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
}
-
+
/* Record the loop exit bb and loop preheader before the unrolling. */
opt_info->loop_preheader = loop_preheader_edge (loop)->src;
-
+
if (VEC_length (edge, edges) == 1)
{
exit = VEC_index (edge, edges, 0);
@@ -1782,7 +1782,7 @@ analyze_insns_in_loop (struct loop *loop)
can_apply = true;
}
}
-
+
if (flag_variable_expansion_in_unroller
&& can_apply)
{
@@ -1792,7 +1792,7 @@ analyze_insns_in_loop (struct loop *loop)
opt_info->var_to_expand_head = NULL;
opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
}
-
+
for (i = 0; i < loop->num_nodes; i++)
{
bb = body[i];
@@ -1803,10 +1803,10 @@ analyze_insns_in_loop (struct loop *loop)
{
if (!INSN_P (insn))
continue;
-
+
if (opt_info->insns_to_split)
ivts = analyze_iv_to_split_insn (insn);
-
+
if (ivts)
{
slot1 = htab_find_slot (opt_info->insns_to_split, ivts, INSERT);
@@ -1816,10 +1816,10 @@ analyze_insns_in_loop (struct loop *loop)
opt_info->iv_to_split_tail = &ivts->next;
continue;
}
-
+
if (opt_info->insns_with_var_to_expand)
ves = analyze_insn_to_expand_var (loop, insn);
-
+
if (ves)
{
slot2 = htab_find_slot (opt_info->insns_with_var_to_expand, ves, INSERT);
@@ -1830,7 +1830,7 @@ analyze_insns_in_loop (struct loop *loop)
}
}
}
-
+
VEC_free (edge, heap, edges);
free (body);
return opt_info;
@@ -1839,7 +1839,7 @@ analyze_insns_in_loop (struct loop *loop)
/* Called just before loop duplication. Records start of duplicated area
to OPT_INFO. */
-static void
+static void
opt_info_start_duplication (struct opt_info *opt_info)
{
if (opt_info)
@@ -1952,7 +1952,7 @@ split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta)
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, insn);
-
+
if (validate_change (insn, loc, var, 0))
return;
@@ -1970,7 +1970,7 @@ split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta)
emit_move_insn (dest, src);
seq = get_insns ();
end_sequence ();
-
+
emit_insn_before (seq, insn);
delete_insn (insn);
}
@@ -1982,22 +1982,22 @@ static rtx
get_expansion (struct var_to_expand *ve)
{
rtx reg;
-
+
if (ve->reuse_expansion == 0)
reg = ve->reg;
else
reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1);
-
+
if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion)
ve->reuse_expansion = 0;
- else
+ else
ve->reuse_expansion++;
-
+
return reg;
}
-/* Given INSN replace the uses of the accumulator recorded in VE
+/* Given INSN replace the uses of the accumulator recorded in VE
with a new register. */
static void
@@ -2005,10 +2005,10 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
{
rtx new_reg, set;
bool really_new_expansion = false;
-
+
set = single_set (insn);
gcc_assert (set);
-
+
/* Generate a new register only if the expansion limit has not been
reached. Else reuse an already existing expansion. */
if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
@@ -2021,7 +2021,7 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
validate_change (insn, &SET_DEST (set), new_reg, 1);
validate_change (insn, &XEXP (SET_SRC (set), ve->accum_pos), new_reg, 1);
-
+
if (apply_change_group ())
if (really_new_expansion)
{
@@ -2037,7 +2037,7 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
way we can prevent cases where the sign of the final result is
   affected by the sign of the expansion. Here is an example to
demonstrate this:
-
+
for (i = 0 ; i < n; i++)
sum += something;
@@ -2051,7 +2051,7 @@ expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
i = i+1
sum2 += something;
....
-
+
When SUM is initialized with -zero and SOMETHING is also -zero; the
final result of sum should be -zero thus the expansions sum1 and sum2
should be initialized with -zero as well (otherwise we will get +zero
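The signed-zero point made in this comment can be checked with a small stand-alone C program (illustrative, assuming IEEE arithmetic):

#include <stdio.h>

/* With IEEE arithmetic, -0.0 + -0.0 is -0.0, while +0.0 + -0.0 is +0.0.
   Initializing the partial accumulators with -0.0 therefore lets a result
   that should be -0.0 survive the expansion; starting them at +0.0 would
   turn it into +0.0.  */
int
main (void)
{
  double terms[4] = { -0.0, -0.0, -0.0, -0.0 };
  double sum1 = -0.0, sum2 = -0.0;   /* the -zero initialization */
  int i;

  for (i = 0; i < 4; i += 2)
    {
      sum1 += terms[i];
      sum2 += terms[i + 1];
    }

  printf ("%g\n", sum1 + sum2);      /* prints -0 */
  return 0;
}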
@@ -2068,16 +2068,16 @@ insert_var_expansion_initialization (struct var_to_expand *ve,
if (VEC_length (rtx, ve->var_expansions) == 0)
return;
-
+
start_sequence ();
- if (ve->op == PLUS || ve->op == MINUS)
+ if (ve->op == PLUS || ve->op == MINUS)
for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
{
if (honor_signed_zero_p)
zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
else
zero_init = CONST0_RTX (mode);
-
+
emit_move_insn (var, zero_init);
}
else if (ve->op == MULT)
@@ -2086,15 +2086,15 @@ insert_var_expansion_initialization (struct var_to_expand *ve,
zero_init = CONST1_RTX (GET_MODE (var));
emit_move_insn (var, zero_init);
}
-
+
seq = get_insns ();
end_sequence ();
-
+
insn = BB_HEAD (place);
while (!NOTE_INSN_BASIC_BLOCK_P (insn))
insn = NEXT_INSN (insn);
-
- emit_insn_after (seq, insn);
+
+ emit_insn_after (seq, insn);
}
/* Combine the variable expansions at the loop exit. PLACE is the
@@ -2110,7 +2110,7 @@ combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
if (VEC_length (rtx, ve->var_expansions) == 0)
return;
-
+
start_sequence ();
if (ve->op == PLUS || ve->op == MINUS)
for (i = 0; VEC_iterate (rtx, ve->var_expansions, i, var); i++)
@@ -2124,13 +2124,13 @@ combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
sum = simplify_gen_binary (MULT, GET_MODE (ve->reg),
var, sum);
}
-
+
expr = force_operand (sum, ve->reg);
if (expr != ve->reg)
emit_move_insn (ve->reg, expr);
seq = get_insns ();
end_sequence ();
-
+
insn = BB_HEAD (place);
while (!NOTE_INSN_BASIC_BLOCK_P (insn))
insn = NEXT_INSN (insn);
@@ -2138,18 +2138,18 @@ combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
emit_insn_after (seq, insn);
}
-/* Apply loop optimizations in loop copies using the
- data which gathered during the unrolling. Structure
+/* Apply loop optimizations in loop copies using the
+ data which gathered during the unrolling. Structure
OPT_INFO record that data.
-
+
UNROLLING is true if we unrolled (not peeled) the loop.
REWRITE_ORIGINAL_BODY is true if we should also rewrite the original body of
the loop (as it should happen in complete unrolling, but not in ordinary
peeling of the loop). */
static void
-apply_opt_in_copies (struct opt_info *opt_info,
- unsigned n_copies, bool unrolling,
+apply_opt_in_copies (struct opt_info *opt_info,
+ unsigned n_copies, bool unrolling,
bool rewrite_original_loop)
{
unsigned i, delta;
@@ -2157,21 +2157,21 @@ apply_opt_in_copies (struct opt_info *opt_info,
rtx insn, orig_insn, next;
struct iv_to_split ivts_templ, *ivts;
struct var_to_expand ve_templ, *ves;
-
+
/* Sanity check -- we need to put initialization in the original loop
body. */
gcc_assert (!unrolling || rewrite_original_loop);
-
+
/* Allocate the basic variables (i0). */
if (opt_info->insns_to_split)
for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
allocate_basic_variable (ivts);
-
+
for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
{
bb = BASIC_BLOCK (i);
orig_bb = get_bb_original (bb);
-
+
/* bb->aux holds position in copy sequence initialized by
duplicate_loop_to_header_edge. */
delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
@@ -2183,24 +2183,24 @@ apply_opt_in_copies (struct opt_info *opt_info,
next = NEXT_INSN (insn);
if (!INSN_P (insn))
continue;
-
+
while (!INSN_P (orig_insn))
orig_insn = NEXT_INSN (orig_insn);
-
+
ivts_templ.insn = orig_insn;
ve_templ.insn = orig_insn;
-
+
/* Apply splitting iv optimization. */
if (opt_info->insns_to_split)
{
ivts = (struct iv_to_split *)
htab_find (opt_info->insns_to_split, &ivts_templ);
-
+
if (ivts)
{
gcc_assert (GET_CODE (PATTERN (insn))
== GET_CODE (PATTERN (orig_insn)));
-
+
if (!delta)
insert_base_initialization (ivts, insn);
split_iv (ivts, insn, delta);
@@ -2212,7 +2212,7 @@ apply_opt_in_copies (struct opt_info *opt_info,
ves = (struct var_to_expand *)
htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
if (ves)
- {
+ {
gcc_assert (GET_CODE (PATTERN (insn))
== GET_CODE (PATTERN (orig_insn)));
expand_var_during_unrolling (ves, insn);
@@ -2224,9 +2224,9 @@ apply_opt_in_copies (struct opt_info *opt_info,
if (!rewrite_original_loop)
return;
-
+
/* Initialize the variable expansions in the loop preheader
- and take care of combining them at the loop exit. */
+ and take care of combining them at the loop exit. */
if (opt_info->insns_with_var_to_expand)
{
for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
@@ -2234,7 +2234,7 @@ apply_opt_in_copies (struct opt_info *opt_info,
for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
}
-
+
/* Rewrite also the original loop body. Find them as originals of the blocks
in the last copied iteration, i.e. those that have
get_bb_copy (get_bb_original (bb)) == bb. */
@@ -2244,17 +2244,17 @@ apply_opt_in_copies (struct opt_info *opt_info,
orig_bb = get_bb_original (bb);
if (get_bb_copy (orig_bb) != bb)
continue;
-
+
delta = determine_split_iv_delta (0, n_copies, unrolling);
for (orig_insn = BB_HEAD (orig_bb);
orig_insn != NEXT_INSN (BB_END (bb));
orig_insn = next)
{
next = NEXT_INSN (orig_insn);
-
+
if (!INSN_P (orig_insn))
continue;
-
+
ivts_templ.insn = orig_insn;
if (opt_info->insns_to_split)
{
@@ -2268,7 +2268,7 @@ apply_opt_in_copies (struct opt_info *opt_info,
continue;
}
}
-
+
}
}
}
diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c
index 3ce714b2bf8..590e8e8e01b 100644
--- a/gcc/lower-subreg.c
+++ b/gcc/lower-subreg.c
@@ -1307,7 +1307,7 @@ decompose_multiword_subregs (void)
BITMAP_FREE (b);
}
- VEC_free (bitmap, heap, reg_copy_graph);
+ VEC_free (bitmap, heap, reg_copy_graph);
BITMAP_FREE (decomposable_context);
BITMAP_FREE (non_decomposable_context);
diff --git a/gcc/lto-cgraph.c b/gcc/lto-cgraph.c
index a3c7719ffbf..0f77df1a145 100644
--- a/gcc/lto-cgraph.c
+++ b/gcc/lto-cgraph.c
@@ -80,7 +80,7 @@ lto_cgraph_encoder_encode (lto_cgraph_encoder_t encoder,
{
int ref;
void **slot;
-
+
slot = pointer_map_contains (encoder->map, node);
if (!slot)
{
@@ -116,7 +116,7 @@ lto_cgraph_encoder_deref (lto_cgraph_encoder_t encoder, int ref)
if (ref == LCC_NOT_FOUND)
return NULL;
- return VEC_index (cgraph_node_ptr, encoder->nodes, ref);
+ return VEC_index (cgraph_node_ptr, encoder->nodes, ref);
}
@@ -142,11 +142,11 @@ lto_output_edge (struct lto_simple_output_block *ob, struct cgraph_edge *edge,
lto_output_uleb128_stream (ob->main_stream, LTO_cgraph_edge);
ref = lto_cgraph_encoder_lookup (encoder, edge->caller);
- gcc_assert (ref != LCC_NOT_FOUND);
+ gcc_assert (ref != LCC_NOT_FOUND);
lto_output_sleb128_stream (ob->main_stream, ref);
ref = lto_cgraph_encoder_lookup (encoder, edge->callee);
- gcc_assert (ref != LCC_NOT_FOUND);
+ gcc_assert (ref != LCC_NOT_FOUND);
lto_output_sleb128_stream (ob->main_stream, ref);
lto_output_sleb128_stream (ob->main_stream, edge->count);
@@ -197,15 +197,15 @@ lto_output_node (struct lto_simple_output_block *ob, struct cgraph_node *node,
case AVAIL_LOCAL:
tag = LTO_cgraph_avail_node;
break;
-
+
case AVAIL_OVERWRITABLE:
tag = LTO_cgraph_overwritable_node;
break;
-
+
default:
gcc_unreachable ();
}
-
+
if (boundary_p)
tag = LTO_cgraph_unavail_node;
@@ -221,7 +221,7 @@ lto_output_node (struct lto_simple_output_block *ob, struct cgraph_node *node,
Boundary nodes: There are nodes that are not part of SET but are
called from within SET. We artificially make them look like
- externally visible nodes with no function body.
+ externally visible nodes with no function body.
Cherry-picked nodes: These are nodes we pulled from other
translation units into SET during IPA-inlining. We make them as
@@ -273,15 +273,15 @@ lto_output_node (struct lto_simple_output_block *ob, struct cgraph_node *node,
if (tag != LTO_cgraph_unavail_node)
{
- lto_output_sleb128_stream (ob->main_stream,
+ lto_output_sleb128_stream (ob->main_stream,
node->local.inline_summary.estimated_self_stack_size);
- lto_output_sleb128_stream (ob->main_stream,
+ lto_output_sleb128_stream (ob->main_stream,
node->local.inline_summary.self_size);
- lto_output_sleb128_stream (ob->main_stream,
+ lto_output_sleb128_stream (ob->main_stream,
node->local.inline_summary.size_inlining_benefit);
- lto_output_sleb128_stream (ob->main_stream,
+ lto_output_sleb128_stream (ob->main_stream,
node->local.inline_summary.self_time);
- lto_output_sleb128_stream (ob->main_stream,
+ lto_output_sleb128_stream (ob->main_stream,
node->local.inline_summary.time_inlining_benefit);
}
@@ -488,9 +488,9 @@ input_overwrite_node (struct lto_file_decl_data *file_data,
}
-/* Read a node from input_block IB. TAG is the node's tag just read.
+/* Read a node from input_block IB. TAG is the node's tag just read.
   Return the node read or overwritten. */
-
+
static struct cgraph_node *
input_node (struct lto_file_decl_data *file_data,
struct lto_input_block *ib,
@@ -529,7 +529,7 @@ input_node (struct lto_file_decl_data *file_data,
node->count = lto_input_sleb128 (ib);
bp = lto_input_bitpack (ib);
-
+
if (tag != LTO_cgraph_unavail_node)
{
stack_size = lto_input_sleb128 (ib);
@@ -650,7 +650,7 @@ input_cgraph_1 (struct lto_file_decl_data *file_data,
{
if (tag == LTO_cgraph_edge)
input_edge (ib, nodes);
- else
+ else
{
node = input_node (file_data, ib, tag);
if (node == NULL || node->decl == NULL_TREE)
@@ -736,18 +736,18 @@ input_cgraph (void)
size_t len;
struct lto_input_block *ib;
- ib = lto_create_simple_input_block (file_data, LTO_section_cgraph,
+ ib = lto_create_simple_input_block (file_data, LTO_section_cgraph,
&data, &len);
input_profile_summary (ib);
file_data->cgraph_node_encoder = lto_cgraph_encoder_new ();
input_cgraph_1 (file_data, ib);
- lto_destroy_simple_input_block (file_data, LTO_section_cgraph,
+ lto_destroy_simple_input_block (file_data, LTO_section_cgraph,
ib, data, len);
-
+
/* Assume that every file read needs to be processed by LTRANS. */
if (flag_wpa)
lto_mark_file_for_ltrans (file_data);
- }
+ }
/* Clear out the aux field that was used to store enough state to
tell which nodes should be overwritten. */
diff --git a/gcc/lto-compress.c b/gcc/lto-compress.c
index 8d745f6a047..e29ded397fe 100644
--- a/gcc/lto-compress.c
+++ b/gcc/lto-compress.c
@@ -245,7 +245,7 @@ lto_uncompress_block (struct lto_compression_stream *stream,
}
/* Finalize STREAM uncompression, and free stream allocations.
-
+
Because of the way LTO IL streams are compressed, there may be several
concatenated compressed segments in the accumulated data, so for this
function we iterate decompressions until no data remains. */
diff --git a/gcc/lto-opts.c b/gcc/lto-opts.c
index fd485b9db60..f96dfab522d 100644
--- a/gcc/lto-opts.c
+++ b/gcc/lto-opts.c
@@ -308,7 +308,7 @@ lto_write_options (void)
header.lto_header.major_version = LTO_major_version;
header.lto_header.minor_version = LTO_minor_version;
header.lto_header.section_type = LTO_section_opts;
-
+
header.compressed_size = 0;
header.main_size = stream.total_size;
diff --git a/gcc/lto-section-in.c b/gcc/lto-section-in.c
index da2384e0839..9277b12005f 100644
--- a/gcc/lto-section-in.c
+++ b/gcc/lto-section-in.c
@@ -59,7 +59,7 @@ const char *lto_section_name[LTO_N_SECTION_TYPES] =
"opts"
};
-unsigned char
+unsigned char
lto_input_1_unsigned (struct lto_input_block *ib)
{
if (ib->p >= ib->len)
@@ -72,7 +72,7 @@ lto_input_1_unsigned (struct lto_input_block *ib)
/* Read an ULEB128 Number of IB. */
-unsigned HOST_WIDE_INT
+unsigned HOST_WIDE_INT
lto_input_uleb128 (struct lto_input_block *ib)
{
unsigned HOST_WIDE_INT result = 0;
@@ -92,7 +92,7 @@ lto_input_uleb128 (struct lto_input_block *ib)
/* HOST_WIDEST_INT version of lto_input_uleb128. IB is as in
lto_input_uleb128. */
-unsigned HOST_WIDEST_INT
+unsigned HOST_WIDEST_INT
lto_input_widest_uint_uleb128 (struct lto_input_block *ib)
{
unsigned HOST_WIDEST_INT result = 0;
@@ -111,7 +111,7 @@ lto_input_widest_uint_uleb128 (struct lto_input_block *ib)
/* Read an SLEB128 Number of IB. */
-HOST_WIDE_INT
+HOST_WIDE_INT
lto_input_sleb128 (struct lto_input_block *ib)
{
HOST_WIDE_INT result = 0;
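These reader declarations only lose trailing whitespace here; for reference, LEB128 packs seven payload bits per byte with the high bit as a continuation flag. A minimal, self-contained decoder sketch (not the GCC implementation) follows.

#include <stddef.h>

/* Minimal unsigned LEB128 decoder: least-significant 7-bit group first,
   high bit of each byte set while more bytes follow.  */
static unsigned long
read_uleb128 (const unsigned char *buf, size_t *pos)
{
  unsigned long result = 0;
  int shift = 0;
  unsigned char byte;

  do
    {
      byte = buf[(*pos)++];
      result |= (unsigned long) (byte & 0x7f) << shift;
      shift += 7;
    }
  while (byte & 0x80);

  return result;
}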
@@ -137,7 +137,7 @@ lto_input_sleb128 (struct lto_input_block *ib)
/* Hooks so that the ipa passes can call into the lto front end to get
sections. */
-static struct lto_file_decl_data ** file_decl_data;
+static struct lto_file_decl_data ** file_decl_data;
static lto_get_section_data_f* get_section_f;
static lto_free_section_data_f* free_section_f;
@@ -146,8 +146,8 @@ static lto_free_section_data_f* free_section_f;
used by the ipa passes to get the data that they will
deserialize. */
-void
-lto_set_in_hooks (struct lto_file_decl_data ** data,
+void
+lto_set_in_hooks (struct lto_file_decl_data ** data,
lto_get_section_data_f* get_f,
lto_free_section_data_f* free_f)
{
@@ -206,9 +206,9 @@ struct lto_data_header
returned. */
const char *
-lto_get_section_data (struct lto_file_decl_data *file_data,
+lto_get_section_data (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
- const char *name,
+ const char *name,
size_t *len)
{
const char *data = (get_section_f) (file_data, section_type, name, len);
@@ -233,9 +233,9 @@ lto_get_section_data (struct lto_file_decl_data *file_data,
header = (struct lto_data_header *) xmalloc (header_length);
header->data = data;
header->len = *len;
-
+
buffer.data = (char *) header;
- buffer.length = header_length;
+ buffer.length = header_length;
stream = lto_start_uncompression (lto_append_data, &buffer);
lto_uncompress_block (stream, data, *len);
@@ -250,8 +250,8 @@ lto_get_section_data (struct lto_file_decl_data *file_data,
parameters are the same as above. DATA is the data to be freed and
LEN is the length of that data. */
-void
-lto_free_section_data (struct lto_file_decl_data *file_data,
+void
+lto_free_section_data (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
const char *name,
const char *data,
@@ -285,16 +285,16 @@ lto_free_section_data (struct lto_file_decl_data *file_data,
used to free the section. Return NULL if the section is not present. */
struct lto_input_block *
-lto_create_simple_input_block (struct lto_file_decl_data *file_data,
+lto_create_simple_input_block (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
const char **datar, size_t *len)
{
const char *data = lto_get_section_data (file_data, section_type, NULL, len);
- const struct lto_simple_header * header
+ const struct lto_simple_header * header
= (const struct lto_simple_header *) data;
struct lto_input_block* ib_main;
- int32_t main_offset = sizeof (struct lto_simple_header);
+ int32_t main_offset = sizeof (struct lto_simple_header);
if (!data)
return NULL;
@@ -316,7 +316,7 @@ lto_create_simple_input_block (struct lto_file_decl_data *file_data,
that call. */
void
-lto_destroy_simple_input_block (struct lto_file_decl_data *file_data,
+lto_destroy_simple_input_block (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
struct lto_input_block *ib,
const char *data, size_t len)
diff --git a/gcc/lto-section-out.c b/gcc/lto-section-out.c
index e347027c709..895394f3a1b 100644
--- a/gcc/lto-section-out.c
+++ b/gcc/lto-section-out.c
@@ -76,7 +76,7 @@ lto_delete_extern_inline_states (void)
/* Force all the functions in DECLS to be output as extern inline.
DECLS is a bitmap indexed by DECL_UID. */
-
+
void
lto_force_functions_extern_inline (bitmap decls)
{
@@ -345,7 +345,7 @@ lto_output_uleb128_stream (struct lto_output_stream *obs,
while (work != 0);
}
-/* Identical to output_uleb128_stream above except using unsigned
+/* Identical to output_uleb128_stream above except using unsigned
HOST_WIDEST_INT type. For efficiency on host where unsigned HOST_WIDEST_INT
is not native, we only use this if we know that HOST_WIDE_INT is not wide
enough. */
@@ -448,7 +448,7 @@ lto_output_field_decl_index (struct lto_out_decl_state *decl_state,
/* Output a function DECL to OBS. */
void
-lto_output_fn_decl_index (struct lto_out_decl_state *decl_state,
+lto_output_fn_decl_index (struct lto_out_decl_state *decl_state,
struct lto_output_stream * obs, tree decl)
{
unsigned int index;
@@ -539,9 +539,9 @@ lto_destroy_simple_output_block (struct lto_simple_output_block *ob)
header.lto_header.major_version = LTO_major_version;
header.lto_header.minor_version = LTO_minor_version;
header.lto_header.section_type = LTO_section_cgraph;
-
+
header.compressed_size = 0;
-
+
header.main_size = ob->main_stream->total_size;
header_stream = XCNEW (struct lto_output_stream);
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index 751e70472e4..9559869b5ba 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -140,7 +140,7 @@ input_string_internal (struct data_in *data_in, struct lto_input_block *ib,
unsigned int len;
unsigned int loc;
const char *result;
-
+
loc = lto_input_uleb128 (ib);
LTO_INIT_INPUT_BLOCK (str_tab, data_in->strings, loc, data_in->strings_len);
len = lto_input_uleb128 (&str_tab);
@@ -148,7 +148,7 @@ input_string_internal (struct data_in *data_in, struct lto_input_block *ib,
if (str_tab.p + len > data_in->strings_len)
internal_error ("bytecode stream: string too long for the string table");
-
+
result = (const char *)(data_in->strings + str_tab.p);
return result;
@@ -220,7 +220,7 @@ input_record_start (struct lto_input_block *ib)
{
enum LTO_tags tag = (enum LTO_tags) lto_input_uleb128 (ib);
return tag;
-}
+}
/* Lookup STRING in file_name_hash_table. If found, return the existing
@@ -311,7 +311,7 @@ lto_input_location (struct lto_input_block *ib, struct data_in *data_in)
function scope for the read tree. */
static tree
-lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
struct function *fn, enum LTO_tags tag)
{
unsigned HOST_WIDE_INT ix_u;
@@ -606,7 +606,7 @@ input_eh_regions (struct lto_input_block *ib, struct data_in *data_in,
HOST_WIDE_INT i, root_region, len;
enum LTO_tags tag;
static bool eh_initialized_p = false;
-
+
tag = input_record_start (ib);
if (tag == LTO_null)
return;
@@ -718,7 +718,7 @@ make_new_block (struct function *fn, unsigned int index)
/* Read the CFG for function FN from input block IB. */
-static void
+static void
input_cfg (struct lto_input_block *ib, struct function *fn)
{
unsigned int bb_count;
@@ -729,7 +729,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn)
init_empty_tree_cfg_for_function (fn);
init_ssa_operands ();
- profile_status_for_function (fn) =
+ profile_status_for_function (fn) =
(enum profile_status_d) lto_input_uleb128 (ib);
bb_count = lto_input_uleb128 (ib);
@@ -740,7 +740,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn)
basic_block_info_for_function (fn), bb_count);
if (bb_count > VEC_length (basic_block, label_to_block_map_for_function (fn)))
- VEC_safe_grow_cleared (basic_block, gc,
+ VEC_safe_grow_cleared (basic_block, gc,
label_to_block_map_for_function (fn), bb_count);
index = lto_input_sleb128 (ib);
@@ -771,7 +771,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn)
dest = BASIC_BLOCK_FOR_FUNCTION (fn, dest_index);
- if (dest == NULL)
+ if (dest == NULL)
dest = make_new_block (fn, dest_index);
e = make_edge (bb, dest, edge_flags);
@@ -822,10 +822,10 @@ input_phi (struct lto_input_block *ib, basic_block bb, struct data_in *data_in,
int src_index = lto_input_uleb128 (ib);
location_t arg_loc = lto_input_location (ib, data_in);
basic_block sbb = BASIC_BLOCK_FOR_FUNCTION (fn, src_index);
-
+
edge e = NULL;
int j;
-
+
for (j = 0; j < len; j++)
if (EDGE_PRED (bb, j)->src == sbb)
{
@@ -833,7 +833,7 @@ input_phi (struct lto_input_block *ib, basic_block bb, struct data_in *data_in,
break;
}
- add_phi_arg (result, def, e, arg_loc);
+ add_phi_arg (result, def, e, arg_loc);
}
return result;
@@ -870,7 +870,7 @@ input_ssa_names (struct lto_input_block *ib, struct data_in *data_in,
set_default_def (SSA_NAME_VAR (ssa_name), ssa_name);
i = lto_input_uleb128 (ib);
- }
+ }
}
@@ -1146,12 +1146,12 @@ input_gimple_stmt (struct lto_input_block *ib, struct data_in *data_in,
return stmt;
}
-
+
/* Read a basic block with tag TAG from DATA_IN using input block IB.
FN is the function being processed. */
static void
-input_bb (struct lto_input_block *ib, enum LTO_tags tag,
+input_bb (struct lto_input_block *ib, enum LTO_tags tag,
struct data_in *data_in, struct function *fn)
{
unsigned int index;
@@ -1257,7 +1257,7 @@ fixup_call_stmt_edges (struct cgraph_node *orig, gimple *stmts)
/* Read the body of function FN_DECL from DATA_IN using input block IB. */
static void
-input_function (tree fn_decl, struct data_in *data_in,
+input_function (tree fn_decl, struct data_in *data_in,
struct lto_input_block *ib)
{
struct function *fn;
@@ -1303,7 +1303,7 @@ input_function (tree fn_decl, struct data_in *data_in,
/* Read all function arguments. We need to re-map them here to the
arguments of the merged function declaration. */
- args = lto_input_tree (ib, data_in);
+ args = lto_input_tree (ib, data_in);
for (oarg = args, narg = DECL_ARGUMENTS (fn_decl);
oarg && narg;
oarg = TREE_CHAIN (oarg), narg = TREE_CHAIN (narg))
@@ -1363,7 +1363,7 @@ input_function (tree fn_decl, struct data_in *data_in,
fixup_call_stmt_edges (node, stmts);
execute_all_ipa_stmt_fixups (node, stmts);
- update_ssa (TODO_update_ssa_only_virtuals);
+ update_ssa (TODO_update_ssa_only_virtuals);
free_dominance_info (CDI_DOMINATORS);
free_dominance_info (CDI_POST_DOMINATORS);
free (stmts);
@@ -1390,7 +1390,7 @@ input_alias_pairs (struct lto_input_block *ib, struct data_in *data_in)
{
const char *orig_name, *new_name;
alias_pair *p;
-
+
p = VEC_safe_push (alias_pair, gc, alias_pairs, NULL);
p->decl = var;
p->target = lto_input_tree (ib, data_in);
@@ -1414,7 +1414,7 @@ input_alias_pairs (struct lto_input_block *ib, struct data_in *data_in)
section type is LTO_section_function_body, FN must be the decl for
that function. */
-static void
+static void
lto_read_body (struct lto_file_decl_data *file_data, tree fn_decl,
const char *data, enum lto_section_type section_type)
{
@@ -1427,7 +1427,7 @@ lto_read_body (struct lto_file_decl_data *file_data, tree fn_decl,
struct lto_input_block ib_main;
header = (const struct lto_function_header *) data;
- cfg_offset = sizeof (struct lto_function_header);
+ cfg_offset = sizeof (struct lto_function_header);
main_offset = cfg_offset + header->cfg_size;
string_offset = main_offset + header->main_size;
@@ -1440,7 +1440,7 @@ lto_read_body (struct lto_file_decl_data *file_data, tree fn_decl,
data + main_offset,
0,
header->main_size);
-
+
data_in = lto_data_in_create (file_data, data + string_offset,
header->string_size, NULL);
@@ -1482,7 +1482,7 @@ lto_read_body (struct lto_file_decl_data *file_data, tree fn_decl,
pop_cfun ();
}
- else
+ else
{
input_alias_pairs (&ib_main, data_in);
}
@@ -1495,7 +1495,7 @@ lto_read_body (struct lto_file_decl_data *file_data, tree fn_decl,
/* Read the body of FN_DECL using DATA. FILE_DATA holds the global
decls and types. */
-void
+void
lto_input_function_body (struct lto_file_decl_data *file_data,
tree fn_decl, const char *data)
{
@@ -1507,7 +1507,7 @@ lto_input_function_body (struct lto_file_decl_data *file_data,
/* Read in VAR_DECL using DATA. FILE_DATA holds the global decls and
types. */
-void
+void
lto_input_constructors_and_inits (struct lto_file_decl_data *file_data,
const char *data)
{
@@ -1586,7 +1586,7 @@ unpack_ts_real_cst_value_fields (struct bitpack_d *bp, tree expr)
unsigned i;
REAL_VALUE_TYPE r;
REAL_VALUE_TYPE *rp;
-
+
r.cl = (unsigned) bp_unpack_value (bp, 2);
r.decimal = (unsigned) bp_unpack_value (bp, 1);
r.sign = (unsigned) bp_unpack_value (bp, 1);
@@ -1609,7 +1609,7 @@ static void
unpack_ts_fixed_cst_value_fields (struct bitpack_d *bp, tree expr)
{
struct fixed_value fv;
-
+
fv.data.low = (HOST_WIDE_INT) bp_unpack_value (bp, HOST_BITS_PER_WIDE_INT);
fv.data.high = (HOST_WIDE_INT) bp_unpack_value (bp, HOST_BITS_PER_WIDE_INT);
TREE_FIXED_CST (expr) = fv;
@@ -1954,7 +1954,7 @@ lto_input_chain (struct lto_input_block *ib, struct data_in *data_in)
{
int i, count;
tree first, prev, curr;
-
+
first = prev = NULL_TREE;
count = lto_input_sleb128 (ib);
for (i = 0; i < count; i++)
@@ -1972,7 +1972,7 @@ lto_input_chain (struct lto_input_block *ib, struct data_in *data_in)
return first;
}
-
+
/* Read all pointer fields in the TS_COMMON structure of EXPR from input
block IB. DATA_IN contains tables and descriptors for the
file being read. */
@@ -2073,7 +2073,7 @@ lto_input_ts_decl_with_vis_tree_pointers (struct lto_input_block *ib,
struct data_in *data_in, tree expr)
{
tree id;
-
+
id = lto_input_tree (ib, data_in);
if (id)
{
@@ -2417,7 +2417,7 @@ lto_register_var_decl_in_symtab (struct data_in *data_in, tree decl)
confuse any attempt to unmangle it. */
const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
char *label;
-
+
ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label));
rest_of_decl_compilation (decl, 1, 0);
@@ -2462,7 +2462,7 @@ lto_register_function_decl_in_symtab (struct data_in *data_in, tree decl)
attempt to unmangle it. */
const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
char *label;
-
+
ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label));
@@ -2484,11 +2484,11 @@ lto_register_function_decl_in_symtab (struct data_in *data_in, tree decl)
/* Also register the reverse mapping so that we can find the
new name given to an existing assembler name (used when
- restoring alias pairs in input_constructors_or_inits. */
+ restoring alias pairs in input_constructors_or_inits. */
lto_record_renamed_decl (data_in->file_data,
IDENTIFIER_POINTER (new_assembler_name),
IDENTIFIER_POINTER (old_assembler_name));
- }
+ }
}
/* If this variable has already been declared, queue the
@@ -2663,7 +2663,7 @@ lto_input_tree (struct lto_input_block *ib, struct data_in *data_in)
{
enum LTO_tags tag;
tree result;
-
+
tag = input_record_start (ib);
gcc_assert ((unsigned) tag < (unsigned) LTO_NUM_TAGS);
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 1c5f9103934..7389081182b 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -86,7 +86,7 @@ eq_string_slot_node (const void *p1, const void *p2)
/* Free the string slot pointed-to by P. */
-static void
+static void
string_slot_free (void *p)
{
struct string_slot *slot = (struct string_slot *) p;
@@ -365,7 +365,7 @@ pack_ts_real_cst_value_fields (struct bitpack_d *bp, tree expr)
{
unsigned i;
REAL_VALUE_TYPE r;
-
+
r = TREE_REAL_CST (expr);
bp_pack_value (bp, r.cl, 2);
bp_pack_value (bp, r.decimal, 1);
@@ -758,7 +758,7 @@ static void
lto_output_chain (struct output_block *ob, tree t, bool ref_p)
{
int i, count;
-
+
count = list_length (t);
output_sleb128 (ob, count);
for (i = 0; i < count; i++)
@@ -1612,7 +1612,7 @@ static void
output_phi (struct output_block *ob, gimple phi)
{
unsigned i, len = gimple_phi_num_args (phi);
-
+
output_record_start (ob, lto_gimple_code_to_tag (GIMPLE_PHI));
output_uleb128 (ob, SSA_NAME_VERSION (PHI_RESULT (phi)));
@@ -1732,7 +1732,7 @@ output_bb (struct output_block *ob, basic_block bb, struct function *fn)
gimple stmt = gsi_stmt (bsi);
output_gimple_stmt (ob, stmt);
-
+
/* Emit the EH region holding STMT. */
region = lookup_stmt_eh_lp_fn (fn, stmt);
if (region != 0)
@@ -1785,14 +1785,14 @@ produce_asm (struct output_block *ob, tree fn)
/* The entire header is stream computed here. */
memset (&header, 0, sizeof (struct lto_function_header));
-
+
/* Write the header. */
header.lto_header.major_version = LTO_major_version;
header.lto_header.minor_version = LTO_minor_version;
header.lto_header.section_type = section_type;
-
+
header.compressed_size = 0;
-
+
if (section_type == LTO_section_function_body)
header.cfg_size = ob->cfg_stream->total_size;
header.main_size = ob->main_stream->total_size;
@@ -2037,7 +2037,7 @@ copy_function (struct cgraph_node *node)
VEC_safe_push (tree, heap, encoder->trees, trees[j]);
encoder->next_index = n;
}
-
+
lto_free_section_data (file_data, LTO_section_function_body, name,
data, len);
free (output_stream);
@@ -2122,7 +2122,7 @@ struct ipa_opt_pass_d pass_ipa_lto_gimple_out =
};
-/* Write each node in encoded by ENCODER to OB, as well as those reachable
+/* Write each node in encoded by ENCODER to OB, as well as those reachable
from it and required for correct representation of its semantics.
Each node in ENCODER must be a global declaration or a type. A node
is written only once, even if it appears multiple times in the
@@ -2230,7 +2230,7 @@ lto_output_decl_state_refs (struct output_block *ob,
unsigned i;
int32_t ref;
tree decl;
-
+
/* Write reference to FUNCTION_DECL. If there is not function,
write reference to void_type_node. */
decl = (state->fn_decl) ? state->fn_decl : void_type_node;
@@ -2442,7 +2442,7 @@ produce_asm_for_decls (cgraph_node_set set)
needed. */
output_unreferenced_globals (set);
- memset (&header, 0, sizeof (struct lto_decl_header));
+ memset (&header, 0, sizeof (struct lto_decl_header));
section_name = lto_get_section_name (LTO_section_decls, NULL);
lto_begin_section (section_name, !flag_wpa);
@@ -2488,7 +2488,7 @@ produce_asm_for_decls (cgraph_node_set set)
lto_output_data_stream (header_stream, &header, sizeof header);
lto_write_stream (header_stream);
free (header_stream);
-
+
/* Write the main out-decl state, followed by out-decl states of
functions. */
decl_state_stream = ((struct lto_output_stream *)
@@ -2504,7 +2504,7 @@ produce_asm_for_decls (cgraph_node_set set)
lto_output_decl_state_refs (ob, decl_state_stream, fn_out_state);
}
lto_write_stream (decl_state_stream);
- free(decl_state_stream);
+ free(decl_state_stream);
lto_write_stream (ob->main_stream);
lto_write_stream (ob->string_stream);
diff --git a/gcc/lto-streamer.c b/gcc/lto-streamer.c
index 5b925db504e..01664f7bce5 100644
--- a/gcc/lto-streamer.c
+++ b/gcc/lto-streamer.c
@@ -190,7 +190,7 @@ print_lto_report (void)
fprintf (stderr, "[%s] # of input files: "
HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);
- fprintf (stderr, "[%s] # of input cgraph nodes: "
+ fprintf (stderr, "[%s] # of input cgraph nodes: "
HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
lto_stats.num_input_cgraph_nodes);
@@ -544,7 +544,7 @@ lto_streamer_cache_insert_1 (struct lto_streamer_cache_d *cache,
*ix_p = ix;
if (offset_p)
- *offset_p = offset;
+ *offset_p = offset;
return existed_p;
}
@@ -697,7 +697,7 @@ lto_get_common_nodes (void)
These should be assured in pass_ipa_free_lang_data. */
gcc_assert (fileptr_type_node == ptr_type_node);
gcc_assert (TYPE_MAIN_VARIANT (fileptr_type_node) == ptr_type_node);
-
+
seen_nodes = pointer_set_create ();
/* Skip itk_char. char_type_node is shared with the appropriately
@@ -816,7 +816,7 @@ gate_lto_out (void)
session can be started on both reader and writer using ORIG_T
as a breakpoint value in both sessions.
- Note that this mapping is transient and only valid while T is
+ Note that this mapping is transient and only valid while T is
being reconstructed. Once T is fully built, the mapping is
removed. */
diff --git a/gcc/lto-streamer.h b/gcc/lto-streamer.h
index c3880b61efa..703d6fabbff 100644
--- a/gcc/lto-streamer.h
+++ b/gcc/lto-streamer.h
@@ -61,7 +61,7 @@ along with GCC; see the file COPYING3. If not see
entry, there is word with the offset within the section to the
entry.
- 7) THE LABEL NAMES.
+ 7) THE LABEL NAMES.
   Since most labels do not have names, this section may be of zero
length. It consists of an array of string table references, one
@@ -70,18 +70,18 @@ along with GCC; see the file COPYING3. If not see
the negative ones do not. The positive index can be used to
find the name in this array.
- 9) THE CFG.
+ 9) THE CFG.
10) Index into the local decls. Since local decls can have local
decls inside them, they must be read in randomly in order to
- properly restore them.
+ properly restore them.
11-12) GIMPLE FOR THE LOCAL DECLS AND THE FUNCTION BODY.
The gimple consists of a set of records.
THE FUNCTION
-
+
At the top level of (8) is the function. It consists of five
pieces:
@@ -172,7 +172,7 @@ struct bitpack_d
(GIMPLE statements, basic blocks, EH regions, tree nodes, etc).
NOTE, when adding new LTO tags, also update lto_tag_name. */
-enum LTO_tags
+enum LTO_tags
{
LTO_null = 0,
@@ -284,7 +284,7 @@ DEF_VEC_ALLOC_I(ld_plugin_symbol_resolution_t, heap);
/* Macro to define convenience functions for type and decl streams
- in lto_file_decl_data. */
+ in lto_file_decl_data. */
#define DEFINE_DECL_STREAM_FUNCS(UPPER_NAME, name) \
static inline tree \
lto_file_decl_data_get_ ## name (struct lto_file_decl_data *data, \
@@ -307,17 +307,17 @@ lto_file_decl_data_num_ ## name ## s (struct lto_file_decl_data *data) \
or function. The first parameter is the file data that contains
the information. The second parameter is the type of information
to be obtained. The third parameter is the name of the function
- and is only used when finding a function body; otherwise it is
+ and is only used when finding a function body; otherwise it is
NULL. The fourth parameter is the length of the data returned. */
-typedef const char* (lto_get_section_data_f) (struct lto_file_decl_data *,
+typedef const char* (lto_get_section_data_f) (struct lto_file_decl_data *,
enum lto_section_type,
- const char *,
+ const char *,
size_t *);
/* Return the data found from the above call. The first three
parameters are the same as above. The fourth parameter is the data
   itself and the fifth is the length of the data. */
-typedef void (lto_free_section_data_f) (struct lto_file_decl_data *,
+typedef void (lto_free_section_data_f) (struct lto_file_decl_data *,
enum lto_section_type,
const char *,
const char *,
@@ -357,7 +357,7 @@ struct lto_streamer_cache_d
/* Structure used as buffer for reading an LTO file. */
-struct lto_input_block
+struct lto_input_block
{
const char *data;
unsigned int p;
@@ -673,7 +673,7 @@ struct data_in
/* Number of named labels. Used to find the index of unnamed labels
since they share space with the named labels. */
- unsigned int num_named_labels;
+ unsigned int num_named_labels;
/* Number of unnamed labels. */
unsigned int num_unnamed_labels;
@@ -692,13 +692,13 @@ struct data_in
/* In lto-section-in.c */
extern struct lto_input_block * lto_create_simple_input_block (
- struct lto_file_decl_data *,
+ struct lto_file_decl_data *,
enum lto_section_type, const char **, size_t *);
extern void
-lto_destroy_simple_input_block (struct lto_file_decl_data *,
+lto_destroy_simple_input_block (struct lto_file_decl_data *,
enum lto_section_type,
struct lto_input_block *, const char *, size_t);
-extern void lto_set_in_hooks (struct lto_file_decl_data **,
+extern void lto_set_in_hooks (struct lto_file_decl_data **,
lto_get_section_data_f *,
lto_free_section_data_f *);
extern struct lto_file_decl_data **lto_get_file_decl_data (void);
diff --git a/gcc/lto-symtab.c b/gcc/lto-symtab.c
index 3b2823bb12e..ab96b48cd36 100644
--- a/gcc/lto-symtab.c
+++ b/gcc/lto-symtab.c
@@ -86,7 +86,7 @@ lto_symtab_entry_eq (const void *p1, const void *p2)
}
/* Returns non-zero if P points to an lto_symtab_entry_def struct that needs
- to be marked for GC. */
+ to be marked for GC. */
static int
lto_symtab_entry_marked_p (const void *p)
diff --git a/gcc/lto-wpa-fixup.c b/gcc/lto-wpa-fixup.c
index 469af5a267c..0839aa9df9a 100644
--- a/gcc/lto-wpa-fixup.c
+++ b/gcc/lto-wpa-fixup.c
@@ -73,7 +73,7 @@ static bitmap lto_nothrow_fndecls;
#include "a.h"
a::a() {}
a::~a() {}
-
+
When main.cc is compiled, gcc only sees the constructor declaration, so
the constructor and hence the call to it are marked as exception throwing.
When a.cc is compiled, the body of the constructor is available and is
@@ -111,7 +111,7 @@ lto_fixup_nothrow_decls (void)
gcc_assert (call_stmt);
if (lookup_stmt_eh_lp_fn (caller_function, call_stmt) != 0)
remove_stmt_from_eh_lp_fn (caller_function, call_stmt);
- }
+ }
}
}
@@ -123,7 +123,7 @@ lto_mark_nothrow_fndecl (tree fndecl)
gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
if (!lto_nothrow_fndecls)
lto_nothrow_fndecls = lto_bitmap_alloc ();
-
+
bitmap_set_bit (lto_nothrow_fndecls, DECL_UID (fndecl));
}
@@ -153,7 +153,7 @@ lto_output_wpa_fixup (cgraph_node_set set)
{
struct cgraph_edge *e;
struct cgraph_node *n;
-
+
n = csi_node (csi);
fndecl = n->decl;
@@ -183,7 +183,7 @@ lto_output_wpa_fixup (cgraph_node_set set)
}
/* Write out number of DECLs, followed by the DECLs. */
- count = VEC_length (tree, decls);
+ count = VEC_length (tree, decls);
lto_output_uleb128_stream (ob->main_stream, count);
for (i = 0; i < count; i++)
{
diff --git a/gcc/matrix-reorg.c b/gcc/matrix-reorg.c
index d2687b86e47..71c7419a8f5 100644
--- a/gcc/matrix-reorg.c
+++ b/gcc/matrix-reorg.c
@@ -2,7 +2,7 @@
Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
Contributed by Razya Ladelsky <razya@il.ibm.com>
Originally written by Revital Eres and Mustafa Hagog.
-
+
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/*
- Matrix flattening optimization tries to replace a N-dimensional
+ Matrix flattening optimization tries to replace a N-dimensional
matrix with its equivalent M-dimensional matrix, where M < N.
This first implementation focuses on global matrices defined dynamically.
@@ -43,31 +43,31 @@ along with GCC; see the file COPYING3. If not see
and transformation.
The driver of the optimization is matrix_reorg ().
-
-
+
+
Analysis phase:
===============
- We'll number the dimensions outside-in, meaning the most external
- is 0, then 1, and so on.
- The analysis part of the optimization determines K, the escape
- level of a N-dimensional matrix (K <= N), that allows flattening of
+ We'll number the dimensions outside-in, meaning the most external
+ is 0, then 1, and so on.
+ The analysis part of the optimization determines K, the escape
+ level of a N-dimensional matrix (K <= N), that allows flattening of
the external dimensions 0,1,..., K-1. Escape level 0 means that the
whole matrix escapes and no flattening is possible.
-
- The analysis part is implemented in analyze_matrix_allocation_site()
+
+ The analysis part is implemented in analyze_matrix_allocation_site()
and analyze_matrix_accesses().
Transformation phase:
=====================
- In this phase we define the new flattened matrices that replace the
- original matrices in the code.
- Implemented in transform_allocation_sites(),
- transform_access_sites().
+ In this phase we define the new flattened matrices that replace the
+ original matrices in the code.
+ Implemented in transform_allocation_sites(),
+ transform_access_sites().
Matrix Transposing
==================
- The idea of Matrix Transposing is organizing the matrix in a different
+ The idea of Matrix Transposing is organizing the matrix in a different
layout such that the dimensions are reordered.
This could produce better cache behavior in some cases.
@@ -77,7 +77,7 @@ along with GCC; see the file COPYING3. If not see
for (j=0; j<M; j++)
access to a[i][j]
- This loop can produce good cache behavior because the elements of
+ This loop can produce good cache behavior because the elements of
the inner dimension are accessed sequentially.
However, if the accesses of the matrix were of the following form:
@@ -86,27 +86,27 @@ along with GCC; see the file COPYING3. If not see
for (j=0; j<M; j++)
access to a[j][i]
- In this loop we iterate the columns and not the rows.
- Therefore, replacing the rows and columns
+ In this loop we iterate the columns and not the rows.
+ Therefore, replacing the rows and columns
would have had an organization with better (cache) locality.
Replacing the dimensions of the matrix is called matrix transposing.
- This example, of course, could be enhanced to multiple dimensions matrices
+ This example, of course, could be enhanced to multiple dimensions matrices
as well.
- Since a program could include all kind of accesses, there is a decision
- mechanism, implemented in analyze_transpose(), which implements a
+ Since a program could include all kind of accesses, there is a decision
+ mechanism, implemented in analyze_transpose(), which implements a
heuristic that tries to determine whether to transpose the matrix or not,
according to the form of the more dominant accesses.
- This decision is transferred to the flattening mechanism, and whether
+ This decision is transferred to the flattening mechanism, and whether
the matrix was transposed or not, the matrix is flattened (if possible).
-
+
This decision making is based on profiling information and loop information.
- If profiling information is available, decision making mechanism will be
+ If profiling information is available, decision making mechanism will be
operated, otherwise the matrix will only be flattened (if possible).
- Both optimizations are described in the paper "Matrix flattening and
- transposing in GCC" which was presented in GCC summit 2006.
+ Both optimizations are described in the paper "Matrix flattening and
+ transposing in GCC" which was presented in GCC summit 2006.
http://www.gccsummit.org/2006/2006-GCC-Summit-Proceedings.pdf. */
#include "config.h"
@@ -291,7 +291,7 @@ struct matrix_info
*/
tree *dimension_size;
- /* An array which holds for each dimension it's original size
+ /* An array which holds for each dimension it's original size
(before transposing and flattening take place). */
tree *dimension_size_orig;
@@ -308,7 +308,7 @@ struct matrix_info
elements are of type "struct access_site_info *". */
VEC (access_site_info_p, heap) * access_l;
- /* A map of how the dimensions will be organized at the end of
+ /* A map of how the dimensions will be organized at the end of
the analyses. */
int *dim_map;
};
@@ -408,7 +408,7 @@ mtt_info_eq (const void *mtt1, const void *mtt2)
return false;
}
-/* Return false if STMT may contain a vector expression.
+/* Return false if STMT may contain a vector expression.
In this situation, all matrices should not be flattened. */
static bool
may_flatten_matrices_1 (gimple stmt)
@@ -452,7 +452,7 @@ may_flatten_matrices_1 (gimple stmt)
return true;
}
-/* Return false if there are hand-written vectors in the program.
+/* Return false if there are hand-written vectors in the program.
We disable the flattening in such a case. */
static bool
may_flatten_matrices (struct cgraph_node *node)
@@ -687,7 +687,7 @@ ssa_accessed_in_assign_rhs (gimple stmt, struct ssa_acc_in_tree *a)
}
}
-/* Record the access/allocation site information for matrix MI so we can
+/* Record the access/allocation site information for matrix MI so we can
handle it later in transformation. */
static void
record_access_alloc_site_info (struct matrix_info *mi, gimple stmt, tree offset,
@@ -747,7 +747,7 @@ add_allocation_site (struct matrix_info *mi, gimple stmt, int level)
else
{
mark_min_matrix_escape_level (mi, level, stmt);
- /* cannot be that (level == min_malloc_level)
+ /* cannot be that (level == min_malloc_level)
we would have returned earlier. */
return;
}
@@ -785,7 +785,7 @@ add_allocation_site (struct matrix_info *mi, gimple stmt, int level)
will hold the size for each dimension; each malloc that allocates a
dimension has the size parameter; we use that parameter to
initialize the dimension size variable so we can use it later in
- the address calculations. LEVEL is the dimension we're inspecting.
+ the address calculations. LEVEL is the dimension we're inspecting.
Return if STMT is related to an allocation site. */
static void
@@ -840,12 +840,12 @@ analyze_matrix_allocation_site (struct matrix_info *mi, gimple stmt,
return;
}
}
- /* This is a call to malloc of level 'level'.
- mi->max_malloced_level-1 == level means that we've
- seen a malloc statement of level 'level' before.
- If the statement is not the same one that we've
- seen before, then there's another malloc statement
- for the same level, which means that we need to mark
+ /* This is a call to malloc of level 'level'.
+ mi->max_malloced_level-1 == level means that we've
+ seen a malloc statement of level 'level' before.
+ If the statement is not the same one that we've
+ seen before, then there's another malloc statement
+ for the same level, which means that we need to mark
it escaping. */
if (mi->malloc_for_level
&& mi->max_malloced_level-1 == level
@@ -864,26 +864,26 @@ analyze_matrix_allocation_site (struct matrix_info *mi, gimple stmt,
}
/* The transposing decision making.
- In order to to calculate the profitability of transposing, we collect two
+ In order to to calculate the profitability of transposing, we collect two
types of information regarding the accesses:
1. profiling information used to express the hotness of an access, that
- is how often the matrix is accessed by this access site (count of the
- access site).
+ is how often the matrix is accessed by this access site (count of the
+ access site).
2. which dimension in the access site is iterated by the inner
most loop containing this access.
- The matrix will have a calculated value of weighted hotness for each
+ The matrix will have a calculated value of weighted hotness for each
dimension.
- Intuitively the hotness level of a dimension is a function of how
- many times it was the most frequently accessed dimension in the
+ Intuitively the hotness level of a dimension is a function of how
+ many times it was the most frequently accessed dimension in the
highly executed access sites of this matrix.
As computed by following equation:
- m n
- __ __
- \ \ dim_hot_level[i] +=
+ m n
+ __ __
+ \ \ dim_hot_level[i] +=
/_ /_
- j i
+ j i
acc[j]->dim[i]->iter_by_inner_loop * count(j)
Where n is the number of dims and m is the number of the matrix
@@ -957,7 +957,7 @@ analyze_transpose (void **slot, void *data ATTRIBUTE_UNUSED)
return 1;
}
-/* Find the index which defines the OFFSET from base.
+/* Find the index which defines the OFFSET from base.
We walk from use to def until we find how the offset was defined. */
static tree
get_index_from_offset (tree offset, gimple def_stmt)
@@ -1043,9 +1043,9 @@ update_type_size (struct matrix_info *mi, gimple stmt, tree ssa_var,
}
}
-/* USE_STMT represents a GIMPLE_CALL, where one of the arguments is the
- ssa var that we want to check because it came from some use of matrix
- MI. CURRENT_INDIRECT_LEVEL is the indirection level we reached so
+/* USE_STMT represents a GIMPLE_CALL, where one of the arguments is the
+ ssa var that we want to check because it came from some use of matrix
+ MI. CURRENT_INDIRECT_LEVEL is the indirection level we reached so
far. */
static int
@@ -1122,14 +1122,14 @@ analyze_accesses_for_call_stmt (struct matrix_info *mi, tree ssa_var,
return current_indirect_level;
}
-/* USE_STMT represents a phi node of the ssa var that we want to
- check because it came from some use of matrix
+/* USE_STMT represents a phi node of the ssa var that we want to
+ check because it came from some use of matrix
MI.
We check all the escaping levels that get to the PHI node
and make sure they are all the same escaping;
if not (which is rare) we let the escaping level be the
minimum level that gets into that PHI because starting from
- that level we cannot expect the behavior of the indirections.
+ that level we cannot expect the behavior of the indirections.
CURRENT_INDIRECT_LEVEL is the indirection level we reached so far. */
static void
@@ -1186,8 +1186,8 @@ analyze_accesses_for_phi_node (struct matrix_info *mi, gimple use_stmt,
}
}
-/* USE_STMT represents an assign statement (the rhs or lhs include
- the ssa var that we want to check because it came from some use of matrix
+/* USE_STMT represents an assign statement (the rhs or lhs include
+ the ssa var that we want to check because it came from some use of matrix
MI. CURRENT_INDIRECT_LEVEL is the indirection level we reached so far. */
static int
@@ -1246,7 +1246,7 @@ analyze_accesses_for_assign_stmt (struct matrix_info *mi, tree ssa_var,
}
return current_indirect_level;
}
- /* Now, check the right-hand-side, to see how the SSA variable
+ /* Now, check the right-hand-side, to see how the SSA variable
is used. */
if (rhs_acc.var_found)
{
@@ -1317,8 +1317,8 @@ analyze_accesses_for_assign_stmt (struct matrix_info *mi, tree ssa_var,
/* One exception is when we are storing to the matrix
variable itself; this is the case of malloc, we must make
- sure that it's the one and only one call to malloc so
- we call analyze_matrix_allocation_site to check
+ sure that it's the one and only one call to malloc so
+ we call analyze_matrix_allocation_site to check
this out. */
if (TREE_CODE (lhs) != VAR_DECL || lhs != mi->decl)
mark_min_matrix_escape_level (mi, current_indirect_level,
@@ -1344,7 +1344,7 @@ analyze_accesses_for_assign_stmt (struct matrix_info *mi, tree ssa_var,
return current_indirect_level;
}
-/* Given a SSA_VAR (coming from a use statement of the matrix MI),
+/* Given a SSA_VAR (coming from a use statement of the matrix MI),
follow its uses and level of indirection and find out the minimum
indirection level it escapes in (the highest dimension) and the maximum
level it is accessed in (this will be the actual dimension of the
@@ -1400,7 +1400,7 @@ analyze_matrix_accesses (struct matrix_info *mi, tree ssa_var,
}
}
-typedef struct
+typedef struct
{
tree fn;
gimple stmt;
@@ -1457,7 +1457,7 @@ can_calculate_stmt_before_stmt (gimple stmt, sbitmap visited)
case GIMPLE_ASSIGN:
code = gimple_assign_rhs_code (stmt);
op1 = gimple_assign_rhs1 (stmt);
-
+
switch (code)
{
case POINTER_PLUS_EXPR:
@@ -1804,12 +1804,12 @@ compute_offset (HOST_WIDE_INT orig, HOST_WIDE_INT new_val, tree result)
according to the following equation:
a[I1][I2]...[Ik] , where D1..Dk is the length of each dimension and the
- escaping level is m <= k, and a' is the new allocated matrix,
+ escaping level is m <= k, and a' is the new allocated matrix,
will be translated to :
-
+
b[I(m+1)]...[Ik]
-
- where
+
+ where
b = a' + I1*D2...*Dm + I2*D3...Dm + ... + Im
*/
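To make the translation above concrete: with a fully flattened three-dimensional matrix (escape level m equal to the number of dimensions) the chain of loads collapses into a single indexed access, and the transposing decision weighs each dimension by its inner-loop accesses, following the summation quoted earlier in this file's comments. A minimal sketch — the names a_flat, D2, D3 and the three-level shape are illustrative, not taken from the pass:

/* a[i][j][k] with all three levels flattened; D2 and D3 are the sizes
   of the two inner dimensions, kept in the globals the pass creates.  */
elem = a_flat[i * D2 * D3 + j * D3 + k];

/* Accumulating the per-dimension hotness used by analyze_transpose (),
   as in the equation given in the earlier comment:  */
for (j = 0; j < m; j++)       /* m access sites  */
  for (i = 0; i < n; i++)     /* n dimensions    */
    dim_hot_level[i] += acc[j]->dim[i]->iter_by_inner_loop * count (j);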
@@ -2017,7 +2017,7 @@ sort_dim_hot_level (gcov_type * a, int *dim_map, int n)
Make sure that we hold the size in the malloc site inside a
new global variable; this way we ensure that the size doesn't
change and it is accessible from all the other functions that
- uses the matrix. Also, the original calls to free are deleted,
+ uses the matrix. Also, the original calls to free are deleted,
and replaced by a new call to free the flattened matrix. */
static int
@@ -2413,7 +2413,7 @@ gate_matrix_reorg (void)
return flag_ipa_matrix_reorg && flag_whole_program;
}
-struct simple_ipa_opt_pass pass_ipa_matrix_reorg =
+struct simple_ipa_opt_pass pass_ipa_matrix_reorg =
{
{
SIMPLE_IPA_PASS,
diff --git a/gcc/mcf.c b/gcc/mcf.c
index 30f3070da33..af993fba057 100644
--- a/gcc/mcf.c
+++ b/gcc/mcf.c
@@ -1019,7 +1019,7 @@ find_augmenting_path (fixup_graph_type *fixup_graph,
2. Find an augmenting path form source to sink.
3. Send flow equal to the path's residual capacity along the edges of this path.
4. Repeat steps 2 and 3 until no new augmenting path is found.
-
+
Parameters:
SOURCE: index of source vertex (input)
SINK: index of sink vertex (input)
@@ -1239,9 +1239,9 @@ adjust_cfg_counts (fixup_graph_type *fixup_graph)
fprintf (dump_file, " = " HOST_WIDEST_INT_PRINT_DEC "\t(%.1f%%)\n",
e->count, e->probability * 100.0 / REG_BR_PROB_BASE);
}
- }
+ }
- ENTRY_BLOCK_PTR->count = sum_edge_counts (ENTRY_BLOCK_PTR->succs);
+ ENTRY_BLOCK_PTR->count = sum_edge_counts (ENTRY_BLOCK_PTR->succs);
EXIT_BLOCK_PTR->count = sum_edge_counts (EXIT_BLOCK_PTR->preds);
/* Compute edge probabilities. */
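A minimal sketch of steps 2 and 3 above — find the bottleneck residual capacity of one augmenting path and push that much flow along it. The edge fields here are simplified stand-ins, not the fixup-graph structures mcf.c actually uses:

/* path[] holds the edges of one augmenting path from source to sink.  */
gcov_type bottleneck = path[0]->residual;
for (i = 1; i < path_len; i++)
  if (path[i]->residual < bottleneck)
    bottleneck = path[i]->residual;

for (i = 0; i < path_len; i++)
  {
    path[i]->flow += bottleneck;       /* push flow forward          */
    path[i]->residual -= bottleneck;   /* and shrink the residual;   */
    /* a full implementation also credits the reverse edge.  */
  }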
diff --git a/gcc/mode-switching.c b/gcc/mode-switching.c
index 5df61c890a2..140c513918e 100644
--- a/gcc/mode-switching.c
+++ b/gcc/mode-switching.c
@@ -377,7 +377,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
last_insn = return_copy;
}
while (nregs);
-
+
/* If we didn't see a full return value copy, verify that there
is a plausible reason for this. If some, but not all of the
return register is likely spilled, we can expect that there
@@ -397,7 +397,7 @@ create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
failures, so let it pass. */
|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
&& nregs != 1));
-
+
if (INSN_P (last_insn))
{
before_return_copy
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index fb6f548b0c9..cc9f788a8a7 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -96,7 +96,7 @@ along with GCC; see the file COPYING3. If not see
Currently SMS relies on the do-loop pattern to recognize such loops,
where (1) the control part comprises of all insns defining and/or
using a certain 'count' register and (2) the loop count can be
- adjusted by modifying this register prior to the loop.
+ adjusted by modifying this register prior to the loop.
TODO: Rely on cfgloop analysis instead. */
/* This page defines partial-schedule structures and functions for
@@ -168,7 +168,7 @@ struct undo_replace_buff_elem
};
-
+
static partial_schedule_ptr create_partial_schedule (int ii, ddg_ptr, int history);
static void free_partial_schedule (partial_schedule_ptr);
static void reset_partial_schedule (partial_schedule_ptr, int new_ii);
@@ -275,7 +275,7 @@ static struct haifa_sched_info sms_sched_info =
NULL, NULL,
0, 0,
- NULL, NULL, NULL,
+ NULL, NULL, NULL,
0
};
@@ -373,8 +373,8 @@ static int
res_MII (ddg_ptr g)
{
if (targetm.sched.sms_res_mii)
- return targetm.sched.sms_res_mii (g);
-
+ return targetm.sched.sms_res_mii (g);
+
return ((g->num_nodes - g->num_debug) / issue_rate);
}
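A worked instance of the resource bound just above, with illustrative numbers only: a loop body of 11 DDG nodes of which 3 are debug insns, on a target with issue_rate == 2, gives

  res_MII = (11 - 3) / 2 = 4

so no kernel can initiate a new iteration more often than every 4 cycles from issue-width pressure alone; the recurrence-based bound may raise the final MII further.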
@@ -705,7 +705,7 @@ generate_prolog_epilog (partial_schedule_ptr ps, struct loop *loop,
int i;
int last_stage = PS_STAGE_COUNT (ps) - 1;
edge e;
-
+
/* Generate the prolog, inserting its insns on the loop-entry edge. */
start_sequence ();
@@ -727,7 +727,7 @@ generate_prolog_epilog (partial_schedule_ptr ps, struct loop *loop,
for (i = 0; i < last_stage; i++)
duplicate_insns_of_cycles (ps, 0, i, 1, count_reg);
-
+
/* Put the prolog on the entry edge. */
e = loop_preheader_edge (loop);
split_edge_and_insert (e, get_insns ());
@@ -739,7 +739,7 @@ generate_prolog_epilog (partial_schedule_ptr ps, struct loop *loop,
for (i = 0; i < last_stage; i++)
duplicate_insns_of_cycles (ps, i + 1, last_stage, 0, count_reg);
-
+
/* Put the epilogue on the exit edge. */
gcc_assert (single_exit (loop));
e = single_exit (loop);
@@ -809,7 +809,7 @@ loop_canon_p (struct loop *loop)
if (dump_file)
{
rtx insn = BB_END (loop->header);
-
+
fprintf (dump_file, "SMS loop many exits ");
fprintf (dump_file, " %s %d (file, line)\n",
insn_file (insn), insn_line (insn));
@@ -822,7 +822,7 @@ loop_canon_p (struct loop *loop)
if (dump_file)
{
rtx insn = BB_END (loop->header);
-
+
fprintf (dump_file, "SMS loop many BBs. ");
fprintf (dump_file, " %s %d (file, line)\n",
insn_file (insn), insn_line (insn));
@@ -1011,7 +1011,7 @@ sms_schedule (void)
/* Don't handle BBs with calls or barriers, or !single_set insns,
or auto-increment insns (to avoid creating invalid reg-moves
- for the auto-increment insns).
+ for the auto-increment insns).
??? Should handle auto-increment insns.
??? Should handle insns defining subregs. */
for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
@@ -1160,7 +1160,7 @@ sms_schedule (void)
if (ps){
stage_count = PS_STAGE_COUNT (ps);
gcc_assert(stage_count >= 1);
- }
+ }
/* Stage count of 1 means that there is no interleaving between
iterations, let the scheduling passes do the job. */
@@ -1198,14 +1198,14 @@ sms_schedule (void)
the closing_branch was scheduled and should appear in the last (ii-1)
row. Otherwise, we are free to schedule the branch, and we let nodes
that were scheduled at the first PS_MIN_CYCLE cycle appear in the first
- row; this should reduce stage_count to minimum.
+ row; this should reduce stage_count to minimum.
TODO: Revisit the issue of scheduling the insns of the
control part relative to the branch when the control part
has more than one insn. */
normalize_sched_times (ps);
rotate_partial_schedule (ps, PS_MIN_CYCLE (ps));
set_columns_for_ps (ps);
-
+
canon_loop (loop);
/* case the BCT count is not known , Do loop-versioning */
@@ -1241,7 +1241,7 @@ sms_schedule (void)
print_node_sched_params (dump_file, g->num_nodes, g);
/* Generate prolog and epilog. */
generate_prolog_epilog (ps, loop, count_reg, count_init);
-
+
free_undo_replace_buff (reg_move_replaces);
}
@@ -1377,7 +1377,7 @@ get_sched_window (partial_schedule_ptr ps, int *nodes_order, int i,
ddg_node_ptr v_node = e->src;
if (dump_file)
- {
+ {
fprintf (dump_file, "\nProcessing edge: ");
print_ddg_edge (dump_file, e);
fprintf (dump_file,
@@ -1395,7 +1395,7 @@ get_sched_window (partial_schedule_ptr ps, int *nodes_order, int i,
MAX (early_start, p_st + e->latency - (e->distance * ii));
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
"pred st = %d; early_start = %d; latency: %d",
p_st, early_start, e->latency);
@@ -1444,7 +1444,7 @@ get_sched_window (partial_schedule_ptr ps, int *nodes_order, int i,
s_st - e->latency + (e->distance * ii));
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
"succ st = %d; late_start = %d; latency = %d",
s_st, late_start, e->latency);
@@ -1503,7 +1503,7 @@ get_sched_window (partial_schedule_ptr ps, int *nodes_order, int i,
- (e->distance * ii));
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
"pred st = %d; early_start = %d; latency = %d",
p_st, early_start, e->latency);
@@ -1541,7 +1541,7 @@ get_sched_window (partial_schedule_ptr ps, int *nodes_order, int i,
+ (e->distance * ii));
if (dump_file)
- fprintf (dump_file,
+ fprintf (dump_file,
"succ st = %d; late_start = %d; latency = %d",
s_st, late_start, e->latency);
@@ -1665,7 +1665,7 @@ calculate_must_precede_follow (ddg_node_ptr u_node, int start, int end,
&& e->latency == 0
we use the fact that latency is non-negative:
SCHED_TIME (e->dest) + (e->distance * ii) >=
- SCHED_TIME (e->dest) - e->latency + (e->distance * ii)) >=
+ SCHED_TIME (e->dest) - e->latency + (e->distance * ii)) >=
last_cycle_in_window
and check only if
SCHED_TIME (e->dest) + (e->distance * ii) == last_cycle_in_window */
@@ -2080,10 +2080,10 @@ check_nodes_order (int *node_order, int num_nodes)
SET_BIT (tmp, u);
}
-
+
if (dump_file)
fprintf (dump_file, "\n");
-
+
sbitmap_free (tmp);
}
@@ -2622,8 +2622,8 @@ ps_insn_find_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
}
/* Advances the PS_INSN one column in its current row; returns false
- in failure and true in success. Bit N is set in MUST_FOLLOW if
- the node with cuid N must be come after the node pointed to by
+ in failure and true in success. Bit N is set in MUST_FOLLOW if
+ the node with cuid N must be come after the node pointed to by
PS_I when scheduled in the same cycle. */
static int
ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
@@ -2671,9 +2671,9 @@ ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
}
/* Inserts a DDG_NODE to the given partial schedule at the given cycle.
- Returns 0 if this is not possible and a PS_INSN otherwise. Bit N is
- set in MUST_PRECEDE/MUST_FOLLOW if the node with cuid N must be come
- before/after (respectively) the node pointed to by PS_I when scheduled
+ Returns 0 if this is not possible and a PS_INSN otherwise. Bit N is
+ set in MUST_PRECEDE/MUST_FOLLOW if the node with cuid N must be come
+ before/after (respectively) the node pointed to by PS_I when scheduled
in the same cycle. */
static ps_insn_ptr
add_node_to_ps (partial_schedule_ptr ps, ddg_node_ptr node, int cycle,
@@ -2774,8 +2774,8 @@ ps_has_conflicts (partial_schedule_ptr ps, int from, int to)
/* Checks if the given node causes resource conflicts when added to PS at
cycle C. If not the node is added to PS and returned; otherwise zero
- is returned. Bit N is set in MUST_PRECEDE/MUST_FOLLOW if the node with
- cuid N must be come before/after (respectively) the node pointed to by
+ is returned. Bit N is set in MUST_PRECEDE/MUST_FOLLOW if the node with
+ cuid N must be come before/after (respectively) the node pointed to by
PS_I when scheduled in the same cycle. */
ps_insn_ptr
ps_add_node_check_conflicts (partial_schedule_ptr ps, ddg_node_ptr n,
diff --git a/gcc/omega.c b/gcc/omega.c
index e307ba29057..ff7da4e1609 100644
--- a/gcc/omega.c
+++ b/gcc/omega.c
@@ -1,5 +1,5 @@
-/* Source code for an implementation of the Omega test, an integer
- programming algorithm for dependence analysis, by William Pugh,
+/* Source code for an implementation of the Omega test, an integer
+ programming algorithm for dependence analysis, by William Pugh,
appeared in Supercomputing '91 and CACM Aug 92.
This code has no license restrictions, and is considered public
@@ -507,7 +507,7 @@ omega_pretty_print_problem (FILE *file, omega_pb pb)
none, le, lt
} partial_order_type;
- partial_order_type **po = XNEWVEC (partial_order_type *,
+ partial_order_type **po = XNEWVEC (partial_order_type *,
OMEGA_MAX_VARS * OMEGA_MAX_VARS);
int **po_eq = XNEWVEC (int *, OMEGA_MAX_VARS * OMEGA_MAX_VARS);
int *last_links = XNEWVEC (int, OMEGA_MAX_VARS);
@@ -673,7 +673,7 @@ omega_pretty_print_problem (FILE *file, omega_pb pb)
}
fprintf (file, "%s", omega_variable_to_str (pb, chain[0]));
-
+
for (multiprint = false, i = 1; i < m; i++)
{
v = chain[i - 1];
@@ -1310,7 +1310,7 @@ verify_omega_pb (omega_pb pb)
omega_copy_problem (tmp_problem, pb);
tmp_problem->safe_vars = 0;
tmp_problem->num_subs = 0;
-
+
for (e = pb->num_geqs - 1; e >= 0; e--)
if (pb->geqs[e].color == omega_red)
{
@@ -1358,7 +1358,7 @@ verify_omega_pb (omega_pb pb)
static void
adding_equality_constraint (omega_pb pb, int e)
{
- if (original_problem != no_problem
+ if (original_problem != no_problem
&& original_problem != pb
&& !conservative)
{
@@ -1525,7 +1525,7 @@ normalize_omega_problem (omega_pb pb)
{
i = packing[i0];
pb->geqs[e].coef[i] = pb->geqs[e].coef[i] / g;
- hashCode = hashCode * hash_key_multiplier * (i + 3)
+ hashCode = hashCode * hash_key_multiplier * (i + 3)
+ pb->geqs[e].coef[i];
}
}
@@ -1643,7 +1643,7 @@ normalize_omega_problem (omega_pb pb)
}
if (pb->geqs[e2].coef[0] == -cTerm
- && (create_color
+ && (create_color
|| pb->geqs[e].color == omega_black))
{
omega_copy_eqn (&pb->eqs[pb->num_eqs], &pb->geqs[e],
@@ -1685,7 +1685,7 @@ normalize_omega_problem (omega_pb pb)
e2 = fast_lookup[MAX_KEYS + eKey];
- if (e2 < e && pb->geqs[e2].key == eKey
+ if (e2 < e && pb->geqs[e2].key == eKey
&& pb->geqs[e2].color == omega_black)
{
if (pb->geqs[e2].coef[0] > cTerm)
@@ -1834,7 +1834,7 @@ cleanout_wildcards (omega_pb pb)
for (e2 = pb->num_eqs - 1; e2 >= 0; e2--)
if (e != e2 && pb->eqs[e2].coef[i]
&& (pb->eqs[e2].color == omega_red
- || (pb->eqs[e2].color == omega_black
+ || (pb->eqs[e2].color == omega_black
&& pb->eqs[e].color == omega_black)))
{
eqn eqn = &(pb->eqs[e2]);
@@ -1853,9 +1853,9 @@ cleanout_wildcards (omega_pb pb)
}
for (e2 = pb->num_geqs - 1; e2 >= 0; e2--)
- if (pb->geqs[e2].coef[i]
+ if (pb->geqs[e2].coef[i]
&& (pb->geqs[e2].color == omega_red
- || (pb->eqs[e].color == omega_black
+ || (pb->eqs[e].color == omega_black
&& pb->geqs[e2].color == omega_black)))
{
eqn eqn = &(pb->geqs[e2]);
@@ -1875,9 +1875,9 @@ cleanout_wildcards (omega_pb pb)
}
for (e2 = pb->num_subs - 1; e2 >= 0; e2--)
- if (pb->subs[e2].coef[i]
+ if (pb->subs[e2].coef[i]
&& (pb->subs[e2].color == omega_red
- || (pb->subs[e2].color == omega_black
+ || (pb->subs[e2].color == omega_black
&& pb->eqs[e].color == omega_black)))
{
eqn eqn = &(pb->subs[e2]);
@@ -1975,7 +1975,7 @@ omega_unprotect_1 (omega_pb pb, int *idx, bool *unprotect)
static void
resurrect_subs (omega_pb pb)
{
- if (pb->num_subs > 0
+ if (pb->num_subs > 0
&& please_no_equalities_in_simplified_problems == 0)
{
int i, e, n, m;
@@ -2132,7 +2132,7 @@ omega_eliminate_redundant (omega_pb pb, bool expensive)
continue;
foundPQ:
- pz = ((zeqs[e1] & zeqs[e2]) | (peqs[e1] & neqs[e2])
+ pz = ((zeqs[e1] & zeqs[e2]) | (peqs[e1] & neqs[e2])
| (neqs[e1] & peqs[e2]));
pp = peqs[e1] | peqs[e2];
pn = neqs[e1] | neqs[e2];
@@ -2162,7 +2162,7 @@ omega_eliminate_redundant (omega_pb pb, bool expensive)
if (alpha3 > 0)
{
/* Trying to prove e3 is redundant. */
- if (!implies (peqs[e3], pp)
+ if (!implies (peqs[e3], pp)
|| !implies (neqs[e3], pn))
goto nextE3;
@@ -2206,7 +2206,7 @@ omega_eliminate_redundant (omega_pb pb, bool expensive)
/* Trying to prove e3 <= 0 and therefore e3 = 0,
or trying to prove e3 < 0, and therefore the
problem has no solutions. */
- if (!implies (peqs[e3], pn)
+ if (!implies (peqs[e3], pn)
|| !implies (neqs[e3], pp))
goto nextE3;
@@ -2267,7 +2267,7 @@ omega_eliminate_redundant (omega_pb pb, bool expensive)
fprintf (dump_file, "\n\n");
}
- omega_copy_eqn (&pb->eqs[pb->num_eqs++],
+ omega_copy_eqn (&pb->eqs[pb->num_eqs++],
&pb->geqs[e3], pb->num_vars);
gcc_assert (pb->num_eqs <= OMEGA_MAX_EQS);
adding_equality_constraint (pb, pb->num_eqs - 1);
@@ -2469,12 +2469,12 @@ coalesce (omega_pb pb)
is_dead[e] = false;
for (e = 0; e < pb->num_geqs; e++)
- if (pb->geqs[e].color == omega_red
+ if (pb->geqs[e].color == omega_red
&& !pb->geqs[e].touched)
for (e2 = e + 1; e2 < pb->num_geqs; e2++)
- if (!pb->geqs[e2].touched
+ if (!pb->geqs[e2].touched
&& pb->geqs[e].key == -pb->geqs[e2].key
- && pb->geqs[e].coef[0] == -pb->geqs[e2].coef[0]
+ && pb->geqs[e].coef[0] == -pb->geqs[e2].coef[0]
&& pb->geqs[e2].color == omega_red)
{
omega_copy_eqn (&pb->eqs[pb->num_eqs++], &pb->geqs[e],
@@ -2527,7 +2527,7 @@ omega_eliminate_red (omega_pb pb, bool eliminate_all)
for (e = pb->num_geqs - 1; e >= 0; e--)
if (pb->geqs[e].color == omega_black && !is_dead[e])
for (e2 = e - 1; e2 >= 0; e2--)
- if (pb->geqs[e2].color == omega_black
+ if (pb->geqs[e2].color == omega_black
&& !is_dead[e2])
{
a = 0;
@@ -2557,7 +2557,7 @@ omega_eliminate_red (omega_pb pb, bool eliminate_all)
for (e3 = pb->num_geqs - 1; e3 >= 0; e3--)
if (pb->geqs[e3].color == omega_red)
{
- alpha1 = (pb->geqs[e2].coef[j] * pb->geqs[e3].coef[i]
+ alpha1 = (pb->geqs[e2].coef[j] * pb->geqs[e3].coef[i]
- pb->geqs[e2].coef[i] * pb->geqs[e3].coef[j]);
alpha2 = -(pb->geqs[e].coef[j] * pb->geqs[e3].coef[i]
- pb->geqs[e].coef[i] * pb->geqs[e3].coef[j]);
@@ -2577,7 +2577,7 @@ omega_eliminate_red (omega_pb pb, bool eliminate_all)
for (k = pb->num_vars; k >= 0; k--)
{
- c = (alpha1 * pb->geqs[e].coef[k]
+ c = (alpha1 * pb->geqs[e].coef[k]
+ alpha2 * pb->geqs[e2].coef[k]);
if (c != a * pb->geqs[e3].coef[k])
@@ -2743,7 +2743,7 @@ static void
omega_problem_reduced (omega_pb pb)
{
if (omega_verify_simplification
- && !in_approximate_mode
+ && !in_approximate_mode
&& verify_omega_pb (pb) == omega_false)
return;
@@ -2756,7 +2756,7 @@ omega_problem_reduced (omega_pb pb)
if (!please_no_equalities_in_simplified_problems)
coalesce (pb);
- if (omega_reduce_with_subs
+ if (omega_reduce_with_subs
|| please_no_equalities_in_simplified_problems)
chain_unprotect (pb);
else
@@ -3448,7 +3448,7 @@ omega_solve_eq (omega_pb pb, enum omega_result desired_res)
j = 0;
for (i = pb->num_vars; i != sv; i--)
- if (pb->eqs[e].coef[i] != 0
+ if (pb->eqs[e].coef[i] != 0
&& factor > abs (pb->eqs[e].coef[i]) + 1)
{
factor = abs (pb->eqs[e].coef[i]) + 1;
@@ -3591,7 +3591,7 @@ omega_solve_geq (omega_pb pb, enum omega_result desired_res)
c = int_div (c, -a);
if (upper_bound > c
- || (upper_bound == c
+ || (upper_bound == c
&& !omega_eqn_is_red (&pb->geqs[e], desired_res)))
{
upper_bound = c;
@@ -3857,9 +3857,9 @@ omega_solve_geq (omega_pb pb, enum omega_result desired_res)
lucky = (diff >= (Uc - 1) * (Lc - 1));
}
- if (maxC == 1
- || minC == -1
- || lucky
+ if (maxC == 1
+ || minC == -1
+ || lucky
|| in_approximate_mode)
{
neweqns = score = upper_bound_count * lower_bound_count;
@@ -3870,7 +3870,7 @@ omega_solve_geq (omega_pb pb, enum omega_result desired_res)
"\nlucky = %d, in_approximate_mode=%d \n",
omega_variable_to_str (pb, i),
upper_bound_count,
- lower_bound_count, minC, maxC, lucky,
+ lower_bound_count, minC, maxC, lucky,
in_approximate_mode);
if (!exact
@@ -4163,9 +4163,9 @@ omega_solve_geq (omega_pb pb, enum omega_result desired_res)
{
constantTerm = -int_div (constantTerm, coefficient);
- if (constantTerm > lower_bound
- || (constantTerm == lower_bound
- && (desired_res != omega_simplify
+ if (constantTerm > lower_bound
+ || (constantTerm == lower_bound
+ && (desired_res != omega_simplify
|| (pb->geqs[Ue].color == omega_black
&& pb->geqs[Le].color == omega_black))))
{
@@ -4285,7 +4285,7 @@ omega_solve_geq (omega_pb pb, enum omega_result desired_res)
}
else
{
- if (!conservative
+ if (!conservative
&& (desired_res != omega_simplify
|| (lb_color == omega_black
&& ub_color == omega_black))
@@ -4415,7 +4415,7 @@ omega_solve_geq (omega_pb pb, enum omega_result desired_res)
pb->geqs[e2].coef[n_vars + 1] = 0;
pb->geqs[e2].touched = 1;
- if (pb->geqs[Ue].color == omega_red
+ if (pb->geqs[Ue].color == omega_red
|| pb->geqs[Le].color == omega_red)
pb->geqs[e2].color = omega_red;
else
@@ -4803,7 +4803,7 @@ omega_solve_problem (omega_pb pb, enum omega_result desired_res)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
- fprintf (dump_file,
+ fprintf (dump_file,
"Solve depth = %d, in_approximate_mode = %d, aborting\n",
omega_solve_depth, in_approximate_mode);
omega_print_problem (dump_file, pb);
@@ -4831,7 +4831,7 @@ omega_solve_problem (omega_pb pb, enum omega_result desired_res)
if (!omega_reduce_with_subs)
{
resurrect_subs (pb);
- gcc_assert (please_no_equalities_in_simplified_problems
+ gcc_assert (please_no_equalities_in_simplified_problems
|| !result || pb->num_subs == 0);
}
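For a sense of what these solver entry points decide, here is a hypothetical dependence query of the kind the Omega test serves (not code from this file): whether a[2*i] and a[2*j+1] can overlap inside for (i = 0; i < n; i++) reduces to the integer system

  0 <= i < n,  0 <= j < n,  2*i == 2*j + 1

which has no integer solution (even versus odd), so omega_solve_problem () reports omega_false and the two references are independent.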
@@ -5117,7 +5117,7 @@ omega_unprotect_variable (omega_pb pb, int var)
{
for (e = pb->num_geqs - 1; e >= 0; e--)
{
- pb->geqs[e].coef[pb->num_vars] =
+ pb->geqs[e].coef[pb->num_vars] =
pb->geqs[e].coef[pb->safe_vars];
pb->geqs[e].coef[pb->safe_vars] = 0;
@@ -5310,7 +5310,7 @@ omega_query_variable (omega_pb pb, int i, int *lower_bound, int *upper_bound)
continue;
else
{
- *lower_bound = *upper_bound =
+ *lower_bound = *upper_bound =
-pb->eqs[e].coef[i] * pb->eqs[e].coef[0];
return false;
}
@@ -5425,7 +5425,7 @@ omega_query_variable_bounds (omega_pb pb, int i, int *l, int *u)
|| (pb->num_vars == 1 && pb->forwarding_address[i] == 1))
return false;
- if (abs (pb->forwarding_address[i]) == 1
+ if (abs (pb->forwarding_address[i]) == 1
&& pb->num_vars + pb->num_subs == 2
&& pb->num_eqs + pb->num_subs == 1)
{
diff --git a/gcc/omega.h b/gcc/omega.h
index 02c17987dee..e75466ec097 100644
--- a/gcc/omega.h
+++ b/gcc/omega.h
@@ -1,5 +1,5 @@
-/* Source code for an implementation of the Omega test, an integer
- programming algorithm for dependence analysis, by William Pugh,
+/* Source code for an implementation of the Omega test, an integer
+ programming algorithm for dependence analysis, by William Pugh,
appeared in Supercomputing '91 and CACM Aug 92.
This code has no license restrictions, and is considered public
@@ -52,7 +52,7 @@ enum omega_result {
/* Values used for labeling equations. Private (not used outside the
solver). */
-enum omega_eqn_color {
+enum omega_eqn_color {
omega_black = 0,
omega_red = 1
};
@@ -76,7 +76,7 @@ typedef struct omega_pb_d
{
/* The number of variables in the system of equations. */
int num_vars;
-
+
/* Safe variables are not eliminated during the Fourier-Motzkin
simplification of the system. Safe variables are all those
variables that are placed at the beginning of the array of
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 5cd9463c122..ba4d76495c3 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -46,7 +46,7 @@ along with GCC; see the file COPYING3. If not see
#include "cfgloop.h"
-/* Lowering of OpenMP parallel and workshare constructs proceeds in two
+/* Lowering of OpenMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
@@ -72,7 +72,7 @@ typedef struct omp_context
struct omp_context *outer;
gimple stmt;
- /* Map variables to fields in a structure that allows communication
+ /* Map variables to fields in a structure that allows communication
between sending and receiving threads. */
splay_tree field_map;
tree record_type;
@@ -293,7 +293,7 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
else
loop = &dummy_loop;
-
+
loop->v = gimple_omp_for_index (for_stmt, i);
gcc_assert (SSA_VAR_P (loop->v));
gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
@@ -494,7 +494,7 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
# BLOCK 2 (PAR_ENTRY_BB)
.omp_data_o.i = i;
#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
-
+
# BLOCK 3 (WS_ENTRY_BB)
.omp_data_i = &.omp_data_o;
D.1667 = .omp_data_i->i;
@@ -1103,7 +1103,7 @@ dump_omp_region (FILE *file, struct omp_region *region, int indent)
fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
region->cont->index);
}
-
+
if (region->exit)
fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
region->exit->index);
@@ -1618,7 +1618,7 @@ create_omp_child_function (omp_context *ctx, bool task_copy)
DECL_ARGUMENTS (decl) = t;
}
- /* Allocate memory for the function structure. The call to
+ /* Allocate memory for the function structure. The call to
allocate_struct_function clobbers CFUN, so we need to restore
it afterward. */
push_struct_function (decl);
@@ -2237,7 +2237,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
/* Do all the fixed sized types in the first pass, and the variable sized
types in the second pass. This makes sure that the scalar arguments to
- the variable sized types are processed before we use them in the
+ the variable sized types are processed before we use them in the
variable sized operations. */
for (pass = 0; pass < 2; ++pass)
{
@@ -2385,7 +2385,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
/* ??? If VAR is not passed by reference, and the variable
hasn't been initialized yet, then we'll get a warning for
the store into the omp_data_s structure. Ideally, we'd be
- able to notice this and not store anything at all, but
+ able to notice this and not store anything at all, but
we're generating code too early. Suppress the warning. */
if (!by_ref)
TREE_NO_WARNING (var) = 1;
@@ -2887,7 +2887,7 @@ gimple_build_cond_empty (tree cond)
}
-/* Build the function calls to GOMP_parallel_start etc to actually
+/* Build the function calls to GOMP_parallel_start etc to actually
generate the parallel operation. REGION is the parallel region
being expanded. BB is the block where to insert the code. WS_ARGS
will be set if this is a call to a combined parallel+workshare
@@ -3494,7 +3494,7 @@ expand_omp_taskreg (struct omp_region *region)
}
/* Move the parallel region into CHILD_CFUN. */
-
+
if (gimple_in_ssa_p (cfun))
{
push_cfun (child_cfun);
@@ -3551,7 +3551,7 @@ expand_omp_taskreg (struct omp_region *region)
current_function_decl = save_current;
pop_cfun ();
}
-
+
/* Emit a library call to launch the children threads. */
if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
expand_parallel_call (region, new_bb, entry_stmt, ws_args);
@@ -4135,7 +4135,7 @@ expand_omp_for_static_nochunk (struct omp_region *region,
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
-
+
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
@@ -4199,7 +4199,7 @@ expand_omp_for_static_nochunk (struct omp_region *region,
false, GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (fd->loop.v, t);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
-
+
t = fold_convert (itype, e0);
t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
if (POINTER_TYPE_P (type))
@@ -4247,7 +4247,7 @@ expand_omp_for_static_nochunk (struct omp_region *region,
find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
-
+
set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
@@ -4333,7 +4333,7 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
-
+
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
@@ -4457,7 +4457,7 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
-
+
/* Remove GIMPLE_OMP_CONTINUE. */
gsi_remove (&si, true);
@@ -4925,17 +4925,17 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
location_t loc;
/* We expect to find the following sequences:
-
+
load_bb:
GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
store_bb:
val = tmp OP something; (or: something OP tmp)
- GIMPLE_OMP_STORE (val)
+ GIMPLE_OMP_STORE (val)
- ???FIXME: Allow a more flexible sequence.
+ ???FIXME: Allow a more flexible sequence.
Perhaps use data flow to pick the statements.
-
+
*/
gsi = gsi_after_labels (store_bb);
@@ -5152,7 +5152,7 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
}
/* Note that we always perform the comparison as an integer, even for
- floating point. This allows the atomic operation to properly
+ floating point. This allows the atomic operation to properly
succeed even with NaNs and -0.0. */
stmt = gimple_build_cond_empty
(build2 (NE_EXPR, boolean_type_node,
@@ -5193,18 +5193,18 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
references are within #pragma omp atomic directives. According to
responses received from omp@openmp.org, appears to be within spec.
Which makes sense, since that's how several other compilers handle
- this situation as well.
+ this situation as well.
LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
expanding. STORED_VAL is the operand of the matching
GIMPLE_OMP_ATOMIC_STORE.
- We replace
- GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
+ We replace
+ GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
loaded_val = *addr;
and replace
GIMPLE_OMP_ATOMIC_ATORE (stored_val) with
- *addr = stored_val;
+ *addr = stored_val;
*/
static bool
@@ -5243,12 +5243,12 @@ expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
return true;
}
-/* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
- using expand_omp_atomic_fetch_op. If it failed, we try to
+/* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
+ using expand_omp_atomic_fetch_op. If it failed, we try to
call expand_omp_atomic_pipeline, and if it fails too, the
ultimate fallback is wrapping the operation in a mutex
- (expand_omp_atomic_mutex). REGION is the atomic region built
- by build_omp_regions_1(). */
+ (expand_omp_atomic_mutex). REGION is the atomic region built
+ by build_omp_regions_1(). */
static void
expand_omp_atomic (struct omp_region *region)
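Roughly, the three fallbacks named above correspond to the following expansions of "#pragma omp atomic x += v;" — a sketch assuming the target provides the usual __sync builtins; the exact insns and library calls emitted depend on the target:

/* 1) expand_omp_atomic_fetch_op: a single fetch-and-op operation.  */
__sync_fetch_and_add (&x, v);

/* 2) expand_omp_atomic_pipeline: a compare-and-swap retry loop.  */
do
  old = x;
while (!__sync_bool_compare_and_swap (&x, old, old + v));

/* 3) expand_omp_atomic_mutex: serialize through libgomp's global lock.  */
GOMP_atomic_start ();
x = x + v;
GOMP_atomic_end ();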
@@ -5511,7 +5511,7 @@ gate_expand_omp (void)
return (flag_openmp != 0 && errorcount == 0);
}
-struct gimple_opt_pass pass_expand_omp =
+struct gimple_opt_pass pass_expand_omp =
{
{
GIMPLE_PASS,
@@ -5583,7 +5583,7 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_seq_add_seq (&body, l);
gimple_omp_section_set_last (sec_start);
}
-
+
gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
}
@@ -5776,7 +5776,7 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
bind_body = maybe_catch_exception (bind_body);
- t = gimple_build_omp_return
+ t = gimple_build_omp_return
(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
OMP_CLAUSE_NOWAIT));
gimple_seq_add_stmt (&bind_body, t);
@@ -5978,7 +5978,7 @@ lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
tree clauses, cond, vinit;
enum tree_code cond_code;
gimple_seq stmts;
-
+
cond_code = fd->loop.cond_code;
cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
@@ -6107,7 +6107,7 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gsi_replace (gsi_p, new_stmt, true);
}
-/* Callback for walk_stmts. Check if the current statement only contains
+/* Callback for walk_stmts. Check if the current statement only contains
GIMPLE_OMP_FOR or GIMPLE_OMP_PARALLEL. */
static tree
@@ -6686,7 +6686,7 @@ execute_lower_omp (void)
return 0;
}
-struct gimple_opt_pass pass_lower_omp =
+struct gimple_opt_pass pass_lower_omp =
{
{
GIMPLE_PASS,
@@ -6721,7 +6721,7 @@ diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
if (label_ctx == branch_ctx)
return false;
-
+
/*
Previously we kept track of the label's entire context in diagnose_sb_[12]
so we could traverse it and issue a correct "exit" or "enter" error
@@ -6732,7 +6732,7 @@ diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
for issuing exit/enter error messages. If someone really misses the
distinct error message... patches welcome.
*/
-
+
#if 0
/* Try to avoid confusing the user by producing and error message
with correct "exit" or "enter" verbiage. We prefer "exit"
@@ -6785,7 +6785,7 @@ diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
-
+
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 39257f5a6c3..9e659dc3869 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -424,11 +424,11 @@ optab_for_tree_code (enum tree_code code, const_tree type,
return vec_shr_optab;
case VEC_WIDEN_MULT_HI_EXPR:
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
case VEC_WIDEN_MULT_LO_EXPR:
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
case VEC_UNPACK_HI_EXPR:
@@ -436,7 +436,7 @@ optab_for_tree_code (enum tree_code code, const_tree type,
vec_unpacku_hi_optab : vec_unpacks_hi_optab;
case VEC_UNPACK_LO_EXPR:
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_unpacku_lo_optab : vec_unpacks_lo_optab;
case VEC_UNPACK_FLOAT_HI_EXPR:
@@ -446,7 +446,7 @@ optab_for_tree_code (enum tree_code code, const_tree type,
case VEC_UNPACK_FLOAT_LO_EXPR:
/* The signedness is determined from input operand. */
- return TYPE_UNSIGNED (type) ?
+ return TYPE_UNSIGNED (type) ?
vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
case VEC_PACK_TRUNC_EXPR:
@@ -524,7 +524,7 @@ optab_for_tree_code (enum tree_code code, const_tree type,
E.g, when called to expand the following operations, this is how
the arguments will be initialized:
nops OP0 OP1 WIDE_OP
- widening-sum 2 oprnd0 - oprnd1
+ widening-sum 2 oprnd0 - oprnd1
widening-dot-product 3 oprnd0 oprnd1 oprnd2
widening-mult 2 oprnd0 oprnd1 -
type-promotion (vec-unpack) 1 oprnd0 - - */
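For reference, the widening-dot-product row in the table above describes a reduction of this shape — an illustrative sketch only, with signed chars widened to int:

/* oprnd2 accumulates the widened products of oprnd0 and oprnd1.  */
int acc = init;
for (i = 0; i < n; i++)
  acc += (int) a[i] * (int) b[i];   /* a, b: signed char operands */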
@@ -532,11 +532,11 @@ optab_for_tree_code (enum tree_code code, const_tree type,
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
rtx target, int unsignedp)
-{
+{
tree oprnd0, oprnd1, oprnd2;
enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
optab widen_pattern_optab;
- int icode;
+ int icode;
enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
rtx temp;
rtx pat;
@@ -1426,7 +1426,7 @@ expand_binop_directly (enum machine_mode mode, optab binoptab,
rtx xop0 = op0, xop1 = op1;
rtx temp;
rtx swap;
-
+
if (target)
temp = target;
else
@@ -1443,7 +1443,7 @@ expand_binop_directly (enum machine_mode mode, optab binoptab,
xop0 = xop1;
xop1 = swap;
}
-
+
/* If we are optimizing, force expensive constants into a register. */
xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
if (!shift_optab_p (binoptab))
@@ -1454,21 +1454,21 @@ expand_binop_directly (enum machine_mode mode, optab binoptab,
seem that we don't need to convert CONST_INTs, but we do, so
that they're properly zero-extended, sign-extended or truncated
for their mode. */
-
+
if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
xop0 = convert_modes (mode0,
GET_MODE (xop0) != VOIDmode
? GET_MODE (xop0)
: mode,
xop0, unsignedp);
-
+
if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
xop1 = convert_modes (mode1,
GET_MODE (xop1) != VOIDmode
? GET_MODE (xop1)
: mode,
xop1, unsignedp);
-
+
/* If operation is commutative,
try to make the first operand a register.
Even better, try to make it the same as the target.
@@ -1483,16 +1483,16 @@ expand_binop_directly (enum machine_mode mode, optab binoptab,
/* Now, if insn's predicates don't allow our operands, put them into
pseudo regs. */
-
+
if (!insn_data[icode].operand[1].predicate (xop0, mode0)
&& mode0 != VOIDmode)
xop0 = copy_to_mode_reg (mode0, xop0);
-
+
if (!insn_data[icode].operand[2].predicate (xop1, mode1)
&& mode1 != VOIDmode)
xop1 = copy_to_mode_reg (mode1, xop1);
-
- if (binoptab == vec_pack_trunc_optab
+
+ if (binoptab == vec_pack_trunc_optab
|| binoptab == vec_pack_usat_optab
|| binoptab == vec_pack_ssat_optab
|| binoptab == vec_pack_ufix_trunc_optab
@@ -1509,7 +1509,7 @@ expand_binop_directly (enum machine_mode mode, optab binoptab,
if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
temp = gen_reg_rtx (tmp_mode);
-
+
pat = GEN_FCN (icode) (temp, xop0, xop1);
if (pat)
{
@@ -1523,7 +1523,7 @@ expand_binop_directly (enum machine_mode mode, optab binoptab,
return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
unsignedp, methods);
}
-
+
emit_insn (pat);
return temp;
}
@@ -1602,7 +1602,7 @@ expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
newop1 = expand_binop (GET_MODE (op1), sub_optab,
GEN_INT (bits), op1,
NULL_RTX, unsignedp, OPTAB_DIRECT);
-
+
temp = expand_binop_directly (mode, otheroptab, op0, newop1,
target, unsignedp, methods, last);
if (temp)
@@ -2794,10 +2794,10 @@ static rtx
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
rtx seq, temp;
-
+
if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
return 0;
-
+
start_sequence ();
temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
@@ -2827,7 +2827,7 @@ expand_ctz (enum machine_mode mode, rtx op0, rtx target)
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
else with the sequence used by expand_clz.
-
+
The ffs builtin promises to return zero for a zero value and ctz/clz
may have an undefined value in that case. If they do not give us a
convenient value, we have to generate a test and branch. */
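In source terms the correction amounts to the following (a sketch; the helper name is illustrative, and the zero test is what becomes the emitted test and branch when ffs(0) cannot be derived from the target's ctz):

/* ffs (x) == ctz (x) + 1 for x != 0, but ffs (0) must be 0 while
   ctz (0) may be undefined, so guard the zero case explicitly.  */
static int
my_ffs (int x)
{
  return x ? __builtin_ctz (x) + 1 : 0;
}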
@@ -2866,7 +2866,7 @@ expand_ffs (enum machine_mode mode, rtx op0, rtx target)
if (defined_at_zero && val == -1)
/* No correction needed at zero. */;
- else
+ else
{
/* We don't try to do anything clever with the situation found
on some processors (eg Alpha) where ctz(0:mode) ==
@@ -3795,7 +3795,7 @@ expand_copysign (rtx op0, rtx op1, rtx target)
with two operands: an output TARGET and an input OP0.
TARGET *must* be nonzero, and the output is always stored there.
CODE is an rtx code such that (CODE OP0) is an rtx that describes
- the value that is stored into TARGET.
+ the value that is stored into TARGET.
Return false if expansion failed. */
@@ -4238,7 +4238,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
/* There are two kinds of comparison routines. Biased routines
return 0/1/2, and unbiased routines return -1/0/1. Other parts
of gcc expect that the comparison operation is equivalent
- to the modified comparison. For signed comparisons compare the
+ to the modified comparison. For signed comparisons compare the
result against 1 in the biased case, and zero in the unbiased
case. For unsigned comparisons always compare against 1 after
biasing the unbiased result by adding 1. This gives us a way to
@@ -4249,7 +4249,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
if (!TARGET_LIB_INT_CMP_BIASED)
{
if (unsignedp)
- x = plus_constant (result, 1);
+ x = plus_constant (result, 1);
else
y = const0_rtx;
}
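A worked instance of the unsigned biasing above, assuming an unbiased comparison routine that returns -1/0/1 for less-than/equal/greater-than (the convention, not any particular libcall, is what matters):

/* With !TARGET_LIB_INT_CMP_BIASED, x LTU y is rewritten as
   (r + 1) LTU 1, where r is the libcall result:
     x <  y :  r == -1,  r + 1 == 0,  0 LTU 1  -> true
     x == y :  r ==  0,  r + 1 == 1,  1 LTU 1  -> false
     x >  y :  r ==  1,  r + 1 == 2,  2 LTU 1  -> false  */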
@@ -4258,7 +4258,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
ptest, pmode);
}
- else
+ else
prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
return;
@@ -5942,7 +5942,7 @@ gen_trunc_conv_libfunc (convert_optab tab,
if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
|| (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
-
+
if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
return;
@@ -5971,7 +5971,7 @@ gen_extend_conv_libfunc (convert_optab tab,
if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
|| (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
-
+
if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
return;
@@ -6361,7 +6361,7 @@ init_optabs (void)
init_optab (ssum_widen_optab, UNKNOWN);
init_optab (usum_widen_optab, UNKNOWN);
- init_optab (sdot_prod_optab, UNKNOWN);
+ init_optab (sdot_prod_optab, UNKNOWN);
init_optab (udot_prod_optab, UNKNOWN);
init_optab (vec_extract_optab, UNKNOWN);
diff --git a/gcc/optabs.h b/gcc/optabs.h
index c4acb17eedd..0161d3e6d3b 100644
--- a/gcc/optabs.h
+++ b/gcc/optabs.h
@@ -334,7 +334,7 @@ enum optab_index
OTI_vec_shr,
/* Extract specified elements from vectors, for vector load. */
OTI_vec_realign_load,
- /* Widening multiplication.
+ /* Widening multiplication.
The high/low part of the resulting vector of products is returned. */
OTI_vec_widen_umult_hi,
OTI_vec_widen_umult_lo,
diff --git a/gcc/opts-common.c b/gcc/opts-common.c
index d3cefe85e8e..b70b823db16 100644
--- a/gcc/opts-common.c
+++ b/gcc/opts-common.c
@@ -121,7 +121,7 @@ cancel_option (int opt_idx, int next_opt_idx, int orig_next_opt_idx)
if (cl_options [next_opt_idx].neg_index != orig_next_opt_idx)
return cancel_option (opt_idx, cl_options [next_opt_idx].neg_index,
orig_next_opt_idx);
-
+
return false;
}
diff --git a/gcc/opts.c b/gcc/opts.c
index 4e8fdcc6700..3a825ce364d 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -436,7 +436,7 @@ complain_wrong_lang (const char *text, const struct cl_option *option,
/* The LTO front end inherits all the options from the first front
end that was used. However, not all the original front end
options make sense in LTO.
-
+
A real solution would be to filter this in collect2, but collect2
does not have access to all the option attributes to know what to
filter. So, in lto1 we silently accept inherited flags and do
@@ -834,7 +834,7 @@ decode_options (unsigned int argc, const char **argv)
}
}
}
-
+
/* Use priority coloring if cover classes is not defined for the
target. */
if (targetm.ira_cover_classes == NULL)
@@ -1048,7 +1048,7 @@ decode_options (unsigned int argc, const char **argv)
#endif
))
{
- inform (input_location,
+ inform (input_location,
"-freorder-blocks-and-partition does not work with exceptions on this architecture");
flag_reorder_blocks_and_partition = 0;
flag_reorder_blocks = 1;
@@ -1335,7 +1335,7 @@ print_filtered_help (unsigned int include_flags,
printf (_(" None found. Use --help=%s to show *all* the options supported by the %s front-end\n"),
lang_names[i], lang_names[i]);
}
-
+
}
else if (! displayed)
printf (_(" All options with the desired characteristics have already been displayed\n"));
@@ -1640,8 +1640,8 @@ common_handle_option (size_t scode, const char *arg, int value,
break;
case OPT_Wlarger_than_:
- /* This form corresponds to -Wlarger-than-.
- Kept for backward compatibility.
+ /* This form corresponds to -Wlarger-than-.
+ Kept for backward compatibility.
Don't use it as the first argument of warning(). */
case OPT_Wlarger_than_eq:
@@ -2165,7 +2165,7 @@ handle_param (const char *carg)
free (arg);
}
-/* Used to set the level of strict aliasing warnings,
+/* Used to set the level of strict aliasing warnings,
when no level is specified (i.e., when -Wstrict-aliasing, and not
-Wstrict-aliasing=level was given).
ONOFF is assumed to take value 1 when -Wstrict-aliasing is specified,
@@ -2198,8 +2198,8 @@ set_fast_math_flags (int set)
}
}
-/* When -funsafe-math-optimizations is set the following
- flags are set as well. */
+/* When -funsafe-math-optimizations is set the following
+ flags are set as well. */
void
set_unsafe_math_optimizations_flags (int set)
{
@@ -2408,7 +2408,7 @@ enable_warning_as_error (const char *arg, int value, unsigned int lang_mask)
{
diagnostic_t kind = value ? DK_ERROR : DK_WARNING;
diagnostic_classify_diagnostic (global_dc, option_index, kind);
-
+
/* -Werror=foo implies -Wfoo. */
if (cl_options[option_index].var_type == CLVC_BOOLEAN
&& cl_options[option_index].flag_var
diff --git a/gcc/params.def b/gcc/params.def
index db9b75a8582..7e09701dda3 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -24,7 +24,7 @@ along with GCC; see the file COPYING3. If not see
- The enumeral corresponding to this parameter.
- - The name that can be used to set this parameter using the
+ - The name that can be used to set this parameter using the
command-line option `--param <name>=<value>'.
- A help string explaining how the parameter is used.
@@ -39,8 +39,8 @@ along with GCC; see the file COPYING3. If not see
Be sure to add an entry to invoke.texi summarizing the parameter. */
/* The threshold ratio between current and hottest structure counts.
- We say that if the ratio of the current structure count,
- calculated by profiling, to the hottest structure count
+ We say that if the ratio of the current structure count,
+ calculated by profiling, to the hottest structure count
in the program is less than this parameter, then structure
reorganization is not applied. The default is 10%. */
DEFPARAM (PARAM_STRUCT_REORG_COLD_STRUCT_RATIO,
@@ -64,7 +64,7 @@ DEFPARAM (PARAM_PREDICTABLE_BRANCH_OUTCOME,
definition for C++) are affected by this.
There are more restrictions to inlining: If inlined functions
call other functions, the already inlined instructions are
- counted and once the recursive inline limit (see
+ counted and once the recursive inline limit (see
"max-inline-insns" parameter) is exceeded, the acceptable size
gets decreased. */
DEFPARAM (PARAM_MAX_INLINE_INSNS_SINGLE,
@@ -123,7 +123,7 @@ DEFPARAM (PARAM_MAX_VARIABLE_EXPANSIONS,
"max-variable-expansions-in-unroller",
"If -fvariable-expansion-in-unroller is used, the maximum number of times that an individual variable will be expanded during loop unrolling",
1, 0, 0)
-
+
/* Limit loop autovectorization to loops with large enough iteration count. */
DEFPARAM (PARAM_MIN_VECT_LOOP_BOUND,
"min-vect-loop-bound",
@@ -152,10 +152,10 @@ DEFPARAM(PARAM_MAX_DELAY_SLOT_LIVE_SEARCH,
"The maximum number of instructions to consider to find accurate live register information",
333, 0, 0)
-/* This parameter limits the number of branch elements that the
+/* This parameter limits the number of branch elements that the
scheduler will track anti-dependencies through without resetting
- the tracking mechanism. Large functions with few calls or barriers
- can generate lists containing many 1000's of dependencies. Generally
+ the tracking mechanism. Large functions with few calls or barriers
+ can generate lists containing many 1000's of dependencies. Generally
the compiler either uses all available memory, or runs for far too long. */
DEFPARAM(PARAM_MAX_PENDING_LIST_LENGTH,
"max-pending-list-length",
@@ -221,7 +221,7 @@ DEFPARAM(PARAM_GCSE_AFTER_RELOAD_CRITICAL_FRACTION,
10, 0, 0)
/* This parameter limits the number of insns in a loop that will be unrolled,
and by how much the loop is unrolled.
-
+
This limit should be at most half of the peeling limits: loop unroller
decides to not unroll loops that iterate fewer than 2*number of allowed
unrollings and thus we would have loops that are neither peeled or unrolled
@@ -329,7 +329,7 @@ DEFPARAM (PARAM_ALIGN_LOOP_ITERATIONS,
For functions containing one loop with large known number of iterations
and other loops having unbounded loops we would end up predicting all
the other loops cold that is not usually the case. So we need to artificially
- flatten the profile.
+ flatten the profile.
We need to cut the maximal predicted iterations to large enough iterations
so the loop appears important, but safely within HOT_BB_COUNT_FRACTION
@@ -592,7 +592,7 @@ DEFPARAM (PARAM_INTEGER_SHARE_LIMIT,
PARAM_MIN_VIRTUAL_MAPPINGS specifies the minimum number of virtual
mappings that should be registered to trigger the heuristic.
-
+
PARAM_VIRTUAL_MAPPINGS_TO_SYMS_RATIO specifies the ratio between
mappings and symbols. If the number of virtual mappings is
PARAM_VIRTUAL_MAPPINGS_TO_SYMS_RATIO bigger than the number of
@@ -632,7 +632,7 @@ DEFPARAM (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS,
15, 0, 0)
/* This is the maximum number of fields a variable may have before the pointer analysis machinery
- will stop trying to treat it in a field-sensitive manner.
+ will stop trying to treat it in a field-sensitive manner.
There are programs out there with thousands of fields per structure, and handling them
field-sensitively is not worth the cost. */
DEFPARAM (PARAM_MAX_FIELDS_FOR_FIELD_SENSITIVE,
diff --git a/gcc/params.h b/gcc/params.h
index 56db145e1f6..e0bb4fa7e9b 100644
--- a/gcc/params.h
+++ b/gcc/params.h
@@ -52,10 +52,10 @@ typedef struct param_info
/* Minimum acceptable value. */
int min_value;
-
+
/* Maximum acceptable value, if greater than minimum */
int max_value;
-
+
/* A short description of the option. */
const char *const help;
} param_info;
diff --git a/gcc/passes.c b/gcc/passes.c
index 0c39a7a22bb..57b55c08fc9 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -114,7 +114,7 @@ void
print_current_pass (FILE *file)
{
if (current_pass)
- fprintf (file, "current pass = %s (%d)\n",
+ fprintf (file, "current pass = %s (%d)\n",
current_pass->name, current_pass->static_pass_number);
else
fprintf (file, "no current pass.\n");
@@ -126,7 +126,7 @@ void
debug_pass (void)
{
print_current_pass (stderr);
-}
+}
@@ -407,7 +407,7 @@ register_one_dump_file (struct opt_pass *pass)
/* Recursive worker function for register_dump_files. */
-static int
+static int
register_dump_files_1 (struct opt_pass *pass, int properties)
{
do
@@ -435,11 +435,11 @@ register_dump_files_1 (struct opt_pass *pass, int properties)
return properties;
}
-/* Register the dump files for the pipeline starting at PASS.
+/* Register the dump files for the pipeline starting at PASS.
PROPERTIES reflects the properties that are guaranteed to be available at
the beginning of the pipeline. */
-static void
+static void
register_dump_files (struct opt_pass *pass,int properties)
{
pass->properties_required |= properties;
@@ -479,8 +479,8 @@ make_pass_instance (struct opt_pass *pass, bool track_duplicates)
{
pass->todo_flags_start |= TODO_mark_first_instance;
pass->static_pass_number = -1;
- }
- return pass;
+ }
+ return pass;
}
/* Add a pass to the pass list. Duplicate the pass if it's already
@@ -493,7 +493,7 @@ next_pass_1 (struct opt_pass **list, struct opt_pass *pass)
gcc_assert (pass->name != NULL);
*list = make_pass_instance (pass, false);
-
+
return &(*list)->next;
}
@@ -513,7 +513,7 @@ struct pass_list_node
static struct pass_list_node *added_pass_nodes = NULL;
static struct pass_list_node *prev_added_pass_node;
-/* Insert the pass at the proper position. Return true if the pass
+/* Insert the pass at the proper position. Return true if the pass
is successfully added.
NEW_PASS_INFO - new pass to be inserted
@@ -543,7 +543,7 @@ position_pass (struct register_pass_info *new_pass_info,
struct pass_list_node *new_pass_node;
new_pass = make_pass_instance (new_pass_info->pass, true);
-
+
/* Insert the new pass instance based on the positioning op. */
switch (new_pass_info->pos_op)
{
@@ -554,7 +554,7 @@ position_pass (struct register_pass_info *new_pass_info,
/* Skip newly inserted pass to avoid repeated
insertions in the case where the new pass and the
existing one have the same name. */
- pass = new_pass;
+ pass = new_pass;
break;
case PASS_POS_INSERT_BEFORE:
new_pass->next = pass;
@@ -779,7 +779,7 @@ init_optimization_passes (void)
NEXT_PASS (pass_ipa_cp);
NEXT_PASS (pass_ipa_inline);
NEXT_PASS (pass_ipa_reference);
- NEXT_PASS (pass_ipa_pure_const);
+ NEXT_PASS (pass_ipa_pure_const);
NEXT_PASS (pass_ipa_type_escape);
NEXT_PASS (pass_ipa_pta);
NEXT_PASS (pass_ipa_struct_reorg);
@@ -906,7 +906,7 @@ init_optimization_passes (void)
we may get false warnings (e.g., testsuite/gcc.dg/uninit-5.c).
However, this also causes us to misdiagnose cases that should be
real warnings (e.g., testsuite/gcc.dg/pr18501.c).
-
+
To fix the false positives in uninit-5.c, we would have to
account for the predicates protecting the set and the use of each
variable. Using a representation like Gated Single Assignment
@@ -1037,23 +1037,23 @@ init_optimization_passes (void)
/* Register the passes with the tree dump code. */
register_dump_files (all_lowering_passes, PROP_gimple_any);
- register_dump_files (all_small_ipa_passes,
+ register_dump_files (all_small_ipa_passes,
PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh
| PROP_cfg);
- register_dump_files (all_regular_ipa_passes,
+ register_dump_files (all_regular_ipa_passes,
PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh
| PROP_cfg);
- register_dump_files (all_lto_gen_passes,
+ register_dump_files (all_lto_gen_passes,
PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh
| PROP_cfg);
- register_dump_files (all_passes,
+ register_dump_files (all_passes,
PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh
| PROP_cfg);
}
/* If we are in IPA mode (i.e., current_function_decl is NULL), call
function CALLBACK for every function in the call graph. Otherwise,
- call CALLBACK on the current function. */
+ call CALLBACK on the current function. */
static void
do_per_function (void (*callback) (void *data), void *data)
@@ -1090,7 +1090,7 @@ static GTY ((length ("nnodes"))) struct cgraph_node **order;
/* If we are in IPA mode (i.e., current_function_decl is NULL), call
function CALLBACK for every function in the call graph. Otherwise,
- call CALLBACK on the current function. */
+ call CALLBACK on the current function. */
static void
do_per_function_toporder (void (*callback) (void *data), void *data)
@@ -1152,7 +1152,7 @@ execute_function_todo (void *data)
if (cleanup && (cfun->curr_properties & PROP_ssa))
flags |= TODO_remove_unused_locals;
-
+
/* When cleanup_tree_cfg merges consecutive blocks, it may
perform some simplistic propagation when removing single
valued PHI nodes. This propagation may, in turn, cause the
@@ -1169,7 +1169,7 @@ execute_function_todo (void *data)
update_ssa (update_flags);
cfun->last_verified &= ~TODO_verify_ssa;
}
-
+
if (flags & TODO_update_address_taken)
execute_update_addresses_taken (true);
@@ -1179,7 +1179,7 @@ execute_function_todo (void *data)
execute_update_addresses_taken (true);
compute_may_aliases ();
}
-
+
if (flags & TODO_remove_unused_locals)
remove_unused_locals ();
@@ -1279,7 +1279,7 @@ execute_todo (unsigned int flags)
if (flags & TODO_ggc_collect)
ggc_collect ();
- /* Now that the dumping has been done, we can get rid of the optional
+ /* Now that the dumping has been done, we can get rid of the optional
df problems. */
if (flags & TODO_df_finish)
df_finish_pass ((flags & TODO_df_verify) != 0);
@@ -1386,7 +1386,7 @@ execute_ipa_summary_passes (struct ipa_opt_pass_d *ipa_pass)
struct opt_pass *pass = &ipa_pass->pass;
/* Execute all of the IPA_PASSes in the list. */
- if (ipa_pass->pass.type == IPA_PASS
+ if (ipa_pass->pass.type == IPA_PASS
&& (!pass->gate || pass->gate ())
&& ipa_pass->generate_summary)
{
@@ -1644,7 +1644,7 @@ ipa_write_summaries (void)
cgraph_node_set set;
struct cgraph_node **order;
int i, order_pos;
-
+
if (!flag_generate_lto || errorcount || sorrycount)
return;
diff --git a/gcc/plugin.c b/gcc/plugin.c
index bb967c385fe..c43e0c844a1 100644
--- a/gcc/plugin.c
+++ b/gcc/plugin.c
@@ -132,7 +132,7 @@ add_new_plugin (const char* plugin_name)
void **slot;
char *base_name = get_plugin_base_name (plugin_name);
- /* If this is the first -fplugin= option we encounter, create
+ /* If this is the first -fplugin= option we encounter, create
'plugin_name_args_tab' hash table. */
if (!plugin_name_args_tab)
plugin_name_args_tab = htab_create (10, htab_hash_string, htab_str_eq,
@@ -493,7 +493,7 @@ initialize_plugins (void)
return;
timevar_push (TV_PLUGIN_INIT);
-
+
#ifdef ENABLE_PLUGIN
/* Traverse and initialize each plugin specified in the command-line. */
htab_traverse_noresize (plugin_name_args_tab, init_one_plugin, NULL);
diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c
index 57be7a5c39c..d7c8878c496 100644
--- a/gcc/postreload-gcse.c
+++ b/gcc/postreload-gcse.c
@@ -310,7 +310,7 @@ expr_equiv_p (const void *exp1p, const void *exp2p)
const struct expr *const exp1 = (const struct expr *) exp1p;
const struct expr *const exp2 = (const struct expr *) exp2p;
int equiv_p = exp_equiv_p (exp1->expr, exp2->expr, 0, true);
-
+
gcc_assert (!equiv_p || exp1->hash == exp2->hash);
return equiv_p;
}
@@ -349,7 +349,7 @@ insert_expr_in_table (rtx x, rtx insn)
slot = (struct expr **) htab_find_slot_with_hash (expr_table, cur_expr,
hash, INSERT);
-
+
if (! (*slot))
/* The expression isn't found, so insert it. */
*slot = cur_expr;
@@ -1002,7 +1002,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn,
avail_insn = a_occr->insn;
avail_reg = get_avail_load_store_reg (avail_insn);
gcc_assert (avail_reg);
-
+
/* Make sure we can generate a move from register avail_reg to
dest. */
extract_insn (gen_move_insn (copy_rtx (dest),
@@ -1065,9 +1065,9 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn,
if (/* No load can be replaced by copy. */
npred_ok == 0
- /* Prevent exploding the code. */
+ /* Prevent exploding the code. */
|| (optimize_bb_for_size_p (bb) && npred_ok > 1)
- /* If we don't have profile information we cannot tell if splitting
+ /* If we don't have profile information we cannot tell if splitting
a critical edge is profitable or not so don't do it. */
|| ((! profile_info || ! flag_branch_probabilities
|| targetm.cannot_modify_jumps_p ())
@@ -1295,7 +1295,7 @@ gcse_after_reload_main (rtx f ATTRIBUTE_UNUSED)
fprintf (dump_file, "\n\n");
}
}
-
+
/* We are finished with alias. */
end_alias_analysis ();
diff --git a/gcc/postreload.c b/gcc/postreload.c
index d23ae0b990a..eaedee13a74 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -794,7 +794,7 @@ reload_combine (void)
... (MEM (PLUS (REGZ) (REGY)))... .
First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
- and that we know all uses of REGX before it dies.
+ and that we know all uses of REGX before it dies.
Also, explicitly check that REGX != REGY; our life information
does not yet show whether REGY changes in this insn. */
set = single_set (insn);
diff --git a/gcc/predict.c b/gcc/predict.c
index 058901e5903..eb5ddef2e38 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -66,7 +66,7 @@ along with GCC; see the file COPYING3. If not see
static sreal real_zero, real_one, real_almost_one, real_br_prob_base,
real_inv_br_prob_base, real_one_half, real_bb_freq_max;
-/* Random guesstimation given names.
+/* Random guesstimation given names.
PROV_VERY_UNLIKELY should be small enough so basic block predicted
by it gets bellow HOT_BB_FREQUENCY_FRANCTION. */
#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
@@ -386,7 +386,7 @@ gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
if (!preds)
return false;
-
+
for (i = (struct edge_prediction *) *preds; i; i = i->ep_next)
if (i->ep_predictor == predictor)
return true;
@@ -394,7 +394,7 @@ gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
}
/* Return true when the probability of edge is reliable.
-
+
The profile guessing code is good at predicting branch outcome (ie.
taken/not taken), that is predicted right slightly over 75% of time.
It is however notoriously poor on predicting the probability itself.
@@ -504,7 +504,7 @@ void
remove_predictions_associated_with_edge (edge e)
{
void **preds;
-
+
if (!bb_predictions)
return;
@@ -787,7 +787,7 @@ combine_predictions_for_bb (basic_block bb)
first = e;
}
- /* When there is no successor or only one choice, prediction is easy.
+ /* When there is no successor or only one choice, prediction is easy.
We are lazy for now and predict only basic blocks with two outgoing
edges. It is possible to predict generic case too, but we have to
@@ -836,7 +836,7 @@ combine_predictions_for_bb (basic_block bb)
if (pred2->ep_edge != first)
probability2 = REG_BR_PROB_BASE - probability2;
- if ((probability < REG_BR_PROB_BASE / 2) !=
+ if ((probability < REG_BR_PROB_BASE / 2) !=
(probability2 < REG_BR_PROB_BASE / 2))
break;
@@ -1021,7 +1021,7 @@ predict_loops (void)
EDGE_PROBABILITY_RELIABLE from trusting the branch prediction
as this was causing regression in perl benchmark containing such
a wide loop. */
-
+
int probability = ((REG_BR_PROB_BASE
- predictor_info [(int) PRED_LOOP_EXIT].hitrate)
/ n_exits);
@@ -1033,7 +1033,7 @@ predict_loops (void)
predict_edge (e, PRED_LOOP_EXIT, probability);
}
}
-
+
/* Free basic blocks from get_loop_body. */
free (bbs);
}
@@ -1262,10 +1262,10 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code, tree op1, bitma
return NULL;
}
-/* Return constant EXPR will likely have at execution time, NULL if unknown.
+/* Return constant EXPR will likely have at execution time, NULL if unknown.
The function is used by builtin_expect branch predictor so the evidence
must come from this construct and additional possible constant folding.
-
+
We may want to implement more involved value guess (such as value range
propagation based prediction), but such tricks shall go to new
implementation. */
@@ -1931,11 +1931,11 @@ propagate_freq (basic_block head, bitmap tovisit)
if (e)
{
sreal tmp;
-
+
/* EDGE_INFO (e)->back_edge_prob
= ((e->probability * BLOCK_INFO (bb)->frequency)
/ REG_BR_PROB_BASE); */
-
+
sreal_init (&tmp, e->probability, 0);
sreal_mul (&tmp, &tmp, &BLOCK_INFO (bb)->frequency);
sreal_mul (&EDGE_INFO (e)->back_edge_prob,
@@ -1954,7 +1954,7 @@ propagate_freq (basic_block head, bitmap tovisit)
nextbb = e->dest;
else
BLOCK_INFO (last)->next = e->dest;
-
+
last = e->dest;
}
}
@@ -2222,7 +2222,7 @@ predictor_name (enum br_predictor predictor)
return predictor_info[predictor].name;
}
-struct gimple_opt_pass pass_profile =
+struct gimple_opt_pass pass_profile =
{
{
GIMPLE_PASS,
@@ -2241,7 +2241,7 @@ struct gimple_opt_pass pass_profile =
}
};
-struct gimple_opt_pass pass_strip_predict_hints =
+struct gimple_opt_pass pass_strip_predict_hints =
{
{
GIMPLE_PASS,
diff --git a/gcc/predict.def b/gcc/predict.def
index 3ac129ea70b..4b3e87aa568 100644
--- a/gcc/predict.def
+++ b/gcc/predict.def
@@ -32,7 +32,7 @@ along with GCC; see the file COPYING3. If not see
HITRATE is the probability that edge predicted by predictor as taken
will be really taken (so it should be always above
REG_BR_PROB_BASE / 2). */
-
+
/* A value used as final outcome of all heuristics. */
DEF_PREDICTOR (PRED_COMBINED, "combined", PROB_ALWAYS, 0)
diff --git a/gcc/pretty-print.c b/gcc/pretty-print.c
index 94952c3e152..56789490931 100644
--- a/gcc/pretty-print.c
+++ b/gcc/pretty-print.c
@@ -233,7 +233,7 @@ pp_base_format (pretty_printer *pp, text_info *text)
this point. */
memset (formatters, 0, sizeof formatters);
-
+
for (p = text->format_spec; *p; )
{
while (*p != '\0' && *p != '%')
@@ -249,7 +249,7 @@ pp_base_format (pretty_printer *pp, text_info *text)
{
case '\0':
gcc_unreachable ();
-
+
case '%':
obstack_1grow (&buffer->chunk_obstack, '%');
p++;
@@ -366,7 +366,7 @@ pp_base_format (pretty_printer *pp, text_info *text)
gcc_assert (chunk < PP_NL_ARGMAX * 2);
args[chunk++] = XOBFINISH (&buffer->chunk_obstack, const char *);
args[chunk] = 0;
-
+
/* Set output to the argument obstack, and switch line-wrapping and
prefixing off. */
buffer->obstack = &buffer->chunk_obstack;
diff --git a/gcc/pretty-print.h b/gcc/pretty-print.h
index 5c4ddca8043..3ba50353850 100644
--- a/gcc/pretty-print.h
+++ b/gcc/pretty-print.h
@@ -71,9 +71,9 @@ struct chunk_info
/* The output buffer datatype. This is best seen as an abstract datatype
whose fields should not be accessed directly by clients. */
-typedef struct
+typedef struct
{
- /* Obstack where the text is built up. */
+ /* Obstack where the text is built up. */
struct obstack formatted_obstack;
/* Obstack containing a chunked representation of the format
@@ -90,7 +90,7 @@ typedef struct
/* Where to output formatted text. */
FILE *stream;
- /* The amount of characters output so far. */
+ /* The amount of characters output so far. */
int line_length;
/* This must be large enough to hold any printed integer or
@@ -114,7 +114,7 @@ typedef struct
diagnostic_prefixing_rule_t rule;
/* The ideal upper bound of number of characters per line, as suggested
- by front-end. */
+ by front-end. */
int line_cutoff;
} pp_wrapping_mode_t;
@@ -140,7 +140,7 @@ typedef bool (*printer_fn) (pretty_printer *, text_info *, const char *,
/* TRUE if a newline character needs to be added before further
formatting. */
-#define pp_needs_newline(PP) pp_base (PP)->need_newline
+#define pp_needs_newline(PP) pp_base (PP)->need_newline
/* True if PRETTY-PRINTER is in line-wrapping mode. */
#define pp_is_wrapping_line(PP) (pp_line_cutoff (PP) > 0)
@@ -162,12 +162,12 @@ struct pretty_print_info
/* The prefix for each new line. */
const char *prefix;
-
+
/* Where to put whitespace around the entity being formatted. */
pp_padding padding;
-
+
/* The real upper bound of number of characters per line, taking into
- account the case of a very very looong prefix. */
+ account the case of a very very looong prefix. */
int maximum_length;
/* Indentation count. */
diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
index bc2854d9179..be380b11edb 100644
--- a/gcc/print-rtl.c
+++ b/gcc/print-rtl.c
@@ -293,7 +293,7 @@ print_rtx (const_rtx in_rtx)
#endif
break;
}
-
+
case NOTE_INSN_VAR_LOCATION:
#ifndef GENERATOR_FILE
fputc (' ', outfile);
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index f0a3294263a..a44d23a8474 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -199,7 +199,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
if (node == 0)
return;
-
+
code = TREE_CODE (node);
tclass = TREE_CODE_CLASS (code);
@@ -230,7 +230,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
if (table)
{
hash = ((unsigned long) node) % HASH_SIZE;
-
+
/* If node is in the table, just mention its address. */
for (b = table[hash]; b; b = b->next)
if (b->node == node)
@@ -238,7 +238,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
print_node_brief (file, prefix, node, indent);
return;
}
-
+
/* Add this node to the table. */
b = XNEW (struct bucket);
b->node = node;
@@ -354,7 +354,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
if (DECL_IGNORED_P (node))
fputs (" ignored", file);
if (DECL_ABSTRACT (node))
- fputs (" abstract", file);
+ fputs (" abstract", file);
if (DECL_EXTERNAL (node))
fputs (" external", file);
if (DECL_NONLOCAL (node))
@@ -428,7 +428,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
}
if (CODE_CONTAINS_STRUCT (code, TS_DECL_COMMON))
- {
+ {
if (DECL_VIRTUAL_P (node))
fputs (" virtual", file);
if (DECL_PRESERVE_P (node))
@@ -449,7 +449,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
fputs (" decl_6", file);
if (DECL_LANG_FLAG_7 (node))
fputs (" decl_7", file);
-
+
mode = DECL_MODE (node);
fprintf (file, " %s", GET_MODE_NAME (mode));
}
@@ -467,16 +467,16 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
xloc.column);
if (CODE_CONTAINS_STRUCT (code, TS_DECL_COMMON))
- {
+ {
print_node (file, "size", DECL_SIZE (node), indent + 4);
print_node (file, "unit size", DECL_SIZE_UNIT (node), indent + 4);
-
+
if (code != FUNCTION_DECL || DECL_BUILT_IN (node))
indent_to (file, indent + 3);
-
+
if (DECL_USER_ALIGN (node))
fprintf (file, " user");
-
+
fprintf (file, " align %d", DECL_ALIGN (node));
if (code == FIELD_DECL)
fprintf (file, " offset_align " HOST_WIDE_INT_PRINT_UNSIGNED,
@@ -504,7 +504,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
print_node_brief (file, "context", DECL_CONTEXT (node), indent + 4);
- if (CODE_CONTAINS_STRUCT (code, TS_DECL_COMMON))
+ if (CODE_CONTAINS_STRUCT (code, TS_DECL_COMMON))
{
print_node_brief (file, "attributes",
DECL_ATTRIBUTES (node), indent + 4);
@@ -627,7 +627,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
fprintf (file, " structural equality");
else
dump_addr (file, " canonical type ", TYPE_CANONICAL (node));
-
+
print_node (file, "attributes", TYPE_ATTRIBUTES (node), indent + 4);
if (INTEGRAL_TYPE_P (node) || code == REAL_TYPE
@@ -712,7 +712,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
for (i = 0; i < len; i++)
{
char temp[10];
-
+
sprintf (temp, "arg %d", i);
print_node (file, temp, TREE_OPERAND (node, i), indent + 4);
}
diff --git a/gcc/profile.c b/gcc/profile.c
index 0cc65521729..104956188bc 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -775,7 +775,7 @@ compute_value_histograms (histogram_values values)
gcov_type *histogram_counts[GCOV_N_VALUE_COUNTERS];
gcov_type *act_count[GCOV_N_VALUE_COUNTERS];
gcov_type *aact_count;
-
+
for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
n_histogram_counters[t] = 0;
@@ -943,8 +943,8 @@ branch_prob (void)
}
/* Edge with goto locus might get wrong coverage info unless
- it is the only edge out of BB.
- Don't do that when the locuses match, so
+ it is the only edge out of BB.
+ Don't do that when the locuses match, so
if (blah) goto something;
is not computed twice. */
if (last
@@ -1127,7 +1127,7 @@ branch_prob (void)
if (bb == ENTRY_BLOCK_PTR->next_bb)
{
- expanded_location curr_location =
+ expanded_location curr_location =
expand_location (DECL_SOURCE_LOCATION (current_function_decl));
output_location (curr_location.file, curr_location.line,
&offset, bb);
diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c
index 5c65e26b02d..9f922ae8cd9 100644
--- a/gcc/read-rtl.c
+++ b/gcc/read-rtl.c
@@ -1483,7 +1483,7 @@ read_rtx_1 (FILE *infile, struct map_value **mode_maps)
if (c == EOF)
return 0;
-
+
if (c != '(')
fatal_expected_char (infile, '(', c);
@@ -1738,7 +1738,7 @@ read_rtx_variadic (FILE *infile, struct map_value **mode_maps, rtx form)
XEXP (q, 0) = XEXP (p, 1);
XEXP (q, 1) = read_rtx_1 (infile, mode_maps);
-
+
XEXP (p, 1) = q;
p = q;
c = read_skip_spaces (infile);
diff --git a/gcc/real.c b/gcc/real.c
index 98e7d7875eb..25e599dfe35 100644
--- a/gcc/real.c
+++ b/gcc/real.c
@@ -1343,7 +1343,7 @@ real_to_integer (const REAL_VALUE_TYPE *r)
if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG)
i = r->sig[SIGSZ-1];
- else
+ else
{
gcc_assert (HOST_BITS_PER_WIDE_INT == 2 * HOST_BITS_PER_LONG);
i = r->sig[SIGSZ-1];
@@ -1394,11 +1394,11 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh,
case rvc_normal:
if (r->decimal)
- {
+ {
decimal_real_to_integer2 (plow, phigh, r);
return;
}
-
+
exp = REAL_EXP (r);
if (exp <= 0)
goto underflow;
@@ -1415,7 +1415,7 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh,
high = t.sig[SIGSZ-1];
low = t.sig[SIGSZ-2];
}
- else
+ else
{
gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG);
high = t.sig[SIGSZ-1];
@@ -2117,7 +2117,7 @@ real_from_string2 (const char *s, enum machine_mode mode)
/* Initialize R from string S and desired MODE. */
-void
+void
real_from_string3 (REAL_VALUE_TYPE *r, const char *s, enum machine_mode mode)
{
if (DECIMAL_FLOAT_MODE_P (mode))
@@ -2126,8 +2126,8 @@ real_from_string3 (REAL_VALUE_TYPE *r, const char *s, enum machine_mode mode)
real_from_string (r, s);
if (mode != VOIDmode)
- real_convert (r, mode, r);
-}
+ real_convert (r, mode, r);
+}
/* Initialize R from the integer pair HIGH+LOW. */
@@ -2342,7 +2342,7 @@ dconst_e_ptr (void)
mpfr_exp (m, m, GMP_RNDN);
real_from_mpfr (&value, m, NULL_TREE, GMP_RNDN);
mpfr_clear (m);
-
+
}
return &value;
}
@@ -2497,7 +2497,7 @@ real_maxval (REAL_VALUE_TYPE *r, int sign, enum machine_mode mode)
fmt = REAL_MODE_FORMAT (mode);
gcc_assert (fmt);
memset (r, 0, sizeof (*r));
-
+
if (fmt->b == 10)
decimal_real_maxval (r, sign, mode);
else
@@ -4459,41 +4459,41 @@ const struct real_format vax_g_format =
/* Encode real R into a single precision DFP value in BUF. */
static void
encode_decimal_single (const struct real_format *fmt ATTRIBUTE_UNUSED,
- long *buf ATTRIBUTE_UNUSED,
+ long *buf ATTRIBUTE_UNUSED,
const REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED)
{
encode_decimal32 (fmt, buf, r);
}
/* Decode a single precision DFP value in BUF into a real R. */
-static void
+static void
decode_decimal_single (const struct real_format *fmt ATTRIBUTE_UNUSED,
- REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED,
+ REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED,
const long *buf ATTRIBUTE_UNUSED)
{
decode_decimal32 (fmt, r, buf);
}
/* Encode real R into a double precision DFP value in BUF. */
-static void
+static void
encode_decimal_double (const struct real_format *fmt ATTRIBUTE_UNUSED,
- long *buf ATTRIBUTE_UNUSED,
+ long *buf ATTRIBUTE_UNUSED,
const REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED)
{
encode_decimal64 (fmt, buf, r);
}
/* Decode a double precision DFP value in BUF into a real R. */
-static void
+static void
decode_decimal_double (const struct real_format *fmt ATTRIBUTE_UNUSED,
- REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED,
+ REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED,
const long *buf ATTRIBUTE_UNUSED)
{
decode_decimal64 (fmt, r, buf);
}
/* Encode real R into a quad precision DFP value in BUF. */
-static void
+static void
encode_decimal_quad (const struct real_format *fmt ATTRIBUTE_UNUSED,
long *buf ATTRIBUTE_UNUSED,
const REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED)
@@ -4502,7 +4502,7 @@ encode_decimal_quad (const struct real_format *fmt ATTRIBUTE_UNUSED,
}
/* Decode a quad precision DFP value in BUF into a real R. */
-static void
+static void
decode_decimal_quad (const struct real_format *fmt ATTRIBUTE_UNUSED,
REAL_VALUE_TYPE *r ATTRIBUTE_UNUSED,
const long *buf ATTRIBUTE_UNUSED)
@@ -4515,7 +4515,7 @@ const struct real_format decimal_single_format =
{
encode_decimal_single,
decode_decimal_single,
- 10,
+ 10,
7,
7,
-94,
@@ -4527,7 +4527,7 @@ const struct real_format decimal_single_format =
true,
true,
true,
- true,
+ true,
true,
false
};
@@ -4570,8 +4570,8 @@ const struct real_format decimal_quad_format =
true,
true,
true,
- true,
- true,
+ true,
+ true,
true,
false
};
@@ -4998,13 +4998,13 @@ mpfr_from_real (mpfr_ptr m, const REAL_VALUE_TYPE *r, mp_rnd_t rndmode)
mpfr_set_inf (m, r->sign == 1 ? -1 : 1);
return;
}
-
+
if (r->cl == rvc_nan)
{
mpfr_set_nan (m);
return;
}
-
+
real_to_hexadecimal (buf, r, sizeof (buf), 0, 1);
/* mpfr_set_str() parses hexadecimal floats from strings in the same
format that GCC will output them. Nothing extra is needed. */
@@ -5054,7 +5054,7 @@ real_from_mpfr (REAL_VALUE_TYPE *r, mpfr_srcptr m, tree type, mp_rnd_t rndmode)
sprintf (buf, "0x.%sp%d", rstr, (int) exp);
mpfr_free_str (rstr);
-
+
real_from_string (r, buf);
}
diff --git a/gcc/recog.c b/gcc/recog.c
index 4ad3be9dfaf..5a6490edf97 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -286,7 +286,7 @@ canonicalize_change_group (rtx insn, rtx x)
else
return false;
}
-
+
/* This subroutine of apply_change_group verifies whether the changes to INSN
were valid; i.e. whether INSN can still be recognized. */
@@ -529,7 +529,7 @@ cancel_changes (int num)
rtx. */
static void
-simplify_while_replacing (rtx *loc, rtx to, rtx object,
+simplify_while_replacing (rtx *loc, rtx to, rtx object,
enum machine_mode op0_mode)
{
rtx x = *loc;
@@ -660,7 +660,7 @@ simplify_while_replacing (rtx *loc, rtx to, rtx object,
validate_change passing OBJECT. */
static void
-validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object,
+validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object,
bool simplify)
{
int i, j;
@@ -713,7 +713,7 @@ validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object,
from, to, object, simplify);
}
else
- validate_replace_rtx_1 (&XVECEXP (x, 0, j), from, to, object,
+ validate_replace_rtx_1 (&XVECEXP (x, 0, j), from, to, object,
simplify);
}
}
@@ -724,7 +724,7 @@ validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object,
validate_replace_rtx_1 (&XEXP (x, i), from, to, object, simplify);
else if (fmt[i] == 'E')
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- validate_replace_rtx_1 (&XVECEXP (x, i, j), from, to, object,
+ validate_replace_rtx_1 (&XVECEXP (x, i, j), from, to, object,
simplify);
}
@@ -765,9 +765,9 @@ validate_replace_rtx (rtx from, rtx to, rtx insn)
}
/* Try replacing every occurrence of FROM in WHERE with TO. Assume that WHERE
- is a part of INSN. After all changes have been made, validate by seeing if
- INSN is still valid.
- validate_replace_rtx (from, to, insn) is equivalent to
+ is a part of INSN. After all changes have been made, validate by seeing if
+ INSN is still valid.
+ validate_replace_rtx (from, to, insn) is equivalent to
validate_replace_rtx_part (from, to, &PATTERN (insn), insn). */
int
@@ -778,8 +778,8 @@ validate_replace_rtx_part (rtx from, rtx to, rtx *where, rtx insn)
}
/* Same as above, but do not simplify rtx afterwards. */
-int
-validate_replace_rtx_part_nosimplify (rtx from, rtx to, rtx *where,
+int
+validate_replace_rtx_part_nosimplify (rtx from, rtx to, rtx *where,
rtx insn)
{
validate_replace_rtx_1 (where, from, to, insn, false);
@@ -1435,7 +1435,7 @@ asm_noperands (const_rtx body)
if (GET_CODE (XVECEXP (body, 0, 0)) == SET)
{
/* Multiple output operands, or 1 output plus some clobbers:
- body is
+ body is
[(set OUTPUT (asm_operands ...))... (clobber (reg ...))...]. */
/* Count backwards through CLOBBERs to determine number of SETs. */
for (i = XVECLEN (body, 0); i > 0; i--)
@@ -3674,7 +3674,7 @@ gate_do_final_split (void)
return 1;
#else
return 0;
-#endif
+#endif
}
struct rtl_opt_pass pass_split_for_shorten_branches =
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
index f3f71e9c534..a17f4f80e0d 100644
--- a/gcc/reg-stack.c
+++ b/gcc/reg-stack.c
@@ -1369,7 +1369,7 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat)
/* Uninitialized USE might happen for functions returning uninitialized
value. We will properly initialize the USE on the edge to EXIT_BLOCK,
so it is safe to ignore the use here. This is consistent with behavior
- of dataflow analyzer that ignores USE too. (This also imply that
+ of dataflow analyzer that ignores USE too. (This also imply that
forcibly initializing the register to NaN here would lead to ICE later,
since the REG_DEAD notes are not issued.) */
break;
@@ -3023,7 +3023,7 @@ convert_regs_1 (basic_block block)
control_flow_insn_deleted |= subst_stack_regs (insn, &regstack);
}
}
-
+
/* Amongst the insns possibly deleted during the substitution process above,
might have been the only trapping insn in the block. We purge the now
possibly dead EH edges here to avoid an ICE from fixup_abnormal_edges,
@@ -3047,7 +3047,7 @@ convert_regs_1 (basic_block block)
/* Something failed if the stack lives don't match. If we had malformed
asms, we zapped the instruction itself, but that didn't produce the
same pattern of register kills as before. */
-
+
gcc_assert (hard_reg_set_equal_p (regstack.reg_set, bi->out_reg_set)
|| any_malformed_asm);
bi->stack_out = regstack;
@@ -3063,7 +3063,7 @@ convert_regs_2 (basic_block block)
/* We process the blocks in a top-down manner, in a way such that one block
is only processed after all its predecessors. The number of predecessors
- of every block has already been computed. */
+ of every block has already been computed. */
stack = XNEWVEC (basic_block, n_basic_blocks);
sp = stack;
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index ac8350d1f4e..199ff295d02 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -889,7 +889,7 @@ copyprop_hardreg_forward (void)
processed, begin with the value data that was live at
the end of the predecessor block. */
/* ??? Ought to use more intelligent queuing of blocks. */
- if (single_pred_p (bb)
+ if (single_pred_p (bb)
&& TEST_BIT (visited, single_pred (bb)->index)
&& ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
all_vd[bb->index] = all_vd[single_pred (bb)->index];
@@ -899,7 +899,7 @@ copyprop_hardreg_forward (void)
copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
}
- sbitmap_free (visited);
+ sbitmap_free (visited);
free (all_vd);
return 0;
}
diff --git a/gcc/reginfo.c b/gcc/reginfo.c
index 96e9bd9437a..ee6c7ee6447 100644
--- a/gcc/reginfo.c
+++ b/gcc/reginfo.c
@@ -996,7 +996,7 @@ free_reg_info (void)
}
/* Initialize some global data for this pass. */
-static unsigned int
+static unsigned int
reginfo_init (void)
{
if (df)
@@ -1298,7 +1298,7 @@ record_subregs_of_mode (rtx subreg)
}
/* Call record_subregs_of_mode for all the subregs in X. */
-static void
+static void
find_subregs_of_mode (rtx x)
{
enum rtx_code code = GET_CODE (x);
@@ -1307,7 +1307,7 @@ find_subregs_of_mode (rtx x)
if (code == SUBREG)
record_subregs_of_mode (x);
-
+
/* Time for some deep diving. */
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
diff --git a/gcc/regmove.c b/gcc/regmove.c
index 581af4c7edc..18afc5b6dcd 100644
--- a/gcc/regmove.c
+++ b/gcc/regmove.c
@@ -306,7 +306,7 @@ optimize_reg_copy_1 (rtx insn, rtx dest, rtx src)
if (sregno < FIRST_PSEUDO_REGISTER
&& reg_mentioned_p (dest, PATTERN (q)))
failed = 1;
-
+
/* Attempt to replace all uses. */
else if (!validate_replace_rtx (src, dest, q))
failed = 1;
@@ -527,7 +527,7 @@ optimize_reg_copy_3 (rtx insn, rtx dest, rtx src)
for (p = PREV_INSN (insn); p && ! reg_set_p (src_reg, p); p = PREV_INSN (p))
if (INSN_P (p) && BLOCK_FOR_INSN (p) != bb)
break;
-
+
if (! p || BLOCK_FOR_INSN (p) != bb)
return;
@@ -933,7 +933,7 @@ regmove_backward_pass (void)
FOR_EACH_BB_REVERSE (bb)
{
/* ??? Use the safe iterator because fixup_match_2 can remove
- insns via try_auto_increment. */
+ insns via try_auto_increment. */
FOR_BB_INSNS_REVERSE_SAFE (bb, insn, prev)
{
struct match match;
diff --git a/gcc/regrename.c b/gcc/regrename.c
index b9d68e7bbee..2b22aa68a9e 100644
--- a/gcc/regrename.c
+++ b/gcc/regrename.c
@@ -410,13 +410,13 @@ do_replace (struct du_head *head, int reg)
for (note = REG_NOTES (chain->insn); note; note = XEXP (note, 1))
{
- if (REG_NOTE_KIND (note) == REG_DEAD
+ if (REG_NOTE_KIND (note) == REG_DEAD
|| REG_NOTE_KIND (note) == REG_UNUSED)
{
rtx reg = XEXP (note, 0);
gcc_assert (HARD_REGISTER_P (reg));
-
- if (REGNO (reg) == base_regno)
+
+ if (REGNO (reg) == base_regno)
XEXP (note, 0) = *chain->loc;
}
}
diff --git a/gcc/regs.h b/gcc/regs.h
index 00be997695f..39c4fa32c68 100644
--- a/gcc/regs.h
+++ b/gcc/regs.h
@@ -54,7 +54,7 @@ extern int max_regno;
and is not using incremental df scanning.
At the end of a pass that uses REG_N_REFS and REG_N_SETS, a call
- should be made to regstat_free_n_sets_and_refs.
+ should be made to regstat_free_n_sets_and_refs.
Local alloc seems to play pretty loose with these values.
REG_N_REFS is set to 0 if the register is used in an asm.
diff --git a/gcc/regstat.c b/gcc/regstat.c
index 70ddfa4d84f..cb4471b235b 100644
--- a/gcc/regstat.c
+++ b/gcc/regstat.c
@@ -39,7 +39,7 @@ along with GCC; see the file COPYING3. If not see
struct regstat_n_sets_and_refs_t *regstat_n_sets_and_refs;
/*----------------------------------------------------------------------------
- REG_N_SETS and REG_N_REFS.
+ REG_N_SETS and REG_N_REFS.
----------------------------------------------------------------------------*/
/* If a pass need to change these values in some magical way or or the
@@ -117,7 +117,7 @@ size_t reg_info_p_size;
here. */
static void
-regstat_bb_compute_ri (unsigned int bb_index,
+regstat_bb_compute_ri (unsigned int bb_index,
bitmap live, bitmap do_not_gen, bitmap artificial_uses,
bitmap local_live, bitmap local_processed)
{
@@ -156,7 +156,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
bitmap_set_bit (artificial_uses, regno);
}
}
-
+
FOR_BB_INSNS_REVERSE (bb, insn)
{
unsigned int uid = INSN_UID (insn);
@@ -164,7 +164,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
bitmap_iterator bi;
struct df_mw_hardreg **mws_rec;
rtx link;
-
+
if (!NONDEBUG_INSN_P (insn))
continue;
@@ -176,7 +176,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
REG_LIVE_LENGTH (regno)++;
}
luid++;
-
+
bitmap_clear (do_not_gen);
link = REG_NOTES (insn);
@@ -190,7 +190,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
/* Process the defs. */
if (CALL_P (insn))
{
- bool can_throw = can_throw_internal (insn);
+ bool can_throw = can_throw_internal (insn);
bool set_jump = (find_reg_note (insn, REG_SETJMP, NULL) != NULL);
EXECUTE_IF_SET_IN_BITMAP (live, 0, regno, bi)
{
@@ -198,7 +198,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
REG_FREQ_CALLS_CROSSED (regno) += REG_FREQ_FROM_BB (bb);
if (can_throw)
REG_N_THROWING_CALLS_CROSSED (regno)++;
-
+
/* We have a problem with any pseudoreg that lives
across the setjmp. ANSI says that if a user variable
does not change in value between the setjmp and the
@@ -214,17 +214,17 @@ regstat_bb_compute_ri (unsigned int bb_index,
bitmap_set_bit (setjmp_crosses, regno);
}
}
-
+
/* We only care about real sets for calls. Clobbers only
may clobbers cannot be depended on. */
for (mws_rec = DF_INSN_UID_MWS (uid); *mws_rec; mws_rec++)
{
- struct df_mw_hardreg *mws = *mws_rec;
- if (DF_MWS_REG_DEF_P (mws))
+ struct df_mw_hardreg *mws = *mws_rec;
+ if (DF_MWS_REG_DEF_P (mws))
{
bool all_dead = true;
unsigned int r;
-
+
for (r=mws->start_regno; r <= mws->end_regno; r++)
if ((bitmap_bit_p (live, r))
|| bitmap_bit_p (artificial_uses, r))
@@ -232,7 +232,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
all_dead = false;
break;
}
-
+
if (all_dead)
{
unsigned int regno = mws->start_regno;
@@ -242,7 +242,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
}
}
}
-
+
/* All of the defs except the return value are some sort of
clobber. This code is for the return. */
for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
@@ -252,7 +252,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
|| (!(DF_REF_FLAGS (def) & (DF_REF_MUST_CLOBBER | DF_REF_MAY_CLOBBER))))
{
unsigned int dregno = DF_REF_REGNO (def);
-
+
if (bitmap_bit_p (live, dregno))
{
/* If we have seen this regno, then it has already been
@@ -275,7 +275,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
{
REG_LIVE_LENGTH (dregno)++;
}
-
+
if (dregno >= FIRST_PSEUDO_REGISTER)
{
REG_FREQ (dregno) += REG_FREQ_FROM_BB (bb);
@@ -284,16 +284,16 @@ regstat_bb_compute_ri (unsigned int bb_index,
else if (REG_BASIC_BLOCK (dregno) != bb->index)
REG_BASIC_BLOCK (dregno) = REG_BLOCK_GLOBAL;
}
-
+
if (!(DF_REF_FLAGS (def) & (DF_REF_MUST_CLOBBER + DF_REF_MAY_CLOBBER)))
bitmap_set_bit (do_not_gen, dregno);
-
+
/* Kill this register if it is not a subreg store or conditional store. */
if (!(DF_REF_FLAGS (def) & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
bitmap_clear_bit (live, dregno);
}
}
-
+
for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
{
df_ref use = *use_rec;
@@ -307,7 +307,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
else if (REG_BASIC_BLOCK (uregno) != bb->index)
REG_BASIC_BLOCK (uregno) = REG_BLOCK_GLOBAL;
}
-
+
if (!bitmap_bit_p (live, uregno))
{
/* This register is now live. */
@@ -326,7 +326,7 @@ regstat_bb_compute_ri (unsigned int bb_index,
}
}
}
-
+
/* Add the length of the block to all of the registers that were not
referenced, but still live in this block. */
bitmap_and_compl_into (live, local_processed);
@@ -376,8 +376,8 @@ regstat_compute_ri (void)
{
REG_BASIC_BLOCK (regno) = REG_BLOCK_UNKNOWN;
REG_LIVE_LENGTH (regno) = -1;
- }
-
+ }
+
BITMAP_FREE (local_live);
BITMAP_FREE (local_processed);
timevar_pop (TV_REG_STATS);
@@ -391,14 +391,14 @@ regstat_free_ri (void)
{
gcc_assert (reg_info_p);
reg_info_p_size = 0;
- free (reg_info_p);
+ free (reg_info_p);
reg_info_p = NULL;
BITMAP_FREE (setjmp_crosses);
}
-/* Return a bitmap containing the set of registers that cross a setjmp.
+/* Return a bitmap containing the set of registers that cross a setjmp.
The client should not change or delete this bitmap. */
bitmap
@@ -408,7 +408,7 @@ regstat_get_setjmp_crosses (void)
}
/*----------------------------------------------------------------------------
- Process REG_N_CALLS_CROSSED.
+ Process REG_N_CALLS_CROSSED.
This is used by sched_deps. A good implementation of sched-deps
would really process the blocks directly rather than going through
@@ -447,12 +447,12 @@ regstat_bb_compute_calls_crossed (unsigned int bb_index, bitmap live)
if ((DF_REF_FLAGS (use) & DF_REF_AT_TOP) == 0)
bitmap_set_bit (live, DF_REF_REGNO (use));
}
-
+
FOR_BB_INSNS_REVERSE (bb, insn)
{
unsigned int uid = INSN_UID (insn);
unsigned int regno;
-
+
if (!INSN_P (insn))
continue;
@@ -466,7 +466,7 @@ regstat_bb_compute_calls_crossed (unsigned int bb_index, bitmap live)
REG_FREQ_CALLS_CROSSED (regno) += REG_FREQ_FROM_BB (bb);
}
}
-
+
/* All of the defs except the return value are some sort of
clobber. This code is for the return. */
for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
@@ -480,7 +480,7 @@ regstat_bb_compute_calls_crossed (unsigned int bb_index, bitmap live)
bitmap_clear_bit (live, DF_REF_REGNO (def));
}
}
-
+
for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
{
df_ref use = *use_rec;
@@ -522,7 +522,7 @@ regstat_free_calls_crossed (void)
{
gcc_assert (reg_info_p);
reg_info_p_size = 0;
- free (reg_info_p);
+ free (reg_info_p);
reg_info_p = NULL;
}
diff --git a/gcc/reload.c b/gcc/reload.c
index a7791c2869e..97c6bacf25b 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -1286,7 +1286,7 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc,
Returning zero here ought to be safe as we take care in
find_reloads to not process the reloads when instruction was
replaced by USE. */
-
+
return 0;
}
}
@@ -2349,7 +2349,7 @@ decompose (rtx x)
{
rtx base = NULL_RTX, offset = 0;
rtx addr = XEXP (x, 0);
-
+
if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
|| GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
{
@@ -2359,7 +2359,7 @@ decompose (rtx x)
val.safe = REGNO (val.base) == STACK_POINTER_REGNUM;
return val;
}
-
+
if (GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY)
{
if (GET_CODE (XEXP (addr, 1)) == PLUS
@@ -2373,7 +2373,7 @@ decompose (rtx x)
return val;
}
}
-
+
if (GET_CODE (addr) == CONST)
{
addr = XEXP (addr, 0);
@@ -2392,7 +2392,7 @@ decompose (rtx x)
offset = XEXP (addr, 1);
}
}
-
+
if (offset == 0)
{
base = addr;
@@ -2423,18 +2423,18 @@ decompose (rtx x)
base = gen_rtx_PLUS (GET_MODE (base), base, offset);
offset = const0_rtx;
}
-
+
if (all_const && GET_CODE (base) == PLUS)
base = gen_rtx_CONST (GET_MODE (base), base);
-
+
gcc_assert (CONST_INT_P (offset));
-
+
val.start = INTVAL (offset);
val.end = val.start + GET_MODE_SIZE (GET_MODE (x));
val.base = base;
}
break;
-
+
case REG:
val.reg_flag = 1;
val.start = true_regnum (x);
@@ -2565,7 +2565,7 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known,
a register. */
enum reg_class preferred_class[MAX_RECOG_OPERANDS];
char pref_or_nothing[MAX_RECOG_OPERANDS];
- /* Nonzero for a MEM operand whose entire address needs a reload.
+ /* Nonzero for a MEM operand whose entire address needs a reload.
May be -1 to indicate the entire address may or may not need a reload. */
int address_reloaded[MAX_RECOG_OPERANDS];
/* Nonzero for an address operand that needs to be completely reloaded.
@@ -5156,7 +5156,7 @@ find_reloads_address (enum machine_mode mode, rtx *memrefloc, rtx ad,
continue;
inner_code = GET_CODE (XEXP (ad, 0));
- if (!(GET_CODE (ad) == PLUS
+ if (!(GET_CODE (ad) == PLUS
&& CONST_INT_P (XEXP (ad, 1))
&& (inner_code == PLUS || inner_code == LO_SUM)))
continue;
@@ -5187,17 +5187,17 @@ find_reloads_address (enum machine_mode mode, rtx *memrefloc, rtx ad,
/* Form the adjusted address. */
if (GET_CODE (XEXP (ad, 0)) == PLUS)
- ad = gen_rtx_PLUS (GET_MODE (ad),
- op_index == 0 ? offset_reg : addend,
+ ad = gen_rtx_PLUS (GET_MODE (ad),
+ op_index == 0 ? offset_reg : addend,
op_index == 0 ? addend : offset_reg);
else
- ad = gen_rtx_LO_SUM (GET_MODE (ad),
- op_index == 0 ? offset_reg : addend,
+ ad = gen_rtx_LO_SUM (GET_MODE (ad),
+ op_index == 0 ? offset_reg : addend,
op_index == 0 ? addend : offset_reg);
*loc = ad;
cls = base_reg_class (mode, MEM, GET_CODE (addend));
- find_reloads_address_part (XEXP (ad, op_index),
+ find_reloads_address_part (XEXP (ad, op_index),
&XEXP (ad, op_index), cls,
GET_MODE (ad), opnum, type, ind_levels);
find_reloads_address_1 (mode,
@@ -5500,7 +5500,7 @@ find_reloads_address_1 (enum machine_mode mode, rtx x, int context,
#define REG_OK_FOR_CONTEXT(CONTEXT, REGNO, MODE, OUTER, INDEX) \
((CONTEXT) == 0 \
? regno_ok_for_base_p (REGNO, MODE, OUTER, INDEX) \
- : REGNO_OK_FOR_INDEX_P (REGNO))
+ : REGNO_OK_FOR_INDEX_P (REGNO))
enum reg_class context_reg_class;
RTX_CODE code = GET_CODE (x);
@@ -6007,7 +6007,7 @@ find_reloads_address_1 (enum machine_mode mode, rtx x, int context,
if ((unsigned) CLASS_MAX_NREGS (rclass, GET_MODE (SUBREG_REG (x)))
> reg_class_size[rclass])
{
- x = find_reloads_subreg_address (x, 0, opnum,
+ x = find_reloads_subreg_address (x, 0, opnum,
ADDR_TYPE (type),
ind_levels, insn);
push_reload (x, NULL_RTX, loc, (rtx*) 0, rclass,
@@ -6644,7 +6644,7 @@ reg_overlap_mentioned_for_reload_p (rtx x, rtx in)
else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC
|| GET_CODE (x) == CC0)
return reg_mentioned_p (x, in);
- else
+ else
{
gcc_assert (GET_CODE (x) == PLUS);
@@ -7218,7 +7218,7 @@ find_inc_amount (rtx x, rtx inced)
REG_INC note in insn INSN. REGNO must refer to a hard register. */
#ifdef AUTO_INC_DEC
-static int
+static int
reg_inc_found_and_valid_p (unsigned int regno, unsigned int endregno,
rtx insn)
{
@@ -7228,13 +7228,13 @@ reg_inc_found_and_valid_p (unsigned int regno, unsigned int endregno,
if (! INSN_P (insn))
return 0;
-
+
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_INC)
{
unsigned int test = (int) REGNO (XEXP (link, 0));
if (test >= regno && test < endregno)
- return 1;
+ return 1;
}
return 0;
}
@@ -7242,7 +7242,7 @@ reg_inc_found_and_valid_p (unsigned int regno, unsigned int endregno,
#define reg_inc_found_and_valid_p(regno,endregno,insn) 0
-#endif
+#endif
/* Return 1 if register REGNO is the subject of a clobber in insn INSN.
If SETS is 1, also consider SETs. If SETS is 2, enable checking
@@ -7270,8 +7270,8 @@ regno_clobbered_p (unsigned int regno, rtx insn, enum machine_mode mode,
}
if (sets == 2 && reg_inc_found_and_valid_p (regno, endregno, insn))
- return 1;
-
+ return 1;
+
if (GET_CODE (PATTERN (insn)) == PARALLEL)
{
int i = XVECLEN (PATTERN (insn), 0) - 1;
@@ -7290,7 +7290,7 @@ regno_clobbered_p (unsigned int regno, rtx insn, enum machine_mode mode,
}
if (sets == 2
&& reg_inc_found_and_valid_p (regno, endregno, elt))
- return 1;
+ return 1;
}
}
diff --git a/gcc/reload1.c b/gcc/reload1.c
index ce049220608..2555dc8b33c 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -638,7 +638,7 @@ has_nonexceptional_receiver (void)
/* If we're not optimizing, then just err on the safe side. */
if (!optimize)
return true;
-
+
/* First determine which blocks can reach exit via normal paths. */
tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
@@ -648,7 +648,7 @@ has_nonexceptional_receiver (void)
/* Place the exit block on our worklist. */
EXIT_BLOCK_PTR->flags |= BB_REACHABLE;
*tos++ = EXIT_BLOCK_PTR;
-
+
/* Iterate: find everything reachable from what we've already seen. */
while (tos != worklist)
{
@@ -900,7 +900,7 @@ reload (rtx first, int global)
temp_pseudo_reg_arr = XNEWVEC (int, max_regno - LAST_VIRTUAL_REGISTER - 1);
for (n = 0, i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++)
temp_pseudo_reg_arr[n++] = i;
-
+
if (ira_conflicts_p)
/* Ask IRA to order pseudo-registers for better stack slot
sharing. */
@@ -1073,7 +1073,7 @@ reload (rtx first, int global)
something_changed = 1;
/* Even if the frame size remained the same, we might still have
- changed elimination offsets, e.g. if find_reloads called
+ changed elimination offsets, e.g. if find_reloads called
force_const_mem requiring the back end to allocate a constant
pool base register that needs to be saved on the stack. */
else if (!verify_initial_elim_offsets ())
@@ -1184,7 +1184,7 @@ reload (rtx first, int global)
if (! frame_pointer_needed)
FOR_EACH_BB (bb)
bitmap_clear_bit (df_get_live_in (bb), HARD_FRAME_POINTER_REGNUM);
-
+
/* Come here (with failure set nonzero) if we can't get enough spill
regs. */
failed:
@@ -3375,7 +3375,7 @@ eliminate_regs_in_insn (rtx insn, int replace)
/* First see if this insn remains valid when we make the
change. If not, try to replace the whole pattern with
a simple set (this may help if the original insn was a
- PARALLEL that was only recognized as single_set due to
+ PARALLEL that was only recognized as single_set due to
REG_UNUSED notes). If this isn't valid either, keep
the INSN_CODE the same and let reload fix it up. */
if (!validate_change (insn, &SET_SRC (old_set), new_src, 0))
@@ -3750,7 +3750,7 @@ update_eliminables (HARD_REG_SET *pset)
struct elim_table *ep;
for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
- if ((ep->from == HARD_FRAME_POINTER_REGNUM
+ if ((ep->from == HARD_FRAME_POINTER_REGNUM
&& targetm.frame_pointer_required ())
#ifdef ELIMINABLE_REGS
|| ! targetm.can_eliminate (ep->from, ep->to)
@@ -3861,7 +3861,7 @@ init_elim_table (void)
ep->can_eliminate = ep->can_eliminate_previous
= (targetm.can_eliminate (ep->from, ep->to)
&& ! (ep->to == STACK_POINTER_REGNUM
- && frame_pointer_needed
+ && frame_pointer_needed
&& (! SUPPORTS_STACK_ALIGNMENT
|| ! stack_realign_fp)));
}
@@ -3960,7 +3960,7 @@ finish_spills (int global)
in pseudo_previous_regs so we avoid reallocating it to the
same hard reg in a later pass. */
gcc_assert (reg_renumber[i] >= 0);
-
+
SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]);
/* Mark it as no longer having a hard register home. */
reg_renumber[i] = -1;
@@ -4482,7 +4482,7 @@ reload_as_needed (int live_known)
unless X is an output reload reg of the current insn.
X may be a hard reg (the reload reg)
- or it may be a pseudo reg that was reloaded from.
+ or it may be a pseudo reg that was reloaded from.
When DATA is non-NULL just mark the registers in regset
to be forgotten later. */
@@ -5249,11 +5249,11 @@ gen_reload_chain_without_interm_reg_p (int r1, int r2)
reload has completed. */
result = constrain_operands (1);
}
-
+
delete_insns_since (last);
return result;
}
-
+
/* It looks like other cases in gen_reload are not possible for
chain reloads or do need an intermediate hard registers. */
return true;
@@ -8124,7 +8124,7 @@ emit_reload_insns (struct insn_chain *chain)
SET_HARD_REG_BIT (reg_reloaded_valid, src_regno + k);
if (HARD_REGNO_CALL_PART_CLOBBERED (src_regno + k,
mode))
- SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
+ SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
src_regno + k);
else
CLEAR_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
@@ -8136,7 +8136,7 @@ emit_reload_insns (struct insn_chain *chain)
CLEAR_HARD_REG_BIT (reg_reloaded_died, src_regno);
}
reg_last_reload_reg[out_regno] = src_reg;
- /* We have to set reg_has_output_reload here, or else
+ /* We have to set reg_has_output_reload here, or else
forget_old_reloads_1 will clear reg_last_reload_reg
right away. */
SET_REGNO_REG_SET (&reg_has_output_reload,
@@ -8970,7 +8970,7 @@ fixup_abnormal_edges (void)
}
/* It may be that we don't find any such trapping insn. In this
- case we discovered quite late that the insn that had been
+ case we discovered quite late that the insn that had been
marked as can_throw_internal in fact couldn't trap at all.
So we should in fact delete the EH edges out of the block. */
else
diff --git a/gcc/resource.c b/gcc/resource.c
index 2bb3a1ad1e8..9dc44b9ac29 100644
--- a/gcc/resource.c
+++ b/gcc/resource.c
@@ -957,7 +957,7 @@ mark_target_live_regs (rtx insns, rtx target, struct resources *res)
/* Get starting and ending insn, handling the case where each might
be a SEQUENCE. */
- start_insn = (b == ENTRY_BLOCK_PTR->next_bb->index ?
+ start_insn = (b == ENTRY_BLOCK_PTR->next_bb->index ?
insns : BB_HEAD (BASIC_BLOCK (b)));
stop_insn = target;
diff --git a/gcc/rtl.c b/gcc/rtl.c
index 58867aec7cc..fe9c9514f1c 100644
--- a/gcc/rtl.c
+++ b/gcc/rtl.c
@@ -220,7 +220,7 @@ bool
shared_const_p (const_rtx orig)
{
gcc_assert (GET_CODE (orig) == CONST);
-
+
/* CONST can be shared if it contains a SYMBOL_REF. If it contains
a LABEL_REF, it isn't sharable. */
return (GET_CODE (XEXP (orig, 0)) == PLUS
@@ -350,7 +350,7 @@ int currently_expanding_to_rtl;
-/* Same as rtx_equal_p, but call CB on each pair of rtx if CB is not NULL.
+/* Same as rtx_equal_p, but call CB on each pair of rtx if CB is not NULL.
When the callback returns true, we continue with the new pair.
Whenever changing this function check if rtx_equal_p below doesn't need
changing as well. */
@@ -440,7 +440,7 @@ rtx_equal_p_cb (const_rtx x, const_rtx y, rtx_equal_p_callback_function cb)
/* And the corresponding elements must match. */
for (j = 0; j < XVECLEN (x, i); j++)
- if (rtx_equal_p_cb (XVECEXP (x, i, j),
+ if (rtx_equal_p_cb (XVECEXP (x, i, j),
XVECEXP (y, i, j), cb) == 0)
return 0;
break;
@@ -623,7 +623,7 @@ dump_rtx_statistics (void)
fprintf (stderr, "%-20s %7d %10d\n",
"Total", total_counts, total_sizes);
fprintf (stderr, "---------------------------------------\n");
-#endif
+#endif
}
#if defined ENABLE_RTL_CHECKING && (GCC_VERSION >= 2007)
diff --git a/gcc/rtl.def b/gcc/rtl.def
index acb7ee915b8..54f2a6028dd 100644
--- a/gcc/rtl.def
+++ b/gcc/rtl.def
@@ -42,7 +42,7 @@ along with GCC; see the file COPYING3. If not see
3. The print format, and type of each rtx->u.fld[] (field) in this rtx.
These formats are stored in rtx_format[].
The meaning of the formats is documented in front of this array in rtl.c
-
+
4. The class of the rtx. These are stored in rtx_class and are accessed
via the GET_RTX_CLASS macro. They are defined as follows:
@@ -160,10 +160,10 @@ DEF_RTL_EXPR(NOTE, "note", "iuuB0ni", RTX_EXTRA)
/* ----------------------------------------------------------------------
Top level constituents of INSN, JUMP_INSN and CALL_INSN.
---------------------------------------------------------------------- */
-
+
/* Conditionally execute code.
Operand 0 is the condition that if true, the code is executed.
- Operand 1 is the code to be executed (typically a SET).
+ Operand 1 is the code to be executed (typically a SET).
Semantics are that there are no side effects if the condition
is false. This pattern is created automatically by the if_convert
@@ -204,7 +204,7 @@ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEEi", RTX_EXTRA)
genrecog.c record positions within an insn.)
UNSPEC can occur all by itself in a PATTERN, as a component of a PARALLEL,
- or inside an expression.
+ or inside an expression.
UNSPEC by itself or as a component of a PARALLEL
is currently considered not deletable.
@@ -242,7 +242,7 @@ DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA)
The third, fourth and fifth operands are only valid when
CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing
compilations. */
-
+
DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA)
/* Memory prefetch, with attributes supported on some targets.
@@ -270,7 +270,7 @@ DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA)
/* Indicate something is used in a way that we don't want to explain.
For example, subroutine calls will use the register
- in which the static chain is passed.
+ in which the static chain is passed.
USE can not appear as an operand of other rtx except for PARALLEL.
USE is not deletable, as it indicates that the operand
@@ -279,7 +279,7 @@ DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA)
/* Indicate something is clobbered in a way that we don't want to explain.
For example, subroutine calls will clobber some physical registers
- (the ones that are by convention not saved).
+ (the ones that are by convention not saved).
CLOBBER can not appear as an operand of other rtx except for PARALLEL.
CLOBBER of a hard register appearing by itself (not within PARALLEL)
@@ -394,7 +394,7 @@ DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ)
The operand is a CODE_LABEL found in the insn chain. */
DEF_RTL_EXPR(LABEL_REF, "label_ref", "u", RTX_CONST_OBJ)
-/* Reference to a named label:
+/* Reference to a named label:
Operand 0: label name
Operand 1: flags (see SYMBOL_FLAG_* in rtl.h)
Operand 2: tree from which this symbol is derived, or null.
@@ -673,7 +673,7 @@ DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH)
submodes as the input vector mode, and the number of output parts must be
an integer multiple of the number of input parts. */
DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY)
-
+
/* Addition with signed saturation */
DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH)
@@ -770,7 +770,7 @@ DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH)
to the PARALLEL and the initial expressions in the PARALLEL are matched.
Operand 0 is the operand-number, as in match_operand.
Operand 1 is a predicate to apply to the PARALLEL.
- Operand 2 is a vector of expressions, each of which must match the
+ Operand 2 is a vector of expressions, each of which must match the
corresponding element in the PARALLEL. */
DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH)
@@ -858,14 +858,14 @@ DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA)
C code to run to decide on a template to use. Otherwise, it is the
template to use.
4: C expression that must be true for split. This may start with "&&"
- in which case the split condition is the logical and of the insn
+ in which case the split condition is the logical and of the insn
condition and what follows the "&&" of this operand.
5: vector of insn patterns to place into a SEQUENCE
6: optionally, some C code to execute before generating the
insns. This might, for example, create some RTX's and store them in
elements of `recog_data.operand' for use by the vector of
insn-patterns.
- (`operands' is an alias here for `recog_data.operand').
+ (`operands' is an alias here for `recog_data.operand').
7: optionally, a vector of attributes for this insn. */
DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA)
@@ -885,7 +885,7 @@ DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA)
insn-patterns.
(`operands' is an alias here for `recog_data.operand'). */
DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA)
-
+
/* Define a requirement for delay slots.
1st operand: Condition involving insn attributes that, if true,
indicates that the insn requires the number of delay slots
@@ -896,7 +896,7 @@ DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA)
The first must be true for an insn to occupy that delay slot
location. The second is true for all insns that can be
annulled if the branch is true and the third is true for all
- insns that can be annulled if the branch is false.
+ insns that can be annulled if the branch is false.
Multiple DEFINE_DELAYs may be present. They indicate differing
requirements for delay slots. */
@@ -964,7 +964,7 @@ DEF_RTL_EXPR(DEFINE_REGISTER_CONSTRAINT, "define_register_constraint", "sss", RT
(reg X) where X is a base register. It is suitable for constraints that
describe a subset of all address references.
- When in doubt, use plain DEFINE_CONSTRAINT.
+ When in doubt, use plain DEFINE_CONSTRAINT.
Operand:
0: The name of the constraint (often, but not always, a single letter).
@@ -988,7 +988,7 @@ DEF_RTL_EXPR(DEFINE_REGISTER_CONSTRAINT, "define_register_constraint", "sss", RT
DEF_RTL_EXPR(DEFINE_CONSTRAINT, "define_constraint", "sse", RTX_EXTRA)
DEF_RTL_EXPR(DEFINE_MEMORY_CONSTRAINT, "define_memory_constraint", "sse", RTX_EXTRA)
DEF_RTL_EXPR(DEFINE_ADDRESS_CONSTRAINT, "define_address_constraint", "sse", RTX_EXTRA)
-
+
/* Constructions for CPU pipeline description described by NDFAs. */
@@ -1024,7 +1024,7 @@ DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA)
an asymmetric relation. CPU units or unit patterns in the strings
are separated by commas. Pattern is one unit name or unit names
separated by white-spaces.
-
+
For example, it is useful for description that slot1 is reserved
after slot0 reservation for a VLIW processor. We could describe it
by the following construction
@@ -1049,7 +1049,7 @@ DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA)
state reservation, checking for `final_presence_set' is done on the
result reservation. This construction is useful to describe a
reservation which is actually two subsequent reservations. For
- example, if we use
+ example, if we use
(presence_set "slot1" "slot0")
@@ -1122,7 +1122,7 @@ DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA)
o "time" which means printing additional time statistics about
generation of automata.
-
+
o "v" which means generation of file describing the result
automata. The file has suffix `.dfa' and can be used for the
description verification and debugging.
@@ -1160,7 +1160,7 @@ DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA)
allof = allof "+" repeat
| repeat
-
+
repeat = element "*" number
| element
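The rtl.def comments above repeatedly refer to each code's format string (the "ee", "isE", "u" arguments of DEF_RTL_EXPR), where each letter describes one operand. A toy standalone sketch of how such a per-code format table can drive generic operand handling; the table contents, letters chosen and names are illustrative only, not the real rtx_format[]:

#include <stdio.h>
#include <string.h>

/* One format letter per operand: 'e' = sub-expression, 'E' = vector
   of sub-expressions, 'i' = integer, 's' = string.  */
static const char *const toy_format[] = {
  "ee",    /* code 0: two expression operands, like a SET        */
  "isE",   /* code 1: int, string, expression vector             */
  "i"      /* code 2: a single integer operand                   */
};

static void
describe_operands (int code)
{
  const char *fmt = toy_format[code];
  for (size_t i = 0; i < strlen (fmt); i++)
    switch (fmt[i])
      {
      case 'e': printf ("  operand %zu: expression\n", i); break;
      case 'E': printf ("  operand %zu: vector of expressions\n", i); break;
      case 'i': printf ("  operand %zu: integer\n", i); break;
      case 's': printf ("  operand %zu: string\n", i); break;
      default:  printf ("  operand %zu: (other)\n", i); break;
      }
}

int
main (void)
{
  for (int code = 0; code < 3; code++)
    {
      printf ("code %d, format \"%s\":\n", code, toy_format[code]);
      describe_operands (code);
    }
  return 0;
}
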
diff --git a/gcc/rtl.h b/gcc/rtl.h
index a44c59c7afd..960237cec53 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -251,7 +251,7 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"),
In a CODE_LABEL, part of the two-bit alternate entry field. */
unsigned int jump : 1;
/* In a CODE_LABEL, part of the two-bit alternate entry field.
- 1 in a MEM if it cannot trap.
+ 1 in a MEM if it cannot trap.
1 in a CALL_INSN logically equivalent to
ECF_LOOPING_CONST_OR_PURE and DECL_LOOPING_CONST_OR_PURE_P. */
unsigned int call : 1;
@@ -259,7 +259,7 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"),
1 in a SUBREG used for SUBREG_PROMOTED_UNSIGNED_P.
1 in a SYMBOL_REF if it addresses something in the per-function
constants pool.
- 1 in a CALL_INSN logically equivalent to ECF_CONST and TREE_READONLY.
+ 1 in a CALL_INSN logically equivalent to ECF_CONST and TREE_READONLY.
1 in a NOTE, or EXPR_LIST for a const call.
1 in a JUMP_INSN, CALL_INSN, or INSN of an annulling branch. */
unsigned int unchanging : 1;
@@ -305,8 +305,8 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"),
unsigned frame_related : 1;
/* 1 in a REG or PARALLEL that is the current function's return value.
1 in a MEM if it refers to a scalar.
- 1 in a SYMBOL_REF for a weak symbol.
- 1 in a CALL_INSN logically equivalent to ECF_PURE and DECL_PURE_P. */
+ 1 in a SYMBOL_REF for a weak symbol.
+ 1 in a CALL_INSN logically equivalent to ECF_PURE and DECL_PURE_P. */
unsigned return_val : 1;
/* The first element of the operands of this rtx.
@@ -946,7 +946,7 @@ enum var_init_status
/* Codes that appear in the NOTE_KIND field for kinds of notes
that are not line numbers. These codes are all negative.
-
+
Notice that we do not try to use zero here for any of
the special note codes because sometimes the source line
actually can be zero! This happens (for example) when we
@@ -2375,7 +2375,7 @@ extern rtx emit_library_call_value (rtx, rtx, enum libcall_type,
/* In varasm.c */
extern void init_varasm_once (void);
extern enum tls_model decl_default_tls_model (const_tree);
-
+
/* In rtl.c */
extern void traverse_md_constants (int (*) (void **, void *), void *);
struct md_constant { char *name, *value; };
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index ab88f23a379..0e3b77bc0c9 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -611,7 +611,7 @@ count_occurrences (const_rtx x, const_rtx find, int count_dest)
if (XEXP (x, 1))
count += count_occurrences (XEXP (x, 1), find, count_dest);
return count;
-
+
case MEM:
if (MEM_P (find) && rtx_equal_p (x, find))
return 1;
@@ -2778,11 +2778,11 @@ for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
else if (result != 0)
/* Stop the traversal. */
return result;
-
+
if (*x == NULL_RTX)
/* There are no sub-expressions. */
continue;
-
+
i = non_rtx_starting_operands[GET_CODE (*x)];
if (i >= 0)
{
@@ -2807,11 +2807,11 @@ for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
else if (result != 0)
/* Stop the traversal. */
return result;
-
+
if (*x == NULL_RTX)
/* There are no sub-expressions. */
continue;
-
+
i = non_rtx_starting_operands[GET_CODE (*x)];
if (i >= 0)
{
@@ -2911,7 +2911,7 @@ int
commutative_operand_precedence (rtx op)
{
enum rtx_code code = GET_CODE (op);
-
+
/* Constants always come the second operand. Prefer "nice" constants. */
if (code == CONST_INT)
return -8;
@@ -2958,7 +2958,7 @@ commutative_operand_precedence (rtx op)
operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
is canonical, although it will usually be further simplified. */
return 2;
-
+
case RTX_UNARY:
/* Then prefer NEG and NOT. */
if (code == NEG || code == NOT)
@@ -3127,10 +3127,10 @@ subreg_get_info (unsigned int xregno, enum machine_mode xmode,
picking a different register class, or doing it in memory if
necessary.) An example of a value with holes is XCmode on 32-bit
x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
- 3 for each part, but in memory it's two 128-bit parts.
+ 3 for each part, but in memory it's two 128-bit parts.
Padding is assumed to be at the end (not necessarily the 'high part')
of each unit. */
- if ((offset / GET_MODE_SIZE (xmode_unit) + 1
+ if ((offset / GET_MODE_SIZE (xmode_unit) + 1
< GET_MODE_NUNITS (xmode))
&& (offset / GET_MODE_SIZE (xmode_unit)
!= ((offset + GET_MODE_SIZE (ymode) - 1)
@@ -3142,7 +3142,7 @@ subreg_get_info (unsigned int xregno, enum machine_mode xmode,
}
else
nregs_xmode = hard_regno_nregs[xregno][xmode];
-
+
nregs_ymode = hard_regno_nregs[xregno][ymode];
/* Paradoxical subregs are otherwise valid. */
@@ -3528,7 +3528,7 @@ label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
/* Return an estimate of the cost of computing rtx X.
One use is in cse, to decide which expression to keep in the hash table.
Another is in rtl generation, to pick the cheapest way to multiply.
- Other uses like the latter are expected in the future.
+ Other uses like the latter are expected in the future.
SPEED parameter specify whether costs optimized for speed or size should
be returned. */
@@ -3602,7 +3602,7 @@ rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED, bool speed)
}
/* Return cost of address expression X.
- Expect that X is properly formed address reference.
+ Expect that X is properly formed address reference.
SPEED parameter specify whether costs optimized for speed or size should
be returned. */
@@ -3829,8 +3829,8 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
/* If this produces an integer result, we know which bits are set.
Code here used to clear bits outside the mode of X, but that is
now done above. */
- /* Mind that MODE is the mode the caller wants to look at this
- operation in, and not the actual operation mode. We can wind
+ /* Mind that MODE is the mode the caller wants to look at this
+ operation in, and not the actual operation mode. We can wind
up with (subreg:DI (gt:V4HI x y)), and we don't have anything
that describes the results of a vector compare. */
if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
@@ -4678,7 +4678,7 @@ insn_rtx_cost (rtx pat, bool speed)
If WANT_REG is nonzero, we wish the condition to be relative to that
register, if possible. Therefore, do not canonicalize the condition
- further. If ALLOW_CC_MODE is nonzero, allow the condition returned
+ further. If ALLOW_CC_MODE is nonzero, allow the condition returned
to be a compare to a CC mode register.
If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
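Among the rtlanal.c hunks above, commutative_operand_precedence assigns each operand kind a rank so that constants and other "cheap" operands end up as the second operand of a commutative operation. A minimal standalone sketch of that canonicalization step, with invented kinds and rank values (the real function distinguishes many more cases):

#include <stdio.h>

enum kind { KIND_CONST, KIND_REG, KIND_COMPLEX };

struct operand { enum kind k; int id; };

/* Lower rank means "wants to be the second operand".  */
static int
precedence (const struct operand *op)
{
  switch (op->k)
    {
    case KIND_CONST:   return -8;
    case KIND_REG:     return 1;
    case KIND_COMPLEX: return 4;
    }
  return 0;
}

/* Canonicalize a commutative pair in place.  */
static void
canonicalize (struct operand *a, struct operand *b)
{
  if (precedence (a) < precedence (b))
    {
      struct operand tmp = *a;
      *a = *b;
      *b = tmp;
    }
}

int
main (void)
{
  struct operand a = { KIND_CONST, 5 };   /* a constant */
  struct operand b = { KIND_REG, 2 };     /* a register */
  canonicalize (&a, &b);
  /* The constant ends up as the second operand.  */
  printf ("first: kind %d, second: kind %d\n", a.k, b.k);
  return 0;
}
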
diff --git a/gcc/sbitmap.c b/gcc/sbitmap.c
index 689cf975576..f429f70eccf 100644
--- a/gcc/sbitmap.c
+++ b/gcc/sbitmap.c
@@ -55,7 +55,7 @@ sbitmap_verify_popcount (const_sbitmap a)
{
unsigned ix;
unsigned int lastword;
-
+
if (!a->popcount)
return;
@@ -91,7 +91,7 @@ sbitmap_alloc (unsigned int n_elms)
sbitmap
sbitmap_alloc_with_popcount (unsigned int n_elms)
{
- sbitmap const bmap = sbitmap_alloc (n_elms);
+ sbitmap const bmap = sbitmap_alloc (n_elms);
bmap->popcount = XNEWVEC (unsigned char, bmap->size);
return bmap;
}
@@ -121,7 +121,7 @@ sbitmap_resize (sbitmap bmap, unsigned int n_elms, int def)
{
if (def)
{
- memset (bmap->elms + bmap->size, -1,
+ memset (bmap->elms + bmap->size, -1,
bytes - SBITMAP_SIZE_BYTES (bmap));
/* Set the new bits if the original last element. */
@@ -138,13 +138,13 @@ sbitmap_resize (sbitmap bmap, unsigned int n_elms, int def)
}
else
{
- memset (bmap->elms + bmap->size, 0,
+ memset (bmap->elms + bmap->size, 0,
bytes - SBITMAP_SIZE_BYTES (bmap));
if (bmap->popcount)
memset (bmap->popcount + bmap->size, 0,
- (size * sizeof (unsigned char))
+ (size * sizeof (unsigned char))
- (bmap->size * sizeof (unsigned char)));
-
+
}
}
else if (n_elms < bmap->n_bits)
@@ -247,7 +247,7 @@ sbitmap_copy (sbitmap dst, const_sbitmap src)
void
sbitmap_copy_n (sbitmap dst, const_sbitmap src, unsigned int n)
{
- memcpy (dst->elms, src->elms, sizeof (SBITMAP_ELT_TYPE) * n);
+ memcpy (dst->elms, src->elms, sizeof (SBITMAP_ELT_TYPE) * n);
if (dst->popcount)
memcpy (dst->popcount, src->popcount, sizeof (unsigned char) * n);
}
@@ -275,7 +275,7 @@ sbitmap_empty_p (const_sbitmap bmap)
/* Return false if any of the N bits are set in MAP starting at
START. */
-bool
+bool
sbitmap_range_empty_p (const_sbitmap bmap, unsigned int start, unsigned int n)
{
unsigned int i = start / SBITMAP_ELT_BITS;
@@ -295,7 +295,7 @@ sbitmap_range_empty_p (const_sbitmap bmap, unsigned int start, unsigned int n)
return (elm == 0);
}
- if (elm)
+ if (elm)
return false;
n -= SBITMAP_ELT_BITS - shift;
@@ -316,7 +316,7 @@ sbitmap_range_empty_p (const_sbitmap bmap, unsigned int start, unsigned int n)
elm = bmap->elms[i];
elm &= ((1 << n) - 1);
return (elm == 0);
- }
+ }
return true;
}
@@ -350,7 +350,7 @@ sbitmap_ones (sbitmap bmap)
bmap->elms[bmap->size - 1]
= (SBITMAP_ELT_TYPE)-1 >> (SBITMAP_ELT_BITS - last_bit);
if (bmap->popcount)
- bmap->popcount[bmap->size - 1]
+ bmap->popcount[bmap->size - 1]
= do_popcount (bmap->elms[bmap->size - 1]);
}
}
@@ -392,7 +392,7 @@ sbitmap_union_of_diff_cg (sbitmap dst, const_sbitmap a, const_sbitmap b, const_s
SBITMAP_ELT_TYPE changed = 0;
gcc_assert (!dst->popcount);
-
+
for (i = 0; i < n; i++)
{
const SBITMAP_ELT_TYPE tmp = *ap++ | (*bp++ & ~*cp++);
@@ -429,7 +429,7 @@ sbitmap_not (sbitmap dst, const_sbitmap src)
const_sbitmap_ptr srcp = src->elms;
unsigned int last_bit;
- gcc_assert (!dst->popcount);
+ gcc_assert (!dst->popcount);
for (i = 0; i < n; i++)
*dstp++ = ~*srcp++;
@@ -531,7 +531,7 @@ sbitmap_a_and_b (sbitmap dst, const_sbitmap a, const_sbitmap b)
if (wordchanged)
*popcountp = do_popcount (tmp);
popcountp++;
- }
+ }
*dstp++ = tmp;
}
#ifdef BITMAP_DEBUGGING
@@ -551,7 +551,7 @@ sbitmap_a_xor_b_cg (sbitmap dst, const_sbitmap a, const_sbitmap b)
const_sbitmap_ptr ap = a->elms;
const_sbitmap_ptr bp = b->elms;
SBITMAP_ELT_TYPE changed = 0;
-
+
gcc_assert (!dst->popcount);
for (i = 0; i < n; i++)
@@ -583,7 +583,7 @@ sbitmap_a_xor_b (sbitmap dst, const_sbitmap a, const_sbitmap b)
if (wordchanged)
*popcountp = do_popcount (tmp);
popcountp++;
- }
+ }
*dstp++ = tmp;
}
#ifdef BITMAP_DEBUGGING
@@ -635,7 +635,7 @@ sbitmap_a_or_b (sbitmap dst, const_sbitmap a, const_sbitmap b)
if (wordchanged)
*popcountp = do_popcount (tmp);
popcountp++;
- }
+ }
*dstp++ = tmp;
}
#ifdef BITMAP_DEBUGGING
@@ -756,7 +756,7 @@ sbitmap_intersection_of_succs (sbitmap dst, sbitmap *src, int bb)
e = EDGE_SUCC (b, ix);
if (e->dest == EXIT_BLOCK_PTR)
continue;
-
+
sbitmap_copy (dst, src[e->dest->index]);
break;
}
@@ -1061,7 +1061,7 @@ sbitmap_popcount (const_sbitmap a, unsigned long maxbit)
if (maxbit == 0)
return 0;
-
+
if (maxbit >= a->n_bits)
maxbit = a->n_bits;
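Several sbitmap.c hunks above (sbitmap_verify_popcount, sbitmap_ones, sbitmap_a_and_b, sbitmap_a_xor_b, ...) maintain a per-word popcount cache next to the bit words and update it whenever a word changes. A self-contained sketch of that caching scheme, with invented names and a fixed toy size rather than GCC's sbitmap layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NWORDS 4

struct toy_bitmap
{
  uint64_t elms[NWORDS];
  unsigned char popcount[NWORDS];   /* cached popcount of each word */
};

static unsigned char
do_popcount (uint64_t w)
{
  unsigned char n = 0;
  while (w)
    {
      w &= w - 1;                   /* clear the lowest set bit */
      n++;
    }
  return n;
}

static void
set_bit (struct toy_bitmap *b, unsigned bit)
{
  unsigned word = bit / 64, pos = bit % 64;
  if (!(b->elms[word] & ((uint64_t) 1 << pos)))
    {
      b->elms[word] |= (uint64_t) 1 << pos;
      b->popcount[word]++;          /* keep the cache in sync */
    }
}

/* The analogue of sbitmap_verify_popcount: the cache must agree with
   a recount of every word.  */
static void
verify_popcount (const struct toy_bitmap *b)
{
  for (unsigned i = 0; i < NWORDS; i++)
    assert (b->popcount[i] == do_popcount (b->elms[i]));
}

int
main (void)
{
  struct toy_bitmap b = { { 0 }, { 0 } };
  set_bit (&b, 3);
  set_bit (&b, 70);
  set_bit (&b, 70);                 /* setting a bit twice counts once */
  verify_popcount (&b);
  printf ("word 0: %u bits, word 1: %u bits\n",
          b.popcount[0], b.popcount[1]);
  return 0;
}
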
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index fdc98fb3c4b..21632c3b5ed 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -879,7 +879,7 @@ ask_dependency_caches (dep_t dep)
&& anti_dependency_cache != NULL);
if (!(current_sched_info->flags & USE_DEPS_LIST))
- {
+ {
enum reg_note present_dep_type;
if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
@@ -897,9 +897,9 @@ ask_dependency_caches (dep_t dep)
return DEP_PRESENT;
}
else
- {
+ {
ds_t present_dep_types = 0;
-
+
if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
present_dep_types |= DEP_TRUE;
if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
@@ -1005,7 +1005,7 @@ update_dependency_caches (dep_t dep, enum reg_note old_type)
break;
default:
- gcc_unreachable ();
+ gcc_unreachable ();
}
}
@@ -1086,7 +1086,7 @@ update_dep (dep_t dep, dep_t new_dep,
dw = estimate_dep_weak (mem1, mem2);
ds = set_dep_weak (ds, BEGIN_DATA, dw);
}
-
+
new_status = ds_merge (dep_status, ds);
}
}
@@ -1123,7 +1123,7 @@ add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
&& DEP_PRO (new_dep) != DEP_CON (new_dep));
-
+
#ifdef ENABLE_CHECKING
check_dep (new_dep, mem1 != NULL);
#endif
@@ -1184,7 +1184,7 @@ add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
}
sd_add_dep (new_dep, resolved_p);
-
+
return DEP_CREATED;
}
@@ -1398,7 +1398,7 @@ add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
}
}
-/* Similar, but free *LISTP at the same time, when the context
+/* Similar, but free *LISTP at the same time, when the context
is not readonly. */
static void
@@ -1422,14 +1422,14 @@ add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
}
}
-/* Remove all occurences of INSN from LIST. Return the number of
+/* Remove all occurences of INSN from LIST. Return the number of
occurences removed. */
static int
remove_from_dependence_list (rtx insn, rtx* listp)
{
int removed = 0;
-
+
while (*listp)
{
if (XEXP (*listp, 0) == insn)
@@ -1438,19 +1438,19 @@ remove_from_dependence_list (rtx insn, rtx* listp)
removed++;
continue;
}
-
+
listp = &XEXP (*listp, 1);
}
-
+
return removed;
}
/* Same as above, but process two lists at once. */
-static int
+static int
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
{
int removed = 0;
-
+
while (*listp)
{
if (XEXP (*listp, 0) == insn)
@@ -1460,11 +1460,11 @@ remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
removed++;
continue;
}
-
+
listp = &XEXP (*listp, 1);
exprp = &XEXP (*exprp, 1);
}
-
+
return removed;
}
@@ -1585,7 +1585,7 @@ flush_pending_lists (struct deps *deps, rtx insn, int for_read,
{
if (for_write)
{
- add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
+ add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1, REG_DEP_ANTI);
if (!deps->readonly)
{
@@ -1597,7 +1597,7 @@ flush_pending_lists (struct deps *deps, rtx insn, int for_read,
add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
- add_dependence_list_and_free (deps, insn,
+ add_dependence_list_and_free (deps, insn,
&deps->last_pending_memory_flush, 1,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
if (!deps->readonly)
@@ -1660,7 +1660,7 @@ haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
{
dep_def _dep, *dep = &_dep;
-
+
init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
@@ -1787,7 +1787,7 @@ setup_insn_reg_uses (struct deps *deps, rtx insn)
use = create_insn_reg_use (i, insn);
use->next_regno_use = use;
reg_last = &deps->reg_last[i];
-
+
/* Create the cycle list of uses. */
for (list = reg_last->uses; list; list = XEXP (list, 1))
{
@@ -1858,7 +1858,7 @@ mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
{
enum reg_class cl;
int new_incr, last = regno + nregs;
-
+
while (regno < last)
{
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
@@ -1940,7 +1940,7 @@ mark_hard_regno_death (int regno, int nregs)
{
enum reg_class cl;
int last = regno + nregs;
-
+
while (regno < last)
{
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
@@ -2015,11 +2015,11 @@ setup_insn_reg_pressure_info (rtx insn)
reg_pressure_info[cl].unused_set_increase = 0;
reg_pressure_info[cl].change = 0;
}
-
+
note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
-
+
note_stores (PATTERN (insn), mark_insn_reg_store, insn);
-
+
#ifdef AUTO_INC_DEC
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_INC)
@@ -2029,7 +2029,7 @@ setup_insn_reg_pressure_info (rtx insn)
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
mark_reg_death (XEXP (link, 0));
-
+
len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
pressure_info
= INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
@@ -2055,7 +2055,7 @@ setup_insn_reg_pressure_info (rtx insn)
at the most toplevel SET. */
static bool can_start_lhs_rhs_p;
-/* Extend reg info for the deps context DEPS given that
+/* Extend reg info for the deps context DEPS given that
we have just generated a register numbered REGNO. */
static void
extend_deps_reg_info (struct deps *deps, int regno)
@@ -2074,10 +2074,10 @@ extend_deps_reg_info (struct deps *deps, int regno)
if (max_regno > deps->max_reg)
{
- deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
+ deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
max_regno);
memset (&deps->reg_last[deps->max_reg],
- 0, (max_regno - deps->max_reg)
+ 0, (max_regno - deps->max_reg)
* sizeof (struct deps_reg));
deps->max_reg = max_regno;
}
@@ -2830,14 +2830,14 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
-
+
if (!deps->readonly)
{
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
reg_last->uses_length++;
}
}
-
+
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
{
@@ -2846,7 +2846,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
add_dependence_list (insn, reg_last->implicit_sets, 0,
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
-
+
if (!deps->readonly)
{
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
@@ -2865,7 +2865,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
add_dependence_list (insn, reg_last->implicit_sets, 0,
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
-
+
if (!deps->readonly)
{
reg_last->clobbers
@@ -2881,7 +2881,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
-
+
if (!deps->readonly)
{
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
@@ -2906,7 +2906,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
REG_DEP_ANTI);
add_dependence_list_and_free
(deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
-
+
if (!deps->readonly)
{
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
@@ -2921,7 +2921,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
REG_DEP_ANTI);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
}
-
+
if (!deps->readonly)
{
reg_last->clobbers_length++;
@@ -2932,7 +2932,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
-
+
add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
REG_DEP_OUTPUT);
add_dependence_list_and_free (deps, insn,
@@ -2942,7 +2942,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
REG_DEP_OUTPUT);
add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
REG_DEP_ANTI);
-
+
if (!deps->readonly)
{
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
@@ -2961,7 +2961,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
-
+
if (!deps->readonly)
reg_last->implicit_sets
= alloc_INSN_LIST (insn, reg_last->implicit_sets);
@@ -3042,10 +3042,10 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
}
/* Flush pending lists on jumps, but not on speculative checks. */
- if (JUMP_P (insn) && !(sel_sched_p ()
+ if (JUMP_P (insn) && !(sel_sched_p ()
&& sel_insn_is_speculation_check (insn)))
flush_pending_lists (deps, insn, true, true);
-
+
if (!deps->readonly)
CLEAR_REG_SET (&deps->reg_conditional_sets);
reg_pending_barrier = NOT_A_BARRIER;
@@ -3120,7 +3120,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
&& deps->in_post_call_group_p == post_call_initial)
deps->in_post_call_group_p = post_call;
- if (!sel_sched_p () || sched_emulate_haifa_p)
+ if (!sel_sched_p () || sched_emulate_haifa_p)
{
SCHED_GROUP_P (insn) = 1;
CANT_MOVE (insn) = 1;
@@ -3146,7 +3146,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
{
sd_iterator_def sd_it;
dep_t dep;
-
+
for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
sd_iterator_cond (&sd_it, &dep);)
change_spec_dep_to_hard (sd_it);
@@ -3230,11 +3230,11 @@ deps_analyze_insn (struct deps *deps, rtx insn)
if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
{
- /* Make each JUMP_INSN (but not a speculative check)
+ /* Make each JUMP_INSN (but not a speculative check)
a scheduling barrier for memory references. */
if (!deps->readonly
- && JUMP_P (insn)
- && !(sel_sched_p ()
+ && JUMP_P (insn)
+ && !(sel_sched_p ()
&& sel_insn_is_speculation_check (insn)))
{
/* Keep the list a reasonable size. */
@@ -3293,7 +3293,7 @@ deps_analyze_insn (struct deps *deps, rtx insn)
/* For each insn which shouldn't cross a call, add a dependence
between that insn and this call insn. */
- add_dependence_list_and_free (deps, insn,
+ add_dependence_list_and_free (deps, insn,
&deps->sched_before_next_call, 1,
REG_DEP_ANTI);
@@ -3349,7 +3349,7 @@ deps_analyze_insn (struct deps *deps, rtx insn)
sched_deps_info->finish_insn ();
/* Fixup the dependencies in the sched group. */
- if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
+ if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
&& SCHED_GROUP_P (insn) && !sel_sched_p ())
fixup_sched_groups (insn);
}
@@ -3461,7 +3461,7 @@ sched_free_deps (rtx head, rtx tail, bool resolved_p)
}
/* Initialize variables for region data dependence analysis.
- When LAZY_REG_LAST is true, do not allocate reg_last array
+ When LAZY_REG_LAST is true, do not allocate reg_last array
of struct deps immediately. */
void
@@ -3494,7 +3494,7 @@ init_deps (struct deps *deps, bool lazy_reg_last)
deps->readonly = 0;
}
-/* Init only reg_last field of DEPS, which was not allocated before as
+/* Init only reg_last field of DEPS, which was not allocated before as
we inited DEPS lazily. */
void
init_deps_reg_last (struct deps *deps)
@@ -3521,7 +3521,7 @@ free_deps (struct deps *deps)
return;
}
deps->max_reg = 0;
-
+
free_INSN_LIST_list (&deps->pending_read_insns);
free_EXPR_LIST_list (&deps->pending_read_mems);
free_INSN_LIST_list (&deps->pending_write_insns);
@@ -3546,7 +3546,7 @@ free_deps (struct deps *deps)
CLEAR_REG_SET (&deps->reg_last_in_use);
CLEAR_REG_SET (&deps->reg_conditional_sets);
- /* As we initialize reg_last lazily, it is possible that we didn't allocate
+ /* As we initialize reg_last lazily, it is possible that we didn't allocate
it at all. */
if (deps->reg_last)
free (deps->reg_last);
@@ -3563,7 +3563,7 @@ remove_from_deps (struct deps *deps, rtx insn)
int removed;
unsigned i;
reg_set_iterator rsi;
-
+
removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
&deps->pending_read_mems);
if (!DEBUG_INSN_P (insn))
@@ -3593,7 +3593,7 @@ remove_from_deps (struct deps *deps, rtx insn)
if (CALL_P (insn))
{
remove_from_dependence_list (insn, &deps->last_function_call);
- remove_from_dependence_list (insn,
+ remove_from_dependence_list (insn,
&deps->last_function_call_may_noreturn);
}
remove_from_dependence_list (insn, &deps->sched_before_next_call);
@@ -3603,9 +3603,9 @@ remove_from_deps (struct deps *deps, rtx insn)
static void
init_deps_data_vector (void)
{
- int reserve = (sched_max_luid + 1
+ int reserve = (sched_max_luid + 1
- VEC_length (haifa_deps_insn_data_def, h_d_i_d));
- if (reserve > 0
+ if (reserve > 0
&& ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3 * sched_max_luid / 2);
@@ -3621,8 +3621,8 @@ sched_deps_init (bool global_p)
int insns_in_block = sched_max_luid / n_basic_blocks + 1;
init_deps_data_vector ();
-
- /* We use another caching mechanism for selective scheduling, so
+
+ /* We use another caching mechanism for selective scheduling, so
we don't use this one. */
if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
{
@@ -3636,7 +3636,7 @@ sched_deps_init (bool global_p)
extend_dependency_caches (sched_max_luid, true);
}
- if (global_p)
+ if (global_p)
{
dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
/* Allocate lists for one block at a time. */
@@ -3694,7 +3694,7 @@ sched_deps_finish (void)
VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
cache_size = 0;
-
+
if (true_dependency_cache)
{
int i;
@@ -3815,7 +3815,7 @@ add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
gcc_assert (insn == cur_insn);
else
cur_insn = insn;
-
+
note_dep (elem, ds);
if (!internal)
cur_insn = NULL;
@@ -4105,14 +4105,14 @@ check_dep (dep_t dep, bool relaxed_p)
gcc_assert (ds & DEP_TRUE);
else if (dt == REG_DEP_OUTPUT)
gcc_assert ((ds & DEP_OUTPUT)
- && !(ds & DEP_TRUE));
- else
+ && !(ds & DEP_TRUE));
+ else
gcc_assert ((dt == REG_DEP_ANTI)
&& (ds & DEP_ANTI)
&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
/* HARD_DEP can not appear in dep_status of a link. */
- gcc_assert (!(ds & HARD_DEP));
+ gcc_assert (!(ds & HARD_DEP));
/* Check that dependence status is set correctly when speculation is not
supported. */
@@ -4154,8 +4154,8 @@ check_dep (dep_t dep, bool relaxed_p)
/* Subsequent speculations should resolve true dependencies. */
gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
}
-
- /* Check that true and anti dependencies can't have other speculative
+
+ /* Check that true and anti dependencies can't have other speculative
statuses. */
if (ds & DEP_TRUE)
gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
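The sched-deps.c hunks above include remove_from_dependence_list, which walks a dependence list through a pointer-to-pointer (`listp = &XEXP (*listp, 1)`) so that matching nodes can be unlinked without tracking a separate "previous" node. A standalone sketch of that list-removal idiom; the node type and names are invented for illustration and the real lists are INSN_LIST/EXPR_LIST rtxes, not malloc'd structs:

#include <stdio.h>
#include <stdlib.h>

struct dep_node
{
  int insn_uid;
  struct dep_node *next;
};

/* Remove every node whose insn_uid equals UID; return how many were
   removed.  */
static int
remove_from_list (int uid, struct dep_node **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn_uid == uid)
        {
          struct dep_node *dead = *listp;
          *listp = dead->next;      /* unlink without a "prev" pointer */
          free (dead);
          removed++;
          continue;
        }
      listp = &(*listp)->next;      /* step to the next link field */
    }

  return removed;
}

static struct dep_node *
push (struct dep_node *head, int uid)
{
  struct dep_node *n = malloc (sizeof *n);
  n->insn_uid = uid;
  n->next = head;
  return n;
}

int
main (void)
{
  struct dep_node *list = NULL;
  list = push (list, 1);
  list = push (list, 2);
  list = push (list, 1);
  printf ("removed %d\n", remove_from_list (1, &list));   /* prints 2 */
  return 0;
}
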
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index f005d1dace0..f4875601f16 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -144,13 +144,13 @@ begin_schedule_ready (rtx insn, rtx last)
instruction scheduled after last control flow instruction.
In this case we can create new basic block. It is
always exactly one basic block last in the sequence. */
-
+
FOR_EACH_EDGE (e, ei, last_bb->succs)
if (e->flags & EDGE_FALLTHRU)
break;
#ifdef ENABLE_CHECKING
- gcc_assert (!e || !(e->flags & EDGE_COMPLEX));
+ gcc_assert (!e || !(e->flags & EDGE_COMPLEX));
gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
&& !IS_SPECULATION_CHECK_P (insn)
@@ -176,7 +176,7 @@ begin_schedule_ready (rtx insn, rtx last)
else
/* Create an empty unreachable block after the INSN. */
bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);
-
+
/* split_edge () creates BB before E->DEST. Keep in mind, that
this operation extends scheduling region till the end of BB.
Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
@@ -327,7 +327,7 @@ earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
rtx insn1 = DEP_PRO (back_dep);
- if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
+ if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
/* Found a DEF-USE dependence (insn1, load_insn). */
{
sd_iterator_def fore_sd_it;
@@ -464,7 +464,7 @@ schedule_ebb (rtx head, rtx tail)
{
basic_block first_bb, target_bb;
struct deps tmp_deps;
-
+
first_bb = BLOCK_FOR_INSN (head);
last_bb = BLOCK_FOR_INSN (tail);
@@ -491,7 +491,7 @@ schedule_ebb (rtx head, rtx tail)
}
else
/* Only recovery blocks can have their dependencies already calculated,
- and they always are single block ebbs. */
+ and they always are single block ebbs. */
gcc_assert (first_bb == last_bb);
/* Set priorities. */
@@ -516,7 +516,7 @@ schedule_ebb (rtx head, rtx tail)
/* We might pack all instructions into fewer blocks,
so we may made some of them empty. Can't assert (b == last_bb). */
-
+
/* Sanity check: verify that all region insns were scheduled. */
gcc_assert (sched_rgn_n_insns == rgn_n_insns);
@@ -643,7 +643,7 @@ ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
static void
ebb_add_block (basic_block bb, basic_block after)
{
- /* Recovery blocks are always bounded by BARRIERS,
+ /* Recovery blocks are always bounded by BARRIERS,
therefore, they always form single block EBB,
therefore, we can use rec->index to identify such EBBs. */
if (after == EXIT_BLOCK_PTR)
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 35a517e91ff..0d3a855690e 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -541,7 +541,7 @@ struct deps
/* Shows the last value of reg_pending_barrier associated with the insn. */
enum reg_pending_barrier_mode last_reg_pending_barrier;
- /* True when this context should be treated as a readonly by
+ /* True when this context should be treated as a readonly by
the analysis. */
BOOL_BITFIELD readonly : 1;
};
@@ -563,7 +563,7 @@ struct haifa_sched_info
int (*can_schedule_ready_p) (rtx);
/* Return nonzero if there are more insns that should be scheduled. */
int (*schedule_more_p) (void);
- /* Called after an insn has all its hard dependencies resolved.
+ /* Called after an insn has all its hard dependencies resolved.
Adjusts status of instruction (which is passed through second parameter)
to indicate if instruction should be moved to the ready list or the
queue, or if it should silently discard it (until next resolved
@@ -602,7 +602,7 @@ struct haifa_sched_info
/* Hooks to support speculative scheduling. */
/* Called to notify frontend that instruction is being added (second
- parameter == 0) or removed (second parameter == 1). */
+ parameter == 0) or removed (second parameter == 1). */
void (*add_remove_insn) (rtx, int);
/* Called to notify frontend that instruction is being scheduled.
@@ -767,7 +767,7 @@ struct _haifa_insn_data
/* INTER_TICK is used to adjust INSN_TICKs of instructions from the
subsequent blocks in a region. */
int inter_tick;
-
+
/* See comment on QUEUE_INDEX macro in haifa-sched.c. */
int queue_index;
@@ -787,7 +787,7 @@ struct _haifa_insn_data
ds_t todo_spec;
/* What speculations were already applied. */
- ds_t done_spec;
+ ds_t done_spec;
/* What speculations are checked by this instruction. */
ds_t check_spec;
@@ -1011,7 +1011,7 @@ enum SPEC_TYPES_OFFSETS {
Therefore, it can appear only in TODO_SPEC field of an instruction. */
#define HARD_DEP (DEP_ANTI << 1)
-/* This represents the results of calling sched-deps.c functions,
+/* This represents the results of calling sched-deps.c functions,
which modify dependencies. */
enum DEPS_ADJUST_RESULT {
/* No dependence needed (e.g. producer == consumer). */
@@ -1024,7 +1024,7 @@ enum DEPS_ADJUST_RESULT {
DEP_CREATED
};
-/* Represents the bits that can be set in the flags field of the
+/* Represents the bits that can be set in the flags field of the
sched_info structure. */
enum SCHED_FLAGS {
/* If set, generate links between instruction as DEPS_LIST.
@@ -1329,7 +1329,7 @@ extern void compute_priorities (void);
extern void increase_insn_priority (rtx, int);
extern void debug_rgn_dependencies (int);
extern void debug_dependencies (rtx, rtx);
-extern void free_rgn_deps (void);
+extern void free_rgn_deps (void);
extern int contributes_to_priority (rtx, rtx);
extern void extend_rgns (int *, int *, sbitmap, int *);
extern void deps_join (struct deps *, struct deps *);
@@ -1486,7 +1486,7 @@ extern void sd_debug_lists (rtx, sd_list_types_def);
#endif /* INSN_SCHEDULING */
-/* Functions in sched-vis.c. These must be outside INSN_SCHEDULING as
+/* Functions in sched-vis.c. These must be outside INSN_SCHEDULING as
sched-vis.c is compiled always. */
extern void print_insn (char *, const_rtx, int);
extern void print_pattern (char *, const_rtx, int);
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 1c05bfa3b92..68da37bbd7e 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -448,7 +448,7 @@ dump_region_dot (FILE *f, int rgn)
}
/* The same, but first open a file specified by FNAME. */
-void
+void
dump_region_dot_file (const char *fname, int rgn)
{
FILE *f = fopen (fname, "wt");
@@ -497,7 +497,7 @@ find_single_block_region (bool ebbs_p)
if (bb->next_bb == EXIT_BLOCK_PTR
|| LABEL_P (BB_HEAD (bb->next_bb)))
break;
-
+
FOR_EACH_EDGE (e, ei, bb->succs)
if ((e->flags & EDGE_FALLTHRU) != 0)
break;
@@ -783,7 +783,7 @@ haifa_find_rgns (void)
int *queue, *degree1 = NULL;
/* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
there basic blocks, which are forced to be region heads.
- This is done to try to assemble few smaller regions
+ This is done to try to assemble few smaller regions
from a too_large region. */
sbitmap extended_rgn_header = NULL;
bool extend_regions_p;
@@ -795,7 +795,7 @@ haifa_find_rgns (void)
block of each region. */
queue = XNEWVEC (int, n_basic_blocks);
-
+
extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
if (extend_regions_p)
{
@@ -851,11 +851,11 @@ haifa_find_rgns (void)
loop_head = max_hdr[bb->index];
if (extend_regions_p)
- /* We save degree in case when we meet a too_large region
- and cancel it. We need a correct degree later when
+ /* We save degree in case when we meet a too_large region
+ and cancel it. We need a correct degree later when
calling extend_rgns. */
memcpy (degree1, degree, last_basic_block * sizeof (int));
-
+
/* Decrease degree of all I's successors for topological
ordering. */
FOR_EACH_EDGE (e, ei, bb->succs)
@@ -1022,7 +1022,7 @@ haifa_find_rgns (void)
degree = degree1;
degree1 = t;
-
+
/* And force successors of BB to be region heads.
This may provide several smaller regions instead
of one too_large region. */
@@ -1037,10 +1037,10 @@ haifa_find_rgns (void)
if (extend_regions_p)
{
free (degree1);
-
+
sbitmap_a_or_b (header, header, extended_rgn_header);
sbitmap_free (extended_rgn_header);
-
+
extend_rgns (degree, &idx, header, max_hdr);
}
}
@@ -1084,8 +1084,8 @@ find_rgns (void)
static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);
-/* Calculate the histogram that shows the number of regions having the
- given number of basic blocks, and store it in the RSP array. Return
+/* Calculate the histogram that shows the number of regions having the
+ given number of basic blocks, and store it in the RSP array. Return
the size of this array. */
static int
gather_region_statistics (int **rsp)
@@ -1100,7 +1100,7 @@ gather_region_statistics (int **rsp)
gcc_assert (nr_blocks >= 1);
if (nr_blocks > a_sz)
- {
+ {
a = XRESIZEVEC (int, a, nr_blocks);
do
a[a_sz++] = 0;
@@ -1114,14 +1114,14 @@ gather_region_statistics (int **rsp)
return a_sz;
}
-/* Print regions statistics. S1 and S2 denote the data before and after
+/* Print regions statistics. S1 and S2 denote the data before and after
calling extend_rgns, respectively. */
static void
print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
int i;
-
- /* We iterate until s2_sz because extend_rgns does not decrease
+
+ /* We iterate until s2_sz because extend_rgns does not decrease
the maximal region size. */
for (i = 1; i < s2_sz; i++)
{
@@ -1175,28 +1175,28 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
/* This block already was processed in find_rgns. */
max_hdr[bbn] = -1;
}
-
+
/* The idea is to topologically walk through CFG in top-down order.
During the traversal, if all the predecessors of a node are
marked to be in the same region (they all have the same max_hdr),
- then current node is also marked to be a part of that region.
+ then current node is also marked to be a part of that region.
Otherwise the node starts its own region.
- CFG should be traversed until no further changes are made. On each
- iteration the set of the region heads is extended (the set of those
- blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the
+ CFG should be traversed until no further changes are made. On each
+ iteration the set of the region heads is extended (the set of those
+ blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the
set of all basic blocks, thus the algorithm is guaranteed to
terminate. */
while (rescan && iter < max_iter)
{
rescan = 0;
-
+
for (i = nblocks - 1; i >= 0; i--)
{
edge e;
edge_iterator ei;
int bbn = order[i];
-
+
if (max_hdr[bbn] != -1 && !TEST_BIT (header, bbn))
{
int hdr = -1;
@@ -1222,16 +1222,16 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
hdr = bbn;
break;
- }
+ }
}
else
/* BB starts its own region. */
{
hdr = bbn;
break;
- }
+ }
}
-
+
if (hdr == bbn)
{
/* If BB start its own region,
@@ -1240,7 +1240,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
rescan = 1;
}
else
- gcc_assert (hdr != -1);
+ gcc_assert (hdr != -1);
max_hdr[bbn] = hdr;
}
@@ -1248,17 +1248,17 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
iter++;
}
-
+
/* Statistics were gathered on the SPEC2000 package of tests with
mainline weekly snapshot gcc-4.1-20051015 on ia64.
-
+
Statistics for SPECint:
1 iteration : 1751 cases (38.7%)
2 iterations: 2770 cases (61.3%)
Blocks wrapped in regions by find_rgns without extension: 18295 blocks
Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
(We don't count single block regions here).
-
+
Statistics for SPECfp:
1 iteration : 621 cases (35.9%)
2 iterations: 1110 cases (64.1%)
@@ -1270,11 +1270,11 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
This can be overridden with max-sched-extend-regions-iters parameter:
0 - disable region extension,
N > 0 - do at most N iterations. */
-
+
if (sched_verbose && iter != 0)
fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
rescan ? "... failed" : "");
-
+
if (!rescan && iter != 0)
{
int *s1 = NULL, s1_sz = 0;
@@ -1294,7 +1294,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
edge e;
edge_iterator ei;
int num_bbs = 0, j, num_insns = 0, large;
-
+
large = too_large (bbn, &num_bbs, &num_insns);
degree[bbn] = -1;
@@ -1329,7 +1329,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
RGN_NR_BLOCKS (nr_regions) = 1;
nr_regions++;
- }
+ }
num_bbs = 1;
@@ -1338,7 +1338,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
int succn = order[j];
if (max_hdr[succn] == bbn)
- /* This cycle iterates over all basic blocks, that
+ /* This cycle iterates over all basic blocks, that
are supposed to be in the region with head BBN,
and wraps them into that region (or in single
block region). */
@@ -1346,7 +1346,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
gcc_assert (degree[succn] == 0);
degree[succn] = -1;
- rgn_bb_table[idx] = succn;
+ rgn_bb_table[idx] = succn;
BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
CONTAINING_RGN (succn) = nr_regions;
@@ -1361,7 +1361,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
}
idx++;
-
+
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
if (e->dest != EXIT_BLOCK_PTR)
degree[e->dest->index]--;
@@ -1380,7 +1380,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
int *s2, s2_sz;
- /* Get the new statistics and print the comparison with the
+ /* Get the new statistics and print the comparison with the
one before calling this function. */
s2_sz = gather_region_statistics (&s2);
print_region_statistics (s1, s1_sz, s2, s2_sz);
@@ -1388,11 +1388,11 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
free (s2);
}
}
-
+
free (order);
free (max_hdr);
- *idxp = idx;
+ *idxp = idx;
}
/* Functions for regions scheduling information. */
@@ -1408,7 +1408,7 @@ compute_dom_prob_ps (int bb)
/* We shouldn't have any real ebbs yet. */
gcc_assert (ebb_head [bb] == bb + current_blocks);
-
+
if (IS_RGN_ENTRY (bb))
{
SET_BIT (dom[bb], 0);
@@ -2119,7 +2119,7 @@ init_ready_list (void)
/* Initialize ready list with all 'ready' insns in target block.
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
- {
+ {
try_ready (insn);
target_n_insns++;
@@ -2157,7 +2157,7 @@ can_schedule_ready_p (rtx insn)
if (INSN_BB (insn) != target_bb
&& IS_SPECULATIVE_INSN (insn)
&& !check_live (insn, INSN_BB (insn)))
- return 0;
+ return 0;
else
return 1;
}
@@ -2206,12 +2206,12 @@ new_ready (rtx next, ds_t ts)
int not_ex_free = 0;
/* For speculative insns, before inserting to ready/queue,
- check live, exception-free, and issue-delay. */
+ check live, exception-free, and issue-delay. */
if (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
&& ((recog_memoized (next) >= 0
- && min_insn_conflict_delay (curr_state, next, next)
+ && min_insn_conflict_delay (curr_state, next, next)
> PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
|| IS_SPECULATION_CHECK_P (next)
|| !check_live (next, INSN_BB (next))
@@ -2242,7 +2242,7 @@ new_ready (rtx next, ds_t ts)
ts = (ts & ~SPECULATIVE) | HARD_DEP;
}
}
-
+
return ts;
}
@@ -2325,7 +2325,7 @@ compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
add_branch_dependences. */
}
-/* This variable holds common_sched_info hooks and data relevant to
+/* This variable holds common_sched_info hooks and data relevant to
the interblock scheduler. */
static struct common_sched_info_def rgn_common_sched_info;
@@ -2872,9 +2872,9 @@ sched_is_disabled_for_current_region_p (void)
return true;
}
-/* Free all region dependencies saved in INSN_BACK_DEPS and
+/* Free all region dependencies saved in INSN_BACK_DEPS and
INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
- when scheduling, so this function is supposed to be called from
+ when scheduling, so this function is supposed to be called from
the selective scheduling only. */
void
free_rgn_deps (void)
@@ -2884,7 +2884,7 @@ free_rgn_deps (void)
for (bb = 0; bb < current_nr_blocks; bb++)
{
rtx head, tail;
-
+
gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
@@ -2896,7 +2896,7 @@ static int rgn_n_insns;
/* Compute insn priority for a current region. */
void
-compute_priorities (void)
+compute_priorities (void)
{
int bb;
@@ -2904,7 +2904,7 @@ compute_priorities (void)
for (bb = 0; bb < current_nr_blocks; bb++)
{
rtx head, tail;
-
+
gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
@@ -2951,12 +2951,12 @@ schedule_region (int rgn)
{
basic_block first_bb, last_bb;
rtx head, tail;
-
+
first_bb = EBB_FIRST_BB (bb);
last_bb = EBB_LAST_BB (bb);
-
+
get_ebb_head_tail (first_bb, last_bb, &head, &tail);
-
+
if (no_real_insns_p (head, tail))
{
gcc_assert (first_bb == last_bb);
@@ -3127,7 +3127,7 @@ rgn_setup_region (int rgn)
/* Set variables for the current region. */
current_nr_blocks = RGN_NR_BLOCKS (rgn);
current_blocks = RGN_BLOCKS (rgn);
-
+
/* EBB_HEAD is a region-scope structure. But we realloc it for
each region to save time/memory/something else.
See comments in add_block1, for what reasons we allocate +1 element. */
@@ -3157,11 +3157,11 @@ sched_rgn_compute_dependencies (int rgn)
/* Initialize bitmap used in add_branch_dependences. */
insn_referenced = sbitmap_alloc (sched_max_luid);
sbitmap_zero (insn_referenced);
-
+
/* Compute backward dependencies. */
for (bb = 0; bb < current_nr_blocks; bb++)
compute_block_dependences (bb);
-
+
sbitmap_free (insn_referenced);
free_pending_lists ();
finish_deps_global ();
@@ -3185,7 +3185,7 @@ void
sched_rgn_local_init (int rgn)
{
int bb;
-
+
/* Compute interblock info: probabilities, split-edges, dominators, etc. */
if (current_nr_blocks > 1)
{
@@ -3242,7 +3242,7 @@ sched_rgn_local_init (int rgn)
}
/* Free data computed for the finished region. */
-void
+void
sched_rgn_local_free (void)
{
free (prob);
@@ -3372,7 +3372,7 @@ rgn_make_new_region_out_of_new_block (basic_block bb)
BLOCK_TO_BB (bb->index) = 0;
nr_regions++;
-
+
RGN_BLOCKS (nr_regions) = i + 1;
}
@@ -3389,7 +3389,7 @@ rgn_add_block (basic_block bb, basic_block after)
RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
}
else
- {
+ {
int i, pos;
/* We need to fix rgn_table, block_to_bb, containing_rgn
@@ -3398,7 +3398,7 @@ rgn_add_block (basic_block bb, basic_block after)
BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
/* We extend ebb_head to one more position to
- easily find the last position of the last ebb in
+ easily find the last position of the last ebb in
the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
is _always_ valid for access. */
@@ -3417,24 +3417,24 @@ rgn_add_block (basic_block bb, basic_block after)
/* Source position: ebb_head[i]
Destination position: ebb_head[i] + 1
- Last position:
+ Last position:
RGN_BLOCKS (nr_regions) - 1
Number of elements to copy: (last_position) - (source_position) + 1
*/
-
+
memmove (rgn_bb_table + pos + 1,
rgn_bb_table + pos,
((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
* sizeof (*rgn_bb_table));
rgn_bb_table[pos] = bb->index;
-
+
for (; i <= current_nr_blocks; i++)
ebb_head [i]++;
i = CONTAINING_RGN (after->index);
CONTAINING_RGN (bb->index) = i;
-
+
RGN_HAS_REAL_EBB (i) = 1;
for (++i; i <= nr_regions; i++)
@@ -3451,7 +3451,7 @@ rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
int old_pos, new_pos, i;
BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
-
+
for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
rgn_bb_table[old_pos] != check_bb_nexti;
old_pos--);
@@ -3462,7 +3462,7 @@ rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
new_pos--);
new_pos++;
gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
-
+
gcc_assert (new_pos < old_pos);
memmove (rgn_bb_table + new_pos + 1,
@@ -3518,7 +3518,7 @@ static bool
gate_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
- return optimize > 0 && flag_schedule_insns_after_reload
+ return optimize > 0 && flag_schedule_insns_after_reload
&& dbg_cnt (sched2_func);
#else
return 0;
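The extend_rgns comment reformatted above describes a fixed-point walk over the CFG in topological order: a block joins a region when all its predecessors carry the same region head, otherwise it stays a head of its own, and the walk repeats until nothing changes. A heavily simplified standalone sketch of that propagation on a hard-coded toy CFG (it omits the header bitmap, degree bookkeeping and iteration cap of the real algorithm; all names are illustrative):

#include <stdio.h>

#define NBLOCKS 5

/* pred[i][j] != -1 lists the predecessors of block i (toy CFG:
   0 -> 1, 0 -> 2, {1,2} -> 3, 3 -> 4).  */
static const int pred[NBLOCKS][2] = {
  { -1, -1 },
  {  0, -1 },
  {  0, -1 },
  {  1,  2 },
  {  3, -1 }
};

int
main (void)
{
  int hdr[NBLOCKS];
  int changed = 1, iter = 0;

  for (int i = 0; i < NBLOCKS; i++)
    hdr[i] = i;                     /* every block starts its own region */

  while (changed)
    {
      changed = 0;
      iter++;
      for (int i = 0; i < NBLOCKS; i++)
        {
          int h = -1, agree = 1;
          for (int j = 0; j < 2 && pred[i][j] != -1; j++)
            {
              if (h == -1)
                h = hdr[pred[i][j]];
              else if (hdr[pred[i][j]] != h)
                agree = 0;
            }
          if (h != -1 && agree && hdr[i] != h)
            {
              hdr[i] = h;           /* join the predecessors' region */
              changed = 1;
            }
        }
    }

  for (int i = 0; i < NBLOCKS; i++)
    printf ("block %d -> region head %d (after %d passes)\n",
            i, hdr[i], iter);
  return 0;
}
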
diff --git a/gcc/sched-vis.c b/gcc/sched-vis.c
index 906e9c31f2a..5754a56a5fa 100644
--- a/gcc/sched-vis.c
+++ b/gcc/sched-vis.c
@@ -779,7 +779,7 @@ print_rtl_slim_with_bb (FILE *f, rtx first, int flags)
print_rtl_slim (f, first, NULL, -1, flags);
}
-/* Same as above, but stop at LAST or when COUNT == 0.
+/* Same as above, but stop at LAST or when COUNT == 0.
If COUNT < 0 it will stop only at LAST or NULL rtx. */
void
print_rtl_slim (FILE *f, rtx first, rtx last, int count, int flags)
@@ -788,8 +788,8 @@ print_rtl_slim (FILE *f, rtx first, rtx last, int count, int flags)
rtx insn, tail;
tail = last ? NEXT_INSN (last) : NULL_RTX;
- for (insn = first;
- (insn != NULL) && (insn != tail) && (count != 0);
+ for (insn = first;
+ (insn != NULL) && (insn != tail) && (count != 0);
insn = NEXT_INSN (insn))
{
if ((flags & TDF_BLOCKS)
@@ -815,7 +815,7 @@ print_rtl_slim (FILE *f, rtx first, rtx last, int count, int flags)
}
}
-void
+void
debug_bb_slim (struct basic_block_def *bb)
{
print_rtl_slim (stderr, BB_HEAD (bb), BB_END (bb), -1, 32);
diff --git a/gcc/sdbout.c b/gcc/sdbout.c
index 7d5c820e8d0..f4541ea0763 100644
--- a/gcc/sdbout.c
+++ b/gcc/sdbout.c
@@ -1156,7 +1156,7 @@ sdbout_one_type (tree type)
{
tree child_type = BINFO_TYPE (child);
tree child_type_name;
-
+
if (TYPE_NAME (child_type) == 0)
continue;
if (TREE_CODE (TYPE_NAME (child_type)) == IDENTIFIER_NODE)
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index b307f52b841..dbf1c34fccc 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -75,7 +75,7 @@ static void
switch_dump (FILE *to)
{
gcc_assert (saved_sched_dump == NULL);
-
+
saved_sched_dump = sched_dump;
sched_dump = to;
}
@@ -89,7 +89,7 @@ restore_dump (void)
}
-/* Functions for dumping instructions, av sets, and exprs. */
+/* Functions for dumping instructions, av sets, and exprs. */
/* Default flags for dumping insns. */
static int dump_insn_rtx_flags = DUMP_INSN_RTX_PATTERN;
@@ -219,7 +219,7 @@ debug_vinsn (vinsn_t vi)
{
switch_dump (stderr);
dump_vinsn_1 (vi, debug_vinsn_flags);
- sel_print ("\n");
+ sel_print ("\n");
restore_dump ();
}
@@ -281,7 +281,7 @@ dump_expr_1 (expr_t expr, int flags)
if (orig_bb != 0)
sel_print ("orig_bb:%d;", orig_bb);
}
-
+
if (EXPR_TARGET_AVAILABLE (expr) < 1)
sel_print ("target:%d;", EXPR_TARGET_AVAILABLE (expr));
sel_print ("]");
@@ -368,10 +368,10 @@ dump_av_set (av_set_t av)
{
av_set_iterator i;
expr_t expr;
-
+
if (!sched_dump_to_dot_p)
sel_print ("{");
-
+
FOR_EACH_EXPR (expr, i, av)
{
dump_expr (expr);
@@ -380,7 +380,7 @@ dump_av_set (av_set_t av)
else
sel_print ("\n");
}
-
+
if (!sched_dump_to_dot_p)
sel_print ("}");
}
@@ -399,7 +399,7 @@ dump_lv_set (regset lv)
unsigned i;
reg_set_iterator rsi;
int count = 0;
-
+
EXECUTE_IF_SET_IN_REG_SET (lv, 0, i, rsi)
{
sel_print (" %d", i);
@@ -408,9 +408,9 @@ dump_lv_set (regset lv)
sel_print (" [%s]", reg_names[i]);
++count;
}
-
+
++count;
-
+
if (sched_dump_to_dot_p && count == 12)
{
count = 0;
@@ -418,7 +418,7 @@ dump_lv_set (regset lv)
}
}
}
-
+
sel_print ("}\n");
}
@@ -440,7 +440,7 @@ dump_blist (blist_t bnds)
for (; bnds; bnds = BLIST_NEXT (bnds))
{
bnd_t bnd = BLIST_BND (bnds);
-
+
sel_print ("[to: %d; ptr: ", INSN_UID (BND_TO (bnd)));
dump_ilist (BND_PTR (bnd));
sel_print ("] ");
@@ -465,7 +465,7 @@ dump_insn_vector (rtx_vec_t succs)
{
int i;
rtx succ;
-
+
for (i = 0; VEC_iterate (rtx, succs, i, succ); i++)
if (succ)
dump_insn (succ);
@@ -501,16 +501,16 @@ sel_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
{
static char buf[80];
- /* '+' before insn means it is a new cycle start and it's not been
+ /* '+' before insn means it is a new cycle start and it's not been
scheduled yet. '>' - has been scheduled. */
if (s_i_d && INSN_LUID (insn) > 0)
if (GET_MODE (insn) == TImode)
- sprintf (buf, "%s %4d",
- INSN_SCHED_TIMES (insn) > 0 ? "> " : "< ",
+ sprintf (buf, "%s %4d",
+ INSN_SCHED_TIMES (insn) > 0 ? "> " : "< ",
INSN_UID (insn));
else
- sprintf (buf, "%s %4d",
- INSN_SCHED_TIMES (insn) > 0 ? "! " : " ",
+ sprintf (buf, "%s %4d",
+ INSN_SCHED_TIMES (insn) > 0 ? "! " : " ",
INSN_UID (insn));
else
if (GET_MODE (insn) == TImode)
@@ -538,7 +538,7 @@ replace_str_in_buf (char *buf, const char *str1, const char *str2)
do
{
p = strstr (p, str1);
- if (p)
+ if (p)
{
char *p1 = p + str1_len;
/* Copy the rest of buf and '\0'. */
@@ -556,7 +556,7 @@ replace_str_in_buf (char *buf, const char *str1, const char *str2)
/* Copy str2. */
for (i = 0; i < str2_len; i++)
p[i] = str2[i];
-
+
p += str2_len;
buf_len += diff;
}
@@ -571,7 +571,7 @@ sel_prepare_string_for_dot_label (char *buf)
{
static char specials_from[7][2] = { "<", ">", "{", "|", "}", "\"",
"\n" };
- static char specials_to[7][3] = { "\\<", "\\>", "\\{", "\\|", "\\}",
+ static char specials_to[7][3] = { "\\<", "\\>", "\\{", "\\|", "\\}",
"\\\"", "\\l" };
unsigned i;
@@ -801,7 +801,7 @@ sel_dump_cfg_2 (FILE *f, int flags)
sched_dump_to_dot_p = false;
}
-/* Dump a cfg region to the file specified by TAG honoring flags.
+/* Dump a cfg region to the file specified by TAG honoring flags.
The file is created by the function. */
static void
sel_dump_cfg_1 (const char *tag, int flags)
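The sel-sched-dump.c hunks above include sel_prepare_string_for_dot_label, which backslash-escapes the characters that are special inside a Graphviz record label and turns newlines into "\l". A standalone sketch of that escaping; unlike the in-place replace_str_in_buf scheme in the patch, this version simply writes to a separate output buffer, and the function and buffer names are invented for illustration:

#include <stdio.h>
#include <string.h>

static void
escape_for_dot_label (const char *in, char *out, size_t out_sz)
{
  static const char specials[] = "<>{|}\"";
  size_t o = 0;

  for (size_t i = 0; in[i] != '\0' && o + 3 < out_sz; i++)
    {
      if (in[i] == '\n')
        {
          out[o++] = '\\';
          out[o++] = 'l';           /* left-justified line break in dot */
        }
      else if (strchr (specials, in[i]))
        {
          out[o++] = '\\';
          out[o++] = in[i];         /* escape a record-label special */
        }
      else
        out[o++] = in[i];
    }
  out[o] = '\0';
}

int
main (void)
{
  char buf[128];
  escape_for_dot_label ("{r0|insn 42}\n", buf, sizeof buf);
  printf ("%s\n", buf);             /* prints \{r0\|insn 42\}\l */
  return 0;
}
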
diff --git a/gcc/sel-sched-dump.h b/gcc/sel-sched-dump.h
index 70750f9cdcd..d4ebd779a7a 100644
--- a/gcc/sel-sched-dump.h
+++ b/gcc/sel-sched-dump.h
@@ -1,4 +1,4 @@
-/* Instruction scheduling pass. Log dumping infrastructure.
+/* Instruction scheduling pass. Log dumping infrastructure.
Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -55,13 +55,13 @@ enum sel_dump_cfg_def
SEL_DUMP_CFG_BB_LOOP = 512,
/* The default flags for cfg dumping. */
- SEL_DUMP_CFG_FLAGS = (SEL_DUMP_CFG_CURRENT_REGION
- | SEL_DUMP_CFG_BB_NOTES_LIST
- | SEL_DUMP_CFG_AV_SET
- | SEL_DUMP_CFG_LV_SET
- | SEL_DUMP_CFG_BB_INSNS
- | SEL_DUMP_CFG_FENCES
- | SEL_DUMP_CFG_INSN_SEQNO
+ SEL_DUMP_CFG_FLAGS = (SEL_DUMP_CFG_CURRENT_REGION
+ | SEL_DUMP_CFG_BB_NOTES_LIST
+ | SEL_DUMP_CFG_AV_SET
+ | SEL_DUMP_CFG_LV_SET
+ | SEL_DUMP_CFG_BB_INSNS
+ | SEL_DUMP_CFG_FENCES
+ | SEL_DUMP_CFG_INSN_SEQNO
| SEL_DUMP_CFG_BB_LOOP)
};
@@ -117,7 +117,7 @@ enum dump_expr_def
{
/* Dump the vinsn behind this expression. */
DUMP_EXPR_VINSN = 2,
-
+
/* Dump expression's SPEC parameter. */
DUMP_EXPR_SPEC = 4,
@@ -146,7 +146,7 @@ extern void dump_expr_1 (expr_t, int);
extern void dump_expr (expr_t);
extern void debug_expr (expr_t);
-/* A enumeration for dumping flags of an insn. The difference from
+/* A enumeration for dumping flags of an insn. The difference from
dump_insn_rtx_def is that these fields are for insns in stream only. */
enum dump_insn_def
{
@@ -183,7 +183,7 @@ extern void sel_prepare_string_for_dot_label (char *);
When it is off, we are dumping to log. */
extern bool sched_dump_to_dot_p;
-/* This macro acts like printf but dumps information to the .dot file.
+/* This macro acts like printf but dumps information to the .dot file.
Used when dumping control flow. */
#define sel_print_to_dot(...) \
do { \
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index bd5560d610c..3484d861c4b 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -122,7 +122,7 @@ static struct
int n;
/* Its size. */
- int s;
+ int s;
} nop_pool = { NULL, 0, 0 };
/* The pool for basic block notes. */
@@ -134,7 +134,7 @@ rtx nop_pattern = NULL_RTX;
EXIT_INSN is successor of the insns that lead to EXIT_BLOCK. */
rtx exit_insn = NULL_RTX;
-/* TRUE if while scheduling current region, which is loop, its preheader
+/* TRUE if while scheduling current region, which is loop, its preheader
was removed. */
bool preheader_removed = false;
@@ -259,10 +259,10 @@ init_fence_for_scheduling (fence_t f)
/* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
static void
-flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
- insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
- int *ready_ticks, int ready_ticks_size, insn_t sched_next,
- int cycle, int cycle_issued_insns,
+flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
+ insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
+ int *ready_ticks, int ready_ticks_size, insn_t sched_next,
+ int cycle, int cycle_issued_insns,
bool starts_cycle_p, bool after_stall_p)
{
fence_t f;
@@ -317,7 +317,7 @@ void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
def_t d;
-
+
_list_add (dl);
d = DEF_LIST_DEF (*dl);
@@ -328,11 +328,11 @@ def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
/* Functions to work with target contexts. */
-/* Bulk target context. It is convenient for debugging purposes to ensure
+/* Bulk target context. It is convenient for debugging purposes to ensure
that there are no uninitialized (null) target contexts. */
static tc_t bulk_tc = (tc_t) 1;
-/* Target hooks wrappers. In the future we can provide some default
+/* Target hooks wrappers. In the future we can provide some default
implementations for them. */
/* Allocate a store for the target context. */
@@ -424,7 +424,7 @@ reset_target_context (tc_t tc, bool clean_p)
init_target_context (tc, clean_p);
}
-/* Functions to work with dependence contexts.
+/* Functions to work with dependence contexts.
Dc (aka deps context, aka deps_t, aka struct deps *) is short for dependence
context. It accumulates information about processed insns to decide if
current insn is dependent on the processed ones. */
@@ -487,7 +487,7 @@ reset_deps_context (deps_t dc)
init_deps (dc, false);
}
-/* This structure describes the dependence analysis hooks for advancing
+/* This structure describes the dependence analysis hooks for advancing
dependence context. */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
{
@@ -598,11 +598,11 @@ init_fences (insn_t old_fence)
succ_iterator si;
bool first = true;
int ready_ticks_size = get_max_uid () + 1;
-
- FOR_EACH_SUCC_1 (succ, si, old_fence,
+
+ FOR_EACH_SUCC_1 (succ, si, old_fence,
SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
{
-
+
if (first)
first = false;
else
@@ -612,28 +612,28 @@ init_fences (insn_t old_fence)
state_create (),
create_deps_context () /* dc */,
create_target_context (true) /* tc */,
- NULL_RTX /* last_scheduled_insn */,
+ NULL_RTX /* last_scheduled_insn */,
NULL, /* executing_insns */
XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
ready_ticks_size,
NULL_RTX /* sched_next */,
- 1 /* cycle */, 0 /* cycle_issued_insns */,
- 1 /* starts_cycle_p */, 0 /* after_stall_p */);
+ 1 /* cycle */, 0 /* cycle_issued_insns */,
+ 1 /* starts_cycle_p */, 0 /* after_stall_p */);
}
}
/* Merges two fences (filling fields of fence F with resulting values) by
following rules: 1) state, target context and last scheduled insn are
- propagated from fallthrough edge if it is available;
+ propagated from fallthrough edge if it is available;
2) deps context and cycle is propagated from more probable edge;
- 3) all other fields are set to corresponding constant values.
+ 3) all other fields are set to corresponding constant values.
- INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
+ INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE and AFTER_STALL_P
are the corresponding fields of the second fence. */
static void
merge_fences (fence_t f, insn_t insn,
- state_t state, deps_t dc, void *tc,
+ state_t state, deps_t dc, void *tc,
rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
int *ready_ticks, int ready_ticks_size,
rtx sched_next, int cycle, bool after_stall_p)
@@ -643,22 +643,22 @@ merge_fences (fence_t f, insn_t insn,
gcc_assert (sel_bb_head_p (FENCE_INSN (f))
&& !sched_next && !FENCE_SCHED_NEXT (f));
- /* Check if we can decide which path fences came.
+ /* Check if we can decide which path fences came.
If we can't (or don't want to) - reset all. */
if (last_scheduled_insn == NULL
|| last_scheduled_insn_old == NULL
- /* This is a case when INSN is reachable on several paths from
- one insn (this can happen when pipelining of outer loops is on and
- there are two edges: one going around of inner loop and the other -
+ /* This is a case when INSN is reachable on several paths from
+ one insn (this can happen when pipelining of outer loops is on and
+ there are two edges: one going around of inner loop and the other -
right through it; in such case just reset everything). */
|| last_scheduled_insn == last_scheduled_insn_old)
{
state_reset (FENCE_STATE (f));
state_free (state);
-
+
reset_deps_context (FENCE_DC (f));
delete_deps_context (dc);
-
+
reset_target_context (FENCE_TC (f), true);
delete_target_context (tc);
@@ -669,7 +669,7 @@ merge_fences (fence_t f, insn_t insn,
VEC_free (rtx, gc, executing_insns);
free (ready_ticks);
if (FENCE_EXECUTING_INSNS (f))
- VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
+ VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
if (FENCE_READY_TICKS (f))
memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
@@ -680,7 +680,7 @@ merge_fences (fence_t f, insn_t insn,
edge candidate;
succ_iterator si;
insn_t succ;
-
+
/* Find fallthrough edge. */
gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
candidate = find_fallthru_edge (BLOCK_FOR_INSN (insn)->prev_bb);
@@ -692,16 +692,16 @@ merge_fences (fence_t f, insn_t insn,
/* No fallthrough edge leading to basic block of INSN. */
state_reset (FENCE_STATE (f));
state_free (state);
-
+
reset_target_context (FENCE_TC (f), true);
delete_target_context (tc);
-
+
FENCE_LAST_SCHEDULED_INSN (f) = NULL;
}
else
if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
{
- /* Would be weird if same insn is successor of several fallthrough
+ /* Would be weird if same insn is successor of several fallthrough
edges. */
gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
!= BLOCK_FOR_INSN (last_scheduled_insn_old));
@@ -754,10 +754,10 @@ merge_fences (fence_t f, insn_t insn,
delete_deps_context (dc);
VEC_free (rtx, gc, executing_insns);
free (ready_ticks);
-
+
FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
if (FENCE_EXECUTING_INSNS (f))
- VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
+ VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
if (FENCE_READY_TICKS (f))
memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
@@ -792,13 +792,13 @@ merge_fences (fence_t f, insn_t insn,
FENCE_SCHED_NEXT (f) = NULL;
}
-/* Add a new fence to NEW_FENCES list, initializing it from all
+/* Add a new fence to NEW_FENCES list, initializing it from all
other parameters. */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
- state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
- VEC(rtx, gc) *executing_insns, int *ready_ticks,
- int ready_ticks_size, rtx sched_next, int cycle,
+ state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
+ VEC(rtx, gc) *executing_insns, int *ready_ticks,
+ int ready_ticks_size, rtx sched_next, int cycle,
int cycle_issued_insns, bool starts_cycle_p, bool after_stall_p)
{
fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
@@ -806,7 +806,7 @@ add_to_fences (flist_tail_t new_fences, insn_t insn,
if (! f)
{
flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
- last_scheduled_insn, executing_insns, ready_ticks,
+ last_scheduled_insn, executing_insns, ready_ticks,
ready_ticks_size, sched_next, cycle, cycle_issued_insns,
starts_cycle_p, after_stall_p);
@@ -815,8 +815,8 @@ add_to_fences (flist_tail_t new_fences, insn_t insn,
}
else
{
- merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
- executing_insns, ready_ticks, ready_ticks_size,
+ merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
+ executing_insns, ready_ticks, ready_ticks_size,
sched_next, cycle, after_stall_p);
}
}
@@ -829,14 +829,14 @@ move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
old = FLIST_FENCE (old_fences);
- f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
+ f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
FENCE_INSN (FLIST_FENCE (old_fences)));
if (f)
{
merge_fences (f, old->insn, old->state, old->dc, old->tc,
old->last_scheduled_insn, old->executing_insns,
old->ready_ticks, old->ready_ticks_size,
- old->sched_next, old->cycle,
+ old->sched_next, old->cycle,
old->after_stall_p);
}
else
@@ -849,37 +849,37 @@ move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
FENCE_INSN (old) = NULL;
}
-/* Add a new fence to NEW_FENCES list and initialize most of its data
+/* Add a new fence to NEW_FENCES list and initialize most of its data
as a clean one. */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
int ready_ticks_size = get_max_uid () + 1;
-
+
add_to_fences (new_fences,
succ, state_create (), create_deps_context (),
create_target_context (true),
- NULL_RTX, NULL,
+ NULL_RTX, NULL,
XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
NULL_RTX, FENCE_CYCLE (fence) + 1,
0, 1, FENCE_AFTER_STALL_P (fence));
}
-/* Add a new fence to NEW_FENCES list and initialize all of its data
+/* Add a new fence to NEW_FENCES list and initialize all of its data
from FENCE and SUCC. */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
- int * new_ready_ticks
+ int * new_ready_ticks
= XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
-
+
memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
add_to_fences (new_fences,
succ, state_create_copy (FENCE_STATE (fence)),
create_copy_of_deps_context (FENCE_DC (fence)),
create_copy_of_target_context (FENCE_TC (fence)),
- FENCE_LAST_SCHEDULED_INSN (fence),
+ FENCE_LAST_SCHEDULED_INSN (fence),
VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
new_ready_ticks,
FENCE_READY_TICKS_SIZE (fence),
@@ -959,19 +959,19 @@ free_regset_pool (void)
regset *v = regset_pool.v;
int i = 0;
int n = regset_pool.n;
-
+
regset *vv = regset_pool.vv;
int ii = 0;
int nn = regset_pool.nn;
-
+
int diff = 0;
-
+
gcc_assert (n <= nn);
-
+
/* Sort both vectors so it will be possible to compare them. */
qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
-
+
while (ii < nn)
{
if (v[i] == vv[ii])
@@ -979,17 +979,17 @@ free_regset_pool (void)
else
/* VV[II] was lost. */
diff++;
-
+
ii++;
}
-
+
gcc_assert (diff == regset_pool.diff);
}
#endif
-
+
/* If not true - we have a memory leak. */
gcc_assert (regset_pool.diff == 0);
-
+
while (regset_pool.n)
{
--regset_pool.n;
@@ -999,7 +999,7 @@ free_regset_pool (void)
free (regset_pool.v);
regset_pool.v = NULL;
regset_pool.s = 0;
-
+
free (regset_pool.vv);
regset_pool.vv = NULL;
regset_pool.nn = 0;
@@ -1009,8 +1009,8 @@ free_regset_pool (void)
}
-/* Functions to work with nop pools. NOP insns are used as temporary
- placeholders of the insns being scheduled to allow correct update of
+/* Functions to work with nop pools. NOP insns are used as temporary
+ placeholders of the insns being scheduled to allow correct update of
the data sets. When update is finished, NOPs are deleted. */
/* A vinsn that is used to represent a nop. This vinsn is shared among all
@@ -1051,7 +1051,7 @@ return_nop_to_pool (insn_t nop, bool full_tidying)
sel_remove_insn (nop, false, full_tidying);
if (nop_pool.n == nop_pool.s)
- nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
+ nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
(nop_pool.s = 2 * nop_pool.s + 1));
nop_pool.v[nop_pool.n++] = nop;
}
@@ -1067,7 +1067,7 @@ free_nop_pool (void)
}
-/* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
+/* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
The callback is given two rtxes XX and YY and writes the new rtxes
to NX and NY in case some needs to be skipped. */
static int
@@ -1075,7 +1075,7 @@ skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
{
const_rtx x = *xx;
const_rtx y = *yy;
-
+
if (GET_CODE (x) == UNSPEC
&& (targetm.sched.skip_rtx_p == NULL
|| targetm.sched.skip_rtx_p (x)))
@@ -1084,7 +1084,7 @@ skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
*ny = CONST_CAST_RTX (y);
return 1;
}
-
+
if (GET_CODE (y) == UNSPEC
&& (targetm.sched.skip_rtx_p == NULL
|| targetm.sched.skip_rtx_p (y)))
@@ -1093,18 +1093,18 @@ skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
*ny = XVECEXP (y, 0, 0);
return 1;
}
-
+
return 0;
}
-/* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct way
+/* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct way
to support ia64 speculation. When changes are needed, new rtx X and new mode
NMODE are written, and the callback returns true. */
static int
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
rtx *nx, enum machine_mode* nmode)
{
- if (GET_CODE (x) == UNSPEC
+ if (GET_CODE (x) == UNSPEC
&& targetm.sched.skip_rtx_p
&& targetm.sched.skip_rtx_p (x))
{
@@ -1112,7 +1112,7 @@ hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
*nmode = VOIDmode;
return 1;
}
-
+
return 0;
}
@@ -1123,9 +1123,9 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
if (lhs == NULL || rhs == NULL)
return false;
- /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
- to use reg, if const can be used. Moreover, scheduling const as rhs may
- lead to mode mismatch cause consts don't have modes but they could be
+ /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
+ to use reg, if const can be used. Moreover, scheduling const as rhs may
+ lead to mode mismatch cause consts don't have modes but they could be
merged from branches where the same const used in different modes. */
if (CONSTANT_P (rhs))
return false;
@@ -1138,7 +1138,7 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
if (REG_P (rhs))
return false;
- /* See comment at find_used_regs_1 (*1) for explanation of this
+ /* See comment at find_used_regs_1 (*1) for explanation of this
restriction. */
/* FIXME: remove this later. */
if (MEM_P (lhs))
@@ -1152,8 +1152,8 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
return true;
}
-/* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
- FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
+/* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
+ FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
used e.g. for insns from recovery blocks. */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
@@ -1164,12 +1164,12 @@ vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
VINSN_INSN_RTX (vi) = insn;
VINSN_COUNT (vi) = 0;
vi->cost = -1;
-
+
if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
else
deps_init_id (VINSN_ID (vi), insn, force_unique_p);
-
+
/* Hash vinsn depending on whether it is separable or not. */
hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
if (VINSN_SEPARABLE_P (vi))
@@ -1188,7 +1188,7 @@ vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
NULL, NULL, false, hrcf);
VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
}
-
+
insn_class = haifa_classify_insn (insn);
if (insn_class >= 2
&& (!targetm.sched.get_insn_spec_ds
@@ -1209,7 +1209,7 @@ vinsn_attach (vinsn_t vi)
VINSN_COUNT (vi)++;
}
-/* Create and init VI from the INSN. Use UNIQUE_P for determining the correct
+/* Create and init VI from the INSN. Use UNIQUE_P for determining the correct
VINSN_TYPE (VI). */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
@@ -1222,13 +1222,13 @@ vinsn_create (insn_t insn, bool force_unique_p)
/* Return a copy of VI. When REATTACH_P is true, detach VI and attach
the copy. */
-vinsn_t
+vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
rtx copy;
bool unique = VINSN_UNIQUE_P (vi);
vinsn_t new_vi;
-
+
copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
new_vi = create_vinsn_from_insn_rtx (copy, unique);
if (reattach_p)
@@ -1253,7 +1253,7 @@ vinsn_delete (vinsn_t vi)
free (vi);
}
-/* Indicate that VI is no longer a part of some rtx object.
+/* Indicate that VI is no longer a part of some rtx object.
Remove VI if it is no longer needed. */
void
vinsn_detach (vinsn_t vi)
@@ -1360,21 +1360,21 @@ sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
}
/* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
- take it as a new vinsn instead of EXPR's vinsn.
- We simplify insns later, after scheduling region in
+ take it as a new vinsn instead of EXPR's vinsn.
+ We simplify insns later, after scheduling region in
simplify_changed_insns. */
insn_t
-sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
+sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
insn_t after)
{
expr_t emit_expr;
insn_t insn;
int flags;
-
- emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
+
+ emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
seqno);
insn = EXPR_INSN_RTX (emit_expr);
- add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
+ add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
flags = INSN_INIT_TODO_SSID;
if (INSN_LUID (insn) == 0)
@@ -1404,7 +1404,7 @@ sel_move_insn (expr_t expr, int seqno, insn_t after)
df_insn_change_bb (insn, bb);
if (BB_END (bb) == after)
BB_END (bb) = insn;
-
+
prepare_insn_expr (insn, seqno);
return insn;
}
@@ -1412,14 +1412,14 @@ sel_move_insn (expr_t expr, int seqno, insn_t after)
/* Functions to work with right-hand sides. */
-/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
+/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
VECT and return true when found. Use NEW_VINSN for comparison only when
- COMPARE_VINSNS is true. Write to INDP the index on which
- the search has stopped, such that inserting the new element at INDP will
+ COMPARE_VINSNS is true. Write to INDP the index on which
+ the search has stopped, such that inserting the new element at INDP will
retain VECT's sort order. */
static bool
-find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
- unsigned uid, vinsn_t new_vinsn,
+find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
+ unsigned uid, vinsn_t new_vinsn,
bool compare_vinsns, int *indp)
{
expr_history_def *arr;
@@ -1437,14 +1437,14 @@ find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
while (i <= j)
{
unsigned auid = arr[i].uid;
- vinsn_t avinsn = arr[i].new_expr_vinsn;
+ vinsn_t avinsn = arr[i].new_expr_vinsn;
if (auid == uid
- /* When undoing transformation on a bookkeeping copy, the new vinsn
- may not be exactly equal to the one that is saved in the vector.
+ /* When undoing transformation on a bookkeeping copy, the new vinsn
+ may not be exactly equal to the one that is saved in the vector.
This is because the insn whose copy we're checking was possibly
substituted itself. */
- && (! compare_vinsns
+ && (! compare_vinsns
|| vinsn_equal_p (avinsn, new_vinsn)))
{
*indp = i;
@@ -1459,16 +1459,16 @@ find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
return false;
}
-/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
- the position found or -1, if no such value is in vector.
+/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
+ the position found or -1, if no such value is in vector.
Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
int
-find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
+find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
vinsn_t new_vinsn, bool originators_p)
{
int ind;
- if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
+ if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
false, &ind))
return ind;
@@ -1481,18 +1481,18 @@ find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
return ind;
}
-
+
return -1;
}
-/* Insert new element in a sorted history vector pointed to by PVECT,
- if it is not there already. The element is searched using
+/* Insert new element in a sorted history vector pointed to by PVECT,
+ if it is not there already. The element is searched using
UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
the history of a transformation. */
void
insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
unsigned uid, enum local_trans_type type,
- vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
+ vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
ds_t spec_ds)
{
VEC(expr_history_def, heap) *vect = *pvect;
@@ -1506,17 +1506,17 @@ insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
{
expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
- /* It is possible that speculation types of expressions that were
+ /* It is possible that speculation types of expressions that were
propagated through different paths will be different here. In this
case, merge the status to get the correct check later. */
if (phist->spec_ds != spec_ds)
phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
return;
}
-
+
temp.uid = uid;
temp.old_expr_vinsn = old_expr_vinsn;
- temp.new_expr_vinsn = new_expr_vinsn;
+ temp.new_expr_vinsn = new_expr_vinsn;
temp.spec_ds = spec_ds;
temp.type = type;
@@ -1535,15 +1535,15 @@ free_history_vect (VEC (expr_history_def, heap) **pvect)
if (! *pvect)
return;
-
- for (i = 0;
+
+ for (i = 0;
VEC_iterate (expr_history_def, *pvect, i, phist);
i++)
{
vinsn_detach (phist->old_expr_vinsn);
vinsn_detach (phist->new_expr_vinsn);
}
-
+
VEC_free (expr_history_def, heap, *pvect);
*pvect = NULL;
}
@@ -1565,7 +1565,7 @@ vinsn_equal_p (vinsn_t x, vinsn_t y)
return false;
repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
- if (VINSN_SEPARABLE_P (x))
+ if (VINSN_SEPARABLE_P (x))
{
/* Compare RHSes of VINSNs. */
gcc_assert (VINSN_RHS (x));
@@ -1585,7 +1585,7 @@ static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
int sched_times, int orig_bb_index, ds_t spec_done_ds,
ds_t spec_to_check_ds, int orig_sched_cycle,
- VEC(expr_history_def, heap) *history, bool target_available,
+ VEC(expr_history_def, heap) *history, bool target_available,
bool was_substituted, bool was_renamed, bool needs_spec_check_p,
bool cant_move)
{
@@ -1626,7 +1626,7 @@ copy_expr (expr_t to, expr_t from)
expr_history_def *phist;
temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
- for (i = 0;
+ for (i = 0;
VEC_iterate (expr_history_def, temp, i, phist);
i++)
{
@@ -1635,17 +1635,17 @@ copy_expr (expr_t to, expr_t from)
}
}
- init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
+ init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
- EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
+ EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
EXPR_ORIG_SCHED_CYCLE (from), temp,
- EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
+ EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
EXPR_CANT_MOVE (from));
}
-/* Same, but the final expr will not ever be in av sets, so don't copy
+/* Same, but the final expr will not ever be in av sets, so don't copy
"uninteresting" data such as bitmap cache. */
void
copy_expr_onside (expr_t to, expr_t from)
@@ -1665,7 +1665,7 @@ prepare_insn_expr (insn_t insn, int seqno)
{
expr_t expr = INSN_EXPR (insn);
ds_t ds;
-
+
INSN_SEQNO (insn) = seqno;
EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
EXPR_SPEC (expr) = 0;
@@ -1688,12 +1688,12 @@ prepare_insn_expr (insn_t insn, int seqno)
}
/* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
- is non-null when expressions are merged from different successors at
+ is non-null when expressions are merged from different successors at
a split point. */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
- if (EXPR_TARGET_AVAILABLE (to) < 0
+ if (EXPR_TARGET_AVAILABLE (to) < 0
|| EXPR_TARGET_AVAILABLE (from) < 0)
EXPR_TARGET_AVAILABLE (to) = -1;
else
@@ -1707,9 +1707,9 @@ update_target_availability (expr_t to, expr_t from, insn_t split_point)
toind = EXPR_ORIG_BB_INDEX (to);
fromind = EXPR_ORIG_BB_INDEX (from);
-
+
if (toind && toind == fromind)
- /* Do nothing -- everything is done in
+ /* Do nothing -- everything is done in
merge_with_other_exprs. */
;
else
@@ -1721,7 +1721,7 @@ update_target_availability (expr_t to, expr_t from, insn_t split_point)
}
/* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
- is non-null when expressions are merged from different successors at
+ is non-null when expressions are merged from different successors at
a split point. */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
@@ -1730,30 +1730,30 @@ update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
old_to_ds = EXPR_SPEC_DONE_DS (to);
old_from_ds = EXPR_SPEC_DONE_DS (from);
-
+
EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
/* When merging e.g. control & data speculative exprs, or a control
- speculative with a control&data speculative one, we really have
+ speculative with a control&data speculative one, we really have
to change vinsn too. Also, when speculative status is changed,
we also need to record this as a transformation in expr's history. */
if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
{
old_to_ds = ds_get_speculation_types (old_to_ds);
old_from_ds = ds_get_speculation_types (old_from_ds);
-
+
if (old_to_ds != old_from_ds)
{
ds_t record_ds;
-
- /* When both expressions are speculative, we need to change
+
+ /* When both expressions are speculative, we need to change
the vinsn first. */
if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
{
int res;
-
+
res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
gcc_assert (res >= 0);
}
@@ -1764,9 +1764,9 @@ update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
record_ds &= ~(old_to_ds & SPECULATIVE);
record_ds &= ~(old_from_ds & SPECULATIVE);
-
- insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
- INSN_UID (split_point), TRANS_SPECULATION,
+
+ insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
+ INSN_UID (split_point), TRANS_SPECULATION,
EXPR_VINSN (from), EXPR_VINSN (to),
record_ds);
}
@@ -1782,7 +1782,7 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
{
int i;
expr_history_def *phist;
-
+
/* For now, we just set the spec of resulting expr to be minimum of the specs
of merged exprs. */
if (EXPR_SPEC (to) > EXPR_SPEC (from))
@@ -1791,7 +1791,7 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
if (split_point)
EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
else
- EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
+ EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
EXPR_USEFULNESS (from));
if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
@@ -1803,17 +1803,17 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
EXPR_ORIG_BB_INDEX (to) = 0;
- EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
+ EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
EXPR_ORIG_SCHED_CYCLE (from));
/* We keep this vector sorted. */
- for (i = 0;
- VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
+ for (i = 0;
+ VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
i, phist);
i++)
- insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
- phist->uid, phist->type,
- phist->old_expr_vinsn, phist->new_expr_vinsn,
+ insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
+ phist->uid, phist->type,
+ phist->old_expr_vinsn, phist->new_expr_vinsn,
phist->spec_ds);
EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
@@ -1825,7 +1825,7 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
}
/* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
- in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
+ in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
are merged from different successors at a split point. */
void
merge_expr (expr_t to, expr_t from, insn_t split_point)
@@ -1850,7 +1850,7 @@ merge_expr (expr_t to, expr_t from, insn_t split_point)
void
clear_expr (expr_t expr)
{
-
+
vinsn_detach (EXPR_VINSN (expr));
EXPR_VINSN (expr) = NULL;
@@ -1866,17 +1866,17 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
if (REG_P (EXPR_LHS (expr))
&& bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
{
- /* If it's an insn like r1 = use (r1, ...), and it exists in
- different forms in each of the av_sets being merged, we can't say
- whether original destination register is available or not.
- However, this still works if destination register is not used
+ /* If it's an insn like r1 = use (r1, ...), and it exists in
+ different forms in each of the av_sets being merged, we can't say
+ whether original destination register is available or not.
+ However, this still works if destination register is not used
in the original expression: if the branch at which LV_SET we're
looking here is not actually 'other branch' in sense that same
- expression is available through it (but it can't be determined
+ expression is available through it (but it can't be determined
at computation stage because of transformations on one of the
- branches), it still won't affect the availability.
- Liveness of a register somewhere on a code motion path means
- it's either read somewhere on a codemotion path, live on
+ branches), it still won't affect the availability.
+ Liveness of a register somewhere on a code motion path means
+ it's either read somewhere on a codemotion path, live on
'other' branch, live at the point immediately following
the original operation, or is read by the original operation.
The latter case is filtered out in the condition below.
@@ -1885,7 +1885,7 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
miss a unifying code motion along both branches using a renamed
register, but it won't affect a code correctness since upon
an actual code motion a bookkeeping code would be generated. */
- if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
+ if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
REGNO (EXPR_LHS (expr))))
EXPR_TARGET_AVAILABLE (expr) = -1;
else
@@ -1896,8 +1896,8 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
{
unsigned regno;
reg_set_iterator rsi;
-
- EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
+
+ EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
0, regno, rsi)
if (bitmap_bit_p (lv_set, regno))
{
@@ -1915,7 +1915,7 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
}
}
-/* Try to make EXPR speculative. Return 1 when EXPR's pattern
+/* Try to make EXPR speculative. Return 1 when EXPR's pattern
or dependence status have changed, 2 when also the target register
became unavailable, 0 if nothing had to be changed. */
int
@@ -1940,7 +1940,7 @@ speculate_expr (expr_t expr, ds_t ds)
case 0:
EXPR_SPEC_DONE_DS (expr) = ds;
return current_ds != ds ? 1 : 0;
-
+
case 1:
{
rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
@@ -1950,9 +1950,9 @@ speculate_expr (expr_t expr, ds_t ds)
EXPR_SPEC_DONE_DS (expr) = ds;
EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
- /* Do not allow clobbering the address register of speculative
+ /* Do not allow clobbering the address register of speculative
insns. */
- if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
+ if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
expr_dest_regno (expr)))
{
EXPR_TARGET_AVAILABLE (expr) = false;
@@ -1993,7 +1993,7 @@ expr_dest_regno (expr_t expr)
return REGNO (dest);
}
-/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
+/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
AV_SET having unavailable target register. */
void
mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
@@ -2024,7 +2024,7 @@ void
av_set_add (av_set_t *setp, expr_t expr)
{
av_set_t elem;
-
+
gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
elem = av_set_add_element (setp);
copy_expr (_AV_SET_EXPR (elem), expr);
@@ -2114,10 +2114,10 @@ merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
EXPR_USEFULNESS (expr2) = 0;
merge_expr (expr2, expr, NULL);
-
+
/* Fix usefulness as it should be now REG_BR_PROB_BASE. */
EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
-
+
av_set_iter_remove (ip);
return expr2;
}
@@ -2180,7 +2180,7 @@ av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
join_distinct_sets (i.lp, fromp);
}
-/* Same as above, but also update availability of target register in
+/* Same as above, but also update availability of target register in
TOP judging by TO_LV_SET and FROM_LV_SET. */
void
av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
@@ -2197,16 +2197,16 @@ av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
if (expr2)
{
- /* It may be that the expressions have different destination
+ /* It may be that the expressions have different destination
registers, in which case we need to check liveness here. */
if (EXPR_SEPARABLE_P (expr1))
{
- int regno1 = (REG_P (EXPR_LHS (expr1))
+ int regno1 = (REG_P (EXPR_LHS (expr1))
? (int) expr_dest_regno (expr1) : -1);
- int regno2 = (REG_P (EXPR_LHS (expr2))
+ int regno2 = (REG_P (EXPR_LHS (expr2))
? (int) expr_dest_regno (expr2) : -1);
-
- /* ??? We don't have a way to check restrictions for
+
+ /* ??? We don't have a way to check restrictions for
*other* register on the current path, we did it only
for the current target register. Give up. */
if (regno1 != regno2)
@@ -2220,7 +2220,7 @@ av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
av_set_iter_remove (&i);
}
else
- /* EXPR1 is present in TOP, but not in FROMP. Check it on
+ /* EXPR1 is present in TOP, but not in FROMP. Check it on
FROM_LV_SET. */
set_unavailable_target_for_expr (expr1, from_lv_set);
}
@@ -2256,7 +2256,7 @@ av_set_leave_one_nonspec (av_set_t *setp)
av_set_iterator i;
bool has_one_nonspec = false;
- /* Keep all speculative exprs, and leave one non-speculative
+ /* Keep all speculative exprs, and leave one non-speculative
(the first one). */
FOR_EACH_EXPR_1 (expr, i, setp)
{
@@ -2297,7 +2297,7 @@ av_set_substract_cond_branches (av_set_t *avp)
av_set_iter_remove (&i);
}
-/* Multiplies usefulness attribute of each member of av-set *AVP by
+/* Multiplies usefulness attribute of each member of av-set *AVP by
value PROB / ALL_PROB. */
void
av_set_split_usefulness (av_set_t av, int prob, int all_prob)
@@ -2306,7 +2306,7 @@ av_set_split_usefulness (av_set_t av, int prob, int all_prob)
expr_t expr;
FOR_EACH_EXPR (expr, i, av)
- EXPR_USEFULNESS (expr) = (all_prob
+ EXPR_USEFULNESS (expr) = (all_prob
? (EXPR_USEFULNESS (expr) * prob) / all_prob
: 0);
}
@@ -2346,13 +2346,13 @@ static struct
} deps_init_id_data;
-/* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
+/* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
clonable. */
static void
setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
{
int type;
-
+
/* Determine whether INSN could be cloned and return appropriate vinsn type.
That clonable insns which can be separated into lhs and rhs have type SET.
Other clonable insns have type USE. */
@@ -2365,7 +2365,7 @@ setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
type = PC;
else if (type == DEBUG_INSN)
type = !force_unique_p ? USE : INSN;
-
+
IDATA_TYPE (id) = type;
IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
IDATA_REG_USES (id) = get_clear_regset_from_pool ();
@@ -2417,7 +2417,7 @@ deps_init_id_note_reg_set (int regno)
SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
#ifdef STACK_REGS
- /* Make instructions that set stack registers to be ineligible for
+ /* Make instructions that set stack registers to be ineligible for
renaming to avoid issues with find_used_regs. */
if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
deps_init_id_data.force_use_p = true;
@@ -2484,9 +2484,9 @@ deps_init_id_finish_insn (void)
if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
|| deps_init_id_data.force_use_p)
{
- /* This should be a USE, as we don't want to schedule its RHS
+ /* This should be a USE, as we don't want to schedule its RHS
separately. However, we still want to have them recorded
- for the purposes of substitution. That's why we don't
+ for the purposes of substitution. That's why we don't
simply call downgrade_to_use () here. */
gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
gcc_assert (!lhs == !rhs);
@@ -2529,9 +2529,9 @@ static void
setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
{
rtx pat = PATTERN (insn);
-
+
if (NONJUMP_INSN_P (insn)
- && GET_CODE (pat) == SET
+ && GET_CODE (pat) == SET
&& !force_unique_p)
{
IDATA_RHS (id) = SET_SRC (pat);
@@ -2550,7 +2550,7 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
df_ref *rec;
rtx lhs = IDATA_LHS (id);
rtx rhs = IDATA_RHS (id);
-
+
/* We downgrade only SETs. */
if (IDATA_TYPE (id) != SET)
return;
@@ -2560,11 +2560,11 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
IDATA_TYPE (id) = USE;
return;
}
-
+
for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
{
df_ref def = *rec;
-
+
if (DF_REF_INSN (def)
&& DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
&& loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
@@ -2574,7 +2574,7 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
}
#ifdef STACK_REGS
- /* Make instructions that set stack registers to be ineligible for
+ /* Make instructions that set stack registers to be ineligible for
renaming to avoid issues with find_used_regs. */
if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
{
@@ -2582,8 +2582,8 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
break;
}
#endif
- }
-
+ }
+
if (must_be_use)
IDATA_TYPE (id) = USE;
}
@@ -2595,12 +2595,12 @@ setup_id_reg_sets (idata_t id, insn_t insn)
unsigned uid = INSN_UID (insn);
df_ref *rec;
regset tmp = get_clear_regset_from_pool ();
-
+
for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
{
df_ref def = *rec;
unsigned int regno = DF_REF_REGNO (def);
-
+
/* Post modifies are treated like clobbers by sched-deps.c. */
if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
| DF_REF_PRE_POST_MODIFY)))
@@ -2610,7 +2610,7 @@ setup_id_reg_sets (idata_t id, insn_t insn)
SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
#ifdef STACK_REGS
- /* For stack registers, treat writes to them as writes
+ /* For stack registers, treat writes to them as writes
to the first one to be consistent with sched-deps.c. */
if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
@@ -2621,7 +2621,7 @@ setup_id_reg_sets (idata_t id, insn_t insn)
|| regno == STACK_POINTER_REGNUM)
bitmap_set_bit (tmp, regno);
}
-
+
for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
{
df_ref use = *rec;
@@ -2636,7 +2636,7 @@ setup_id_reg_sets (idata_t id, insn_t insn)
SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
#ifdef STACK_REGS
- /* For stack registers, treat reads from them as reads from
+ /* For stack registers, treat reads from them as reads from
the first one to be consistent with sched-deps.c. */
if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
@@ -2734,7 +2734,7 @@ free_transformed_insns (void *p)
free (pti);
}
-/* Init the s_i_d data for INSN which should be inited just once, when
+/* Init the s_i_d data for INSN which should be inited just once, when
we first see the insn. */
static void
init_first_time_insn_data (insn_t insn)
@@ -2742,7 +2742,7 @@ init_first_time_insn_data (insn_t insn)
/* This should not be set if this is the first time we init data for
insn. */
gcc_assert (first_time_insn_init (insn));
-
+
/* These are needed for nops too. */
INSN_LIVE (insn) = get_regset_from_pool ();
INSN_LIVE_VALID_P (insn) = false;
@@ -2751,27 +2751,27 @@ init_first_time_insn_data (insn_t insn)
{
INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
- INSN_TRANSFORMED_INSNS (insn)
+ INSN_TRANSFORMED_INSNS (insn)
= htab_create (16, hash_transformed_insns,
eq_transformed_insns, free_transformed_insns);
init_deps (&INSN_DEPS_CONTEXT (insn), true);
}
}
-/* Free almost all above data for INSN that is scheduled already.
+/* Free almost all above data for INSN that is scheduled already.
Used for extra-large basic blocks. */
void
free_data_for_scheduled_insn (insn_t insn)
{
gcc_assert (! first_time_insn_init (insn));
-
+
if (! INSN_ANALYZED_DEPS (insn))
return;
-
+
BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
BITMAP_FREE (INSN_FOUND_DEPS (insn));
htab_delete (INSN_TRANSFORMED_INSNS (insn));
-
+
/* This is allocated only for bookkeeping insns. */
if (INSN_ORIGINATORS (insn))
BITMAP_FREE (INSN_ORIGINATORS (insn));
@@ -2779,7 +2779,7 @@ free_data_for_scheduled_insn (insn_t insn)
INSN_ANALYZED_DEPS (insn) = NULL;
- /* Clear the readonly flag so we would ICE when trying to recalculate
+ /* Clear the readonly flag so we would ICE when trying to recalculate
the deps context (as we believe that it should not happen). */
(&INSN_DEPS_CONTEXT (insn))->readonly = 0;
}
@@ -2856,7 +2856,7 @@ init_global_and_expr_for_insn (insn_t insn)
if (CANT_MOVE (insn)
|| INSN_ASM_P (insn)
|| SCHED_GROUP_P (insn)
- || prologue_epilogue_contains (insn)
+ || prologue_epilogue_contains (insn)
/* Exception handling insns are always unique. */
|| (flag_non_call_exceptions && can_throw_internal (insn))
/* TRAP_IF though have an INSN code is control_flow_insn_p (). */
@@ -2876,7 +2876,7 @@ init_global_and_expr_for_insn (insn_t insn)
/* Initialize INSN's expr. */
init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
- spec_done_ds, 0, 0, NULL, true, false, false, false,
+ spec_done_ds, 0, 0, NULL, true, false, false, false,
CANT_MOVE (insn));
}
@@ -2895,7 +2895,7 @@ sel_init_global_and_expr (bb_vec_t bbs)
extend_insn_data, /* extend_insn */
init_global_and_expr_for_insn /* init_insn */
};
-
+
sched_scan (&ssi, bbs, NULL, NULL, NULL);
}
@@ -2921,9 +2921,9 @@ finish_global_and_expr_insn (insn_t insn)
free_first_time_insn_data (insn);
INSN_WS_LEVEL (insn) = 0;
CANT_MOVE (insn) = 0;
-
- /* We can no longer assert this, as vinsns of this insn could be
- easily live in other insn's caches. This should be changed to
+
+ /* We can no longer assert this, as vinsns of this insn could be
+ easily live in other insn's caches. This should be changed to
a counter-like approach among all vinsns. */
gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
clear_expr (INSN_EXPR (insn));
@@ -2963,8 +2963,8 @@ sel_finish_global_and_expr (void)
}
-/* In the below hooks, we merely calculate whether or not a dependence
- exists, and in what part of insn. However, we will need more data
+/* In the below hooks, we merely calculate whether or not a dependence
+ exists, and in what part of insn. However, we will need more data
when we'll start caching dependence requests. */
/* Container to hold information for dependency analysis. */
@@ -3077,7 +3077,7 @@ has_dependence_note_reg_clobber (int regno)
if (reg_last->sets)
*dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
-
+
if (reg_last->uses)
*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
}
@@ -3234,7 +3234,7 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
/* We init this field lazily. */
if (dc->reg_last == NULL)
init_deps_reg_last (dc);
-
+
if (!dc->readonly)
{
has_dependence_data.pro = NULL;
@@ -3264,13 +3264,13 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
/* Do not allow stores to memory to move through checks. Currently
we don't move this to sched-deps.c as the check doesn't have
- obvious places to which this dependence can be attached.
+ obvious places to which this dependence can be attached.
FIMXE: this should go to a hook. */
if (EXPR_LHS (expr)
&& MEM_P (EXPR_LHS (expr))
&& sel_insn_is_speculation_check (pred))
has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
-
+
*has_dep_pp = has_dependence_data.has_dep_p;
ds = 0;
for (i = 0; i < DEPS_IN_NOWHERE; i++)
@@ -3281,9 +3281,9 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
}
-/* Dependence hooks implementation that checks dependence latency constraints
- on the insns being scheduled. The entry point for these routines is
- tick_check_p predicate. */
+/* Dependence hooks implementation that checks dependence latency constraints
+ on the insns being scheduled. The entry point for these routines is
+ tick_check_p predicate. */
static struct
{
@@ -3393,7 +3393,7 @@ tick_check_p (expr_t expr, deps_t dc, fence_t fence)
tick_check_data.cycle = 0;
tick_check_data.seen_true_dep_p = false;
sched_deps_info = &tick_check_sched_deps_info;
-
+
gcc_assert (!dc->readonly);
dc->readonly = 1;
deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
@@ -3416,7 +3416,7 @@ lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
if (lhs == NULL || dest == NULL)
return false;
-
+
return rtx_equal_p (lhs, dest);
}
@@ -3436,7 +3436,7 @@ sel_insn_is_speculation_check (rtx insn)
return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
}
-/* Extracts machine mode MODE and destination location DST_LOC
+/* Extracts machine mode MODE and destination location DST_LOC
for given INSN. */
void
get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
@@ -3455,7 +3455,7 @@ get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
*mode = GET_MODE (*dst_loc);
}
-/* Returns true when moving through JUMP will result in bookkeeping
+/* Returns true when moving through JUMP will result in bookkeeping
creation. */
bool
bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
@@ -3478,7 +3478,7 @@ insn_is_the_only_one_in_bb_p (insn_t insn)
}
#ifdef ENABLE_CHECKING
-/* Check that the region we're scheduling still has at most one
+/* Check that the region we're scheduling still has at most one
backedge. */
static void
verify_backedges (void)
@@ -3488,13 +3488,13 @@ verify_backedges (void)
int i, n = 0;
edge e;
edge_iterator ei;
-
+
for (i = 0; i < current_nr_blocks; i++)
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
if (in_current_region_p (e->dest)
&& BLOCK_TO_BB (e->dest->index) < i)
n++;
-
+
gcc_assert (n <= 1);
}
}
@@ -3515,9 +3515,9 @@ maybe_tidy_empty_bb (basic_block bb)
/* Keep empty bb only if this block immediately precedes EXIT and
has incoming non-fallthrough edge. Otherwise remove it. */
if (!sel_bb_empty_p (bb)
- || (single_succ_p (bb)
+ || (single_succ_p (bb)
&& single_succ (bb) == EXIT_BLOCK_PTR
- && (!single_pred_p (bb)
+ && (!single_pred_p (bb)
|| !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))))
return false;
@@ -3579,7 +3579,7 @@ maybe_tidy_empty_bb (basic_block bb)
return true;
}
-/* Tidy the control flow after we have removed original insn from
+/* Tidy the control flow after we have removed original insn from
XBB. Return true if we have removed some blocks. When FULL_TIDYING
is true, also try to optimize control flow on non-empty blocks. */
bool
@@ -3587,12 +3587,12 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
{
bool changed = true;
insn_t first, last;
-
+
/* First check whether XBB is empty. */
changed = maybe_tidy_empty_bb (xbb);
if (changed || !full_tidying)
return changed;
-
+
/* Check if there is a unnecessary jump after insn left. */
if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
&& INSN_SCHED_TIMES (BB_END (xbb)) == 0
@@ -3618,11 +3618,11 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
}
/* Check if there is an unnecessary jump in previous basic block leading
- to next basic block left after removing INSN from stream.
- If it is so, remove that jump and redirect edge to current
- basic block (where there was INSN before deletion). This way
- when NOP will be deleted several instructions later with its
- basic block we will not get a jump to next instruction, which
+ to next basic block left after removing INSN from stream.
+ If it is so, remove that jump and redirect edge to current
+ basic block (where there was INSN before deletion). This way
+ when NOP will be deleted several instructions later with its
+ basic block we will not get a jump to next instruction, which
can be harmful. */
if (first == last
&& !sel_bb_empty_p (xbb)
@@ -3656,8 +3656,8 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
return changed;
}
-/* Rip-off INSN from the insn stream. When ONLY_DISCONNECT is true,
- do not delete insn's data, because it will be later re-emitted.
+/* Rip-off INSN from the insn stream. When ONLY_DISCONNECT is true,
+ do not delete insn's data, because it will be later re-emitted.
Return true if we have removed some blocks afterwards. */
bool
sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
@@ -3752,15 +3752,15 @@ get_seqno_of_a_pred (insn_t insn)
&& !in_current_region_p (single_pred (bb)))
{
/* We can have preds outside a region when splitting edges
- for pipelining of an outer loop. Use succ instead.
+ for pipelining of an outer loop. Use succ instead.
There should be only one of them. */
insn_t succ = NULL;
succ_iterator si;
bool first = true;
-
+
gcc_assert (flag_sel_sched_pipelining_outer_loops
&& current_loop_nest);
- FOR_EACH_SUCC_1 (succ, si, insn,
+ FOR_EACH_SUCC_1 (succ, si, insn,
SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
{
gcc_assert (first);
@@ -3779,7 +3779,7 @@ get_seqno_of_a_pred (insn_t insn)
gcc_assert (n == 1);
seqno = INSN_SEQNO (preds[0]);
-
+
free (preds);
}
}
@@ -3802,7 +3802,7 @@ get_seqno_by_preds (rtx insn)
return INSN_SEQNO (tmp);
else
tmp = PREV_INSN (tmp);
-
+
cfg_preds (bb, &preds, &n);
for (i = 0, seqno = -1; i < n; i++)
seqno = MAX (seqno, INSN_SEQNO (preds[i]));
@@ -3862,14 +3862,14 @@ static void
extend_insn_data (void)
{
int reserve;
-
+
sched_extend_target ();
sched_deps_init (false);
/* Extend data structures for insns from current region. */
- reserve = (sched_max_luid + 1
+ reserve = (sched_max_luid + 1
- VEC_length (sel_insn_data_def, s_i_d));
- if (reserve > 0
+ if (reserve > 0
&& ! VEC_space (sel_insn_data_def, s_i_d, reserve))
{
int size;
@@ -3878,7 +3878,7 @@ extend_insn_data (void)
size = sched_max_luid + 1024;
else
size = 3 * sched_max_luid / 2;
-
+
VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
}
@@ -3895,7 +3895,7 @@ finish_insns (void)
for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
{
sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
-
+
if (sid_entry->live)
return_regset_to_pool (sid_entry->live);
if (sid_entry->analyzed_deps)
@@ -3908,13 +3908,13 @@ finish_insns (void)
if (EXPR_VINSN (&sid_entry->expr))
{
clear_expr (&sid_entry->expr);
-
+
/* Also, clear CANT_MOVE bit here, because we really don't want it
to be passed to the next region. */
CANT_MOVE_BY_LUID (i) = 0;
}
}
-
+
VEC_free (sel_insn_data_def, heap, s_i_d);
}
@@ -3963,7 +3963,7 @@ init_insn_data (insn_t insn)
if (insn_init_create_new_vinsn_p)
change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
-
+
if (first_time_insn_init (insn))
init_first_time_insn_data (insn);
}
@@ -3974,13 +3974,13 @@ static void
init_simplejump_data (insn_t insn)
{
init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
- REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
+ REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
false, true);
INSN_SEQNO (insn) = get_seqno_of_a_pred (insn);
init_first_time_insn_data (insn);
}
-/* Perform deferred initialization of insns. This is used to process
+/* Perform deferred initialization of insns. This is used to process
a new jump that may be created by redirect_edge. */
void
sel_init_new_insn (insn_t insn, int flags)
@@ -3993,7 +3993,7 @@ sel_init_new_insn (insn_t insn, int flags)
extend_bb_info ();
create_initial_data_sets (BLOCK_FOR_INSN (insn));
}
-
+
if (flags & INSN_INIT_TODO_LUID)
sched_init_luids (NULL, NULL, NULL, insn);
@@ -4009,7 +4009,7 @@ sel_init_new_insn (insn_t insn, int flags)
extend_insn_data ();
init_simplejump_data (insn);
}
-
+
gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
== CONTAINING_RGN (BB_TO_BLOCK (0)));
}
@@ -4024,7 +4024,7 @@ init_lv_set (basic_block bb)
gcc_assert (!BB_LV_SET_VALID_P (bb));
BB_LV_SET (bb) = get_regset_from_pool ();
- COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
+ COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
BB_LV_SET_VALID_P (bb) = true;
}
@@ -4033,10 +4033,10 @@ static void
copy_lv_set_from (basic_block bb, basic_block from_bb)
{
gcc_assert (!BB_LV_SET_VALID_P (bb));
-
+
COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
BB_LV_SET_VALID_P (bb) = true;
-}
+}
/* Initialize lv set of all bb headers. */
void
@@ -4417,7 +4417,7 @@ alloc_succs_info (void)
if (succs_info_pool.top == succs_info_pool.max_top)
{
int i;
-
+
if (++succs_info_pool.max_top >= succs_info_pool.size)
gcc_unreachable ();
@@ -4436,23 +4436,23 @@ alloc_succs_info (void)
void
free_succs_info (struct succs_info * sinfo)
{
- gcc_assert (succs_info_pool.top >= 0
+ gcc_assert (succs_info_pool.top >= 0
&& &succs_info_pool.stack[succs_info_pool.top] == sinfo);
succs_info_pool.top--;
/* Clear stale info. */
- VEC_block_remove (rtx, sinfo->succs_ok,
+ VEC_block_remove (rtx, sinfo->succs_ok,
0, VEC_length (rtx, sinfo->succs_ok));
- VEC_block_remove (rtx, sinfo->succs_other,
+ VEC_block_remove (rtx, sinfo->succs_other,
0, VEC_length (rtx, sinfo->succs_other));
- VEC_block_remove (int, sinfo->probs_ok,
+ VEC_block_remove (int, sinfo->probs_ok,
0, VEC_length (int, sinfo->probs_ok));
sinfo->all_prob = 0;
sinfo->succs_ok_n = 0;
sinfo->all_succs_n = 0;
}
-/* Compute successor info for INSN. FLAGS are the flags passed
+/* Compute successor info for INSN. FLAGS are the flags passed
to the FOR_EACH_SUCC_1 iterator. */
struct succs_info *
compute_succs_info (insn_t insn, short flags)
@@ -4472,10 +4472,10 @@ compute_succs_info (insn_t insn, short flags)
{
VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
VEC_safe_push (int, heap, sinfo->probs_ok,
- /* FIXME: Improve calculation when skipping
+ /* FIXME: Improve calculation when skipping
inner loop to exits. */
- (si.bb_end
- ? si.e1->probability
+ (si.bb_end
+ ? si.e1->probability
: REG_BR_PROB_BASE));
sinfo->succs_ok_n++;
}
@@ -4494,7 +4494,7 @@ compute_succs_info (insn_t insn, short flags)
return sinfo;
}
-/* Return the predecessors of BB in PREDS and their number in N.
+/* Return the predecessors of BB in PREDS and their number in N.
Empty blocks are skipped. SIZE is used to allocate PREDS. */
static void
cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
@@ -4517,7 +4517,7 @@ cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
else
{
if (*n == *size)
- *preds = XRESIZEVEC (insn_t, *preds,
+ *preds = XRESIZEVEC (insn_t, *preds,
(*size = 2 * *size + 1));
(*preds)[(*n)++] = bb_end;
}
@@ -4526,8 +4526,8 @@ cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
gcc_assert (*n != 0);
}
-/* Find all predecessors of BB and record them in PREDS and their number
- in N. Empty blocks are skipped, and only normal (forward in-region)
+/* Find all predecessors of BB and record them in PREDS and their number
+ in N. Empty blocks are skipped, and only normal (forward in-region)
edges are processed. */
static void
cfg_preds (basic_block bb, insn_t **preds, int *n)
@@ -4565,7 +4565,7 @@ sel_num_cfg_preds_gt_1 (insn_t insn)
return false;
}
-/* Returns true when BB should be the end of an ebb. Adapted from the
+/* Returns true when BB should be the end of an ebb. Adapted from the
code in sched-ebb.c. */
bool
bb_ends_ebb_p (basic_block bb)
@@ -4573,7 +4573,7 @@ bb_ends_ebb_p (basic_block bb)
basic_block next_bb = bb_next_bb (bb);
edge e;
edge_iterator ei;
-
+
if (next_bb == EXIT_BLOCK_PTR
|| bitmap_bit_p (forced_ebb_heads, next_bb->index)
|| (LABEL_P (BB_HEAD (next_bb))
@@ -4607,7 +4607,7 @@ in_same_ebb_p (insn_t insn, insn_t succ)
{
if (ptr == BLOCK_FOR_INSN (succ))
return true;
-
+
if (bb_ends_ebb_p (ptr))
return false;
@@ -4629,7 +4629,7 @@ recompute_rev_top_order (void)
if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
{
- rev_top_order_index_len = last_basic_block;
+ rev_top_order_index_len = last_basic_block;
rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
rev_top_order_index_len);
}
@@ -4665,7 +4665,7 @@ clear_outdated_rtx_info (basic_block bb)
EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
/* We cannot use the changed caches, as previously we could ignore
- the LHS dependence due to enabled renaming and transform
+ the LHS dependence due to enabled renaming and transform
the expression, and currently we'll be unable to do this. */
htab_empty (INSN_TRANSFORMED_INSNS (insn));
}
@@ -4716,12 +4716,12 @@ alloc_sched_pools (void)
int succs_size;
succs_size = MAX_WS + 1;
- succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
+ succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
succs_info_pool.size = succs_size;
succs_info_pool.top = -1;
succs_info_pool.max_top = -1;
- sched_lists_pool = create_alloc_pool ("sel-sched-lists",
+ sched_lists_pool = create_alloc_pool ("sel-sched-lists",
sizeof (struct _list_node), 500);
}
@@ -4730,7 +4730,7 @@ void
free_sched_pools (void)
{
int i;
-
+
free_alloc_pool (sched_lists_pool);
gcc_assert (succs_info_pool.top == -1);
for (i = 0; i < succs_info_pool.max_top; i++)
@@ -4743,7 +4743,7 @@ free_sched_pools (void)
}
-/* Returns a position in RGN where BB can be inserted retaining
+/* Returns a position in RGN where BB can be inserted retaining
topological order. */
static int
find_place_to_insert_bb (basic_block bb, int rgn)
@@ -4751,7 +4751,7 @@ find_place_to_insert_bb (basic_block bb, int rgn)
bool has_preds_outside_rgn = false;
edge e;
edge_iterator ei;
-
+
/* Find whether we have preds outside the region. */
FOR_EACH_EDGE (e, ei, bb->preds)
if (!in_current_region_p (e->src))
@@ -4759,7 +4759,7 @@ find_place_to_insert_bb (basic_block bb, int rgn)
has_preds_outside_rgn = true;
break;
}
-
+
/* Recompute the top order -- needed when we have > 1 pred
and in case we don't have preds outside. */
if (flag_sel_sched_pipelining_outer_loops
@@ -4771,11 +4771,11 @@ find_place_to_insert_bb (basic_block bb, int rgn)
for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
{
cur_bbi = BB_TO_BLOCK (i);
- if (rev_top_order_index[bbi]
+ if (rev_top_order_index[bbi]
< rev_top_order_index[cur_bbi])
break;
}
-
+
	  /* We skipped the right block, so we increase i.  We accommodate
	     it for increasing by step later, so we decrease i.  */
return (i + 1) - 1;
@@ -4798,9 +4798,9 @@ find_place_to_insert_bb (basic_block bb, int rgn)
if (EDGE_COUNT (bb->succs) > 0)
{
int pred_bbi;
-
+
gcc_assert (EDGE_COUNT (bb->preds) == 1);
-
+
pred_bbi = EDGE_PRED (bb, 0)->src->index;
return BLOCK_TO_BB (pred_bbi);
}
@@ -4820,14 +4820,14 @@ delete_and_free_basic_block (basic_block bb)
bitmap_clear_bit (blocks_to_reschedule, bb->index);
- /* Can't assert av_set properties because we use sel_aremove_bb
- when removing loop preheader from the region. At the point of
+ /* Can't assert av_set properties because we use sel_aremove_bb
+ when removing loop preheader from the region. At the point of
removing the preheader we already have deallocated sel_region_bb_info. */
gcc_assert (BB_LV_SET (bb) == NULL
&& !BB_LV_SET_VALID_P (bb)
&& BB_AV_LEVEL (bb) == 0
&& BB_AV_SET (bb) == NULL);
-
+
delete_basic_block (bb);
}
@@ -4844,13 +4844,13 @@ add_block_to_current_region (basic_block bb)
gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
&& ebb_head[bbi] == pos);
-
+
/* Make a place for the new block. */
extend_regions ();
for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
BLOCK_TO_BB (rgn_bb_table[i])++;
-
+
memmove (rgn_bb_table + pos + 1,
rgn_bb_table + pos,
(RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
@@ -4861,7 +4861,7 @@ add_block_to_current_region (basic_block bb)
CONTAINING_RGN (bb->index) = rgn;
RGN_NR_BLOCKS (rgn)++;
-
+
for (i = rgn + 1; i <= nr_regions; i++)
RGN_BLOCKS (i)++;
}
@@ -4891,7 +4891,7 @@ remove_bb_from_region (basic_block bb)
RGN_BLOCKS (i)--;
}
-/* Add BB to the current region and update all data. If BB is NULL, add all
+/* Add BB to the current region and update all data. If BB is NULL, add all
blocks from last_added_blocks vector. */
static void
sel_add_bb (basic_block bb)
@@ -4901,23 +4901,23 @@ sel_add_bb (basic_block bb)
sched_init_bbs ();
sel_init_bbs (last_added_blocks, NULL);
- /* When bb is passed explicitly, the vector should contain
+ /* When bb is passed explicitly, the vector should contain
     a single element equal to BB; otherwise, the vector
should not be NULL. */
gcc_assert (last_added_blocks != NULL);
-
+
if (bb != NULL)
{
gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
- && VEC_index (basic_block,
- last_added_blocks, 0) == bb);
+ && VEC_index (basic_block,
+ last_added_blocks, 0) == bb);
add_block_to_current_region (bb);
/* We associate creating/deleting data sets with the first insn
appearing / disappearing in the bb. */
if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
create_initial_data_sets (bb);
-
+
VEC_free (basic_block, heap, last_added_blocks);
}
else
@@ -4926,14 +4926,14 @@ sel_add_bb (basic_block bb)
int i;
basic_block temp_bb = NULL;
- for (i = 0;
+ for (i = 0;
VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
{
add_block_to_current_region (bb);
temp_bb = bb;
}
- /* We need to fetch at least one bb so we know the region
+ /* We need to fetch at least one bb so we know the region
to update. */
gcc_assert (temp_bb != NULL);
bb = temp_bb;
@@ -4944,17 +4944,17 @@ sel_add_bb (basic_block bb)
rgn_setup_region (CONTAINING_RGN (bb->index));
}
-/* Remove BB from the current region and update all data.
+/* Remove BB from the current region and update all data.
   If REMOVE_FROM_CFG_P is true, also remove the block from the CFG.  */
static void
sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
{
gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
-
+
remove_bb_from_region (bb);
return_bb_to_pool (bb);
bitmap_clear_bit (blocks_to_reschedule, bb->index);
-
+
if (remove_from_cfg_p)
delete_and_free_basic_block (bb);
@@ -4967,15 +4967,15 @@ move_bb_info (basic_block merge_bb, basic_block empty_bb)
{
gcc_assert (in_current_region_p (merge_bb));
- concat_note_lists (BB_NOTE_LIST (empty_bb),
+ concat_note_lists (BB_NOTE_LIST (empty_bb),
&BB_NOTE_LIST (merge_bb));
BB_NOTE_LIST (empty_bb) = NULL_RTX;
}
-/* Remove an empty basic block EMPTY_BB. When MERGE_UP_P is true, we put
- EMPTY_BB's note lists into its predecessor instead of putting them
- into the successor. When REMOVE_FROM_CFG_P is true, also remove
+/* Remove an empty basic block EMPTY_BB. When MERGE_UP_P is true, we put
+ EMPTY_BB's note lists into its predecessor instead of putting them
+ into the successor. When REMOVE_FROM_CFG_P is true, also remove
the empty block. */
void
sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
@@ -4998,7 +4998,7 @@ sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
merge_bb = bb_next_bb (empty_bb);
- /* Redirect incoming edges (except fallthrough one) of EMPTY_BB to its
+ /* Redirect incoming edges (except fallthrough one) of EMPTY_BB to its
successor block. */
for (ei = ei_start (empty_bb->preds);
(e = ei_safe_edge (ei)); )
@@ -5084,15 +5084,15 @@ remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
sel_remove_bb (empty_bb, remove_from_cfg_p);
}
-/* An implementation of create_basic_block hook, which additionally updates
+/* An implementation of create_basic_block hook, which additionally updates
per-bb data structures. */
static basic_block
sel_create_basic_block (void *headp, void *endp, basic_block after)
{
basic_block new_bb;
insn_t new_bb_note;
-
- gcc_assert (flag_sel_sched_pipelining_outer_loops
+
+ gcc_assert (flag_sel_sched_pipelining_outer_loops
|| last_added_blocks == NULL);
new_bb_note = get_bb_note_from_pool ();
@@ -5142,7 +5142,7 @@ change_loops_latches (basic_block from, basic_block to)
}
}
-/* Splits BB on two basic blocks, adding it to the region and extending
+/* Splits BB on two basic blocks, adding it to the region and extending
per-bb data structures. Returns the newly created bb. */
static basic_block
sel_split_block (basic_block bb, rtx after)
@@ -5154,7 +5154,7 @@ sel_split_block (basic_block bb, rtx after)
sel_add_bb (new_bb);
/* This should be called after sel_add_bb, because this uses
- CONTAINING_RGN for the new block, which is not yet initialized.
+ CONTAINING_RGN for the new block, which is not yet initialized.
FIXME: this function may be a no-op now. */
change_loops_latches (bb, new_bb);
@@ -5194,7 +5194,7 @@ check_for_new_jump (basic_block bb, int prev_max_uid)
return NULL;
}
-/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
+/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
New means having UID at least equal to PREV_MAX_UID. */
static rtx
find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
@@ -5204,7 +5204,7 @@ find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
/* Return immediately if no new insns were emitted. */
if (get_max_uid () == prev_max_uid)
return NULL;
-
+
  /* Now check both blocks for new jumps.  There will only ever be one.  */
if ((jump = check_for_new_jump (from, prev_max_uid)))
return jump;
@@ -5228,15 +5228,15 @@ sel_split_edge (edge e)
prev_max_uid = get_max_uid ();
new_bb = split_edge (e);
- if (flag_sel_sched_pipelining_outer_loops
+ if (flag_sel_sched_pipelining_outer_loops
&& current_loop_nest)
{
int i;
basic_block bb;
- /* Some of the basic blocks might not have been added to the loop.
+ /* Some of the basic blocks might not have been added to the loop.
Add them here, until this is fixed in force_fallthru. */
- for (i = 0;
+ for (i = 0;
VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
if (!bb->loop_father)
{
@@ -5305,9 +5305,9 @@ sel_create_recovery_block (insn_t orig_insn)
sched_create_recovery_edges (first_bb, recovery_block, second_bb);
if (current_loops != NULL)
add_bb_to_loop (recovery_block, first_bb->loop_father);
-
+
sel_add_bb (recovery_block);
-
+
jump = BB_END (recovery_block);
gcc_assert (sel_bb_head (recovery_block) == jump);
sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
@@ -5334,9 +5334,9 @@ sel_redirect_edge_and_branch_force (edge e, basic_block to)
basic_block jump_bb, src;
int prev_max_uid;
rtx jump;
-
+
gcc_assert (!sel_bb_empty_p (e->src));
-
+
src = e->src;
prev_max_uid = get_max_uid ();
jump_bb = redirect_edge_and_branch_force (e, to);
@@ -5349,7 +5349,7 @@ sel_redirect_edge_and_branch_force (edge e, basic_block to)
if (current_loop_nest
&& pipelining_p)
gcc_assert (loop_latch_edge (current_loop_nest));
-
+
jump = find_new_jump (src, jump_bb, prev_max_uid);
if (jump)
sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
@@ -5495,7 +5495,7 @@ change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
/* Helpers for global init. */
/* This structure is used to be able to call existing bundling mechanism
and calculate insn priorities. */
-static struct haifa_sched_info sched_sel_haifa_sched_info =
+static struct haifa_sched_info sched_sel_haifa_sched_info =
{
NULL, /* init_ready_list */
NULL, /* can_schedule_ready_p */
@@ -5517,7 +5517,7 @@ static struct haifa_sched_info sched_sel_haifa_sched_info =
};
/* Setup special insns used in the scheduler. */
-void
+void
setup_nop_and_exit_insns (void)
{
gcc_assert (nop_pattern == NULL_RTX
@@ -5561,9 +5561,9 @@ free_nop_vinsn (void)
void
sel_set_sched_flags (void)
{
- /* ??? This means that set_sched_flags were called, and we decided to
+ /* ??? This means that set_sched_flags were called, and we decided to
support speculation. However, set_sched_flags also modifies flags
- on current_sched_info, doing this only at global init. And we
+ on current_sched_info, doing this only at global init. And we
sometimes change c_s_i later. So put the correct flags again. */
if (spec_info && targetm.sched.set_sched_flags)
targetm.sched.set_sched_flags (spec_info);
@@ -5588,9 +5588,9 @@ sel_setup_sched_infos (void)
common_sched_info = &sel_common_sched_info;
current_sched_info = &sched_sel_haifa_sched_info;
- current_sched_info->sched_max_insns_priority =
+ current_sched_info->sched_max_insns_priority =
get_rgn_sched_max_insns_priority ();
-
+
sel_set_sched_flags ();
}
@@ -5624,14 +5624,14 @@ sel_create_new_region (void)
/* FIXME: This will work only when EBBs are not created. */
if (new_rgn_number != 0)
- RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
+ RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
RGN_NR_BLOCKS (new_rgn_number - 1);
else
RGN_BLOCKS (new_rgn_number) = 0;
/* Set the blocks of the next region so the other functions may
calculate the number of blocks in the region. */
- RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
+ RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
RGN_NR_BLOCKS (new_rgn_number);
nr_regions++;
@@ -5647,8 +5647,8 @@ bb_top_order_comparator (const void *x, const void *y)
basic_block bb1 = *(const basic_block *) x;
basic_block bb2 = *(const basic_block *) y;
- gcc_assert (bb1 == bb2
- || rev_top_order_index[bb1->index]
+ gcc_assert (bb1 == bb2
+ || rev_top_order_index[bb1->index]
!= rev_top_order_index[bb2->index]);
/* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
@@ -5659,7 +5659,7 @@ bb_top_order_comparator (const void *x, const void *y)
return 1;
}
-/* Create a region for LOOP and return its number. If we don't want
+/* Create a region for LOOP and return its number. If we don't want
to pipeline LOOP, return -1. */
static int
make_region_from_loop (struct loop *loop)
@@ -5673,10 +5673,10 @@ make_region_from_loop (struct loop *loop)
basic_block *loop_blocks;
basic_block preheader_block;
- if (loop->num_nodes
+ if (loop->num_nodes
> (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
return -1;
-
+
/* Don't pipeline loops whose latch belongs to some of its inner loops. */
for (inner = loop->inner; inner; inner = inner->inner)
if (flow_bb_inside_loop_p (inner, loop->latch))
@@ -5708,14 +5708,14 @@ make_region_from_loop (struct loop *loop)
{
/* Add only those blocks that haven't been scheduled in the inner loop.
The exception is the basic blocks with bookkeeping code - they should
- be added to the region (and they actually don't belong to the loop
+ be added to the region (and they actually don't belong to the loop
body, but to the region containing that loop body). */
gcc_assert (new_rgn_number >= 0);
if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
{
- sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
+ sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
new_rgn_number);
SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
}
@@ -5753,11 +5753,11 @@ make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
/* Create region(s) from loop nest LOOP, such that inner loops will be
- pipelined before outer loops. Returns true when a region for LOOP
+ pipelined before outer loops. Returns true when a region for LOOP
is created. */
static bool
make_regions_from_loop_nest (struct loop *loop)
-{
+{
struct loop *cur_loop;
int rgn_number;
@@ -5813,10 +5813,10 @@ considered_for_pipelining_p (struct loop *loop)
if (loop_depth (loop) == 0)
return false;
- /* Now, the loop could be too large or irreducible. Check whether its
- region is in LOOP_NESTS.
- We determine the region number of LOOP as the region number of its
- latch. We can't use header here, because this header could be
+ /* Now, the loop could be too large or irreducible. Check whether its
+ region is in LOOP_NESTS.
+ We determine the region number of LOOP as the region number of its
+ latch. We can't use header here, because this header could be
just removed preheader and it will give us the wrong region number.
Latch can't be used because it could be in the inner loop too. */
if (LOOP_MARKED_FOR_PIPELINING_P (loop) && pipelining_p)
@@ -5826,11 +5826,11 @@ considered_for_pipelining_p (struct loop *loop)
gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
return true;
}
-
+
return false;
}
-/* Makes regions from the rest of the blocks, after loops are chosen
+/* Makes regions from the rest of the blocks, after loops are chosen
for pipelining. */
static void
make_regions_from_the_rest (void)
@@ -5849,7 +5849,7 @@ make_regions_from_the_rest (void)
cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
new_regions = nr_regions;
- /* Make regions from all the rest basic blocks - those that don't belong to
+ /* Make regions from all the rest basic blocks - those that don't belong to
any loop or belong to irreducible loops. Prepare the data structures
for extend_rgns. */
@@ -5872,10 +5872,10 @@ make_regions_from_the_rest (void)
loop_hdr[bb->index] = bb->loop_father->num;
}
- /* For each basic block degree is calculated as the number of incoming
+ /* For each basic block degree is calculated as the number of incoming
edges, that are going out of bbs that are not yet scheduled.
The basic blocks that are scheduled have degree value of zero. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB (bb)
{
degree[bb->index] = 0;
@@ -5927,9 +5927,9 @@ void sel_finish_pipelining (void)
rev_top_order_index = NULL;
}
-/* This function replaces the find_rgns when
+/* This function replaces the find_rgns when
FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */
-void
+void
sel_find_rgns (void)
{
sel_init_pipelining ();
@@ -5947,7 +5947,7 @@ sel_find_rgns (void)
}
/* Make regions from all the rest basic blocks and schedule them.
- These blocks include blocks that don't belong to any loop or belong
+ These blocks include blocks that don't belong to any loop or belong
to irreducible loops. */
make_regions_from_the_rest ();
@@ -5956,15 +5956,15 @@ sel_find_rgns (void)
bbs_in_loop_rgns = NULL;
}
-/* Adds the preheader blocks from previous loop to current region taking
- it from LOOP_PREHEADER_BLOCKS (current_loop_nest).
+/* Adds the preheader blocks from previous loop to current region taking
+ it from LOOP_PREHEADER_BLOCKS (current_loop_nest).
This function is only used with -fsel-sched-pipelining-outer-loops. */
void
sel_add_loop_preheaders (void)
{
int i;
basic_block bb;
- VEC(basic_block, heap) *preheader_blocks
+ VEC(basic_block, heap) *preheader_blocks
= LOOP_PREHEADER_BLOCKS (current_loop_nest);
for (i = 0;
@@ -5975,9 +5975,9 @@ sel_add_loop_preheaders (void)
VEC_free (basic_block, heap, preheader_blocks);
}
-/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
- Please note that the function should also work when pipelining_p is
- false, because it is used when deciding whether we should or should
+/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
+ Please note that the function should also work when pipelining_p is
+ false, because it is used when deciding whether we should or should
not reschedule pipelined code. */
bool
sel_is_loop_preheader_p (basic_block bb)
@@ -5997,7 +5997,7 @@ sel_is_loop_preheader_p (basic_block bb)
Check that the above code is equivalent to what we did before. */
if (in_current_region_p (current_loop_nest->header))
- gcc_assert (!(BLOCK_TO_BB (bb->index)
+ gcc_assert (!(BLOCK_TO_BB (bb->index)
< BLOCK_TO_BB (current_loop_nest->header->index)));
/* Support the situation when the latch block of outer loop
@@ -6018,13 +6018,13 @@ jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb)
{
basic_block jump_bb = BLOCK_FOR_INSN (jump);
- /* It is not jump, jump with side-effects or jump can lead to several
+ /* It is not jump, jump with side-effects or jump can lead to several
basic blocks. */
if (!onlyjump_p (jump)
|| !any_uncondjump_p (jump))
return false;
- /* Several outgoing edges, abnormal edge or destination of jump is
+ /* Several outgoing edges, abnormal edge or destination of jump is
not DEST_BB. */
if (EDGE_COUNT (jump_bb->succs) != 1
|| EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL
@@ -6036,7 +6036,7 @@ jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb)
}
/* Removes the loop preheader from the current region and saves it in
- PREHEADER_BLOCKS of the father loop, so they will be added later to
+ PREHEADER_BLOCKS of the father loop, so they will be added later to
region that represents an outer loop. */
static void
sel_remove_loop_preheader (void)
@@ -6045,7 +6045,7 @@ sel_remove_loop_preheader (void)
int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
basic_block bb;
bool all_empty_p = true;
- VEC(basic_block, heap) *preheader_blocks
+ VEC(basic_block, heap) *preheader_blocks
= LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
gcc_assert (current_loop_nest);
@@ -6056,7 +6056,7 @@ sel_remove_loop_preheader (void)
{
bb = BASIC_BLOCK (BB_TO_BLOCK (i));
- /* If the basic block belongs to region, but doesn't belong to
+ /* If the basic block belongs to region, but doesn't belong to
corresponding loop, then it should be a preheader. */
if (sel_is_loop_preheader_p (bb))
{
@@ -6065,13 +6065,13 @@ sel_remove_loop_preheader (void)
all_empty_p = false;
}
}
-
+
/* Remove these blocks only after iterating over the whole region. */
for (i = VEC_length (basic_block, preheader_blocks) - 1;
i >= old_len;
i--)
{
- bb = VEC_index (basic_block, preheader_blocks, i);
+ bb = VEC_index (basic_block, preheader_blocks, i);
sel_remove_bb (bb, false);
}
@@ -6101,9 +6101,9 @@ sel_remove_loop_preheader (void)
gcc_assert (BB_NOTE_LIST (bb) == NULL);
delete_and_free_basic_block (bb);
- /* Check if after deleting preheader there is a nonconditional
- jump in PREV_BB that leads to the next basic block NEXT_BB.
- If it is so - delete this jump and clear data sets of its
+ /* Check if after deleting preheader there is a nonconditional
+ jump in PREV_BB that leads to the next basic block NEXT_BB.
+ If it is so - delete this jump and clear data sets of its
basic block if it becomes empty. */
if (next_bb->prev_bb == prev_bb
&& prev_bb != ENTRY_BLOCK_PTR
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 9563d2a0a4f..1950a65e77f 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -68,7 +68,7 @@ typedef _xlist_t ilist_t;
#define ILIST_INSN(L) (_XLIST_X (L))
#define ILIST_NEXT(L) (_XLIST_NEXT (L))
-/* This lists possible transformations that done locally, i.e. in
+/* This lists possible transformations that done locally, i.e. in
moveup_expr. */
enum local_trans_type
{
@@ -76,7 +76,7 @@ enum local_trans_type
TRANS_SPECULATION
};
-/* This struct is used to record the history of expression's
+/* This struct is used to record the history of expression's
transformations. */
struct expr_history_def_1
{
@@ -114,8 +114,8 @@ struct _expr
control on scheduling. */
int spec;
- /* Degree of speculativeness measured as probability of executing
- instruction's original basic block given relative to
+ /* Degree of speculativeness measured as probability of executing
+ instruction's original basic block given relative to
the current scheduling point. */
int usefulness;
@@ -128,7 +128,7 @@ struct _expr
/* Number of times the insn was scheduled. */
int sched_times;
- /* A basic block index this was originated from. Zero when there is
+ /* A basic block index this was originated from. Zero when there is
more than one originator. */
int orig_bb_index;
@@ -140,23 +140,23 @@ struct _expr
(used only during move_op ()). */
ds_t spec_to_check_ds;
- /* Cycle on which original insn was scheduled. Zero when it has not yet
+ /* Cycle on which original insn was scheduled. Zero when it has not yet
been scheduled or more than one originator. */
int orig_sched_cycle;
/* This vector contains the history of insn's transformations. */
VEC(expr_history_def, heap) *history_of_changes;
- /* True (1) when original target (register or memory) of this instruction
+ /* True (1) when original target (register or memory) of this instruction
is available for scheduling, false otherwise. -1 means we're not sure;
please run find_used_regs to clarify. */
signed char target_available;
- /* True when this expression needs a speculation check to be scheduled.
+ /* True when this expression needs a speculation check to be scheduled.
This is used during find_used_regs. */
BOOL_BITFIELD needs_spec_check_p : 1;
- /* True when the expression was substituted. Used for statistical
+ /* True when the expression was substituted. Used for statistical
purposes. */
BOOL_BITFIELD was_substituted : 1;
@@ -195,7 +195,7 @@ typedef expr_def *expr_t;
#define EXPR_CANT_MOVE(EXPR) ((EXPR)->cant_move)
#define EXPR_WAS_CHANGED(EXPR) (VEC_length (expr_history_def, \
- EXPR_HISTORY_OF_CHANGES (EXPR)) > 0)
+ EXPR_HISTORY_OF_CHANGES (EXPR)) > 0)
/* Insn definition for list of original insns in find_used_regs. */
struct _def
@@ -204,7 +204,7 @@ struct _def
/* FIXME: Get rid of CROSSES_CALL in each def, since if we're moving up
rhs from two different places, but only one of the code motion paths
- crosses a call, we can't use any of the call_used_regs, no matter which
+ crosses a call, we can't use any of the call_used_regs, no matter which
path or whether all paths crosses a call. Thus we should move CROSSES_CALL
to static params. */
bool crosses_call;
@@ -232,7 +232,7 @@ struct _bnd
/* This set moved to the fence. */
av_set_t av1;
-
+
/* Deps context at this boundary. As long as we have one boundary per fence,
this is just a pointer to the same deps context as in the corresponding
fence. */
@@ -286,7 +286,7 @@ struct _fence
/* A vector of insns that are scheduled but not yet completed. */
VEC (rtx,gc) *executing_insns;
- /* A vector indexed by UIDs that caches the earliest cycle on which
+ /* A vector indexed by UIDs that caches the earliest cycle on which
an insn can be scheduled on this fence. */
int *ready_ticks;
@@ -464,7 +464,7 @@ _list_iter_remove_nofree (_list_iterator *ip)
#define _FOR_EACH_1(TYPE, ELEM, I, LP) \
for (_list_iter_start (&(I), (LP), true); \
_list_iter_cond_##TYPE (*(I).lp, &(ELEM)); \
- _list_iter_next (&(I)))
+ _list_iter_next (&(I)))
/* _xlist_t functions. */
@@ -666,7 +666,7 @@ struct vinsn_def
#define VINSN_MAY_TRAP_P(VI) ((VI)->may_trap_p)
-/* An entry of the hashtable describing transformations happened when
+/* An entry of the hashtable describing transformations happened when
moving up through an insn. */
struct transformed_insns
{
@@ -709,18 +709,18 @@ struct _sel_insn_data
/* An INSN_UID bit is set when deps analysis result is already known. */
bitmap analyzed_deps;
- /* An INSN_UID bit is set when a hard dep was found, not set when
+ /* An INSN_UID bit is set when a hard dep was found, not set when
no dependence is found. This is meaningful only when the analyzed_deps
bitmap has its bit set. */
bitmap found_deps;
- /* An INSN_UID bit is set when this is a bookkeeping insn generated from
+ /* An INSN_UID bit is set when this is a bookkeeping insn generated from
a parent with this uid. */
bitmap originators;
/* A hashtable caching the result of insn transformations through this one. */
htab_t transformed_insns;
-
+
  /* A context encapsulating this insn.  */
struct deps deps_context;
@@ -767,8 +767,8 @@ extern sel_insn_data_def insn_sid (insn_t);
#define INSN_ASM_P(INSN) (SID (INSN)->asm_p)
#define INSN_SCHED_NEXT(INSN) (SID (INSN)->sched_next)
#define INSN_ANALYZED_DEPS(INSN) (SID (INSN)->analyzed_deps)
-#define INSN_FOUND_DEPS(INSN) (SID (INSN)->found_deps)
-#define INSN_DEPS_CONTEXT(INSN) (SID (INSN)->deps_context)
+#define INSN_FOUND_DEPS(INSN) (SID (INSN)->found_deps)
+#define INSN_DEPS_CONTEXT(INSN) (SID (INSN)->deps_context)
#define INSN_ORIGINATORS(INSN) (SID (INSN)->originators)
#define INSN_ORIGINATORS_BY_UID(UID) (SID_BY_UID (UID)->originators)
#define INSN_TRANSFORMED_INSNS(INSN) (SID (INSN)->transformed_insns)
@@ -898,7 +898,7 @@ extern void sel_finish_global_bb_info (void);
#define BB_LV_SET_VALID_P(BB) (SEL_GLOBAL_BB_INFO (BB)->lv_set_valid_p)
/* Per basic block data for the region. */
-typedef struct
+typedef struct
{
/* This insn stream is constructed in such a way that it should be
traversed by PREV_INSN field - (*not* NEXT_INSN). */
@@ -947,11 +947,11 @@ extern sbitmap bbs_pipelined;
extern bool enable_moveup_set_path_p;
extern bool pipelining_p;
extern bool bookkeeping_p;
-extern int max_insns_to_rename;
+extern int max_insns_to_rename;
extern bool preheader_removed;
/* Software lookahead window size.
- According to the results in Nakatani and Ebcioglu [1993], window size of 16
+ According to the results in Nakatani and Ebcioglu [1993], window size of 16
is enough to extract most ILP in integer code. */
#define MAX_WS (PARAM_VALUE (PARAM_SELSCHED_MAX_LOOKAHEAD))
@@ -969,7 +969,7 @@ typedef struct
/* The previous edge saved after skipping empty blocks. */
edge e2;
-
+
/* Edge iterator used when there are successors in other basic blocks. */
edge_iterator ei;
@@ -979,8 +979,8 @@ typedef struct
/* Flags that are passed to the iterator. We return only successors
that comply to these flags. */
short flags;
-
- /* When flags include SUCCS_ALL, this will be set to the exact type
+
+ /* When flags include SUCCS_ALL, this will be set to the exact type
     of the successor we're traversing now.  */
short current_flags;
@@ -998,7 +998,7 @@ struct succs_info
/* Successors that correspond to the flags. */
insn_vec_t succs_ok;
- /* Their probabilities. As of now, we don't need this for other
+ /* Their probabilities. As of now, we don't need this for other
successors. */
VEC(int,heap) *probs_ok;
@@ -1027,7 +1027,7 @@ extern bool in_current_region_p (basic_block);
static inline bool
inner_loop_header_p (basic_block bb)
{
- struct loop *inner_loop;
+ struct loop *inner_loop;
if (!current_loop_nest)
return false;
@@ -1048,7 +1048,7 @@ inner_loop_header_p (basic_block bb)
return true;
}
- return false;
+ return false;
}
/* Return exit edges of LOOP, filtering out edges with the same dest bb. */
@@ -1066,7 +1066,7 @@ get_loop_exit_edges_unique_dests (const struct loop *loop)
int i;
edge e;
bool was_dest = false;
-
+
for (i = 0; VEC_iterate (edge, edges, i, e); i++)
if (e->dest == exit->e->dest)
{
@@ -1101,10 +1101,10 @@ sel_bb_empty_or_nop_p (basic_block bb)
return true;
}
-/* Collect all loop exits recursively, skipping empty BBs between them.
+/* Collect all loop exits recursively, skipping empty BBs between them.
E.g. if BB is a loop header which has several loop exits,
traverse all of them and if any of them turns out to be another loop header
- (after skipping empty BBs), add its loop exits to the resulting vector
+ (after skipping empty BBs), add its loop exits to the resulting vector
as well. */
static inline VEC(edge, heap) *
get_all_loop_exits (basic_block bb)
@@ -1129,12 +1129,12 @@ get_all_loop_exits (basic_block bb)
struct loop *pred_loop = NULL;
int i;
edge e;
-
+
for (this_loop = bb->loop_father;
this_loop && this_loop != current_loop_nest;
this_loop = loop_outer (this_loop))
pred_loop = this_loop;
-
+
this_loop = pred_loop;
gcc_assert (this_loop != NULL);
@@ -1145,17 +1145,17 @@ get_all_loop_exits (basic_block bb)
if (in_current_region_p (e->dest))
{
VEC(edge, heap) *next_exits = get_all_loop_exits (e->dest);
-
+
if (next_exits)
{
int j;
edge ne;
-
+
/* Add all loop exits for the current edge into the
resulting vector. */
for (j = 0; VEC_iterate (edge, next_exits, j, ne); j++)
VEC_safe_push (edge, heap, exits, ne);
-
+
/* Remove the original edge. */
VEC_ordered_remove (edge, exits, i);
@@ -1181,7 +1181,7 @@ get_all_loop_exits (basic_block bb)
/* Include successors that are outside of the current region. */
#define SUCCS_OUT (4)
-/* When pipelining of the outer loops is enabled, skip innermost loops
+/* When pipelining of the outer loops is enabled, skip innermost loops
to their exits. */
#define SUCCS_SKIP_TO_LOOP_EXITS (8)
@@ -1244,7 +1244,7 @@ _succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
}
else
{
- while (1)
+ while (1)
{
edge e_tmp = NULL;
@@ -1253,12 +1253,12 @@ _succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
{
do
{
- VEC_iterate (edge, ip->loop_exits,
+ VEC_iterate (edge, ip->loop_exits,
ip->current_exit, e_tmp);
ip->current_exit++;
}
while (e_tmp && !check (e_tmp, ip));
-
+
if (!e_tmp)
VEC_free (edge, heap, ip->loop_exits);
}
@@ -1278,8 +1278,8 @@ _succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
/* Consider bb as a possible loop header. */
if ((ip->flags & SUCCS_SKIP_TO_LOOP_EXITS)
&& flag_sel_sched_pipelining_outer_loops
- && (!in_current_region_p (bb)
- || BLOCK_TO_BB (ip->bb->index)
+ && (!in_current_region_p (bb)
+ || BLOCK_TO_BB (ip->bb->index)
< BLOCK_TO_BB (bb->index)))
{
/* Get all loop exits recursively. */
@@ -1288,7 +1288,7 @@ _succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
if (ip->loop_exits)
{
ip->current_exit = 0;
- /* Move the iterator now, because we won't do
+ /* Move the iterator now, because we won't do
succ_iter_next until loop exits will end. */
ei_next (&(ip->ei));
break;
@@ -1390,8 +1390,8 @@ _eligible_successor_edge_p (edge e1, succ_iterator *ip)
bb = nbb;
continue;
}
-
- if (!in_current_region_p (bb)
+
+ if (!in_current_region_p (bb)
&& !(flags & SUCCS_OUT))
return false;
@@ -1401,22 +1401,22 @@ _eligible_successor_edge_p (edge e1, succ_iterator *ip)
e2 = EDGE_SUCC (bb, 0);
bb = e2->dest;
}
-
+
/* Save the second edge for later checks. */
ip->e2 = e2;
if (in_current_region_p (bb))
{
- /* BLOCK_TO_BB sets topological order of the region here.
- It is important to use real predecessor here, which is ip->bb,
- as we may well have e1->src outside current region,
+ /* BLOCK_TO_BB sets topological order of the region here.
+ It is important to use real predecessor here, which is ip->bb,
+ as we may well have e1->src outside current region,
when skipping to loop exits. */
bool succeeds_in_top_order = (BLOCK_TO_BB (ip->bb->index)
< BLOCK_TO_BB (bb->index));
/* This is true for the all cases except the last one. */
ip->current_flags = SUCCS_NORMAL;
-
+
/* We are advancing forward in the region, as usual. */
if (succeeds_in_top_order)
{
@@ -1426,9 +1426,9 @@ _eligible_successor_edge_p (edge e1, succ_iterator *ip)
return !!(flags & SUCCS_NORMAL);
}
- /* This is a back edge. During pipelining we ignore back edges,
+ /* This is a back edge. During pipelining we ignore back edges,
but only when it leads to the same loop. It can lead to the header
- of the outer loop, which will also be the preheader of
+ of the outer loop, which will also be the preheader of
the current loop. */
if (pipelining_p
&& e1->src->loop_father == bb->loop_father)
@@ -1463,12 +1463,12 @@ bb_next_bb (basic_block bb)
case 0:
return bb->next_bb;
- case 1:
+ case 1:
return single_succ (bb);
case 2:
return FALLTHRU_EDGE (bb)->dest;
-
+
default:
return bb->next_bb;
}
@@ -1536,11 +1536,11 @@ extern void merge_expr_data (expr_t, expr_t, insn_t);
extern void merge_expr (expr_t, expr_t, insn_t);
extern void clear_expr (expr_t);
extern unsigned expr_dest_regno (expr_t);
-extern rtx expr_dest_reg (expr_t);
-extern int find_in_history_vect (VEC(expr_history_def, heap) *,
+extern rtx expr_dest_reg (expr_t);
+extern int find_in_history_vect (VEC(expr_history_def, heap) *,
rtx, vinsn_t, bool);
-extern void insert_in_history_vect (VEC(expr_history_def, heap) **,
- unsigned, enum local_trans_type,
+extern void insert_in_history_vect (VEC(expr_history_def, heap) **,
+ unsigned, enum local_trans_type,
vinsn_t, vinsn_t, ds_t);
extern void mark_unavailable_targets (av_set_t, av_set_t, regset);
extern int speculate_expr (expr_t, ds_t);
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 05061c7ed71..e5ebc57387b 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -56,7 +56,7 @@ along with GCC; see the file COPYING3. If not see
The below implementation follows the original approach with the following
changes:
- o the scheduler works after register allocation (but can be also tuned
+ o the scheduler works after register allocation (but can be also tuned
to work before RA);
o some instructions are not copied or register renamed;
o conditional jumps are not moved with code duplication;
@@ -69,67 +69,67 @@ along with GCC; see the file COPYING3. If not see
Terminology
===========
- A vinsn, or virtual insn, is an insn with additional data characterizing
- insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
- Vinsns also act as smart pointers to save memory by reusing them in
+ A vinsn, or virtual insn, is an insn with additional data characterizing
+ insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
+ Vinsns also act as smart pointers to save memory by reusing them in
different expressions. A vinsn is described by vinsn_t type.
An expression is a vinsn with additional data characterizing its properties
- at some point in the control flow graph. The data may be its usefulness,
+ at some point in the control flow graph. The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
An expression is described by expr_t type.
- Availability set (av_set) is a set of expressions at a given control flow
+ Availability set (av_set) is a set of expressions at a given control flow
point. It is represented as av_set_t. The expressions in av sets are kept
- sorted in the terms of expr_greater_p function. It allows to truncate
+ sorted in the terms of expr_greater_p function. It allows to truncate
the set while leaving the best expressions.
-
+
A fence is a point through which code motion is prohibited. On each step,
we gather a parallel group of insns at a fence. It is possible to have
multiple fences. A fence is represented via fence_t.
A boundary is the border between the fence group and the rest of the code.
Currently, we never have more than one boundary per fence, as we finalize
- the fence group when a jump is scheduled. A boundary is represented
+ the fence group when a jump is scheduled. A boundary is represented
via bnd_t.
High-level overview
===================
The scheduler finds regions to schedule, schedules each one, and finalizes.
- The regions are formed starting from innermost loops, so that when the inner
+ The regions are formed starting from innermost loops, so that when the inner
loop is pipelined, its prologue can be scheduled together with yet unprocessed
- outer loop. The rest of acyclic regions are found using extend_rgns:
+ outer loop. The rest of acyclic regions are found using extend_rgns:
the blocks that are not yet allocated to any regions are traversed in top-down
- order, and a block is added to a region to which all its predecessors belong;
+ order, and a block is added to a region to which all its predecessors belong;
otherwise, the block starts its own region.
The main scheduling loop (sel_sched_region_2) consists of just
scheduling on each fence and updating fences. For each fence,
we fill a parallel group of insns (fill_insns) until some insns can be added.
- First, we compute available exprs (av-set) at the boundary of the current
- group. Second, we choose the best expression from it. If the stall is
+ First, we compute available exprs (av-set) at the boundary of the current
+ group. Second, we choose the best expression from it. If the stall is
required to schedule any of the expressions, we advance the current cycle
- appropriately. So, the final group does not exactly correspond to a VLIW
+ appropriately. So, the final group does not exactly correspond to a VLIW
word. Third, we move the chosen expression to the boundary (move_op)
and update the intermediate av sets and liveness sets. We quit fill_insns
when either no insns left for scheduling or we have scheduled enough insns
- so we feel like advancing a scheduling point.
+ so we feel like advancing a scheduling point.
Computing available expressions
===============================
The computation (compute_av_set) is a bottom-up traversal. At each insn,
- we're moving the union of its successors' sets through it via
- moveup_expr_set. The dependent expressions are removed. Local
- transformations (substitution, speculation) are applied to move more
+ we're moving the union of its successors' sets through it via
+ moveup_expr_set. The dependent expressions are removed. Local
+ transformations (substitution, speculation) are applied to move more
exprs. Then the expr corresponding to the current insn is added.
The result is saved on each basic block header.
When traversing the CFG, we're moving down for no more than max_ws insns.
Also, we do not move down to ineligible successors (is_ineligible_successor),
which include moving along a back-edge, moving to already scheduled code,
- and moving to another fence. The first two restrictions are lifted during
+ and moving to another fence. The first two restrictions are lifted during
pipelining, which allows us to move insns along a back-edge. We always have
an acyclic region for scheduling because we forbid motion through fences.
@@ -138,11 +138,11 @@ along with GCC; see the file COPYING3. If not see
We sort the final availability set via sel_rank_for_schedule, then we remove
expressions which are not yet ready (tick_check_p) or which dest registers
- cannot be used. For some of them, we choose another register via
- find_best_reg. To do this, we run find_used_regs to calculate the set of
+ cannot be used. For some of them, we choose another register via
+ find_best_reg. To do this, we run find_used_regs to calculate the set of
registers which cannot be used. The find_used_regs function performs
a traversal of code motion paths for an expr. We consider for renaming
- only registers which are from the same regclass as the original one and
+ only registers which are from the same regclass as the original one and
using which does not interfere with any live ranges. Finally, we convert
the resulting set to the ready list format and use max_issue and reorder*
hooks similarly to the Haifa scheduler.
@@ -150,22 +150,22 @@ along with GCC; see the file COPYING3. If not see
Scheduling the best expression
==============================
- We run the move_op routine to perform the same type of code motion paths
+ We run the move_op routine to perform the same type of code motion paths
traversal as in find_used_regs. (These are working via the same driver,
code_motion_path_driver.) When moving down the CFG, we look for original
- instruction that gave birth to a chosen expression. We undo
+ instruction that gave birth to a chosen expression. We undo
the transformations performed on an expression via the history saved in it.
- When found, we remove the instruction or leave a reg-reg copy/speculation
- check if needed. On a way up, we insert bookkeeping copies at each join
- point. If a copy is not needed, it will be removed later during this
+ When found, we remove the instruction or leave a reg-reg copy/speculation
+ check if needed. On a way up, we insert bookkeeping copies at each join
+ point. If a copy is not needed, it will be removed later during this
traversal. We update the saved av sets and liveness sets on the way up, too.
Finalizing the schedule
=======================
- When pipelining, we reschedule the blocks from which insns were pipelined
- to get a tighter schedule. On Itanium, we also perform bundling via
- the same routine from ia64.c.
+ When pipelining, we reschedule the blocks from which insns were pipelined
+ to get a tighter schedule. On Itanium, we also perform bundling via
+ the same routine from ia64.c.
Dependence analysis changes
===========================
@@ -173,30 +173,30 @@ along with GCC; see the file COPYING3. If not see
We augmented the sched-deps.c with hooks that get called when a particular
dependence is found in a particular part of an insn. Using these hooks, we
can do several actions such as: determine whether an insn can be moved through
- another (has_dependence_p, moveup_expr); find out whether an insn can be
- scheduled on the current cycle (tick_check_p); find out registers that
- are set/used/clobbered by an insn and find out all the strange stuff that
- restrict its movement, like SCHED_GROUP_P or CANT_MOVE (done in
+ another (has_dependence_p, moveup_expr); find out whether an insn can be
+ scheduled on the current cycle (tick_check_p); find out registers that
+ are set/used/clobbered by an insn and find out all the strange stuff that
+ restrict its movement, like SCHED_GROUP_P or CANT_MOVE (done in
init_global_and_expr_for_insn).
Initialization changes
======================
- There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
+ There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
reused in all of the schedulers. We have split up the initialization of data
- of such parts into different functions prefixed with scheduler type and
+ of such parts into different functions prefixed with scheduler type and
postfixed with the type of data initialized: {,sel_,haifa_}sched_{init,finish},
sched_rgn_init/finish, sched_deps_init/finish, sched_init_{luids/bbs}, etc.
- The same splitting is done with current_sched_info structure:
- dependence-related parts are in sched_deps_info, common part is in
+ The same splitting is done with current_sched_info structure:
+ dependence-related parts are in sched_deps_info, common part is in
common_sched_info, and haifa/sel/etc part is in current_sched_info.
-
+
Target contexts
===============
As we now have multiple-point scheduling, this would not work with backends
- which save some of the scheduler state to use it in the target hooks.
- For this purpose, we introduce a concept of target contexts, which
+ which save some of the scheduler state to use it in the target hooks.
+ For this purpose, we introduce a concept of target contexts, which
encapsulate such information. The backend should implement simple routines
of allocating/freeing/setting such a context. The scheduler calls these
as target hooks and handles the target context as an opaque pointer (similar
@@ -206,29 +206,29 @@ along with GCC; see the file COPYING3. If not see
================
As the correct data dependence graph is not supported during scheduling (which
- is to be changed in mid-term), we cache as much of the dependence analysis
- results as possible to avoid reanalyzing. This includes: bitmap caches on
- each insn in stream of the region saying yes/no for a query with a pair of
+ is to be changed in mid-term), we cache as much of the dependence analysis
+ results as possible to avoid reanalyzing. This includes: bitmap caches on
+ each insn in stream of the region saying yes/no for a query with a pair of
UIDs; hashtables with the previously done transformations on each insn in
stream; a vector keeping a history of transformations on each expr.
Also, we try to minimize the dependence context used on each fence to check
whether the given expression is ready for scheduling by removing from it
- insns that are definitely completed the execution. The results of
+ insns that are definitely completed the execution. The results of
tick_check_p checks are also cached in a vector on each fence.
- We keep a valid liveness set on each insn in a region to avoid the high
+ We keep a valid liveness set on each insn in a region to avoid the high
cost of recomputation on large basic blocks.
Finally, we try to minimize the number of needed updates to the availability
- sets. The updates happen in two cases: when fill_insns terminates,
+ sets. The updates happen in two cases: when fill_insns terminates,
we advance all fences and increase the stage number to show that the region
has changed and the sets are to be recomputed; and when the next iteration
of a loop in fill_insns happens (but this one reuses the saved av sets
on bb headers.) Thus, we try to break the fill_insns loop only when
"significant" number of insns from the current scheduling window was
scheduled. This should be made a target param.
-
+
TODO: correctly support the data dependence graph at all stages and get rid
of all caches. This should speed up the scheduler.
@@ -238,17 +238,17 @@ along with GCC; see the file COPYING3. If not see
References:
S.-M. Moon and K. Ebcioglu. Parallelizing nonnumerical code with
- selective scheduling and software pipelining.
- ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997.
+ selective scheduling and software pipelining.
+ ACM TOPLAS, Vol 19, No. 6, pages 853--898, Nov. 1997.
- Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
- and Dmitry Zhurikhin. An interblock VLIW-targeted instruction scheduler
+ Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
+ and Dmitry Zhurikhin. An interblock VLIW-targeted instruction scheduler
for GCC. In Proceedings of GCC Developers' Summit 2006.
- Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik. GCC Instruction
+ Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik. GCC Instruction
Scheduler and Software Pipeliner on the Itanium Platform. EPIC-7 Workshop.
http://rogue.colorado.edu/EPIC7/.
-
+
*/
/* True when pipelining is enabled. */
@@ -264,19 +264,19 @@ int max_insns_to_rename;
/* Definitions of local types and macros. */
/* Represents possible outcomes of moving an expression through an insn. */
-enum MOVEUP_EXPR_CODE
- {
+enum MOVEUP_EXPR_CODE
+ {
/* The expression is not changed. */
- MOVEUP_EXPR_SAME,
+ MOVEUP_EXPR_SAME,
/* Not changed, but requires a new destination register. */
- MOVEUP_EXPR_AS_RHS,
+ MOVEUP_EXPR_AS_RHS,
/* Cannot be moved. */
- MOVEUP_EXPR_NULL,
+ MOVEUP_EXPR_NULL,
/* Changed (substituted or speculated). */
- MOVEUP_EXPR_CHANGED
+ MOVEUP_EXPR_CHANGED
};
/* The container to be passed into rtx search & replace functions. */
@@ -291,11 +291,11 @@ struct rtx_search_arg
typedef struct rtx_search_arg *rtx_search_arg_p;
-/* This struct contains precomputed hard reg sets that are needed when
+/* This struct contains precomputed hard reg sets that are needed when
computing registers available for renaming. */
-struct hard_regs_data
+struct hard_regs_data
{
- /* For every mode, this stores registers available for use with
+ /* For every mode, this stores registers available for use with
that mode. */
HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];
@@ -307,7 +307,7 @@ struct hard_regs_data
that the whole set is not computed yet. */
HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];
- /* For every mode, this stores registers not available due to
+ /* For every mode, this stores registers not available due to
call clobbering. */
HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES];
@@ -334,16 +334,16 @@ struct reg_rename
bool crosses_call;
};
-/* A global structure that contains the needed information about harg
+/* A global structure that contains the needed information about harg
regs. */
static struct hard_regs_data sel_hrd;
-/* This structure holds local data used in code_motion_path_driver hooks on
- the same or adjacent levels of recursion. Here we keep those parameters
- that are not used in code_motion_path_driver routine itself, but only in
- its hooks. Moreover, all parameters that can be modified in hooks are
- in this structure, so all other parameters passed explicitly to hooks are
+/* This structure holds local data used in code_motion_path_driver hooks on
+ the same or adjacent levels of recursion. Here we keep those parameters
+ that are not used in code_motion_path_driver routine itself, but only in
+ its hooks. Moreover, all parameters that can be modified in hooks are
+ in this structure, so all other parameters passed explicitly to hooks are
read-only. */
struct cmpd_local_params
{
@@ -361,7 +361,7 @@ struct cmpd_local_params
def_list_t old_original_insns;
/* Local params used in move_op_* functions. */
- /* True when we have removed last insn in the block which was
+ /* True when we have removed last insn in the block which was
also a boundary. Do not update anything or create bookkeeping copies. */
BOOL_BITFIELD removed_last_insn : 1;
};
@@ -430,11 +430,11 @@ struct code_motion_path_driver_info_def
Used only with move_op_*. */
void (*ascend) (insn_t, void *);
- /* Called on the ascending pass, before returning from the current basic
+ /* Called on the ascending pass, before returning from the current basic
block or from the whole traversal. */
void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);
- /* When processing successors in move_op we need only descend into
+ /* When processing successors in move_op we need only descend into
SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL. */
int succ_flags;
@@ -450,8 +450,8 @@ struct code_motion_path_driver_info_def *code_motion_path_driver_info;
code_motion_path_driver. */
extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;
-/* True if/when we want to emulate Haifa scheduler in the common code.
- This is used in sched_rgn_local_init and in various places in
+/* True if/when we want to emulate Haifa scheduler in the common code.
+ This is used in sched_rgn_local_init and in various places in
sched-deps.c. */
int sched_emulate_haifa_p;
@@ -469,9 +469,9 @@ static bool enable_schedule_as_rhs_p;
/* Used in verify_target_availability to assert that target reg is reported
   unavailable by both TARGET_UNAVAILABLE and find_used_regs only if
- we haven't scheduled anything on the previous fence.
+ we haven't scheduled anything on the previous fence.
if scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
- have more conservative value than the one returned by the
+ have more conservative value than the one returned by the
find_used_regs, thus we shouldn't assert that these values are equal. */
static bool scheduled_something_on_previous_fence;
@@ -518,7 +518,7 @@ typedef VEC(vinsn_t, heap) *vinsn_vec_t;
for the detailed explanations. */
static vinsn_vec_t vec_bookkeeping_blocked_vinsns = NULL;
-/* This vector has vinsns which are scheduled with renaming on the first fence
+/* This vector has vinsns which are scheduled with renaming on the first fence
and then seen on the second. For expressions with such vinsns, target
availability information may be wrong. */
static vinsn_vec_t vec_target_unavailable_vinsns = NULL;
@@ -529,8 +529,8 @@ DEF_VEC_P(insn_t);
DEF_VEC_ALLOC_P(insn_t,heap);
static VEC(insn_t, heap) *vec_temp_moveop_nops = NULL;
-/* These bitmaps record original instructions scheduled on the current
- iteration and bookkeeping copies created by them. */
+/* These bitmaps record original instructions scheduled on the current
+ iteration and bookkeeping copies created by them. */
static bitmap current_originators = NULL;
static bitmap current_copies = NULL;
@@ -561,7 +561,7 @@ static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);
static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
-static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
+static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
@@ -582,7 +582,7 @@ advance_one_cycle (fence_t fence)
unsigned i;
int cycle;
rtx insn;
-
+
advance_state (FENCE_STATE (fence));
cycle = ++FENCE_CYCLE (fence);
FENCE_ISSUED_INSNS (fence) = 0;
@@ -627,7 +627,7 @@ in_fallthru_bb_p (rtx insn, rtx succ)
return bb == BLOCK_FOR_INSN (succ);
}
-/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
+/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
When a successor will continue a ebb, transfer all parameters of a fence
to the new fence. ORIG_MAX_SEQNO is the maximal seqno before this round
of scheduling helping to distinguish between the old and the new code. */
@@ -651,20 +651,20 @@ extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
}
gcc_assert (was_here_p && insn != NULL_RTX);
- /* When in the "middle" of the block, just move this fence
+ /* When in the "middle" of the block, just move this fence
to the new list. */
bb = BLOCK_FOR_INSN (insn);
if (! sel_bb_end_p (insn)
- || (single_succ_p (bb)
+ || (single_succ_p (bb)
&& single_pred_p (single_succ (bb))))
{
insn_t succ;
- succ = (sel_bb_end_p (insn)
+ succ = (sel_bb_end_p (insn)
? sel_bb_head (single_succ (bb))
: NEXT_INSN (insn));
- if (INSN_SEQNO (succ) > 0
+ if (INSN_SEQNO (succ) > 0
&& INSN_SEQNO (succ) <= orig_max_seqno
&& INSN_SCHED_TIMES (succ) <= 0)
{
@@ -672,7 +672,7 @@ extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
move_fence_to_fences (old_fences, new_fences);
if (sched_verbose >= 1)
- sel_print ("Fence %d continues as %d[%d] (state continue)\n",
+ sel_print ("Fence %d continues as %d[%d] (state continue)\n",
INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
}
return;
@@ -687,11 +687,11 @@ extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
&& (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
{
bool b = (in_same_ebb_p (insn, succ)
- || in_fallthru_bb_p (insn, succ));
+ || in_fallthru_bb_p (insn, succ));
if (sched_verbose >= 1)
- sel_print ("Fence %d continues as %d[%d] (state %s)\n",
- INSN_UID (insn), INSN_UID (succ),
+ sel_print ("Fence %d continues as %d[%d] (state %s)\n",
+ INSN_UID (insn), INSN_UID (succ),
BLOCK_NUM (succ), b ? "continue" : "reset");
if (b)
@@ -709,8 +709,8 @@ extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
/* Functions to support substitution. */
-/* Returns whether INSN with dependence status DS is eligible for
- substitution, i.e. it's a copy operation x := y, and RHS that is
+/* Returns whether INSN with dependence status DS is eligible for
+ substitution, i.e. it's a copy operation x := y, and RHS that is
moved up through this insn should be substituted. */
static bool
can_substitute_through_p (insn_t insn, ds_t ds)
@@ -722,19 +722,19 @@ can_substitute_through_p (insn_t insn, ds_t ds)
|| ! INSN_LHS (insn))
return false;
- /* Now we just need to make sure the INSN_RHS consists of only one
+ /* Now we just need to make sure the INSN_RHS consists of only one
simple REG rtx. */
- if (REG_P (INSN_LHS (insn))
+ if (REG_P (INSN_LHS (insn))
&& REG_P (INSN_RHS (insn)))
- return true;
+ return true;
return false;
}
-/* Substitute all occurences of INSN's destination in EXPR' vinsn with INSN's
+/* Substitute all occurences of INSN's destination in EXPR' vinsn with INSN's
source (if INSN is eligible for substitution). Returns TRUE if
substitution was actually performed, FALSE otherwise. Substitution might
be not performed because it's either EXPR' vinsn doesn't contain INSN's
- destination or the resulting insn is invalid for the target machine.
+ destination or the resulting insn is invalid for the target machine.
When UNDO is true, perform unsubstitution instead (the difference is in
the part of rtx on which validate_replace_rtx is called). */
static bool
@@ -747,9 +747,9 @@ substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
rtx old, new_rtx;
/* Do not try to replace in SET_DEST. Although we'll choose new
- register for the RHS, we don't want to change RHS' original reg.
+ register for the RHS, we don't want to change RHS' original reg.
If the insn is not SET, we may still be able to substitute something
- in it, and if we're here (don't have deps), it doesn't write INSN's
+ in it, and if we're here (don't have deps), it doesn't write INSN's
dest. */
where = (has_rhs
? &VINSN_RHS (*vi)
@@ -766,32 +766,32 @@ substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));
- /* Where we'll replace.
+ /* Where we'll replace.
WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
used instead of SET_SRC. */
where_replace = (has_rhs
? &SET_SRC (PATTERN (new_insn))
: &PATTERN (new_insn));
- new_insn_valid
- = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
+ new_insn_valid
+ = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
new_insn);
/* ??? Actually, constrain_operands result depends upon choice of
destination register. E.g. if we allow single register to be an rhs,
- and if we try to move dx=ax(as rhs) through ax=dx, we'll result
+ and if we try to move dx=ax(as rhs) through ax=dx, we'll result
in invalid insn dx=dx, so we'll loose this rhs here.
Just can't come up with significant testcase for this, so just
leaving it for now. */
if (new_insn_valid)
{
- change_vinsn_in_expr (expr,
+ change_vinsn_in_expr (expr,
create_vinsn_from_insn_rtx (new_insn, false));
- /* Do not allow clobbering the address register of speculative
+ /* Do not allow clobbering the address register of speculative
insns. */
if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
- && bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
+ && bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
expr_dest_regno (expr)))
EXPR_TARGET_AVAILABLE (expr) = false;
@@ -805,7 +805,7 @@ substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
}
/* Helper function for count_occurences_equiv. */
-static int
+static int
count_occurrences_1 (rtx *cur_rtx, void *arg)
{
rtx_search_arg_p p = (rtx_search_arg_p) arg;
@@ -838,7 +838,7 @@ count_occurrences_1 (rtx *cur_rtx, void *arg)
&& REGNO (SUBREG_REG (*cur_rtx)) == REGNO (p->x))
{
/* ??? Do not support substituting regs inside subregs. In that case,
- simplify_subreg will be called by validate_replace_rtx, and
+ simplify_subreg will be called by validate_replace_rtx, and
unsubstitution will fail later. */
p->n = 0;
return 1;
@@ -848,9 +848,9 @@ count_occurrences_1 (rtx *cur_rtx, void *arg)
return 0;
}
-/* Return the number of places WHAT appears within WHERE.
+/* Return the number of places WHAT appears within WHERE.
Bail out when we found a reference occupying several hard registers. */
-static int
+static int
count_occurrences_equiv (rtx what, rtx where)
{
struct rtx_search_arg arg;
@@ -890,10 +890,10 @@ create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
return insn_rtx;
}
-/* Returns whether INSN's src can be replaced with register number
+/* Returns whether INSN's src can be replaced with register number
NEW_SRC_REG. E.g. the following insn is valid for i386:
- (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
+ (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
(set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
(reg:SI 0 ax [orig:770 c1 ] [770]))
(const_int 288 [0x120])) [0 str S1 A8])
@@ -901,14 +901,14 @@ create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
(nil))
But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
- because of operand constraints:
+ because of operand constraints:
(define_insn "*movqi_1"
[(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
(match_operand:QI 1 "general_operand" " q,qn,qm,q,rn,qm,qn")
)]
-
- So do constrain_operands here, before choosing NEW_SRC_REG as best
+
+ So do constrain_operands here, before choosing NEW_SRC_REG as best
reg for rhs. */
static bool
@@ -971,7 +971,7 @@ create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
return insn_rtx;
}
-/* Substitute lhs in the given expression EXPR for the register with number
+/* Substitute lhs in the given expression EXPR for the register with number
NEW_REGNO. SET_DEST may be arbitrary rtx, not only register. */
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
@@ -990,7 +990,7 @@ replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
/* Returns whether VI writes either one of the USED_REGS registers or,
if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers. */
static bool
-vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
+vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
HARD_REG_SET unavailable_hard_regs)
{
unsigned regno;
@@ -1017,10 +1017,10 @@ vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
return false;
}
-/* Returns register class of the output register in INSN.
+/* Returns register class of the output register in INSN.
Returns NO_REGS for call insns because some targets have constraints on
destination register of a call insn.
-
+
Code adopted from regrename.c::build_def_use. */
static enum reg_class
get_reg_class (rtx insn)
@@ -1063,7 +1063,7 @@ get_reg_class (rtx insn)
{
int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
enum reg_class cl = recog_op_alt[opn][alt].cl;
-
+
if (recog_data.operand_type[opn] == OP_OUT ||
recog_data.operand_type[opn] == OP_INOUT)
return cl;
@@ -1098,7 +1098,7 @@ init_hard_regno_rename (int regno)
}
#endif
-/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
+/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
data first. */
static inline bool
sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
@@ -1121,7 +1121,7 @@ static void
init_regs_for_mode (enum machine_mode mode)
{
int cur_reg;
-
+
CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]);
@@ -1129,11 +1129,11 @@ init_regs_for_mode (enum machine_mode mode)
{
int nregs = hard_regno_nregs[cur_reg][mode];
int i;
-
+
for (i = nregs - 1; i >= 0; --i)
if (fixed_regs[cur_reg + i]
|| global_regs[cur_reg + i]
- /* Can't use regs which aren't saved by
+ /* Can't use regs which aren't saved by
the prologue. */
|| !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
#ifdef LEAF_REGISTERS
@@ -1144,20 +1144,20 @@ init_regs_for_mode (enum machine_mode mode)
#endif
)
break;
-
- if (i >= 0)
+
+ if (i >= 0)
continue;
-
+
/* See whether it accepts all modes that occur in
original insns. */
if (! HARD_REGNO_MODE_OK (cur_reg, mode))
continue;
-
+
if (HARD_REGNO_CALL_PART_CLOBBERED (cur_reg, mode))
- SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
+ SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
cur_reg);
-
- /* If the CUR_REG passed all the checks above,
+
+ /* If the CUR_REG passed all the checks above,
then it's ok. */
SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
}
@@ -1176,12 +1176,12 @@ init_hard_regs_data (void)
for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg])
SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);
-
- /* Initialize registers that are valid based on mode when this is
+
+ /* Initialize registers that are valid based on mode when this is
really needed. */
for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
sel_hrd.regs_for_mode_ok[cur_mode] = false;
-
+
/* Mark that all HARD_REGNO_RENAME_OK is not calculated. */
for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);
@@ -1192,9 +1192,9 @@ init_hard_regs_data (void)
for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
#endif
-}
+}
-/* Mark hardware regs in REG_RENAME_P that are not suitable
+/* Mark hardware regs in REG_RENAME_P that are not suitable
for renaming rhs in INSN due to hardware restrictions (register class,
modes compatibility etc). This doesn't affect original insn's dest reg,
if it isn't in USED_REGS. DEF is a definition insn of rhs for which the
@@ -1216,7 +1216,7 @@ mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
gcc_assert (reg_rename_p);
orig_dest = SET_DEST (PATTERN (def->orig_insn));
-
+
/* We have decided not to rename 'mem = something;' insns, as 'something'
is usually a register. */
if (!REG_P (orig_dest))
@@ -1230,10 +1230,10 @@ mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
mode = GET_MODE (orig_dest);
- /* Stop when mode is not supported for renaming. Also can't proceed
- if the original register is one of the fixed_regs, global_regs or
+ /* Stop when mode is not supported for renaming. Also can't proceed
+ if the original register is one of the fixed_regs, global_regs or
frame pointer. */
- if (fixed_regs[regno]
+ if (fixed_regs[regno]
|| global_regs[regno]
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
|| (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM)
@@ -1252,19 +1252,19 @@ mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
}
/* If something allocated on stack in this function, mark frame pointer
- register unavailable, considering also modes.
+ register unavailable, considering also modes.
FIXME: it is enough to do this once per all original defs. */
if (frame_pointer_needed)
{
int i;
for (i = hard_regno_nregs[FRAME_POINTER_REGNUM][Pmode]; i--;)
- SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
+ SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
FRAME_POINTER_REGNUM + i);
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
for (i = hard_regno_nregs[HARD_FRAME_POINTER_REGNUM][Pmode]; i--;)
- SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
+ SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
HARD_FRAME_POINTER_REGNUM + i);
#endif
}
@@ -1273,27 +1273,27 @@ mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
/* For the stack registers the presence of FIRST_STACK_REG in USED_REGS
is equivalent to as if all stack regs were in this set.
I.e. no stack register can be renamed, and even if it's an original
- register here we make sure it won't be lifted over it's previous def
- (it's previous def will appear as if it's a FIRST_STACK_REG def.
+ register here we make sure it won't be lifted over it's previous def
+ (it's previous def will appear as if it's a FIRST_STACK_REG def.
The HARD_REGNO_RENAME_OK covers other cases in condition below. */
if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
- && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
- IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
+ && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
+ IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
sel_hrd.stack_regs);
-#endif
+#endif
- /* If there's a call on this path, make regs from call_used_reg_set
+ /* If there's a call on this path, make regs from call_used_reg_set
unavailable. */
if (def->crosses_call)
- IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
+ IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
call_used_reg_set);
- /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
+ /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
but not register classes. */
if (!reload_completed)
return;
- /* Leave regs as 'available' only from the current
+ /* Leave regs as 'available' only from the current
register class. */
cl = get_reg_class (def->orig_insn);
gcc_assert (cl != NO_REGS);
@@ -1303,13 +1303,13 @@ mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
/* Leave only registers available for this mode. */
if (!sel_hrd.regs_for_mode_ok[mode])
init_regs_for_mode (mode);
- AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
+ AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
sel_hrd.regs_for_mode[mode]);
/* Exclude registers that are partially call clobbered. */
if (def->crosses_call
&& ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
- AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
+ AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
sel_hrd.regs_for_call_clobbered[mode]);
/* Leave only those that are ok to rename. */
@@ -1326,12 +1326,12 @@ mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i))
break;
- if (i >= 0)
- CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
+ if (i >= 0)
+ CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
cur_reg);
}
- AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
+ AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
reg_rename_p->unavailable_hard_regs);
/* Regno is always ok from the renaming part of view, but it really
@@ -1347,24 +1347,24 @@ static int reg_rename_tick[FIRST_PSEUDO_REGISTER];
/* Indicates the number of times renaming happened before the current one. */
static int reg_rename_this_tick;
-/* Choose the register among free, that is suitable for storing
+/* Choose the register among free, that is suitable for storing
the rhs value.
ORIGINAL_INSNS is the list of insns where the operation (rhs)
- originally appears. There could be multiple original operations
- for single rhs since we moving it up and merging along different
+ originally appears. There could be multiple original operations
+ for single rhs since we moving it up and merging along different
paths.
Some code is adapted from regrename.c (regrename_optimize).
If original register is available, function returns it.
Otherwise it performs the checks, so the new register should
comply with the following:
- - it should not violate any live ranges (such registers are in
+ - it should not violate any live ranges (such registers are in
REG_RENAME_P->available_for_renaming set);
- it should not be in the HARD_REGS_USED regset;
- it should be in the class compatible with original uses;
- it should not be clobbered through reference with different mode;
- - if we're in the leaf function, then the new register should
+ - if we're in the leaf function, then the new register should
not be in the LEAF_REGISTERS;
- etc.
@@ -1375,8 +1375,8 @@ static int reg_rename_this_tick;
If no register satisfies the above conditions, NULL_RTX is returned. */
static rtx
-choose_best_reg_1 (HARD_REG_SET hard_regs_used,
- struct reg_rename *reg_rename_p,
+choose_best_reg_1 (HARD_REG_SET hard_regs_used,
+ struct reg_rename *reg_rename_p,
def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
int best_new_reg;
@@ -1396,9 +1396,9 @@ choose_best_reg_1 (HARD_REG_SET hard_regs_used,
gcc_assert (REG_P (orig_dest));
- /* Check that all original operations have the same mode.
+ /* Check that all original operations have the same mode.
This is done for the next loop; if we'd return from this
- loop, we'd check only part of them, but in this case
+ loop, we'd check only part of them, but in this case
it doesn't matter. */
if (mode == VOIDmode)
mode = GET_MODE (orig_dest);
@@ -1413,16 +1413,16 @@ choose_best_reg_1 (HARD_REG_SET hard_regs_used,
if (i == n)
{
gcc_assert (mode != VOIDmode);
-
+
/* Hard registers should not be shared. */
return gen_rtx_REG (mode, regno);
}
}
-
+
*is_orig_reg_p_ptr = false;
best_new_reg = -1;
-
- /* Among all available regs choose the register that was
+
+ /* Among all available regs choose the register that was
allocated earliest. */
EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
0, cur_reg, hrsi)
@@ -1433,7 +1433,7 @@ choose_best_reg_1 (HARD_REG_SET hard_regs_used,
|| reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg])
{
best_new_reg = cur_reg;
-
+
/* Return immediately when we know there's no better reg. */
if (! reg_rename_tick[best_new_reg])
break;
@@ -1453,10 +1453,10 @@ choose_best_reg_1 (HARD_REG_SET hard_regs_used,
/* A wrapper around choose_best_reg_1 () to verify that we make correct
assumptions about available registers in the function. */
static rtx
-choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
+choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
- rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
+ rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
original_insns, is_orig_reg_p_ptr);
gcc_assert (best_reg == NULL_RTX
@@ -1465,25 +1465,25 @@ choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
return best_reg;
}
-/* Choose the pseudo register for storing rhs value. As this is supposed
+/* Choose the pseudo register for storing rhs value. As this is supposed
to work before reload, we return either the original register or make
- the new one. The parameters are the same that in choose_nest_reg_1
- functions, except that USED_REGS may contain pseudos.
+ the new one. The parameters are the same that in choose_nest_reg_1
+ functions, except that USED_REGS may contain pseudos.
If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.
- TODO: take into account register pressure while doing this. Up to this
- moment, this function would never return NULL for pseudos, but we should
+ TODO: take into account register pressure while doing this. Up to this
+ moment, this function would never return NULL for pseudos, but we should
not rely on this. */
static rtx
-choose_best_pseudo_reg (regset used_regs,
- struct reg_rename *reg_rename_p,
+choose_best_pseudo_reg (regset used_regs,
+ struct reg_rename *reg_rename_p,
def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
def_list_iterator i;
def_t def;
enum machine_mode mode = VOIDmode;
bool bad_hard_regs = false;
-
+
/* We should not use this after reload. */
gcc_assert (!reload_completed);
@@ -1494,35 +1494,35 @@ choose_best_pseudo_reg (regset used_regs,
{
rtx dest = SET_DEST (PATTERN (def->orig_insn));
int orig_regno;
-
+
gcc_assert (REG_P (dest));
-
+
/* Check that all original operations have the same mode. */
if (mode == VOIDmode)
mode = GET_MODE (dest);
else
gcc_assert (mode == GET_MODE (dest));
orig_regno = REGNO (dest);
-
+
if (!REGNO_REG_SET_P (used_regs, orig_regno))
{
if (orig_regno < FIRST_PSEUDO_REGISTER)
{
gcc_assert (df_regs_ever_live_p (orig_regno));
-
- /* For hard registers, we have to check hardware imposed
+
+ /* For hard registers, we have to check hardware imposed
limitations (frame/stack registers, calls crossed). */
- if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
+ if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
orig_regno))
{
- /* Don't let register cross a call if it doesn't already
- cross one. This condition is written in accordance with
+ /* Don't let register cross a call if it doesn't already
+ cross one. This condition is written in accordance with
that in sched-deps.c sched_analyze_reg(). */
- if (!reg_rename_p->crosses_call
+ if (!reg_rename_p->crosses_call
|| REG_N_CALLS_CROSSED (orig_regno) > 0)
- return gen_rtx_REG (mode, orig_regno);
+ return gen_rtx_REG (mode, orig_regno);
}
-
+
bad_hard_regs = true;
}
else
@@ -1531,13 +1531,13 @@ choose_best_pseudo_reg (regset used_regs,
}
*is_orig_reg_p_ptr = false;
-
+
/* We had some original hard registers that couldn't be used.
Those were likely special. Don't try to create a pseudo. */
if (bad_hard_regs)
return NULL_RTX;
-
- /* We haven't found a register from original operations. Get a new one.
+
+ /* We haven't found a register from original operations. Get a new one.
FIXME: control register pressure somehow. */
{
rtx new_reg = gen_reg_rtx (mode);
@@ -1555,7 +1555,7 @@ choose_best_pseudo_reg (regset used_regs,
/* True when target of EXPR is available due to EXPR_TARGET_AVAILABLE,
USED_REGS and REG_RENAME_P->UNAVAILABLE_HARD_REGS. */
static void
-verify_target_availability (expr_t expr, regset used_regs,
+verify_target_availability (expr_t expr, regset used_regs,
struct reg_rename *reg_rename_p)
{
unsigned n, i, regno;
@@ -1564,7 +1564,7 @@ verify_target_availability (expr_t expr, regset used_regs,
if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0)
return;
-
+
regno = expr_dest_regno (expr);
mode = GET_MODE (EXPR_LHS (expr));
target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
@@ -1579,31 +1579,31 @@ verify_target_availability (expr_t expr, regset used_regs,
hard_available = false;
}
- /* When target is not available, it may be due to hard register
+ /* When target is not available, it may be due to hard register
restrictions, e.g. crosses calls, so we check hard_available too. */
if (target_available)
gcc_assert (live_available);
else
- /* Check only if we haven't scheduled something on the previous fence,
+ /* Check only if we haven't scheduled something on the previous fence,
cause due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
and having more than one fence, we may end having targ_un in a block
- in which successors target register is actually available.
+ in which successors target register is actually available.
The last condition handles the case when a dependence from a call insn
- was created in sched-deps.c for insns with destination registers that
- never crossed a call before, but do cross one after our code motion.
+ was created in sched-deps.c for insns with destination registers that
+ never crossed a call before, but do cross one after our code motion.
- FIXME: in the latter case, we just uselessly called find_used_regs,
- because we can't move this expression with any other register
+ FIXME: in the latter case, we just uselessly called find_used_regs,
+ because we can't move this expression with any other register
as well. */
- gcc_assert (scheduled_something_on_previous_fence || !live_available
- || !hard_available
- || (!reload_completed && reg_rename_p->crosses_call
+ gcc_assert (scheduled_something_on_previous_fence || !live_available
+ || !hard_available
+ || (!reload_completed && reg_rename_p->crosses_call
&& REG_N_CALLS_CROSSED (regno) == 0));
}
-/* Collect unavailable registers due to liveness for EXPR from BNDS
- into USED_REGS. Save additional information about available
+/* Collect unavailable registers due to liveness for EXPR from BNDS
+ into USED_REGS. Save additional information about available
registers and unavailable due to hardware restriction registers
into REG_RENAME_P structure. Save original insns into ORIGINAL_INSNS
list. */
@@ -1669,8 +1669,8 @@ try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr)
return true;
}
-/* Select and assign best register to EXPR searching from BNDS.
- Set *IS_ORIG_REG_P to TRUE if original register was selected.
+/* Select and assign best register to EXPR searching from BNDS.
+ Set *IS_ORIG_REG_P to TRUE if original register was selected.
Return FALSE if no register can be chosen, which could happen when:
* EXPR_SEPARABLE_P is true but we were unable to find suitable register;
* EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers
@@ -1699,11 +1699,11 @@ find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p)
#ifdef ENABLE_CHECKING
/* If after reload, make sure we're working with hard regs here. */
- if (reload_completed)
+ if (reload_completed)
{
reg_set_iterator rsi;
unsigned i;
-
+
EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
gcc_unreachable ();
}
@@ -1876,7 +1876,7 @@ create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
thrown by the non-control-speculative load. */
check_ds = ds_get_max_dep_weak (check_ds);
speculate_expr (c_expr, check_ds);
-
+
return insn;
}
@@ -1902,7 +1902,7 @@ identical_copy_p (rtx insn)
return REGNO (lhs) == REGNO (rhs);
}
-/* Undo all transformations on *AV_PTR that were done when
+/* Undo all transformations on *AV_PTR that were done when
moving through INSN. */
static void
undo_transformations (av_set_t *av_ptr, rtx insn)
@@ -1911,19 +1911,19 @@ undo_transformations (av_set_t *av_ptr, rtx insn)
expr_t expr;
av_set_t new_set = NULL;
- /* First, kill any EXPR that uses registers set by an insn. This is
+ /* First, kill any EXPR that uses registers set by an insn. This is
required for correctness. */
FOR_EACH_EXPR_1 (expr, av_iter, av_ptr)
if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr))
- && bitmap_intersect_p (INSN_REG_SETS (insn),
+ && bitmap_intersect_p (INSN_REG_SETS (insn),
VINSN_REG_USES (EXPR_VINSN (expr)))
/* When an insn looks like 'r1 = r1', we could substitute through
it, but the above condition will still hold. This happened with
- gcc.c-torture/execute/961125-1.c. */
+ gcc.c-torture/execute/961125-1.c. */
&& !identical_copy_p (insn))
{
if (sched_verbose >= 6)
- sel_print ("Expr %d removed due to use/set conflict\n",
+ sel_print ("Expr %d removed due to use/set conflict\n",
INSN_UID (EXPR_INSN_RTX (expr)));
av_set_iter_remove (&av_iter);
}
@@ -1938,23 +1938,23 @@ undo_transformations (av_set_t *av_ptr, rtx insn)
{
expr_history_def *phist;
- phist = VEC_index (expr_history_def,
+ phist = VEC_index (expr_history_def,
EXPR_HISTORY_OF_CHANGES (expr),
index);
- switch (phist->type)
+ switch (phist->type)
{
case TRANS_SPECULATION:
{
ds_t old_ds, new_ds;
-
+
/* Compute the difference between old and new speculative
- statuses: that's what we need to check.
+ statuses: that's what we need to check.
Earlier we used to assert that the status will really
change. This no longer works because only the probability
bits in the status may have changed during compute_av_set,
- and in the case of merging different probabilities of the
- same speculative status along different paths we do not
+ and in the case of merging different probabilities of the
+ same speculative status along different paths we do not
record this in the history vector. */
old_ds = phist->spec_ds;
new_ds = EXPR_SPEC_DONE_DS (expr);
@@ -1962,7 +1962,7 @@ undo_transformations (av_set_t *av_ptr, rtx insn)
old_ds &= SPECULATIVE;
new_ds &= SPECULATIVE;
new_ds &= ~old_ds;
-
+
EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds;
break;
}
@@ -1971,14 +1971,14 @@ undo_transformations (av_set_t *av_ptr, rtx insn)
expr_def _tmp_expr, *tmp_expr = &_tmp_expr;
vinsn_t new_vi;
bool add = true;
-
+
new_vi = phist->old_expr_vinsn;
-
- gcc_assert (VINSN_SEPARABLE_P (new_vi)
+
+ gcc_assert (VINSN_SEPARABLE_P (new_vi)
== EXPR_SEPARABLE_P (expr));
copy_expr (tmp_expr, expr);
- if (vinsn_equal_p (phist->new_expr_vinsn,
+ if (vinsn_equal_p (phist->new_expr_vinsn,
EXPR_VINSN (tmp_expr)))
change_vinsn_in_expr (tmp_expr, new_vi);
else
@@ -1995,7 +1995,7 @@ undo_transformations (av_set_t *av_ptr, rtx insn)
gcc_unreachable ();
}
}
-
+
}
av_set_union_and_clear (av_ptr, &new_set, NULL);
@@ -2005,7 +2005,7 @@ undo_transformations (av_set_t *av_ptr, rtx insn)
/* Moveup_* helpers for code motion and computing av sets. */
/* Propagates EXPR inside an insn group through THROUGH_INSN.
- The difference from the below function is that only substitution is
+ The difference from the below function is that only substitution is
performed. */
static enum MOVEUP_EXPR_CODE
moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
@@ -2026,8 +2026,8 @@ moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
{
/* Can't substitute UNIQUE VINSNs. */
gcc_assert (!VINSN_UNIQUE_P (vi));
-
- if (can_substitute_through_p (through_insn,
+
+ if (can_substitute_through_p (through_insn,
has_dep_p[DEPS_IN_RHS])
&& substitute_reg_in_expr (expr, through_insn, false))
{
@@ -2043,7 +2043,7 @@ moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
/* This can catch output dependencies in COND_EXECs. */
if (has_dep_p[DEPS_IN_INSN])
return MOVEUP_EXPR_NULL;
-
+
/* This is either an output or an anti dependence, which usually have
a zero latency. Allow this here, if we'd be wrong, tick_check_p
will fix this. */
@@ -2111,11 +2111,11 @@ moving_insn_creates_bookkeeping_block_p (insn_t insn,
}
/* Modifies EXPR so it can be moved through the THROUGH_INSN,
- performing necessary transformations. Record the type of transformation
- made in PTRANS_TYPE, when it is not NULL. When INSIDE_INSN_GROUP,
+ performing necessary transformations. Record the type of transformation
+ made in PTRANS_TYPE, when it is not NULL. When INSIDE_INSN_GROUP,
permit all dependencies except true ones, and try to remove those
- too via forward substitution. All cases when a non-eliminable
- non-zero cost dependency exists inside an insn group will be fixed
+ too via forward substitution. All cases when a non-eliminable
+ non-zero cost dependency exists inside an insn group will be fixed
in tick_check_p instead. */
static enum MOVEUP_EXPR_CODE
moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
@@ -2142,7 +2142,7 @@ moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
{
basic_block fallthru_bb;
- /* Do not move checks and do not move jumps through other
+ /* Do not move checks and do not move jumps through other
jumps. */
if (control_flow_insn_p (through_insn)
|| sel_insn_is_speculation_check (insn))
@@ -2152,13 +2152,13 @@ moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
if (bookkeeping_can_be_created_if_moved_through_p (through_insn))
return MOVEUP_EXPR_NULL;
- /* The jump should have a clear fallthru block, and
+ /* The jump should have a clear fallthru block, and
this block should be in the current region. */
if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL
|| ! in_current_region_p (fallthru_bb))
return MOVEUP_EXPR_NULL;
-
- /* And it should be mutually exclusive with through_insn, or
+
+ /* And it should be mutually exclusive with through_insn, or
be an unconditional jump. */
if (! any_uncondjump_p (insn)
&& ! sched_insns_conditions_mutex_p (insn, through_insn)
@@ -2199,7 +2199,7 @@ moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
}
else
{
- /* We can move UNIQUE insn up only as a whole and unchanged,
+ /* We can move UNIQUE insn up only as a whole and unchanged,
so it shouldn't have any dependencies. */
if (VINSN_UNIQUE_P (vi))
return MOVEUP_EXPR_NULL;
@@ -2228,9 +2228,9 @@ moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
return MOVEUP_EXPR_NULL;
if (has_dep_p[DEPS_IN_LHS])
- {
+ {
/* Only separable insns can be moved up with the new register.
- Anyways, we should mark that the original register is
+ Anyways, we should mark that the original register is
unavailable. */
if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr))
return MOVEUP_EXPR_NULL;
@@ -2249,10 +2249,10 @@ moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
y = x; y = x;
z = y*2; y = y*2;
- In Ex.1 y*2 can be substituted for x*2 and the whole operation can be
+ In Ex.1 y*2 can be substituted for x*2 and the whole operation can be
moved above y=x assignment as z=x*2.
- In Ex.2 y*2 also can be substituted for x*2, but only the right hand
+ In Ex.2 y*2 also can be substituted for x*2, but only the right hand
side can be moved because of the output dependency. The operation was
cropped to its rhs above. */
if (has_dep_p[DEPS_IN_RHS])
@@ -2265,7 +2265,7 @@ moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
if (can_speculate_dep_p (*rhs_dsp))
{
int res;
-
+
res = speculate_expr (expr, *rhs_dsp);
if (res >= 0)
{
@@ -2302,24 +2302,24 @@ moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
if (CANT_MOVE_TRAPPING (expr, through_insn))
return MOVEUP_EXPR_NULL;
- return (was_changed
- ? MOVEUP_EXPR_CHANGED
- : (as_rhs
+ return (was_changed
+ ? MOVEUP_EXPR_CHANGED
+ : (as_rhs
? MOVEUP_EXPR_AS_RHS
: MOVEUP_EXPR_SAME));
}
-/* Try to look at bitmap caches for EXPR and INSN pair, return true
+/* Try to look at bitmap caches for EXPR and INSN pair, return true
if successful. When INSIDE_INSN_GROUP, also try ignore dependencies
that can exist within a parallel group. Write to RES the resulting
code for moveup_expr. */
-static bool
+static bool
try_bitmap_cache (expr_t expr, insn_t insn,
bool inside_insn_group,
enum MOVEUP_EXPR_CODE *res)
{
int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
-
+
/* First check whether we've analyzed this situation already. */
if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid))
{
@@ -2346,13 +2346,13 @@ try_bitmap_cache (expr_t expr, insn_t insn,
sel_print ("unchanged (as RHS, cached, inside insn group)\n");
*res = MOVEUP_EXPR_SAME;
return true;
-
+
}
else
EXPR_TARGET_AVAILABLE (expr) = false;
- /* This is the only case when propagation result can change over time,
- as we can dynamically switch off scheduling as RHS. In this case,
+ /* This is the only case when propagation result can change over time,
+ as we can dynamically switch off scheduling as RHS. In this case,
just check the flag to reach the correct decision. */
if (enable_schedule_as_rhs_p)
{
@@ -2374,27 +2374,27 @@ try_bitmap_cache (expr_t expr, insn_t insn,
return false;
}
-/* Try to look at bitmap caches for EXPR and INSN pair, return true
+/* Try to look at bitmap caches for EXPR and INSN pair, return true
if successful. Write to RES the resulting code for moveup_expr. */
-static bool
+static bool
try_transformation_cache (expr_t expr, insn_t insn,
enum MOVEUP_EXPR_CODE *res)
{
- struct transformed_insns *pti
+ struct transformed_insns *pti
= (struct transformed_insns *)
htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn),
- &EXPR_VINSN (expr),
+ &EXPR_VINSN (expr),
VINSN_HASH_RTX (EXPR_VINSN (expr)));
if (pti)
{
- /* This EXPR was already moved through this insn and was
- changed as a result. Fetch the proper data from
+ /* This EXPR was already moved through this insn and was
+ changed as a result. Fetch the proper data from
the hashtable. */
- insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
- INSN_UID (insn), pti->type,
- pti->vinsn_old, pti->vinsn_new,
+ insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
+ INSN_UID (insn), pti->type,
+ pti->vinsn_old, pti->vinsn_new,
EXPR_SPEC_DONE_DS (expr));
-
+
if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new)))
pti->vinsn_new = vinsn_copy (pti->vinsn_new, true);
change_vinsn_in_expr (expr, pti->vinsn_new);
@@ -2405,7 +2405,7 @@ try_transformation_cache (expr_t expr, insn_t insn,
ds_t ds;
ds = EXPR_SPEC_DONE_DS (expr);
-
+
EXPR_SPEC_DONE_DS (expr) = pti->ds;
EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check;
}
@@ -2426,16 +2426,16 @@ try_transformation_cache (expr_t expr, insn_t insn,
/* Update bitmap caches on INSN with result RES of propagating EXPR. */
static void
-update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
+update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
enum MOVEUP_EXPR_CODE res)
{
int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));
- /* Do not cache result of propagating jumps through an insn group,
+ /* Do not cache result of propagating jumps through an insn group,
as it is always true, which is not useful outside the group. */
if (inside_insn_group)
return;
-
+
if (res == MOVEUP_EXPR_NULL)
{
bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
@@ -2458,16 +2458,16 @@ update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
/* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
and transformation type TRANS_TYPE. */
static void
-update_transformation_cache (expr_t expr, insn_t insn,
+update_transformation_cache (expr_t expr, insn_t insn,
bool inside_insn_group,
- enum local_trans_type trans_type,
+ enum local_trans_type trans_type,
vinsn_t expr_old_vinsn)
{
struct transformed_insns *pti;
if (inside_insn_group)
return;
-
+
pti = XNEW (struct transformed_insns);
pti->vinsn_old = expr_old_vinsn;
pti->vinsn_new = EXPR_VINSN (expr);
@@ -2477,13 +2477,13 @@ update_transformation_cache (expr_t expr, insn_t insn,
pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
vinsn_attach (pti->vinsn_old);
vinsn_attach (pti->vinsn_new);
- *((struct transformed_insns **)
+ *((struct transformed_insns **)
htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
pti, VINSN_HASH_RTX (expr_old_vinsn),
INSERT)) = pti;
}
-/* Same as moveup_expr, but first looks up the result of
+/* Same as moveup_expr, but first looks up the result of
transformation in caches. */
static enum MOVEUP_EXPR_CODE
moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
@@ -2493,7 +2493,7 @@ moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
if (sched_verbose >= 6)
{
- sel_print ("Moving ");
+ sel_print ("Moving ");
dump_expr (expr);
sel_print (" through %d: ", INSN_UID (insn));
}
@@ -2519,7 +2519,7 @@ moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
enum local_trans_type trans_type = TRANS_SUBSTITUTION;
- /* ??? Invent something better than this. We can't allow old_vinsn
+ /* ??? Invent something better than this. We can't allow old_vinsn
to go, we need it for the history vector. */
vinsn_attach (expr_old_vinsn);
@@ -2549,9 +2549,9 @@ moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
case MOVEUP_EXPR_CHANGED:
gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
|| EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
- insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
- INSN_UID (insn), trans_type,
- expr_old_vinsn, EXPR_VINSN (expr),
+ insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
+ INSN_UID (insn), trans_type,
+ expr_old_vinsn, EXPR_VINSN (expr),
expr_old_spec_ds);
update_transformation_cache (expr, insn, inside_insn_group,
trans_type, expr_old_vinsn);
@@ -2572,7 +2572,7 @@ moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
return res;
}
-/* Moves an av set AVP up through INSN, performing necessary
+/* Moves an av set AVP up through INSN, performing necessary
transformations. */
static void
moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
@@ -2580,9 +2580,9 @@ moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
av_set_iterator i;
expr_t expr;
- FOR_EACH_EXPR_1 (expr, i, avp)
- {
-
+ FOR_EACH_EXPR_1 (expr, i, avp)
+ {
+
switch (moveup_expr_cached (expr, insn, inside_insn_group))
{
case MOVEUP_EXPR_SAME:
@@ -2596,7 +2596,7 @@ moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
case MOVEUP_EXPR_CHANGED:
expr = merge_with_other_exprs (avp, &i, expr);
break;
-
+
default:
gcc_unreachable ();
}
@@ -2608,13 +2608,13 @@ static void
moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
{
int last_cycle;
-
+
if (sched_verbose >= 6)
sel_print ("Moving expressions up in the insn group...\n");
if (! path)
return;
last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
- while (path
+ while (path
&& INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
{
moveup_set_expr (avp, ILIST_INSN (path), true);
@@ -2632,11 +2632,11 @@ equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
copy_expr_onside (tmp, expr);
last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
- while (path
+ while (path
&& res
&& INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
{
- res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
+ res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
!= MOVEUP_EXPR_NULL);
path = ILIST_NEXT (path);
}
@@ -2657,7 +2657,7 @@ equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
/* Functions that compute av and lv sets. */
-/* Returns true if INSN is not a downward continuation of the given path P in
+/* Returns true if INSN is not a downward continuation of the given path P in
the current stage. */
static bool
is_ineligible_successor (insn_t insn, ilist_t p)
@@ -2681,8 +2681,8 @@ is_ineligible_successor (insn_t insn, ilist_t p)
/* is already visited. */
|| (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
&& (ilist_is_in_p (p, insn)
- /* We can reach another fence here and still seqno of insn
- would be equal to seqno of prev_insn. This is possible
+ /* We can reach another fence here and still seqno of insn
+ would be equal to seqno of prev_insn. This is possible
when prev_insn is a previously created bookkeeping copy.
In that case it'd get a seqno of insn. Thus, check here
whether insn is in current fence too. */
@@ -2690,8 +2690,8 @@ is_ineligible_successor (insn_t insn, ilist_t p)
/* Was already scheduled on this round. */
|| (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
&& IN_CURRENT_FENCE_P (insn))
- /* An insn from another fence could also be
- scheduled earlier even if this insn is not in
+ /* An insn from another fence could also be
+ scheduled earlier even if this insn is not in
a fence list right now. Check INSN_SCHED_CYCLE instead. */
|| (!pipelining_p
&& INSN_SCHED_TIMES (insn) > 0))
@@ -2700,9 +2700,9 @@ is_ineligible_successor (insn_t insn, ilist_t p)
return false;
}
-/* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
- of handling multiple successors and properly merging its av_sets. P is
- the current path traversed. WS is the size of lookahead window.
+/* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
+ of handling multiple successors and properly merging its av_sets. P is
+ the current path traversed. WS is the size of lookahead window.
Return the av set computed. */
static av_set_t
compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
@@ -2715,7 +2715,7 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
gcc_assert (sel_bb_end_p (insn));
- /* Find different kind of successors needed for correct computing of
+ /* Find different kind of successors needed for correct computing of
SPEC and TARGET_AVAILABLE attributes. */
sinfo = compute_succs_info (insn, SUCCS_NORMAL);
@@ -2739,14 +2739,14 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
/* We will edit SUCC_SET and EXPR_SPEC field of its elements. */
succ_set = compute_av_set_inside_bb (succ, p, ws, true);
- av_set_split_usefulness (succ_set,
- VEC_index (int, sinfo->probs_ok, is),
+ av_set_split_usefulness (succ_set,
+ VEC_index (int, sinfo->probs_ok, is),
sinfo->all_prob);
- if (sinfo->all_succs_n > 1
+ if (sinfo->all_succs_n > 1
&& sinfo->all_succs_n == sinfo->succs_ok_n)
{
- /* Find EXPR'es that came from *all* successors and save them
+ /* Find EXPR'es that came from *all* successors and save them
into expr_in_all_succ_branches. This set will be used later
for calculating speculation attributes of EXPR'es. */
if (is == 0)
@@ -2760,7 +2760,7 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
{
av_set_iterator i;
expr_t expr;
-
+
FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
av_set_iter_remove (&i);
@@ -2775,7 +2775,7 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
basic_block bb1 = BLOCK_FOR_INSN (succ);
gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
- av_set_union_and_live (&av1, &succ_set,
+ av_set_union_and_live (&av1, &succ_set,
BB_LV_SET (bb0),
BB_LV_SET (bb1),
insn);
@@ -2784,22 +2784,22 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
av_set_union_and_clear (&av1, &succ_set, insn);
}
- /* Check liveness restrictions via hard way when there are more than
+ /* Check liveness restrictions via hard way when there are more than
two successors. */
if (sinfo->succs_ok_n > 2)
for (is = 0; VEC_iterate (rtx, sinfo->succs_ok, is, succ); is++)
{
basic_block succ_bb = BLOCK_FOR_INSN (succ);
-
+
gcc_assert (BB_LV_SET_VALID_P (succ_bb));
- mark_unavailable_targets (av1, BB_AV_SET (succ_bb),
+ mark_unavailable_targets (av1, BB_AV_SET (succ_bb),
BB_LV_SET (succ_bb));
}
-
- /* Finally, check liveness restrictions on paths leaving the region. */
+
+ /* Finally, check liveness restrictions on paths leaving the region. */
if (sinfo->all_succs_n > sinfo->succs_ok_n)
for (is = 0; VEC_iterate (rtx, sinfo->succs_other, is, succ); is++)
- mark_unavailable_targets
+ mark_unavailable_targets
(av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));
if (sinfo->all_succs_n > 1)
@@ -2807,21 +2807,21 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
av_set_iterator i;
expr_t expr;
- /* Increase the spec attribute of all EXPR'es that didn't come
+ /* Increase the spec attribute of all EXPR'es that didn't come
from all successors. */
FOR_EACH_EXPR (expr, i, av1)
if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
EXPR_SPEC (expr)++;
av_set_clear (&expr_in_all_succ_branches);
-
- /* Do not move conditional branches through other
- conditional branches. So, remove all conditional
+
+ /* Do not move conditional branches through other
+ conditional branches. So, remove all conditional
branches from av_set if current operator is a conditional
branch. */
av_set_substract_cond_branches (&av1);
}
-
+
ilist_remove (&p);
free_succs_info (sinfo);
@@ -2835,9 +2835,9 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
return av1;
}
-/* This function computes av_set for the FIRST_INSN by dragging valid
- av_set through all basic block insns either from the end of basic block
- (computed using compute_av_set_at_bb_end) or from the insn on which
+/* This function computes av_set for the FIRST_INSN by dragging valid
+ av_set through all basic block insns either from the end of basic block
+ (computed using compute_av_set_at_bb_end) or from the insn on which
MAX_WS was exceeded. It uses compute_av_set_at_bb_end to compute av_set
below the basic block and handling conditional branches.
FIRST_INSN - the basic block head, P - path consisting of the insns
@@ -2845,7 +2845,7 @@ compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
and bb ends are added to the path), WS - current window size,
NEED_COPY_P - true if we'll make a copy of av_set before returning it. */
static av_set_t
-compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
+compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
bool need_copy_p)
{
insn_t cur_insn;
@@ -2865,7 +2865,7 @@ compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
return NULL;
}
- /* If insn already has valid av(insn) computed, just return it. */
+ /* If insn already has valid av(insn) computed, just return it. */
if (AV_SET_VALID_P (first_insn))
{
av_set_t av_set;
@@ -2888,9 +2888,9 @@ compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
ilist_add (&p, first_insn);
/* As the result after this loop have completed, in LAST_INSN we'll
- have the insn which has valid av_set to start backward computation
- from: it either will be NULL because on it the window size was exceeded
- or other valid av_set as returned by compute_av_set for the last insn
+ have the insn which has valid av_set to start backward computation
+ from: it either will be NULL because on it the window size was exceeded
+ or other valid av_set as returned by compute_av_set for the last insn
of the basic block. */
for (last_insn = first_insn; last_insn != after_bb_end;
last_insn = NEXT_INSN (last_insn))
@@ -2907,12 +2907,12 @@ compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
/* The special case: the last insn of the BB may be an
ineligible_successor due to its SEQ_NO that was set on
it as a bookkeeping. */
- if (last_insn != first_insn
+ if (last_insn != first_insn
&& is_ineligible_successor (last_insn, p))
{
if (sched_verbose >= 6)
sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
- break;
+ break;
}
if (DEBUG_INSN_P (last_insn))
@@ -2920,7 +2920,7 @@ compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
if (end_ws > max_ws)
{
- /* We can reach max lookahead size at bb_header, so clean av_set
+ /* We can reach max lookahead size at bb_header, so clean av_set
first. */
INSN_WS_LEVEL (last_insn) = global_level;
@@ -2940,7 +2940,7 @@ compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
{
av = NULL;
- /* This is needed only to obtain av_sets that are identical to
+ /* This is needed only to obtain av_sets that are identical to
those computed by the old compute_av_set version. */
if (last_insn == first_insn && !INSN_NOP_P (last_insn))
av_set_add (&av, INSN_EXPR (last_insn));
@@ -2952,16 +2952,16 @@ compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
/* Compute av_set in AV starting from below the LAST_INSN up to
location above the FIRST_INSN. */
for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
- cur_insn = PREV_INSN (cur_insn))
+ cur_insn = PREV_INSN (cur_insn))
if (!INSN_NOP_P (cur_insn))
{
expr_t expr;
-
+
moveup_set_expr (&av, cur_insn, false);
-
- /* If the expression for CUR_INSN is already in the set,
+
+ /* If the expression for CUR_INSN is already in the set,
replace it by the new one. */
- expr = av_set_lookup (av, INSN_VINSN (cur_insn));
+ expr = av_set_lookup (av, INSN_VINSN (cur_insn));
if (expr != NULL)
{
clear_expr (expr);
@@ -3058,16 +3058,16 @@ compute_live (insn_t insn)
if (!ignore_first)
{
regset src = NULL;
-
+
if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
src = BB_LV_SET (bb);
- else
+ else
{
gcc_assert (in_current_region_p (bb));
if (INSN_LIVE_VALID_P (insn))
src = INSN_LIVE (insn);
}
-
+
if (src)
{
lv = get_regset_from_pool ();
@@ -3078,7 +3078,7 @@ compute_live (insn_t insn)
COPY_REG_SET (BB_LV_SET (bb), lv);
BB_LV_SET_VALID_P (bb) = true;
}
-
+
return_regset_to_pool (lv);
return lv;
}
@@ -3088,8 +3088,8 @@ compute_live (insn_t insn)
ignore_first = false;
gcc_assert (in_current_region_p (bb));
- /* Find a valid LV set in this block or below, if needed.
- Start searching from the next insn: either ignore_first is true, or
+ /* Find a valid LV set in this block or below, if needed.
+ Start searching from the next insn: either ignore_first is true, or
INSN doesn't have a correct live set. */
temp = NEXT_INSN (insn);
final = NEXT_INSN (BB_END (bb));
@@ -3120,11 +3120,11 @@ compute_live (insn_t insn)
if (sel_bb_head_p (insn))
{
basic_block bb = BLOCK_FOR_INSN (insn);
-
+
COPY_REG_SET (BB_LV_SET (bb), lv);
BB_LV_SET_VALID_P (bb) = true;
}
-
+
/* We return LV to the pool, but will not clear it there. Thus we can
legimatelly use LV till the next use of regset_pool_get (). */
return_regset_to_pool (lv);
@@ -3145,8 +3145,8 @@ compute_live_below_insn (rtx insn, regset regs)
{
rtx succ;
succ_iterator si;
-
- FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
+
+ FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
IOR_REG_SET (regs, compute_live (succ));
}
@@ -3189,7 +3189,7 @@ get_spec_check_type_for_insn (insn_t insn, expr_t expr)
return to_check_ds;
}
-/* Find the set of registers that are unavailable for storing expres
+/* Find the set of registers that are unavailable for storing expres
while moving ORIG_OPS up on the path starting from INSN due to
liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).
@@ -3198,32 +3198,32 @@ get_spec_check_type_for_insn (insn_t insn, expr_t expr)
REG_RENAME_P denotes the set of hardware registers that
can not be used with renaming due to the register class restrictions,
- mode restrictions and other (the register we'll choose should be
+ mode restrictions and other (the register we'll choose should be
compatible class with the original uses, shouldn't be in call_used_regs,
should be HARD_REGNO_RENAME_OK etc).
Returns TRUE if we've found all original insns, FALSE otherwise.
This function utilizes code_motion_path_driver (formerly find_used_regs_1)
- to traverse the code motion paths. This helper function finds registers
- that are not available for storing expres while moving ORIG_OPS up on the
+ to traverse the code motion paths. This helper function finds registers
+ that are not available for storing expres while moving ORIG_OPS up on the
path starting from INSN. A register considered as used on the moving path,
if one of the following conditions is not satisfied:
- (1) a register not set or read on any path from xi to an instance of
- the original operation,
- (2) not among the live registers of the point immediately following the
+ (1) a register not set or read on any path from xi to an instance of
+ the original operation,
+ (2) not among the live registers of the point immediately following the
first original operation on a given downward path, except for the
original target register of the operation,
- (3) not live on the other path of any conditional branch that is passed
+ (3) not live on the other path of any conditional branch that is passed
by the operation, in case original operations are not present on
both paths of the conditional branch.
All the original operations found during the traversal are saved in the
ORIGINAL_INSNS list.
- REG_RENAME_P->CROSSES_CALL is true, if there is a call insn on the path
- from INSN to original insn. In this case CALL_USED_REG_SET will be added
+ REG_RENAME_P->CROSSES_CALL is true, if there is a call insn on the path
+ from INSN to original insn. In this case CALL_USED_REG_SET will be added
to unavailable hard regs at the point original operation is found. */
static bool
@@ -3246,10 +3246,10 @@ find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
sparams.crosses_call = false;
sparams.original_insns = original_insns;
sparams.used_regs = used_regs;
-
+
/* Set the appropriate hooks and data. */
code_motion_path_driver_info = &fur_hooks;
-
+
res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
reg_rename_p->crosses_call |= sparams.crosses_call;
@@ -3260,12 +3260,12 @@ find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
/* ??? We calculate whether an expression needs a check when computing
av sets. This information is not as precise as it could be due to
merging this bit in merge_expr. We can do better in find_used_regs,
- but we want to avoid multiple traversals of the same code motion
+ but we want to avoid multiple traversals of the same code motion
paths. */
FOR_EACH_EXPR (expr, expr_iter, orig_ops)
needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);
- /* Mark hardware regs in REG_RENAME_P that are not suitable
+ /* Mark hardware regs in REG_RENAME_P that are not suitable
for renaming expr in INSN due to hardware restrictions (register class,
modes compatibility etc). */
FOR_EACH_DEF (def, i, *original_insns)
@@ -3275,7 +3275,7 @@ find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
if (VINSN_SEPARABLE_P (vinsn))
mark_unavailable_hard_regs (def, reg_rename_p, used_regs);
- /* Do not allow clobbering of ld.[sa] address in case some of the
+ /* Do not allow clobbering of ld.[sa] address in case some of the
original operations need a check. */
if (needs_spec_check_p)
IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
@@ -3305,15 +3305,15 @@ sel_target_adjust_priority (expr_t expr)
gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0);
if (sched_verbose >= 2)
- sel_print ("sel_target_adjust_priority: insn %d, %d +%d = %d.\n",
- INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
+ sel_print ("sel_target_adjust_priority: insn %d, %d +%d = %d.\n",
+ INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
EXPR_PRIORITY_ADJ (expr), new_priority);
return new_priority;
}
/* Rank two available exprs for scheduling. Never return 0 here. */
-static int
+static int
sel_rank_for_schedule (const void *x, const void *y)
{
expr_t tmp = *(const expr_t *) y;
@@ -3326,7 +3326,7 @@ sel_rank_for_schedule (const void *x, const void *y)
tmp2_vinsn = EXPR_VINSN (tmp2);
tmp_insn = EXPR_INSN_RTX (tmp);
tmp2_insn = EXPR_INSN_RTX (tmp2);
-
+
/* Schedule debug insns as early as possible. */
if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
return -1;
@@ -3336,7 +3336,7 @@ sel_rank_for_schedule (const void *x, const void *y)
/* Prefer SCHED_GROUP_P insns to any others. */
if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
{
- if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
+ if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;
/* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
@@ -3375,7 +3375,7 @@ sel_rank_for_schedule (const void *x, const void *y)
val = p2 * EXPR_USEFULNESS (tmp2) - p1 * EXPR_USEFULNESS (tmp);
}
else
- val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp)
+ val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp)
+ EXPR_PRIORITY_ADJ (tmp2) - EXPR_PRIORITY_ADJ (tmp);
if (val)
return val;
@@ -3405,20 +3405,20 @@ sel_rank_for_schedule (const void *x, const void *y)
}
/* Prefer an old insn to a bookkeeping insn. */
- if (INSN_UID (tmp_insn) < first_emitted_uid
+ if (INSN_UID (tmp_insn) < first_emitted_uid
&& INSN_UID (tmp2_insn) >= first_emitted_uid)
return -1;
- if (INSN_UID (tmp_insn) >= first_emitted_uid
+ if (INSN_UID (tmp_insn) >= first_emitted_uid
&& INSN_UID (tmp2_insn) < first_emitted_uid)
return 1;
- /* Prefer an insn with smaller UID, as a last resort.
+ /* Prefer an insn with smaller UID, as a last resort.
We can't safely use INSN_LUID as it is defined only for those insns
that are in the stream. */
return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
}
-/* Filter out expressions from av set pointed to by AV_PTR
+/* Filter out expressions from av set pointed to by AV_PTR
that are pipelined too many times. */
static void
process_pipelined_exprs (av_set_t *av_ptr)
@@ -3427,7 +3427,7 @@ process_pipelined_exprs (av_set_t *av_ptr)
av_set_iterator si;
/* Don't pipeline already pipelined code as that would increase
- number of unnecessary register moves. */
+ number of unnecessary register moves. */
FOR_EACH_EXPR_1 (expr, si, av_ptr)
{
if (EXPR_SCHED_TIMES (expr)
@@ -3498,8 +3498,8 @@ process_spec_exprs (av_set_t *av_ptr)
}
}
-/* Search for any use-like insns in AV_PTR and decide on scheduling
- them. Return one when found, and NULL otherwise.
+/* Search for any use-like insns in AV_PTR and decide on scheduling
+ them. Return one when found, and NULL otherwise.
Note that we check here whether a USE could be scheduled to avoid
an infinite loop later. */
static expr_t
@@ -3581,7 +3581,7 @@ vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
another pattern due to substitution, and we can't choose
different register as in the above case. Check all registers
being written instead. */
- if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
+ if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
VINSN_REG_SETS (EXPR_VINSN (expr))))
return true;
}
@@ -3642,7 +3642,7 @@ vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
{
vinsn_t vinsn;
int n;
-
+
for (n = 0; VEC_iterate (vinsn_t, *vinsn_vec, n, vinsn); n++)
vinsn_detach (vinsn);
VEC_block_remove (vinsn_t, *vinsn_vec, 0, len);
@@ -3657,7 +3657,7 @@ vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
VEC_safe_push (vinsn_t, heap, *vinsn_vec, EXPR_VINSN (expr));
}
-/* Free the vector representing blocked expressions. */
+/* Free the vector representing blocked expressions. */
static void
vinsn_vec_free (vinsn_vec_t *vinsn_vec)
{
@@ -3672,15 +3672,15 @@ void sel_add_to_insn_priority (rtx insn, int amount)
EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;
if (sched_verbose >= 2)
- sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
+ sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
}
-/* Turn AV into a vector, filter inappropriate insns and sort it. Return
+/* Turn AV into a vector, filter inappropriate insns and sort it. Return
true if there is something to schedule. BNDS and FENCE are current
boundaries and fence, respectively. If we need to stall for some cycles
- before an expr from AV would become available, write this number to
+ before an expr from AV would become available, write this number to
*PNEED_STALL. */
static bool
fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
@@ -3706,7 +3706,7 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
for each insn. */
gcc_assert (VEC_empty (expr_t, vec_av_set));
FOR_EACH_EXPR (expr, si, av)
- {
+ {
VEC_safe_push (expr_t, heap, vec_av_set, expr);
gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);
@@ -3742,12 +3742,12 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
continue;
}
- /* Set number of sched_next insns (just in case there
+ /* Set number of sched_next insns (just in case there
could be several). */
if (FENCE_SCHED_NEXT (fence))
sched_next_worked++;
-
- /* Check all liveness requirements and try renaming.
+
+ /* Check all liveness requirements and try renaming.
FIXME: try to minimize calls to this. */
target_available = EXPR_TARGET_AVAILABLE (expr);
@@ -3768,13 +3768,13 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
INSN_UID (insn));
continue;
}
-
+
if (target_available == true)
{
/* Do nothing -- we can use an existing register. */
is_orig_reg_p = EXPR_SEPARABLE_P (expr);
}
- else if (/* Non-separable instruction will never
+ else if (/* Non-separable instruction will never
get another register. */
(target_available == false
&& !EXPR_SEPARABLE_P (expr))
@@ -3786,7 +3786,7 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
{
VEC_unordered_remove (expr_t, vec_av_set, n);
if (sched_verbose >= 4)
- sel_print ("Expr %d has no suitable target register\n",
+ sel_print ("Expr %d has no suitable target register\n",
INSN_UID (insn));
continue;
}
@@ -3854,20 +3854,20 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
if (need_cycles > 0)
{
stalled++;
- min_need_stall = (min_need_stall < 0
+ min_need_stall = (min_need_stall < 0
? need_cycles
: MIN (min_need_stall, need_cycles));
VEC_unordered_remove (expr_t, vec_av_set, n);
if (sched_verbose >= 4)
- sel_print ("Expr %d is not ready until cycle %d (cached)\n",
+ sel_print ("Expr %d is not ready until cycle %d (cached)\n",
INSN_UID (insn),
FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
continue;
}
}
- /* Now resort to dependence analysis to find whether EXPR might be
+ /* Now resort to dependence analysis to find whether EXPR might be
stalled due to dependencies from FENCE's context. */
need_cycles = tick_check_p (expr, dc, fence);
new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;
@@ -3881,24 +3881,24 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
{
int new_size = INSN_UID (insn) * 3 / 2;
-
- FENCE_READY_TICKS (fence)
+
+ FENCE_READY_TICKS (fence)
= (int *) xrecalloc (FENCE_READY_TICKS (fence),
new_size, FENCE_READY_TICKS_SIZE (fence),
sizeof (int));
}
- FENCE_READY_TICKS (fence)[INSN_UID (insn)]
- = FENCE_CYCLE (fence) + need_cycles;
-
+ FENCE_READY_TICKS (fence)[INSN_UID (insn)]
+ = FENCE_CYCLE (fence) + need_cycles;
+
stalled++;
- min_need_stall = (min_need_stall < 0
+ min_need_stall = (min_need_stall < 0
? need_cycles
: MIN (min_need_stall, need_cycles));
VEC_unordered_remove (expr_t, vec_av_set, n);
-
+
if (sched_verbose >= 4)
- sel_print ("Expr %d is not ready yet until cycle %d\n",
+ sel_print ("Expr %d is not ready yet until cycle %d\n",
INSN_UID (insn),
FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
continue;
@@ -3933,10 +3933,10 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
/* Sort the vector. */
qsort (VEC_address (expr_t, vec_av_set), VEC_length (expr_t, vec_av_set),
sizeof (expr_t), sel_rank_for_schedule);
-
+
if (sched_verbose >= 4)
{
- sel_print ("Total ready exprs: %d, stalled: %d\n",
+ sel_print ("Total ready exprs: %d, stalled: %d\n",
VEC_length (expr_t, vec_av_set), stalled);
sel_print ("Sorted av set (%d): ", VEC_length (expr_t, vec_av_set));
for (n = 0; VEC_iterate (expr_t, vec_av_set, n, expr); n++)
@@ -3959,7 +3959,7 @@ convert_vec_av_set_to_ready (void)
/* Allocate and fill the ready list from the sorted vector. */
ready.n_ready = VEC_length (expr_t, vec_av_set);
ready.first = ready.n_ready - 1;
-
+
gcc_assert (ready.n_ready > 0);
if (ready.n_ready > max_issue_size)
@@ -3967,7 +3967,7 @@ convert_vec_av_set_to_ready (void)
max_issue_size = ready.n_ready;
sched_extend_ready_list (ready.n_ready);
}
-
+
for (n = 0; VEC_iterate (expr_t, vec_av_set, n, expr); n++)
{
vinsn_t vi = EXPR_VINSN (expr);
@@ -3980,8 +3980,8 @@ convert_vec_av_set_to_ready (void)
/* Initialize ready list from *AV_PTR for the max_issue () call.
If any unrecognizable insn is found in *AV_PTR, return it (and skip
- max_issue). BND and FENCE are current boundary and fence,
- respectively. If we need to stall for some cycles before an expr
+ max_issue). BND and FENCE are current boundary and fence,
+ respectively. If we need to stall for some cycles before an expr
from *AV_PTR would become available, write this number to *PNEED_STALL. */
static expr_t
fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
@@ -3992,7 +3992,7 @@ fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
/* We do not support multiple boundaries per fence. */
gcc_assert (BLIST_NEXT (bnds) == NULL);
- /* Process expressions required special handling, i.e. pipelined,
+ /* Process expressions requiring special handling, i.e. pipelined,
speculative and recog() < 0 expressions first. */
process_pipelined_exprs (av_ptr);
process_spec_exprs (av_ptr);
@@ -4021,8 +4021,8 @@ fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
static bool
sel_dfa_new_cycle (insn_t insn, fence_t fence)
{
- int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
- ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
+ int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
+ ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
: FENCE_CYCLE (fence) - 1;
bool res = false;
int sort_p = 0;
@@ -4084,7 +4084,7 @@ invoke_reorder_hooks (fence_t fence)
&& !SCHED_GROUP_P (ready_element (&ready, 0)))
{
if (ready.n_ready == 1)
- issue_more =
+ issue_more =
targetm.sched.reorder2 (sched_dump, sched_verbose,
ready_lastpos (&ready),
&ready.n_ready, FENCE_CYCLE (fence));
@@ -4105,7 +4105,7 @@ invoke_reorder_hooks (fence_t fence)
ran_hook = true;
}
- else
+ else
issue_more = issue_rate;
/* Ensure that ready list and vec_av_set are in line with each other,
@@ -4126,7 +4126,7 @@ invoke_reorder_hooks (fence_t fence)
break;
gcc_assert (j < n);
- tmp = vec[i];
+ tmp = vec[i];
vec[i] = vec[j];
vec[j] = tmp;
}
@@ -4135,9 +4135,9 @@ invoke_reorder_hooks (fence_t fence)
return issue_more;
}
-/* Return an EXPR correponding to INDEX element of ready list, if
- FOLLOW_READY_ELEMENT is true (i.e., an expr of
- ready_element (&ready, INDEX) will be returned), and to INDEX element of
+/* Return an EXPR corresponding to INDEX element of ready list, if
+ FOLLOW_READY_ELEMENT is true (i.e., an expr of
+ ready_element (&ready, INDEX) will be returned), and to INDEX element of
ready.vec otherwise. */
static inline expr_t
find_expr_for_ready (int index, bool follow_ready_element)
@@ -4159,7 +4159,7 @@ static int
invoke_dfa_lookahead_guard (void)
{
int i, n;
- bool have_hook
+ bool have_hook
= targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;
if (sched_verbose >= 2)
@@ -4171,18 +4171,18 @@ invoke_dfa_lookahead_guard (void)
insn_t insn;
int r;
- /* In this loop insn is Ith element of the ready list given by
+ /* In this loop insn is Ith element of the ready list given by
ready_element, not Ith element of ready.vec. */
insn = ready_element (&ready, i);
-
+
if (! have_hook || i == 0)
r = 0;
else
r = !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn);
-
+
gcc_assert (INSN_CODE (insn) >= 0);
-
- /* Only insns with ready_try = 0 can get here
+
+ /* Only insns with ready_try = 0 can get here
from fill_ready_list. */
gcc_assert (ready_try [i] == 0);
ready_try[i] = r;
@@ -4190,7 +4190,7 @@ invoke_dfa_lookahead_guard (void)
n++;
expr = find_expr_for_ready (i, true);
-
+
if (sched_verbose >= 2)
{
dump_vinsn (EXPR_VINSN (expr));
@@ -4221,7 +4221,7 @@ calculate_privileged_insns (void)
min_spec_insn = ready_element (&ready, i);
min_spec_expr = find_expr_for_ready (i, true);
}
-
+
cur_insn = ready_element (&ready, i);
cur_expr = find_expr_for_ready (i, true);
@@ -4240,7 +4240,7 @@ calculate_privileged_insns (void)
return privileged_n;
}
-/* Call the rest of the hooks after the choice was made. Return
+/* Call the rest of the hooks after the choice was made. Return
the number of insns that still can be issued given that the current
number is ISSUE_MORE. FENCE and BEST_INSN are the current fence
and the insn chosen for scheduling, respectively. */
@@ -4251,11 +4251,11 @@ invoke_aftermath_hooks (fence_t fence, rtx best_insn, int issue_more)
/* First, call dfa_new_cycle, and then variable_issue, if available. */
sel_dfa_new_cycle (best_insn, fence);
-
+
if (targetm.sched.variable_issue)
{
memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
- issue_more =
+ issue_more =
targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
issue_more);
memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
@@ -4287,7 +4287,7 @@ estimate_insn_cost (rtx insn, state_t state)
return cost;
}
-/* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
+/* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
This function properly handles ASMs, USEs etc. */
static int
get_expr_cost (expr_t expr, fence_t fence)
@@ -4296,7 +4296,7 @@ get_expr_cost (expr_t expr, fence_t fence)
if (recog_memoized (insn) < 0)
{
- if (!FENCE_STARTS_CYCLE_P (fence)
+ if (!FENCE_STARTS_CYCLE_P (fence)
/* FIXME: Is this condition necessary? */
&& VINSN_UNIQUE_P (EXPR_VINSN (expr))
&& INSN_ASM_P (insn))
@@ -4314,7 +4314,7 @@ get_expr_cost (expr_t expr, fence_t fence)
return estimate_insn_cost (insn, FENCE_STATE (fence));
}
-/* Find the best insn for scheduling, either via max_issue or just take
+/* Find the best insn for scheduling, either via max_issue or just take
the most prioritized available. */
static int
choose_best_insn (fence_t fence, int privileged_n, int *index)
@@ -4361,16 +4361,16 @@ choose_best_insn (fence_t fence, int privileged_n, int *index)
return can_issue;
}
-/* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
- BNDS and FENCE are current boundaries and scheduling fence respectively.
- Return the expr found and NULL if nothing can be issued atm.
- Write to PNEED_STALL the number of cycles to stall if no expr was found. */
+/* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
+ BNDS and FENCE are current boundaries and scheduling fence respectively.
+ Return the expr found, or NULL if nothing can be issued at the moment.
+ Write to PNEED_STALL the number of cycles to stall if no expr was found. */
static expr_t
find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
int *pneed_stall)
{
expr_t best;
-
+
/* Choose the best insn for scheduling via:
1) sorting the ready list based on priority;
2) calling the reorder hook;
@@ -4383,7 +4383,7 @@ find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
can_issue_more = invoke_reorder_hooks (fence);
if (can_issue_more > 0)
{
- /* Try choosing the best insn until we find one that is could be
+ /* Try choosing the best insn until we find one that is could be
scheduled due to liveness restrictions on its destination register.
In the future, we'd like to choose once and then just probe insns
in the order of their priority. */
@@ -4393,7 +4393,7 @@ find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
if (can_issue_more)
best = find_expr_for_ready (index, true);
}
- /* We had some available insns, so if we can't issue them,
+ /* We had some available insns, so if we can't issue them,
we have a stall. */
if (can_issue_more == 0)
{
@@ -4409,7 +4409,7 @@ find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
if (can_issue_more == 0)
*pneed_stall = 1;
}
-
+
if (sched_verbose >= 2)
{
if (best != NULL)
@@ -4429,10 +4429,10 @@ find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
/* Functions that implement the core of the scheduler. */
-/* Emit an instruction from EXPR with SEQNO and VINSN after
+/* Emit an instruction from EXPR with SEQNO and VINSN after
PLACE_TO_INSERT. */
static insn_t
-emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
+emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
insn_t place_to_insert)
{
/* This assert fails when we have identical instructions
@@ -4446,15 +4446,15 @@ emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
if (EXPR_WAS_RENAMED (expr))
{
unsigned regno = expr_dest_regno (expr);
-
+
if (HARD_REGISTER_NUM_P (regno))
{
df_set_regs_ever_live (regno, true);
reg_rename_tick[regno] = ++reg_rename_this_tick;
}
}
-
- return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
+
+ return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
place_to_insert);
}
@@ -4724,7 +4724,7 @@ find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
/* Check if we are about to insert bookkeeping copy before a jump, and use
jump's seqno for the copy; otherwise, use JOIN_POINT's seqno. */
next = NEXT_INSN (place_to_insert);
- if (INSN_P (next)
+ if (INSN_P (next)
&& JUMP_P (next)
&& BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
{
@@ -4737,8 +4737,8 @@ find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
{
seqno = get_seqno_by_preds (place_to_insert);
- /* Sometimes the fences can move in such a way that there will be
- no instructions with positive seqno around this bookkeeping.
+ /* Sometimes the fences can move in such a way that there will be
+ no instructions with positive seqno around this bookkeeping.
This means that there will be no way to get to it by a regular
fence movement. Never mind because we pick up such pieces for
rescheduling anyways, so any positive value will do for now. */
@@ -4748,7 +4748,7 @@ find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
seqno = 1;
}
}
-
+
gcc_assert (seqno > 0);
return seqno;
}
@@ -4814,7 +4814,7 @@ generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
return BLOCK_FOR_INSN (new_insn);
}
-/* Remove from AV_PTR all insns that may need bookkeeping when scheduling
+/* Remove from AV_PTR all insns that may need bookkeeping when scheduling
on FENCE, but we are unable to copy them. */
static void
remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
@@ -4822,11 +4822,11 @@ remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
expr_t expr;
av_set_iterator i;
- /* An expression does not need bookkeeping if it is available on all paths
- from current block to original block and current block dominates
- original block. We check availability on all paths by examining
- EXPR_SPEC; this is not equivalent, because it may be positive even
- if expr is available on all paths (but if expr is not available on
+ /* An expression does not need bookkeeping if it is available on all paths
+ from current block to original block and current block dominates
+ original block. We check availability on all paths by examining
+ EXPR_SPEC; this is not equivalent, because it may be positive even
+ if expr is available on all paths (but if expr is not available on
any path, EXPR_SPEC will be positive). */
FOR_EACH_EXPR_1 (expr, i, av_ptr)
@@ -4859,15 +4859,15 @@ remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
NOTE BASIC BLOCK:
...
- We can schedule jump one cycle earlier, than mov, because they cannot be
+ We can schedule the jump one cycle earlier than the mov, because they cannot be
executed together as their predicates are mutually exclusive.
- This is done in this way: first, new fallthrough basic block is created
- after jump (it is always can be done, because there already should be a
+ This is done in this way: first, a new fallthrough basic block is created
+ after the jump (this can always be done, because there should already be a
fallthrough block, where control flow goes in case of predicate being true -
- in our example; otherwise there should be a dependence between those
- instructions and jump and we cannot schedule jump right now);
- next, all instructions between jump and current scheduling point are moved
+ in our example; otherwise there would be a dependence between those
+ instructions and the jump and we could not schedule the jump right now);
+ next, all instructions between the jump and the current scheduling point are moved
to this new block. And the result is this:
NOTE BASIC BLOCK:
@@ -4957,7 +4957,7 @@ remove_temp_moveop_nops (bool full_tidying)
{
int i;
insn_t insn;
-
+
for (i = 0; VEC_iterate (insn_t, vec_temp_moveop_nops, i, insn); i++)
{
gcc_assert (INSN_NOP_P (insn));
@@ -4966,7 +4966,7 @@ remove_temp_moveop_nops (bool full_tidying)
/* Empty the vector. */
if (VEC_length (insn_t, vec_temp_moveop_nops) > 0)
- VEC_block_remove (insn_t, vec_temp_moveop_nops, 0,
+ VEC_block_remove (insn_t, vec_temp_moveop_nops, 0,
VEC_length (insn_t, vec_temp_moveop_nops));
}
@@ -4995,7 +4995,7 @@ remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
}
}
-/* Compute available instructions on BNDS. FENCE is the current fence. Write
+/* Compute available instructions on BNDS. FENCE is the current fence. Write
the computed set to *AV_VLIW_P. */
static void
compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
@@ -5040,7 +5040,7 @@ compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);
-
+
av1_copy = av_set_copy (BND_AV1 (bnd));
av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
}
@@ -5053,8 +5053,8 @@ compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
}
}
-/* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
- expression. When FOR_MOVEOP is true, also replace the register of
+/* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
+ expression. When FOR_MOVEOP is true, also replace the register of
expressions found with the register from EXPR_VLIW. */
static av_set_t
find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
@@ -5062,15 +5062,15 @@ find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
av_set_t expr_seq = NULL;
expr_t expr;
av_set_iterator i;
-
+
FOR_EACH_EXPR (expr, i, BND_AV (bnd))
{
if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
{
if (for_moveop)
{
- /* The sequential expression has the right form to pass
- to move_op except when renaming happened. Put the
+ /* The sequential expression has the right form to pass
+ to move_op except when renaming happened. Put the
correct register in EXPR then. */
if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
{
@@ -5079,10 +5079,10 @@ find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
stat_renamed_scheduled++;
}
- /* Also put the correct TARGET_AVAILABLE bit on the expr.
- This is needed when renaming came up with original
+ /* Also put the correct TARGET_AVAILABLE bit on the expr.
+ This is needed when renaming came up with the original
register. */
- else if (EXPR_TARGET_AVAILABLE (expr)
+ else if (EXPR_TARGET_AVAILABLE (expr)
!= EXPR_TARGET_AVAILABLE (expr_vliw))
{
gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
@@ -5094,10 +5094,10 @@ find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
}
av_set_add (&expr_seq, expr);
-
- /* With substitution inside insn group, it is possible
- that more than one expression in expr_seq will correspond
- to expr_vliw. In this case, choose one as the attempt to
+
+ /* With substitution inside insn group, it is possible
+ that more than one expression in expr_seq will correspond
+ to expr_vliw. In this case, choose one as the attempt to
move both leads to miscompiles. */
break;
}
@@ -5109,7 +5109,7 @@ find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
dump_av_set (expr_seq);
sel_print ("\n");
}
-
+
return expr_seq;
}
@@ -5120,7 +5120,7 @@ move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
{
insn_t prev_insn, next_insn, note;
- gcc_assert (sel_bb_head_p (nop)
+ gcc_assert (sel_bb_head_p (nop)
&& prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
note = bb_note (BLOCK_FOR_INSN (nop));
prev_insn = sel_bb_end (prev_bb);
@@ -5179,10 +5179,10 @@ prepare_place_to_insert (bnd_t bnd)
return place_to_insert;
}
-/* Find original instructions for EXPR_SEQ and move it to BND boundary.
+/* Find original instructions for EXPR_SEQ and move it to BND boundary.
Return the expression to emit in C_EXPR. */
static bool
-move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
+move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
av_set_t expr_seq, expr_t c_expr)
{
bool b, should_move;
@@ -5199,23 +5199,23 @@ move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
bitmap_clear (current_copies);
bitmap_clear (current_originators);
- b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
+ b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
get_dest_from_orig_ops (expr_seq), c_expr, &should_move);
- /* We should be able to find the expression we've chosen for
+ /* We should be able to find the expression we've chosen for
scheduling. */
gcc_assert (b);
-
+
if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
stat_insns_needed_bookkeeping++;
-
+
EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
{
/* We allocate these bitmaps lazily. */
if (! INSN_ORIGINATORS_BY_UID (book_uid))
INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
-
- bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
+
+ bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
current_originators);
}
@@ -5236,7 +5236,7 @@ debug_state (state_t state)
sel_print ("\n");
}
-/* Advance state on FENCE with INSN. Return true if INSN is
+/* Advance state on FENCE with INSN. Return true if INSN is
an ASM, and we should advance state once more. */
static bool
advance_state_on_fence (fence_t fence, insn_t insn)
@@ -5247,7 +5247,7 @@ advance_state_on_fence (fence_t fence, insn_t insn)
{
int res;
state_t temp_state = alloca (dfa_state_size);
-
+
gcc_assert (!INSN_ASM_P (insn));
asm_p = false;
@@ -5263,10 +5263,10 @@ advance_state_on_fence (fence_t fence, insn_t insn)
if (FENCE_ISSUED_INSNS (fence) > issue_rate)
gcc_unreachable ();
}
- }
+ }
else
{
- /* This could be an ASM insn which we'd like to schedule
+ /* This could be an ASM insn which we'd like to schedule
on the next cycle. */
asm_p = INSN_ASM_P (insn);
if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
@@ -5286,7 +5286,7 @@ static void
update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
{
bool asm_p;
-
+
/* First, reflect that something is scheduled on this fence. */
asm_p = advance_state_on_fence (fence, insn);
FENCE_LAST_SCHEDULED_INSN (fence) = insn;
@@ -5310,10 +5310,10 @@ update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
/* This does not account for adjust_cost hooks, just add the biggest
- constant the hook may add to the latency. TODO: make this
+ constant the hook may add to the latency. TODO: make this
a target dependent constant. */
- INSN_READY_CYCLE (insn)
- = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
+ INSN_READY_CYCLE (insn)
+ = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
? 1
: maximal_insn_latency (insn) + 1);
@@ -5321,7 +5321,7 @@ update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
FENCE_AFTER_STALL_P (fence) = 0;
if (asm_p || need_stall)
advance_one_cycle (fence);
-
+
/* Indicate that we've scheduled something on this fence. */
FENCE_SCHEDULED_P (fence) = true;
scheduled_something_on_previous_fence = true;
@@ -5346,11 +5346,11 @@ update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
insn_t succ;
advance_deps_context (BND_DC (bnd), insn);
- FOR_EACH_SUCC_1 (succ, si, insn,
+ FOR_EACH_SUCC_1 (succ, si, insn,
SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
{
ilist_t ptr = ilist_copy (BND_PTR (bnd));
-
+
ilist_add (&ptr, insn);
if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
@@ -5370,7 +5370,7 @@ update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
bnds_tailp = &BLIST_NEXT (*bnds_tailp);
}
-
+
blist_remove (bndsp);
return bnds_tailp;
}
@@ -5388,14 +5388,14 @@ schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);
/* In case of scheduling a jump skipping some other instructions,
- prepare CFG. After this, jump is at the boundary and can be
+ prepare the CFG. After this, the jump is at the boundary and can be
scheduled as a usual insn by MOVE_OP. */
if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
{
insn = EXPR_INSN_RTX (expr_vliw);
-
+
/* Speculative jumps are not handled. */
- if (insn != BND_TO (bnd)
+ if (insn != BND_TO (bnd)
&& !sel_insn_is_speculation_check (insn))
move_cond_jump (insn, bnd);
}
@@ -5404,15 +5404,15 @@ schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
place_to_insert = prepare_place_to_insert (bnd);
should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
clear_expr (c_expr);
-
- /* Add the instruction. The corner case to care about is when
- the expr_seq set has more than one expr, and we chose the one that
- is not equal to expr_vliw. Then expr_vliw may be insn in stream, and
+
+ /* Add the instruction. The corner case to care about is when
+ the expr_seq set has more than one expr, and we chose the one that
+ is not equal to expr_vliw. Then expr_vliw may be an insn in the stream, and
we can't use it. Generate the new vinsn. */
if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
{
vinsn_t vinsn_new;
-
+
vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
change_vinsn_in_expr (expr_vliw, vinsn_new);
should_move = false;
@@ -5420,7 +5420,7 @@ schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
if (should_move)
insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
else
- insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
+ insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
place_to_insert);
/* Return the nops generated for preserving of data sets back
@@ -5430,8 +5430,8 @@ schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
remove_temp_moveop_nops (!DEBUG_INSN_P (insn));
av_set_clear (&expr_seq);
-
- /* Save the expression scheduled so to reset target availability if we'll
+
+ /* Save the expression scheduled so as to reset target availability if we
meet it later on the same fence. */
if (EXPR_WAS_RENAMED (expr_vliw))
vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));
@@ -5449,7 +5449,7 @@ static void
stall_for_cycles (fence_t fence, int n)
{
int could_more;
-
+
could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
while (n--)
advance_one_cycle (fence);
@@ -5457,8 +5457,8 @@ stall_for_cycles (fence_t fence, int n)
FENCE_AFTER_STALL_P (fence) = 1;
}
-/* Gather a parallel group of insns at FENCE and assign their seqno
- to SEQNO. All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
+/* Gather a parallel group of insns at FENCE and assign their seqno
+ to SEQNO. All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
list for later recalculation of seqnos. */
static void
fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
@@ -5468,7 +5468,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
insn_t insn = FENCE_INSN (fence);
if (sched_verbose >= 2)
- sel_print ("Starting fill_insns for insn %d, cycle %d\n",
+ sel_print ("Starting fill_insns for insn %d, cycle %d\n",
INSN_UID (insn), FENCE_CYCLE (fence));
blist_add (&bnds, insn, NULL, FENCE_DC (fence));
@@ -5521,7 +5521,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
}
}
while (! expr_vliw && need_stall);
-
+
/* Now either we've selected expr_vliw or we have nothing to schedule. */
if (!expr_vliw)
{
@@ -5533,7 +5533,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
bnds_tailp1 = bnds_tailp;
do
- /* This code will be executed only once until we'd have several
+ /* This code will be executed only once, until we have several
boundaries per fence. */
{
bnd_t bnd = BLIST_BND (*bndsp);
@@ -5543,7 +5543,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
bndsp = &BLIST_NEXT (*bndsp);
continue;
}
-
+
insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
last_insn_was_debug = DEBUG_INSN_P (insn);
if (last_insn_was_debug)
@@ -5566,15 +5566,15 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
as this will bring two boundaries and, hence, necessity to handle
information for two or more blocks concurrently. */
if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn))
- || (was_stall
- && (was_stall >= max_stall
+ || (was_stall
+ && (was_stall >= max_stall
|| scheduled_insns >= max_insns)))
break;
}
while (bnds);
gcc_assert (!FENCE_BNDS (fence));
-
+
/* Update boundaries of the FENCE. */
while (bnds)
{
@@ -5587,7 +5587,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
ilist_add (&FENCE_BNDS (fence), insn);
}
-
+
blist_remove (&bnds);
}
@@ -5633,7 +5633,7 @@ update_and_record_unavailable_insns (basic_block book_block)
expr_t cur_expr;
rtx bb_end = sel_bb_end (book_block);
- /* First, get correct liveness in the bookkeeping block. The problem is
+ /* First, get correct liveness in the bookkeeping block. The problem is
the range between the bookkeeping insn and the end of the block. */
update_liveness_on_insn (bb_end);
if (control_flow_insn_p (bb_end))
@@ -5646,18 +5646,18 @@ update_and_record_unavailable_insns (basic_block book_block)
{
old_av_set = av_set_copy (BB_AV_SET (book_block));
update_data_sets (sel_bb_head (book_block));
-
+
/* Traverse all the expressions in the old av_set and check whether
CUR_EXPR is in new AV_SET. */
FOR_EACH_EXPR (cur_expr, i, old_av_set)
{
- expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
+ expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
EXPR_VINSN (cur_expr));
- if (! new_expr
- /* In this case, we can just turn off the E_T_A bit, but we can't
+ if (! new_expr
+ /* In this case, we can just turn off the E_T_A bit, but we can't
represent this information with the current vector. */
- || EXPR_TARGET_AVAILABLE (new_expr)
+ || EXPR_TARGET_AVAILABLE (new_expr)
!= EXPR_TARGET_AVAILABLE (cur_expr))
/* Unfortunately, the below code could be also fired up on
separable insns.
@@ -5669,21 +5669,21 @@ update_and_record_unavailable_insns (basic_block book_block)
}
}
-/* The main effect of this function is that sparams->c_expr is merged
+/* The main effect of this function is that sparams->c_expr is merged
with (or copied to) lparams->c_expr_merged. If there's only one successor,
we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
- lparams->c_expr_merged is copied back to sparams->c_expr after all
- successors has been traversed. lparams->c_expr_local is an expr allocated
- on stack in the caller function, and is used if there is more than one
- successor.
+ lparams->c_expr_merged is copied back to sparams->c_expr after all
+ successors have been traversed. lparams->c_expr_local is an expr allocated
+ on the stack in the caller function, and is used if there is more than one
+ successor.
SUCC is one of the SUCCS_NORMAL successors of INSN,
MOVEOP_DRV_CALL_RES is the result of call code_motion_path_driver on succ,
LPARAMS and STATIC_PARAMS contain the parameters described above. */
static void
-move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
- insn_t succ ATTRIBUTE_UNUSED,
- int moveop_drv_call_res,
+move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
+ insn_t succ ATTRIBUTE_UNUSED,
+ int moveop_drv_call_res,
cmpd_local_params_p lparams, void *static_params)
{
moveop_static_params_p sparams = (moveop_static_params_p) static_params;
@@ -5708,15 +5708,15 @@ move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
probability and will never be scheduled because of
weakness_cutoff in find_best_expr.
- We call merge_expr_data here instead of merge_expr
+ We call merge_expr_data here instead of merge_expr
because due to speculation C_EXPR and X may have the
same insns with different speculation types. And as of
- now such insns are considered non-equal.
+ now such insns are considered non-equal.
- However, EXPR_SCHED_TIMES is different -- we must get
- SCHED_TIMES from a real insn, not a bookkeeping copy.
+ However, EXPR_SCHED_TIMES is different -- we must get
+ SCHED_TIMES from a real insn, not a bookkeeping copy.
We force this here. Instead, we may consider merging
- SCHED_TIMES to the maximum instead of minimum in the
+ SCHED_TIMES to the maximum instead of minimum in the
below function. */
int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);
@@ -5735,16 +5735,16 @@ move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
if SUCC is one of SUCCS_BACK or SUCCS_OUT.
STATIC_PARAMS contain USED_REGS set. */
static void
-fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
- int moveop_drv_call_res,
- cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
+fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
+ int moveop_drv_call_res,
+ cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
void *static_params)
{
regset succ_live;
fur_static_params_p sparams = (fur_static_params_p) static_params;
/* Here we compute live regsets only for branches that do not lie
- on the code motion paths. These branches correspond to value
+ on the code motion paths. These branches correspond to value
MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
for such branches code_motion_path_driver is not called. */
if (moveop_drv_call_res != 0)
@@ -5763,7 +5763,7 @@ fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
into SP->CEXPR. */
static void
move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
-{
+{
moveop_static_params_p sp = (moveop_static_params_p) sparams;
sp->c_expr = lp->c_expr_merged;
@@ -5798,10 +5798,10 @@ track_scheduled_insns_and_blocks (rtx insn)
stat_bookkeeping_copies--;
}
-/* Emit a register-register copy for INSN if needed. Return true if
+/* Emit a register-register copy for INSN if needed. Return true if
we emitted one. PARAMS is the move_op static parameters. */
static bool
-maybe_emit_renaming_copy (rtx insn,
+maybe_emit_renaming_copy (rtx insn,
moveop_static_params_p params)
{
bool insn_emitted = false;
@@ -5815,25 +5815,25 @@ maybe_emit_renaming_copy (rtx insn,
if (cur_reg != NULL_RTX && REGNO (params->dest) != REGNO (cur_reg))
{
insn_t reg_move_insn, reg_move_insn_rtx;
-
- reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
+
+ reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
params->dest);
- reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
- INSN_EXPR (insn),
- INSN_SEQNO (insn),
+ reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
+ INSN_EXPR (insn),
+ INSN_SEQNO (insn),
insn);
EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0;
replace_dest_with_reg_in_expr (params->c_expr, params->dest);
-
+
insn_emitted = true;
params->was_renamed = true;
}
-
+
return insn_emitted;
}
-/* Emit a speculative check for INSN speculated as EXPR if needed.
- Return true if we've emitted one. PARAMS is the move_op static
+/* Emit a speculative check for INSN speculated as EXPR if needed.
+ Return true if we've emitted one. PARAMS is the move_op static
parameters. */
static bool
maybe_emit_speculative_check (rtx insn, expr_t expr,
@@ -5855,17 +5855,17 @@ maybe_emit_speculative_check (rtx insn, expr_t expr,
EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
x = insn;
}
-
+
gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0
&& EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0);
return insn_emitted;
}
-/* Handle transformations that leave an insn in place of original
- insn such as renaming/speculation. Return true if one of such
+/* Handle transformations that leave an insn in place of the original
+ insn, such as renaming/speculation. Return true if one such
transformation actually happened, and we have emitted this insn. */
static bool
-handle_emitting_transformations (rtx insn, expr_t expr,
+handle_emitting_transformations (rtx insn, expr_t expr,
moveop_static_params_p params)
{
bool insn_emitted = false;
@@ -5874,7 +5874,7 @@ handle_emitting_transformations (rtx insn, expr_t expr,
insn_emitted |= maybe_emit_speculative_check (insn, expr, params);
return insn_emitted;
-}
+}
/* If INSN is the only insn in the basic block (not counting JUMP,
which may be a jump to next insn, and DEBUG_INSNs), we want to
@@ -5929,7 +5929,7 @@ remove_insn_from_stream (rtx insn, bool only_disconnect)
/* If there's only one insn in the BB, make sure that a nop is
inserted into it, so the basic block won't disappear when we'll
delete INSN below with sel_remove_insn. It should also survive
- till the return to fill_insns. */
+ till the return to fill_insns. */
if (need_nop_to_preserve_insn_bb (insn))
{
insn_t nop = get_nop_from_pool (insn);
@@ -5941,17 +5941,17 @@ remove_insn_from_stream (rtx insn, bool only_disconnect)
}
/* This function is called when original expr is found.
- INSN - current insn traversed, EXPR - the corresponding expr found.
+ INSN - current insn traversed, EXPR - the corresponding expr found.
LPARAMS are the local parameters of the code motion driver, STATIC_PARAMS
are the static parameters of move_op. */
static void
-move_op_orig_expr_found (insn_t insn, expr_t expr,
- cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
+move_op_orig_expr_found (insn_t insn, expr_t expr,
+ cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
void *static_params)
{
bool only_disconnect, insn_emitted;
moveop_static_params_p params = (moveop_static_params_p) static_params;
-
+
copy_expr_onside (params->c_expr, INSN_EXPR (insn));
track_scheduled_insns_and_blocks (insn);
insn_emitted = handle_emitting_transformations (insn, expr, params);
@@ -5981,8 +5981,8 @@ fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
def_list_add (params->original_insns, insn, params->crosses_call);
/* Mark the registers that do not meet the following condition:
- (2) not among the live registers of the point
- immediately following the first original operation on
+ (2) not among the live registers of the point
+ immediately following the first original operation on
a given downward path, except for the original target
register of the operation. */
tmp = get_clear_regset_from_pool ();
@@ -6003,7 +6003,7 @@ fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
168: di=dx
REG_DEAD: dx
*/
- /* FIXME: see comment above and enable MEM_P
+ /* FIXME: see comment above and enable MEM_P
in vinsn_separable_p. */
gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn))
|| !MEM_P (INSN_LHS (insn)));
@@ -6012,15 +6012,15 @@ fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
/* This function is called on the ascending pass, before returning from
current basic block. */
static void
-move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
+move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
void *static_params)
{
moveop_static_params_p sparams = (moveop_static_params_p) static_params;
basic_block book_block = NULL;
- /* When we have removed the boundary insn for scheduling, which also
+ /* When we have removed the boundary insn for scheduling, which also
happened to be the end insn in its bb, we don't need to update sets. */
- if (!lparams->removed_last_insn
+ if (!lparams->removed_last_insn
&& lparams->e1
&& sel_bb_head_p (insn))
{
@@ -6032,13 +6032,13 @@ move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
/* Update data sets for the current insn. */
update_data_sets (insn);
}
-
+
/* If bookkeeping code was inserted, we need to update av sets of basic
- block that received bookkeeping. After generation of bookkeeping insn,
+ block that received bookkeeping. After generation of the bookkeeping insn,
the bookkeeping block does not contain a valid av set because we are not following
- the original algorithm in every detail with regards to e.g. renaming
+ the original algorithm in every detail with regards to e.g. renaming
simple reg-reg copies. Consider example:
-
+
bookkeeping block scheduling fence
\ /
\ join /
@@ -6049,7 +6049,7 @@ move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
/ \
r1 := r2 r1 := r3
- We try to schedule insn "r1 := r3" on the current
+ We try to schedule insn "r1 := r3" on the current
scheduling fence. Also, note that the av set of the bookkeeping block
contains both insns "r1 := r2" and "r1 := r3". When the insn has
been scheduled, the CFG is as follows:
@@ -6069,23 +6069,23 @@ move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
and bookkeeping code was generated at the bookkeeping block. This
way insn "r1 := r2" is no longer available as a whole instruction
(but only as expr) ahead of insn "r1 := r3" in bookkeeping block.
- This situation is handled by calling update_data_sets.
+ This situation is handled by calling update_data_sets.
Since update_data_sets is called only on the bookkeeping block, and
- it also may have predecessors with av_sets, containing instructions that
+ it also may have predecessors with av_sets, containing instructions that
are no longer available, we save all such expressions that become
unavailable during data sets update on the bookkeeping block in
- VEC_BOOKKEEPING_BLOCKED_VINSNS. Later we avoid selecting such
- expressions for scheduling. This allows us to avoid recomputation of
+ VEC_BOOKKEEPING_BLOCKED_VINSNS. Later we avoid selecting such
+ expressions for scheduling. This allows us to avoid recomputation of
av_sets outside the code motion path. */
-
+
if (book_block)
update_and_record_unavailable_insns (book_block);
/* If INSN was previously marked for deletion, it's time to do it. */
if (lparams->removed_last_insn)
insn = PREV_INSN (insn);
-
+
/* Do not tidy control flow at the topmost moveop, as we can erroneously
kill a block with a single nop in which the insn should be emitted. */
if (lparams->e1)
@@ -6095,8 +6095,8 @@ move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
/* This function is called on the ascending pass, before returning from the
current basic block. */
static void
-fur_at_first_insn (insn_t insn,
- cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
+fur_at_first_insn (insn_t insn,
+ cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
void *static_params ATTRIBUTE_UNUSED)
{
gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn)
@@ -6121,11 +6121,11 @@ move_op_ascend (insn_t insn, void *static_params)
update_liveness_on_insn (insn);
}
-/* This function is called on enter to the basic block.
- Returns TRUE if this block already have been visited and
+/* This function is called on entry to the basic block.
+ Returns TRUE if this block has already been visited and
code_motion_path_driver should return 1, FALSE otherwise. */
static int
-fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
+fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
void *static_params, bool visited_p)
{
fur_static_params_p sparams = (fur_static_params_p) static_params;
@@ -6149,8 +6149,8 @@ fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
/* Same as above but for move_op. */
static int
-move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
- cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
+move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
+ cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
void *static_params ATTRIBUTE_UNUSED, bool visited_p)
{
if (visited_p)
@@ -6158,10 +6158,10 @@ move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
return 1;
}
-/* This function is called while descending current basic block if current
+/* This function is called while descending the current basic block if the current
insn is not the original EXPR we're searching for.
- Return value: FALSE, if code_motion_path_driver should perform a local
+ Return value: FALSE, if code_motion_path_driver should perform a local
cleanup and return 0 itself;
TRUE, if code_motion_path_driver should continue. */
static bool
@@ -6175,14 +6175,14 @@ move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
#endif
/* If we're scheduling separate expr, in order to generate correct code
- we need to stop the search at bookkeeping code generated with the
+ we need to stop the search at bookkeeping code generated with the
same destination register or memory. */
if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
return false;
return true;
}
-/* This function is called while descending current basic block if current
+/* This function is called while descending the current basic block if the current
insn is not the original EXPR we're searching for.
Return value: TRUE (code_motion_path_driver should continue). */
@@ -6244,7 +6244,7 @@ struct code_motion_path_driver_info_def move_op_hooks = {
"move_op"
};
-/* Hooks and data to perform find_used_regs operations
+/* Hooks and data to perform find_used_regs operations
with code_motion_path_driver. */
struct code_motion_path_driver_info_def fur_hooks = {
fur_on_enter,
@@ -6259,16 +6259,16 @@ struct code_motion_path_driver_info_def fur_hooks = {
};
/* Traverse all successors of INSN. For each successor that is SUCCS_NORMAL
- code_motion_path_driver is called recursively. Original operation
- was found at least on one path that is starting with one of INSN's
+ code_motion_path_driver is called recursively. The original operation
+ was found on at least one path starting with one of INSN's
successors (this fact is asserted). ORIG_OPS are the expressions we're looking
for, PATH is the path we've traversed, STATIC_PARAMS is the parameters
- of either move_op or find_used_regs depending on the caller.
+ of either move_op or find_used_regs depending on the caller.
Return 0 if we haven't found the expression, 1 if we found it, -1 if we don't
know for sure at this point. */
static int
-code_motion_process_successors (insn_t insn, av_set_t orig_ops,
+code_motion_process_successors (insn_t insn, av_set_t orig_ops,
ilist_t path, void *static_params)
{
int res = 0;
@@ -6285,19 +6285,19 @@ code_motion_process_successors (insn_t insn, av_set_t orig_ops,
lparams.c_expr_merged = NULL;
/* We need to process only NORMAL succs for move_op, and collect live
- registers from ALL branches (including those leading out of the
- region) for find_used_regs.
+ registers from ALL branches (including those leading out of the
+ region) for find_used_regs.
In move_op, there can be a case when insn's bb number has changed
- due to created bookkeeping. This happens very rare, as we need to
- move expression from the beginning to the end of the same block.
- Rescan successors in this case. */
+ due to created bookkeeping. This happens very rarely, as we need to
+ move an expression from the beginning to the end of the same block.
+ Rescan successors in this case. */
rescan:
bb = BLOCK_FOR_INSN (insn);
- old_index = bb->index;
+ old_index = bb->index;
old_succs = EDGE_COUNT (bb->succs);
-
+
FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
{
int b;
@@ -6308,7 +6308,7 @@ code_motion_process_successors (insn_t insn, av_set_t orig_ops,
/* Go deep into recursion only for NORMAL edges (non-backedges within the
current region). */
if (succ_i.current_flags == SUCCS_NORMAL)
- b = code_motion_path_driver (succ, orig_ops, path, &lparams,
+ b = code_motion_path_driver (succ, orig_ops, path, &lparams,
static_params);
else
b = 0;
@@ -6330,20 +6330,20 @@ code_motion_process_successors (insn_t insn, av_set_t orig_ops,
}
#ifdef ENABLE_CHECKING
- /* Here, RES==1 if original expr was found at least for one of the
+ /* Here, RES==1 if the original expr was found for at least one of the
successors. After the loop, RES may happen to have zero value
- only if at some point the expr searched is present in av_set, but is
- not found below. In most cases, this situation is an error.
+ only if at some point the expr searched is present in av_set, but is
+ not found below. In most cases, this situation is an error.
The exception is when the original operation is blocked by
bookkeeping generated for another fence or for another path in current
move_op. */
- gcc_assert (res == 1
- || (res == 0
+ gcc_assert (res == 1
+ || (res == 0
&& av_set_could_be_blocked_by_bookkeeping_p (orig_ops,
static_params))
|| res == -1);
#endif
-
+
/* Merge data, clean up, etc. */
if (res != -1 && code_motion_path_driver_info->after_merge_succs)
code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
@@ -6352,8 +6352,8 @@ code_motion_process_successors (insn_t insn, av_set_t orig_ops,
}
-/* Perform a cleanup when the driver is about to terminate. ORIG_OPS_P
- is the pointer to the av set with expressions we were looking for,
+/* Perform a cleanup when the driver is about to terminate. ORIG_OPS_P
+ is the pointer to the av set with expressions we were looking for,
PATH_P is the pointer to the traversed path. */
static inline void
code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
@@ -6362,20 +6362,20 @@ code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
av_set_clear (orig_ops_p);
}
-/* The driver function that implements move_op or find_used_regs
- functionality dependent whether code_motion_path_driver_INFO is set to
- &MOVE_OP_HOOKS or &FUR_HOOKS. This function implements the common parts
+/* The driver function that implements move_op or find_used_regs
+ functionality, depending on whether code_motion_path_driver_INFO is set to
+ &MOVE_OP_HOOKS or &FUR_HOOKS. This function implements the common parts
of code (CFG traversal etc) that are shared among both functions. INSN
is the insn we're starting the search from, ORIG_OPS are the expressions
we're searching for, PATH is traversed path, LOCAL_PARAMS_IN are local
parameters of the driver, and STATIC_PARAMS are static parameters of
- the caller.
+ the caller.
Returns whether original instructions were found. Note that top-level
code_motion_path_driver always returns true. */
static int
-code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
- cmpd_local_params_p local_params_in,
+code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
+ cmpd_local_params_p local_params_in,
void *static_params)
{
expr_t expr = NULL;
@@ -6401,7 +6401,7 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn));
return false;
}
-
+
/* The block can have invalid av set, in which case it was created earlier
during move_op. Return immediately. */
if (sel_bb_head_p (insn))
@@ -6418,19 +6418,19 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
{
/* We have already found an original operation on this branch, do not
go any further and just return TRUE here. If we don't stop here,
- function can have exponential behaviour even on the small code
+ function can have exponential behaviour even on the small code
with many different paths (e.g. with data speculation and
recovery blocks). */
if (sched_verbose >= 6)
sel_print ("Block %d already visited in this traversal\n", bb->index);
if (code_motion_path_driver_info->on_enter)
- return code_motion_path_driver_info->on_enter (insn,
+ return code_motion_path_driver_info->on_enter (insn,
local_params_in,
- static_params,
+ static_params,
true);
}
}
-
+
if (code_motion_path_driver_info->on_enter)
code_motion_path_driver_info->on_enter (insn, local_params_in,
static_params, false);
@@ -6449,12 +6449,12 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
}
/* For non-speculative insns we have to leave only one form of the
- original operation, because if we don't, we may end up with
+ original operation, because if we don't, we may end up with
different C_EXPRes and, consequently, with bookkeepings for different
expression forms along the same code motion path. That may lead to
- generation of incorrect code. So for each code motion we stick to
- the single form of the instruction, except for speculative insns
- which we need to keep in different forms with all speculation
+ generation of incorrect code. So for each code motion we stick to
+ the single form of the instruction, except for speculative insns
+ which we need to keep in different forms with all speculation
types. */
av_set_leave_one_nonspec (&orig_ops);
@@ -6468,7 +6468,7 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
bb_tail = sel_bb_end (bb);
/* Descend the basic block in search of the original expr; this part
- corresponds to the part of the original move_op procedure executed
+ corresponds to the part of the original move_op procedure executed
before the recursive call. */
for (;;)
{
@@ -6479,7 +6479,7 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
If this insn doesn't contain currently scheduling OP, then proceed
with searching and look at its successors. Operations we're searching
- for could have changed when moving up through this insn via
+ for could have changed when moving up through this insn via
substituting. In this case, perform unsubstitution on them first.
When traversing the DAG below this insn is finished, insert
@@ -6495,11 +6495,11 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
if (sched_verbose >= 6)
sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
- code_motion_path_driver_info->orig_expr_found
+ code_motion_path_driver_info->orig_expr_found
(insn, expr, local_params_in, static_params);
/* Step back, so on the way back we'll start traversing from the
- previous insn (or we'll see that it's bb_note and skip that
+ previous insn (or we'll see that it's bb_note and skip that
loop). */
if (insn == first_insn)
{
@@ -6513,17 +6513,17 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
{
/* We haven't found the original expr, continue descending the basic
block. */
- if (code_motion_path_driver_info->orig_expr_not_found
+ if (code_motion_path_driver_info->orig_expr_not_found
(insn, orig_ops, static_params))
{
- /* Av set ops could have been changed when moving through this
+ /* Av set ops could have been changed when moving through this
insn. To find them below it, we have to un-substitute them. */
undo_transformations (&orig_ops, insn);
}
else
{
/* Clean up and return, if the hook tells us to do so. It may
- happen if we've encountered the previously created
+ happen if we've encountered the previously created
bookkeeping. */
code_motion_path_driver_cleanup (&orig_ops, &path);
return -1;
@@ -6539,7 +6539,7 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
insn = NEXT_INSN (insn);
}
- /* Here INSN either points to the insn before the original insn (may be
+ /* Here INSN either points to the insn before the original insn (may be
bb_note, if original insn was a bb_head) or to the bb_end. */
if (!expr)
{
@@ -6552,9 +6552,9 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
if (insn != first_insn)
ilist_add (&path, insn);
- /* Process_successors should be able to find at least one
- successor for which code_motion_path_driver returns TRUE. */
- res = code_motion_process_successors (insn, orig_ops,
+ /* Process_successors should be able to find at least one
+ successor for which code_motion_path_driver returns TRUE. */
+ res = code_motion_process_successors (insn, orig_ops,
path, static_params);
/* Remove bb tail from path. */
@@ -6564,10 +6564,10 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
if (res != 1)
{
/* This is the case when one of the original expr is no longer available
- due to bookkeeping created on this branch with the same register.
+ due to bookkeeping created on this branch with the same register.
In the original algorithm, which doesn't have update_data_sets call
- on a bookkeeping block, it would simply result in returning
- FALSE when we've encountered a previously generated bookkeeping
+ on a bookkeeping block, it would simply result in returning
+ FALSE when we've encountered a previously generated bookkeeping
insn in moveop_orig_expr_not_found. */
code_motion_path_driver_cleanup (&orig_ops, &path);
return res;
@@ -6577,23 +6577,23 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
/* Don't need it any more. */
av_set_clear (&orig_ops);
- /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
+ /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
the beginning of the basic block. */
before_first = PREV_INSN (first_insn);
while (insn != before_first)
- {
+ {
if (code_motion_path_driver_info->ascend)
code_motion_path_driver_info->ascend (insn, static_params);
insn = PREV_INSN (insn);
}
-
+
/* Now we're at the bb head. */
insn = first_insn;
ilist_remove (&path);
local_params_in->removed_last_insn = removed_last_insn;
code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params);
-
+
/* This should be the very last operation as at bb head we could change
the numbering by creating bookkeeping blocks. */
if (removed_last_insn)
@@ -6602,15 +6602,15 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
return true;
}
-/* Move up the operations from ORIG_OPS set traversing the dag starting
+/* Move up the operations from ORIG_OPS set traversing the dag starting
from INSN. PATH represents the edges traversed so far.
DEST is the register chosen for scheduling the current expr. Insert
bookkeeping code in the join points. EXPR_VLIW is the chosen expression,
- C_EXPR is what it looks like at the given cfg point.
+ C_EXPR is what it looks like at the given cfg point.
Set *SHOULD_MOVE to indicate whether we have only disconnected
one of the insns found.
- Returns whether original instructions were found, which is asserted
+ Returns whether original instructions were found, which is asserted
to be true in the caller. */
static bool
move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
@@ -6620,7 +6620,7 @@ move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
struct cmpd_local_params lparams;
bool res;
- /* Init params for code_motion_path_driver. */
+ /* Init params for code_motion_path_driver. */
sparams.dest = dest;
sparams.c_expr = c_expr;
sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
@@ -6632,7 +6632,7 @@ move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
/* We haven't visited any blocks yet. */
bitmap_clear (code_motion_visited_blocks);
-
+
/* Set appropriate hooks and data. */
code_motion_path_driver_info = &move_op_hooks;
res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
@@ -6651,8 +6651,8 @@ move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
/* Current number of seqno used in init_seqno and init_seqno_1. */
static int cur_seqno;
-/* A helper for init_seqno. Traverse the region starting from BB and
- compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
+/* A helper for init_seqno. Traverse the region starting from BB and
+ compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
Clear visited blocks from BLOCKS_TO_RESCHEDULE. */
static void
init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
@@ -6666,7 +6666,7 @@ init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
if (blocks_to_reschedule)
bitmap_clear_bit (blocks_to_reschedule, bb->index);
- FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
+ FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
{
basic_block succ = BLOCK_FOR_INSN (succ_insn);
@@ -6687,10 +6687,10 @@ init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
}
/* Initialize seqnos for the current region. NUMBER_OF_INSNS is the number
- of instructions in the region, BLOCKS_TO_RESCHEDULE contains blocks on
+ of instructions in the region, BLOCKS_TO_RESCHEDULE contains blocks on
which we're rescheduling when pipelining, FROM is the block where
traversing region begins (it may not be the head of the region when
- pipelining, but the head of the loop instead).
+ pipelining, but the head of the loop instead).
Returns the maximal seqno found. */
static int
@@ -6731,7 +6731,7 @@ sel_setup_region_sched_flags (void)
{
enable_schedule_as_rhs_p = 1;
bookkeeping_p = 1;
- pipelining_p = (bookkeeping_p
+ pipelining_p = (bookkeeping_p
&& (flag_sel_sched_pipelining != 0)
&& current_loop_nest != NULL);
max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
@@ -6812,7 +6812,7 @@ sel_region_init (int rgn)
rgn_setup_region (rgn);
- /* Even if sched_is_disabled_for_current_region_p() is true, we still
+ /* Even if sched_is_disabled_for_current_region_p() is true, we still
do region initialization here so the region can be bundled correctly,
but we'll skip the scheduling in sel_sched_region (). */
if (current_region_empty_p ())
@@ -6861,10 +6861,10 @@ sel_region_init (int rgn)
: 0);
if (current_nr_blocks == header + 1)
- update_liveness_on_insn
+ update_liveness_on_insn
(sel_bb_head (BASIC_BLOCK (BB_TO_BLOCK (header))));
}
-
+
/* Set hooks so that no newly generated insn will go out unnoticed. */
sel_register_cfg_hooks ();
@@ -6908,7 +6908,7 @@ simplify_changed_insns (void)
{
expr_t expr = INSN_EXPR (insn);
- if (EXPR_WAS_SUBSTITUTED (expr))
+ if (EXPR_WAS_SUBSTITUTED (expr))
validate_simplify_insn (insn);
}
}
@@ -6963,7 +6963,7 @@ reset_sched_cycles_in_current_ebb (void)
state_reset (curr_state);
advance_state (curr_state);
-
+
for (insn = current_sched_info->head;
insn != current_sched_info->next_tail;
insn = NEXT_INSN (insn))
@@ -6992,7 +6992,7 @@ reset_sched_cycles_in_current_ebb (void)
on the cycle. */
haifa_cost = 1;
else
- /* This is a use/clobber insn. It should not change
+ /* This is a use/clobber insn. It should not change
cost. */
haifa_cost = 0;
}
@@ -7022,11 +7022,11 @@ reset_sched_cycles_in_current_ebb (void)
debug_state (curr_state);
}
- /* The DFA may report that e.g. insn requires 2 cycles to be
- issued, but on the next cycle it says that insn is ready
+ /* The DFA may report that e.g. insn requires 2 cycles to be
+ issued, but on the next cycle it says that insn is ready
to go. Check this here. */
if (!after_stall
- && real_insn
+ && real_insn
&& haifa_cost > 0
&& estimate_insn_cost (insn, curr_state) == 0)
break;
@@ -7109,7 +7109,7 @@ put_TImodes (void)
}
}
-/* Perform MD_FINISH on EBBs comprising current region. When
+/* Perform MD_FINISH on EBBs comprising current region. When
RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler
to produce correct sched cycles on insns. */
static void
@@ -7155,7 +7155,7 @@ sel_region_target_finish (bool reset_sched_cycles_p)
}
/* Free the scheduling data for the current region. When RESET_SCHED_CYCLES_P
- is true, make an additional pass emulating scheduler to get correct insn
+ is true, make an additional pass emulating scheduler to get correct insn
cycles for md_finish calls. */
static void
sel_region_finish (bool reset_sched_cycles_p)
@@ -7236,7 +7236,7 @@ schedule_on_fences (flist_t fences, int max_seqno,
int seqno = 0;
flist_t fences2;
bool first_p = true;
-
+
/* Choose the next fence group to schedule.
The fact that insn can be scheduled only once
on the cycle is guaranteed by two properties:
@@ -7272,7 +7272,7 @@ schedule_on_fences (flist_t fences, int max_seqno,
}
/* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
- don't need to keep bookkeeping-invalidated and target-unavailable
+ don't need to keep bookkeeping-invalidated and target-unavailable
vinsns any more. */
vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
vinsn_vec_clear (&vec_target_unavailable_vinsns);
@@ -7288,7 +7288,7 @@ find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
while ((fences = FLIST_NEXT (fences)))
{
int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
-
+
if (*min_seqno > seqno)
*min_seqno = seqno;
else if (*max_seqno < seqno)
@@ -7297,7 +7297,7 @@ find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
}
/* Calculate new fences from FENCES. */
-static flist_t
+static flist_t
calculate_new_fences (flist_t fences, int orig_max_seqno)
{
flist_t old_fences = fences;
@@ -7308,7 +7308,7 @@ calculate_new_fences (flist_t fences, int orig_max_seqno)
{
fence_t fence = FLIST_FENCE (fences);
insn_t insn;
-
+
if (!FENCE_BNDS (fence))
{
/* This fence doesn't have any successors. */
@@ -7322,7 +7322,7 @@ calculate_new_fences (flist_t fences, int orig_max_seqno)
gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
if (sched_verbose >= 1)
- sel_print ("Fence %d[%d] has not changed\n",
+ sel_print ("Fence %d[%d] has not changed\n",
INSN_UID (insn),
BLOCK_NUM (insn));
move_fence_to_fences (fences, new_fences);
@@ -7340,14 +7340,14 @@ calculate_new_fences (flist_t fences, int orig_max_seqno)
are the minimum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is
the highest seqno used in a region. Return the updated highest seqno. */
static int
-update_seqnos_and_stage (int min_seqno, int max_seqno,
- int highest_seqno_in_use,
+update_seqnos_and_stage (int min_seqno, int max_seqno,
+ int highest_seqno_in_use,
ilist_t *pscheduled_insns)
{
int new_hs;
ilist_iterator ii;
insn_t insn;
-
+
/* Actually, new_hs is the seqno of the instruction, that was
scheduled first (i.e. it is the first one in SCHEDULED_INSNS). */
if (*pscheduled_insns)
@@ -7378,9 +7378,9 @@ update_seqnos_and_stage (int min_seqno, int max_seqno,
return new_hs;
}
-/* The main driver for scheduling a region. This function is responsible
- for correct propagation of fences (i.e. scheduling points) and creating
- a group of parallel insns at each of them. It also supports
+/* The main driver for scheduling a region. This function is responsible
+ for correct propagation of fences (i.e. scheduling points) and creating
+ a group of parallel insns at each of them. It also supports
pipelining. ORIG_MAX_SEQNO is the maximal seqno before this pass
of scheduling. */
static void
@@ -7417,8 +7417,8 @@ sel_sched_region_2 (int orig_max_seqno)
stat_substitutions_total);
}
-/* Schedule a region. When pipelining, search for possibly never scheduled
- bookkeeping code and schedule it. Reschedule pipelined code without
+/* Schedule a region. When pipelining, search for possibly never scheduled
+ bookkeeping code and schedule it. Reschedule pipelined code without
pipelining after. */
static void
sel_sched_region_1 (void)
@@ -7426,9 +7426,9 @@ sel_sched_region_1 (void)
int number_of_insns;
int orig_max_seqno;
- /* Remove empty blocks that might be in the region from the beginning.
+ /* Remove empty blocks that might be in the region from the beginning.
We need to save sched_max_luid before that, as it actually shows
- the number of insns in the region, and purge_empty_blocks can
+ the number of insns in the region, and purge_empty_blocks can
alter it. */
number_of_insns = sched_max_luid - 1;
purge_empty_blocks ();
@@ -7495,7 +7495,7 @@ sel_sched_region_1 (void)
{
bb = EBB_FIRST_BB (i);
- /* While pipelining outer loops, skip bundling for loop
+ /* While pipelining outer loops, skip bundling for loop
preheaders. Those will be rescheduled in the outer
loop. */
if (sel_is_loop_preheader_p (bb))
@@ -7503,7 +7503,7 @@ sel_sched_region_1 (void)
clear_outdated_rtx_info (bb);
continue;
}
-
+
if (bitmap_bit_p (blocks_to_reschedule, bb->index))
{
flist_tail_init (new_fences);
@@ -7514,13 +7514,13 @@ sel_sched_region_1 (void)
bitmap_set_bit (forced_ebb_heads, bb->index);
bitmap_clear_bit (blocks_to_reschedule, bb->index);
-
+
gcc_assert (fences == NULL);
init_fences (bb_note (bb));
-
+
sel_sched_region_2 (orig_max_seqno);
-
+
do_p = true;
break;
}
@@ -7550,7 +7550,7 @@ sel_sched_region (int rgn)
else
/* Force initialization of INSN_SCHED_CYCLEs for correct bundling. */
reset_sched_cycles_p = true;
-
+
sel_region_finish (reset_sched_cycles_p);
}
@@ -7571,7 +7571,7 @@ sel_global_init (void)
sched_init_bbs ();
/* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass. */
after_recovery = 0;
- can_issue_more = issue_rate;
+ can_issue_more = issue_rate;
sched_extend_target ();
sched_deps_init (true);
diff --git a/gcc/sel-sched.h b/gcc/sel-sched.h
index 8e0b5d50c98..d2dce32432e 100644
--- a/gcc/sel-sched.h
+++ b/gcc/sel-sched.h
@@ -1,4 +1,4 @@
-/* Instruction scheduling pass.
+/* Instruction scheduling pass.
Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
diff --git a/gcc/sese.c b/gcc/sese.c
index bf315f97a26..338f482eec1 100644
--- a/gcc/sese.c
+++ b/gcc/sese.c
@@ -348,7 +348,7 @@ free_sese (sese region)
SESE_LOOPS (region) = BITMAP_ALLOC (NULL);
VEC_free (tree, heap, SESE_PARAMS (region));
- VEC_free (loop_p, heap, SESE_LOOP_NEST (region));
+ VEC_free (loop_p, heap, SESE_LOOP_NEST (region));
if (SESE_PARAMS_INDEX (region))
htab_delete (SESE_PARAMS_INDEX (region));
@@ -599,7 +599,7 @@ sese_adjust_liveout_phis (sese region, htab_t rename_map, basic_block bb,
/* Rename the SSA_NAMEs used in STMT and that appear in MAP. */
-static void
+static void
rename_variables_in_stmt (gimple stmt, htab_t map, gimple_stmt_iterator *insert_gsi)
{
ssa_op_iter iter;
@@ -723,16 +723,16 @@ expand_scalar_variables_call (gimple stmt, basic_block bb, sese region,
static tree
expand_scalar_variables_ssa_name (tree op0, basic_block bb,
- sese region, htab_t map,
+ sese region, htab_t map,
gimple_stmt_iterator *gsi)
{
gimple def_stmt;
tree new_op;
-
+
if (is_parameter (region, op0)
|| is_iv (op0))
return get_rename (map, op0);
-
+
def_stmt = SSA_NAME_DEF_STMT (op0);
/* Check whether we already have a rename for OP0. */
@@ -741,7 +741,7 @@ expand_scalar_variables_ssa_name (tree op0, basic_block bb,
if (new_op != op0
&& gimple_bb (SSA_NAME_DEF_STMT (new_op)) == bb)
return new_op;
-
+
if (gimple_bb (def_stmt) == bb)
{
/* If the defining statement is in the basic block already
@@ -788,8 +788,8 @@ expand_scalar_variables_ssa_name (tree op0, basic_block bb,
used to translate the names of induction variables. */
static tree
-expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
- tree op1, basic_block bb, sese region,
+expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
+ tree op1, basic_block bb, sese region,
htab_t map, gimple_stmt_iterator *gsi)
{
if (TREE_CODE_CLASS (code) == tcc_constant
@@ -859,7 +859,7 @@ expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
enum tree_code op0_code = TREE_CODE (op0);
tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
NULL, bb, region, map, gsi);
-
+
return fold_build1 (code, type, op0_expr);
}
@@ -895,7 +895,7 @@ expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
only induction variables from the generated code: MAP contains the
induction variables renaming mapping, and is used to translate the
names of induction variables. */
-
+
static void
expand_scalar_variables_stmt (gimple stmt, basic_block bb, sese region,
htab_t map, gimple_stmt_iterator *gsi)
@@ -958,11 +958,11 @@ expand_scalar_variables_stmt (gimple stmt, basic_block bb, sese region,
induction variables renaming mapping, and is used to translate the
names of induction variables. */
-static void
+static void
expand_scalar_variables (basic_block bb, sese region, htab_t map)
{
gimple_stmt_iterator gsi;
-
+
for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
{
gimple stmt = gsi_stmt (gsi);
@@ -973,12 +973,12 @@ expand_scalar_variables (basic_block bb, sese region, htab_t map)
/* Rename all the SSA_NAMEs from block BB according to the MAP. */
-static void
+static void
rename_variables (basic_block bb, htab_t map)
{
gimple_stmt_iterator gsi;
gimple_stmt_iterator insert_gsi = gsi_start_bb (bb);
-
+
for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
rename_variables_in_stmt (gsi_stmt (gsi), map, &insert_gsi);
}
@@ -1006,7 +1006,7 @@ get_true_edge_from_guard_bb (basic_block bb)
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->flags & EDGE_TRUE_VALUE)
+ if (e->flags & EDGE_TRUE_VALUE)
return e;
gcc_unreachable ();
@@ -1022,7 +1022,7 @@ get_false_edge_from_guard_bb (basic_block bb)
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
- if (!(e->flags & EDGE_TRUE_VALUE))
+ if (!(e->flags & EDGE_TRUE_VALUE))
return e;
gcc_unreachable ();
@@ -1323,7 +1323,7 @@ graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb, htab_t map)
/* Copies BB and includes in the copied BB all the statements that can
be reached following the use-def chains from the memory accesses,
and returns the next edge following this new block. */
-
+
edge
copy_bb_and_scalar_dependences (basic_block bb, sese region,
edge next_e, htab_t map)
diff --git a/gcc/sese.h b/gcc/sese.h
index 73d5adec45d..c126a6964f8 100644
--- a/gcc/sese.h
+++ b/gcc/sese.h
@@ -101,7 +101,7 @@ bb_in_region (basic_block bb, basic_block entry, basic_block exit)
predecessors of EXIT are dominated by ENTRY. */
FOR_EACH_EDGE (e, ei, exit->preds)
dominated_by_p (CDI_DOMINATORS, e->src, entry);
-
+
/* Check that there are no edges going out of the region: the
entry is post-dominated by the exit. FIXME: This cannot be
checked right now as the CDI_POST_DOMINATORS are needed. */
@@ -138,7 +138,7 @@ defined_in_sese_p (tree name, sese region)
/* Returns true when LOOP is in REGION. */
-static inline bool
+static inline bool
loop_in_sese_p (struct loop *loop, sese region)
{
return (bb_in_sese_p (loop->header, region)
@@ -153,7 +153,7 @@ loop_in_sese_p (struct loop *loop, sese region)
loop_0
loop_1
{
- S0
+ S0
<- region start
S1
@@ -162,7 +162,7 @@ loop_in_sese_p (struct loop *loop, sese region)
S3
<- region end
- }
+ }
loop_0 does not exist in the region -> invalid
loop_1 exists, but is not completely contained in the region -> depth 0
@@ -376,7 +376,7 @@ static inline rename_map_elt
new_rename_map_elt (tree old_name, tree expr)
{
rename_map_elt res;
-
+
res = XNEW (struct rename_map_elt_s);
res->old_name = old_name;
res->expr = expr;
@@ -402,7 +402,7 @@ static inline ivtype_map_elt
new_ivtype_map_elt (const char *cloog_iv, tree type)
{
ivtype_map_elt res;
-
+
res = XNEW (struct ivtype_map_elt_s);
res->cloog_iv = cloog_iv;
res->type = type;
@@ -429,19 +429,19 @@ typedef struct gimple_bb
/* Lists containing the restrictions of the conditional statements
dominating this bb. This bb can only be executed, if all conditions
are true.
-
+
Example:
-
+
for (i = 0; i <= 20; i++)
{
A
-
+
if (2i <= 8)
B
}
-
+
So for B there is an additional condition (2i <= 8).
-
+
List of COND_EXPR and SWITCH_EXPR. A COND_EXPR is true only if the
corresponding element in CONDITION_CASES is not NULL_TREE. For a
SWITCH_EXPR the corresponding element in CONDITION_CASES is a
@@ -466,7 +466,7 @@ gbb_loop (struct gimple_bb *gbb)
return GBB_BB (gbb)->loop_father;
}
-/* Returns the gimple loop that corresponds to the loop_iterator_INDEX.
+/* Returns the gimple loop that corresponds to the loop_iterator_INDEX.
If there is no corresponding gimple loop, we return NULL. */
static inline loop_p
@@ -491,7 +491,7 @@ nb_common_loops (sese region, gimple_bb_p gbb1, gimple_bb_p gbb2)
loop_p l1 = gbb_loop (gbb1);
loop_p l2 = gbb_loop (gbb2);
loop_p common = find_common_loop (l1, l2);
-
+
return sese_loop_depth (region, common);
}
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index c0427770267..5e384d4e8f4 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -86,7 +86,7 @@ mode_signbit_p (enum machine_mode mode, const_rtx x)
width = GET_MODE_BITSIZE (mode);
if (width == 0)
return false;
-
+
if (width <= HOST_BITS_PER_WIDE_INT
&& CONST_INT_P (x))
val = INTVAL (x);
@@ -594,7 +594,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
/* (not (ashiftrt foo C)) where C is the number of bits in FOO
minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
so we can perform the above simplification. */
-
+
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
&& GET_CODE (XEXP (op, 1))
@@ -658,11 +658,11 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
if (GET_CODE (op) == PLUS
&& XEXP (op, 1) == const1_rtx)
return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
+
/* Similarly, (neg (not X)) is (plus X 1). */
if (GET_CODE (op) == NOT)
return plus_constant (XEXP (op, 0), 1);
-
+
/* (neg (minus X Y)) can become (minus Y X). This transformation
isn't safe for modes with signed zeros, since if X and Y are
both +0, (minus Y X) is the same as (minus X Y). If the
@@ -672,7 +672,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
-
+
if (GET_CODE (op) == PLUS
&& !HONOR_SIGNED_ZEROS (mode)
&& !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
@@ -725,7 +725,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
&& INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
-
+
/* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
if (GET_CODE (op) == XOR
&& XEXP (op, 1) == const1_rtx
@@ -798,7 +798,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
replace the TRUNCATE with a SUBREG. Note that this is also
valid if TRULY_NOOP_TRUNCATION is false for the corresponding
modes we just have to apply a different definition for
- truncation. But don't do this for an (LSHIFTRT (MULT ...))
+ truncation. But don't do this for an (LSHIFTRT (MULT ...))
since this will cause problems with the umulXi3_highpart
patterns. */
if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
@@ -1059,7 +1059,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
default:
break;
}
-
+
return 0;
}
@@ -2220,7 +2220,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
return op1;
-
+
/* Canonicalize (X & C1) | C2. */
if (GET_CODE (op0) == AND
&& CONST_INT_P (trueop1)
@@ -3442,23 +3442,23 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
arg0s = arg0;
arg1s = arg1;
}
-
+
/* Compute the value of the arithmetic. */
-
+
switch (code)
{
case PLUS:
val = arg0s + arg1s;
break;
-
+
case MINUS:
val = arg0s - arg1s;
break;
-
+
case MULT:
val = arg0s * arg1s;
break;
-
+
case DIV:
if (arg1s == 0
|| (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
@@ -3466,7 +3466,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
return 0;
val = arg0s / arg1s;
break;
-
+
case MOD:
if (arg1s == 0
|| (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
@@ -3474,7 +3474,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
return 0;
val = arg0s % arg1s;
break;
-
+
case UDIV:
if (arg1 == 0
|| (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
@@ -3482,7 +3482,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
return 0;
val = (unsigned HOST_WIDE_INT) arg0 / arg1;
break;
-
+
case UMOD:
if (arg1 == 0
|| (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
@@ -3490,19 +3490,19 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
return 0;
val = (unsigned HOST_WIDE_INT) arg0 % arg1;
break;
-
+
case AND:
val = arg0 & arg1;
break;
-
+
case IOR:
val = arg0 | arg1;
break;
-
+
case XOR:
val = arg0 ^ arg1;
break;
-
+
case LSHIFTRT:
case ASHIFT:
case ASHIFTRT:
@@ -3517,56 +3517,56 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
return 0;
-
+
val = (code == ASHIFT
? ((unsigned HOST_WIDE_INT) arg0) << arg1
: ((unsigned HOST_WIDE_INT) arg0) >> arg1);
-
+
/* Sign-extend the result for arithmetic right shifts. */
if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
val |= ((HOST_WIDE_INT) -1) << (width - arg1);
break;
-
+
case ROTATERT:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
| (((unsigned HOST_WIDE_INT) arg0) >> arg1));
break;
-
+
case ROTATE:
if (arg1 < 0)
return 0;
-
+
arg1 %= width;
val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
| (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
break;
-
+
case COMPARE:
/* Do nothing here. */
return 0;
-
+
case SMIN:
val = arg0s <= arg1s ? arg0s : arg1s;
break;
-
+
case UMIN:
val = ((unsigned HOST_WIDE_INT) arg0
<= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SMAX:
val = arg0s > arg1s ? arg0s : arg1s;
break;
-
+
case UMAX:
val = ((unsigned HOST_WIDE_INT) arg0
> (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
case SS_PLUS:
case US_PLUS:
case SS_MINUS:
@@ -3579,7 +3579,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
case US_ASHIFT:
/* ??? There are simplifications that can be done. */
return 0;
-
+
default:
gcc_unreachable ();
}
@@ -3808,7 +3808,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
}
else
tem = simplify_binary_operation (ncode, mode, lhs, rhs);
-
+
/* Reject "simplifications" that just wrap the two
arguments in a CONST. Failure to do so can result
in infinite recursion with simplify_binary_operation
@@ -3855,7 +3855,7 @@ simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
&& CONSTANT_P (ops[0].op)
&& ops[0].neg)
return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
-
+
/* We suppressed creation of trivial CONST expressions in the
combination loop to avoid recursion. Create one manually now.
The combination loop should have ensured that there is exactly
@@ -3942,7 +3942,7 @@ simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
}
#else
return NULL_RTX;
-#endif
+#endif
}
if (VECTOR_MODE_P (mode))
{
@@ -4179,7 +4179,7 @@ simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
return NULL_RTX;
}
-enum
+enum
{
CMP_EQ = 1,
CMP_LT = 2,
@@ -4191,7 +4191,7 @@ enum
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
- For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
+ For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
For floating-point comparisons, assume that the operands were ordered. */
@@ -4826,7 +4826,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
and then repacking them again for OUTERMODE. */
static rtx
-simplify_immed_subreg (enum machine_mode outermode, rtx op,
+simplify_immed_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
/* We support up to 512-bit values (for V8DFmode). */
@@ -4874,17 +4874,17 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
gcc_assert (BITS_PER_UNIT % value_bit == 0);
/* I don't know how to handle endianness of sub-units. */
gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
-
+
for (elem = 0; elem < num_elem; elem++)
{
unsigned char * vp;
rtx el = elems[elem];
-
+
/* Vectors are kept in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
@@ -4892,19 +4892,19 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
+ (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
vp = value + (bytele * BITS_PER_UNIT) / value_bit;
}
-
+
switch (GET_CODE (el))
{
case CONST_INT:
for (i = 0;
- i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
+ i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
i += value_bit)
*vp++ = INTVAL (el) >> i;
/* CONST_INTs are always logically sign-extended. */
for (; i < elem_bitsize; i += value_bit)
*vp++ = INTVAL (el) < 0 ? -1 : 0;
break;
-
+
case CONST_DOUBLE:
if (GET_MODE (el) == VOIDmode)
{
@@ -4950,7 +4950,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
ibase = i;
*vp++ = tmp[ibase / 32] >> i % 32;
}
-
+
/* It shouldn't matter what's done here, so fill it with
zero. */
for (; i < elem_bitsize; i += value_bit)
@@ -4976,7 +4976,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
*vp++ = 0;
}
break;
-
+
default:
gcc_unreachable ();
}
@@ -4988,7 +4988,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
will already have offset 0. */
if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
{
- unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
+ unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
- byte);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
@@ -5004,7 +5004,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
value_start = byte * (BITS_PER_UNIT / value_bit);
/* Re-pack the value. */
-
+
if (VECTOR_MODE_P (outermode))
{
num_elem = GET_MODE_NUNITS (outermode);
@@ -5028,12 +5028,12 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
for (elem = 0; elem < num_elem; elem++)
{
unsigned char *vp;
-
+
/* Vectors are stored in target memory order. (This is probably
a mistake.) */
{
unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
- unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
+ unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/ BITS_PER_UNIT);
unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
@@ -5056,7 +5056,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
for (; i < elem_bitsize; i += value_bit)
hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
<< (i - HOST_BITS_PER_WIDE_INT));
-
+
/* immed_double_const doesn't call trunc_int_for_mode. I don't
know why. */
if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
@@ -5067,13 +5067,13 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
return NULL_RTX;
}
break;
-
+
case MODE_FLOAT:
case MODE_DECIMAL_FLOAT:
{
REAL_VALUE_TYPE r;
long tmp[max_bitsize / 32];
-
+
/* real_from_target wants its input in words affected by
FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
and use WORDS_BIG_ENDIAN instead; see the documentation
@@ -5116,7 +5116,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
}
break;
-
+
default:
gcc_unreachable ();
}
@@ -5428,7 +5428,7 @@ simplify_subreg (enum machine_mode outermode, rtx op,
&& CONST_INT_P (XEXP (op, 1))
&& (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
&& INTVAL (XEXP (op, 1)) >= 0
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
&& byte == subreg_lowpart_offset (outermode, innermode))
{
int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
diff --git a/gcc/stack-ptr-mod.c b/gcc/stack-ptr-mod.c
index 2d9ce82c98e..e7d92a5e7b9 100644
--- a/gcc/stack-ptr-mod.c
+++ b/gcc/stack-ptr-mod.c
@@ -1,4 +1,4 @@
-/* Discover if the stack pointer is modified in a function.
+/* Discover if the stack pointer is modified in a function.
Copyright (C) 2007, 2008, 2009
Free Software Foundation, Inc.
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 5418fee50a0..14f13812b36 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -937,7 +937,7 @@ expand_asm_operands (tree string, tree outputs, tree inputs,
ASM_OPERANDS_INPUT (body, i) = op;
ASM_OPERANDS_INPUT_CONSTRAINT_EXP (body, i)
- = gen_rtx_ASM_INPUT (TYPE_MODE (type),
+ = gen_rtx_ASM_INPUT (TYPE_MODE (type),
ggc_strdup (constraints[i + noutputs]));
if (tree_conflicts_with_clobbers_p (val, &clobbered_regs))
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index e512db83979..6f08d8cdf97 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2098,7 +2098,7 @@ layout_type (tree type)
change the result of vector_mode_supported_p and have_regs_of_mode
on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
change on a per-function basis. */
-/* ??? Possibly a better solution is to run through all the types
+/* ??? Possibly a better solution is to run through all the types
referenced by a function and re-compute the TYPE_MODE once, rather
than make the TYPE_MODE macro call a function. */
diff --git a/gcc/store-motion.c b/gcc/store-motion.c
index 64260ac165f..4878729eec0 100644
--- a/gcc/store-motion.c
+++ b/gcc/store-motion.c
@@ -276,7 +276,7 @@ store_ops_ok (const_rtx x, int *regs_set)
}
/* Helper for extract_mentioned_regs. */
-
+
static int
extract_mentioned_regs_1 (rtx *loc, void *data)
{
@@ -803,7 +803,7 @@ insert_store (struct st_expr * expr, edge e)
if (!(tmp->flags & EDGE_FAKE))
{
int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
-
+
gcc_assert (index != EDGE_INDEX_NO_EDGE);
if (! TEST_BIT (st_insert_map[index], expr->index))
break;
@@ -1184,7 +1184,7 @@ one_store_motion_pass (void)
INDEX_EDGE (edge_list, x)->dest->index);
continue;
}
-
+
/* Now we want to insert the new stores which are going to be needed. */
FOR_EACH_BB (bb)
@@ -1246,8 +1246,8 @@ struct rtl_opt_pass pass_rtl_store_motion =
{
RTL_PASS,
"store_motion", /* name */
- gate_rtl_store_motion, /* gate */
- execute_rtl_store_motion, /* execute */
+ gate_rtl_store_motion, /* gate */
+ execute_rtl_store_motion, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
diff --git a/gcc/stringpool.c b/gcc/stringpool.c
index 072af72de8f..79f8cb2c8d1 100644
--- a/gcc/stringpool.c
+++ b/gcc/stringpool.c
@@ -215,7 +215,7 @@ gt_pch_n_S (const void *x)
to restore the string pool. */
struct GTY(()) string_pool_data {
- struct ht_identifier * *
+ struct ht_identifier * *
GTY((length ("%h.nslots"),
nested_ptr (union tree_node, "%h ? GCC_IDENT_TO_HT_IDENT (%h) : NULL",
"%h ? HT_IDENT_TO_GCC_IDENT (%h) : NULL")))
diff --git a/gcc/stub-objc.c b/gcc/stub-objc.c
index fc3d4169f0a..b7748f79c6e 100644
--- a/gcc/stub-objc.c
+++ b/gcc/stub-objc.c
@@ -55,7 +55,7 @@ void
objc_check_decl (tree ARG_UNUSED (decl))
{
}
-
+
int
objc_is_reserved_word (tree ARG_UNUSED (ident))
{
@@ -88,7 +88,7 @@ objc_rewrite_function_call (tree function, tree ARG_UNUSED (first_param))
tree
objc_message_selector (void)
-{
+{
return 0;
}
@@ -295,7 +295,7 @@ void
objc_begin_try_stmt (location_t ARG_UNUSED (try_locus), tree ARG_UNUSED (body))
{
}
-
+
void
objc_begin_catch_clause (tree ARG_UNUSED (decl))
{
@@ -324,4 +324,4 @@ objc_generate_write_barrier (tree ARG_UNUSED (lhs),
tree ARG_UNUSED (rhs))
{
return 0;
-}
+}
diff --git a/gcc/sync-builtins.def b/gcc/sync-builtins.def
index 86cb0aa3f16..b51d5dfbdaf 100644
--- a/gcc/sync-builtins.def
+++ b/gcc/sync-builtins.def
@@ -23,7 +23,7 @@ along with GCC; see the file COPYING3. If not see
DEF_SYNC_BUILTIN (ENUM, NAME, TYPE, ATTRS)
See builtins.def for details. */
-
+
/* Synchronization Primitives. The "_N" version is the one that the user
is supposed to be using. It's overloaded, and is resolved to one of the
"_1" through "_16" versions, plus some extra casts. */
diff --git a/gcc/target-def.h b/gcc/target-def.h
index ddab977aa2f..c57977b1277 100644
--- a/gcc/target-def.h
+++ b/gcc/target-def.h
@@ -392,8 +392,8 @@
default_builtin_vector_alignment_reachable
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM 0
#define TARGET_SUPPORT_VECTOR_MISALIGNMENT \
- default_builtin_support_vector_misalignment
-
+ default_builtin_support_vector_misalignment
+
#define TARGET_VECTORIZE \
{ \
diff --git a/gcc/target.h b/gcc/target.h
index 6d62d5207e5..477a512d703 100644
--- a/gcc/target.h
+++ b/gcc/target.h
@@ -474,7 +474,7 @@ struct gcc_target
tree (* builtin_conversion) (unsigned, tree);
/* Target builtin that implements vector widening multiplication.
- builtin_mul_widen_even computes the element-by-element products
+ builtin_mul_widen_even computes the element-by-element products
for the even elements, and builtin_mul_widen_odd computes the
element-by-element products for the odd elements. */
tree (* builtin_mul_widen_even) (tree);
@@ -493,7 +493,7 @@ struct gcc_target
/* Return true if the target supports misaligned store/load of a
specific factor denoted in the third parameter. The last parameter
is true if the access is defined in a packed struct. */
- bool (* builtin_support_vector_misalignment) (enum machine_mode,
+ bool (* builtin_support_vector_misalignment) (enum machine_mode,
const_tree, int, bool);
} vectorize;
@@ -973,7 +973,7 @@ struct gcc_target
then it should be for the callee; otherwise for the caller. */
rtx (*static_chain) (const_tree fndecl, bool incoming_p);
- /* Fill in the trampoline at MEM with a call to FNDECL and a
+ /* Fill in the trampoline at MEM with a call to FNDECL and a
static chain value of CHAIN. */
void (*trampoline_init) (rtx mem, tree fndecl, rtx chain);
@@ -1035,7 +1035,7 @@ struct gcc_target
/* Return the smallest number of different values for which it is best to
use a jump-table instead of a tree of conditional branches. */
unsigned int (* case_values_threshold) (void);
-
+
/* Return true if a function must have and use a frame pointer. */
bool (* frame_pointer_required) (void);
@@ -1115,7 +1115,7 @@ struct gcc_target
/* Prefixes for proxy variable and template. */
const char *var_prefix;
const char *tmpl_prefix;
-
+
/* Function to generate field definitions of the proxy variable. */
tree (*var_fields) (tree, tree *);
@@ -1128,7 +1128,7 @@ struct gcc_target
/* Whether we can emit debug information for TLS vars. */
bool debug_form_tls_address;
- } emutls;
+ } emutls;
struct target_option_hooks {
/* Function to validate the attribute((option(...))) strings or NULL. If
@@ -1160,8 +1160,8 @@ struct gcc_target
/* For targets that need to mark extra registers as live on entry to
the function, they should define this target hook and set their
- bits in the bitmap passed in. */
- void (*live_on_entry) (bitmap);
+ bits in the bitmap passed in. */
+ void (*live_on_entry) (bitmap);
/* True if unwinding tables should be generated by default. */
bool unwind_tables_default;
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index dfc470c869e..d619ae53c3e 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -396,7 +396,7 @@ default_fixed_point_supported_p (void)
/* NULL if INSN insn is valid within a low-overhead loop, otherwise returns
an error message.
-
+
This function checks whether a given INSN is valid within a low-overhead
loop. If INSN is invalid it returns the reason for that, otherwise it
returns NULL. A called function may clobber any special registers required
@@ -409,10 +409,10 @@ default_invalid_within_doloop (const_rtx insn)
{
if (CALL_P (insn))
return "Function call in loop.";
-
+
if (JUMP_TABLE_DATA_P (insn))
return "Computed branch in the loop.";
-
+
return NULL;
}
@@ -472,7 +472,7 @@ hook_int_CUMULATIVE_ARGS_mode_tree_bool_0 (
return 0;
}
-void
+void
hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
{
}
@@ -517,7 +517,7 @@ default_stack_protect_guard (void)
static GTY(()) tree stack_chk_fail_decl;
-tree
+tree
default_external_stack_protect_fail (void)
{
tree t = stack_chk_fail_decl;
@@ -815,7 +815,7 @@ default_builtin_vector_alignment_reachable (const_tree type, bool is_packed)
}
/* By default, assume that a target supports any factor of misalignment
- memory access if it supports the movmisalign pattern.
+ memory access if it supports the movmisalign pattern.
is_packed is true if the memory access is defined in a packed struct. */
bool
default_builtin_support_vector_misalignment (enum machine_mode mode,
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index 365496b9825..631bdf216df 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -80,7 +80,7 @@ extern bool default_builtin_vector_alignment_reachable (const_tree, bool);
extern bool
default_builtin_support_vector_misalignment (enum machine_mode mode,
const_tree,
- int, bool);
+ int, bool);
/* These are here, and not in hooks.[ch], because not all users of
hooks.h include tm.h, and thus we don't have CUMULATIVE_ARGS. */
diff --git a/gcc/timevar.c b/gcc/timevar.c
index d0aa15b5ca1..4ba2864a235 100644
--- a/gcc/timevar.c
+++ b/gcc/timevar.c
@@ -313,7 +313,7 @@ timevar_pop_1 (timevar_id_t timevar)
struct timevar_stack_def *popped = stack;
gcc_assert (&timevars[timevar] == stack->timevar);
-
+
/* What time is it? */
get_time (&now);
diff --git a/gcc/tlink.c b/gcc/tlink.c
index 969c75d0aa6..1894c6c709a 100644
--- a/gcc/tlink.c
+++ b/gcc/tlink.c
@@ -729,7 +729,7 @@ scan_linker_output (const char *fname)
if (sym && sym->tweaked)
{
error ("'%s' was assigned to '%s', but was not defined "
- "during recompilation, or vice versa",
+ "during recompilation, or vice versa",
sym->key, sym->file->key);
fclose (stream);
return 0;
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 44f10d66ffc..ccba7498c37 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -201,7 +201,7 @@ int optimize_size = 0;
/* True if this is the lto front end. This is used to disable
gimple generation and lowering passes that are normally run on the
output of a front end. These passes must be bypassed for lto since
- they have already been done before the gimple was written. */
+ they have already been done before the gimple was written. */
bool in_lto_p = false;
@@ -886,9 +886,9 @@ check_global_declaration_1 (tree decl)
&& ! (TREE_CODE (decl) == VAR_DECL && DECL_REGISTER (decl))
/* Otherwise, ask the language. */
&& lang_hooks.decls.warn_unused_global (decl))
- warning ((TREE_CODE (decl) == FUNCTION_DECL)
- ? OPT_Wunused_function
- : OPT_Wunused_variable,
+ warning ((TREE_CODE (decl) == FUNCTION_DECL)
+ ? OPT_Wunused_function
+ : OPT_Wunused_variable,
"%q+D defined but not used", decl);
}
@@ -1123,7 +1123,7 @@ compile_file (void)
/* Invoke registered plugin callbacks. */
invoke_plugin_callbacks (PLUGIN_FINISH_UNIT, NULL);
-
+
/* This must be at the end. Some target ports emit end of file directives
into the assembly file here, and hence we can not output anything to the
assembly file after this point. */
@@ -1312,7 +1312,7 @@ print_to_stderr (print_switch_type type, const char * text)
case SWITCH_TYPE_LINE_START:
return 0;
-
+
case SWITCH_TYPE_PASSED:
case SWITCH_TYPE_ENABLED:
fputc (' ', stderr);
@@ -1932,7 +1932,7 @@ process_options (void)
/* Unless over-ridden for the target, assume that all DWARF levels
may be emitted, if DWARF2_DEBUG is selected. */
- if (dwarf_strict < 0)
+ if (dwarf_strict < 0)
dwarf_strict = 0;
/* A lot of code assumes write_symbols == NO_DEBUG if the debugging
@@ -2439,7 +2439,7 @@ toplev_main (int argc, char **argv)
if (!exit_after_options)
do_compile ();
- if (warningcount || errorcount)
+ if (warningcount || errorcount)
print_ignored_options ();
/* Invoke registered plugin callbacks if any. */
diff --git a/gcc/toplev.h b/gcc/toplev.h
index 88ab7e8ecf0..53f981c7141 100644
--- a/gcc/toplev.h
+++ b/gcc/toplev.h
@@ -66,7 +66,7 @@ extern void error_at (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
extern void fatal_error (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2)
ATTRIBUTE_NORETURN;
/* Pass one of the OPT_W* from options.h as the second parameter. */
-extern bool pedwarn (location_t, int, const char *, ...)
+extern bool pedwarn (location_t, int, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool permerror (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
extern void sorry (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2);
diff --git a/gcc/tracer.c b/gcc/tracer.c
index 8f92482d000..918ed784537 100644
--- a/gcc/tracer.c
+++ b/gcc/tracer.c
@@ -306,7 +306,7 @@ tail_duplicate (void)
nduplicated += counts [bb2->index];
e = find_edge (bb, bb2);
-
+
copy = duplicate_block (bb2, e, bb);
flush_pending_stmts (e);
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index b67064b3735..1c3745ec730 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -1,18 +1,18 @@
/* Operations with affine combinations of trees.
Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -122,7 +122,7 @@ aff_combination_scale (aff_tree *comb, double_int scale)
comb->n++;
}
else
- comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
+ comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
double_int_to_tree (type, scale));
}
}
@@ -182,7 +182,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
else
elt = fold_build2 (MULT_EXPR, type,
fold_convert (type, elt),
- double_int_to_tree (type, scale));
+ double_int_to_tree (type, scale));
if (comb->rest)
comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
@@ -471,7 +471,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m)
/* Adds C * COEF * VAL to R. VAL may be NULL, in that case only
C * COEF is added to R. */
-
+
static void
aff_combination_add_product (aff_tree *c, double_int coef, tree val,
@@ -534,7 +534,7 @@ aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
/* Returns the element of COMB whose value is VAL, or NULL if no such
element exists. If IDX is not NULL, it is set to the index of VAL in
COMB. */
-
+
static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
@@ -803,7 +803,7 @@ print_aff (FILE *file, aff_tree *val)
{
fprintf (file, " [%d] = ", i);
print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);
-
+
fprintf (file, " * ");
dump_double_int (file, val->elts[i].coef, uns);
if (i != val->n - 1)
diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h
index 676ec8b1bac..0abda96a9da 100644
--- a/gcc/tree-affine.h
+++ b/gcc/tree-affine.h
@@ -1,18 +1,18 @@
/* Operations with affine combinations of trees.
Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -28,7 +28,7 @@ struct aff_comb_elt
{
/* The value of the element. */
tree val;
-
+
/* Its coefficient in the combination. */
double_int coef;
};
@@ -47,13 +47,13 @@ typedef struct affine_tree_combination
/* Elements and their coefficients. Type of elements may be different from
TYPE, but their sizes must be the same (STRIP_NOPS is applied to the
elements).
-
+
The coefficients are always sign extended from the precision of TYPE
(regardless of signedness of TYPE). */
struct aff_comb_elt elts[MAX_AFF_ELTS];
/* Remainder of the expression. Usually NULL, used only if there are more
- than MAX_AFF_ELTS elements. Type of REST will be either sizetype for
+ than MAX_AFF_ELTS elements. Type of REST will be either sizetype for
TYPE of POINTER_TYPEs or TYPE. */
tree rest;
} aff_tree;
diff --git a/gcc/tree-browser.def b/gcc/tree-browser.def
index 660792ef9a5..cd8145b684d 100644
--- a/gcc/tree-browser.def
+++ b/gcc/tree-browser.def
@@ -19,7 +19,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* First field in the following declarations is the code of the command
- used by the tree browser.
+ used by the tree browser.
Second field is what is parsed in order to recognize a command.
Third field is used for printing the help message. */
@@ -82,12 +82,12 @@ DEFTBCODE (TB_MIN, "min", "Field accessor.")
DEFTBCODE (TB_MAX, "max", "Field accessor.")
/* Searching commands. */
-DEFTBCODE (TB_SEARCH_CODE, "sc", "Search a node having a TREE_CODE given as a parameter.")
+DEFTBCODE (TB_SEARCH_CODE, "sc", "Search a node having a TREE_CODE given as a parameter.")
DEFTBCODE (TB_SEARCH_NAME, "sn", "Search an identifier having a name given as a parameter.")
/* Printing commands. */
-DEFTBCODE (TB_PRETTY_PRINT, "pp", "Pretty print current node.")
-DEFTBCODE (TB_PRINT, "p", "Prints the current node.")
+DEFTBCODE (TB_PRETTY_PRINT, "pp", "Pretty print current node.")
+DEFTBCODE (TB_PRINT, "p", "Prints the current node.")
/*
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
index b71978016c5..8b1ece89f56 100644
--- a/gcc/tree-call-cdce.c
+++ b/gcc/tree-call-cdce.c
@@ -4,17 +4,17 @@
Contributed by Xinliang David Li <davidxl@google.com>
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -49,7 +49,7 @@ along with GCC; see the file COPYING3. If not see
not used, the compiler can still not eliminate the call without
powerful interprocedural analysis to prove that the errno is not
checked. However, if the conditions under which the error occurs
- are known, the compiler can conditionally dead code eliminate the
+ are known, the compiler can conditionally dead code eliminate the
calls by shrink-wrapping the semi-dead calls into the error condition:
built_in_call (args)
@@ -65,7 +65,7 @@ along with GCC; see the file COPYING3. If not see
With this change, call to log (x) is effectively eliminated, as
in majority of the cases, log won't be called with x out of
range. The branch is totally predictable, so the branch cost
- is low.
+ is low.
Note that library functions are not supposed to clear errno to zero without
error. See IEEE Std 1003.1, section 2.3 Error Numbers, and section 7.5:3 of
@@ -81,12 +81,12 @@ along with GCC; see the file COPYING3. If not see
inlining). */
-/* A structure for representing input domain of
+/* A structure for representing input domain of
a function argument in integer. If the lower
- bound is -inf, has_lb is set to false. If the
- upper bound is +inf, has_ub is false.
- is_lb_inclusive and is_ub_inclusive are flags
- to indicate if lb and ub value are inclusive
+ bound is -inf, has_lb is set to false. If the
+ upper bound is +inf, has_ub is false.
+ is_lb_inclusive and is_ub_inclusive are flags
+ to indicate if lb and ub value are inclusive
respectively. */
typedef struct input_domain
@@ -100,11 +100,11 @@ typedef struct input_domain
} inp_domain;
/* A helper function to construct and return an input
- domain object. LB is the lower bound, HAS_LB is
+ domain object. LB is the lower bound, HAS_LB is
a boolean flag indicating if the lower bound exists,
and LB_INCLUSIVE is a boolean flag indicating if the
lower bound is inclusive or not. UB, HAS_UB, and
- UB_INCLUSIVE have the same meaning, but for upper
+ UB_INCLUSIVE have the same meaning, but for upper
bound of the domain. */
static inp_domain
@@ -121,13 +121,13 @@ get_domain (int lb, bool has_lb, bool lb_inclusive,
return domain;
}
-/* A helper function to check the target format for the
+/* A helper function to check the target format for the
argument type. In this implementation, only IEEE formats
- are supported. ARG is the call argument to be checked.
+ are supported. ARG is the call argument to be checked.
Returns true if the format is supported. To support other
target formats, function get_no_error_domain needs to be
- enhanced to have range bounds properly computed. Since
- the check is cheap (very small number of candidates
+ enhanced to have range bounds properly computed. Since
+ the check is cheap (very small number of candidates
to be checked), the result is not cached for each float type. */
static bool
@@ -136,7 +136,7 @@ check_target_format (tree arg)
tree type;
enum machine_mode mode;
const struct real_format *rfmt;
-
+
type = TREE_TYPE (arg);
mode = TYPE_MODE (type);
rfmt = REAL_MODE_FORMAT (mode);
@@ -147,16 +147,16 @@ check_target_format (tree arg)
&& (rfmt == &ieee_double_format || rfmt == &mips_double_format
|| rfmt == &motorola_double_format))
/* For long double, we can not really check XFmode
- which is only defined on intel platforms.
- Candidate pre-selection using builtin function
- code guarantees that we are checking formats
+ which is only defined on intel platforms.
+ Candidate pre-selection using builtin function
+ code guarantees that we are checking formats
for long double modes: double, quad, and extended. */
- || (mode != SFmode && mode != DFmode
+ || (mode != SFmode && mode != DFmode
&& (rfmt == &ieee_quad_format
|| rfmt == &mips_quad_format
|| rfmt == &ieee_extended_motorola_format
- || rfmt == &ieee_extended_intel_96_format
- || rfmt == &ieee_extended_intel_128_format
+ || rfmt == &ieee_extended_intel_96_format
+ || rfmt == &ieee_extended_intel_128_format
|| rfmt == &ieee_extended_intel_96_round_53_format)))
return true;
@@ -167,7 +167,7 @@ check_target_format (tree arg)
/* A helper function to help select calls to pow that are suitable for
conditional DCE transformation. It looks for pow calls that can be
guided with simple conditions. Such calls either have constant base
- values or base values converted from integers. Returns true if
+ values or base values converted from integers. Returns true if
the pow call POW_CALL is a candidate. */
/* The maximum integer bit size for base argument of a pow call
@@ -218,7 +218,7 @@ check_pow (gimple pow_call)
int bit_sz;
/* Only handles cases where base value is converted
- from integer values. */
+ from integer values. */
base_def = SSA_NAME_DEF_STMT (base);
if (gimple_code (base_def) != GIMPLE_ASSIGN)
return false;
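
A sketch of the kinds of pow calls this selection is after (assumed examples, not an exhaustive list):

  pow (10.0, y);            /* constant base: candidate, provided the
                               constant passes check_pow's range test.  */
  double b = (double) n;    /* n an integer no wider than
                               MAX_BASE_INT_BIT_SIZE bits.  */
  pow (b, y);               /* base converted from an integer: candidate.  */
  pow (z, y);               /* arbitrary floating-point base: rejected.  */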
@@ -277,7 +277,7 @@ is_call_dce_candidate (gimple call)
fn = gimple_call_fndecl (call);
if (!fn
- || !DECL_BUILT_IN (fn)
+ || !DECL_BUILT_IN (fn)
|| (DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL))
return false;
@@ -324,11 +324,11 @@ is_call_dce_candidate (gimple call)
TEMP_NAME1/TEMP_NAME2 are names of the temporaries,
CONDS is a vector holding the produced GIMPLE statements,
and NCONDS points to the variable holding the number
- of logical comparisons. CONDS is either empty or
+ of logical comparisons. CONDS is either empty or
a list ended with a null tree. */
static void
-gen_one_condition (tree arg, int lbub,
+gen_one_condition (tree arg, int lbub,
enum tree_code tcode,
const char *temp_name1,
const char *temp_name2,
@@ -367,14 +367,14 @@ gen_one_condition (tree arg, int lbub,
out of input domain check. ARG is the call argument
to be runtime checked, DOMAIN holds the valid domain
for the given function, CONDS points to the vector
- holding the result GIMPLE statements. *NCONDS is
- the number of logical comparisons. This function
+ holding the result GIMPLE statements. *NCONDS is
+ the number of logical comparisons. This function
produces no more than two logical comparisons, one
for lower bound check, one for upper bound check. */
static void
gen_conditions_for_domain (tree arg, inp_domain domain,
- VEC (gimple, heap) *conds,
+ VEC (gimple, heap) *conds,
unsigned *nconds)
{
if (domain.has_lb)
@@ -401,7 +401,7 @@ gen_conditions_for_domain (tree arg, inp_domain domain,
/* A helper function to generate condition
code for the y argument in call pow (some_const, y).
- See candidate selection in check_pow. Since the
+ See candidate selection in check_pow. Since the
candidates' base values have a limited range,
the guarded code generated for y are simple:
if (y > max_y)
@@ -420,8 +420,8 @@ gen_conditions_for_pow_cst_base (tree base, tree expn,
VEC (gimple, heap) *conds,
unsigned *nconds)
{
- inp_domain exp_domain;
- /* Validate the range of the base constant to make
+ inp_domain exp_domain;
+ /* Validate the range of the base constant to make
sure it is consistent with check_pow. */
REAL_VALUE_TYPE mv;
REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
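
A sketch of the "if (y > max_y)" guard this produces for a constant base, assuming DFmode (IEEE double); with base 10.0, overflow begins a little above 10^308, so a conservative integer bound such as 308 would do:

  if (y > 308.0)
    pow (10.0, y);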
@@ -444,11 +444,11 @@ gen_conditions_for_pow_cst_base (tree base, tree expn,
the max exp value is computed based on the size
of the integer type (i.e. max possible base value).
The resulting input domain for exp argument is thus
- conservative (smaller than the max value allowed by
- the runtime value of the base). BASE is the integer
- base value, EXPN is the expression for the exponent
- argument, *CONDS is the vector to hold resulting
- statements, and *NCONDS is the number of logical
+ conservative (smaller than the max value allowed by
+ the runtime value of the base). BASE is the integer
+ base value, EXPN is the expression for the exponent
+ argument, *CONDS is the vector to hold resulting
+ statements, and *NCONDS is the number of logical
conditions. */
static void
@@ -471,7 +471,7 @@ gen_conditions_for_pow_int_base (tree base, tree expn,
base_var = SSA_NAME_VAR (base_val0);
int_type = TREE_TYPE (base_var);
bit_sz = TYPE_PRECISION (int_type);
- gcc_assert (bit_sz > 0
+ gcc_assert (bit_sz > 0
&& bit_sz <= MAX_BASE_INT_BIT_SIZE);
/* Determine the max exp argument value according to
@@ -544,7 +544,7 @@ gen_conditions_for_pow_int_base (tree base, tree expn,
and *NCONDS is the number of logical conditions. */
static void
-gen_conditions_for_pow (gimple pow_call, VEC (gimple, heap) *conds,
+gen_conditions_for_pow (gimple pow_call, VEC (gimple, heap) *conds,
unsigned *nconds)
{
tree base, expn;
@@ -576,15 +576,15 @@ gen_conditions_for_pow (gimple pow_call, VEC (gimple, heap) *conds,
resulting region can be conservative (smaller) than the actual
one and rounded to integers. Some of the bounds are documented
in the standard, while other limit constants are computed
- assuming IEEE floating point format (for SF and DF modes).
- Since IEEE only sets minimum requirements for long double format,
- different long double formats exist under different implementations
- (e.g, 64 bit double precision (DF), 80 bit double-extended
- precision (XF), and 128 bit quad precision (QF) ). For simplicity,
- in this implementation, the computed bounds for long double assume
- 64 bit format (DF), and are therefore conservative. Another
+ assuming IEEE floating point format (for SF and DF modes).
+ Since IEEE only sets minimum requirements for long double format,
+ different long double formats exist under different implementations
+ (e.g, 64 bit double precision (DF), 80 bit double-extended
+ precision (XF), and 128 bit quad precision (QF) ). For simplicity,
+ in this implementation, the computed bounds for long double assume
+ 64 bit format (DF), and are therefore conservative. Another
assumption is that single precision float type is always SF mode,
- and double type is DF mode. This function is quite
+ and double type is DF mode. This function is quite
implementation specific, so it may not be suitable to be part of
builtins.c. This needs to be revisited later to see if it can
be leveraged in x87 assembly expansion. */
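
Two illustrative entries of the kind this function computes (approximate, integer-rounded bounds for DFmode; the constants actually used by the pass may differ). log sets errno only for arguments <= 0, so its no-error domain is (0, +inf): lower bound 0, exclusive, no upper bound. exp under IEEE double overflows a little above 709.78 and underflows below roughly -745.1, so a conservative integer-rounded no-error domain is about (-745, 709).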
@@ -668,22 +668,22 @@ get_no_error_domain (enum built_in_function fnc)
return get_domain (0, true, true,
0, false, false);
default:
- gcc_unreachable ();
+ gcc_unreachable ();
}
- gcc_unreachable ();
+ gcc_unreachable ();
}
/* The function to generate shrink wrap conditions for a partially
dead builtin call whose return value is not used anywhere,
but has to be kept live due to potential error condition.
- BI_CALL is the builtin call, CONDS is the vector of statements
- for condition code, NCODES is the pointer to the number of
+ BI_CALL is the builtin call, CONDS is the vector of statements
+ for condition code, NCODES is the pointer to the number of
logical conditions. Statements belonging to different logical
condition are separated by NULL tree in the vector. */
static void
-gen_shrink_wrap_conditions (gimple bi_call, VEC (gimple, heap) *conds,
+gen_shrink_wrap_conditions (gimple bi_call, VEC (gimple, heap) *conds,
unsigned int *nconds)
{
gimple call;
@@ -718,12 +718,12 @@ gen_shrink_wrap_conditions (gimple bi_call, VEC (gimple, heap) *conds,
/* Probability of the branch (to the call) is taken. */
#define ERR_PROB 0.01
-/* The function to shrink wrap a partially dead builtin call
- whose return value is not used anywhere, but has to be kept
+/* The function to shrink wrap a partially dead builtin call
+ whose return value is not used anywhere, but has to be kept
live due to potential error condition. Returns true if the
transformation actually happens. */
-static bool
+static bool
shrink_wrap_one_built_in_call (gimple bi_call)
{
gimple_stmt_iterator bi_call_bsi;
@@ -743,7 +743,7 @@ shrink_wrap_one_built_in_call (gimple bi_call)
/* This can happen if the condition generator decides
it is not beneficial to do the transformation. Just
- return false and do not do any transformation for
+ return false and do not do any transformation for
the call. */
if (nconds == 0)
return false;
@@ -788,7 +788,7 @@ shrink_wrap_one_built_in_call (gimple bi_call)
bi_call_in_edge0->flags |= EDGE_TRUE_VALUE;
guard_bb0 = bi_call_bb;
bi_call_bb = bi_call_in_edge0->dest;
- join_tgt_in_edge_fall_thru = make_edge (guard_bb0, join_tgt_bb,
+ join_tgt_in_edge_fall_thru = make_edge (guard_bb0, join_tgt_bb,
EDGE_FALSE_VALUE);
bi_call_in_edge0->probability = REG_BR_PROB_BASE * ERR_PROB;
@@ -851,7 +851,7 @@ shrink_wrap_conditional_dead_built_in_calls (VEC (gimple, heap) *calls)
unsigned i = 0;
unsigned n = VEC_length (gimple, calls);
- if (n == 0)
+ if (n == 0)
return false;
for (; i < n ; i++)
@@ -909,7 +909,7 @@ tree_call_cdce (void)
/* As we introduced new control-flow we need to insert PHI-nodes
for the call-clobbers of the remaining call. */
mark_sym_for_renaming (gimple_vop (cfun));
- return (TODO_update_ssa | TODO_cleanup_cfg | TODO_ggc_collect
+ return (TODO_update_ssa | TODO_cleanup_cfg | TODO_ggc_collect
| TODO_remove_unused_locals);
}
else
@@ -922,7 +922,7 @@ gate_call_cdce (void)
/* The limit constants used in the implementation
assume IEEE floating point format. Other formats
can be supported in the future if needed. */
- return flag_tree_builtin_call_dce != 0 && optimize_function_for_speed_p (cfun);
+ return flag_tree_builtin_call_dce != 0 && optimize_function_for_speed_p (cfun);
}
struct gimple_opt_pass pass_call_cdce =
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 13aa63f1d59..3a086898147 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -144,9 +144,9 @@ init_empty_tree_cfg_for_function (struct function *fn)
label_to_block_map_for_function (fn),
initial_cfg_capacity);
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
+ SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
ENTRY_BLOCK_PTR_FOR_FUNCTION (fn));
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
+ SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
EXIT_BLOCK_PTR_FOR_FUNCTION (fn));
ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->next_bb
@@ -2439,11 +2439,11 @@ reinstall_phi_args (edge new_edge, edge old_edge)
edge_var_map *vm;
int i;
gimple_stmt_iterator phis;
-
+
v = redirect_edge_var_map_vector (old_edge);
if (!v)
return;
-
+
for (i = 0, phis = gsi_start_phis (new_edge->dest);
VEC_iterate (edge_var_map, v, i, vm) && !gsi_end_p (phis);
i++, gsi_next (&phis))
@@ -2451,12 +2451,12 @@ reinstall_phi_args (edge new_edge, edge old_edge)
gimple phi = gsi_stmt (phis);
tree result = redirect_edge_var_map_result (vm);
tree arg = redirect_edge_var_map_def (vm);
-
+
gcc_assert (result == gimple_phi_result (phi));
-
+
add_phi_arg (phi, arg, new_edge, redirect_edge_var_map_location (vm));
}
-
+
redirect_edge_var_map_clear (old_edge);
}
@@ -3379,7 +3379,7 @@ do_pointer_plus_expr_check:
}
return false;
- }
+ }
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
@@ -3622,7 +3622,7 @@ verify_gimple_return (gimple stmt)
return values from the original source. */
if (op == NULL)
return false;
-
+
if (!is_gimple_val (op)
&& TREE_CODE (op) != RESULT_DECL)
{
@@ -4321,7 +4321,7 @@ gimple_verify_flow_info (void)
{
edge true_edge;
edge false_edge;
-
+
extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
if (!true_edge
@@ -4499,13 +4499,13 @@ gimple_make_forwarder_block (edge fallthru)
for (gsi = gsi_start_phis (dummy); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi, new_phi;
-
+
phi = gsi_stmt (gsi);
var = gimple_phi_result (phi);
new_phi = create_phi_node (var, bb);
SSA_NAME_DEF_STMT (var) = new_phi;
gimple_phi_set_result (phi, make_ssa_name (SSA_NAME_VAR (var), phi));
- add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
+ add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
UNKNOWN_LOCATION);
}
@@ -4801,7 +4801,7 @@ gimple_split_block (basic_block bb, void *stmt)
return new_bb;
/* Split the statement list - avoid re-creating new containers as this
- brings ugly quadratic memory consumption in the inliner.
+ brings ugly quadratic memory consumption in the inliner.
(We are still quadratic since we need to update stmt BB pointers,
sadly.) */
list = gsi_split_seq_before (&gsi);
@@ -4887,7 +4887,7 @@ gimple_duplicate_bb (basic_block bb)
return new_bb;
}
-/* Add phi arguments to the phi nodes in E_COPY->dest according to
+/* Add phi arguments to the phi nodes in E_COPY->dest according to
the phi arguments coming from the equivalent edge at
the phi nodes of DEST. */
@@ -4896,8 +4896,8 @@ add_phi_args_after_redirect (edge e_copy, edge orig_e)
{
gimple_stmt_iterator psi, psi_copy;
gimple phi, phi_copy;
- tree def;
-
+ tree def;
+
for (psi = gsi_start_phis (orig_e->dest),
psi_copy = gsi_start_phis (e_copy->dest);
!gsi_end_p (psi);
@@ -4958,7 +4958,7 @@ add_phi_args_after_copy_edge (edge e_copy)
phi = gsi_stmt (psi);
phi_copy = gsi_stmt (psi_copy);
def = PHI_ARG_DEF_FROM_EDGE (phi, e);
- add_phi_arg (phi_copy, def, e_copy,
+ add_phi_arg (phi_copy, def, e_copy,
gimple_phi_arg_location_from_edge (phi, e));
}
}
@@ -5157,8 +5157,8 @@ gimple_duplicate_sese_region (edge entry, edge exit,
is moved to ENTRY. Returns true if duplication succeeds, false
otherwise.
- For example,
-
+ For example,
+
some_code;
if (cond)
A;
@@ -5288,21 +5288,21 @@ gimple_duplicate_sese_tail (edge entry ATTRIBUTE_UNUSED, edge exit ATTRIBUTE_UNU
cond_stmt = last_stmt (exit->src);
gcc_assert (gimple_code (cond_stmt) == GIMPLE_COND);
cond_stmt = gimple_copy (cond_stmt);
-
- /* If the block consisting of the exit condition has the latch as
- successor, then the body of the loop is executed before
- the exit condition is tested. In such case, moving the
- condition to the entry, causes that the loop will iterate
- one less iteration (which is the wanted outcome, since we
- peel out the last iteration). If the body is executed after
- the condition, moving the condition to the entry requires
+
+ /* If the block consisting of the exit condition has the latch as
+ successor, then the body of the loop is executed before
+ the exit condition is tested. In such case, moving the
+ condition to the entry, causes that the loop will iterate
+ one less iteration (which is the wanted outcome, since we
+ peel out the last iteration). If the body is executed after
+ the condition, moving the condition to the entry requires
decrementing one iteration. */
if (exits[1]->dest == orig_loop->latch)
new_rhs = gimple_cond_rhs (cond_stmt);
else
{
new_rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (gimple_cond_rhs (cond_stmt)),
- gimple_cond_rhs (cond_stmt),
+ gimple_cond_rhs (cond_stmt),
build_int_cst (TREE_TYPE (gimple_cond_rhs (cond_stmt)), 1));
if (TREE_CODE (gimple_cond_rhs (cond_stmt)) == SSA_NAME)
@@ -5311,12 +5311,12 @@ gimple_duplicate_sese_tail (edge entry ATTRIBUTE_UNUSED, edge exit ATTRIBUTE_UNU
for (gsi1 = gsi_start_bb (iters_bb); !gsi_end_p (gsi1); gsi_next (&gsi1))
if (gsi_stmt (gsi1) == SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt)))
break;
-
+
new_rhs = force_gimple_operand_gsi (&gsi1, new_rhs, true,
NULL_TREE,false,GSI_CONTINUE_LINKING);
}
- }
- gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs));
+ }
+ gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs));
gimple_cond_set_lhs (cond_stmt, unshare_expr (gimple_cond_lhs (cond_stmt)));
gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
@@ -5329,64 +5329,64 @@ gimple_duplicate_sese_tail (edge entry ATTRIBUTE_UNUSED, edge exit ATTRIBUTE_UNU
/* Add the PHI node arguments. */
add_phi_args_after_copy (region_copy, n_region, snew);
-
+
/* Get rid of now superfluous conditions and associated edges (and phi node
arguments). */
exit_bb = exit->dest;
-
+
e = redirect_edge_and_branch (exits[0], exits[1]->dest);
PENDING_STMT (e) = NULL;
-
- /* If the block consisting of the exit condition has the latch as
- successor, then the body of the loop is executed before
- the exit condition is tested.
-
+
+ /* If the block consisting of the exit condition has the latch as
+ successor, then the body of the loop is executed before
+ the exit condition is tested.
+
{ body }
{ cond } (exit[0]) -> { latch }
- |
+ |
V (exit[1])
-
+
{ exit_bb }
-
-
+
+
In such case, the equivalent copied edge nexits[1]
(for the peeled iteration) needs to be redirected to exit_bb.
-
- Otherwise,
-
+
+ Otherwise,
+
{ cond } (exit[0]) -> { body }
|
V (exit[1])
-
+
{ exit_bb }
-
-
+
+
exit[0] is pointing to the body of the loop,
- and the equivalent nexits[0] needs to be redirected to
- the copied body (of the peeled iteration). */
-
+ and the equivalent nexits[0] needs to be redirected to
+ the copied body (of the peeled iteration). */
+
if (exits[1]->dest == orig_loop->latch)
e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
else
e = redirect_edge_and_branch (nexits[0], nexits[1]->dest);
- PENDING_STMT (e) = NULL;
-
+ PENDING_STMT (e) = NULL;
+
redirect_edges = VEC_alloc (edge, heap, 10);
-
+
for (i = 0; i < n_region; i++)
region_copy[i]->flags |= BB_DUPLICATED;
-
- /* Iterate all incoming edges to latch. All those coming from
+
+ /* Iterate all incoming edges to latch. All those coming from
copied bbs will be redirected to exit_bb. */
FOR_EACH_EDGE (e, ei, orig_loop->latch->preds)
{
if (e->src->flags & BB_DUPLICATED)
VEC_safe_push (edge, heap, redirect_edges, e);
}
-
+
for (i = 0; i < n_region; i++)
region_copy[i]->flags &= ~BB_DUPLICATED;
-
+
for (i = 0; VEC_iterate (edge, redirect_edges, i, e); ++i)
{
e = redirect_edge_and_branch (e, exit_bb);
@@ -5395,7 +5395,7 @@ gimple_duplicate_sese_tail (edge entry ATTRIBUTE_UNUSED, edge exit ATTRIBUTE_UNU
orig_e = find_edge (orig_src, orig_loop->latch);
add_phi_args_after_redirect (e, orig_e);
}
-
+
VEC_free (edge, heap, redirect_edges);
/* Anything that is outside of the region, but was dominated by something
@@ -5567,7 +5567,7 @@ move_stmt_op (tree *tp, int *walk_subtrees, void *data)
&& !is_global_var (t))
|| TREE_CODE (t) == CONST_DECL)
replace_by_duplicate_decl (tp, p->vars_map, p->to_context);
-
+
if (SSA_VAR_P (t)
&& gimple_in_ssa_p (cfun))
{
@@ -6363,7 +6363,7 @@ print_succ_bbs (FILE *file, basic_block bb)
/* Print to FILE the basic block BB following the VERBOSITY level. */
-void
+void
print_loops_bb (FILE *file, basic_block bb, int indent, int verbosity)
{
char *s_indent = (char *) alloca ((size_t) indent + 1);
@@ -6409,7 +6409,7 @@ print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
s_indent[indent] = '\0';
/* Print loop's header. */
- fprintf (file, "%sloop_%d (header = %d, latch = %d", s_indent,
+ fprintf (file, "%sloop_%d (header = %d, latch = %d", s_indent,
loop->num, loop->header->index, loop->latch->index);
fprintf (file, ", niter = ");
print_generic_expr (file, loop->nb_iterations, 0);
@@ -6816,14 +6816,14 @@ remove_edge_and_dominated_blocks (edge e)
/* Update the dominance information. The immediate dominator may change only
for blocks whose immediate dominator belongs to DF_IDOM:
-
+
Suppose that idom(X) = Y before removal of E and idom(X) != Y after the
removal. Let Z the arbitrary block such that idom(Z) = Y and
Z dominates X after the removal. Before removal, there exists a path P
from Y to X that avoids Z. Let F be the last edge on P that is
removed, and let W = F->dest. Before removal, idom(W) = Y (since Y
dominates W, and because of P, Z does not dominate W), and W belongs to
- the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
+ the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
{
bb = BASIC_BLOCK (i);
@@ -7033,11 +7033,11 @@ split_critical_edges (void)
{
if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL))
split_edge (e);
- /* PRE inserts statements to edges and expects that
+ /* PRE inserts statements to edges and expects that
since split_critical_edges was done beforehand, committing edge
insertions will not split more edges. In addition to critical
edges we must split edges that have multiple successors and
- end by control flow statements, such as RESX.
+ end by control flow statements, such as RESX.
Go ahead and split them too. This matches the logic in
gimple_find_edge_insert_loc. */
else if ((!single_pred_p (e->dest)
diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c
index d3a8ca91aa9..495450bf12c 100644
--- a/gcc/tree-cfgcleanup.c
+++ b/gcc/tree-cfgcleanup.c
@@ -537,7 +537,7 @@ cleanup_tree_cfg_bb (basic_block bb)
return true;
retval = cleanup_control_flow_bb (bb);
-
+
/* Forwarder blocks can carry line number information which is
useful when debugging, so we only clean them up when
optimizing. */
@@ -606,7 +606,7 @@ cleanup_tree_cfg_1 (void)
calls. */
retval |= split_bbs_on_noreturn_calls ();
}
-
+
end_recording_case_labels ();
BITMAP_FREE (cfgcleanup_altered_bbs);
return retval;
@@ -924,7 +924,7 @@ gate_merge_phi (void)
return 1;
}
-struct gimple_opt_pass pass_merge_phi =
+struct gimple_opt_pass pass_merge_phi =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index 33d9f18c099..9711e20b5be 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -54,10 +54,10 @@ is_not_constant_evolution (const_tree cst)
/* Fold CODE for a polynomial function and a constant. */
-static inline tree
-chrec_fold_poly_cst (enum tree_code code,
- tree type,
- tree poly,
+static inline tree
+chrec_fold_poly_cst (enum tree_code code,
+ tree type,
+ tree poly,
tree cst)
{
gcc_assert (poly);
@@ -69,23 +69,23 @@ chrec_fold_poly_cst (enum tree_code code,
switch (code)
{
case PLUS_EXPR:
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly),
chrec_fold_plus (type, CHREC_LEFT (poly), cst),
CHREC_RIGHT (poly));
-
+
case MINUS_EXPR:
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly),
chrec_fold_minus (type, CHREC_LEFT (poly), cst),
CHREC_RIGHT (poly));
-
+
case MULT_EXPR:
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly),
chrec_fold_multiply (type, CHREC_LEFT (poly), cst),
chrec_fold_multiply (type, CHREC_RIGHT (poly), cst));
-
+
default:
return chrec_dont_know;
}
@@ -93,10 +93,10 @@ chrec_fold_poly_cst (enum tree_code code,
/* Fold the addition of two polynomial functions. */
-static inline tree
-chrec_fold_plus_poly_poly (enum tree_code code,
- tree type,
- tree poly0,
+static inline tree
+chrec_fold_plus_poly_poly (enum tree_code code,
+ tree type,
+ tree poly0,
tree poly1)
{
tree left, right;
@@ -113,7 +113,7 @@ chrec_fold_plus_poly_poly (enum tree_code code,
else
gcc_assert (chrec_type (poly0) == chrec_type (poly1));
gcc_assert (type == chrec_type (poly0));
-
+
/*
{a, +, b}_1 + {c, +, d}_2 -> {{a, +, b}_1 + c, +, d}_2,
{a, +, b}_2 + {c, +, d}_1 -> {{c, +, d}_1 + a, +, b}_2,
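
A small worked instance of the same-loop case handled further down: {1, +, 2}_1 + {3, +, 4}_1 folds to {4, +, 6}_1, since (1 + 2*n) + (3 + 4*n) = 4 + 6*n.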
@@ -121,67 +121,67 @@ chrec_fold_plus_poly_poly (enum tree_code code,
if (flow_loop_nested_p (loop0, loop1))
{
if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly1),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly1),
chrec_fold_plus (type, poly0, CHREC_LEFT (poly1)),
CHREC_RIGHT (poly1));
else
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly1),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly1),
chrec_fold_minus (type, poly0, CHREC_LEFT (poly1)),
- chrec_fold_multiply (type, CHREC_RIGHT (poly1),
+ chrec_fold_multiply (type, CHREC_RIGHT (poly1),
SCALAR_FLOAT_TYPE_P (type)
? build_real (type, dconstm1)
: build_int_cst_type (type, -1)));
}
-
+
if (flow_loop_nested_p (loop1, loop0))
{
if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly0),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly0),
chrec_fold_plus (type, CHREC_LEFT (poly0), poly1),
CHREC_RIGHT (poly0));
else
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly0),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly0),
chrec_fold_minus (type, CHREC_LEFT (poly0), poly1),
CHREC_RIGHT (poly0));
}
-
+
/* This function should never be called for chrecs of loops that
do not belong to the same loop nest. */
gcc_assert (loop0 == loop1);
if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
{
- left = chrec_fold_plus
+ left = chrec_fold_plus
(type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));
- right = chrec_fold_plus
+ right = chrec_fold_plus
(rtype, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
}
else
{
- left = chrec_fold_minus
+ left = chrec_fold_minus
(type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));
- right = chrec_fold_minus
+ right = chrec_fold_minus
(type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1));
}
if (chrec_zerop (right))
return left;
else
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly0), left, right);
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly0), left, right);
}
/* Fold the multiplication of two polynomial functions. */
-static inline tree
-chrec_fold_multiply_poly_poly (tree type,
- tree poly0,
+static inline tree
+chrec_fold_multiply_poly_poly (tree type,
+ tree poly0,
tree poly1)
{
tree t0, t1, t2;
@@ -195,29 +195,29 @@ chrec_fold_multiply_poly_poly (tree type,
gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC);
gcc_assert (chrec_type (poly0) == chrec_type (poly1));
gcc_assert (type == chrec_type (poly0));
-
+
/* {a, +, b}_1 * {c, +, d}_2 -> {c*{a, +, b}_1, +, d}_2,
{a, +, b}_2 * {c, +, d}_1 -> {a*{c, +, d}_1, +, b}_2,
{a, +, b}_x * {c, +, d}_x -> {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x. */
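
Worked instance of the same-loop rule: {1, +, 2}_x * {3, +, 4}_x -> {3, +, 18, +, 16}_x, since a*c = 3, a*d + b*c + b*d = 4 + 6 + 8 = 18, and 2*b*d = 16.  Evaluating the result, 3 + 18*n + 16*n*(n-1)/2 = 3 + 10*n + 8*n^2, which is exactly (1 + 2*n)*(3 + 4*n).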
if (flow_loop_nested_p (loop0, loop1))
/* poly0 is a constant wrt. poly1. */
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly1),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly1),
chrec_fold_multiply (type, CHREC_LEFT (poly1), poly0),
CHREC_RIGHT (poly1));
-
+
if (flow_loop_nested_p (loop1, loop0))
/* poly1 is a constant wrt. poly0. */
- return build_polynomial_chrec
- (CHREC_VARIABLE (poly0),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (poly0),
chrec_fold_multiply (type, CHREC_LEFT (poly0), poly1),
CHREC_RIGHT (poly0));
-
+
gcc_assert (loop0 == loop1);
/* poly0 and poly1 are two polynomials in the same variable,
{a, +, b}_x * {c, +, d}_x -> {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x. */
-
+
/* "a*c". */
t0 = chrec_fold_multiply (type, CHREC_LEFT (poly0), CHREC_LEFT (poly1));
@@ -243,22 +243,22 @@ chrec_fold_multiply_poly_poly (tree type,
/* When the operands are automatically_generated_chrec_p, the fold has
to respect the semantics of the operands. */
-static inline tree
-chrec_fold_automatically_generated_operands (tree op0,
+static inline tree
+chrec_fold_automatically_generated_operands (tree op0,
tree op1)
{
if (op0 == chrec_dont_know
|| op1 == chrec_dont_know)
return chrec_dont_know;
-
+
if (op0 == chrec_known
|| op1 == chrec_known)
return chrec_known;
-
+
if (op0 == chrec_not_analyzed_yet
|| op1 == chrec_not_analyzed_yet)
return chrec_not_analyzed_yet;
-
+
/* The default case produces a safe result. */
return chrec_dont_know;
}
@@ -266,7 +266,7 @@ chrec_fold_automatically_generated_operands (tree op0,
/* Fold the addition of two chrecs. */
static tree
-chrec_fold_plus_1 (enum tree_code code, tree type,
+chrec_fold_plus_1 (enum tree_code code, tree type,
tree op0, tree op1)
{
tree op1_type = code == POINTER_PLUS_EXPR ? sizetype : type;
@@ -274,7 +274,7 @@ chrec_fold_plus_1 (enum tree_code code, tree type,
if (automatically_generated_chrec_p (op0)
|| automatically_generated_chrec_p (op1))
return chrec_fold_automatically_generated_operands (op0, op1);
-
+
switch (TREE_CODE (op0))
{
case POLYNOMIAL_CHREC:
@@ -285,13 +285,13 @@ chrec_fold_plus_1 (enum tree_code code, tree type,
default:
if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
- return build_polynomial_chrec
- (CHREC_VARIABLE (op0),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (op0),
chrec_fold_plus (type, CHREC_LEFT (op0), op1),
CHREC_RIGHT (op0));
else
- return build_polynomial_chrec
- (CHREC_VARIABLE (op0),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (op0),
chrec_fold_minus (type, CHREC_LEFT (op0), op1),
CHREC_RIGHT (op0));
}
@@ -301,15 +301,15 @@ chrec_fold_plus_1 (enum tree_code code, tree type,
{
case POLYNOMIAL_CHREC:
if (code == PLUS_EXPR || code == POINTER_PLUS_EXPR)
- return build_polynomial_chrec
- (CHREC_VARIABLE (op1),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (op1),
chrec_fold_plus (type, op0, CHREC_LEFT (op1)),
CHREC_RIGHT (op1));
else
- return build_polynomial_chrec
- (CHREC_VARIABLE (op1),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (op1),
chrec_fold_minus (type, op0, CHREC_LEFT (op1)),
- chrec_fold_multiply (type, CHREC_RIGHT (op1),
+ chrec_fold_multiply (type, CHREC_RIGHT (op1),
SCALAR_FLOAT_TYPE_P (type)
? build_real (type, dconstm1)
: build_int_cst_type (type, -1)));
@@ -335,7 +335,7 @@ chrec_fold_plus_1 (enum tree_code code, tree type,
/* Fold the addition of two chrecs. */
tree
-chrec_fold_plus (tree type,
+chrec_fold_plus (tree type,
tree op0,
tree op1)
{
@@ -353,15 +353,15 @@ chrec_fold_plus (tree type,
code = POINTER_PLUS_EXPR;
else
code = PLUS_EXPR;
-
+
return chrec_fold_plus_1 (code, type, op0, op1);
}
/* Fold the subtraction of two chrecs. */
-tree
-chrec_fold_minus (tree type,
- tree op0,
+tree
+chrec_fold_minus (tree type,
+ tree op0,
tree op1)
{
if (automatically_generated_chrec_p (op0)
@@ -370,21 +370,21 @@ chrec_fold_minus (tree type,
if (integer_zerop (op1))
return op0;
-
+
return chrec_fold_plus_1 (MINUS_EXPR, type, op0, op1);
}
/* Fold the multiplication of two chrecs. */
tree
-chrec_fold_multiply (tree type,
+chrec_fold_multiply (tree type,
tree op0,
tree op1)
{
if (automatically_generated_chrec_p (op0)
|| automatically_generated_chrec_p (op1))
return chrec_fold_automatically_generated_operands (op0, op1);
-
+
switch (TREE_CODE (op0))
{
case POLYNOMIAL_CHREC:
@@ -392,34 +392,34 @@ chrec_fold_multiply (tree type,
{
case POLYNOMIAL_CHREC:
return chrec_fold_multiply_poly_poly (type, op0, op1);
-
+
default:
if (integer_onep (op1))
return op0;
if (integer_zerop (op1))
return build_int_cst (type, 0);
-
- return build_polynomial_chrec
- (CHREC_VARIABLE (op0),
+
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (op0),
chrec_fold_multiply (type, CHREC_LEFT (op0), op1),
chrec_fold_multiply (type, CHREC_RIGHT (op0), op1));
}
-
+
default:
if (integer_onep (op0))
return op1;
-
+
if (integer_zerop (op0))
return build_int_cst (type, 0);
-
+
switch (TREE_CODE (op1))
{
case POLYNOMIAL_CHREC:
- return build_polynomial_chrec
- (CHREC_VARIABLE (op1),
+ return build_polynomial_chrec
+ (CHREC_VARIABLE (op1),
chrec_fold_multiply (type, CHREC_LEFT (op1), op0),
chrec_fold_multiply (type, CHREC_RIGHT (op1), op0));
-
+
default:
if (integer_onep (op1))
return op0;
@@ -437,7 +437,7 @@ chrec_fold_multiply (tree type,
/* Evaluate the binomial coefficient. Return NULL_TREE if the intermediate
calculation overflows, otherwise return C(n,k) with type TYPE. */
-static tree
+static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
unsigned HOST_WIDE_INT lidx, lnum, ldenom, lres, ldum;
@@ -510,7 +510,7 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
/* Helper function. Use the Newton's interpolating formula for
evaluating the value of the evolution function. */
-static tree
+static tree
chrec_evaluate (unsigned var, tree chrec, tree n, unsigned int k)
{
tree arg0, arg1, binomial_n_k;
@@ -538,24 +538,24 @@ chrec_evaluate (unsigned var, tree chrec, tree n, unsigned int k)
binomial_n_k = tree_fold_binomial (type, n, k);
if (!binomial_n_k)
return chrec_dont_know;
-
+
return fold_build2 (MULT_EXPR, type, chrec, binomial_n_k);
}
-/* Evaluates "CHREC (X)" when the varying variable is VAR.
- Example: Given the following parameters,
-
+/* Evaluates "CHREC (X)" when the varying variable is VAR.
+ Example: Given the following parameters,
+
var = 1
chrec = {3, +, 4}_1
x = 10
-
- The result is given by the Newton's interpolating formula:
+
+ The result is given by the Newton's interpolating formula:
3 * \binom{10}{0} + 4 * \binom{10}{1}.
*/
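
Carrying the example through: 3 * \binom{10}{0} + 4 * \binom{10}{1} = 3*1 + 4*10 = 43, which is indeed the value of {3, +, 4}_1, i.e. 3 + 4*i, at i = 10.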
-tree
+tree
chrec_apply (unsigned var,
- tree chrec,
+ tree chrec,
tree x)
{
tree type = chrec_type (chrec);
@@ -569,7 +569,7 @@ chrec_apply (unsigned var,
constants with respect to the varying loop. */
|| chrec_contains_symbols_defined_in_loop (chrec, var))
return chrec_dont_know;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(chrec_apply \n");
@@ -583,17 +583,17 @@ chrec_apply (unsigned var,
res = chrec_fold_multiply (TREE_TYPE (x), CHREC_RIGHT (chrec), x);
res = chrec_fold_plus (type, CHREC_LEFT (chrec), res);
}
-
+
else if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
res = chrec;
-
+
else if (TREE_CODE (x) == INTEGER_CST
&& tree_int_cst_sgn (x) == 1)
/* testsuite/.../ssa-chrec-38.c. */
res = chrec_evaluate (var, chrec, x, 0);
else
res = chrec_dont_know;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " (varying_loop = %d\n", var);
@@ -605,14 +605,14 @@ chrec_apply (unsigned var,
print_generic_expr (dump_file, res, 0);
fprintf (dump_file, "))\n");
}
-
+
return res;
}
/* Replaces the initial condition in CHREC with INIT_COND. */
-tree
-chrec_replace_initial_condition (tree chrec,
+tree
+chrec_replace_initial_condition (tree chrec,
tree init_cond)
{
if (automatically_generated_chrec_p (chrec))
@@ -623,11 +623,11 @@ chrec_replace_initial_condition (tree chrec,
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
- return build_polynomial_chrec
+ return build_polynomial_chrec
(CHREC_VARIABLE (chrec),
chrec_replace_initial_condition (CHREC_LEFT (chrec), init_cond),
CHREC_RIGHT (chrec));
-
+
default:
return init_cond;
}
@@ -635,12 +635,12 @@ chrec_replace_initial_condition (tree chrec,
/* Returns the initial condition of a given CHREC. */
-tree
+tree
initial_condition (tree chrec)
{
if (automatically_generated_chrec_p (chrec))
return chrec;
-
+
if (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
return initial_condition (CHREC_LEFT (chrec));
else
@@ -650,37 +650,37 @@ initial_condition (tree chrec)
/* Returns a univariate function that represents the evolution in
LOOP_NUM. Mask the evolution of any other loop. */
-tree
-hide_evolution_in_other_loops_than_loop (tree chrec,
+tree
+hide_evolution_in_other_loops_than_loop (tree chrec,
unsigned loop_num)
{
struct loop *loop = get_loop (loop_num), *chloop;
if (automatically_generated_chrec_p (chrec))
return chrec;
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
chloop = get_chrec_loop (chrec);
if (chloop == loop)
- return build_polynomial_chrec
- (loop_num,
- hide_evolution_in_other_loops_than_loop (CHREC_LEFT (chrec),
- loop_num),
+ return build_polynomial_chrec
+ (loop_num,
+ hide_evolution_in_other_loops_than_loop (CHREC_LEFT (chrec),
+ loop_num),
CHREC_RIGHT (chrec));
-
+
else if (flow_loop_nested_p (chloop, loop))
/* There is no evolution in this loop. */
return initial_condition (chrec);
-
+
else
{
gcc_assert (flow_loop_nested_p (loop, chloop));
- return hide_evolution_in_other_loops_than_loop (CHREC_LEFT (chrec),
+ return hide_evolution_in_other_loops_than_loop (CHREC_LEFT (chrec),
loop_num);
}
-
+
default:
return chrec;
}
@@ -689,8 +689,8 @@ hide_evolution_in_other_loops_than_loop (tree chrec,
/* Returns the evolution part of CHREC in LOOP_NUM when RIGHT is
true, otherwise returns the initial condition in LOOP_NUM. */
-static tree
-chrec_component_in_loop_num (tree chrec,
+static tree
+chrec_component_in_loop_num (tree chrec,
unsigned loop_num,
bool right)
{
@@ -699,7 +699,7 @@ chrec_component_in_loop_num (tree chrec,
if (automatically_generated_chrec_p (chrec))
return chrec;
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
@@ -715,28 +715,28 @@ chrec_component_in_loop_num (tree chrec,
if (TREE_CODE (CHREC_LEFT (chrec)) != POLYNOMIAL_CHREC
|| CHREC_VARIABLE (CHREC_LEFT (chrec)) != CHREC_VARIABLE (chrec))
return component;
-
+
else
return build_polynomial_chrec
- (loop_num,
- chrec_component_in_loop_num (CHREC_LEFT (chrec),
- loop_num,
- right),
+ (loop_num,
+ chrec_component_in_loop_num (CHREC_LEFT (chrec),
+ loop_num,
+ right),
component);
}
-
+
else if (flow_loop_nested_p (chloop, loop))
/* There is no evolution part in this loop. */
return NULL_TREE;
-
+
else
{
gcc_assert (flow_loop_nested_p (loop, chloop));
- return chrec_component_in_loop_num (CHREC_LEFT (chrec),
- loop_num,
+ return chrec_component_in_loop_num (CHREC_LEFT (chrec),
+ loop_num,
right);
}
-
+
default:
if (right)
return NULL_TREE;
@@ -746,22 +746,22 @@ chrec_component_in_loop_num (tree chrec,
}
/* Returns the evolution part in LOOP_NUM. Example: the call
- evolution_part_in_loop_num ({{0, +, 1}_1, +, 2}_1, 1) returns
+ evolution_part_in_loop_num ({{0, +, 1}_1, +, 2}_1, 1) returns
{1, +, 2}_1 */
-tree
-evolution_part_in_loop_num (tree chrec,
+tree
+evolution_part_in_loop_num (tree chrec,
unsigned loop_num)
{
return chrec_component_in_loop_num (chrec, loop_num, true);
}
/* Returns the initial condition in LOOP_NUM. Example: the call
- initial_condition_in_loop_num ({{0, +, 1}_1, +, 2}_2, 2) returns
+ initial_condition_in_loop_num ({{0, +, 1}_1, +, 2}_2, 2) returns
{0, +, 1}_1 */
-tree
-initial_condition_in_loop_num (tree chrec,
+tree
+initial_condition_in_loop_num (tree chrec,
unsigned loop_num)
{
return chrec_component_in_loop_num (chrec, loop_num, false);
@@ -772,9 +772,9 @@ initial_condition_in_loop_num (tree chrec,
chrec_dont_know, for example after having determined that it is
impossible to say how many times a loop will execute. */
-tree
+tree
reset_evolution_in_loop (unsigned loop_num,
- tree chrec,
+ tree chrec,
tree new_evol)
{
struct loop *loop = get_loop (loop_num);
@@ -799,7 +799,7 @@ reset_evolution_in_loop (unsigned loop_num,
while (TREE_CODE (chrec) == POLYNOMIAL_CHREC
&& CHREC_VARIABLE (chrec) == loop_num)
chrec = CHREC_LEFT (chrec);
-
+
return build_polynomial_chrec (loop_num, chrec, new_evol);
}
@@ -807,14 +807,14 @@ reset_evolution_in_loop (unsigned loop_num,
alternate paths of a conditional expression. */
tree
-chrec_merge (tree chrec1,
+chrec_merge (tree chrec1,
tree chrec2)
{
if (chrec1 == chrec_dont_know
|| chrec2 == chrec_dont_know)
return chrec_dont_know;
- if (chrec1 == chrec_known
+ if (chrec1 == chrec_known
|| chrec2 == chrec_known)
return chrec_known;
@@ -835,18 +835,18 @@ chrec_merge (tree chrec1,
/* Helper function for is_multivariate_chrec. */
-static bool
+static bool
is_multivariate_chrec_rec (const_tree chrec, unsigned int rec_var)
{
if (chrec == NULL_TREE)
return false;
-
+
if (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
{
if (CHREC_VARIABLE (chrec) != rec_var)
return true;
else
- return (is_multivariate_chrec_rec (CHREC_LEFT (chrec), rec_var)
+ return (is_multivariate_chrec_rec (CHREC_LEFT (chrec), rec_var)
|| is_multivariate_chrec_rec (CHREC_RIGHT (chrec), rec_var));
}
else
@@ -855,16 +855,16 @@ is_multivariate_chrec_rec (const_tree chrec, unsigned int rec_var)
/* Determine whether the given chrec is multivariate or not. */
-bool
+bool
is_multivariate_chrec (const_tree chrec)
{
if (chrec == NULL_TREE)
return false;
-
+
if (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
- return (is_multivariate_chrec_rec (CHREC_LEFT (chrec),
+ return (is_multivariate_chrec_rec (CHREC_LEFT (chrec),
CHREC_VARIABLE (chrec))
- || is_multivariate_chrec_rec (CHREC_RIGHT (chrec),
+ || is_multivariate_chrec_rec (CHREC_RIGHT (chrec),
CHREC_VARIABLE (chrec)));
else
return false;
@@ -872,14 +872,14 @@ is_multivariate_chrec (const_tree chrec)
/* Determines whether the chrec contains symbolic names or not. */
-bool
+bool
chrec_contains_symbols (const_tree chrec)
{
int i, n;
if (chrec == NULL_TREE)
return false;
-
+
if (TREE_CODE (chrec) == SSA_NAME
|| TREE_CODE (chrec) == VAR_DECL
|| TREE_CODE (chrec) == PARM_DECL
@@ -898,7 +898,7 @@ chrec_contains_symbols (const_tree chrec)
/* Determines whether the chrec contains undetermined coefficients. */
-bool
+bool
chrec_contains_undetermined (const_tree chrec)
{
int i, n;
@@ -930,7 +930,7 @@ tree_contains_chrecs (const_tree expr, int *size)
if (size)
(*size)++;
-
+
if (tree_is_chrec (expr))
return true;
@@ -971,7 +971,7 @@ evolution_function_is_invariant_rec_p (tree chrec, int loopnum)
if (!evolution_function_is_invariant_rec_p (TREE_OPERAND (chrec, 1),
loopnum))
return false;
-
+
case 1:
if (!evolution_function_is_invariant_rec_p (TREE_OPERAND (chrec, 0),
loopnum))
@@ -996,12 +996,12 @@ evolution_function_is_invariant_p (tree chrec, int loopnum)
/* Determine whether the given tree is an affine multivariate
evolution. */
-bool
+bool
evolution_function_is_affine_multivariate_p (const_tree chrec, int loopnum)
{
if (chrec == NULL_TREE)
return false;
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
@@ -1012,9 +1012,9 @@ evolution_function_is_affine_multivariate_p (const_tree chrec, int loopnum)
else
{
if (TREE_CODE (CHREC_RIGHT (chrec)) == POLYNOMIAL_CHREC
- && CHREC_VARIABLE (CHREC_RIGHT (chrec))
+ && CHREC_VARIABLE (CHREC_RIGHT (chrec))
!= CHREC_VARIABLE (chrec)
- && evolution_function_is_affine_multivariate_p
+ && evolution_function_is_affine_multivariate_p
(CHREC_RIGHT (chrec), loopnum))
return true;
else
@@ -1026,19 +1026,19 @@ evolution_function_is_affine_multivariate_p (const_tree chrec, int loopnum)
if (evolution_function_is_invariant_rec_p (CHREC_RIGHT (chrec), loopnum)
&& TREE_CODE (CHREC_LEFT (chrec)) == POLYNOMIAL_CHREC
&& CHREC_VARIABLE (CHREC_LEFT (chrec)) != CHREC_VARIABLE (chrec)
- && evolution_function_is_affine_multivariate_p
+ && evolution_function_is_affine_multivariate_p
(CHREC_LEFT (chrec), loopnum))
return true;
else
return false;
}
-
+
default:
return false;
}
}
-/* Determine whether the given tree is a function in zero or one
+/* Determine whether the given tree is a function in zero or one
variables. */
bool
@@ -1046,7 +1046,7 @@ evolution_function_is_univariate_p (const_tree chrec)
{
if (chrec == NULL_TREE)
return true;
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
@@ -1058,11 +1058,11 @@ evolution_function_is_univariate_p (const_tree chrec)
if (!evolution_function_is_univariate_p (CHREC_LEFT (chrec)))
return false;
break;
-
+
default:
break;
}
-
+
switch (TREE_CODE (CHREC_RIGHT (chrec)))
{
case POLYNOMIAL_CHREC:
@@ -1071,11 +1071,11 @@ evolution_function_is_univariate_p (const_tree chrec)
if (!evolution_function_is_univariate_p (CHREC_RIGHT (chrec)))
return false;
break;
-
+
default:
- break;
+ break;
}
-
+
default:
return true;
}
@@ -1084,7 +1084,7 @@ evolution_function_is_univariate_p (const_tree chrec)
/* Returns the number of variables of CHREC. Example: the call
nb_vars_in_chrec ({{0, +, 1}_5, +, 2}_6) returns 2. */
-unsigned
+unsigned
nb_vars_in_chrec (tree chrec)
{
if (chrec == NULL_TREE)
@@ -1093,7 +1093,7 @@ nb_vars_in_chrec (tree chrec)
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
- return 1 + nb_vars_in_chrec
+ return 1 + nb_vars_in_chrec
(initial_condition_in_loop_num (chrec, CHREC_VARIABLE (chrec)));
default:
@@ -1125,7 +1125,7 @@ convert_affine_scev (struct loop *loop, tree type,
/* In general,
(TYPE) (BASE + STEP * i) = (TYPE) BASE + (TYPE -- sign extend) STEP * i,
but we must check some assumptions.
-
+
1) If [BASE, +, STEP] wraps, the equation is not valid when precision
of CT is smaller than the precision of TYPE. For example, when we
cast unsigned char [254, +, 1] to unsigned, the values on left side
@@ -1183,7 +1183,7 @@ convert_affine_scev (struct loop *loop, tree type,
of CT and TYPE. This only needs to be handled specially when
CT is unsigned -- to avoid e.g. unsigned char [100, +, 255]
(with values 100, 99, 98, ...) from becoming signed or unsigned
- [100, +, 255] with values 100, 355, ...; the sign-extension is
+ [100, +, 255] with values 100, 355, ...; the sign-extension is
performed by default when CT is signed. */
new_step = *step;
if (TYPE_PRECISION (step_type) > TYPE_PRECISION (ct) && TYPE_UNSIGNED (ct))
@@ -1209,7 +1209,7 @@ convert_affine_scev (struct loop *loop, tree type,
/* Convert CHREC for the right hand side of a CREC.
The increment for a pointer type is always sizetype. */
-tree
+tree
chrec_convert_rhs (tree type, tree chrec, gimple at_stmt)
{
if (POINTER_TYPE_P (type))
@@ -1228,12 +1228,12 @@ chrec_convert_rhs (tree type, tree chrec, gimple at_stmt)
TREE_TYPE (CHREC_LEFT (chrec)) == TREE_TYPE (CHREC_RIGHT (chrec)).
An example of what could happen when adding two chrecs and the type
of the CHREC_RIGHT is different than CHREC_LEFT is:
-
+
{(uint) 0, +, (uchar) 10} +
{(uint) 0, +, (uchar) 250}
-
+
that would produce a wrong result if CHREC_RIGHT is not (uint):
-
+
{(uint) 0, +, (uchar) 4}
instead of
@@ -1241,7 +1241,7 @@ chrec_convert_rhs (tree type, tree chrec, gimple at_stmt)
{(uint) 0, +, (uint) 260}
*/
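
Spelling out the arithmetic behind that example: adding the steps in unsigned char gives 10 + 250 = 260, which wraps to 260 - 256 = 4, whereas converting CHREC_RIGHT to the chrec's type (uint) first yields the intended step of 260.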
-tree
+tree
chrec_convert (tree type, tree chrec, gimple at_stmt)
{
return chrec_convert_1 (type, chrec, at_stmt, true);
@@ -1253,13 +1253,13 @@ chrec_convert (tree type, tree chrec, gimple at_stmt)
conversion is less accurate: the information is used for
determining a more accurate estimation of the number of iterations.
By default AT_STMT could be safely set to NULL_TREE.
-
+
USE_OVERFLOW_SEMANTICS is true if this function should assume that
the rules for overflow of the given language apply (e.g., that signed
arithmetics in C does not overflow) -- i.e., to use them to avoid unnecessary
tests, but also to enforce that the result follows them. */
-static tree
+static tree
chrec_convert_1 (tree type, tree chrec, gimple at_stmt,
bool use_overflow_semantics)
{
@@ -1269,7 +1269,7 @@ chrec_convert_1 (tree type, tree chrec, gimple at_stmt,
if (automatically_generated_chrec_p (chrec))
return chrec;
-
+
ct = chrec_type (chrec);
if (ct == type)
return chrec;
@@ -1346,13 +1346,13 @@ chrec_convert_aggressive (tree type, tree chrec)
rc = chrec_convert_aggressive (rtype, right);
if (!rc)
rc = chrec_convert (rtype, right, NULL);
-
+
return build_polynomial_chrec (CHREC_VARIABLE (chrec), lc, rc);
}
/* Returns true when CHREC0 == CHREC1. */
-bool
+bool
eq_evolutions_p (const_tree chrec0, const_tree chrec1)
{
if (chrec0 == NULL_TREE
@@ -1374,7 +1374,7 @@ eq_evolutions_p (const_tree chrec0, const_tree chrec1)
&& eq_evolutions_p (CHREC_RIGHT (chrec0), CHREC_RIGHT (chrec1)));
default:
return false;
- }
+ }
}
/* Returns EV_GROWS if CHREC grows (assuming that it does not overflow),
diff --git a/gcc/tree-chrec.h b/gcc/tree-chrec.h
index 545db38a730..fcff93a55da 100644
--- a/gcc/tree-chrec.h
+++ b/gcc/tree-chrec.h
@@ -22,8 +22,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_CHREC_H
#define GCC_TREE_CHREC_H
-/* The following trees are unique elements. Thus the comparison of another
- element to these elements should be done on the pointer to these trees,
+/* The following trees are unique elements. Thus the comparison of another
+ element to these elements should be done on the pointer to these trees,
and not on their value. */
extern tree chrec_not_analyzed_yet;
@@ -90,26 +90,26 @@ extern bool evolution_function_right_is_integer_cst (const_tree);
/* Determines whether CHREC is equal to zero. */
-static inline bool
+static inline bool
chrec_zerop (const_tree chrec)
{
if (chrec == NULL_TREE)
return false;
-
+
if (TREE_CODE (chrec) == INTEGER_CST)
return integer_zerop (chrec);
-
+
return false;
}
-/* Determines whether CHREC is a loop invariant with respect to LOOP_NUM.
+/* Determines whether CHREC is a loop invariant with respect to LOOP_NUM.
Set the result in RES and return true when the property can be computed. */
static inline bool
no_evolution_in_loop_p (tree chrec, unsigned loop_num, bool *res)
{
tree scev;
-
+
if (chrec == chrec_not_analyzed_yet
|| chrec == chrec_dont_know
|| chrec_contains_symbols_defined_in_loop (chrec, loop_num))
@@ -123,9 +123,9 @@ no_evolution_in_loop_p (tree chrec, unsigned loop_num, bool *res)
/* Build a polynomial chain of recurrence. */
-static inline tree
-build_polynomial_chrec (unsigned loop_num,
- tree left,
+static inline tree
+build_polynomial_chrec (unsigned loop_num,
+ tree left,
tree right)
{
bool val;
@@ -151,13 +151,13 @@ build_polynomial_chrec (unsigned loop_num,
if (chrec_zerop (right))
return left;
- return build3 (POLYNOMIAL_CHREC, TREE_TYPE (left),
+ return build3 (POLYNOMIAL_CHREC, TREE_TYPE (left),
build_int_cst (NULL_TREE, loop_num), left, right);
}
/* Determines whether the expression CHREC is a constant. */
-static inline bool
+static inline bool
evolution_function_is_constant_p (const_tree chrec)
{
if (chrec == NULL_TREE)
@@ -168,7 +168,7 @@ evolution_function_is_constant_p (const_tree chrec)
case INTEGER_CST:
case REAL_CST:
return true;
-
+
default:
return false;
}
@@ -176,12 +176,12 @@ evolution_function_is_constant_p (const_tree chrec)
/* Determine whether CHREC is an affine evolution function in LOOPNUM. */
-static inline bool
+static inline bool
evolution_function_is_affine_in_loop (const_tree chrec, int loopnum)
{
if (chrec == NULL_TREE)
return false;
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
@@ -190,7 +190,7 @@ evolution_function_is_affine_in_loop (const_tree chrec, int loopnum)
return true;
else
return false;
-
+
default:
return false;
}
@@ -198,12 +198,12 @@ evolution_function_is_affine_in_loop (const_tree chrec, int loopnum)
/* Determine whether CHREC is an affine evolution function or not. */
-static inline bool
+static inline bool
evolution_function_is_affine_p (const_tree chrec)
{
if (chrec == NULL_TREE)
return false;
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
@@ -214,7 +214,7 @@ evolution_function_is_affine_p (const_tree chrec)
return true;
else
return false;
-
+
default:
return false;
}
diff --git a/gcc/tree-complex.c b/gcc/tree-complex.c
index 5108e07cd48..8f246fa080c 100644
--- a/gcc/tree-complex.c
+++ b/gcc/tree-complex.c
@@ -3,17 +3,17 @@
Free Software Foundation, Inc.
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -66,7 +66,7 @@ static VEC(tree, heap) *complex_ssa_name_components;
/* Lookup UID in the complex_variable_components hashtable and return the
associated tree. */
-static tree
+static tree
cvc_lookup (unsigned int uid)
{
struct int_tree_map *h, in;
@@ -74,12 +74,12 @@ cvc_lookup (unsigned int uid)
h = (struct int_tree_map *) htab_find_with_hash (complex_variable_components, &in, uid);
return h ? h->to : NULL;
}
-
+
/* Insert the pair UID, TO into the complex_variable_components hashtable. */
-static void
+static void
cvc_insert (unsigned int uid, tree to)
-{
+{
struct int_tree_map *h;
void **loc;
@@ -215,7 +215,7 @@ init_dont_simulate_again (void)
stmt = gsi_stmt (gsi);
op0 = op1 = NULL_TREE;
- /* Most control-altering statements must be initially
+ /* Most control-altering statements must be initially
simulated, else we won't cover the entire cfg. */
sim_again_p = stmt_ends_bb_p (stmt);
@@ -571,7 +571,7 @@ set_component_ssa_name (tree ssa_name, bool imag_p, tree value)
a new ssa name. */
else
comp = get_component_ssa_name (ssa_name, imag_p);
-
+
/* Do all the work to assign VALUE to COMP. */
list = NULL;
value = force_gimple_operand (value, &list, false, NULL);
@@ -1066,7 +1066,7 @@ expand_complex_multiplication (gimple_stmt_iterator *gsi, tree inner_type,
}
/* Keep this algorithm in sync with fold-const.c:const_binop().
-
+
Expand complex division to scalars, straightforward algorithm.
a / b = ((ar*br + ai*bi)/t) + i((ai*br - ar*bi)/t)
t = br*br + bi*bi
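
A quick numeric check of that formula: for a = 1 + 2i and b = 3 + 4i, t = 3*3 + 4*4 = 25, the real part is (1*3 + 2*4)/25 = 11/25, and the imaginary part is (2*3 - 1*4)/25 = 2/25, matching (1 + 2i)/(3 + 4i) = (11 + 2i)/25.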
@@ -1544,7 +1544,7 @@ expand_complex_operations_1 (gimple_stmt_iterator *gsi)
case RDIV_EXPR:
expand_complex_division (gsi, inner_type, ar, ai, br, bi, code, al, bl);
break;
-
+
case NEGATE_EXPR:
expand_complex_negation (gsi, inner_type, ar, ai);
break;
@@ -1612,7 +1612,7 @@ tree_lower_complex (void)
return 0;
}
-struct gimple_opt_pass pass_lower_complex =
+struct gimple_opt_pass pass_lower_complex =
{
{
GIMPLE_PASS,
@@ -1663,7 +1663,7 @@ gate_no_optimization (void)
return optimize == 0 || sorrycount || errorcount;
}
-struct gimple_opt_pass pass_lower_complex_O0 =
+struct gimple_opt_pass pass_lower_complex_O0 =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index ae0a0681479..1b0421c5ef0 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -21,57 +21,57 @@ along with GCC; see the file COPYING3. If not see
/* This pass walks a given loop structure searching for array
references. The information about the array accesses is recorded
- in DATA_REFERENCE structures.
-
- The basic test for determining the dependences is:
- given two access functions chrec1 and chrec2 to a same array, and
- x and y two vectors from the iteration domain, the same element of
+ in DATA_REFERENCE structures.
+
+ The basic test for determining the dependences is:
+ given two access functions chrec1 and chrec2 to a same array, and
+ x and y two vectors from the iteration domain, the same element of
the array is accessed twice at iterations x and y if and only if:
| chrec1 (x) == chrec2 (y).
-
+
The goals of this analysis are:
-
+
- to determine the independence: the relation between two
independent accesses is qualified with the chrec_known (this
information allows a loop parallelization),
-
+
- when two data references access the same data, to qualify the
dependence relation with classic dependence representations:
-
+
- distance vectors
- direction vectors
- loop carried level dependence
- polyhedron dependence
or with the chains of recurrences based representation,
-
- - to define a knowledge base for storing the data dependence
+
+ - to define a knowledge base for storing the data dependence
information,
-
+
- to define an interface to access this data.
-
-
+
+
Definitions:
-
+
- subscript: given two array accesses a subscript is the tuple
composed of the access functions for a given dimension. Example:
Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
(f1, g1), (f2, g2), (f3, g3).
- Diophantine equation: an equation whose coefficients and
- solutions are integer constants, for example the equation
+ solutions are integer constants, for example the equation
| 3*x + 2*y = 1
has an integer solution x = 1 and y = -1.
-
+
References:
-
+
- "Advanced Compilation for High Performance Computing" by Randy
Allen and Ken Kennedy.
- http://citeseer.ist.psu.edu/goff91practical.html
-
- - "Loop Transformations for Restructuring Compilers - The Foundations"
+ http://citeseer.ist.psu.edu/goff91practical.html
+
+ - "Loop Transformations for Restructuring Compilers - The Foundations"
by Utpal Banerjee.
-
+
*/
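
A minimal illustration of the basic test (hypothetical loop, not taken from the sources):

   for (i = 0; i < N; i++)
     A[i + 1] = A[i] + 1;

The write access function is chrec1 = {1, +, 1}_1 and the read is chrec2 = {0, +, 1}_1; chrec1 (x) == chrec2 (y) holds exactly when y = x + 1, so the element written at iteration x is read one iteration later, giving a loop-carried dependence of distance 1.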
#include "config.h"
@@ -127,7 +127,7 @@ static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
struct loop *);
/* Returns true iff A divides B. */
-static inline bool
+static inline bool
tree_fold_divides_p (const_tree a, const_tree b)
{
gcc_assert (TREE_CODE (a) == INTEGER_CST);
@@ -137,7 +137,7 @@ tree_fold_divides_p (const_tree a, const_tree b)
/* Returns true iff A divides B. */
-static inline bool
+static inline bool
int_divides_p (int a, int b)
{
return ((b % a) == 0);
@@ -145,9 +145,9 @@ int_divides_p (int a, int b)
-/* Dump into FILE all the data references from DATAREFS. */
+/* Dump into FILE all the data references from DATAREFS. */
-void
+void
dump_data_references (FILE *file, VEC (data_reference_p, heap) *datarefs)
{
unsigned int i;
@@ -157,26 +157,26 @@ dump_data_references (FILE *file, VEC (data_reference_p, heap) *datarefs)
dump_data_reference (file, dr);
}
-/* Dump into STDERR all the data references from DATAREFS. */
+/* Dump into STDERR all the data references from DATAREFS. */
-void
+void
debug_data_references (VEC (data_reference_p, heap) *datarefs)
{
dump_data_references (stderr, datarefs);
}
-/* Dump to STDERR all the dependence relations from DDRS. */
+/* Dump to STDERR all the dependence relations from DDRS. */
-void
+void
debug_data_dependence_relations (VEC (ddr_p, heap) *ddrs)
{
dump_data_dependence_relations (stderr, ddrs);
}
-/* Dump into FILE all the dependence relations from DDRS. */
+/* Dump into FILE all the dependence relations from DDRS. */
-void
-dump_data_dependence_relations (FILE *file,
+void
+dump_data_dependence_relations (FILE *file,
VEC (ddr_p, heap) *ddrs)
{
unsigned int i;
@@ -188,7 +188,7 @@ dump_data_dependence_relations (FILE *file,
/* Print to STDERR the data_reference DR. */
-void
+void
debug_data_reference (struct data_reference *dr)
{
dump_data_reference (stderr, dr);
@@ -196,19 +196,19 @@ debug_data_reference (struct data_reference *dr)
/* Dump function for a DATA_REFERENCE structure. */
-void
-dump_data_reference (FILE *outf,
+void
+dump_data_reference (FILE *outf,
struct data_reference *dr)
{
unsigned int i;
-
+
fprintf (outf, "(Data Ref: \n stmt: ");
print_gimple_stmt (outf, DR_STMT (dr), 0, 0);
fprintf (outf, " ref: ");
print_generic_stmt (outf, DR_REF (dr), 0);
fprintf (outf, " base_object: ");
print_generic_stmt (outf, DR_BASE_OBJECT (dr), 0);
-
+
for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
{
fprintf (outf, " Access function %d: ", i);
@@ -258,7 +258,7 @@ dump_conflict_function (FILE *outf, conflict_function *cf)
/* Dump function for a SUBSCRIPT structure. */
-void
+void
dump_subscript (FILE *outf, struct subscript *subscript)
{
conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);
@@ -272,7 +272,7 @@ dump_subscript (FILE *outf, struct subscript *subscript)
fprintf (outf, " last_conflict: ");
print_generic_stmt (outf, last_iteration, 0);
}
-
+
cf = SUB_CONFLICTS_IN_B (subscript);
fprintf (outf, " iterations_that_access_an_element_twice_in_B: ");
dump_conflict_function (outf, cf);
@@ -362,7 +362,7 @@ print_dist_vectors (FILE *outf, VEC (lambda_vector, heap) *dist_vects,
/* Debug version. */
-void
+void
debug_data_dependence_relation (struct data_dependence_relation *ddr)
{
dump_data_dependence_relation (stderr, ddr);
@@ -370,8 +370,8 @@ debug_data_dependence_relation (struct data_dependence_relation *ddr)
/* Dump function for a DATA_DEPENDENCE_RELATION structure. */
-void
-dump_data_dependence_relation (FILE *outf,
+void
+dump_data_dependence_relation (FILE *outf,
struct data_dependence_relation *ddr)
{
struct data_reference *dra, *drb;
@@ -391,7 +391,7 @@ dump_data_dependence_relation (FILE *outf,
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
fprintf (outf, " (no dependence)\n");
-
+
else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
{
unsigned int i;
@@ -433,40 +433,40 @@ dump_data_dependence_relation (FILE *outf,
/* Dump function for a DATA_DEPENDENCE_DIRECTION structure. */
void
-dump_data_dependence_direction (FILE *file,
+dump_data_dependence_direction (FILE *file,
enum data_dependence_direction dir)
{
switch (dir)
{
- case dir_positive:
+ case dir_positive:
fprintf (file, "+");
break;
-
+
case dir_negative:
fprintf (file, "-");
break;
-
+
case dir_equal:
fprintf (file, "=");
break;
-
+
case dir_positive_or_negative:
fprintf (file, "+-");
break;
-
- case dir_positive_or_equal:
+
+ case dir_positive_or_equal:
fprintf (file, "+=");
break;
-
- case dir_negative_or_equal:
+
+ case dir_negative_or_equal:
fprintf (file, "-=");
break;
-
- case dir_star:
- fprintf (file, "*");
+
+ case dir_star:
+ fprintf (file, "*");
break;
-
- default:
+
+ default:
break;
}
}
@@ -476,7 +476,7 @@ dump_data_dependence_direction (FILE *file,
dependence vectors, or in other words the number of loops in the
considered nest. */
-void
+void
dump_dist_dir_vectors (FILE *file, VEC (ddr_p, heap) *ddrs)
{
unsigned int i, j;
@@ -506,7 +506,7 @@ dump_dist_dir_vectors (FILE *file, VEC (ddr_p, heap) *ddrs)
/* Dumps the data dependence relations DDRS in FILE. */
-void
+void
dump_ddrs (FILE *file, VEC (ddr_p, heap) *ddrs)
{
unsigned int i;
@@ -684,7 +684,7 @@ canonicalize_base_object_address (tree addr)
return build_fold_addr_expr (TREE_OPERAND (addr, 0));
}
-/* Analyzes the behavior of the memory reference DR in the innermost loop or
+/* Analyzes the behavior of the memory reference DR in the innermost loop or
basic block that contains it. Returns true if analysis succeed or false
otherwise. */
@@ -719,7 +719,7 @@ dr_analyze_innermost (struct data_reference *dr)
base = build_fold_addr_expr (base);
if (in_loop)
{
- if (!simple_iv (loop, loop_containing_stmt (stmt), base, &base_iv,
+ if (!simple_iv (loop, loop_containing_stmt (stmt), base, &base_iv,
false))
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -792,10 +792,10 @@ dr_analyze_indices (struct data_reference *dr, struct loop *nest)
tree ref = unshare_expr (DR_REF (dr)), aref = ref, op;
tree base, off, access_fn = NULL_TREE;
basic_block before_loop = NULL;
-
+
if (nest)
before_loop = block_before_loop (nest);
-
+
while (handled_component_p (aref))
{
if (TREE_CODE (aref) == ARRAY_REF)
@@ -810,7 +810,7 @@ dr_analyze_indices (struct data_reference *dr, struct loop *nest)
TREE_OPERAND (aref, 1) = build_int_cst (TREE_TYPE (op), 0);
}
-
+
aref = TREE_OPERAND (aref, 0);
}
@@ -915,7 +915,7 @@ create_data_ref (struct loop *nest, tree memref, gimple stmt, bool is_read)
fprintf (dump_file, "\n");
}
- return dr;
+ return dr;
}
/* Returns true if FNA == FNB. */
@@ -1030,7 +1030,7 @@ affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb)
VEC_quick_push (tree, ret,
fold_build2 (op, type,
- VEC_index (tree, fna, i),
+ VEC_index (tree, fna, i),
VEC_index (tree, fnb, i)));
}
@@ -1082,11 +1082,11 @@ compute_subscript_distance (struct data_dependence_relation *ddr)
if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
{
unsigned int i;
-
+
for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
{
struct subscript *subscript;
-
+
subscript = DDR_SUBSCRIPT (ddr, i);
cf_a = SUB_CONFLICTS_IN_A (subscript);
cf_b = SUB_CONFLICTS_IN_B (subscript);
@@ -1099,7 +1099,7 @@ compute_subscript_distance (struct data_dependence_relation *ddr)
return;
}
diff = affine_fn_minus (fn_a, fn_b);
-
+
if (affine_function_constant_p (diff))
SUB_DISTANCE (subscript) = affine_function_base (diff);
else
@@ -1290,8 +1290,8 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b)
&& TREE_CODE (addr_b) == ADDR_EXPR)
return TREE_OPERAND (addr_a, 0) == TREE_OPERAND (addr_b, 0);
- /* An instruction writing through a restricted pointer is "independent" of any
- instruction reading or writing through a different restricted pointer,
+ /* An instruction writing through a restricted pointer is "independent" of any
+ instruction reading or writing through a different restricted pointer,
in the same block/scope. */
type_a = TREE_TYPE (addr_a);
@@ -1303,7 +1303,7 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b)
if (TREE_CODE (addr_b) == SSA_NAME)
decl_b = SSA_NAME_VAR (addr_b);
- if (TYPE_RESTRICT (type_a) && TYPE_RESTRICT (type_b)
+ if (TYPE_RESTRICT (type_a) && TYPE_RESTRICT (type_b)
&& (!DR_IS_READ (a) || !DR_IS_READ (b))
&& decl_a && DECL_P (decl_a)
&& decl_b && DECL_P (decl_b)
@@ -1322,13 +1322,13 @@ static void compute_self_dependence (struct data_dependence_relation *);
size of the classic distance/direction vectors. */
static struct data_dependence_relation *
-initialize_data_dependence_relation (struct data_reference *a,
+initialize_data_dependence_relation (struct data_reference *a,
struct data_reference *b,
VEC (loop_p, heap) *loop_nest)
{
struct data_dependence_relation *res;
unsigned int i;
-
+
res = XNEW (struct data_dependence_relation);
DDR_A (res) = a;
DDR_B (res) = b;
@@ -1340,14 +1340,14 @@ initialize_data_dependence_relation (struct data_reference *a,
if (a == NULL || b == NULL)
{
- DDR_ARE_DEPENDENT (res) = chrec_dont_know;
+ DDR_ARE_DEPENDENT (res) = chrec_dont_know;
return res;
- }
+ }
/* If the data references do not alias, then they are independent. */
if (!dr_may_alias_p (a, b))
{
- DDR_ARE_DEPENDENT (res) = chrec_known;
+ DDR_ARE_DEPENDENT (res) = chrec_known;
return res;
}
@@ -1369,18 +1369,18 @@ initialize_data_dependence_relation (struct data_reference *a,
whether they alias or not. */
if (!operand_equal_p (DR_BASE_OBJECT (a), DR_BASE_OBJECT (b), 0))
{
- DDR_ARE_DEPENDENT (res) = chrec_dont_know;
+ DDR_ARE_DEPENDENT (res) = chrec_dont_know;
return res;
}
/* If the base of the object is not invariant in the loop nest, we cannot
analyze it. TODO -- in fact, it would suffice to record that there may
be arbitrary dependences in the loops where the base object varies. */
- if (loop_nest
+ if (loop_nest
&& !object_address_invariant_in_loop_p (VEC_index (loop_p, loop_nest, 0),
DR_BASE_OBJECT (a)))
{
- DDR_ARE_DEPENDENT (res) = chrec_dont_know;
+ DDR_ARE_DEPENDENT (res) = chrec_dont_know;
return res;
}
@@ -1396,7 +1396,7 @@ initialize_data_dependence_relation (struct data_reference *a,
for (i = 0; i < DR_NUM_DIMENSIONS (a); i++)
{
struct subscript *subscript;
-
+
subscript = XNEW (struct subscript);
SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
@@ -1444,7 +1444,7 @@ free_subscripts (VEC (subscript_p, heap) *subscripts)
description. */
static inline void
-finalize_ddr_dependent (struct data_dependence_relation *ddr,
+finalize_ddr_dependent (struct data_dependence_relation *ddr,
tree chrec)
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -1454,7 +1454,7 @@ finalize_ddr_dependent (struct data_dependence_relation *ddr,
fprintf (dump_file, ")\n");
}
- DDR_ARE_DEPENDENT (ddr) = chrec;
+ DDR_ARE_DEPENDENT (ddr) = chrec;
free_subscripts (DDR_SUBSCRIPTS (ddr));
DDR_SUBSCRIPTS (ddr) = NULL;
}
@@ -1496,7 +1496,7 @@ siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
|| (evolution_function_is_constant_p (chrec_b)
&& evolution_function_is_univariate_p (chrec_a)))
return true;
-
+
if (evolution_function_is_univariate_p (chrec_a)
&& evolution_function_is_univariate_p (chrec_b))
{
@@ -1508,16 +1508,16 @@ siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
case POLYNOMIAL_CHREC:
if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b))
return false;
-
+
default:
return true;
}
-
+
default:
return true;
}
}
-
+
return false;
}
@@ -1533,7 +1533,7 @@ conflict_fn (unsigned n, ...)
gcc_assert (0 < n && n <= MAX_DIM);
va_start(ap, n);
-
+
ret->n = n;
for (i = 0; i < n; i++)
ret->fns[i] = va_arg (ap, affine_fn);
@@ -1575,16 +1575,16 @@ affine_fn_univar (tree cst, unsigned dim, tree coef)
CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
-static void
-analyze_ziv_subscript (tree chrec_a,
- tree chrec_b,
+static void
+analyze_ziv_subscript (tree chrec_a,
+ tree chrec_b,
conflict_function **overlaps_a,
- conflict_function **overlaps_b,
+ conflict_function **overlaps_b,
tree *last_conflicts)
{
tree type, difference;
dependence_stats.num_ziv++;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(analyze_ziv_subscript \n");
@@ -1592,7 +1592,7 @@ analyze_ziv_subscript (tree chrec_a,
chrec_a = chrec_convert (type, chrec_a, NULL);
chrec_b = chrec_convert (type, chrec_b, NULL);
difference = chrec_fold_minus (type, chrec_a, chrec_b);
-
+
switch (TREE_CODE (difference))
{
case INTEGER_CST:
@@ -1614,9 +1614,9 @@ analyze_ziv_subscript (tree chrec_a,
dependence_stats.num_ziv_independent++;
}
break;
-
+
default:
- /* We're not sure whether the indexes overlap. For the moment,
+ /* We're not sure whether the indexes overlap. For the moment,
conservatively answer "don't know". */
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "ziv test failed: difference is non-integer.\n");
@@ -1627,7 +1627,7 @@ analyze_ziv_subscript (tree chrec_a,
dependence_stats.num_ziv_unimplemented++;
break;
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, ")\n");
}
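A minimal standalone sketch of the decision made in the INTEGER_CST case above: when both subscripts are loop-invariant constants (the ZIV situation), their difference decides everything.  This is an illustration, not GCC code.

/* Not GCC code: two constant subscripts conflict on every iteration iff
   their difference is zero, and never conflict otherwise.  */
#include <stdbool.h>
#include <stdio.h>

static bool
ziv_conflict_p (long chrec_a, long chrec_b)
{
  return chrec_a - chrec_b == 0;
}

int
main (void)
{
  printf ("A[4] vs A[4]: %s\n", ziv_conflict_p (4, 4) ? "dependent" : "independent");
  printf ("A[4] vs A[7]: %s\n", ziv_conflict_p (4, 7) ? "dependent" : "independent");
  return 0;
}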
@@ -1679,7 +1679,7 @@ estimated_loop_iterations_int (struct loop *loop, bool conservative)
return hwi_nit < 0 ? -1 : hwi_nit;
}
-
+
/* Similar to estimated_loop_iterations, but returns the estimate as a tree,
and only if it fits to the int type. If this is not the case, or the
estimate on the number of iterations of LOOP could not be derived, returns
@@ -1710,10 +1710,10 @@ estimated_loop_iterations_tree (struct loop *loop, bool conservative)
CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
static void
-analyze_siv_subscript_cst_affine (tree chrec_a,
+analyze_siv_subscript_cst_affine (tree chrec_a,
tree chrec_b,
- conflict_function **overlaps_a,
- conflict_function **overlaps_b,
+ conflict_function **overlaps_a,
+ conflict_function **overlaps_b,
tree *last_conflicts)
{
bool value0, value1, value2;
@@ -1723,11 +1723,11 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
chrec_a = chrec_convert (type, chrec_a, NULL);
chrec_b = chrec_convert (type, chrec_b, NULL);
difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a);
-
+
if (!chrec_is_positive (initial_condition (difference), &value0))
{
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "siv test failed: chrec is not positive.\n");
+ fprintf (dump_file, "siv test failed: chrec is not positive.\n");
dependence_stats.num_siv_unimplemented++;
*overlaps_a = conflict_fn_not_known ();
@@ -1745,7 +1745,7 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
fprintf (dump_file, "siv test failed: chrec not positive.\n");
*overlaps_a = conflict_fn_not_known ();
- *overlaps_b = conflict_fn_not_known ();
+ *overlaps_b = conflict_fn_not_known ();
*last_conflicts = chrec_dont_know;
dependence_stats.num_siv_unimplemented++;
return;
@@ -1754,11 +1754,11 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
{
if (value1 == true)
{
- /* Example:
+ /* Example:
chrec_a = 12
chrec_b = {10, +, 1}
*/
-
+
if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
{
HOST_WIDE_INT numiter;
@@ -1770,7 +1770,7 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
CHREC_RIGHT (chrec_b));
*overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
*last_conflicts = integer_one_node;
-
+
/* Perform weak-zero siv test to see if overlap is
outside the loop bounds. */
@@ -1786,29 +1786,29 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
*last_conflicts = integer_zero_node;
dependence_stats.num_siv_independent++;
return;
- }
+ }
dependence_stats.num_siv_dependent++;
return;
}
-
+
/* When the step does not divide the difference, there are
no overlaps. */
else
{
*overlaps_a = conflict_fn_no_dependence ();
- *overlaps_b = conflict_fn_no_dependence ();
+ *overlaps_b = conflict_fn_no_dependence ();
*last_conflicts = integer_zero_node;
dependence_stats.num_siv_independent++;
return;
}
}
-
+
else
{
- /* Example:
+ /* Example:
chrec_a = 12
chrec_b = {10, +, -1}
-
+
In this case, chrec_a will not overlap with chrec_b. */
*overlaps_a = conflict_fn_no_dependence ();
*overlaps_b = conflict_fn_no_dependence ();
@@ -1818,7 +1818,7 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
}
}
}
- else
+ else
{
if (!chrec_is_positive (CHREC_RIGHT (chrec_b), &value2))
{
@@ -1826,7 +1826,7 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
fprintf (dump_file, "siv test failed: chrec not positive.\n");
*overlaps_a = conflict_fn_not_known ();
- *overlaps_b = conflict_fn_not_known ();
+ *overlaps_b = conflict_fn_not_known ();
*last_conflicts = chrec_dont_know;
dependence_stats.num_siv_unimplemented++;
return;
@@ -1835,7 +1835,7 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
{
if (value2 == false)
{
- /* Example:
+ /* Example:
chrec_a = 3
chrec_b = {10, +, -1}
*/
@@ -1864,17 +1864,17 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
*last_conflicts = integer_zero_node;
dependence_stats.num_siv_independent++;
return;
- }
+ }
dependence_stats.num_siv_dependent++;
return;
}
-
+
/* When the step does not divide the difference, there
are no overlaps. */
else
{
*overlaps_a = conflict_fn_no_dependence ();
- *overlaps_b = conflict_fn_no_dependence ();
+ *overlaps_b = conflict_fn_no_dependence ();
*last_conflicts = integer_zero_node;
dependence_stats.num_siv_independent++;
return;
@@ -1882,10 +1882,10 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
}
else
{
- /* Example:
- chrec_a = 3
+ /* Example:
+ chrec_a = 3
chrec_b = {4, +, 1}
-
+
In this case, chrec_a will not overlap with chrec_b. */
*overlaps_a = conflict_fn_no_dependence ();
*overlaps_b = conflict_fn_no_dependence ();
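The constant-versus-affine cases handled above come down to simple arithmetic: with chrec_a a constant c and chrec_b = {b0, +, s}, s > 0, an overlap requires s to divide c - b0, the single conflicting iteration of chrec_b is then (c - b0) / s, and the weak-zero check verifies that this iteration falls within the loop bounds.  A standalone sketch of that arithmetic (the real code works on trees and chrecs, not plain integers):

/* Not GCC code: the cst-vs-affine SIV arithmetic for chrec_a = c and
   chrec_b = {b0, +, s} with s > 0.  Returns the conflicting iteration
   of chrec_b, or -1 when the references are independent.  */
#include <stdio.h>

static long
siv_cst_affine (long c, long b0, long s, long niter)
{
  long diff = c - b0;
  if (diff < 0 || diff % s != 0)   /* the step never reaches c: no overlap */
    return -1;
  long k = diff / s;
  if (k > niter)                   /* overlap would fall outside the loop */
    return -1;
  return k;
}

int
main (void)
{
  /* chrec_a = 12, chrec_b = {10, +, 1}: conflict at iteration 2.  */
  printf ("12 vs {10, +, 1}: %ld\n", siv_cst_affine (12, 10, 1, 100));
  /* chrec_a = 12, chrec_b = {10, +, 3}: 3 does not divide 2, independent.  */
  printf ("12 vs {10, +, 3}: %ld\n", siv_cst_affine (12, 10, 3, 100));
  return 0;
}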
@@ -1949,7 +1949,7 @@ initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
#define FLOOR_DIV(x,y) ((x) / (y))
-/* Solves the special case of the Diophantine equation:
+/* Solves the special case of the Diophantine equation:
| {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)
Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the
@@ -1957,9 +1957,9 @@ initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
constructed as evolutions in dimension DIM. */
static void
-compute_overlap_steps_for_affine_univar (int niter, int step_a, int step_b,
+compute_overlap_steps_for_affine_univar (int niter, int step_a, int step_b,
affine_fn *overlaps_a,
- affine_fn *overlaps_b,
+ affine_fn *overlaps_b,
tree *last_conflicts, int dim)
{
if (((step_a > 0 && step_b > 0)
@@ -1982,11 +1982,11 @@ compute_overlap_steps_for_affine_univar (int niter, int step_a, int step_b,
else
*last_conflicts = chrec_dont_know;
- *overlaps_a = affine_fn_univar (integer_zero_node, dim,
+ *overlaps_a = affine_fn_univar (integer_zero_node, dim,
build_int_cst (NULL_TREE,
step_overlaps_a));
- *overlaps_b = affine_fn_univar (integer_zero_node, dim,
- build_int_cst (NULL_TREE,
+ *overlaps_b = affine_fn_univar (integer_zero_node, dim,
+ build_int_cst (NULL_TREE,
step_overlaps_b));
}
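The overlap descriptions built above encode the solutions of {0, +, step_a}_x (i) = {0, +, step_b}_y (j), that is step_a * i = step_b * j: writing g = gcd (step_a, step_b), the non-negative solutions are exactly i = t * (step_b / g), j = t * (step_a / g) for t = 0, 1, 2, ..., which is the arithmetic behind the step_overlaps values used in the univariate overlap chrecs.  A standalone check of that identity (illustrative only):

/* Not GCC code: for positive steps, the solutions of
   step_a * i == step_b * j are i = t * (step_b / g), j = t * (step_a / g)
   with g = gcd (step_a, step_b).  */
#include <stdio.h>

static long
gcd (long a, long b)
{
  while (b)
    {
      long t = a % b;
      a = b;
      b = t;
    }
  return a;
}

int
main (void)
{
  long step_a = 6, step_b = 4, g = gcd (step_a, step_b);
  for (long t = 0; t < 4; t++)
    {
      long i = t * (step_b / g), j = t * (step_a / g);
      printf ("i = %ld, j = %ld: %ld == %ld\n", i, j, step_a * i, step_b * j);
    }
  return 0;
}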
@@ -2000,11 +2000,11 @@ compute_overlap_steps_for_affine_univar (int niter, int step_a, int step_b,
/* Solves the special case of a Diophantine equation where CHREC_A is
an affine bivariate function, and CHREC_B is an affine univariate
- function. For example,
+ function. For example,
| {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z
-
- has the following overlapping functions:
+
+ has the following overlapping functions:
| x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
| y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
@@ -2014,9 +2014,9 @@ compute_overlap_steps_for_affine_univar (int niter, int step_a, int step_b,
a common benchmark. Implement the general algorithm. */
static void
-compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
+compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
conflict_function **overlaps_a,
- conflict_function **overlaps_b,
+ conflict_function **overlaps_b,
tree *last_conflicts)
{
bool xz_p, yz_p, xyz_p;
@@ -2032,17 +2032,17 @@ compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
step_y = int_cst_value (CHREC_RIGHT (chrec_a));
step_z = int_cst_value (CHREC_RIGHT (chrec_b));
- niter_x =
+ niter_x =
estimated_loop_iterations_int (get_chrec_loop (CHREC_LEFT (chrec_a)),
false);
niter_y = estimated_loop_iterations_int (get_chrec_loop (chrec_a), false);
niter_z = estimated_loop_iterations_int (get_chrec_loop (chrec_b), false);
-
+
if (niter_x < 0 || niter_y < 0 || niter_z < 0)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");
-
+
*overlaps_a = conflict_fn_not_known ();
*overlaps_b = conflict_fn_not_known ();
*last_conflicts = chrec_dont_know;
@@ -2135,10 +2135,10 @@ compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
parameters, because it uses lambda matrices of integers. */
static void
-analyze_subscript_affine_affine (tree chrec_a,
+analyze_subscript_affine_affine (tree chrec_a,
tree chrec_b,
- conflict_function **overlaps_a,
- conflict_function **overlaps_b,
+ conflict_function **overlaps_a,
+ conflict_function **overlaps_b,
tree *last_conflicts)
{
unsigned nb_vars_a, nb_vars_b, dim;
@@ -2156,10 +2156,10 @@ analyze_subscript_affine_affine (tree chrec_a,
}
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(analyze_subscript_affine_affine \n");
-
+
/* For determining the initial intersection, we have to solve a
Diophantine equation. This is the most time consuming part.
-
+
For answering to the question: "Is there a dependence?" we have
to prove that there exists a solution to the Diophantine
equation, and that the solution is in the iteration domain,
@@ -2181,11 +2181,11 @@ analyze_subscript_affine_affine (tree chrec_a,
gamma = init_b - init_a;
/* Don't do all the hard work of solving the Diophantine equation
- when we already know the solution: for example,
+ when we already know the solution: for example,
| {3, +, 1}_1
| {3, +, 4}_2
| gamma = 3 - 3 = 0.
- Then the first overlap occurs during the first iterations:
+ Then the first overlap occurs during the first iterations:
| {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
*/
if (gamma == 0)
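The gamma == 0 shortcut can be checked by hand on the example quoted in the comment: {3, +, 1}_1 composed with {0, +, 4}_x gives 3 + 1 * (4 * t) = 3 + 4 * t, and {3, +, 4}_2 composed with {0, +, 1}_x gives 3 + 4 * (1 * t) = 3 + 4 * t, so the two sides agree for every t and the first overlap is indeed at the first iterations.  The same check as a throwaway program (not GCC code):

/* Not GCC code: numeric check of the gamma == 0 shortcut on
   {3, +, 1}_1 ({0, +, 4}_x) == {3, +, 4}_2 ({0, +, 1}_x).  */
#include <assert.h>
#include <stdio.h>

int
main (void)
{
  for (long t = 0; t < 10; t++)
    {
      long lhs = 3 + 1 * (0 + 4 * t);   /* {3, +, 1} at iteration {0, +, 4}(t) */
      long rhs = 3 + 4 * (0 + 1 * t);   /* {3, +, 4} at iteration {0, +, 1}(t) */
      assert (lhs == rhs);
      printf ("t = %ld: %ld == %ld\n", t, lhs, rhs);
    }
  return 0;
}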
@@ -2204,8 +2204,8 @@ analyze_subscript_affine_affine (tree chrec_a,
step_a = int_cst_value (CHREC_RIGHT (chrec_a));
step_b = int_cst_value (CHREC_RIGHT (chrec_b));
- compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
- &ova, &ovb,
+ compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
+ &ova, &ovb,
last_conflicts, 1);
*overlaps_a = conflict_fn (1, ova);
*overlaps_b = conflict_fn (1, ovb);
@@ -2269,20 +2269,20 @@ analyze_subscript_affine_affine (tree chrec_a,
|| (A[0][0] < 0 && -A[1][0] < 0)))
{
/* The solutions are given by:
- |
+ |
| [GAMMA/GCD_ALPHA_BETA t].[u11 u12] = [x0]
| [u21 u22] [y0]
-
+
For a given integer t. Using the following variables,
-
+
| i0 = u11 * gamma / gcd_alpha_beta
| j0 = u12 * gamma / gcd_alpha_beta
| i1 = u21
| j1 = u22
-
+
the solutions are:
-
- | x0 = i0 + i1 * t,
+
+ | x0 = i0 + i1 * t,
| y0 = j0 + j1 * t. */
HOST_WIDE_INT i0, j0, i1, j1;
@@ -2294,9 +2294,9 @@ analyze_subscript_affine_affine (tree chrec_a,
if ((i1 == 0 && i0 < 0)
|| (j1 == 0 && j0 < 0))
{
- /* There is no solution.
- FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
- falls in here, but for the moment we don't look at the
+ /* There is no solution.
+ FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
+ falls in here, but for the moment we don't look at the
upper bound of the iteration domain. */
*overlaps_a = conflict_fn_no_dependence ();
*overlaps_b = conflict_fn_no_dependence ();
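The i0, j0, i1, j1 parametrization above is the textbook general solution of a linear Diophantine equation: for alpha * x - beta * y = gamma with g = gcd (alpha, beta), a solution exists iff g divides gamma, a particular solution comes from the extended Euclidean algorithm, and the full solution set is x = x0 + (beta / g) * t, y = y0 + (alpha / g) * t.  The sketch below shows that structure on plain integers; it is illustrative only and does not use the lambda-matrix machinery of the real code.

/* Not GCC code: general solution of alpha * x - beta * y = gamma,
   mirroring the x0 = i0 + i1 * t, y0 = j0 + j1 * t shape used above.  */
#include <stdio.h>

/* Returns g = gcd (a, b) and sets *u, *v so that a * *u + b * *v == g.  */
static long
xgcd (long a, long b, long *u, long *v)
{
  if (b == 0)
    {
      *u = 1;
      *v = 0;
      return a;
    }
  long u1, v1;
  long g = xgcd (b, a % b, &u1, &v1);
  *u = v1;
  *v = u1 - (a / b) * v1;
  return g;
}

int
main (void)
{
  long alpha = 4, beta = 6, gamma = 10, u, v;
  long g = xgcd (alpha, beta, &u, &v);

  if (gamma % g != 0)
    {
      printf ("no integer solutions\n");
      return 0;
    }

  long x0 = u * (gamma / g), y0 = -v * (gamma / g);
  long i1 = beta / g, j1 = alpha / g;
  for (long t = 0; t < 3; t++)
    printf ("x = %ld, y = %ld, alpha * x - beta * y = %ld\n",
            x0 + i1 * t, y0 + j1 * t,
            alpha * (x0 + i1 * t) - beta * (y0 + j1 * t));
  return 0;
}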
@@ -2387,7 +2387,7 @@ analyze_subscript_affine_affine (tree chrec_a,
*last_conflicts = chrec_dont_know;
}
-end_analyze_subs_aa:
+end_analyze_subs_aa:
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " (overlaps_a = ");
@@ -2403,12 +2403,12 @@ end_analyze_subs_aa:
determining the dependence relation between chrec_a and chrec_b,
that contain symbols. This function modifies chrec_a and chrec_b
such that the analysis result is the same, and such that they don't
- contain symbols, and then can safely be passed to the analyzer.
+ contain symbols, and then can safely be passed to the analyzer.
Example: The analysis of the following tuples of evolutions produce
the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
vs. {0, +, 1}_1
-
+
{x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
{-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
*/
@@ -2434,7 +2434,7 @@ can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");
- *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
+ *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
diff, CHREC_RIGHT (*chrec_a));
right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
*chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
@@ -2451,36 +2451,36 @@ can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
static void
-analyze_siv_subscript (tree chrec_a,
+analyze_siv_subscript (tree chrec_a,
tree chrec_b,
- conflict_function **overlaps_a,
- conflict_function **overlaps_b,
+ conflict_function **overlaps_a,
+ conflict_function **overlaps_b,
tree *last_conflicts,
int loop_nest_num)
{
dependence_stats.num_siv++;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(analyze_siv_subscript \n");
-
+
if (evolution_function_is_constant_p (chrec_a)
&& evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
- analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
+ analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
overlaps_a, overlaps_b, last_conflicts);
-
+
else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
&& evolution_function_is_constant_p (chrec_b))
- analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
+ analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
overlaps_b, overlaps_a, last_conflicts);
-
+
else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
&& evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
{
if (!chrec_contains_symbols (chrec_a)
&& !chrec_contains_symbols (chrec_b))
{
- analyze_subscript_affine_affine (chrec_a, chrec_b,
- overlaps_a, overlaps_b,
+ analyze_subscript_affine_affine (chrec_a, chrec_b,
+ overlaps_a, overlaps_b,
last_conflicts);
if (CF_NOT_KNOWN_P (*overlaps_a)
@@ -2492,11 +2492,11 @@ analyze_siv_subscript (tree chrec_a,
else
dependence_stats.num_siv_dependent++;
}
- else if (can_use_analyze_subscript_affine_affine (&chrec_a,
+ else if (can_use_analyze_subscript_affine_affine (&chrec_a,
&chrec_b))
{
- analyze_subscript_affine_affine (chrec_a, chrec_b,
- overlaps_a, overlaps_b,
+ analyze_subscript_affine_affine (chrec_a, chrec_b,
+ overlaps_a, overlaps_b,
last_conflicts);
if (CF_NOT_KNOWN_P (*overlaps_a)
@@ -2522,7 +2522,7 @@ analyze_siv_subscript (tree chrec_a,
*last_conflicts = chrec_dont_know;
dependence_stats.num_siv_unimplemented++;
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, ")\n");
}
@@ -2561,17 +2561,17 @@ gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
static void
-analyze_miv_subscript (tree chrec_a,
- tree chrec_b,
- conflict_function **overlaps_a,
- conflict_function **overlaps_b,
+analyze_miv_subscript (tree chrec_a,
+ tree chrec_b,
+ conflict_function **overlaps_a,
+ conflict_function **overlaps_b,
tree *last_conflicts,
struct loop *loop_nest)
{
/* FIXME: This is a MIV subscript, not yet handled.
- Example: (A[{1, +, 1}_1] vs. A[{1, +, 1}_2]) that comes from
- (A[i] vs. A[j]).
-
+ Example: (A[{1, +, 1}_1] vs. A[{1, +, 1}_2]) that comes from
+ (A[i] vs. A[j]).
+
In the SIV test we had to solve a Diophantine equation with two
variables. In the MIV case we have to solve a Diophantine
equation with 2*n variables (if the subscript uses n IVs).
@@ -2586,7 +2586,7 @@ analyze_miv_subscript (tree chrec_a,
chrec_a = chrec_convert (type, chrec_a, NULL);
chrec_b = chrec_convert (type, chrec_b, NULL);
difference = chrec_fold_minus (type, chrec_a, chrec_b);
-
+
if (eq_evolutions_p (chrec_a, chrec_b))
{
/* Access functions are the same: all the elements are accessed
@@ -2597,7 +2597,7 @@ analyze_miv_subscript (tree chrec_a,
(get_chrec_loop (chrec_a), true);
dependence_stats.num_miv_dependent++;
}
-
+
else if (evolution_function_is_constant_p (difference)
/* For the moment, the following is verified:
evolution_function_is_affine_multivariate_p (chrec_a,
@@ -2605,8 +2605,8 @@ analyze_miv_subscript (tree chrec_a,
&& !gcd_of_steps_may_divide_p (chrec_a, difference))
{
/* testsuite/.../ssa-chrec-33.c
- {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2
-
+ {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2
+
The difference is 1, and all the evolution steps are multiples
of 2, consequently there are no overlapping elements. */
*overlaps_a = conflict_fn_no_dependence ();
@@ -2614,7 +2614,7 @@ analyze_miv_subscript (tree chrec_a,
*last_conflicts = integer_zero_node;
dependence_stats.num_miv_independent++;
}
-
+
else if (evolution_function_is_affine_multivariate_p (chrec_a, loop_nest->num)
&& !chrec_contains_symbols (chrec_a)
&& evolution_function_is_affine_multivariate_p (chrec_b, loop_nest->num)
@@ -2623,18 +2623,18 @@ analyze_miv_subscript (tree chrec_a,
/* testsuite/.../ssa-chrec-35.c
{0, +, 1}_2 vs. {0, +, 1}_3
the overlapping elements are respectively located at iterations:
- {0, +, 1}_x and {0, +, 1}_x,
- in other words, we have the equality:
+ {0, +, 1}_x and {0, +, 1}_x,
+ in other words, we have the equality:
{0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)
-
- Other examples:
- {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
+
+ Other examples:
+ {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
{0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)
- {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
+ {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
{{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
*/
- analyze_subscript_affine_affine (chrec_a, chrec_b,
+ analyze_subscript_affine_affine (chrec_a, chrec_b,
overlaps_a, overlaps_b, last_conflicts);
if (CF_NOT_KNOWN_P (*overlaps_a)
@@ -2646,7 +2646,7 @@ analyze_miv_subscript (tree chrec_a,
else
dependence_stats.num_miv_dependent++;
}
-
+
else
{
/* When the analysis is too difficult, answer "don't know". */
@@ -2658,7 +2658,7 @@ analyze_miv_subscript (tree chrec_a,
*last_conflicts = chrec_dont_know;
dependence_stats.num_miv_unimplemented++;
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, ")\n");
}
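The ssa-chrec-33.c case above is an instance of a GCD argument: when the difference of the two chrecs is a constant and every evolution step is a multiple of g (the gcd of the steps), the two references can only reach offsets that differ by multiples of g, so there is no overlap unless g divides the difference.  Here the difference is 1 and every step is +-2, so g = 2 and the references are independent.  A standalone illustration (not GCC code):

/* Not GCC code: the GCD-of-steps argument.  If every step is a multiple
   of g and g does not divide the constant difference, the references can
   never touch the same element.  */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static long
gcd (long a, long b)
{
  a = labs (a);
  b = labs (b);
  while (b)
    {
      long t = a % b;
      a = b;
      b = t;
    }
  return a;
}

static bool
gcd_of_steps_may_divide (const long *steps, int n, long difference)
{
  long g = 0;
  for (int i = 0; i < n; i++)
    g = gcd (g, steps[i]);
  return g == 0 || difference % g == 0;
}

int
main (void)
{
  /* {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2: steps 2 and -2,
     difference 1, and 2 does not divide 1, so no overlap (prints 0).  */
  long steps[] = { 2, -2, 2, -2 };
  printf ("may overlap: %d\n", gcd_of_steps_may_divide (steps, 4, 21 - 20));
  return 0;
}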
@@ -2667,23 +2667,23 @@ analyze_miv_subscript (tree chrec_a,
with respect to LOOP_NEST. OVERLAP_ITERATIONS_A and
OVERLAP_ITERATIONS_B are initialized with two functions that
describe the iterations that contain conflicting elements.
-
+
Remark: For an integer k >= 0, the following equality is true:
-
+
CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
*/
-static void
-analyze_overlapping_iterations (tree chrec_a,
- tree chrec_b,
- conflict_function **overlap_iterations_a,
- conflict_function **overlap_iterations_b,
+static void
+analyze_overlapping_iterations (tree chrec_a,
+ tree chrec_b,
+ conflict_function **overlap_iterations_a,
+ conflict_function **overlap_iterations_b,
tree *last_conflicts, struct loop *loop_nest)
{
unsigned int lnn = loop_nest->num;
dependence_stats.num_subscript_tests++;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "(analyze_overlapping_iterations \n");
@@ -2700,12 +2700,12 @@ analyze_overlapping_iterations (tree chrec_a,
|| chrec_contains_undetermined (chrec_b))
{
dependence_stats.num_subscript_undetermined++;
-
+
*overlap_iterations_a = conflict_fn_not_known ();
*overlap_iterations_b = conflict_fn_not_known ();
}
- /* If they are the same chrec, and are affine, they overlap
+ /* If they are the same chrec, and are affine, they overlap
on every iteration. */
else if (eq_evolutions_p (chrec_a, chrec_b)
&& evolution_function_is_affine_multivariate_p (chrec_a, lnn))
@@ -2718,7 +2718,7 @@ analyze_overlapping_iterations (tree chrec_a,
/* If they aren't the same, and aren't affine, we can't do anything
yet. */
- else if ((chrec_contains_symbols (chrec_a)
+ else if ((chrec_contains_symbols (chrec_a)
|| chrec_contains_symbols (chrec_b))
&& (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
|| !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
@@ -2729,20 +2729,20 @@ analyze_overlapping_iterations (tree chrec_a,
}
else if (ziv_subscript_p (chrec_a, chrec_b))
- analyze_ziv_subscript (chrec_a, chrec_b,
+ analyze_ziv_subscript (chrec_a, chrec_b,
overlap_iterations_a, overlap_iterations_b,
last_conflicts);
-
+
else if (siv_subscript_p (chrec_a, chrec_b))
- analyze_siv_subscript (chrec_a, chrec_b,
- overlap_iterations_a, overlap_iterations_b,
+ analyze_siv_subscript (chrec_a, chrec_b,
+ overlap_iterations_a, overlap_iterations_b,
last_conflicts, lnn);
-
+
else
- analyze_miv_subscript (chrec_a, chrec_b,
+ analyze_miv_subscript (chrec_a, chrec_b,
overlap_iterations_a, overlap_iterations_b,
last_conflicts, loop_nest);
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " (overlap_iterations_a = ");
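The dispatch above follows the usual ZIV/SIV/MIV taxonomy: a pair of loop-invariant subscripts goes to the ZIV test, a pair whose evolutions involve at most a single loop goes to the SIV test, and everything else falls through to the MIV test.  For instance, (A[5], A[5]) is a ZIV pair, (A[5], A[{0, +, 1}_1]) is an SIV pair, and (A[{0, +, 1}_1], A[{0, +, 1}_2]), the A[i] vs. A[j] case mentioned in analyze_miv_subscript, is an MIV pair (illustrative subscripts, not taken from a testcase).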
@@ -2842,7 +2842,7 @@ build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
access_fn_a = DR_ACCESS_FN (ddr_a, i);
access_fn_b = DR_ACCESS_FN (ddr_b, i);
- if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
+ if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
&& TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
{
int dist, index;
@@ -2867,7 +2867,7 @@ build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
non_affine_dependence_relation (ddr);
return false;
}
-
+
dist = int_cst_value (SUB_DISTANCE (subscript));
/* This is the subscript coupling test. If we have already
@@ -3147,7 +3147,7 @@ build_classic_dist_vector (struct data_dependence_relation *ddr,
| T[j][i] = t + 2; // B
| }
- the vectors are:
+ the vectors are:
(0, 1, -1)
(1, 1, -1)
(1, -1, 1)
@@ -3269,9 +3269,9 @@ subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
{
conflict_function *overlaps_a, *overlaps_b;
- analyze_overlapping_iterations (DR_ACCESS_FN (dra, i),
+ analyze_overlapping_iterations (DR_ACCESS_FN (dra, i),
DR_ACCESS_FN (drb, i),
- &overlaps_a, &overlaps_b,
+ &overlaps_a, &overlaps_b,
&last_conflicts, loop_nest);
if (CF_NOT_KNOWN_P (overlaps_a)
@@ -3316,10 +3316,10 @@ static void
subscript_dependence_tester (struct data_dependence_relation *ddr,
struct loop *loop_nest)
{
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(subscript_dependence_tester \n");
-
+
if (subscript_dependence_tester_1 (ddr, DDR_A (ddr), DDR_B (ddr), loop_nest))
dependence_stats.num_dependence_dependent++;
@@ -3334,7 +3334,7 @@ subscript_dependence_tester (struct data_dependence_relation *ddr,
/* Returns true when all the access functions of A are affine or
constant with respect to LOOP_NEST. */
-static bool
+static bool
access_functions_are_affine_or_constant_p (const struct data_reference *a,
const struct loop *loop_nest)
{
@@ -3346,7 +3346,7 @@ access_functions_are_affine_or_constant_p (const struct data_reference *a,
if (!evolution_function_is_invariant_p (t, loop_nest->num)
&& !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
return false;
-
+
return true;
}
@@ -3363,8 +3363,8 @@ access_functions_are_affine_or_constant_p (const struct data_reference *a,
ACCESS_FUN is expected to be an affine chrec. */
static bool
-init_omega_eq_with_af (omega_pb pb, unsigned eq,
- unsigned int offset, tree access_fun,
+init_omega_eq_with_af (omega_pb pb, unsigned eq,
+ unsigned int offset, tree access_fun,
struct data_dependence_relation *ddr)
{
switch (TREE_CODE (access_fun))
@@ -3386,7 +3386,7 @@ init_omega_eq_with_af (omega_pb pb, unsigned eq,
DDR_INNER_LOOP (ddr) = MAX (DDR_INNER_LOOP (ddr), var_idx);
if (offset == 0)
- pb->eqs[eq].coef[var_idx + DDR_NB_LOOPS (ddr) + 1]
+ pb->eqs[eq].coef[var_idx + DDR_NB_LOOPS (ddr) + 1]
+= int_cst_value (right);
switch (TREE_CODE (left))
@@ -3429,7 +3429,7 @@ omega_extract_distance_vectors (omega_pb pb,
/* Set a new problem for each loop in the nest. The basis is the
problem that we have initialized until now. On top of this we
add new constraints. */
- for (i = 0; i <= DDR_INNER_LOOP (ddr)
+ for (i = 0; i <= DDR_INNER_LOOP (ddr)
&& VEC_iterate (loop_p, DDR_LOOP_NEST (ddr), i, loopi); i++)
{
int dist = 0;
@@ -3453,7 +3453,7 @@ omega_extract_distance_vectors (omega_pb pb,
/* Reduce the constraint system, and test that the current
problem is feasible. */
res = omega_simplify_problem (copy);
- if (res == omega_false
+ if (res == omega_false
|| res == omega_unknown
|| copy->num_geqs > (int) DDR_NB_LOOPS (ddr))
goto next_problem;
@@ -3482,7 +3482,7 @@ omega_extract_distance_vectors (omega_pb pb,
copy->eqs[eq].coef[0] = -1;
res = omega_simplify_problem (copy);
- if (res == omega_false
+ if (res == omega_false
|| res == omega_unknown
|| copy->num_geqs > (int) DDR_NB_LOOPS (ddr))
goto next_problem;
@@ -3562,7 +3562,7 @@ omega_setup_subscript (tree access_fun_a, tree access_fun_b,
/* GCD test. */
if (DDR_NB_LOOPS (ddr) != 0 && pb->eqs[eq].coef[0]
- && !int_divides_p (lambda_vector_gcd
+ && !int_divides_p (lambda_vector_gcd
((lambda_vector) &(pb->eqs[eq].coef[1]),
2 * DDR_NB_LOOPS (ddr)),
pb->eqs[eq].coef[0]))
@@ -3611,7 +3611,7 @@ init_omega_for_ddr_1 (struct data_reference *dra, struct data_reference *drb,
removed by the solver: the "dx"
- coef[nb_loops + 1, 2*nb_loops] are the loop variables: "loop_x".
*/
- for (i = 0; i <= DDR_INNER_LOOP (ddr)
+ for (i = 0; i <= DDR_INNER_LOOP (ddr)
&& VEC_iterate (loop_p, DDR_LOOP_NEST (ddr), i, loopi); i++)
{
HOST_WIDE_INT nbi = estimated_loop_iterations_int (loopi, false);
@@ -3663,7 +3663,7 @@ init_omega_for_ddr_1 (struct data_reference *dra, struct data_reference *drb,
set MAYBE_DEPENDENT to true.
Example: for setting up the dependence system corresponding to the
- conflicting accesses
+ conflicting accesses
| loop_i
| loop_j
@@ -3671,7 +3671,7 @@ init_omega_for_ddr_1 (struct data_reference *dra, struct data_reference *drb,
| ... A[2*j, 2*(i + j)]
| endloop_j
| endloop_i
-
+
the following constraints come from the iteration domain:
0 <= i <= Ni
@@ -3872,14 +3872,14 @@ ddr_consistent_p (FILE *file,
}
}
- return true;
+ return true;
}
/* This computes the affine dependence relation between A and B with
respect to LOOP_NEST. CHREC_KNOWN is used for representing the
independence between two accesses, while CHREC_DONT_KNOW is used
for representing the unknown relation.
-
+
Note that it is possible to stop the computation of the dependence
relation the first time we detect a CHREC_KNOWN element for a given
subscript. */
@@ -3890,7 +3890,7 @@ compute_affine_dependence (struct data_dependence_relation *ddr,
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "(compute_affine_dependence\n");
@@ -3953,7 +3953,7 @@ compute_affine_dependence (struct data_dependence_relation *ddr,
else
subscript_dependence_tester (ddr, loop_nest);
}
-
+
/* As a last case, if the dependence cannot be determined, or if
the dependence is considered too difficult to determine, answer
"don't know". */
@@ -3973,7 +3973,7 @@ compute_affine_dependence (struct data_dependence_relation *ddr,
finalize_ddr_dependent (ddr, chrec_dont_know);
}
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, ")\n");
}
@@ -4016,7 +4016,7 @@ compute_self_dependence (struct data_dependence_relation *ddr)
COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
relations. */
-void
+void
compute_all_dependences (VEC (data_reference_p, heap) *datarefs,
VEC (ddr_p, heap) **dependence_relations,
VEC (loop_p, heap) *loop_nest,
@@ -4075,7 +4075,7 @@ get_references_in_stmt (gimple stmt, VEC (data_ref_loc, heap) **references)
tree base;
op0 = gimple_assign_lhs_ptr (stmt);
op1 = gimple_assign_rhs1_ptr (stmt);
-
+
if (DECL_P (*op1)
|| (REFERENCE_CLASS_P (*op1)
&& (base = get_base_address (*op1))
@@ -4139,8 +4139,8 @@ find_data_references_in_stmt (struct loop *nest, gimple stmt,
{
dr = create_data_ref (nest, *ref->pos, stmt, ref->is_read);
gcc_assert (dr != NULL);
-
- /* FIXME -- data dependence analysis does not work correctly for objects
+
+ /* FIXME -- data dependence analysis does not work correctly for objects
with invariant addresses in loop nests. Let us fail here until the
problem is fixed. */
if (dr_address_invariant_p (dr) && nest)
@@ -4223,7 +4223,7 @@ find_data_references_in_bb (struct loop *loop, basic_block bb,
TODO: This function should be made smarter so that it can handle address
arithmetic as if they were array accesses, etc. */
-tree
+tree
find_data_references_in_loop (struct loop *loop,
VEC (data_reference_p, heap) **datarefs)
{
@@ -4291,13 +4291,13 @@ find_loop_nest (struct loop *loop, VEC (loop_p, heap) **loop_nest)
/* Returns true when the data dependences have been computed, false otherwise.
Given a loop nest LOOP, the following vectors are returned:
- DATAREFS is initialized to all the array elements contained in this loop,
- DEPENDENCE_RELATIONS contains the relations between the data references.
- Compute read-read and self relations if
+ DATAREFS is initialized to all the array elements contained in this loop,
+ DEPENDENCE_RELATIONS contains the relations between the data references.
+ Compute read-read and self relations if
COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
bool
-compute_data_dependences_for_loop (struct loop *loop,
+compute_data_dependences_for_loop (struct loop *loop,
bool compute_self_and_read_read_dependences,
VEC (data_reference_p, heap) **datarefs,
VEC (ddr_p, heap) **dependence_relations)
@@ -4307,7 +4307,7 @@ compute_data_dependences_for_loop (struct loop *loop,
memset (&dependence_stats, 0, sizeof (dependence_stats));
- /* If the loop nest is not well formed, or one of the data references
+ /* If the loop nest is not well formed, or one of the data references
is not computable, give up without spending time to compute other
dependences. */
if (!loop
@@ -4330,20 +4330,20 @@ compute_data_dependences_for_loop (struct loop *loop,
{
fprintf (dump_file, "Dependence tester statistics:\n");
- fprintf (dump_file, "Number of dependence tests: %d\n",
+ fprintf (dump_file, "Number of dependence tests: %d\n",
dependence_stats.num_dependence_tests);
- fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
+ fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
dependence_stats.num_dependence_dependent);
- fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
+ fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
dependence_stats.num_dependence_independent);
- fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
+ fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
dependence_stats.num_dependence_undetermined);
- fprintf (dump_file, "Number of subscript tests: %d\n",
+ fprintf (dump_file, "Number of subscript tests: %d\n",
dependence_stats.num_subscript_tests);
- fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
+ fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
dependence_stats.num_subscript_undetermined);
- fprintf (dump_file, "Number of same subscript function: %d\n",
+ fprintf (dump_file, "Number of same subscript function: %d\n",
dependence_stats.num_same_subscript_function);
fprintf (dump_file, "Number of ziv tests: %d\n",
@@ -4353,9 +4353,9 @@ compute_data_dependences_for_loop (struct loop *loop,
fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
dependence_stats.num_ziv_independent);
fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
- dependence_stats.num_ziv_unimplemented);
+ dependence_stats.num_ziv_unimplemented);
- fprintf (dump_file, "Number of siv tests: %d\n",
+ fprintf (dump_file, "Number of siv tests: %d\n",
dependence_stats.num_siv);
fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
dependence_stats.num_siv_dependent);
@@ -4364,7 +4364,7 @@ compute_data_dependences_for_loop (struct loop *loop,
fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
dependence_stats.num_siv_unimplemented);
- fprintf (dump_file, "Number of miv tests: %d\n",
+ fprintf (dump_file, "Number of miv tests: %d\n",
dependence_stats.num_miv);
fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
dependence_stats.num_miv_dependent);
@@ -4377,9 +4377,9 @@ compute_data_dependences_for_loop (struct loop *loop,
return res;
}
-/* Returns true when the data dependences for the basic block BB have been
+/* Returns true when the data dependences for the basic block BB have been
computed, false otherwise.
- DATAREFS is initialized to all the array elements contained in this basic
+ DATAREFS is initialized to all the array elements contained in this basic
block, DEPENDENCE_RELATIONS contains the relations between the data
references. Compute read-read and self relations if
COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
@@ -4400,32 +4400,32 @@ compute_data_dependences_for_bb (basic_block bb,
/* Entry point (for testing only). Analyze all the data references
and the dependence relations in LOOP.
- The data references are computed first.
-
+ The data references are computed first.
+
A relation on these nodes is represented by a complete graph. Some
of the relations could be of no interest, thus the relations can be
computed on demand.
-
+
In the following function we compute all the relations. This is
just a first implementation that is here for:
- - for showing how to ask for the dependence relations,
+ - for showing how to ask for the dependence relations,
- for the debugging the whole dependence graph,
- for the dejagnu testcases and maintenance.
-
+
It is possible to ask only for a part of the graph, avoiding to
compute the whole dependence graph. The computed dependences are
stored in a knowledge base (KB) such that later queries don't
recompute the same information. The implementation of this KB is
transparent to the optimizer, and thus the KB can be changed with a
more efficient implementation, or the KB could be disabled. */
-static void
+static void
analyze_all_data_dependences (struct loop *loop)
{
unsigned int i;
int nb_data_refs = 10;
- VEC (data_reference_p, heap) *datarefs =
+ VEC (data_reference_p, heap) *datarefs =
VEC_alloc (data_reference_p, heap, nb_data_refs);
- VEC (ddr_p, heap) *dependence_relations =
+ VEC (ddr_p, heap) *dependence_relations =
VEC_alloc (ddr_p, heap, nb_data_refs * nb_data_refs);
/* Compute DDs on the whole function. */
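The testing entry point described above is also a reasonable template for other callers.  A hedged sketch of the typical calling pattern, using only interfaces declared in tree-data-ref.h (the vector sizes are arbitrary; freeing the data references themselves, free_data_refs in the real sources, is not shown in this patch):

/* Sketch only, not part of this patch: collect the dependences of LOOP,
   classify each relation, then release the relations.  */
static void
examine_loop_dependences (struct loop *loop)
{
  VEC (data_reference_p, heap) *datarefs
    = VEC_alloc (data_reference_p, heap, 10);
  VEC (ddr_p, heap) *dependence_relations
    = VEC_alloc (ddr_p, heap, 100);

  if (compute_data_dependences_for_loop (loop, false,
					 &datarefs, &dependence_relations))
    {
      unsigned int i;
      struct data_dependence_relation *ddr;

      for (i = 0; VEC_iterate (ddr_p, dependence_relations, i, ddr); i++)
	{
	  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
	    ;	/* Independent: the two references never conflict.  */
	  else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	    ;	/* Analysis failed: a dependence must be assumed.  */
	  else
	    ;	/* Dependent: subscripts and distance vectors describe it.  */
	}
    }

  free_dependence_relations (dependence_relations);
}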
@@ -4451,14 +4451,14 @@ analyze_all_data_dependences (struct loop *loop)
{
if (chrec_contains_undetermined (DDR_ARE_DEPENDENT (ddr)))
nb_top_relations++;
-
+
else if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
nb_bot_relations++;
-
- else
+
+ else
nb_chrec_relations++;
}
-
+
gather_stats_on_scev_database ();
}
}
@@ -4501,7 +4501,7 @@ free_dependence_relation (struct data_dependence_relation *ddr)
/* Free the memory used by the data dependence relations from
DEPENDENCE_RELATIONS. */
-void
+void
free_dependence_relations (VEC (ddr_p, heap) *dependence_relations)
{
unsigned int i;
@@ -4548,7 +4548,7 @@ dump_rdg_vertex (FILE *file, struct graph *rdg, int i)
struct vertex *v = &(rdg->vertices[i]);
struct graph_edge *e;
- fprintf (file, "(vertex %d: (%s%s) (in:", i,
+ fprintf (file, "(vertex %d: (%s%s) (in:", i,
RDG_MEM_WRITE_STMT (rdg, i) ? "w" : "",
RDG_MEM_READS_STMT (rdg, i) ? "r" : "");
@@ -4773,7 +4773,7 @@ create_rdg_edges_for_scalar (struct graph *rdg, tree def, int idef)
{
use_operand_p imm_use_p;
imm_use_iterator iterator;
-
+
FOR_EACH_IMM_USE_FAST (imm_use_p, iterator, def)
{
struct graph_edge *e;
@@ -4896,7 +4896,7 @@ known_dependences_p (VEC (ddr_p, heap) *dependence_relations)
for (i = 0; VEC_iterate (ddr_p, dependence_relations, i, ddr); i++)
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
return false;
-
+
return true;
}
@@ -4958,10 +4958,10 @@ build_rdg (struct loop *loop)
VEC (ddr_p, heap) *dependence_relations;
VEC (data_reference_p, heap) *datarefs;
VEC (gimple, heap) *stmts = VEC_alloc (gimple, heap, nb_data_refs);
-
+
dependence_relations = VEC_alloc (ddr_p, heap, nb_data_refs * nb_data_refs) ;
datarefs = VEC_alloc (data_reference_p, heap, nb_data_refs);
- compute_data_dependences_for_loop (loop,
+ compute_data_dependences_for_loop (loop,
false,
&datarefs,
&dependence_relations);
@@ -5184,8 +5184,8 @@ remove_similar_memory_refs (VEC (gimple, heap) **stmts)
/* Returns the index of PARAMETER in the parameters vector of the
ACCESS_MATRIX. If PARAMETER does not exist return -1. */
-int
-access_matrix_get_index_for_parameter (tree parameter,
+int
+access_matrix_get_index_for_parameter (tree parameter,
struct access_matrix *access_matrix)
{
int i;
diff --git a/gcc/tree-data-ref.h b/gcc/tree-data-ref.h
index fe79faea40f..e945ecb464d 100644
--- a/gcc/tree-data-ref.h
+++ b/gcc/tree-data-ref.h
@@ -1,4 +1,4 @@
-/* Data references and dependences detectors.
+/* Data references and dependences detectors.
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
Contributed by Sebastian Pop <pop@cri.ensmp.fr>
@@ -32,14 +32,14 @@ along with GCC; see the file COPYING3. If not see
reference in the innermost enclosing loop. The address is expressed as
BASE + STEP * # of iteration, and base is further decomposed as the base
pointer (BASE_ADDRESS), loop invariant offset (OFFSET) and
- constant offset (INIT). Examples, in loop nest
-
+ constant offset (INIT). Examples, in loop nest
+
for (i = 0; i < 100; i++)
for (j = 3; j < 100; j++)
Example 1 Example 2
data-ref a[j].b[i][j] *(p + x + 16B + 4B * j)
-
+
innermost_loop_behavior
base_address &a p
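For Example 2 the decomposition can be read off directly from the expression (an illustrative reading, not taken verbatim from the table): with j running from 3, the iteration number is j - 3, so *(p + x + 16B + 4B * j) = (p + x + 28B) + 4B * (# of iteration); that is, base_address is p, the loop-invariant offset is x, init is 28 bytes and step is 4 bytes.  Example 1 additionally depends on the layout of a and is not spelled out here.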
@@ -79,7 +79,7 @@ struct indices
{
/* The object. */
tree base_object;
-
+
/* A list of chrecs. Access functions of the indices. */
VEC(tree,heap) *access_fns;
};
@@ -110,7 +110,7 @@ struct dr_alias
| loop_2
| a[i+3][2*j+n-1]
- if "i" varies in loop_1 and "j" varies in loop_2, the access
+ if "i" varies in loop_1 and "j" varies in loop_2, the access
matrix with respect to the loop nest {loop_1, loop_2} is:
| loop_1 loop_2 param_n cst
@@ -163,7 +163,7 @@ struct data_reference
{
/* A pointer to the statement that contains this DR. */
gimple stmt;
-
+
/* A pointer to the memory reference. */
tree ref;
@@ -191,7 +191,7 @@ struct data_reference
#define DR_BASE_OBJECT(DR) (DR)->indices.base_object
#define DR_ACCESS_FNS(DR) (DR)->indices.access_fns
#define DR_ACCESS_FN(DR, I) VEC_index (tree, DR_ACCESS_FNS (DR), I)
-#define DR_NUM_DIMENSIONS(DR) VEC_length (tree, DR_ACCESS_FNS (DR))
+#define DR_NUM_DIMENSIONS(DR) VEC_length (tree, DR_ACCESS_FNS (DR))
#define DR_IS_READ(DR) (DR)->is_read
#define DR_BASE_ADDRESS(DR) (DR)->innermost.base_address
#define DR_OFFSET(DR) (DR)->innermost.offset
@@ -206,9 +206,9 @@ DEF_VEC_P(data_reference_p);
DEF_VEC_ALLOC_P (data_reference_p, heap);
enum data_dependence_direction {
- dir_positive,
- dir_negative,
- dir_equal,
+ dir_positive,
+ dir_negative,
+ dir_equal,
dir_positive_or_negative,
dir_positive_or_equal,
dir_negative_or_equal,
@@ -252,11 +252,11 @@ struct subscript
accessed twice. */
conflict_function *conflicting_iterations_in_a;
conflict_function *conflicting_iterations_in_b;
-
+
/* This field stores the information about the iteration domain
validity of the dependence relation. */
tree last_conflict;
-
+
/* Distance from the iteration that access a conflicting element in
A to the iteration that access this same conflicting element in
B. The distance is a tree scalar expression, i.e. a constant or a
@@ -278,23 +278,23 @@ DEF_VEC_ALLOC_P (subscript_p, heap);
struct data_dependence_relation
{
-
+
struct data_reference *a;
struct data_reference *b;
/* A "yes/no/maybe" field for the dependence relation:
-
+
- when "ARE_DEPENDENT == NULL_TREE", there exist a dependence
relation between A and B, and the description of this relation
is given in the SUBSCRIPTS array,
-
+
- when "ARE_DEPENDENT == chrec_known", there is no dependence and
SUBSCRIPTS is empty,
-
+
- when "ARE_DEPENDENT == chrec_dont_know", there may be a dependence,
but the analyzer cannot be more specific. */
tree are_dependent;
-
+
/* For each subscript in the dependence test, there is an element in
this array. This is the attribute that labels the edge A->B of
the data_dependence_relation. */
@@ -380,7 +380,7 @@ extern bool compute_data_dependences_for_loop (struct loop *, bool,
extern bool compute_data_dependences_for_bb (basic_block, bool,
VEC (data_reference_p, heap) **,
VEC (ddr_p, heap) **);
-extern tree find_data_references_in_loop (struct loop *,
+extern tree find_data_references_in_loop (struct loop *,
VEC (data_reference_p, heap) **);
extern void print_direction_vector (FILE *, lambda_vector, int);
extern void print_dir_vectors (FILE *, VEC (lambda_vector, heap) *, int);
@@ -393,11 +393,11 @@ extern void debug_data_reference (struct data_reference *);
extern void dump_data_references (FILE *, VEC (data_reference_p, heap) *);
extern void debug_data_references (VEC (data_reference_p, heap) *);
extern void debug_data_dependence_relation (struct data_dependence_relation *);
-extern void dump_data_dependence_relation (FILE *,
+extern void dump_data_dependence_relation (FILE *,
struct data_dependence_relation *);
extern void dump_data_dependence_relations (FILE *, VEC (ddr_p, heap) *);
extern void debug_data_dependence_relations (VEC (ddr_p, heap) *);
-extern void dump_data_dependence_direction (FILE *,
+extern void dump_data_dependence_direction (FILE *,
enum data_dependence_direction);
extern void free_dependence_relation (struct data_dependence_relation *);
extern void free_dependence_relations (VEC (ddr_p, heap) *);
@@ -509,24 +509,24 @@ int rdg_vertex_for_stmt (struct graph *, gimple);
/* Data dependence type. */
-enum rdg_dep_type
+enum rdg_dep_type
{
/* Read After Write (RAW). */
flow_dd = 'f',
-
+
/* Write After Read (WAR). */
anti_dd = 'a',
-
+
/* Write After Write (WAW). */
- output_dd = 'o',
-
+ output_dd = 'o',
+
/* Read After Read (RAR). */
- input_dd = 'i'
+ input_dd = 'i'
};
/* Dependence information attached to an edge of the RDG. */
-typedef struct rdg_edge
+typedef struct rdg_edge
{
/* Type of the dependence. */
enum rdg_dep_type type;
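The four rdg_dep_type values follow the standard dependence naming.  As an illustration (statements invented for the example, not from a testcase), given

   S1: a = b + 1;
   S2: c = a * 2;
   S3: a = 0;

the RDG edge S1 -> S2 is flow_dd (S2 reads the a written by S1), S2 -> S3 is anti_dd (S3 overwrites the a read by S2), S1 -> S3 is output_dd (two writes of a), and two statements that only read the same location are linked by an input_dd edge.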
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index 078a72a8bcc..69535f1dc60 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -145,7 +145,7 @@ create_var_ann (tree t)
/* Renumber all of the gimple stmt uids. */
-void
+void
renumber_gimple_stmt_uids (void)
{
basic_block bb;
@@ -165,7 +165,7 @@ renumber_gimple_stmt_uids (void)
/* Like renumber_gimple_stmt_uids, but only do work on the basic blocks
in BLOCKS, of which there are N_BLOCKS. Also renumbers PHIs. */
-void
+void
renumber_gimple_stmt_uids_in_blocks (basic_block *blocks, int n_blocks)
{
int i;
@@ -221,10 +221,10 @@ dump_referenced_vars (FILE *file)
{
tree var;
referenced_var_iterator rvi;
-
+
fprintf (file, "\nReferenced variables in %s: %u\n\n",
get_name (current_function_decl), (unsigned) num_referenced_vars);
-
+
FOR_EACH_REFERENCED_VAR (var, rvi)
{
fprintf (file, "Variable: ");
@@ -275,7 +275,7 @@ dump_variable (FILE *file, tree var)
if (TREE_ADDRESSABLE (var))
fprintf (file, ", is addressable");
-
+
if (is_global_var (var))
fprintf (file, ", is global");
@@ -509,7 +509,7 @@ find_referenced_vars_in (gimple stmt)
/* Lookup UID in the referenced_vars hashtable and return the associated
variable. */
-tree
+tree
referenced_var_lookup (unsigned int uid)
{
tree h;
@@ -520,12 +520,12 @@ referenced_var_lookup (unsigned int uid)
return h;
}
-/* Check if TO is in the referenced_vars hash table and insert it if not.
+/* Check if TO is in the referenced_vars hash table and insert it if not.
Return true if it required insertion. */
bool
referenced_var_check_and_insert (tree to)
-{
+{
tree h, *loc;
struct tree_decl_minimal in;
unsigned int uid = DECL_UID (to);
@@ -549,7 +549,7 @@ referenced_var_check_and_insert (tree to)
/* Lookup VAR UID in the default_defs hashtable and return the associated
variable. */
-tree
+tree
gimple_default_def (struct function *fn, tree var)
{
struct tree_decl_minimal ind;
@@ -564,7 +564,7 @@ gimple_default_def (struct function *fn, tree var)
void
set_default_def (tree var, tree def)
-{
+{
struct tree_decl_minimal ind;
struct tree_ssa_name in;
void **loc;
@@ -602,7 +602,7 @@ add_referenced_var (tree var)
v_ann = get_var_ann (var);
gcc_assert (DECL_P (var));
-
+
/* Insert VAR into the referenced_vars has table if it isn't present. */
if (referenced_var_check_and_insert (var))
{
diff --git a/gcc/tree-dump.c b/gcc/tree-dump.c
index 7b7a85f5082..e0512bc80a3 100644
--- a/gcc/tree-dump.c
+++ b/gcc/tree-dump.c
@@ -821,7 +821,7 @@ static const struct dump_option_value_info dump_options[] =
{"memsyms", TDF_MEMSYMS},
{"verbose", TDF_VERBOSE},
{"eh", TDF_EH},
- {"all", ~(TDF_RAW | TDF_SLIM | TDF_LINENO | TDF_TREE | TDF_RTL | TDF_IPA
+ {"all", ~(TDF_RAW | TDF_SLIM | TDF_LINENO | TDF_TREE | TDF_RTL | TDF_IPA
| TDF_STMTADDR | TDF_GRAPH | TDF_DIAGNOSTIC | TDF_VERBOSE
| TDF_RHS_ONLY)},
{NULL, 0}
@@ -1025,7 +1025,7 @@ dump_switch_p_1 (const char *arg, struct dump_file_info *dfi, bool doglob)
const char *option_value;
const char *ptr;
int flags;
-
+
if (doglob && !dfi->glob)
return 0;
@@ -1092,7 +1092,7 @@ dump_switch_p (const char *arg)
for (i = 0; i < extra_dump_files_in_use; i++)
any |= dump_switch_p_1 (arg, &extra_dump_files[i], false);
-
+
if (!any)
for (i = 0; i < extra_dump_files_in_use; i++)
any |= dump_switch_p_1 (arg, &extra_dump_files[i], true);
diff --git a/gcc/tree-dump.h b/gcc/tree-dump.h
index 558e1c1c952..eeb28e0aad6 100644
--- a/gcc/tree-dump.h
+++ b/gcc/tree-dump.h
@@ -94,7 +94,7 @@ extern void dump_function_to_file (tree, FILE *, int);
extern void debug_function (tree, int);
extern int dump_flag (dump_info_p, int, const_tree);
-extern unsigned int dump_register (const char *, const char *, const char *,
+extern unsigned int dump_register (const char *, const char *, const char *,
int);
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 61c44f1fb14..21da53470a2 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -319,7 +319,7 @@ outside_finally_tree (treemple start, gimple target)
The eh region creation is straight-forward, but frobbing all the gotos
and such into shape isn't. */
-/* The sequence into which we record all EH stuff. This will be
+/* The sequence into which we record all EH stuff. This will be
placed at the end of the function when we're all done. */
static gimple_seq eh_seq;
@@ -2118,7 +2118,7 @@ redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
new_lp = get_eh_landing_pad_from_number (new_lp_nr);
gcc_assert (new_lp);
-
+
/* Unless CHANGE_REGION is true, the new and old landing pad
had better be associated with the same EH region. */
gcc_assert (change_region || new_lp->region == old_lp->region);
@@ -3278,7 +3278,7 @@ remove_unreachable_handlers (void)
fprintf (dump_file, "Removing unreachable landing pad %d\n", lp_nr);
remove_eh_landing_pad (lp);
}
-
+
if (dump_file)
{
fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
@@ -3588,7 +3588,7 @@ cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
}
/* A subroutine of cleanup_empty_eh. Handle more complex cases of
- unsplitting than unsplit_eh was prepared to handle, e.g. when
+ unsplitting than unsplit_eh was prepared to handle, e.g. when
multiple incoming edges and phis are involved. */
static bool
diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h
index 56dc9f038b6..3446790fa7a 100644
--- a/gcc/tree-flow-inline.h
+++ b/gcc/tree-flow-inline.h
@@ -75,7 +75,7 @@ first_htab_element (htab_iterator *hti, htab_t table)
if (x != HTAB_EMPTY_ENTRY && x != HTAB_DELETED_ENTRY)
break;
} while (++(hti->slot) < hti->limit);
-
+
if (hti->slot < hti->limit)
return *(hti->slot);
return NULL;
@@ -133,7 +133,7 @@ static inline tree
next_referenced_var (referenced_var_iterator *iter)
{
return (tree) next_htab_element (&iter->hti);
-}
+}
/* Fill up VEC with the variables in the referenced vars hashtable. */
@@ -222,7 +222,7 @@ delink_imm_use (ssa_use_operand_t *linknode)
static inline void
link_imm_use_to_list (ssa_use_operand_t *linknode, ssa_use_operand_t *list)
{
- /* Link the new node at the head of the list. If we are in the process of
+ /* Link the new node at the head of the list. If we are in the process of
traversing the list, we won't visit any new nodes added to it. */
linknode->prev = list;
linknode->next = list->next;
@@ -258,7 +258,7 @@ set_ssa_use_from_ptr (use_operand_p use, tree val)
link_imm_use (use, val);
}
-/* Link ssa_imm_use node LINKNODE into the chain for DEF, with use occurring
+/* Link ssa_imm_use node LINKNODE into the chain for DEF, with use occurring
in STMT. */
static inline void
link_imm_use_stmt (ssa_use_operand_t *linknode, tree def, gimple stmt)
@@ -287,7 +287,7 @@ relink_imm_use (ssa_use_operand_t *node, ssa_use_operand_t *old)
}
}
-/* Relink ssa_imm_use node LINKNODE into the chain for OLD, with use occurring
+/* Relink ssa_imm_use node LINKNODE into the chain for OLD, with use occurring
in STMT. */
static inline void
relink_imm_use_stmt (ssa_use_operand_t *linknode, ssa_use_operand_t *old,
@@ -445,12 +445,12 @@ num_imm_uses (const_tree var)
return num;
}
-/* Return the tree pointed-to by USE. */
+/* Return the tree pointed-to by USE. */
static inline tree
get_use_from_ptr (use_operand_p use)
-{
+{
return *(use->use);
-}
+}
/* Return the tree pointed-to by DEF. */
static inline tree
@@ -571,13 +571,13 @@ phi_arg_index_from_use (use_operand_p use)
index = element - root;
#ifdef ENABLE_CHECKING
- /* Make sure the calculation doesn't have any leftover bytes. If it does,
+ /* Make sure the calculation doesn't have any leftover bytes. If it does,
then imm_use is likely not the first element in phi_arg_d. */
gcc_assert (
(((char *)element - (char *)root) % sizeof (struct phi_arg_d)) == 0);
gcc_assert (index < gimple_phi_capacity (phi));
#endif
-
+
return index;
}
@@ -875,7 +875,7 @@ single_ssa_def_operand (gimple stmt, int flags)
}
-/* Return true if there are zero operands in STMT matching the type
+/* Return true if there are zero operands in STMT matching the type
given in FLAGS. */
static inline bool
zero_ssa_operands (gimple stmt, int flags)
@@ -920,7 +920,7 @@ static inline tree
single_phi_def (gimple stmt, int flags)
{
tree def = PHI_RESULT (stmt);
- if ((flags & SSA_OP_DEF) && is_gimple_reg (def))
+ if ((flags & SSA_OP_DEF) && is_gimple_reg (def))
return def;
if ((flags & SSA_OP_VIRTUAL_DEFS) && !is_gimple_reg (def))
return def;
@@ -941,7 +941,7 @@ op_iter_init_phiuse (ssa_op_iter *ptr, gimple phi, int flags)
gcc_assert ((flags & (SSA_OP_USE | SSA_OP_VIRTUAL_USES)) != 0);
comp = (is_gimple_reg (phi_def) ? SSA_OP_USE : SSA_OP_VIRTUAL_USES);
-
+
/* If the PHI node doesn't have the operand type we care about, we're done. */
if ((flags & comp) == 0)
{
@@ -970,7 +970,7 @@ op_iter_init_phidef (ssa_op_iter *ptr, gimple phi, int flags)
gcc_assert ((flags & (SSA_OP_DEF | SSA_OP_VIRTUAL_DEFS)) != 0);
comp = (is_gimple_reg (phi_def) ? SSA_OP_DEF : SSA_OP_VIRTUAL_DEFS);
-
+
/* If the PHI node doesn't have the operand type we care about,
we're done. */
if ((flags & comp) == 0)
@@ -1005,12 +1005,12 @@ end_imm_use_stmt_traverse (imm_use_iterator *imm)
/* Immediate use traversal of uses within a stmt requires that all the
uses on a stmt be sequentially listed. This routine is used to build up
- this sequential list by adding USE_P to the end of the current list
- currently delimited by HEAD and LAST_P. The new LAST_P value is
+ this sequential list by adding USE_P to the end of the current list
+ currently delimited by HEAD and LAST_P. The new LAST_P value is
returned. */
static inline use_operand_p
-move_use_after_head (use_operand_p use_p, use_operand_p head,
+move_use_after_head (use_operand_p use_p, use_operand_p head,
use_operand_p last_p)
{
gcc_assert (USE_FROM_PTR (use_p) == USE_FROM_PTR (head));
@@ -1079,7 +1079,7 @@ static inline gimple
first_imm_use_stmt (imm_use_iterator *imm, tree var)
{
gcc_assert (TREE_CODE (var) == SSA_NAME);
-
+
imm->end_p = &(SSA_NAME_IMM_USE_NODE (var));
imm->imm_use = imm->end_p->next;
imm->next_imm_name = NULL_USE_OPERAND_P;
diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h
index 96e4aa91281..cde7792cb4a 100644
--- a/gcc/tree-flow.h
+++ b/gcc/tree-flow.h
@@ -110,7 +110,7 @@ typedef struct
/*---------------------------------------------------------------------------
Attributes for SSA_NAMEs.
-
+
NOTE: These structures are stored in struct tree_ssa_name
but are only used by the tree optimizers, so it makes better sense
to declare them here to avoid recompiling unrelated files when
@@ -135,7 +135,7 @@ enum need_phi_state {
needs PHI nodes. This is probably an overly conservative assumption. */
NEED_PHI_STATE_UNKNOWN,
- /* This state indicates that we have seen one or more sets of the
+ /* This state indicates that we have seen one or more sets of the
variable in a single basic block and that the sets dominate all
uses seen so far. If after finding all definition and use sites
we are still in this state, then the variable does not need any
@@ -205,7 +205,7 @@ struct GTY(()) var_ann_d {
/* Immediate use lists are used to directly access all uses for an SSA
- name and get pointers to the statement for each use.
+ name and get pointers to the statement for each use.
The structure ssa_use_operand_d consists of PREV and NEXT pointers
to maintain the list. A USE pointer, which points to address where
@@ -227,7 +227,7 @@ struct GTY(()) var_ann_d {
iterator manages this by inserting a marker node into the list
immediately before the node currently being examined in the list.
this marker node is uniquely identified by having null stmt *and* a
- null use pointer.
+ null use pointer.
When iterating to the next use, the iteration routines check to see
if the node after the marker has changed. if it has, then the node
@@ -260,7 +260,7 @@ typedef struct immediate_use_iterator_d
for ((DEST) = first_readonly_imm_use (&(ITER), (SSAVAR)); \
!end_readonly_imm_use_p (&(ITER)); \
(DEST) = next_readonly_imm_use (&(ITER)))
-
+
/* Use this iterator to visit each stmt which has a use of SSAVAR. */
#define FOR_EACH_IMM_USE_STMT(STMT, ITER, SSAVAR) \
@@ -268,7 +268,7 @@ typedef struct immediate_use_iterator_d
!end_imm_use_stmt_p (&(ITER)); \
(STMT) = next_imm_use_stmt (&(ITER)))
-/* Use this to terminate the FOR_EACH_IMM_USE_STMT loop early. Failure to
+/* Use this to terminate the FOR_EACH_IMM_USE_STMT loop early. Failure to
do so will result in leaving an iterator marker node in the immediate
use list, and nothing good will come from that. */
#define BREAK_FROM_IMM_USE_STMT(ITER) \
@@ -278,7 +278,7 @@ typedef struct immediate_use_iterator_d
}
-/* Use this iterator in combination with FOR_EACH_IMM_USE_STMT to
+/* Use this iterator in combination with FOR_EACH_IMM_USE_STMT to
get access to each occurrence of ssavar on the stmt returned by
that iterator. For instance:
@@ -323,7 +323,7 @@ static inline void set_phi_nodes (basic_block, gimple_seq);
Global declarations
---------------------------------------------------------------------------*/
struct GTY(()) int_tree_map {
-
+
unsigned int uid;
tree to;
};
@@ -334,7 +334,7 @@ extern int int_tree_map_eq (const void *, const void *);
extern unsigned int uid_decl_map_hash (const void *);
extern int uid_decl_map_eq (const void *, const void *);
-typedef struct
+typedef struct
{
htab_iterator hti;
} referenced_var_iterator;
@@ -348,7 +348,7 @@ typedef struct
#define FOR_EACH_REFERENCED_VAR(VAR, ITER) \
for ((VAR) = first_referenced_var (&(ITER)); \
!end_referenced_vars_p (&(ITER)); \
- (VAR) = next_referenced_var (&(ITER)))
+ (VAR) = next_referenced_var (&(ITER)))
typedef struct
@@ -889,7 +889,7 @@ struct mem_address
};
struct affine_tree_combination;
-tree create_mem_ref (gimple_stmt_iterator *, tree,
+tree create_mem_ref (gimple_stmt_iterator *, tree,
struct affine_tree_combination *, tree, bool);
rtx addr_for_mem_ref (struct mem_address *, addr_space_t, bool);
void get_address_description (tree, struct mem_address *);
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index 7f00a63453f..7f193ec98b0 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -252,7 +252,7 @@ tree_if_convert_stmt (struct loop * loop, gimple t, tree cond,
/* This GIMPLE_ASSIGN is killing previous value of LHS. Appropriate
value will be selected by PHI node based on condition. It is possible
that before this transformation, PHI nodes were selecting default
- value and now it will use this new value. This is OK because it does
+ value and now it will use this new value. This is OK because it does
not change the validity of the program. */
break;
@@ -489,7 +489,7 @@ if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
fprintf (dump_file, "non empty basic block after exit bb\n");
return false;
}
- else if (bb == loop->latch
+ else if (bb == loop->latch
&& bb != exit_bb
&& !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
{
@@ -706,7 +706,7 @@ clean_predicate_lists (struct loop *loop)
whose phi arguments are selected when cond is true. */
static basic_block
-find_phi_replacement_condition (struct loop *loop,
+find_phi_replacement_condition (struct loop *loop,
basic_block bb, tree *cond,
gimple_stmt_iterator *gsi)
{
@@ -724,7 +724,7 @@ find_phi_replacement_condition (struct loop *loop,
S2: x = c ? b : a;
S2 is preferred over S1. Make 'b' first_bb and use its condition.
-
+
2) Do not make loop header first_bb.
3)
@@ -735,7 +735,7 @@ find_phi_replacement_condition (struct loop *loop,
S3: x = (c == d) ? b : a;
- S3 is preferred over S1 and S2*, Make 'b' first_bb and use
+ S3 is preferred over S1 and S2*, Make 'b' first_bb and use
its condition.
4) If pred B is dominated by pred A then use pred B's condition.
@@ -832,7 +832,7 @@ replace_phi_with_cond_gimple_assign_stmt (gimple phi, tree cond,
tree arg_0, arg_1;
gcc_assert (gimple_code (phi) == GIMPLE_PHI);
-
+
/* If this is not filtered earlier, then now it is too late. */
gcc_assert (gimple_phi_num_args (phi) == 2);
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 34a8e9d8d07..2991c9e0b50 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -190,7 +190,7 @@ remap_ssa_name (tree name, copy_body_data *id)
new_tree = remap_decl (SSA_NAME_VAR (name), id);
/* We might've substituted constant or another SSA_NAME for
- the variable.
+ the variable.
Replace the SSA name representing RESULT_DECL by variable during
inlining: this saves us from need to introduce PHI node in a case
@@ -223,7 +223,7 @@ remap_ssa_name (tree name, copy_body_data *id)
{
gimple_stmt_iterator gsi = gsi_last_bb (id->entry_bb);
gimple init_stmt;
-
+
init_stmt = gimple_build_assign (new_tree,
fold_convert (TREE_TYPE (new_tree),
integer_zero_node));
@@ -277,7 +277,7 @@ remap_decl (tree decl, copy_body_data *id)
{
/* Make a copy of the variable or label. */
tree t = id->copy_decl (decl, id);
-
+
/* Remember it, so that if we encounter this local entity again
we can reuse this copy. Do this early because remap_type may
need this decl for TYPE_STUB_DECL. */
@@ -493,7 +493,7 @@ remapped_type (tree type, copy_body_data *id)
/* The type only needs remapping if it's variably modified. */
/* Decide if DECL can be put into BLOCK_NONLOCAL_VARs. */
-
+
static bool
can_be_nonlocal (tree decl, copy_body_data *id)
{
@@ -561,7 +561,7 @@ remap_decls (tree decls, VEC(tree,gc) **nonlocalized_list, copy_body_data *id)
/* If we didn't remap this variable, we can't mess with its
TREE_CHAIN. If we remapped this variable to the return slot, it's
already declared somewhere else, so don't declare it here. */
-
+
if (new_var == id->retvar)
;
else if (!new_var)
@@ -1095,7 +1095,7 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data)
&& id->remapping_type_depth == 0
&& !processing_debug_stmt)
add_referenced_var (*tp);
-
+
/* If EXPR has a block defined, map it to the newly constructed block.
When inlining we want EXPRs without a block to appear in the block
of the function call. */
@@ -1247,7 +1247,7 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
case GIMPLE_TRY:
s1 = remap_gimple_seq (gimple_try_eval (stmt), id);
s2 = remap_gimple_seq (gimple_try_cleanup (stmt), id);
- copy = gimple_build_try (s1, s2, gimple_try_kind (stmt));
+ copy = gimple_build_try (s1, s2, gimple_try_kind (stmt));
break;
case GIMPLE_WITH_CLEANUP_EXPR:
@@ -1448,7 +1448,7 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
if (skip_first)
walk_tree (gimple_op_ptr (copy, 1), remap_gimple_op_r, &wi, NULL);
else
- walk_gimple_op (copy, remap_gimple_op_r, &wi);
+ walk_gimple_op (copy, remap_gimple_op_r, &wi);
/* Clear the copied virtual operands. We are not remapping them here
but are going to recreate them from scratch. */
@@ -1642,7 +1642,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
/* We could also just rescale the frequency, but
doing so would introduce roundoff errors and make
verifier unhappy. */
- edge->frequency
+ edge->frequency
= compute_call_stmt_bb_frequency (id->dst_node->decl,
copy_basic_block);
if (dump_file
@@ -1681,7 +1681,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
/* Constant propagation on argument done during inlining
may create new direct call. Produce an edge for it. */
- if ((!edge
+ if ((!edge
|| (edge->indirect_call
&& id->transform_call_graph_edges == CB_CGE_MOVE_CLONES))
&& is_gimple_call (stmt)
@@ -1700,7 +1700,7 @@ copy_bb (copy_body_data *id, basic_block bb, int frequency_scale,
if (id->transform_call_graph_edges == CB_CGE_MOVE_CLONES)
cgraph_create_edge_including_clones
(id->dst_node, dest, stmt, bb->count,
- compute_call_stmt_bb_frequency (id->dst_node->decl,
+ compute_call_stmt_bb_frequency (id->dst_node->decl,
copy_basic_block),
bb->loop_depth, CIF_ORIGINALLY_INDIRECT_CALL);
else
@@ -1955,7 +1955,7 @@ copy_phis_for_bb (basic_block bb, copy_body_data *id)
new_arg = force_gimple_operand (new_arg, &stmts, true, NULL);
gsi_insert_seq_on_edge_immediate (new_edge, stmts);
}
- add_phi_arg (new_phi, new_arg, new_edge,
+ add_phi_arg (new_phi, new_arg, new_edge,
gimple_phi_arg_location_from_edge (phi, old_edge));
}
}
@@ -2569,7 +2569,7 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
STRIP_USELESS_TYPE_CONVERSION (return_slot_addr);
/* We are going to construct *&return_slot and we can't do that
- for variables believed to be not addressable.
+ for variables believed to be not addressable.
FIXME: This check possibly can match, because values returned
via return slot optimization are not believed to have address
@@ -2687,7 +2687,7 @@ declare_return_variable (copy_body_data *id, tree return_slot, tree modify_dest,
use = var;
if (!useless_type_conversion_p (caller_type, TREE_TYPE (var)))
use = fold_convert (caller_type, var);
-
+
STRIP_USELESS_TYPE_CONVERSION (use);
if (DECL_BY_REFERENCE (result))
@@ -2747,7 +2747,7 @@ cannot_copy_type_1 (tree *nodep, int *walk_subtrees ATTRIBUTE_UNUSED,
UNION_TYPE nodes, then it goes into infinite recursion on a
structure containing a pointer to its own type. If it doesn't,
then the type node for S doesn't get adjusted properly when
- F is inlined.
+ F is inlined.
??? This is likely no longer true, but it's too late in the 4.0
cycle to try to find out. This should be checked for 4.1. */
@@ -3270,7 +3270,7 @@ estimate_num_insns (gimple stmt, eni_weights *weights)
case GIMPLE_SWITCH:
/* Take into account cost of the switch + guess 2 conditional jumps for
- each case label.
+ each case label.
TODO: once the switch expansion logic is sufficiently separated, we can
do a better job of estimating the cost of the switch. */
@@ -3293,7 +3293,7 @@ estimate_num_insns (gimple stmt, eni_weights *weights)
cost = weights->target_builtin_call_cost;
else
cost = weights->call_cost;
-
+
if (decl && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (decl))
{
@@ -3746,9 +3746,9 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Inlining ");
- print_generic_expr (dump_file, id->src_fn, 0);
+ print_generic_expr (dump_file, id->src_fn, 0);
fprintf (dump_file, " to ");
- print_generic_expr (dump_file, id->dst_fn, 0);
+ print_generic_expr (dump_file, id->dst_fn, 0);
fprintf (dump_file, " with frequency %i\n", cg_edge->frequency);
}
@@ -4043,11 +4043,11 @@ optimize_inline_calls (tree fn)
gcc_assert (e->inline_failed);
}
#endif
-
+
/* Fold the statements before compacting/renumbering the basic blocks. */
fold_marked_statements (last, id.statements_to_fold);
pointer_set_destroy (id.statements_to_fold);
-
+
gcc_assert (!id.debug_stmts);
/* Renumber the (code) basic_blocks consecutively. */
@@ -4515,14 +4515,14 @@ copy_decl_for_dup_finish (copy_body_data *id, tree decl, tree copy)
DECL_IGNORED_P (copy) = DECL_IGNORED_P (decl);
/* Set the DECL_ABSTRACT_ORIGIN so the debugging routines know what
- declaration inspired this copy. */
+ declaration inspired this copy. */
DECL_ABSTRACT_ORIGIN (copy) = DECL_ORIGIN (decl);
/* The new variable/label has no RTL, yet. */
if (CODE_CONTAINS_STRUCT (TREE_CODE (copy), TS_DECL_WRTL)
&& !TREE_STATIC (copy) && !DECL_EXTERNAL (copy))
SET_DECL_RTL (copy, NULL_RTX);
-
+
/* These args would always appear unused, if not for this. */
TREE_USED (copy) = 1;
@@ -4733,7 +4733,7 @@ delete_unreachable_blocks_update_callgraph (copy_body_data *id)
else
cgraph_remove_edge (e);
}
-
+
if (node->clones)
node = node->clones;
else if (node->next_sibling_clone)
@@ -4796,9 +4796,9 @@ update_clone_info (copy_body_data * id)
/* Create a copy of a function's tree.
OLD_DECL and NEW_DECL are FUNCTION_DECL tree nodes
of the original function and the new copied function
- respectively. In case we want to replace a DECL
- tree with another tree while duplicating the function's
- body, TREE_MAP represents the mapping between these
+ respectively. In case we want to replace a DECL
+ tree with another tree while duplicating the function's
+ body, TREE_MAP represents the mapping between these
trees. If UPDATE_CLONES is set, the call_stmt fields
of edges of clones of the function will be updated. */
void
@@ -4862,7 +4862,7 @@ tree_function_versioning (tree old_decl, tree new_decl,
old_transforms_to_apply,
i));
}
-
+
id.copy_decl = copy_decl_no_change;
id.transform_call_graph_edges
= update_clones ? CB_CGE_MOVE_CLONES : CB_CGE_MOVE;
@@ -4876,14 +4876,14 @@ tree_function_versioning (tree old_decl, tree new_decl,
initialize_cfun (new_decl, old_decl,
old_entry_block->count);
push_cfun (DECL_STRUCT_FUNCTION (new_decl));
-
+
/* Copy the function's static chain. */
p = DECL_STRUCT_FUNCTION (old_decl)->static_chain_decl;
if (p)
DECL_STRUCT_FUNCTION (new_decl)->static_chain_decl =
copy_static_chain (DECL_STRUCT_FUNCTION (old_decl)->static_chain_decl,
&id);
-
+
/* If there's a tree_map, prepare for substitution. */
if (tree_map)
for (i = 0; i < VEC_length (ipa_replace_map_p, tree_map); i++)
@@ -4898,7 +4898,7 @@ tree_function_versioning (tree old_decl, tree new_decl,
if (TREE_CODE (op) == VIEW_CONVERT_EXPR)
op = TREE_OPERAND (op, 0);
-
+
if (TREE_CODE (op) == ADDR_EXPR)
{
op = TREE_OPERAND (op, 0);
@@ -4921,12 +4921,12 @@ tree_function_versioning (tree old_decl, tree new_decl,
DECL_ARGUMENTS (new_decl) =
copy_arguments_for_versioning (DECL_ARGUMENTS (old_decl), &id,
args_to_skip, &vars);
-
+
DECL_INITIAL (new_decl) = remap_blocks (DECL_INITIAL (id.src_fn), &id);
-
+
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (id.dst_fn);
-
+
declare_inline_vars (DECL_INITIAL (new_decl), vars);
if (DECL_STRUCT_FUNCTION (old_decl)->local_decls != NULL_TREE)
@@ -4942,18 +4942,18 @@ tree_function_versioning (tree old_decl, tree new_decl,
tree_cons (NULL_TREE, remap_decl (var, &id),
cfun->local_decls);
}
-
+
/* Copy the Function's body. */
copy_body (&id, old_entry_block->count, REG_BR_PROB_BASE,
ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR);
-
+
if (DECL_RESULT (old_decl) != NULL_TREE)
{
tree *res_decl = &DECL_RESULT (old_decl);
DECL_RESULT (new_decl) = remap_decl (*res_decl, &id);
lang_hooks.dup_lang_specific_decl (DECL_RESULT (new_decl));
}
-
+
/* Renumber the lexical scoping (non-code) blocks consecutively. */
number_blocks (new_decl);
diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c
index 8672a5e6317..d6f659c0624 100644
--- a/gcc/tree-into-ssa.c
+++ b/gcc/tree-into-ssa.c
@@ -762,7 +762,7 @@ mark_def_sites (basic_block bb, gimple stmt, bitmap kills)
set_livein_block (sym, bb);
set_rewrite_uses (stmt, true);
}
-
+
/* Now process the defs. Mark BB as the definition block and add
each def to the set of killed symbols. */
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
@@ -866,7 +866,7 @@ prune_unused_phi_nodes (bitmap phis, bitmap kills, bitmap uses)
then for each of them find the nearest def that dominates them. If this
def is a phi node, we mark it live, and if it was not live before, we
add the predecessors of its basic block to the worklist.
-
+
To quickly locate the nearest def that dominates use, we use dfs numbering
of the dominance tree (that is already available in order to speed up
queries). For each def, we have the interval given by the dfs number on
@@ -1154,7 +1154,7 @@ insert_phi_nodes (bitmap *dfs)
tree var;
timevar_push (TV_TREE_INSERT_PHI_NODES);
-
+
FOR_EACH_REFERENCED_VAR (var, rvi)
{
struct def_blocks_d *def_map;
@@ -1183,7 +1183,7 @@ static void
register_new_def (tree def, tree sym)
{
tree currdef;
-
+
/* If this variable is set in a single basic block and all uses are
dominated by the set(s) in that single basic block, then there is
no reason to record anything for this variable in the block local
@@ -1230,7 +1230,7 @@ register_new_def (tree def, tree sym)
2- Every statement in BB is rewritten. USE and VUSE operands are
rewritten with their corresponding reaching definition. DEF and
VDEF targets are registered as new definitions.
-
+
3- All the PHI nodes in successor blocks of BB are visited. The
argument corresponding to BB is replaced with its current reaching
definition.
@@ -1250,7 +1250,7 @@ static tree
get_reaching_def (tree var)
{
tree currdef;
-
+
/* Lookup the current reaching definition for VAR. */
currdef = get_current_def (var);
@@ -1439,7 +1439,7 @@ rewrite_leave_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
saved_def = NULL;
var = tmp;
}
-
+
set_current_def (var, saved_def);
}
}
@@ -1500,7 +1500,7 @@ dump_defs_stack (FILE *file, int n)
for (j = (int) VEC_length (tree, block_defs_stack) - 1; j >= 0; j--)
{
tree name, var;
-
+
name = VEC_index (tree, block_defs_stack, j);
if (name == NULL_TREE)
{
@@ -1693,7 +1693,7 @@ debug_def_blocks_r (void **slot, void *data)
{
FILE *file = (FILE *) data;
struct def_blocks_d *db_p = (struct def_blocks_d *) *slot;
-
+
fprintf (file, "VAR: ");
print_generic_expr (file, db_p->var, dump_flags);
bitmap_print (file, db_p->def_blocks, ", DEF_BLOCKS: { ", "}");
@@ -1956,7 +1956,7 @@ rewrite_update_phi_arguments (basic_block bb)
if (!bitmap_bit_p (blocks_with_phis_to_rewrite, e->dest->index))
continue;
-
+
phis = VEC_index (gimple_vec, phis_to_rewrite, e->dest->index);
for (i = 0; VEC_iterate (gimple, phis, i, phi); i++)
{
@@ -2001,9 +2001,9 @@ rewrite_update_phi_arguments (basic_block bb)
SET_USE (arg_p, reaching_def);
stmt = SSA_NAME_DEF_STMT (reaching_def);
- /* Single element PHI nodes behave like copies, so get the
+ /* Single element PHI nodes behave like copies, so get the
location from the phi argument. */
- if (gimple_code (stmt) == GIMPLE_PHI &&
+ if (gimple_code (stmt) == GIMPLE_PHI &&
gimple_phi_num_args (stmt) == 1)
locus = gimple_phi_arg_location (stmt, 0);
else
@@ -2066,7 +2066,7 @@ rewrite_update_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
if (!register_defs_p (phi))
continue;
-
+
lhs = gimple_phi_result (phi);
lhs_sym = SSA_NAME_VAR (lhs);
@@ -2079,7 +2079,7 @@ rewrite_update_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
the names replaced by LHS. */
if (is_new_name (lhs))
register_new_update_set (lhs, names_replaced_by (lhs));
-
+
/* If LHS is an OLD name, register it as a new definition
for itself. */
if (is_old_name (lhs))
@@ -2116,7 +2116,7 @@ rewrite_update_leave_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
{
tree var = VEC_pop (tree, block_defs_stack);
tree saved_def;
-
+
/* NULL indicates the unwind stop point for this block (see
rewrite_update_enter_block). */
if (var == NULL)
@@ -2129,7 +2129,7 @@ rewrite_update_leave_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
/* Rewrite the actual blocks, statements, and PHI arguments, to be in SSA
- form.
+ form.
ENTRY indicates the block where to start. Every block dominated by
ENTRY will be rewritten.
@@ -2146,7 +2146,7 @@ static void
rewrite_blocks (basic_block entry, enum rewrite_mode what)
{
struct dom_walk_data walk_data;
-
+
/* Rewrite all the basic blocks in the program. */
timevar_push (TV_TREE_SSA_REWRITE_BLOCKS);
@@ -2187,7 +2187,7 @@ rewrite_blocks (basic_block entry, enum rewrite_mode what)
if (def_blocks)
dump_tree_ssa_stats (dump_file);
}
-
+
VEC_free (tree, heap, block_defs_stack);
timevar_pop (TV_TREE_SSA_REWRITE_BLOCKS);
@@ -2312,7 +2312,7 @@ rewrite_into_ssa (void)
{
bitmap *dfs;
basic_block bb;
-
+
timevar_push (TV_TREE_SSA_OTHER);
/* Initialize operand data structures. */
@@ -2359,7 +2359,7 @@ rewrite_into_ssa (void)
}
-struct gimple_opt_pass pass_build_ssa =
+struct gimple_opt_pass pass_build_ssa =
{
{
GIMPLE_PASS,
@@ -2505,7 +2505,7 @@ prepare_block_for_update (basic_block bb, bool insert_phi_p)
ssa_op_iter i;
use_operand_p use_p;
def_operand_p def_p;
-
+
stmt = gsi_stmt (si);
FOR_EACH_SSA_USE_OPERAND (use_p, stmt, i, SSA_OP_ALL_USES)
@@ -2916,7 +2916,7 @@ ssa_names_to_replace (void)
unsigned i = 0;
bitmap ret;
sbitmap_iterator sbi;
-
+
gcc_assert (update_ssa_initialized_fn == NULL
|| update_ssa_initialized_fn == cfun);
@@ -2948,7 +2948,7 @@ release_ssa_name_after_update_ssa (tree name)
This is slightly different than the regular PHI insertion
algorithm. The value of UPDATE_FLAGS controls how PHI nodes for
real names (i.e., GIMPLE registers) are inserted:
-
+
- If UPDATE_FLAGS == TODO_update_ssa, we are only interested in PHI
nodes inside the region affected by the block that defines VAR
and the blocks that define all its replacements. All these
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index b9b6ea3cd43..f9c96d3120e 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -4,17 +4,17 @@
and Sebastian Pop <sebastian.pop@amd.com>.
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -26,7 +26,7 @@ along with GCC; see the file COPYING3. If not see
| D(I) = A(I-1)*E
|ENDDO
- is transformed to
+ is transformed to
|DOALL I = 2, N
| A(I) = B(I) + C
@@ -855,7 +855,7 @@ free_rdg_components (VEC (rdgc, heap) *components)
of RDG in which the STARTING_VERTICES occur. */
static void
-rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
+rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
VEC (rdgc, heap) **components)
{
int i, v;
@@ -910,10 +910,10 @@ rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
bitmap np;
bool part_has_writes = false;
int v = VEC_index (int, x->vertices, 0);
-
+
if (bitmap_bit_p (processed, v))
continue;
-
+
np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
other_stores);
bitmap_ior_into (partition, np);
diff --git a/gcc/tree-loop-linear.c b/gcc/tree-loop-linear.c
index cc2440dd41f..5461975d77a 100644
--- a/gcc/tree-loop-linear.c
+++ b/gcc/tree-loop-linear.c
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
scaling, skewing, and reversal. They are used to change the
iteration order of loop nests in order to optimize data locality of
traversals, or remove dependences that prevent
- parallelization/vectorization/etc.
+ parallelization/vectorization/etc.
TODO: Determine reuse vectors/matrix and use it to determine optimal
transform matrix for locality purposes.
@@ -58,7 +58,7 @@ along with GCC; see the file COPYING3. If not see
considered. The first loop in the considered loop nest is
FIRST_LOOP, and consequently, the index of the considered loop is
obtained by LOOP->DEPTH - FIRST_LOOP->DEPTH
-
+
Initializes:
- DEPENDENCE_STEPS the sum of all the data dependence distances
carried by loop LOOP,
@@ -78,12 +78,12 @@ along with GCC; see the file COPYING3. If not see
| A[{0, +, 1336}_1]
| endloop_1
- gather_interchange_stats (in loop_1) will return
+ gather_interchange_stats (in loop_1) will return
DEPENDENCE_STEPS = 3002
NB_DEPS_NOT_CARRIED_BY_LOOP = 5
ACCESS_STRIDES = 10694
- gather_interchange_stats (in loop_2) will return
+ gather_interchange_stats (in loop_2) will return
DEPENDENCE_STEPS = 3000
NB_DEPS_NOT_CARRIED_BY_LOOP = 7
ACCESS_STRIDES = 8010
@@ -94,8 +94,8 @@ gather_interchange_stats (VEC (ddr_p, heap) *dependence_relations ATTRIBUTE_UNUS
VEC (data_reference_p, heap) *datarefs ATTRIBUTE_UNUSED,
struct loop *loop ATTRIBUTE_UNUSED,
struct loop *first_loop ATTRIBUTE_UNUSED,
- unsigned int *dependence_steps ATTRIBUTE_UNUSED,
- unsigned int *nb_deps_not_carried_by_loop ATTRIBUTE_UNUSED,
+ unsigned int *dependence_steps ATTRIBUTE_UNUSED,
+ unsigned int *nb_deps_not_carried_by_loop ATTRIBUTE_UNUSED,
double_int *access_strides ATTRIBUTE_UNUSED)
{
unsigned int i, j;
@@ -140,11 +140,11 @@ gather_interchange_stats (VEC (ddr_p, heap) *dependence_relations ATTRIBUTE_UNUS
struct loop *stmt_loop = loop_containing_stmt (stmt);
struct loop *inner_loop = first_loop->inner;
- if (inner_loop != stmt_loop
+ if (inner_loop != stmt_loop
&& !flow_loop_nested_p (inner_loop, stmt_loop))
continue;
- for (it = 0; it < DR_NUM_DIMENSIONS (dr);
+ for (it = 0; it < DR_NUM_DIMENSIONS (dr);
it++, ref = TREE_OPERAND (ref, 0))
{
int num = am_vector_index_for_loop (DR_ACCESS_MATRIX (dr), loop->num);
@@ -152,11 +152,11 @@ gather_interchange_stats (VEC (ddr_p, heap) *dependence_relations ATTRIBUTE_UNUS
tree array_size = TYPE_SIZE (TREE_TYPE (ref));
double_int dstride;
- if (array_size == NULL_TREE
+ if (array_size == NULL_TREE
|| TREE_CODE (array_size) != INTEGER_CST)
continue;
- dstride = double_int_mul (tree_to_double_int (array_size),
+ dstride = double_int_mul (tree_to_double_int (array_size),
shwi_to_double_int (istride));
(*access_strides) = double_int_add (*access_strides, dstride);
}
@@ -164,7 +164,7 @@ gather_interchange_stats (VEC (ddr_p, heap) *dependence_relations ATTRIBUTE_UNUS
}
/* Attempt to apply interchange transformations to TRANS to maximize the
- spatial and temporal locality of the loop.
+ spatial and temporal locality of the loop.
Returns the new transform matrix. The smaller the reuse vector
distances in the inner loops, the fewer the cache misses.
FIRST_LOOP is the loop->num of the first loop in the analyzed loop
@@ -172,8 +172,8 @@ gather_interchange_stats (VEC (ddr_p, heap) *dependence_relations ATTRIBUTE_UNUS
static lambda_trans_matrix
-try_interchange_loops (lambda_trans_matrix trans,
- unsigned int depth,
+try_interchange_loops (lambda_trans_matrix trans,
+ unsigned int depth,
VEC (ddr_p, heap) *dependence_relations,
VEC (data_reference_p, heap) *datarefs,
struct loop *first_loop)
@@ -202,24 +202,24 @@ try_interchange_loops (lambda_trans_matrix trans,
l2_cache_size = uhwi_to_double_int (L2_CACHE_SIZE * 1024);
/* LOOP_I is always the outer loop. */
- for (loop_j = first_loop->inner;
- loop_j;
+ for (loop_j = first_loop->inner;
+ loop_j;
loop_j = loop_j->inner)
- for (loop_i = first_loop;
- loop_depth (loop_i) < loop_depth (loop_j);
+ for (loop_i = first_loop;
+ loop_depth (loop_i) < loop_depth (loop_j);
loop_i = loop_i->inner)
{
gather_interchange_stats (dependence_relations, datarefs,
loop_i, first_loop,
- &dependence_steps_i,
+ &dependence_steps_i,
&nb_deps_not_carried_by_i,
&access_strides_i);
gather_interchange_stats (dependence_relations, datarefs,
loop_j, first_loop,
- &dependence_steps_j,
- &nb_deps_not_carried_by_j,
+ &dependence_steps_j,
+ &nb_deps_not_carried_by_j,
&access_strides_j);
-
+
/* Heuristics for loop interchange profitability:
0. Don't transform if the smallest stride is larger than
@@ -251,7 +251,7 @@ try_interchange_loops (lambda_trans_matrix trans,
if (res && double_int_ucmp (large, l1_cache_size) < 0)
continue;
- if (dependence_steps_i < dependence_steps_j
+ if (dependence_steps_i < dependence_steps_j
|| nb_deps_not_carried_by_i > nb_deps_not_carried_by_j
|| cmp < 0)
{
@@ -261,8 +261,8 @@ try_interchange_loops (lambda_trans_matrix trans,
/* Validate the resulting matrix. When the transformation
is not valid, reverse to the previous transformation. */
if (!lambda_transform_legal_p (trans, depth, dependence_relations))
- lambda_matrix_row_exchange (LTM_MATRIX (trans),
- loop_depth (loop_i) - loop_depth (first_loop),
+ lambda_matrix_row_exchange (LTM_MATRIX (trans),
+ loop_depth (loop_i) - loop_depth (first_loop),
loop_depth (loop_j) - loop_depth (first_loop));
}
}
@@ -330,7 +330,7 @@ linear_transform_loops (void)
unsigned int depth = 0;
VEC (ddr_p, heap) *dependence_relations;
VEC (data_reference_p, heap) *datarefs;
-
+
lambda_loopnest before, after;
lambda_trans_matrix trans;
struct obstack lambda_obstack;
@@ -355,7 +355,7 @@ linear_transform_loops (void)
if (!compute_data_dependences_for_loop (loop_nest, true, &datarefs,
&dependence_relations))
goto free_and_continue;
-
+
lambda_collect_parameters (datarefs, &lambda_parameters);
if (!lambda_compute_access_matrices (datarefs, lambda_parameters, nest))
goto free_and_continue;
@@ -395,7 +395,7 @@ linear_transform_loops (void)
fprintf (dump_file, "Before:\n");
print_lambda_loopnest (dump_file, before, 'i');
}
-
+
after = lambda_loopnest_transform (before, trans, &lambda_obstack);
if (dump_file)
diff --git a/gcc/tree-mudflap.c b/gcc/tree-mudflap.c
index 381b029d45f..590f7c358d9 100644
--- a/gcc/tree-mudflap.c
+++ b/gcc/tree-mudflap.c
@@ -419,8 +419,8 @@ mudflap_init (void)
/* This is the second part of the mudflap instrumentation. It works on
low-level GIMPLE using the CFG, because we want to run this pass after
tree optimizations have been performed, but we have to preserve the CFG
- for expansion from trees to RTL.
- Below is the list of transformations performed on statements in the
+ for expansion from trees to RTL.
+ Below is the list of transformations performed on statements in the
current function.
1) Memory reference transforms: Perform the mudflap indirection-related
@@ -787,7 +787,7 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
is necessary. Or we may have an innocent "a.b.c"
expression that must not be instrumented. We need to
recurse all the way down the nesting structure to figure it
- out: looking just at the outer node is not enough. */
+ out: looking just at the outer node is not enough. */
tree var;
int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
/* If we have a bitfield component reference, we must note the
@@ -806,7 +806,7 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
&& (TREE_CODE (var) == ARRAY_REF
|| TREE_CODE (var) == COMPONENT_REF))
elt = var;
-
+
if (TREE_CODE (var) == ARRAY_REF)
{
component_ref_only = 0;
@@ -826,9 +826,9 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
&& TREE_CODE (var) != STRING_CST)
return;
}
- else
+ else
{
- gcc_assert (TREE_CODE (var) == VAR_DECL
+ gcc_assert (TREE_CODE (var) == VAR_DECL
|| TREE_CODE (var) == PARM_DECL
|| TREE_CODE (var) == RESULT_DECL
|| TREE_CODE (var) == STRING_CST);
@@ -862,7 +862,7 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
size = DECL_SIZE_UNIT (field);
-
+
if (elt)
elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)),
elt);
@@ -950,9 +950,9 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
mf_build_check_statement_for (base, limit, iter, location, dirflag);
}
-/* Transform
- 1) Memory references.
- 2) BUILTIN_ALLOCA calls.
+/* Transform
+ 1) Memory references.
+ 2) BUILTIN_ALLOCA calls.
*/
static void
mf_xform_statements (void)
@@ -996,11 +996,11 @@ mf_xform_statements (void)
case GIMPLE_CALL:
{
tree fndecl = gimple_call_fndecl (s);
- if (fndecl && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA))
+ if (fndecl && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA))
gimple_call_set_cannot_inline (s, true);
}
break;
-
+
default:
;
}
@@ -1060,7 +1060,7 @@ mx_register_decls (tree decl, gimple_seq seq, location_t location)
while (decl != NULL_TREE)
{
- if (mf_decl_eligible_p (decl)
+ if (mf_decl_eligible_p (decl)
/* Not already processed. */
&& ! mf_marked_p (decl)
/* Automatic variable. */
@@ -1075,7 +1075,7 @@ mx_register_decls (tree decl, gimple_seq seq, location_t location)
gimplified when we got here. */
size = convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (decl)));
gcc_assert (is_gimple_val (size));
-
+
unregister_fncall_param =
mf_mark (build1 (ADDR_EXPR,
@@ -1100,7 +1100,7 @@ mx_register_decls (tree decl, gimple_seq seq, location_t location)
size,
build_int_cst (NULL_TREE, 3),
variable_name);
-
+
/* Accumulate the two calls. */
gimple_set_location (register_fncall, location);
@@ -1263,7 +1263,7 @@ mudflap_register_call (tree obj, tree object_size, tree varname)
arg,
convert (size_type_node, object_size),
/* __MF_TYPE_STATIC */
- build_int_cst (NULL_TREE, 4),
+ build_int_cst (NULL_TREE, 4),
varname);
append_to_statement_list (call_stmt, &enqueued_call_stmt_chain);
@@ -1324,7 +1324,7 @@ mudflap_finish_file (void)
tree call2_stmt = build_call_expr (mf_init_fndecl, 0);
append_to_statement_list (call2_stmt, &ctor_statements);
}
-
+
/* If appropriate, call __mf_set_options to pass along read-ignore mode. */
if (flag_mudflap_ignore_reads)
{
@@ -1359,8 +1359,8 @@ mudflap_finish_file (void)
DECL_NAME (obj));
continue;
}
-
- mudflap_register_call (obj,
+
+ mudflap_register_call (obj,
size_in_bytes (TREE_TYPE (obj)),
mf_varname_tree (obj));
}
@@ -1375,7 +1375,7 @@ mudflap_finish_file (void)
enqueued_call_stmt_chain = NULL_TREE;
}
- cgraph_build_static_cdtor ('I', ctor_statements,
+ cgraph_build_static_cdtor ('I', ctor_statements,
MAX_RESERVED_INIT_PRIORITY-1);
}
@@ -1386,7 +1386,7 @@ gate_mudflap (void)
return flag_mudflap != 0;
}
-struct gimple_opt_pass pass_mudflap_1 =
+struct gimple_opt_pass pass_mudflap_1 =
{
{
GIMPLE_PASS,
@@ -1405,7 +1405,7 @@ struct gimple_opt_pass pass_mudflap_1 =
}
};
-struct gimple_opt_pass pass_mudflap_2 =
+struct gimple_opt_pass pass_mudflap_2 =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-nested.c b/gcc/tree-nested.c
index 0b5e7321b16..fb95088b648 100644
--- a/gcc/tree-nested.c
+++ b/gcc/tree-nested.c
@@ -43,21 +43,21 @@
nonlocal references. We want to do this sooner rather than later, in
order to give us more freedom in emitting all of the functions in question.
- Back in olden times, when gcc was young, we developed an insanely
+ Back in olden times, when gcc was young, we developed an insanely
complicated scheme whereby variables which were referenced nonlocally
were forced to live in the stack of the declaring function, and then
the nested functions magically discovered where these variables were
placed. In order for this scheme to function properly, it required
- that the outer function be partially expanded, then we switch to
+ that the outer function be partially expanded, then we switch to
compiling the inner function, and once done with those we switch back
to compiling the outer function. Such delicate ordering requirements
- makes it difficult to do whole translation unit optimizations
+ makes it difficult to do whole translation unit optimizations
involving such functions.
The implementation here is much more direct. Everything that can be
referenced by an inner function is a member of an explicitly created
structure herein called the "nonlocal frame struct". The incoming
- static chain for a nested function is a pointer to this struct in
+ static chain for a nested function is a pointer to this struct in
the parent. In this way, we settle on known offsets from a known
base, and so are decoupled from the logic that places objects in the
function's stack frame. More importantly, we don't have to wait for
@@ -66,8 +66,8 @@
allocated anywhere. Which means that the outer function is now
inlinable.
- Theory of operation here is very simple. Iterate over all the
- statements in all the functions (depth first) several times,
+ Theory of operation here is very simple. Iterate over all the
+ statements in all the functions (depth first) several times,
allocating structures and fields on demand. In general we want to
examine inner functions first, so that we can avoid making changes
to outer functions which are unnecessary.
@@ -83,7 +83,7 @@ struct nesting_info
struct nesting_info *outer;
struct nesting_info *inner;
struct nesting_info *next;
-
+
struct pointer_map_t *field_map;
struct pointer_map_t *var_map;
bitmap suppress_expansion;
@@ -335,7 +335,7 @@ get_chain_decl (struct nesting_info *info)
the construction of this variable is handled specially in
expand_function_start and initialize_inlined_parameters.
Note also that it's represented as a parameter. This is more
- close to the truth, since the initial value does come from
+ close to the truth, since the initial value does come from
the caller. */
decl = build_decl (DECL_SOURCE_LOCATION (info->context),
PARM_DECL, create_tmp_var_name ("CHAIN"), type);
@@ -413,7 +413,7 @@ init_tmp_var_with_call (struct nesting_info *info, gimple_stmt_iterator *gsi,
return t;
}
-
+
/* Copy EXP into a temporary. Allocate the temporary in the context of
INFO and insert the initialization statement before GSI. */
@@ -533,7 +533,7 @@ lookup_tramp_for_decl (struct nesting_info *info, tree decl,
}
return (tree) *slot;
-}
+}
/* Build or return the field within the non-local frame state that holds
the non-local goto "jmp_buf". The buffer itself is maintained by the
@@ -657,7 +657,7 @@ walk_gimple_omp_for (gimple for_stmt,
}
/* Similarly for ROOT and all functions nested underneath, depth first. */
-
+
static void
walk_all_functions (walk_stmt_fn callback_stmt, walk_tree_fn callback_op,
struct nesting_info *root)
@@ -811,7 +811,7 @@ static void note_nonlocal_vla_type (struct nesting_info *info, tree type);
/* A subroutine of convert_nonlocal_reference_op. Create a local variable
in the nested function with DECL_VALUE_EXPR set to reference the true
- variable in the parent function. This is used both for debug info
+ variable in the parent function. This is used both for debug info
and in OpenMP lowering. */
static tree
@@ -1445,7 +1445,7 @@ convert_local_reference_op (tree *tp, int *walk_subtrees, void *data)
/* Then the frame decl is now addressable. */
TREE_ADDRESSABLE (info->frame_decl) = 1;
-
+
save_context = current_function_decl;
current_function_decl = info->context;
recompute_tree_invariant_for_addr_expr (t);
@@ -1785,7 +1785,7 @@ convert_nl_goto_reference (gimple_stmt_iterator *gsi, bool *handled_ops_p,
}
else
new_label = (tree) *slot;
-
+
/* Build: __builtin_nl_goto(new_label, &chain->nl_goto_field). */
field = get_nl_goto_field (i);
x = get_frame_field (info, target_context, field, &wi->gsi);
@@ -2078,7 +2078,7 @@ convert_all_function_calls (struct nesting_info *root)
transformations can induce new uses of the static chain, which in turn
require re-examining all users of the decl. */
/* ??? It would make sense to try to use the call graph to speed this up,
- but the call graph hasn't really been built yet. Even if it did, we
+ but the call graph hasn't really been built yet. Even if it did, we
would still need to iterate in this loop since address-of references
wouldn't show up in the callgraph anyway. */
iter_count = 0;
@@ -2292,7 +2292,7 @@ finalize_nesting_tree_1 (struct nesting_info *root)
gimple_seq_first_stmt (gimple_body (context)), true);
}
- /* If any parameters were referenced non-locally, then we need to
+ /* If any parameters were referenced non-locally, then we need to
insert a copy. Likewise, if any variables were referenced by
pointer, we need to initialize the address. */
if (root->any_parm_remapped)
diff --git a/gcc/tree-nomudflap.c b/gcc/tree-nomudflap.c
index 96b58f37c5a..9e9d624bb02 100644
--- a/gcc/tree-nomudflap.c
+++ b/gcc/tree-nomudflap.c
@@ -89,7 +89,7 @@ gate_mudflap (void)
return flag_mudflap != 0;
}
-struct gimple_opt_pass pass_mudflap_1 =
+struct gimple_opt_pass pass_mudflap_1 =
{
{
GIMPLE_PASS,
@@ -108,7 +108,7 @@ struct gimple_opt_pass pass_mudflap_1 =
}
};
-struct gimple_opt_pass pass_mudflap_2 =
+struct gimple_opt_pass pass_mudflap_2 =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-nrv.c b/gcc/tree-nrv.c
index c1e9d605679..defa8def89c 100644
--- a/gcc/tree-nrv.c
+++ b/gcc/tree-nrv.c
@@ -44,7 +44,7 @@ along with GCC; see the file COPYING3. If not see
That copy can often be avoided by directly constructing the return value
into the final destination mandated by the target's ABI.
- This is basically a generic equivalent to the C++ front-end's
+ This is basically a generic equivalent to the C++ front-end's
Named Return Value optimization. */
struct nrv_data
@@ -104,7 +104,7 @@ finalize_nrv_r (tree *tp, int *walk_subtrees, void *data)
ever encounter languages which prevent this kind of optimization,
then we could either have the languages register the optimization or
we could change the gating function to check the current language. */
-
+
static unsigned int
tree_nrv (void)
{
@@ -184,7 +184,7 @@ tree_nrv (void)
else if (gimple_has_lhs (stmt))
{
tree addr = get_base_address (gimple_get_lhs (stmt));
- /* If there's any MODIFY of component of RESULT,
+ /* If there's any MODIFY of component of RESULT,
then bail out. */
if (addr && addr == result)
return 0;
@@ -262,7 +262,7 @@ gate_pass_return_slot (void)
return optimize > 0;
}
-struct gimple_opt_pass pass_nrv =
+struct gimple_opt_pass pass_nrv =
{
{
GIMPLE_PASS,
@@ -349,7 +349,7 @@ execute_return_slot_opt (void)
return 0;
}
-struct gimple_opt_pass pass_return_slot =
+struct gimple_opt_pass pass_return_slot =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index 443f2808c2d..135342d58b0 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -381,7 +381,7 @@ alloc_object_size (const_gimple call, int object_size_type)
if (TREE_CHAIN (p))
arg2 = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (p)))-1;
}
-
+
if (DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (callee))
{
@@ -397,10 +397,10 @@ alloc_object_size (const_gimple call, int object_size_type)
if (arg1 < 0 || arg1 >= (int)gimple_call_num_args (call)
|| TREE_CODE (gimple_call_arg (call, arg1)) != INTEGER_CST
- || (arg2 >= 0
+ || (arg2 >= 0
&& (arg2 >= (int)gimple_call_num_args (call)
|| TREE_CODE (gimple_call_arg (call, arg2)) != INTEGER_CST)))
- return unknown[object_size_type];
+ return unknown[object_size_type];
if (arg2 >= 0)
bytes = size_binop (MULT_EXPR,
@@ -1111,7 +1111,7 @@ check_for_plus_in_loops (struct object_size_info *osi, tree var)
{
tree basevar = gimple_assign_rhs1 (stmt);
tree cst = gimple_assign_rhs2 (stmt);
-
+
gcc_assert (TREE_CODE (cst) == INTEGER_CST);
if (integer_zerop (cst))
diff --git a/gcc/tree-optimize.c b/gcc/tree-optimize.c
index 778658a70b4..781cbeaf39a 100644
--- a/gcc/tree-optimize.c
+++ b/gcc/tree-optimize.c
@@ -57,7 +57,7 @@ static bool
gate_all_optimizations (void)
{
return (optimize >= 1
- /* Don't bother doing anything if the program has errors.
+ /* Don't bother doing anything if the program has errors.
We have to pass down the queue if we already went into SSA */
&& (!(errorcount || sorrycount) || gimple_in_ssa_p (cfun)));
}
@@ -272,7 +272,7 @@ execute_fixup_cfg (void)
if (decl
&& gimple_call_flags (stmt) & (ECF_CONST
- | ECF_PURE
+ | ECF_PURE
| ECF_LOOPING_CONST_OR_PURE))
{
if (gimple_in_ssa_p (cfun))
@@ -400,7 +400,7 @@ tree_rest_of_compilation (tree fndecl)
We haven't necessarily assigned RTL to all variables yet, so it's
not safe to try to expand expressions involving them. */
cfun->dont_save_pending_sizes_p = 1;
-
+
gimple_register_cfg_hooks ();
bitmap_obstack_initialize (&reg_obstack); /* FIXME, only at RTL generation*/
@@ -409,12 +409,12 @@ tree_rest_of_compilation (tree fndecl)
/* Perform all tree transforms and optimizations. */
execute_pass_list (all_passes);
-
+
bitmap_obstack_release (&reg_obstack);
/* Release the default bitmap obstack. */
bitmap_obstack_release (NULL);
-
+
set_cfun (NULL);
/* If requested, warn about function definitions where the function will
diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c
index d3901c34f0e..a82cec824fd 100644
--- a/gcc/tree-outof-ssa.c
+++ b/gcc/tree-outof-ssa.c
@@ -45,22 +45,22 @@ DEF_VEC_ALLOC_I(source_location,heap);
edges represented as pairs of nodes.
The predecessor and successor list: Nodes are entered in pairs, where
- [0] ->PRED, [1]->SUCC. All the even indexes in the array represent
- predecessors, all the odd elements are successors.
-
+ [0] ->PRED, [1]->SUCC. All the even indexes in the array represent
+ predecessors, all the odd elements are successors.
+
Rationale:
- When implemented as bitmaps, very large programs SSA->Normal times were
+ When implemented as bitmaps, very large programs SSA->Normal times were
being dominated by clearing the interference graph.
- Typically this list of edges is extremely small since it only includes
- PHI results and uses from a single edge which have not coalesced with
+ Typically this list of edges is extremely small since it only includes
+ PHI results and uses from a single edge which have not coalesced with
each other. This means that no virtual PHI nodes are included, and
empirical evidence suggests that the number of edges rarely exceeds
3, and in a bootstrap of GCC, the maximum size encountered was 7.
This also limits the number of possible nodes that are involved to
rarely more than 6, and in the bootstrap of gcc, the maximum number
of nodes encountered was 12. */
-
+
typedef struct _elim_graph {
/* Size of the elimination vectors. */
int size;
@@ -79,7 +79,7 @@ typedef struct _elim_graph {
/* Stack for visited nodes. */
VEC(int,heap) *stack;
-
+
/* The variable partition map. */
var_map map;
@@ -324,7 +324,7 @@ new_elim_graph (int size)
g->edge_list = VEC_alloc (int, heap, 20);
g->edge_locus = VEC_alloc (source_location, heap, 10);
g->stack = VEC_alloc (int, heap, 30);
-
+
g->visited = sbitmap_alloc (size);
return g;
@@ -371,7 +371,7 @@ elim_graph_size (elim_graph g)
/* Add NODE to graph G, if it doesn't exist already. */
-static inline void
+static inline void
elim_graph_add_node (elim_graph g, int node)
{
int x;
@@ -478,7 +478,7 @@ eliminate_build (elim_graph g)
gimple_stmt_iterator gsi;
clear_elim_graph (g);
-
+
for (gsi = gsi_start_phis (g->e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
@@ -521,7 +521,7 @@ eliminate_build (elim_graph g)
/* Push successors of T onto the elimination stack for G. */
-static void
+static void
elim_forward (elim_graph g, int T)
{
int S;
@@ -588,10 +588,10 @@ get_temp_reg (tree name)
return x;
}
-/* Insert required copies for T in graph G. Check for a strongly connected
+/* Insert required copies for T in graph G. Check for a strongly connected
region, and create a temporary to break the cycle if one is found. */
-static void
+static void
elim_create (elim_graph g, int T)
{
int P, S;
@@ -655,7 +655,7 @@ eliminate_phi (edge e, elim_graph g)
if (!TEST_BIT (g->visited, part))
elim_forward (g, part);
}
-
+
sbitmap_zero (g->visited);
while (VEC_length (int, g->stack) > 0)
{
@@ -680,7 +680,7 @@ eliminate_phi (edge e, elim_graph g)
}
-/* Remove each argument from PHI. If an arg was the last use of an SSA_NAME,
+/* Remove each argument from PHI. If an arg was the last use of an SSA_NAME,
check to see if this allows another PHI node to be removed. */
static void
@@ -746,7 +746,7 @@ eliminate_useless_phis (void)
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
tree arg = PHI_ARG_DEF (phi, i);
- if (TREE_CODE (arg) == SSA_NAME
+ if (TREE_CODE (arg) == SSA_NAME
&& is_gimple_reg (SSA_NAME_VAR (arg)))
{
fprintf (stderr, "Argument of PHI is not virtual (");
@@ -776,9 +776,9 @@ eliminate_useless_phis (void)
/* This function will rewrite the current program using the variable mapping
- found in MAP. If the replacement vector VALUES is provided, any
- occurrences of partitions with non-null entries in the vector will be
- replaced with the expression in the vector instead of its mapped
+ found in MAP. If the replacement vector VALUES is provided, any
+ occurrences of partitions with non-null entries in the vector will be
+ replaced with the expression in the vector instead of its mapped
variable. */
static void
@@ -924,7 +924,7 @@ maybe_renumber_stmts_bb (basic_block bb)
{
unsigned i = 0;
gimple_stmt_iterator gsi;
-
+
if (!bb->aux)
return;
bb->aux = NULL;
@@ -972,7 +972,7 @@ trivially_conflicts_p (basic_block bb, tree result, tree arg)
if (gimple_uid (defa) < gimple_uid (use_stmt))
return true;
}
-
+
return false;
}
@@ -1013,9 +1013,9 @@ insert_backedge_copies (void)
tree arg = gimple_phi_arg_def (phi, i);
edge e = gimple_phi_arg_edge (phi, i);
- /* If the argument is not an SSA_NAME, then we will need a
+ /* If the argument is not an SSA_NAME, then we will need a
constant initialization. If the argument is an SSA_NAME with
- a different underlying variable then a copy statement will be
+ a different underlying variable then a copy statement will be
needed. */
if ((e->flags & EDGE_DFS_BACK)
&& (TREE_CODE (arg) != SSA_NAME
@@ -1032,7 +1032,7 @@ insert_backedge_copies (void)
/* In theory the only way we ought to get back to the
start of a loop should be with a COND_EXPR or GOTO_EXPR.
- However, better safe than sorry.
+ However, better safe than sorry.
If the block ends with a control statement or
something that might throw, then we have to
insert this assignment before the last
@@ -1047,7 +1047,7 @@ insert_backedge_copies (void)
continue;
}
- /* Create a new instance of the underlying variable of the
+ /* Create a new instance of the underlying variable of the
PHI result. */
stmt = gimple_build_assign (result_var,
gimple_phi_arg_def (phi, i));
@@ -1056,7 +1056,7 @@ insert_backedge_copies (void)
/* copy location if present. */
if (gimple_phi_arg_has_location (phi, i))
- gimple_set_location (stmt,
+ gimple_set_location (stmt,
gimple_phi_arg_location (phi, i));
/* Insert the new statement into the block and update
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c
index 61e372a54c3..56b88a89ad0 100644
--- a/gcc/tree-parloops.c
+++ b/gcc/tree-parloops.c
@@ -42,7 +42,7 @@ along with GCC; see the file COPYING3. If not see
conditions regarding profitability and correctness are satisfied), we
add GIMPLE_OMP_PARALLEL and GIMPLE_OMP_FOR codes and let omp expansion
machinery do its job.
-
+
Most of the complexity is in bringing the code into the shape expected
by the omp expanders:
-- for GIMPLE_OMP_FOR, ensuring that the loop has only one induction
@@ -61,12 +61,12 @@ along with GCC; see the file COPYING3. If not see
-- handling of common scalar dependence patterns (accumulation, ...)
-- handling of non-innermost loops */
-/*
+/*
Reduction handling:
currently we use vect_is_simple_reduction() to detect reduction patterns.
The code transformation will be introduced by an example.
-
-
+
+
parloop
{
int sum=1;
@@ -108,8 +108,8 @@ parloop
# Storing the initial value given by the user. #
.paral_data_store.32.sum.27 = 1;
-
- #pragma omp parallel num_threads(4)
+
+ #pragma omp parallel num_threads(4)
#pragma omp for schedule(static)
@@ -126,23 +126,23 @@ parloop
# Adding this reduction phi is done at create_phi_for_local_result() #
# sum.27_56 = PHI <sum.27_11, 0>
GIMPLE_OMP_RETURN
-
- # Creating the atomic operation is done at
+
+ # Creating the atomic operation is done at
create_call_for_reduction_1() #
#pragma omp atomic_load
D.1839_59 = *&.paral_data_load.33_51->reduction.23;
D.1840_60 = sum.27_56 + D.1839_59;
#pragma omp atomic_store (D.1840_60);
-
+
GIMPLE_OMP_RETURN
-
+
# collecting the result after the join of the threads is done at
create_loads_for_reductions().
The value computed by the threads is loaded from the
shared struct. #
-
+
.paral_data_load.33_52 = &.paral_data_store.32;
sum_37 = .paral_data_load.33_52->sum.27;
sum_43 = D.1795_41 + sum_37;
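   Viewed at the source level, the reduction handling sketched above
   corresponds roughly to the following C/OpenMP fragment (parloop_sum,
   local_sum, a and n are illustrative names; the pass itself emits
   GIMPLE_OMP_PARALLEL/GIMPLE_OMP_FOR and the atomic statements directly,
   rather than source-level pragmas):

     int
     parloop_sum (const int *a, int n)
     {
       int i, sum = 1;                /* the user's initial value, kept aside */
     #pragma omp parallel num_threads(4)
       {
         int local_sum = 0;           /* per-thread partial result, neutral element for PLUS_EXPR */
     #pragma omp for schedule(static)
         for (i = 0; i < n; i++)
           local_sum += a[i];
     #pragma omp atomic
         sum += local_sum;            /* join point: atomic combine into the shared value */
       }
       return sum;
     }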
@@ -161,19 +161,19 @@ parloop
thread. */
#define MIN_PER_THREAD 100
-/* Element of the hashtable, representing a
+/* Element of the hashtable, representing a
reduction in the current loop. */
struct reduction_info
{
gimple reduc_stmt; /* reduction statement. */
gimple reduc_phi; /* The phi node defining the reduction. */
enum tree_code reduction_code;/* code for the reduction operation. */
- gimple keep_res; /* The PHI_RESULT of this phi is the resulting value
+ gimple keep_res; /* The PHI_RESULT of this phi is the resulting value
                        of the reduction variable when exiting the loop. */
tree initial_value; /* The initial value of the reduction var before entering the loop. */
tree field; /* the name of the field in the parloop data structure intended for reduction. */
tree init; /* reduction initialization value. */
- gimple new_phi; /* (helper field) Newly created phi node whose result
+ gimple new_phi; /* (helper field) Newly created phi node whose result
will be passed to the atomic operation. Represents
the local result each thread computed for the reduction
operation. */
@@ -259,7 +259,7 @@ loop_parallel_p (struct loop *loop)
fprintf (dump_file, "Considering loop %d\n", loop->num);
if (!loop->inner)
fprintf (dump_file, "loop is innermost\n");
- else
+ else
fprintf (dump_file, "loop NOT innermost\n");
}
@@ -376,7 +376,7 @@ take_address_of (tree obj, tree type, edge entry, htab_t decl_address)
}
/* Callback for htab_traverse. Create the initialization statement
- for reduction described in SLOT, and place it at the preheader of
+ for reduction described in SLOT, and place it at the preheader of
the loop described in DATA. */
static int
@@ -389,10 +389,10 @@ initialize_reductions (void **slot, void *data)
struct reduction_info *const reduc = (struct reduction_info *) *slot;
struct loop *loop = (struct loop *) data;
- /* Create initialization in preheader:
+ /* Create initialization in preheader:
reduction_variable = initialization value of reduction. */
- /* In the phi node at the header, replace the argument coming
+ /* In the phi node at the header, replace the argument coming
from the preheader with the reduction initialization value. */
/* Create a new variable to initialize the reduction. */
@@ -408,12 +408,12 @@ initialize_reductions (void **slot, void *data)
init = omp_reduction_init (c, TREE_TYPE (bvar));
reduc->init = init;
- /* Replace the argument representing the initialization value
- with the initialization value for the reduction (neutral
- element for the particular operation, e.g. 0 for PLUS_EXPR,
- 1 for MULT_EXPR, etc).
- Keep the old value in a new variable "reduction_initial",
- that will be taken in consideration after the parallel
+ /* Replace the argument representing the initialization value
+ with the initialization value for the reduction (neutral
+ element for the particular operation, e.g. 0 for PLUS_EXPR,
+ 1 for MULT_EXPR, etc).
+ Keep the old value in a new variable "reduction_initial",
+ that will be taken in consideration after the parallel
computing is done. */
e = loop_preheader_edge (loop);
@@ -526,10 +526,10 @@ eliminate_local_variables_stmt (edge entry, gimple stmt,
/* Eliminates the references to local variables from the single entry
single exit region between the ENTRY and EXIT edges.
-
+
This includes:
- 1) Taking address of a local variable -- these are moved out of the
- region (and temporary variable is created to hold the address if
+ 1) Taking address of a local variable -- these are moved out of the
+ region (and temporary variable is created to hold the address if
necessary).
2) Dereferencing a local variable -- these are replaced with indirect
@@ -589,7 +589,7 @@ expr_invariant_in_region_p (edge entry, edge exit, tree expr)
/* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
The copies are stored to NAME_COPIES, if NAME was already duplicated,
its duplicate stored in NAME_COPIES is returned.
-
+
Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
duplicated, storing the copies in DECL_COPIES. */
@@ -757,7 +757,7 @@ separate_decls_in_region_debug_bind (gimple stmt,
static int
add_field_for_reduction (void **slot, void *data)
{
-
+
struct reduction_info *const red = (struct reduction_info *) *slot;
tree const type = (tree) data;
tree var = SSA_NAME_VAR (gimple_assign_lhs (red->reduc_stmt));
@@ -772,7 +772,7 @@ add_field_for_reduction (void **slot, void *data)
}
/* Callback for htab_traverse. Adds a field corresponding to a ssa name
- described in SLOT. The type is passed in DATA. */
+ described in SLOT. The type is passed in DATA. */
static int
add_field_for_name (void **slot, void *data)
@@ -790,12 +790,12 @@ add_field_for_name (void **slot, void *data)
return 1;
}
-/* Callback for htab_traverse. A local result is the intermediate result
- computed by a single
+/* Callback for htab_traverse. A local result is the intermediate result
+ computed by a single
thread, or the initial value in case no iteration was executed.
- This function creates a phi node reflecting these values.
- The phi's result will be stored in NEW_PHI field of the
- reduction's data structure. */
+ This function creates a phi node reflecting these values.
+ The phi's result will be stored in NEW_PHI field of the
+ reduction's data structure. */
static int
create_phi_for_local_result (void **slot, void *data)
@@ -808,16 +808,16 @@ create_phi_for_local_result (void **slot, void *data)
tree local_res;
source_location locus;
- /* STORE_BB is the block where the phi
- should be stored. It is the destination of the loop exit.
+ /* STORE_BB is the block where the phi
+ should be stored. It is the destination of the loop exit.
(Find the fallthru edge from GIMPLE_OMP_CONTINUE). */
store_bb = FALLTHRU_EDGE (loop->latch)->dest;
/* STORE_BB has two predecessors. One coming from the loop
(the reduction's result is computed at the loop),
- and another coming from a block preceding the loop,
- when no iterations
- are executed (the initial value should be taken). */
+ and another coming from a block preceding the loop,
+ when no iterations
+ are executed (the initial value should be taken). */
if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch))
e = EDGE_PRED (store_bb, 1);
else
@@ -846,7 +846,7 @@ struct clsn_data
};
/* Callback for htab_traverse. Create an atomic instruction for the
- reduction described in SLOT.
+ reduction described in SLOT.
DATA annotates the place in memory the atomic operation relates to,
and the basic block it needs to be generated in. */
@@ -901,12 +901,12 @@ create_call_for_reduction_1 (void **slot, void *data)
return 1;
}
-/* Create the atomic operation at the join point of the threads.
- REDUCTION_LIST describes the reductions in the LOOP.
- LD_ST_DATA describes the shared data structure where
+/* Create the atomic operation at the join point of the threads.
+ REDUCTION_LIST describes the reductions in the LOOP.
+ LD_ST_DATA describes the shared data structure where
shared data is stored in and loaded from. */
static void
-create_call_for_reduction (struct loop *loop, htab_t reduction_list,
+create_call_for_reduction (struct loop *loop, htab_t reduction_list,
struct clsn_data *ld_st_data)
{
htab_traverse (reduction_list, create_phi_for_local_result, loop);
@@ -953,11 +953,11 @@ create_loads_for_reductions (void **slot, void *data)
gcc_unreachable ();
}
-/* Load the reduction result that was stored in LD_ST_DATA.
+/* Load the reduction result that was stored in LD_ST_DATA.
REDUCTION_LIST describes the list of reductions that the
loads should be generated for. */
static void
-create_final_loads_for_reduction (htab_t reduction_list,
+create_final_loads_for_reduction (htab_t reduction_list,
struct clsn_data *ld_st_data)
{
gimple_stmt_iterator gsi;
@@ -978,8 +978,8 @@ create_final_loads_for_reduction (htab_t reduction_list,
/* Callback for htab_traverse. Store the neutral value for the
particular reduction's operation, e.g. 0 for PLUS_EXPR,
1 for MULT_EXPR, etc. into the reduction field.
- The reduction is specified in SLOT. The store information is
- passed in DATA. */
+ The reduction is specified in SLOT. The store information is
+ passed in DATA. */
static int
create_stores_for_reduction (void **slot, void *data)
@@ -1035,7 +1035,7 @@ create_loads_and_stores_for_name (void **slot, void *data)
/* Moves all the variables used in LOOP and defined outside of it (including
the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
name) to a structure created for this purpose. The code
-
+
while (1)
{
use (a);
@@ -1061,14 +1061,14 @@ create_loads_and_stores_for_name (void **slot, void *data)
pointer `new' is intentionally not initialized (the loop will be split to a
separate function later, and `new' will be initialized from its arguments).
LD_ST_DATA holds information about the shared data structure used to pass
- information among the threads. It is initialized here, and
- gen_parallel_loop will pass it to create_call_for_reduction that
- needs this information. REDUCTION_LIST describes the reductions
+ information among the threads. It is initialized here, and
+ gen_parallel_loop will pass it to create_call_for_reduction that
+ needs this information. REDUCTION_LIST describes the reductions
in LOOP. */
static void
separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
- tree *arg_struct, tree *new_arg_struct,
+ tree *arg_struct, tree *new_arg_struct,
struct clsn_data *ld_st_data)
{
@@ -1093,7 +1093,7 @@ separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
for (i = 0; VEC_iterate (basic_block, body, i, bb); i++)
{
- if (bb != entry_bb && bb != exit_bb)
+ if (bb != entry_bb && bb != exit_bb)
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
@@ -1143,7 +1143,7 @@ separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
VEC_free (basic_block, heap, body);
- if (htab_elements (name_copies) == 0 && htab_elements (reduction_list) == 0)
+ if (htab_elements (name_copies) == 0 && htab_elements (reduction_list) == 0)
{
/* It may happen that there is nothing to copy (if there are only
loop carried and external variables in the loop). */
@@ -1167,7 +1167,7 @@ separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
type);
}
layout_type (type);
-
+
/* Create the loads and stores. */
*arg_struct = create_tmp_var (type, ".paral_data_store");
add_referenced_var (*arg_struct);
@@ -1188,7 +1188,7 @@ separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
if (reduction_list && htab_elements (reduction_list) > 0)
{
htab_traverse (reduction_list, create_stores_for_reduction,
- ld_st_data);
+ ld_st_data);
clsn_data.load = make_ssa_name (nvar, NULL);
clsn_data.load_bb = exit->dest;
clsn_data.store = ld_st_data->store;
@@ -1279,12 +1279,12 @@ create_loop_fn (void)
duplicates the part of the last iteration that gets disabled to the
exit of the loop. NIT is the number of iterations of the loop
(used to initialize the variables in the duplicated part).
-
+
TODO: the common case is that latch of the loop is empty and immediately
follows the loop exit. In this case, it would be better not to copy the
body of the loop, but only move the entry of the loop directly before the
exit check and increase the number of iterations of the loop by one.
- This may need some additional preconditioning in case NIT = ~0.
+ This may need some additional preconditioning in case NIT = ~0.
REDUCTION_LIST describes the reductions in LOOP. */
static void
@@ -1339,7 +1339,7 @@ transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit
ex_bb = nbbs[0];
free (nbbs);
- /* Other than reductions, the only gimple reg that should be copied
+ /* Other than reductions, the only gimple reg that should be copied
out of the loop is the control variable. */
control_name = NULL_TREE;
@@ -1354,13 +1354,13 @@ transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit
}
/* Check if it is a part of reduction. If it is,
- keep the phi at the reduction's keep_res field. The
- PHI_RESULT of this phi is the resulting value of the reduction
+ keep the phi at the reduction's keep_res field. The
+ PHI_RESULT of this phi is the resulting value of the reduction
variable when exiting the loop. */
exit = single_dom_exit (loop);
- if (htab_elements (reduction_list) > 0)
+ if (htab_elements (reduction_list) > 0)
{
struct reduction_info *red;
@@ -1380,10 +1380,10 @@ transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit
}
gcc_assert (control_name != NULL_TREE);
- /* Initialize the control variable to number of iterations
+ /* Initialize the control variable to number of iterations
according to the rhs of the exit condition. */
gsi = gsi_after_labels (ex_bb);
- cond_nit = last_stmt (exit->src);
+ cond_nit = last_stmt (exit->src);
nit_1 = gimple_cond_rhs (cond_nit);
nit_1 = force_gimple_operand_gsi (&gsi,
fold_convert (TREE_TYPE (control_name), nit_1),
@@ -1478,7 +1478,7 @@ create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
stmt = SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit));
def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
- locus = gimple_phi_arg_location_from_edge (stmt,
+ locus = gimple_phi_arg_location_from_edge (stmt,
loop_preheader_edge (loop));
add_phi_arg (phi, def, guard, locus);
@@ -1636,7 +1636,7 @@ gen_parallel_loop (struct loop *loop, htab_t reduction_list,
transform_to_exit_first_loop (loop, reduction_list, nit);
/* Generate initializations for reductions. */
- if (htab_elements (reduction_list) > 0)
+ if (htab_elements (reduction_list) > 0)
htab_traverse (reduction_list, initialize_reductions, loop);
/* Eliminate the references to local variables from the loop. */
@@ -1647,13 +1647,13 @@ gen_parallel_loop (struct loop *loop, htab_t reduction_list,
eliminate_local_variables (entry, exit);
/* In the old loop, move all variables non-local to the loop to a structure
and back, and create separate decls for the variables used in loop. */
- separate_decls_in_region (entry, exit, reduction_list, &arg_struct,
+ separate_decls_in_region (entry, exit, reduction_list, &arg_struct,
&new_arg_struct, &clsn_data);
/* Create the parallel constructs. */
parallel_head = create_parallel_loop (loop, create_loop_fn (), arg_struct,
new_arg_struct, n_threads);
- if (htab_elements (reduction_list) > 0)
+ if (htab_elements (reduction_list) > 0)
create_call_for_reduction (loop, reduction_list, &clsn_data);
scev_reset ();
@@ -1706,7 +1706,7 @@ build_new_reduction (htab_t reduction_list, gimple reduc_stmt, gimple phi)
struct reduction_info *new_reduction;
gcc_assert (reduc_stmt);
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file,
@@ -1714,9 +1714,9 @@ build_new_reduction (htab_t reduction_list, gimple reduc_stmt, gimple phi)
print_gimple_stmt (dump_file, reduc_stmt, 0, 0);
fprintf (dump_file, "\n");
}
-
+
new_reduction = XCNEW (struct reduction_info);
-
+
new_reduction->reduc_stmt = reduc_stmt;
new_reduction->reduc_phi = phi;
new_reduction->reduction_code = gimple_assign_rhs_code (reduc_stmt);
@@ -1791,7 +1791,7 @@ try_create_reduction_list (loop_p loop, htab_t reduction_list)
gather_scalar_reductions (loop, reduction_list);
-
+
for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
@@ -1907,7 +1907,7 @@ parallelize_loops (void)
else
fprintf (dump_file, "loop %d is innermost\n",loop->num);
}
-
+
/* If we use autopar in graphite pass, we use its marked dependency
checking results. */
if (flag_loop_parallelize_all && !loop->can_be_parallel)
@@ -1919,10 +1919,10 @@ parallelize_loops (void)
if (!single_dom_exit (loop))
{
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "loop is !single_dom_exit\n");
-
+
continue;
}
@@ -1941,7 +1941,7 @@ parallelize_loops (void)
/* Do not bother with loops in cold areas. */
|| optimize_loop_nest_for_size_p (loop)))
continue;
-
+
if (!try_get_loop_niter (loop, &niter_desc))
continue;
@@ -1959,8 +1959,8 @@ parallelize_loops (void)
fprintf (dump_file, "outer loop\n");
else
fprintf (dump_file, "inner loop\n");
- }
- gen_parallel_loop (loop, reduction_list,
+ }
+ gen_parallel_loop (loop, reduction_list,
n_threads, &niter_desc);
verify_flow_info ();
verify_dominators (CDI_DOMINATORS);
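   For reference, the driver above is exercised through the existing
   -ftree-parallelize-loops=N option; a typical invocation (the file name
   loop.c is illustrative) is

     gcc -O2 -ftree-parallelize-loops=4 -fdump-tree-all loop.c

   after which the GIMPLE_OMP_PARALLEL / GIMPLE_OMP_FOR constructs built by
   gen_parallel_loop can be inspected in the tree dumps.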
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index bff027b304c..1bff0bd52ce 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -71,7 +71,7 @@ enum tree_dump_index
#define TDF_DIAGNOSTIC (1 << 15) /* A dump to be put in a diagnostic
message. */
-#define TDF_VERBOSE (1 << 16) /* A dump that uses the full tree
+#define TDF_VERBOSE (1 << 16) /* A dump that uses the full tree
dumper to print stmts. */
#define TDF_RHS_ONLY (1 << 17) /* a flag to only print the RHS of
a gimple stmt. */
@@ -232,7 +232,7 @@ struct dump_file_info
/* To-do flags. */
#define TODO_dump_func (1 << 0)
#define TODO_ggc_collect (1 << 1)
-#define TODO_verify_ssa (1 << 2)
+#define TODO_verify_ssa (1 << 2)
#define TODO_verify_flow (1 << 3)
#define TODO_verify_stmts (1 << 4)
#define TODO_cleanup_cfg (1 << 5)
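   As a reminder of how these to-do flags are consumed, here is a sketch of a
   pass descriptor that asks for dumping and SSA verification after it runs.
   It is modeled on descriptors such as pass_tree_profile later in this patch;
   the name "example" and execute_example are placeholders, and the field
   order follows struct opt_pass.

     static unsigned int
     execute_example (void)
     {
       return 0;
     }

     struct gimple_opt_pass pass_example =
     {
      {
       GIMPLE_PASS,
       "example",                            /* name */
       NULL,                                 /* gate */
       execute_example,                      /* execute */
       NULL,                                 /* sub */
       NULL,                                 /* next */
       0,                                    /* static_pass_number */
       TV_NONE,                              /* tv_id */
       PROP_cfg | PROP_ssa,                  /* properties_required */
       0,                                    /* properties_provided */
       0,                                    /* properties_destroyed */
       0,                                    /* todo_flags_start */
       TODO_dump_func | TODO_verify_ssa      /* todo_flags_finish */
      }
     };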
@@ -264,7 +264,7 @@ struct dump_file_info
IDF is done. This is used by passes that need the PHI nodes for
O_j even if it means that some arguments will come from the default
definition of O_j's symbol (e.g., pass_linear_transform).
-
+
WARNING: If you need to use this flag, chances are that your pass
may be doing something wrong. Inserting PHI nodes for an old name
where not all edges carry a new replacement may lead to silent
diff --git a/gcc/tree-phinodes.c b/gcc/tree-phinodes.c
index a48ae01fe8d..6e63ed4f8f6 100644
--- a/gcc/tree-phinodes.c
+++ b/gcc/tree-phinodes.c
@@ -353,7 +353,7 @@ reserve_phi_args_for_new_edge (basic_block bb)
/* Adds PHI to BB. */
-void
+void
add_phi_node_to_bb (gimple phi, basic_block bb)
{
gimple_stmt_iterator gsi;
@@ -441,7 +441,7 @@ remove_phi_arg_num (gimple phi, int i)
*(new_p->use) = *(old_p->use);
relink_imm_use (new_p, old_p);
/* Move the location as well. */
- gimple_phi_arg_set_location (phi, i,
+ gimple_phi_arg_set_location (phi, i,
gimple_phi_arg_location (phi, num_elem - 1));
}
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 0ce35f5de86..78d45b88364 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -1,18 +1,18 @@
/* Predictive commoning.
Copyright (C) 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -29,7 +29,7 @@ along with GCC; see the file COPYING3. If not see
and if needed, we could also take register pressure into account.
Let us demonstrate what is done on an example:
-
+
for (i = 0; i < 100; i++)
{
a[i+2] = a[i] + a[i+1];
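   For the a[] recurrence in this loop, the end result is roughly equivalent
   to the following C sketch (the temporaries p, q and r are illustrative;
   the copies on the latch are exactly what the unrolling described below
   removes):

     void
     predcom_example (int *a)
     {
       int i, p, q, r;

       p = a[0];
       q = a[1];
       for (i = 0; i < 100; i++)
         {
           r = p + q;          /* a[i] and a[i+1] are reused from registers */
           a[i + 2] = r;
           p = q;              /* copies on the loop latch */
           q = r;
         }
     }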
@@ -63,7 +63,7 @@ along with GCC; see the file COPYING3. If not see
making the further transformations simpler. Also, the shorter chains
need the same number of registers, but may require lower unrolling
factor in order to get rid of the copies on the loop latch.
-
+
In our example, we get the following chains (the chain for c is invalid).
a[i]{read,+0}, a[i+1]{read,-1}, a[i+2]{write,-2}
@@ -76,7 +76,7 @@ along with GCC; see the file COPYING3. If not see
with the smallest positive distance to the read. Then, we remove
the references that are not used in any of these chains, discard the
empty groups, and propagate all the links so that they point to the
- single root reference of the chain (adjusting their distance
+ single root reference of the chain (adjusting their distance
appropriately). Some extra care needs to be taken for references with
step 0. In our example (the numbers indicate the distance of the
reuse),
@@ -132,7 +132,7 @@ along with GCC; see the file COPYING3. If not see
times. The stores to RN (R0) in the copies of the loop body are
periodically replaced with R0, R1, ... (R1, R2, ...), so that they can
be coalesced and the copies can be eliminated.
-
+
TODO -- copy propagation and other optimizations may change the live
ranges of the temporary registers and prevent them from being coalesced;
this may increase the register pressure.
@@ -206,7 +206,7 @@ along with GCC; see the file COPYING3. If not see
references. */
#define MAX_DISTANCE (target_avail_regs < 16 ? 4 : 8)
-
+
/* Data references (or phi nodes that carry data reference values across
loop iterations). */
@@ -704,7 +704,7 @@ split_data_refs_to_components (struct loop *loop,
struct component *comp_list = NULL, *comp;
dref dataref;
basic_block last_always_executed = last_always_executed_block (loop);
-
+
for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
{
if (!DR_REF (dr))
@@ -754,7 +754,7 @@ split_data_refs_to_components (struct loop *loop,
&& (ia == bad || ib == bad
|| !determine_offset (dra, drb, &dummy_off)))
continue;
-
+
merge_comps (comp_father, comp_size, ia, ib);
}
@@ -808,7 +808,7 @@ end:
/* Returns true if the component COMP satisfies the conditions
described in 2) at the beginning of this file. LOOP is the current
loop. */
-
+
static bool
suitable_component_p (struct loop *loop, struct component *comp)
{
@@ -859,7 +859,7 @@ suitable_component_p (struct loop *loop, struct component *comp)
return true;
}
-
+
/* Check the conditions on references inside each of components COMPS,
and remove the unsuitable components from the list. The new list
of components is returned. The conditions are described in 2) at
@@ -1255,7 +1255,7 @@ replace_ref_with (gimple stmt, tree new_tree, bool set, bool in_lhs)
gsi_insert_before (&bsi, new_stmt, GSI_NEW_STMT);
return;
}
-
+
/* Since the reference is of gimple_reg type, it should only
appear as lhs or rhs of modify statement. */
gcc_assert (is_gimple_assign (stmt));
@@ -1275,7 +1275,7 @@ replace_ref_with (gimple stmt, tree new_tree, bool set, bool in_lhs)
if (in_lhs)
{
/* We have statement
-
+
OLD = VAL
If OLD is a memory reference, then VAL is gimple_val, and we transform
@@ -1284,7 +1284,7 @@ replace_ref_with (gimple stmt, tree new_tree, bool set, bool in_lhs)
OLD = VAL
NEW = VAL
- Otherwise, we are replacing a combination chain,
+ Otherwise, we are replacing a combination chain,
VAL is the expression that performs the combination, and OLD is an
SSA name. In this case, we transform the assignment to
@@ -1496,7 +1496,7 @@ initialize_root_vars (struct loop *loop, chain_p chain, bitmap tmp_vars)
}
if (reuse_first)
VEC_quick_push (tree, chain->vars, VEC_index (tree, chain->vars, 0));
-
+
for (i = 0; VEC_iterate (tree, chain->vars, i, var); i++)
VEC_replace (tree, chain->vars, i, make_ssa_name (var, NULL));
@@ -1561,12 +1561,12 @@ initialize_root_vars_lm (struct loop *loop, dref root, bool written,
VEC_quick_push (tree, *vars, var);
if (written)
VEC_quick_push (tree, *vars, VEC_index (tree, *vars, 0));
-
+
for (i = 0; VEC_iterate (tree, *vars, i, var); i++)
VEC_replace (tree, *vars, i, make_ssa_name (var, NULL));
var = VEC_index (tree, *vars, 0);
-
+
init = force_gimple_operand (init, &stmts, written, NULL_TREE);
if (stmts)
gsi_insert_seq_on_edge_immediate (entry, stmts);
@@ -1604,7 +1604,7 @@ execute_load_motion (struct loop *loop, chain_p chain, bitmap tmp_vars)
for (i = 0; VEC_iterate (dref, chain->refs, i, a); i++)
if (!DR_IS_READ (a->ref))
n_writes++;
-
+
/* If there are no reads in the loop, there is nothing to do. */
if (n_writes == VEC_length (dref, chain->refs))
return;
@@ -1630,7 +1630,7 @@ execute_load_motion (struct loop *loop, chain_p chain, bitmap tmp_vars)
else
ridx = 1;
}
-
+
replace_ref_with (a->stmt, VEC_index (tree, vars, ridx),
!is_read, !is_read);
}
@@ -1700,7 +1700,7 @@ remove_stmt (gimple stmt)
while (1)
{
gimple_stmt_iterator bsi;
-
+
bsi = gsi_for_stmt (stmt);
name = gimple_assign_lhs (stmt);
@@ -1804,7 +1804,7 @@ execute_pred_commoning (struct loop *loop, VEC (chain_p, heap) *chains,
else
execute_pred_commoning_chain (loop, chain, tmp_vars);
}
-
+
update_ssa (TODO_update_ssa_only_virtuals);
}
@@ -2400,7 +2400,7 @@ prepare_initializers_chain (struct loop *loop, chain_p chain)
init = ref_at_iteration (loop, DR_REF (dr), (int) i - n);
if (!init)
return false;
-
+
if (!chain->all_always_accessed && tree_could_trap_p (init))
return false;
@@ -2522,7 +2522,7 @@ tree_predictive_commoning_loop (struct loop *loop)
dta.chains = chains;
dta.tmp_vars = tmp_vars;
-
+
update_ssa (TODO_update_ssa_only_virtuals);
/* Cfg manipulations performed in tree_transform_and_unroll_loop before
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 7173ad2331b..caa19ac8d6c 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -626,13 +626,13 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
{
size_t len = TREE_VEC_LENGTH (node);
for (i = 0; i < len - 1; i++)
- {
+ {
dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags,
false);
pp_character (buffer, ',');
pp_space (buffer);
}
- dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc,
+ dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc,
flags, false);
}
}
@@ -1905,7 +1905,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, ">");
break;
-
+
case VEC_COND_EXPR:
pp_string (buffer, " VEC_COND_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
@@ -2010,7 +2010,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
case OMP_SECTION:
pp_string (buffer, "#pragma omp section");
goto dump_omp_body;
-
+
case OMP_MASTER:
pp_string (buffer, "#pragma omp master");
goto dump_omp_body;
@@ -2144,7 +2144,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
-
+
case VEC_EXTRACT_ODD_EXPR:
pp_string (buffer, " VEC_EXTRACT_ODD_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
@@ -2583,7 +2583,7 @@ op_symbol_code (enum tree_code code)
case POINTER_PLUS_EXPR:
return "+";
-
+
case PLUS_EXPR:
return "+";
diff --git a/gcc/tree-profile.c b/gcc/tree-profile.c
index 2b75461b188..3d681e03421 100644
--- a/gcc/tree-profile.c
+++ b/gcc/tree-profile.c
@@ -73,10 +73,10 @@ tree_init_ic_make_global_vars (void)
tree gcov_type_ptr;
ptr_void = build_pointer_type (void_type_node);
-
- ic_void_ptr_var
- = build_decl (UNKNOWN_LOCATION, VAR_DECL,
- get_identifier ("__gcov_indirect_call_callee"),
+
+ ic_void_ptr_var
+ = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+ get_identifier ("__gcov_indirect_call_callee"),
ptr_void);
TREE_STATIC (ic_void_ptr_var) = 1;
TREE_PUBLIC (ic_void_ptr_var) = 0;
@@ -85,9 +85,9 @@ tree_init_ic_make_global_vars (void)
varpool_finalize_decl (ic_void_ptr_var);
gcov_type_ptr = build_pointer_type (get_gcov_type ());
- ic_gcov_type_ptr_var
- = build_decl (UNKNOWN_LOCATION, VAR_DECL,
- get_identifier ("__gcov_indirect_call_counters"),
+ ic_gcov_type_ptr_var
+ = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+ get_identifier ("__gcov_indirect_call_counters"),
gcov_type_ptr);
TREE_STATIC (ic_gcov_type_ptr_var) = 1;
TREE_PUBLIC (ic_gcov_type_ptr_var) = 0;
@@ -139,7 +139,7 @@ tree_init_edge_profiler (void)
one_value_profiler_fn_type);
tree_init_ic_make_global_vars ();
-
+
/* void (*) (gcov_type *, gcov_type, void *, void *) */
ic_profiler_fn_type
= build_function_type_list (void_type_node,
@@ -184,8 +184,8 @@ add_abnormal_goto_call_edges (gimple_stmt_iterator gsi)
make_abnormal_goto_edges (gimple_bb (stmt), true);
}
-/* Output instructions as GIMPLE trees to increment the edge
- execution count, and insert them on E. We rely on
+/* Output instructions as GIMPLE trees to increment the edge
+ execution count, and insert them on E. We rely on
gsi_insert_on_edge to preserve the order. */
static void
@@ -222,8 +222,8 @@ prepare_instrumented_value (gimple_stmt_iterator *gsi, histogram_value value)
true, NULL_TREE, true, GSI_SAME_STMT);
}
-/* Output instructions as GIMPLE trees to increment the interval histogram
- counter. VALUE is the expression whose value is profiled. TAG is the
+/* Output instructions as GIMPLE trees to increment the interval histogram
+ counter. VALUE is the expression whose value is profiled. TAG is the
tag of the section for counters, BASE is offset of the counter position. */
static void
@@ -238,7 +238,7 @@ tree_gen_interval_profiler (histogram_value value, unsigned tag, unsigned base)
value->hdata.intvl.int_start);
tree steps = build_int_cst_type (unsigned_type_node,
value->hdata.intvl.steps);
-
+
ref_ptr = force_gimple_operand_gsi (&gsi,
build_addr (ref, current_function_decl),
true, NULL_TREE, true, GSI_SAME_STMT);
@@ -249,8 +249,8 @@ tree_gen_interval_profiler (histogram_value value, unsigned tag, unsigned base)
add_abnormal_goto_call_edges (gsi);
}
-/* Output instructions as GIMPLE trees to increment the power of two histogram
- counter. VALUE is the expression whose value is profiled. TAG is the tag
+/* Output instructions as GIMPLE trees to increment the power of two histogram
+ counter. VALUE is the expression whose value is profiled. TAG is the tag
of the section for counters, BASE is offset of the counter position. */
static void
@@ -261,7 +261,7 @@ tree_gen_pow2_profiler (histogram_value value, unsigned tag, unsigned base)
tree ref_ptr = tree_coverage_counter_addr (tag, base);
gimple call;
tree val;
-
+
ref_ptr = force_gimple_operand_gsi (&gsi, ref_ptr,
true, NULL_TREE, true, GSI_SAME_STMT);
val = prepare_instrumented_value (&gsi, value);
@@ -282,7 +282,7 @@ tree_gen_one_value_profiler (histogram_value value, unsigned tag, unsigned base)
tree ref_ptr = tree_coverage_counter_addr (tag, base);
gimple call;
tree val;
-
+
ref_ptr = force_gimple_operand_gsi (&gsi, ref_ptr,
true, NULL_TREE, true, GSI_SAME_STMT);
val = prepare_instrumented_value (&gsi, value);
@@ -293,7 +293,7 @@ tree_gen_one_value_profiler (histogram_value value, unsigned tag, unsigned base)
/* Output instructions as GIMPLE trees for code to find the most
- common called function in indirect call.
+ common called function in indirect call.
VALUE is the call expression whose indirect callee is profiled.
TAG is the tag of the section for counters, BASE is offset of the
counter position. */
@@ -311,8 +311,8 @@ tree_gen_ic_profiler (histogram_value value, unsigned tag, unsigned base)
true, NULL_TREE, true, GSI_SAME_STMT);
/* Insert code:
-
- __gcov_indirect_call_counters = get_relevant_counter_ptr ();
+
+ __gcov_indirect_call_counters = get_relevant_counter_ptr ();
__gcov_indirect_call_callee = (void *) indirect call argument;
*/
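   A source-level sketch of the caller side of this instrumentation (the two
   globals are the ones created by tree_init_ic_make_global_vars above;
   counter_slot, call_indirect, fn and x are illustrative, and gcov_type is
   approximated here by long long):

     void *__gcov_indirect_call_callee;
     long long *__gcov_indirect_call_counters;

     static long long counter_slot[1];   /* stands in for the per-call-site counters */

     static int
     call_indirect (int (*fn) (int), int x)
     {
       __gcov_indirect_call_counters = counter_slot;
       __gcov_indirect_call_callee = (void *) fn;
       return fn (x);
     }

   On the callee side, tree_gen_ic_func_profiler (below) emits code at
   function entry that passes the address of the function actually entered to
   the profiler, so the runtime can match it against
   __gcov_indirect_call_callee and update the counters for that call site.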
@@ -345,9 +345,9 @@ tree_gen_ic_func_profiler (void)
if (!c_node->needed)
return;
-
+
tree_init_edge_profiler ();
-
+
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
{
tree void0;
@@ -356,7 +356,7 @@ tree_gen_ic_func_profiler (void)
gsi = gsi_start_bb (bb);
cur_func = force_gimple_operand_gsi (&gsi,
- build_addr (current_function_decl,
+ build_addr (current_function_decl,
current_function_decl),
true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -381,7 +381,7 @@ tree_gen_ic_func_profiler (void)
}
}
-/* Output instructions as GIMPLE trees for code to find the most common value
+/* Output instructions as GIMPLE trees for code to find the most common value
of a difference between two evaluations of an expression.
VALUE is the expression whose value is profiled. TAG is the tag of the
section for counters, BASE is offset of the counter position. */
@@ -398,8 +398,8 @@ tree_gen_const_delta_profiler (histogram_value value ATTRIBUTE_UNUSED,
gcc_unreachable ();
}
-/* Output instructions as GIMPLE trees to increment the average histogram
- counter. VALUE is the expression whose value is profiled. TAG is the
+/* Output instructions as GIMPLE trees to increment the average histogram
+ counter. VALUE is the expression whose value is profiled. TAG is the
tag of the section for counters, BASE is offset of the counter position. */
static void
@@ -410,7 +410,7 @@ tree_gen_average_profiler (histogram_value value, unsigned tag, unsigned base)
tree ref_ptr = tree_coverage_counter_addr (tag, base);
gimple call;
tree val;
-
+
ref_ptr = force_gimple_operand_gsi (&gsi, ref_ptr,
true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -420,8 +420,8 @@ tree_gen_average_profiler (histogram_value value, unsigned tag, unsigned base)
add_abnormal_goto_call_edges (gsi);
}
-/* Output instructions as GIMPLE trees to increment the ior histogram
- counter. VALUE is the expression whose value is profiled. TAG is the
+/* Output instructions as GIMPLE trees to increment the ior histogram
+ counter. VALUE is the expression whose value is profiled. TAG is the
tag of the section for counters, BASE is offset of the counter position. */
static void
@@ -432,7 +432,7 @@ tree_gen_ior_profiler (histogram_value value, unsigned tag, unsigned base)
tree ref_ptr = tree_coverage_counter_addr (tag, base);
gimple call;
tree val;
-
+
ref_ptr = force_gimple_operand_gsi (&gsi, ref_ptr,
true, NULL_TREE, true, GSI_SAME_STMT);
val = prepare_instrumented_value (&gsi, value);
@@ -473,7 +473,7 @@ tree_profiling (void)
branch_prob ();
- if (! flag_branch_probabilities
+ if (! flag_branch_probabilities
&& flag_profile_values)
tree_gen_ic_func_profiler ();
@@ -490,7 +490,7 @@ tree_profiling (void)
return 0;
}
-struct gimple_opt_pass pass_tree_profile =
+struct gimple_opt_pass pass_tree_profile =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 2cae2ceff45..087ba798830 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -19,9 +19,9 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/*
- Description:
-
+/*
+ Description:
+
This pass analyzes the evolution of scalar variables in loop
structures. The algorithm is based on the SSA representation,
and on the loop hierarchy tree. This algorithm is not based on
@@ -42,15 +42,15 @@ along with GCC; see the file COPYING3. If not see
are fully instantiated before their use because symbolic names can
hide some difficult cases such as self-references described later
(see the Fibonacci example).
-
+
A short sketch of the algorithm is:
-
+
Given a scalar variable to be analyzed, follow the SSA edge to
its definition:
-
+
- When the definition is a GIMPLE_ASSIGN: if the right hand side
(RHS) of the definition cannot be statically analyzed, the answer
- of the analyzer is: "don't know".
+ of the analyzer is: "don't know".
Otherwise, for all the variables that are not yet analyzed in the
RHS, try to determine their evolution, and finally try to
evaluate the operation of the RHS that gives the evolution
@@ -72,16 +72,16 @@ along with GCC; see the file COPYING3. If not see
symbolic chrec {initial_condition, +, symbolic_stride}_loop.
Examples:
-
+
Example 1: Illustration of the basic algorithm.
-
+
| a = 3
| loop_1
| b = phi (a, c)
| c = b + 1
| if (c > 10) exit_loop
| endloop
-
+
Suppose that we want to know the number of iterations of the
loop_1. The exit_loop is controlled by a COND_EXPR (c > 10). We
ask the scalar evolution analyzer two questions: what's the
@@ -112,22 +112,22 @@ along with GCC; see the file COPYING3. If not see
created a variable that is implicitly defined, "x" or just "_1",
and all the other analyzed scalars of the loop are defined in
function of this variable:
-
+
a -> 3
b -> {3, +, 1}_1
c -> {4, +, 1}_1
-
- or in terms of a C program:
-
+
+ or in terms of a C program:
+
| a = 3
| for (x = 0; x <= 7; x++)
| {
| b = x + 3
| c = x + 4
| }
-
+
Example 2a: Illustration of the algorithm on nested loops.
-
+
| loop_1
| a = phi (1, b)
| c = a + 2
@@ -136,22 +136,22 @@ along with GCC; see the file COPYING3. If not see
| d = b + 3
| endloop
| endloop
-
+
For analyzing the scalar evolution of "a", the algorithm follows
the SSA edge into the loop's body: "a -> b". "b" is an inner
- loop-phi-node, and its analysis as in Example 1, gives:
-
+ loop-phi-node, and its analysis as in Example 1, gives:
+
b -> {c, +, 3}_2
d -> {c + 3, +, 3}_2
-
+
Following the SSA edge for the initial condition, we end on "c = a
+ 2", and then on the starting loop-phi-node "a". From this point,
the loop stride is computed: back on "c = a + 2" we get a "+2" in
the loop_1, then on the loop-phi-node "b" we compute the overall
effect of the inner loop that is "b = c + 30", and we get a "+30"
in the loop_1. That means that the overall stride in loop_1 is
- equal to "+32", and the result is:
-
+ equal to "+32", and the result is:
+
a -> {1, +, 32}_1
c -> {3, +, 32}_1
@@ -179,65 +179,65 @@ along with GCC; see the file COPYING3. If not see
The result of this call is {{0, +, 1}_1, +, 1}_2.
Example 3: Higher degree polynomials.
-
+
| loop_1
| a = phi (2, b)
| c = phi (5, d)
| b = a + 1
| d = c + a
| endloop
-
+
a -> {2, +, 1}_1
b -> {3, +, 1}_1
c -> {5, +, a}_1
d -> {5 + a, +, a}_1
-
+
instantiate_parameters (loop_1, {5, +, a}_1) -> {5, +, 2, +, 1}_1
instantiate_parameters (loop_1, {5 + a, +, a}_1) -> {7, +, 3, +, 1}_1
-
+
Example 4: Lucas, Fibonacci, or mixers in general.
-
+
| loop_1
| a = phi (1, b)
| c = phi (3, d)
| b = c
| d = c + a
| endloop
-
+
a -> (1, c)_1
c -> {3, +, a}_1
-
+
The syntax "(1, c)_1" stands for a PEELED_CHREC that has the
following semantics: during the first iteration of the loop_1, the
variable contains the value 1, and then it contains the value "c".
Note that this syntax is close to the syntax of the loop-phi-node:
"a -> (1, c)_1" vs. "a = phi (1, c)".
-
+
The symbolic chrec representation contains all the semantics of the
original code. What is more difficult is to use this information.
-
+
Example 5: Flip-flops, or exchangers.
-
+
| loop_1
| a = phi (1, b)
| c = phi (3, d)
| b = c
| d = a
| endloop
-
+
a -> (1, c)_1
c -> (3, a)_1
-
+
Based on these symbolic chrecs, it is possible to refine this
- information into the more precise PERIODIC_CHRECs:
-
+ information into the more precise PERIODIC_CHRECs:
+
a -> |1, 3|_1
c -> |3, 1|_1
-
+
This transformation is not yet implemented.
-
+
Further readings:
-
+
You can find a more detailed description of the algorithm in:
http://icps.u-strasbg.fr/~pop/DEA_03_Pop.pdf
http://icps.u-strasbg.fr/~pop/DEA_03_Pop.ps.gz. But note that
@@ -245,7 +245,7 @@ along with GCC; see the file COPYING3. If not see
algorithm have changed. I'm working on a research report that
updates the description of the algorithms to reflect the design
choices used in this implementation.
-
+
A set of slides show a high level overview of the algorithm and run
an example through the scalar evolution analyzer:
http://cri.ensmp.fr/~pop/gcc/mar04/slides.pdf
@@ -316,7 +316,7 @@ static inline struct scev_info_str *
new_scev_info_str (basic_block instantiated_below, tree var)
{
struct scev_info_str *res;
-
+
res = GGC_NEW (struct scev_info_str);
res->var = var;
res->chrec = chrec_not_analyzed_yet;
@@ -377,7 +377,7 @@ find_var_scev_info (basic_block instantiated_below, tree var)
/* Return true when CHREC contains symbolic names defined in
LOOP_NB. */
-bool
+bool
chrec_contains_symbols_defined_in_loop (const_tree chrec, unsigned loop_nb)
{
int i, n;
@@ -413,7 +413,7 @@ chrec_contains_symbols_defined_in_loop (const_tree chrec, unsigned loop_nb)
n = TREE_OPERAND_LENGTH (chrec);
for (i = 0; i < n; i++)
- if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (chrec, i),
+ if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (chrec, i),
loop_nb))
return true;
return false;
@@ -435,37 +435,37 @@ loop_phi_node_p (gimple phi)
In general, in the case of multivariate evolutions we want to get
the evolution in different loops. LOOP specifies the level for
which to get the evolution.
-
+
Example:
-
+
| for (j = 0; j < 100; j++)
| {
| for (k = 0; k < 100; k++)
| {
- | i = k + j; - Here the value of i is a function of j, k.
+ | i = k + j; - Here the value of i is a function of j, k.
| }
- | ... = i - Here the value of i is a function of j.
+ | ... = i - Here the value of i is a function of j.
| }
- | ... = i - Here the value of i is a scalar.
-
- Example:
-
+ | ... = i - Here the value of i is a scalar.
+
+ Example:
+
| i_0 = ...
| loop_1 10 times
| i_1 = phi (i_0, i_2)
| i_2 = i_1 + 2
| endloop
-
+
This loop has the same effect as:
LOOP_1 has the same effect as:
-
+
| i_1 = i_0 + 20
-
- The overall effect of the loop, "i_0 + 20" in the previous example,
- is obtained by passing in the parameters: LOOP = 1,
+
+ The overall effect of the loop, "i_0 + 20" in the previous example,
+ is obtained by passing in the parameters: LOOP = 1,
EVOLUTION_FN = {i_0, +, 2}_1.
*/
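   As a quick check on the example above: the inner loop runs 10 times with a
   step of 2, so evaluating the evolution at the iteration count gives
   i_0 + 2 * 10 = i_0 + 20, which is the overall effect stated for
   EVOLUTION_FN = {i_0, +, 2}_1.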
-
+
tree
compute_overall_effect_of_inner_loop (struct loop *loop, tree evolution_fn)
{
@@ -503,11 +503,11 @@ compute_overall_effect_of_inner_loop (struct loop *loop, tree evolution_fn)
else
return evolution_fn;
}
-
+
/* If the evolution function is an invariant, there is nothing to do. */
else if (no_evolution_in_loop_p (evolution_fn, loop->num, &val) && val)
return evolution_fn;
-
+
else
return chrec_dont_know;
}
@@ -521,14 +521,14 @@ chrec_is_positive (tree chrec, bool *value)
{
bool value0, value1, value2;
tree end_value, nb_iter;
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
if (!chrec_is_positive (CHREC_LEFT (chrec), &value0)
|| !chrec_is_positive (CHREC_RIGHT (chrec), &value1))
return false;
-
+
/* FIXME -- overflows. */
if (value0 == value1)
{
@@ -555,17 +555,17 @@ chrec_is_positive (tree chrec, bool *value)
#endif
end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter);
-
+
if (!chrec_is_positive (end_value, &value2))
return false;
-
+
*value = value0;
return value0 == value1;
-
+
case INTEGER_CST:
*value = (tree_int_cst_sgn (chrec) == 1);
return true;
-
+
default:
return false;
}
@@ -577,12 +577,12 @@ static void
set_scalar_evolution (basic_block instantiated_below, tree scalar, tree chrec)
{
tree *scalar_info;
-
+
if (TREE_CODE (scalar) != SSA_NAME)
return;
scalar_info = find_var_scev_info (instantiated_below, scalar);
-
+
if (dump_file)
{
if (dump_flags & TDF_DETAILS)
@@ -599,7 +599,7 @@ set_scalar_evolution (basic_block instantiated_below, tree scalar, tree chrec)
if (dump_flags & TDF_STATS)
nb_set_scev++;
}
-
+
*scalar_info = chrec;
}
@@ -610,7 +610,7 @@ static tree
get_scalar_evolution (basic_block instantiated_below, tree scalar)
{
tree res;
-
+
if (dump_file)
{
if (dump_flags & TDF_DETAILS)
@@ -623,7 +623,7 @@ get_scalar_evolution (basic_block instantiated_below, tree scalar)
if (dump_flags & TDF_STATS)
nb_get_scev++;
}
-
+
switch (TREE_CODE (scalar))
{
case SSA_NAME:
@@ -640,14 +640,14 @@ get_scalar_evolution (basic_block instantiated_below, tree scalar)
res = chrec_not_analyzed_yet;
break;
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " (scalar_evolution = ");
print_generic_expr (dump_file, res, 0);
fprintf (dump_file, "))\n");
}
-
+
return res;
}
@@ -655,8 +655,8 @@ get_scalar_evolution (basic_block instantiated_below, tree scalar)
function for an assignment of the form "a = b + c", where "a" and
"b" are on the strongly connected component. CHREC_BEFORE is the
information that we already have collected up to this point.
- TO_ADD is the evolution of "c".
-
+ TO_ADD is the evolution of "c".
+
When CHREC_BEFORE has an evolution part in LOOP_NB, add to this
evolution the expression TO_ADD, otherwise construct an evolution
part for this loop. */
@@ -678,7 +678,7 @@ add_to_evolution_1 (unsigned loop_nb, tree chrec_before, tree to_add,
unsigned var;
type = chrec_type (chrec_before);
-
+
/* When there is no evolution part in this loop, build it. */
if (chloop != loop)
{
@@ -712,7 +712,7 @@ add_to_evolution_1 (unsigned loop_nb, tree chrec_before, tree to_add,
return build_polynomial_chrec (CHREC_VARIABLE (chrec_before),
left, right);
}
-
+
default:
/* These nodes do not depend on a loop. */
if (chrec_before == chrec_dont_know)
@@ -725,155 +725,155 @@ add_to_evolution_1 (unsigned loop_nb, tree chrec_before, tree to_add,
}
/* Add TO_ADD to the evolution part of CHREC_BEFORE in the dimension
- of LOOP_NB.
-
+ of LOOP_NB.
+
Description (provided for completeness, for those who read code in
a plane, and for my poor 62 bytes brain that would have forgotten
all this in the next two or three months):
-
+
The algorithm of translation of programs from the SSA representation
into the chrecs syntax is based on a pattern matching. After having
reconstructed the overall tree expression for a loop, there are only
two cases that can arise:
-
+
1. a = loop-phi (init, a + expr)
2. a = loop-phi (init, expr)
-
+
where EXPR is either a scalar constant with respect to the analyzed
loop (this is a degree 0 polynomial), or an expression containing
other loop-phi definitions (these are higher degree polynomials).
-
+
Examples:
-
- 1.
+
+ 1.
| init = ...
| loop_1
| a = phi (init, a + 5)
| endloop
-
- 2.
+
+ 2.
| inita = ...
| initb = ...
| loop_1
| a = phi (inita, 2 * b + 3)
| b = phi (initb, b + 1)
| endloop
-
- For the first case, the semantics of the SSA representation is:
-
+
+ For the first case, the semantics of the SSA representation is:
+
| a (x) = init + \sum_{j = 0}^{x - 1} expr (j)
-
+
that is, there is a loop index "x" that determines the scalar value
of the variable during the loop execution. During the first
iteration, the value is that of the initial condition INIT, while
during the subsequent iterations, it is the sum of the initial
condition with the sum of all the values of EXPR from the initial
- iteration to the before last considered iteration.
-
+ iteration to the before last considered iteration.
+
For the second case, the semantics of the SSA program is:
-
+
| a (x) = init, if x = 0;
| expr (x - 1), otherwise.
-
+
The second case corresponds to the PEELED_CHREC, whose syntax is
- close to the syntax of a loop-phi-node:
-
+ close to the syntax of a loop-phi-node:
+
| phi (init, expr) vs. (init, expr)_x
-
+
The proof of the translation algorithm for the first case is a
- proof by structural induction based on the degree of EXPR.
-
+ proof by structural induction based on the degree of EXPR.
+
Degree 0:
When EXPR is a constant with respect to the analyzed loop, or in
other words when EXPR is a polynomial of degree 0, the evolution of
the variable A in the loop is an affine function with an initial
condition INIT, and a step EXPR. In order to show this, we start
from the semantics of the SSA representation:
-
+
f (x) = init + \sum_{j = 0}^{x - 1} expr (j)
-
+
and since "expr (j)" is a constant with respect to "j",
-
- f (x) = init + x * expr
-
+
+ f (x) = init + x * expr
+
Finally, based on the semantics of the pure sum chrecs, by
identification we get the corresponding chrecs syntax:
-
- f (x) = init * \binom{x}{0} + expr * \binom{x}{1}
+
+ f (x) = init * \binom{x}{0} + expr * \binom{x}{1}
f (x) -> {init, +, expr}_x
-
+
Higher degree:
Suppose that EXPR is a polynomial of degree N with respect to the
analyzed loop_x for which we have already determined that it is
written under the chrecs syntax:
-
+
| expr (x) -> {b_0, +, b_1, +, ..., +, b_{n-1}} (x)
-
+
We start from the semantics of the SSA program:
-
+
| f (x) = init + \sum_{j = 0}^{x - 1} expr (j)
|
- | f (x) = init + \sum_{j = 0}^{x - 1}
+ | f (x) = init + \sum_{j = 0}^{x - 1}
| (b_0 * \binom{j}{0} + ... + b_{n-1} * \binom{j}{n-1})
|
- | f (x) = init + \sum_{j = 0}^{x - 1}
- | \sum_{k = 0}^{n - 1} (b_k * \binom{j}{k})
+ | f (x) = init + \sum_{j = 0}^{x - 1}
+ | \sum_{k = 0}^{n - 1} (b_k * \binom{j}{k})
|
- | f (x) = init + \sum_{k = 0}^{n - 1}
- | (b_k * \sum_{j = 0}^{x - 1} \binom{j}{k})
+ | f (x) = init + \sum_{k = 0}^{n - 1}
+ | (b_k * \sum_{j = 0}^{x - 1} \binom{j}{k})
|
- | f (x) = init + \sum_{k = 0}^{n - 1}
- | (b_k * \binom{x}{k + 1})
+ | f (x) = init + \sum_{k = 0}^{n - 1}
+ | (b_k * \binom{x}{k + 1})
|
- | f (x) = init + b_0 * \binom{x}{1} + ...
- | + b_{n-1} * \binom{x}{n}
+ | f (x) = init + b_0 * \binom{x}{1} + ...
+ | + b_{n-1} * \binom{x}{n}
|
- | f (x) = init * \binom{x}{0} + b_0 * \binom{x}{1} + ...
- | + b_{n-1} * \binom{x}{n}
+ | f (x) = init * \binom{x}{0} + b_0 * \binom{x}{1} + ...
+ | + b_{n-1} * \binom{x}{n}
|
-
+
And finally from the definition of the chrecs syntax, we identify:
- | f (x) -> {init, +, b_0, +, ..., +, b_{n-1}}_x
-
+ | f (x) -> {init, +, b_0, +, ..., +, b_{n-1}}_x
+
This shows the mechanism that stands behind the add_to_evolution
function. An important point is that the use of symbolic
parameters avoids the need of an analysis schedule.
-
+
Example:
-
+
| inita = ...
| initb = ...
- | loop_1
+ | loop_1
| a = phi (inita, a + 2 + b)
| b = phi (initb, b + 1)
| endloop
-
+
When analyzing "a", the algorithm keeps "b" symbolically:
-
+
| a -> {inita, +, 2 + b}_1
-
+
Then, after instantiation, the analyzer ends on the evolution:
-
+
| a -> {inita, +, 2 + initb, +, 1}_1
*/
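   As a concrete check of this mechanism on the last example: with
   b -> {initb, +, 1}_1, i.e. b (x) = initb + x, the closed form of "a" is

   | a (x) = inita + \sum_{j = 0}^{x - 1} (2 + initb + j)
   |       = inita + (2 + initb) * \binom{x}{1} + \binom{x}{2}

   and by identification with the chrecs syntax this is exactly the
   instantiated evolution {inita, +, 2 + initb, +, 1}_1 given above.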
-static tree
+static tree
add_to_evolution (unsigned loop_nb, tree chrec_before, enum tree_code code,
tree to_add, gimple at_stmt)
{
tree type = chrec_type (to_add);
tree res = NULL_TREE;
-
+
if (to_add == NULL_TREE)
return chrec_before;
-
+
/* TO_ADD is either a scalar, or a parameter. TO_ADD is not
instantiated at this point. */
if (TREE_CODE (to_add) == POLYNOMIAL_CHREC)
/* This should not happen. */
return chrec_dont_know;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "(add_to_evolution \n");
@@ -905,7 +905,7 @@ add_to_evolution (unsigned loop_nb, tree chrec_before, enum tree_code code,
/* Helper function. */
static inline tree
-set_nb_iterations_in_loop (struct loop *loop,
+set_nb_iterations_in_loop (struct loop *loop,
tree res)
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -914,7 +914,7 @@ set_nb_iterations_in_loop (struct loop *loop,
print_generic_expr (dump_file, res, 0);
fprintf (dump_file, "))\n");
}
-
+
loop->nb_iterations = res;
return res;
}
@@ -929,50 +929,50 @@ set_nb_iterations_in_loop (struct loop *loop,
guards the exit edge. If the expression is too difficult to
analyze, then give up. */
-gimple
+gimple
get_loop_exit_condition (const struct loop *loop)
{
gimple res = NULL;
edge exit_edge = single_exit (loop);
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(get_loop_exit_condition \n ");
-
+
if (exit_edge)
{
gimple stmt;
-
+
stmt = last_stmt (exit_edge->src);
if (gimple_code (stmt) == GIMPLE_COND)
res = stmt;
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
print_gimple_stmt (dump_file, res, 0, 0);
fprintf (dump_file, ")\n");
}
-
+
return res;
}
/* Recursively determine and enqueue the exit conditions for a loop. */
-static void
-get_exit_conditions_rec (struct loop *loop,
+static void
+get_exit_conditions_rec (struct loop *loop,
VEC(gimple,heap) **exit_conditions)
{
if (!loop)
return;
-
+
/* Recurse on the inner loops, then on the next (sibling) loops. */
get_exit_conditions_rec (loop->inner, exit_conditions);
get_exit_conditions_rec (loop->next, exit_conditions);
-
+
if (single_exit (loop))
{
gimple loop_condition = get_loop_exit_condition (loop);
-
+
if (loop_condition)
VEC_safe_push (gimple, heap, *exit_conditions, loop_condition);
}
@@ -985,7 +985,7 @@ static void
select_loops_exit_conditions (VEC(gimple,heap) **exit_conditions)
{
struct loop *function_body = current_loops->tree_root;
-
+
get_exit_conditions_rec (function_body->inner, exit_conditions);
}
@@ -1020,34 +1020,34 @@ follow_ssa_edge_binary (struct loop *loop, gimple at_stmt,
{
if (TREE_CODE (rhs1) == SSA_NAME)
{
- /* Match an assignment under the form:
+ /* Match an assignment under the form:
"a = b + c". */
-
+
/* We want only assignments of form "name + name" contribute to
LIMIT, as the other cases do not necessarily contribute to
the complexity of the expression. */
limit++;
evol = *evolution_of_loop;
- res = follow_ssa_edge
+ res = follow_ssa_edge
(loop, SSA_NAME_DEF_STMT (rhs0), halting_phi, &evol, limit);
-
+
if (res == t_true)
- *evolution_of_loop = add_to_evolution
- (loop->num,
- chrec_convert (type, evol, at_stmt),
+ *evolution_of_loop = add_to_evolution
+ (loop->num,
+ chrec_convert (type, evol, at_stmt),
code, rhs1, at_stmt);
-
+
else if (res == t_false)
{
- res = follow_ssa_edge
- (loop, SSA_NAME_DEF_STMT (rhs1), halting_phi,
+ res = follow_ssa_edge
+ (loop, SSA_NAME_DEF_STMT (rhs1), halting_phi,
evolution_of_loop, limit);
-
+
if (res == t_true)
- *evolution_of_loop = add_to_evolution
- (loop->num,
- chrec_convert (type, *evolution_of_loop, at_stmt),
+ *evolution_of_loop = add_to_evolution
+ (loop->num,
+ chrec_convert (type, *evolution_of_loop, at_stmt),
code, rhs0, at_stmt);
else if (res == t_dont_know)
@@ -1057,16 +1057,16 @@ follow_ssa_edge_binary (struct loop *loop, gimple at_stmt,
else if (res == t_dont_know)
*evolution_of_loop = chrec_dont_know;
}
-
+
else
{
- /* Match an assignment under the form:
+ /* Match an assignment under the form:
"a = b + ...". */
- res = follow_ssa_edge
- (loop, SSA_NAME_DEF_STMT (rhs0), halting_phi,
+ res = follow_ssa_edge
+ (loop, SSA_NAME_DEF_STMT (rhs0), halting_phi,
evolution_of_loop, limit);
if (res == t_true)
- *evolution_of_loop = add_to_evolution
+ *evolution_of_loop = add_to_evolution
(loop->num, chrec_convert (type, *evolution_of_loop,
at_stmt),
code, rhs1, at_stmt);
@@ -1075,16 +1075,16 @@ follow_ssa_edge_binary (struct loop *loop, gimple at_stmt,
*evolution_of_loop = chrec_dont_know;
}
}
-
+
else if (TREE_CODE (rhs1) == SSA_NAME)
{
- /* Match an assignment under the form:
+ /* Match an assignment under the form:
"a = ... + c". */
- res = follow_ssa_edge
- (loop, SSA_NAME_DEF_STMT (rhs1), halting_phi,
+ res = follow_ssa_edge
+ (loop, SSA_NAME_DEF_STMT (rhs1), halting_phi,
evolution_of_loop, limit);
if (res == t_true)
- *evolution_of_loop = add_to_evolution
+ *evolution_of_loop = add_to_evolution
(loop->num, chrec_convert (type, *evolution_of_loop,
at_stmt),
code, rhs0, at_stmt);
@@ -1094,17 +1094,17 @@ follow_ssa_edge_binary (struct loop *loop, gimple at_stmt,
}
else
- /* Otherwise, match an assignment under the form:
+ /* Otherwise, match an assignment under the form:
"a = ... + ...". */
/* And there is nothing to do. */
res = t_false;
break;
-
+
case MINUS_EXPR:
/* This case is under the form "opnd0 = rhs0 - rhs1". */
if (TREE_CODE (rhs0) == SSA_NAME)
{
- /* Match an assignment under the form:
+ /* Match an assignment under the form:
"a = b - ...". */
/* We want only assignments of form "name - name" contribute to
@@ -1113,10 +1113,10 @@ follow_ssa_edge_binary (struct loop *loop, gimple at_stmt,
if (TREE_CODE (rhs1) == SSA_NAME)
limit++;
- res = follow_ssa_edge (loop, SSA_NAME_DEF_STMT (rhs0), halting_phi,
+ res = follow_ssa_edge (loop, SSA_NAME_DEF_STMT (rhs0), halting_phi,
evolution_of_loop, limit);
if (res == t_true)
- *evolution_of_loop = add_to_evolution
+ *evolution_of_loop = add_to_evolution
(loop->num, chrec_convert (type, *evolution_of_loop, at_stmt),
MINUS_EXPR, rhs1, at_stmt);
@@ -1124,7 +1124,7 @@ follow_ssa_edge_binary (struct loop *loop, gimple at_stmt,
*evolution_of_loop = chrec_dont_know;
}
else
- /* Otherwise, match an assignment under the form:
+ /* Otherwise, match an assignment under the form:
"a = ... - ...". */
/* And there is nothing to do. */
res = t_false;
@@ -1136,12 +1136,12 @@ follow_ssa_edge_binary (struct loop *loop, gimple at_stmt,
return res;
}
-
+
/* Follow the ssa edge into the expression EXPR.
Return true if the strongly connected component has been found. */
static t_bool
-follow_ssa_edge_expr (struct loop *loop, gimple at_stmt, tree expr,
+follow_ssa_edge_expr (struct loop *loop, gimple at_stmt, tree expr,
gimple halting_phi, tree *evolution_of_loop, int limit)
{
enum tree_code code = TREE_CODE (expr);
@@ -1149,10 +1149,10 @@ follow_ssa_edge_expr (struct loop *loop, gimple at_stmt, tree expr,
t_bool res;
/* The EXPR is one of the following cases:
- - an SSA_NAME,
+ - an SSA_NAME,
- an INTEGER_CST,
- - a PLUS_EXPR,
- - a POINTER_PLUS_EXPR,
+ - a PLUS_EXPR,
+ - a POINTER_PLUS_EXPR,
- a MINUS_EXPR,
- an ASSERT_EXPR,
- other cases are not yet handled. */
@@ -1173,7 +1173,7 @@ follow_ssa_edge_expr (struct loop *loop, gimple at_stmt, tree expr,
case SSA_NAME:
/* This assignment is under the form: "a_1 = b_2". */
- res = follow_ssa_edge
+ res = follow_ssa_edge
(loop, SSA_NAME_DEF_STMT (expr), halting_phi, evolution_of_loop, limit);
break;
@@ -1273,8 +1273,8 @@ backedge_phi_arg_p (gimple phi, int i)
static inline t_bool
follow_ssa_edge_in_condition_phi_branch (int i,
- struct loop *loop,
- gimple condition_phi,
+ struct loop *loop,
+ gimple condition_phi,
gimple halting_phi,
tree *evolution_of_branch,
tree init_cond, int limit)
@@ -1290,15 +1290,15 @@ follow_ssa_edge_in_condition_phi_branch (int i,
if (TREE_CODE (branch) == SSA_NAME)
{
*evolution_of_branch = init_cond;
- return follow_ssa_edge (loop, SSA_NAME_DEF_STMT (branch), halting_phi,
+ return follow_ssa_edge (loop, SSA_NAME_DEF_STMT (branch), halting_phi,
evolution_of_branch, limit);
}
- /* This case occurs when one of the condition branches sets
+ /* This case occurs when one of the condition branches sets
the variable to a constant: i.e. a phi-node like
- "a_2 = PHI <a_7(5), 2(6)>;".
-
- FIXME: This case have to be refined correctly:
+ "a_2 = PHI <a_7(5), 2(6)>;".
+
+ FIXME: This case have to be refined correctly:
in some cases it is possible to say something better than
chrec_dont_know, for example using a wrap-around notation. */
return t_false;
@@ -1309,8 +1309,8 @@ follow_ssa_edge_in_condition_phi_branch (int i,
static t_bool
follow_ssa_edge_in_condition_phi (struct loop *loop,
- gimple condition_phi,
- gimple halting_phi,
+ gimple condition_phi,
+ gimple halting_phi,
tree *evolution_of_loop, int limit)
{
int i, n;
@@ -1345,7 +1345,7 @@ follow_ssa_edge_in_condition_phi (struct loop *loop,
*evolution_of_loop = chrec_merge (*evolution_of_loop,
evolution_of_branch);
}
-
+
return t_true;
}
@@ -1356,7 +1356,7 @@ follow_ssa_edge_in_condition_phi (struct loop *loop,
static t_bool
follow_ssa_edge_inner_loop_phi (struct loop *outer_loop,
- gimple loop_phi_node,
+ gimple loop_phi_node,
gimple halting_phi,
tree *evolution_of_loop, int limit)
{
@@ -1406,16 +1406,16 @@ follow_ssa_edge (struct loop *loop, gimple def, gimple halting_phi,
tree *evolution_of_loop, int limit)
{
struct loop *def_loop;
-
+
if (gimple_nop_p (def))
return t_false;
-
+
/* Give up if the path is longer than the MAX that we allow. */
if (limit > PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
return t_dont_know;
-
+
def_loop = loop_containing_stmt (def);
-
+
switch (gimple_code (def))
{
case GIMPLE_PHI:
@@ -1424,7 +1424,7 @@ follow_ssa_edge (struct loop *loop, gimple def, gimple halting_phi,
record their evolutions. Finally, merge the collected
information and set the approximation to the main
variable. */
- return follow_ssa_edge_in_condition_phi
+ return follow_ssa_edge_in_condition_phi
(loop, def, halting_phi, evolution_of_loop, limit);
/* When the analyzed phi is the halting_phi, the
@@ -1432,25 +1432,25 @@ follow_ssa_edge (struct loop *loop, gimple def, gimple halting_phi,
the halting_phi to itself in the loop. */
if (def == halting_phi)
return t_true;
-
+
/* Otherwise, the evolution of the HALTING_PHI depends
on the evolution of another loop-phi-node, i.e. the
evolution function is a higher degree polynomial. */
if (def_loop == loop)
return t_false;
-
+
/* Inner loop. */
if (flow_loop_nested_p (loop, def_loop))
- return follow_ssa_edge_inner_loop_phi
+ return follow_ssa_edge_inner_loop_phi
(loop, def, halting_phi, evolution_of_loop, limit + 1);
/* Outer loop. */
return t_false;
case GIMPLE_ASSIGN:
- return follow_ssa_edge_in_rhs (loop, def, halting_phi,
+ return follow_ssa_edge_in_rhs (loop, def, halting_phi,
evolution_of_loop, limit);
-
+
default:
/* At this level of abstraction, the program is just a set
of GIMPLE_ASSIGNs and PHI_NODEs. In principle there is no
@@ -1465,14 +1465,14 @@ follow_ssa_edge (struct loop *loop, gimple def, gimple halting_phi,
function from LOOP_PHI_NODE to LOOP_PHI_NODE in the loop. */
static tree
-analyze_evolution_in_loop (gimple loop_phi_node,
+analyze_evolution_in_loop (gimple loop_phi_node,
tree init_cond)
{
int i, n = gimple_phi_num_args (loop_phi_node);
tree evolution_function = chrec_not_analyzed_yet;
struct loop *loop = loop_containing_stmt (loop_phi_node);
basic_block bb;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "(analyze_evolution_in_loop \n");
@@ -1480,7 +1480,7 @@ analyze_evolution_in_loop (gimple loop_phi_node,
print_gimple_stmt (dump_file, loop_phi_node, 0, 0);
fprintf (dump_file, ")\n");
}
-
+
for (i = 0; i < n; i++)
{
tree arg = PHI_ARG_DEF (loop_phi_node, i);
@@ -1519,23 +1519,23 @@ analyze_evolution_in_loop (gimple loop_phi_node,
loop_phi_node by following the ssa edges, the
evolution is represented by a peeled chrec, i.e. the
first iteration, EV_FN has the value INIT_COND, then
- all the other iterations it has the value of ARG.
+ all the other iterations it has the value of ARG.
For the moment, PEELED_CHREC nodes are not built. */
if (res != t_true)
ev_fn = chrec_dont_know;
-
+
/* When there are multiple back edges of the loop (which in fact never
happens currently, but nevertheless), merge their evolutions. */
evolution_function = chrec_merge (evolution_function, ev_fn);
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " (evolution_function = ");
print_generic_expr (dump_file, evolution_function, 0);
fprintf (dump_file, "))\n");
}
-
+
return evolution_function;
}
@@ -1546,13 +1546,13 @@ analyze_evolution_in_loop (gimple loop_phi_node,
This analyzer does not analyze the evolution outside the current
loop, and leaves this task to the on-demand tree reconstructor. */
-static tree
+static tree
analyze_initial_condition (gimple loop_phi_node)
{
int i, n;
tree init_cond = chrec_not_analyzed_yet;
struct loop *loop = loop_containing_stmt (loop_phi_node);
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "(analyze_initial_condition \n");
@@ -1560,13 +1560,13 @@ analyze_initial_condition (gimple loop_phi_node)
print_gimple_stmt (dump_file, loop_phi_node, 0, 0);
fprintf (dump_file, ")\n");
}
-
+
n = gimple_phi_num_args (loop_phi_node);
for (i = 0; i < n; i++)
{
tree branch = PHI_ARG_DEF (loop_phi_node, i);
basic_block bb = gimple_phi_arg_edge (loop_phi_node, i)->src;
-
+
/* When the branch is oriented to the loop's body, it does
not contribute to the initial condition. */
if (flow_bb_inside_loop_p (loop, bb))
@@ -1611,19 +1611,19 @@ analyze_initial_condition (gimple loop_phi_node)
print_generic_expr (dump_file, init_cond, 0);
fprintf (dump_file, "))\n");
}
-
+
return init_cond;
}
/* Analyze the scalar evolution for LOOP_PHI_NODE. */
-static tree
+static tree
interpret_loop_phi (struct loop *loop, gimple loop_phi_node)
{
tree res;
struct loop *phi_loop = loop_containing_stmt (loop_phi_node);
tree init_cond;
-
+
if (phi_loop != loop)
{
struct loop *subloop;
@@ -1654,11 +1654,11 @@ interpret_condition_phi (struct loop *loop, gimple condition_phi)
{
int i, n = gimple_phi_num_args (condition_phi);
tree res = chrec_not_analyzed_yet;
-
+
for (i = 0; i < n; i++)
{
tree branch_chrec;
-
+
if (backedge_phi_arg_p (condition_phi, i))
{
res = chrec_dont_know;
@@ -1667,7 +1667,7 @@ interpret_condition_phi (struct loop *loop, gimple condition_phi)
branch_chrec = analyze_scalar_evolution
(loop, PHI_ARG_DEF (condition_phi, i));
-
+
res = chrec_merge (res, branch_chrec);
}
@@ -1723,7 +1723,7 @@ interpret_rhs_expr (struct loop *loop, gimple at_stmt,
chrec2 = chrec_convert (type, chrec2, at_stmt);
res = chrec_fold_plus (type, chrec1, chrec2);
break;
-
+
case MINUS_EXPR:
chrec1 = analyze_scalar_evolution (loop, rhs1);
chrec2 = analyze_scalar_evolution (loop, rhs2);
@@ -1756,17 +1756,17 @@ interpret_rhs_expr (struct loop *loop, gimple at_stmt,
chrec2 = chrec_convert (type, chrec2, at_stmt);
res = chrec_fold_multiply (type, chrec1, chrec2);
break;
-
+
CASE_CONVERT:
chrec1 = analyze_scalar_evolution (loop, rhs1);
res = chrec_convert (type, chrec1, at_stmt);
break;
-
+
default:
res = chrec_dont_know;
break;
}
-
+
return res;
}
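
The PLUS_EXPR and MINUS_EXPR cases above fold the chrecs of the two operands with chrec_fold_plus. As a standalone illustration only (the struct and function names below are invented for the sketch and are not GCC's representation), adding two affine evolutions in the same loop simply adds their bases and their steps: {a, +, b}_x + {c, +, d}_x = {a + c, +, b + d}_x.

#include <stdio.h>

/* Hypothetical stand-alone model of an affine chrec {base, +, step}
   in a single loop; not the GCC data structure.  */
struct affine_chrec
{
  long base;
  long step;
};

/* What chrec_fold_plus amounts to for two affine chrecs evolving in
   the same loop.  */
static struct affine_chrec
affine_chrec_plus (struct affine_chrec l, struct affine_chrec r)
{
  struct affine_chrec res;
  res.base = l.base + r.base;
  res.step = l.step + r.step;
  return res;
}

int
main (void)
{
  struct affine_chrec i = { 0, 1 };    /* i = {0, +, 1}_1  */
  struct affine_chrec j = { 23, 5 };   /* j = {23, +, 5}_1 */
  struct affine_chrec s = affine_chrec_plus (i, j);

  printf ("{%ld, +, %ld}_1\n", s.base, s.step);   /* Prints {23, +, 6}_1 */
  return 0;
}
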
@@ -1805,7 +1805,7 @@ interpret_gimple_assign (struct loop *loop, gimple stmt)
-/* This section contains all the entry points:
+/* This section contains all the entry points:
- number_of_iterations_in_loop,
- analyze_scalar_evolution,
- instantiate_parameters.
@@ -1814,9 +1814,9 @@ interpret_gimple_assign (struct loop *loop, gimple stmt)
/* Compute and return the evolution function in WRTO_LOOP, the nearest
common ancestor of DEF_LOOP and USE_LOOP. */
-static tree
-compute_scalar_evolution_in_loop (struct loop *wrto_loop,
- struct loop *def_loop,
+static tree
+compute_scalar_evolution_in_loop (struct loop *wrto_loop,
+ struct loop *def_loop,
tree ev)
{
tree res;
@@ -1860,7 +1860,7 @@ analyze_scalar_evolution_1 (struct loop *loop, tree var, tree res)
if (res != chrec_not_analyzed_yet)
{
if (loop != bb->loop_father)
- res = compute_scalar_evolution_in_loop
+ res = compute_scalar_evolution_in_loop
(find_common_loop (loop, bb->loop_father), bb->loop_father, res);
goto set_and_end;
@@ -1906,18 +1906,18 @@ analyze_scalar_evolution_1 (struct loop *loop, tree var, tree res)
/* Analyzes and returns the scalar evolution of the ssa_name VAR in
LOOP. LOOP is the loop in which the variable is used.
-
+
Example of use: having a pointer VAR to a SSA_NAME node, STMT a
pointer to the statement that uses this variable, in order to
determine the evolution function of the variable, use the following
calls:
-
+
loop_p loop = loop_containing_stmt (stmt);
tree chrec_with_symbols = analyze_scalar_evolution (loop, var);
tree chrec_instantiated = instantiate_parameters (loop, chrec_with_symbols);
*/
-tree
+tree
analyze_scalar_evolution (struct loop *loop, tree var)
{
tree res;
@@ -1945,8 +1945,8 @@ analyze_scalar_evolution (struct loop *loop, tree var)
FOLDED_CASTS is set to true if resolve_mixers used
chrec_convert_aggressive (TODO -- not really, we are way too conservative
- at the moment in order to keep things simple).
-
+ at the moment in order to keep things simple).
+
To illustrate the meaning of USE_LOOP and WRTO_LOOP, consider the following
example:
@@ -1997,7 +1997,7 @@ analyze_scalar_evolution_in_loop (struct loop *wrto_loop, struct loop *use_loop,
bool val = false;
tree ev = version, tmp;
- /* We cannot just do
+ /* We cannot just do
tmp = analyze_scalar_evolution (use_loop, version);
ev = resolve_mixers (wrto_loop, tmp);
@@ -2048,7 +2048,7 @@ get_instantiated_value (htab_t cache, basic_block instantiated_below,
tree version)
{
struct scev_info_str *info, pattern;
-
+
pattern.var = version;
pattern.instantiated_below = instantiated_below;
info = (struct scev_info_str *) htab_find (cache, &pattern);
@@ -2068,7 +2068,7 @@ set_instantiated_value (htab_t cache, basic_block instantiated_below,
{
struct scev_info_str *info, pattern;
PTR *slot;
-
+
pattern.var = version;
pattern.instantiated_below = instantiated_below;
slot = htab_find_slot (cache, &pattern, INSERT);
@@ -2636,7 +2636,7 @@ instantiate_scev (basic_block instantiate_below, struct loop *evolution_loop,
print_generic_expr (dump_file, chrec, 0);
fprintf (dump_file, ")\n");
}
-
+
res = instantiate_scev_r (instantiate_below, evolution_loop, chrec, false,
cache, 0);
@@ -2648,7 +2648,7 @@ instantiate_scev (basic_block instantiate_below, struct loop *evolution_loop,
}
htab_delete (cache);
-
+
return res;
}
@@ -2667,27 +2667,27 @@ resolve_mixers (struct loop *loop, tree chrec)
return ret;
}
-/* Entry point for the analysis of the number of iterations pass.
+/* Entry point for the analysis of the number of iterations pass.
This function tries to safely approximate the number of iterations
the loop will run. When this property is not decidable at compile
time, the result is chrec_dont_know. Otherwise the result is
a scalar or a symbolic parameter.
-
+
Example of analysis: suppose that the loop has an exit condition:
-
+
"if (b > 49) goto end_loop;"
-
+
and that in a previous analysis we have determined that the
variable 'b' has an evolution function:
-
- "EF = {23, +, 5}_2".
-
+
+ "EF = {23, +, 5}_2".
+
When we evaluate the function at the point 5, i.e. the value of the
variable 'b' after 5 iterations in the loop, we have EF (5) = 48,
and EF (6) = 53. In this case the value of 'b' on exit is '53' and
the loop body has been executed 6 times. */
-tree
+tree
number_of_latch_executions (struct loop *loop)
{
tree res, type;
@@ -2703,7 +2703,7 @@ number_of_latch_executions (struct loop *loop)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "(number_of_iterations_in_loop\n");
-
+
exit = single_exit (loop);
if (!exit)
goto end;
@@ -2733,7 +2733,7 @@ end:
expression, the caller is responsible for dealing with this
the possible overflow. */
-tree
+tree
number_of_exit_cond_executions (struct loop *loop)
{
tree ret = number_of_latch_executions (loop);
@@ -2754,14 +2754,14 @@ number_of_exit_cond_executions (struct loop *loop)
This function computes the number of iterations for all the loops
from the EXIT_CONDITIONS array. */
-static void
+static void
number_of_iterations_for_all_loops (VEC(gimple,heap) **exit_conditions)
{
unsigned int i;
unsigned nb_chrec_dont_know_loops = 0;
unsigned nb_static_loops = 0;
gimple cond;
-
+
for (i = 0; VEC_iterate (gimple, *exit_conditions, i, cond); i++)
{
tree res = number_of_latch_executions (loop_containing_stmt (cond));
@@ -2770,7 +2770,7 @@ number_of_iterations_for_all_loops (VEC(gimple,heap) **exit_conditions)
else
nb_static_loops++;
}
-
+
if (dump_file)
{
fprintf (dump_file, "\n(\n");
@@ -2780,7 +2780,7 @@ number_of_iterations_for_all_loops (VEC(gimple,heap) **exit_conditions)
fprintf (dump_file, "%d\tnb_total_loops\n", number_of_loops ());
fprintf (dump_file, "-----------------------------------------\n");
fprintf (dump_file, ")\n\n");
-
+
print_loops (dump_file, 3);
}
}
@@ -2789,7 +2789,7 @@ number_of_iterations_for_all_loops (VEC(gimple,heap) **exit_conditions)
/* Counters for the stats. */
-struct chrec_stats
+struct chrec_stats
{
unsigned nb_chrecs;
unsigned nb_affine;
@@ -2821,15 +2821,15 @@ dump_chrecs_stats (FILE *file, struct chrec_stats *stats)
fprintf (file, "-----------------------------------------\n");
fprintf (file, "%d\taffine univariate chrecs\n", stats->nb_affine);
fprintf (file, "%d\taffine multivariate chrecs\n", stats->nb_affine_multivar);
- fprintf (file, "%d\tdegree greater than 2 polynomials\n",
+ fprintf (file, "%d\tdegree greater than 2 polynomials\n",
stats->nb_higher_poly);
fprintf (file, "%d\tchrec_dont_know chrecs\n", stats->nb_chrec_dont_know);
fprintf (file, "-----------------------------------------\n");
fprintf (file, "%d\ttotal chrecs\n", stats->nb_chrecs);
- fprintf (file, "%d\twith undetermined coefficients\n",
+ fprintf (file, "%d\twith undetermined coefficients\n",
stats->nb_undetermined);
fprintf (file, "-----------------------------------------\n");
- fprintf (file, "%d\tchrecs in the scev database\n",
+ fprintf (file, "%d\tchrecs in the scev database\n",
(int) htab_elements (scalar_evolution_info));
fprintf (file, "%d\tsets in the scev database\n", nb_set_scev);
fprintf (file, "%d\tgets in the scev database\n", nb_get_scev);
@@ -2848,15 +2848,15 @@ gather_chrec_stats (tree chrec, struct chrec_stats *stats)
print_generic_expr (dump_file, chrec, 0);
fprintf (dump_file, "\n");
}
-
+
stats->nb_chrecs++;
-
+
if (chrec == NULL_TREE)
{
stats->nb_undetermined++;
return;
}
-
+
switch (TREE_CODE (chrec))
{
case POLYNOMIAL_CHREC:
@@ -2878,20 +2878,20 @@ gather_chrec_stats (tree chrec, struct chrec_stats *stats)
fprintf (dump_file, " higher_degree_polynomial\n");
stats->nb_higher_poly++;
}
-
+
break;
default:
break;
}
-
+
if (chrec_contains_undetermined (chrec))
{
if (dump_file && (dump_flags & TDF_STATS))
fprintf (dump_file, " undetermined\n");
stats->nb_undetermined++;
}
-
+
if (dump_file && (dump_flags & TDF_STATS))
fprintf (dump_file, ")\n");
}
@@ -2899,47 +2899,47 @@ gather_chrec_stats (tree chrec, struct chrec_stats *stats)
/* One of the drivers for testing the scalar evolutions analysis.
This function analyzes the scalar evolution of all the scalars
defined as loop phi nodes in one of the loops from the
- EXIT_CONDITIONS array.
-
+ EXIT_CONDITIONS array.
+
TODO Optimization: A loop is in canonical form if it contains only
a single scalar loop phi node. All the other scalars that have an
evolution in the loop are rewritten in function of this single
index. This allows the parallelization of the loop. */
-static void
+static void
analyze_scalar_evolution_for_all_loop_phi_nodes (VEC(gimple,heap) **exit_conditions)
{
unsigned int i;
struct chrec_stats stats;
gimple cond, phi;
gimple_stmt_iterator psi;
-
+
reset_chrecs_counters (&stats);
-
+
for (i = 0; VEC_iterate (gimple, *exit_conditions, i, cond); i++)
{
struct loop *loop;
basic_block bb;
tree chrec;
-
+
loop = loop_containing_stmt (cond);
bb = loop->header;
-
+
for (psi = gsi_start_phis (bb); !gsi_end_p (psi); gsi_next (&psi))
{
phi = gsi_stmt (psi);
if (is_gimple_reg (PHI_RESULT (phi)))
{
- chrec = instantiate_parameters
- (loop,
+ chrec = instantiate_parameters
+ (loop,
analyze_scalar_evolution (loop, PHI_RESULT (phi)));
-
+
if (dump_file && (dump_flags & TDF_STATS))
gather_chrec_stats (chrec, &stats);
}
}
}
-
+
if (dump_file && (dump_flags & TDF_STATS))
dump_chrecs_stats (dump_file, &stats);
}
@@ -2959,16 +2959,16 @@ gather_stats_on_scev_database_1 (void **slot, void *stats)
/* Classify the chrecs of the whole database. */
-void
+void
gather_stats_on_scev_database (void)
{
struct chrec_stats stats;
-
+
if (!dump_file)
return;
-
+
reset_chrecs_counters (&stats);
-
+
htab_traverse (scalar_evolution_info, gather_stats_on_scev_database_1,
&stats);
@@ -3007,7 +3007,7 @@ scev_initialize (void)
del_scev_info,
ggc_calloc,
ggc_free);
-
+
initialize_scalar_evolutions_analyzer ();
FOR_EACH_LOOP (li, loop, 0)
@@ -3039,18 +3039,18 @@ scev_reset (void)
(see analyze_scalar_evolution_in_loop for more details on USE_LOOP
and WRTO_LOOP). If ALLOW_NONCONSTANT_STEP is true, we want step to be
invariant in LOOP. Otherwise we require it to be an integer constant.
-
+
IV->no_overflow is set to true if we are sure the iv cannot overflow (e.g.
because it is computed in signed arithmetics). Consequently, adding an
induction variable
-
+
for (i = IV->base; ; i += IV->step)
is only safe if IV->no_overflow is false, or TYPE_OVERFLOW_UNDEFINED is
false for the type of the induction variable, or you can prove that i does
not wrap by some other argument. Otherwise, this might introduce undefined
behavior, and
-
+
for (i = iv->base; ; i = (type) ((unsigned type) i + (unsigned type) iv->step))
must be used instead. */
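
To make the comment above concrete, here is a standalone sketch (plain C, not GCC code): when no_overflow cannot be established and signed overflow would be undefined, the increment is carried out in the corresponding unsigned type, where wrap-around is well defined, and the result converted back.

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  int i;
  int step = 3;

  /* "i += step" would eventually overflow a signed int, which is
     undefined behavior.  Doing the addition in unsigned int wraps
     predictably; converting the result back to int is
     implementation-defined but does not invoke undefined behavior.  */
  for (i = INT_MAX - 10; i > 0;
       i = (int) ((unsigned int) i + (unsigned int) step))
    printf ("%d\n", i);

  return 0;
}

On typical targets this prints four values and stops once the wrapped result converts back to a negative int, instead of invoking undefined behavior on the last increment.
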
@@ -3109,13 +3109,13 @@ void
scev_analysis (void)
{
VEC(gimple,heap) *exit_conditions;
-
+
exit_conditions = VEC_alloc (gimple, heap, 37);
select_loops_exit_conditions (&exit_conditions);
if (dump_file && (dump_flags & TDF_STATS))
analyze_scalar_evolution_for_all_loop_phi_nodes (&exit_conditions);
-
+
number_of_iterations_for_all_loops (&exit_conditions);
VEC_free (gimple, heap, exit_conditions);
}
@@ -3178,7 +3178,7 @@ expression_expensive_p (tree expr)
/* Replace ssa names for that scev can prove they are constant by the
appropriate constants. Also perform final value replacement in loops,
in case the replacement expressions are cheap.
-
+
We only consider SSA names defined by phi nodes; rest is left to the
ordinary constant propagation pass. */
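
The worked example in the comment for number_of_latch_executions can be reproduced with a short standalone program (plain C, not GCC code): evaluate the affine evolution function EF = {23, +, 5} and find the first iteration at which the exit condition "b > 49" fires.

#include <stdio.h>

/* Evaluate an affine chrec {BASE, +, STEP} at iteration N,
   i.e. BASE + N * STEP.  */
static long
chrec_eval (long base, long step, long n)
{
  return base + n * step;
}

int
main (void)
{
  long base = 23, step = 5, n;

  /* Find the first iteration where the exit test "b > 49" is true.  */
  for (n = 0; chrec_eval (base, step, n) <= 49; n++)
    ;

  /* Prints: EF(5) = 48, EF(6) = 53, latch executed 6 times.  */
  printf ("EF(%ld) = %ld, EF(%ld) = %ld, latch executed %ld times\n",
          n - 1, chrec_eval (base, step, n - 1),
          n, chrec_eval (base, step, n), n);
  return 0;
}
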
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index d96b66b1aa8..5cd5d9d29c6 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -1,18 +1,18 @@
/* Memory address lowering and addressing mode selection.
Copyright (C) 2004, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -46,7 +46,7 @@ along with GCC; see the file COPYING3. If not see
/* TODO -- handling of symbols (according to Richard Hendersons
comments, http://gcc.gnu.org/ml/gcc-patches/2005-04/msg00949.html):
-
+
There are at least 5 different kinds of symbols that we can run up against:
(1) binds_local_p, small data area.
@@ -178,7 +178,7 @@ gen_addr_rtx (enum machine_mode address_mode,
/* Returns address for TARGET_MEM_REF with parameters given by ADDR
in address space AS.
- If REALLY_EXPAND is false, just make fake registers instead
+ If REALLY_EXPAND is false, just make fake registers instead
of really expanding the operands, and perform the expansion in-place
by using one of the "templates". */
@@ -526,7 +526,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
{
amult = addr->elts[i].coef;
amult_neg = double_int_ext_for_comb (double_int_neg (amult), addr);
-
+
if (double_int_equal_p (amult, best_mult))
op_code = PLUS_EXPR;
else if (double_int_equal_p (amult_neg, best_mult))
@@ -547,7 +547,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
mult_elt = fold_build1 (NEGATE_EXPR, sizetype, elt);
}
addr->n = j;
-
+
parts->index = mult_elt;
parts->step = double_int_to_tree (sizetype, best_mult);
}
@@ -652,7 +652,7 @@ create_mem_ref (gimple_stmt_iterator *gsi, tree type, aff_tree *addr,
parts.index, parts.step),
true, NULL_TREE, true, GSI_SAME_STMT);
parts.step = NULL_TREE;
-
+
mem_ref = create_mem_ref_raw (type, &parts);
if (mem_ref)
return mem_ref;
@@ -662,7 +662,7 @@ create_mem_ref (gimple_stmt_iterator *gsi, tree type, aff_tree *addr,
{
tmp = build_addr (parts.symbol, current_function_decl);
gcc_assert (is_gimple_val (tmp));
-
+
/* Add the symbol to base, eventually forcing it to register. */
if (parts.base)
{
@@ -720,7 +720,7 @@ create_mem_ref (gimple_stmt_iterator *gsi, tree type, aff_tree *addr,
if (parts.base)
{
atype = TREE_TYPE (parts.base);
- parts.base = force_gimple_operand_gsi (gsi,
+ parts.base = force_gimple_operand_gsi (gsi,
fold_build2 (POINTER_PLUS_EXPR, atype,
parts.base,
fold_convert (sizetype, parts.offset)),
@@ -816,7 +816,7 @@ maybe_fold_tmr (tree ref)
if (!changed)
return NULL_TREE;
-
+
ret = create_mem_ref_raw (TREE_TYPE (ref), &addr);
if (!ret)
return NULL_TREE;
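
The tree-ssa-address.c hunks above are whitespace-only, but the code they touch assembles TARGET_MEM_REFs from a decomposed address of the shape symbol + base + index * step + offset. A standalone sketch of that composition (the struct and helper below are invented for illustration and are not GCC's mem_address type):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flattened view of the address parts used by
   create_mem_ref: addr = symbol + base + index * step + offset.  */
struct mem_parts
{
  uintptr_t symbol;   /* address of a global, or 0       */
  uintptr_t base;     /* base pointer, or 0              */
  uintptr_t index;    /* index value, or 0               */
  uintptr_t step;     /* constant multiplier, or 1       */
  intptr_t offset;    /* constant displacement           */
};

static uintptr_t
compose_address (const struct mem_parts *p)
{
  return p->symbol + p->base + p->index * p->step + (uintptr_t) p->offset;
}

int
main (void)
{
  static int a[16];
  /* symbol = &a[0], index = 3, step = sizeof (int), offset = 4:
     on a target with 4-byte int this composes the address of a[4].  */
  struct mem_parts p = { (uintptr_t) a, 0, 3, sizeof (int), 4 };

  printf ("%p %p\n", (void *) compose_address (&p), (void *) &a[4]);
  return 0;
}
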
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 4c052be418f..6ba550bbff0 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -325,7 +325,7 @@ dump_alias_info (FILE *file)
fprintf (file, "\n\nAlias information for %s\n\n", funcname);
fprintf (file, "Aliased symbols\n\n");
-
+
FOR_EACH_REFERENCED_VAR (var, rvi)
{
if (may_be_aliased (var))
@@ -345,7 +345,7 @@ dump_alias_info (FILE *file)
{
tree ptr = ssa_name (i);
struct ptr_info_def *pi;
-
+
if (ptr == NULL_TREE
|| SSA_NAME_IN_FREE_LIST (ptr))
continue;
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 76ea0e49e1c..f3f113c902f 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -5,17 +5,17 @@
Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -63,7 +63,7 @@ along with GCC; see the file COPYING3. If not see
mark the outgoing edges as executable or not executable
depending on the predicate's value. This is then used when
visiting PHI nodes to know when a PHI argument can be ignored.
-
+
2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
same constant C, then the LHS of the PHI is set to C. This
@@ -714,7 +714,7 @@ do_dbg_cnt (void)
/* Do final substitution of propagated values, cleanup the flowgraph and
- free allocated storage.
+ free allocated storage.
Return TRUE when something was optimized. */
@@ -882,7 +882,7 @@ ccp_visit_phi_node (gimple phi)
return SSA_PROP_NOT_INTERESTING;
}
-/* Return true if we may propagate the address expression ADDR into the
+/* Return true if we may propagate the address expression ADDR into the
dereference DEREF and cancel them. */
bool
@@ -1027,7 +1027,7 @@ ccp_fold (gimple stmt)
return get_symbol_constant_value (rhs);
return rhs;
}
-
+
case GIMPLE_UNARY_RHS:
{
/* Handle unary operators that can appear in GIMPLE form.
@@ -1071,7 +1071,7 @@ ccp_fold (gimple stmt)
return op0;
}
- return
+ return
fold_unary_ignore_overflow_loc (loc, subcode,
gimple_expr_type (stmt), op0);
}
@@ -1415,7 +1415,7 @@ evaluate_stmt (gimple stmt)
if (code == GIMPLE_ASSIGN)
{
enum tree_code subcode = gimple_assign_rhs_code (stmt);
-
+
/* Other cases cannot satisfy is_gimple_min_invariant
without folding. */
if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
@@ -1495,7 +1495,7 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
if (integer_zerop (val.value))
gimple_cond_make_false (stmt);
- else
+ else
gimple_cond_make_true (stmt);
return true;
@@ -1587,7 +1587,7 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p)
its evaluation changes the lattice value of its output, return
SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
output value.
-
+
If STMT is a conditional branch and we can determine its truth
value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
value, return SSA_PROP_VARYING. */
@@ -1669,7 +1669,7 @@ gate_ccp (void)
}
-struct gimple_opt_pass pass_ccp =
+struct gimple_opt_pass pass_ccp =
{
{
GIMPLE_PASS,
@@ -1807,7 +1807,7 @@ maybe_fold_offset_to_array_ref (location_t loc, tree base, tree offset,
(char *)a - 4;
which should be not folded to &a->d[-8]. */
if (domain_type
- && TYPE_MAX_VALUE (domain_type)
+ && TYPE_MAX_VALUE (domain_type)
&& TREE_CODE (TYPE_MAX_VALUE (domain_type)) == INTEGER_CST)
{
tree up_bound = TYPE_MAX_VALUE (domain_type);
@@ -1902,7 +1902,7 @@ maybe_fold_offset_to_component_ref (location_t loc, tree record_type,
t = build3 (COMPONENT_REF, field_type, base, f, NULL_TREE);
return t;
}
-
+
/* Don't care about offsets into the middle of scalars. */
if (!AGGREGATE_TYPE_P (field_type))
continue;
@@ -1944,7 +1944,7 @@ maybe_fold_offset_to_component_ref (location_t loc, tree record_type,
field_type = TREE_TYPE (f);
offset = int_const_binop (MINUS_EXPR, offset, byte_position (f), 1);
- /* If we get here, we've got an aggregate field, and a possibly
+ /* If we get here, we've got an aggregate field, and a possibly
nonzero offset into them. Recurse and hope for a valid match. */
base = build3 (COMPONENT_REF, field_type, base, f, NULL_TREE);
SET_EXPR_LOCATION (base, loc);
@@ -2135,7 +2135,7 @@ maybe_fold_stmt_indirect (tree expr, tree base, tree offset)
}
else
{
- /* We can get here for out-of-range string constant accesses,
+ /* We can get here for out-of-range string constant accesses,
such as "_"[3]. Bail out of the entire substitution search
and arrange for the entire statement to be replaced by a
call to __builtin_trap. In all likelihood this will all be
@@ -2148,11 +2148,11 @@ maybe_fold_stmt_indirect (tree expr, tree base, tree offset)
&& TREE_CODE (TREE_OPERAND (t, 0)) == STRING_CST)
{
/* FIXME: Except that this causes problems elsewhere with dead
- code not being deleted, and we die in the rtl expanders
+ code not being deleted, and we die in the rtl expanders
because we failed to remove some ssa_name. In the meantime,
just return zero. */
/* FIXME2: This condition should be signaled by
- fold_read_from_constant_string directly, rather than
+ fold_read_from_constant_string directly, rather than
re-checking for it here. */
return integer_zero_node;
}
@@ -2380,7 +2380,7 @@ get_maxval_strlen (tree arg, tree *length, bitmap visited, int type)
{
tree var, val;
gimple def_stmt;
-
+
if (TREE_CODE (arg) != SSA_NAME)
{
if (TREE_CODE (arg) == COND_EXPR)
@@ -2475,7 +2475,7 @@ get_maxval_strlen (tree arg, tree *length, bitmap visited, int type)
return false;
}
}
- return true;
+ return true;
default:
return false;
@@ -2772,7 +2772,7 @@ fold_gimple_assign (gimple_stmt_iterator *si)
result = fold (rhs);
/* Strip away useless type conversions. Both the NON_LVALUE_EXPR
- that may have been added by fold, and "useless" type
+ that may have been added by fold, and "useless" type
conversions that might now be apparent due to propagation. */
STRIP_USELESS_TYPE_CONVERSION (result);
@@ -3205,7 +3205,7 @@ optimize_stdarg_builtin (gimple call)
|| TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
!= TYPE_MAIN_VARIANT (cfun_va_list))
return NULL_TREE;
-
+
lhs = build_fold_indirect_ref_loc (loc, lhs);
rhs = build_call_expr_loc (loc, built_in_decls[BUILT_IN_NEXT_ARG],
1, integer_zero_node);
@@ -3271,7 +3271,7 @@ gimplify_and_update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
if (lhs == NULL_TREE)
gimplify_and_add (expr, &stmts);
- else
+ else
tmp = get_initialized_tmp_var (expr, &stmts, NULL);
pop_gimplify_context (NULL);
@@ -3316,7 +3316,7 @@ execute_fold_all_builtins (void)
bool cfg_changed = false;
basic_block bb;
unsigned int todoflags = 0;
-
+
FOR_EACH_BB (bb)
{
gimple_stmt_iterator i;
@@ -3418,16 +3418,16 @@ execute_fold_all_builtins (void)
gsi_next (&i);
}
}
-
+
/* Delete unreachable blocks. */
if (cfg_changed)
todoflags |= TODO_cleanup_cfg;
-
+
return todoflags;
}
-struct gimple_opt_pass pass_fold_builtins =
+struct gimple_opt_pass pass_fold_builtins =
{
{
GIMPLE_PASS,
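
The ccp_visit_phi_node behavior described in the comments above amounts to meeting the lattice values of the PHI arguments that flow in over executable edges. A standalone sketch of that three-valued lattice follows (names invented for the sketch; GCC's actual prop_value_t representation differs):

#include <stdio.h>

enum latticevalue { UNDEFINED, CONSTANT, VARYING };

struct latval
{
  enum latticevalue kind;
  long value;           /* Only meaningful when kind == CONSTANT.  */
};

/* Meet of two lattice values: UNDEFINED is the identity, two equal
   constants stay constant, everything else drops to VARYING.  */
static struct latval
meet (struct latval a, struct latval b)
{
  struct latval varying = { VARYING, 0 };

  if (a.kind == UNDEFINED)
    return b;
  if (b.kind == UNDEFINED)
    return a;
  if (a.kind == CONSTANT && b.kind == CONSTANT && a.value == b.value)
    return a;
  return varying;
}

int
main (void)
{
  /* PHI <5(e1), 5(e2)> with both edges executable: the result is 5,
     so the PHI's LHS would be set to the constant.  */
  struct latval arg1 = { CONSTANT, 5 }, arg2 = { CONSTANT, 5 };
  struct latval res = meet (arg1, arg2);

  printf ("kind=%d value=%ld\n", res.kind, res.value);
  return 0;
}
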
diff --git a/gcc/tree-ssa-coalesce.c b/gcc/tree-ssa-coalesce.c
index 5841aa07522..867e15c06e0 100644
--- a/gcc/tree-ssa-coalesce.c
+++ b/gcc/tree-ssa-coalesce.c
@@ -36,8 +36,8 @@ along with GCC; see the file COPYING3. If not see
/* This set of routines implements a coalesce_list. This is an object which
is used to track pairs of ssa_names which are desirable to coalesce
- together to avoid copies. Costs are associated with each pair, and when
- all desired information has been collected, the object can be used to
+ together to avoid copies. Costs are associated with each pair, and when
+ all desired information has been collected, the object can be used to
order the pairs for processing. */
/* This structure defines a pair entry. */
@@ -59,7 +59,7 @@ typedef struct cost_one_pair_d
/* This structure maintains the list of coalesce pairs. */
-typedef struct coalesce_list_d
+typedef struct coalesce_list_d
{
htab_t list; /* Hash table. */
coalesce_pair_p *sorted; /* List when sorted. */
@@ -91,7 +91,7 @@ coalesce_cost (int frequency, bool optimize_for_size)
/* Return the cost of executing a copy instruction in basic block BB. */
-static inline int
+static inline int
coalesce_cost_bb (basic_block bb)
{
return coalesce_cost (bb->frequency, optimize_bb_for_size_p (bb));
@@ -100,7 +100,7 @@ coalesce_cost_bb (basic_block bb)
/* Return the cost of executing a copy instruction on edge E. */
-static inline int
+static inline int
coalesce_cost_edge (edge e)
{
int mult = 1;
@@ -133,12 +133,12 @@ coalesce_cost_edge (edge e)
}
}
- return coalesce_cost (EDGE_FREQUENCY (e),
+ return coalesce_cost (EDGE_FREQUENCY (e),
optimize_edge_for_size_p (e)) * mult;
}
-/* Retrieve a pair to coalesce from the cost_one_list in CL. Returns the
+/* Retrieve a pair to coalesce from the cost_one_list in CL. Returns the
2 elements via P1 and P2. 1 is returned by the function if there is a pair,
NO_BEST_COALESCE is returned if there aren't any. */
@@ -160,7 +160,7 @@ pop_cost_one_pair (coalesce_list_p cl, int *p1, int *p2)
return 1;
}
-/* Retrieve the most expensive remaining pair to coalesce from CL. Returns the
+/* Retrieve the most expensive remaining pair to coalesce from CL. Returns the
2 elements via P1 and P2. Their calculated cost is returned by the function.
NO_BEST_COALESCE is returned if the coalesce list is empty. */
@@ -190,7 +190,7 @@ pop_best_coalesce (coalesce_list_p cl, int *p1, int *p2)
/* Hash function for coalesce list. Calculate hash for PAIR. */
-static unsigned int
+static unsigned int
coalesce_pair_map_hash (const void *pair)
{
hashval_t a = (hashval_t)(((const_coalesce_pair_p)pair)->first_element);
@@ -203,7 +203,7 @@ coalesce_pair_map_hash (const void *pair)
/* Equality function for coalesce list hash table. Compare PAIR1 and PAIR2,
returning TRUE if the two pairs are equivalent. */
-static int
+static int
coalesce_pair_map_eq (const void *pair1, const void *pair2)
{
const_coalesce_pair_p const p1 = (const_coalesce_pair_p) pair1;
@@ -216,13 +216,13 @@ coalesce_pair_map_eq (const void *pair1, const void *pair2)
/* Create a new empty coalesce list object and return it. */
-static inline coalesce_list_p
+static inline coalesce_list_p
create_coalesce_list (void)
{
coalesce_list_p list;
unsigned size = num_ssa_names * 3;
- if (size < 40)
+ if (size < 40)
size = 40;
list = (coalesce_list_p) xmalloc (sizeof (struct coalesce_list_d));
@@ -237,7 +237,7 @@ create_coalesce_list (void)
/* Delete coalesce list CL. */
-static inline void
+static inline void
delete_coalesce_list (coalesce_list_p cl)
{
gcc_assert (cl->cost_one_list == NULL);
@@ -249,8 +249,8 @@ delete_coalesce_list (coalesce_list_p cl)
}
-/* Find a matching coalesce pair object in CL for the pair P1 and P2. If
- one isn't found, return NULL if CREATE is false, otherwise create a new
+/* Find a matching coalesce pair object in CL for the pair P1 and P2. If
+ one isn't found, return NULL if CREATE is false, otherwise create a new
coalesce pair object and return it. */
static coalesce_pair_p
@@ -259,7 +259,7 @@ find_coalesce_pair (coalesce_list_p cl, int p1, int p2, bool create)
struct coalesce_pair p, *pair;
void **slot;
unsigned int hash;
-
+
/* Normalize so that p1 is the smaller value. */
if (p2 < p1)
{
@@ -271,8 +271,8 @@ find_coalesce_pair (coalesce_list_p cl, int p1, int p2, bool create)
p.first_element = p1;
p.second_element = p2;
}
-
-
+
+
hash = coalesce_pair_map_hash (&p);
pair = (struct coalesce_pair *) htab_find_with_hash (cl->list, &p, hash);
@@ -305,7 +305,7 @@ add_cost_one_coalesce (coalesce_list_p cl, int p1, int p2)
/* Add a coalesce between P1 and P2 in list CL with a cost of VALUE. */
-static inline void
+static inline void
add_coalesce (coalesce_list_p cl, int p1, int p2, int value)
{
coalesce_pair_p node;
@@ -329,7 +329,7 @@ add_coalesce (coalesce_list_p cl, int p1, int p2, int value)
/* Comparison function to allow qsort to sort P1 and P2 in Ascending order. */
-static int
+static int
compare_pairs (const void *p1, const void *p2)
{
const_coalesce_pair_p const *const pp1 = (const_coalesce_pair_p const *) p1;
@@ -458,7 +458,7 @@ sort_coalesce_list (coalesce_list_p cl)
/* Send debug info for coalesce list CL to file F. */
-static void
+static void
dump_coalesce_list (FILE *f, coalesce_list_p cl)
{
coalesce_pair_p node;
@@ -498,7 +498,7 @@ dump_coalesce_list (FILE *f, coalesce_list_p cl)
}
-/* This represents a conflict graph. Implemented as an array of bitmaps.
+/* This represents a conflict graph. Implemented as an array of bitmaps.
A full matrix is used for conflicts rather than just upper triangular form.
this make sit much simpler and faster to perform conflict merges. */
@@ -639,15 +639,15 @@ ssa_conflicts_dump (FILE *file, ssa_conflicts_p ptr)
}
-/* This structure is used to efficiently record the current status of live
- SSA_NAMES when building a conflict graph.
+/* This structure is used to efficiently record the current status of live
+ SSA_NAMES when building a conflict graph.
LIVE_BASE_VAR has a bit set for each base variable which has at least one
ssa version live.
- LIVE_BASE_PARTITIONS is an array of bitmaps using the basevar table as an
- index, and is used to track what partitions of each base variable are
- live. This makes it easy to add conflicts between just live partitions
- with the same base variable.
- The values in LIVE_BASE_PARTITIONS are only valid if the base variable is
+ LIVE_BASE_PARTITIONS is an array of bitmaps using the basevar table as an
+ index, and is used to track what partitions of each base variable are
+ live. This makes it easy to add conflicts between just live partitions
+ with the same base variable.
+ The values in LIVE_BASE_PARTITIONS are only valid if the base variable is
marked as being live. This delays clearing of these bitmaps until
they are actually needed again. */
@@ -721,7 +721,7 @@ live_track_add_partition (live_track_p ptr, int partition)
int root;
root = basevar_index (ptr->map, partition);
- /* If this base var wasn't live before, it is now. Clear the element list
+ /* If this base var wasn't live before, it is now. Clear the element list
since it was delayed until needed. */
if (!bitmap_bit_p (ptr->live_base_var, root))
{
@@ -729,7 +729,7 @@ live_track_add_partition (live_track_p ptr, int partition)
bitmap_clear (ptr->live_base_partitions[root]);
}
bitmap_set_bit (ptr->live_base_partitions[root], partition);
-
+
}
@@ -764,7 +764,7 @@ live_track_live_p (live_track_p ptr, tree var)
}
-/* This routine will add USE to PTR. USE will be marked as live in both the
+/* This routine will add USE to PTR. USE will be marked as live in both the
ssa live map and the live bitmap for the root of USE. */
static inline void
@@ -782,7 +782,7 @@ live_track_process_use (live_track_p ptr, tree use)
/* This routine will process a DEF in PTR. DEF will be removed from the live
- lists, and if there are any other live partitions with the same base
+ lists, and if there are any other live partitions with the same base
variable, conflicts will be added to GRAPH. */
static inline void
@@ -838,8 +838,8 @@ live_track_clear_base_vars (live_track_p ptr)
/* Build a conflict graph based on LIVEINFO. Any partitions which are in the
- partition view of the var_map liveinfo is based on get entries in the
- conflict graph. Only conflicts between ssa_name partitions with the same
+ partition view of the var_map liveinfo is based on get entries in the
+ conflict graph. Only conflicts between ssa_name partitions with the same
base variable are added. */
static ssa_conflicts_p
@@ -868,12 +868,12 @@ build_ssa_conflict_graph (tree_live_info_p liveinfo)
tree var;
gimple stmt = gsi_stmt (gsi);
- /* A copy between 2 partitions does not introduce an interference
- by itself. If they did, you would never be able to coalesce
- two things which are copied. If the two variables really do
- conflict, they will conflict elsewhere in the program.
-
- This is handled by simply removing the SRC of the copy from the
+ /* A copy between 2 partitions does not introduce an interference
+ by itself. If they did, you would never be able to coalesce
+ two things which are copied. If the two variables really do
+ conflict, they will conflict elsewhere in the program.
+
+ This is handled by simply removing the SRC of the copy from the
live list, and processing the stmt normally. */
if (is_gimple_assign (stmt))
{
@@ -894,11 +894,11 @@ build_ssa_conflict_graph (tree_live_info_p liveinfo)
live_track_process_use (live, var);
}
- /* If result of a PHI is unused, looping over the statements will not
+ /* If result of a PHI is unused, looping over the statements will not
record any conflicts since the def was never live. Since the PHI node
is going to be translated out of SSA form, it will insert a copy.
- There must be a conflict recorded between the result of the PHI and
- any variables that are live. Otherwise the out-of-ssa translation
+ There must be a conflict recorded between the result of the PHI and
+ any variables that are live. Otherwise the out-of-ssa translation
may create incorrect code. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -932,7 +932,7 @@ print_exprs (FILE *f, const char *str1, tree expr1, const char *str2,
/* Called if a coalesce across and abnormal edge cannot be performed. PHI is
- the phi node at fault, I is the argument index at fault. A message is
+ the phi node at fault, I is the argument index at fault. A message is
printed and compilation is then terminated. */
static inline void
@@ -1016,7 +1016,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
ver = SSA_NAME_VERSION (res);
register_ssa_partition (map, res);
- /* Register ssa_names and coalesces between the args and the result
+ /* Register ssa_names and coalesces between the args and the result
of all PHI. */
for (i = 0; i < gimple_phi_num_args (phi); i++)
{
@@ -1024,7 +1024,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
arg = PHI_ARG_DEF (phi, i);
if (TREE_CODE (arg) == SSA_NAME)
register_ssa_partition (map, arg);
- if (TREE_CODE (arg) == SSA_NAME
+ if (TREE_CODE (arg) == SSA_NAME
&& SSA_NAME_VAR (arg) == SSA_NAME_VAR (res))
{
saw_copy = true;
@@ -1120,7 +1120,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
if (SSA_NAME_VAR (outputs[match]) == SSA_NAME_VAR (input))
{
- cost = coalesce_cost (REG_BR_PROB_BASE,
+ cost = coalesce_cost (REG_BR_PROB_BASE,
optimize_bb_for_size_p (bb));
add_coalesce (cl, v1, v2, cost);
bitmap_set_bit (used_in_copy, v1);
@@ -1133,7 +1133,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
default:
break;
}
-
+
#ifdef ENABLE_CHECKING
/* Mark real uses and defs. */
FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, (SSA_OP_DEF|SSA_OP_USE))
@@ -1141,7 +1141,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
/* Validate that virtual ops don't get used in funny ways. */
if (gimple_vuse (stmt))
- bitmap_set_bit (used_in_virtual_ops,
+ bitmap_set_bit (used_in_virtual_ops,
DECL_UID (SSA_NAME_VAR (gimple_vuse (stmt))));
#endif /* ENABLE_CHECKING */
}
@@ -1228,7 +1228,7 @@ attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y,
print_generic_expr (debug, partition_to_var (map, p2), TDF_SLIM);
}
- if (p1 == p2)
+ if (p1 == p2)
{
if (debug)
fprintf (debug, ": Already Coalesced.\n");
@@ -1251,7 +1251,7 @@ attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y,
return false;
}
- /* z is the new combined partition. Remove the other partition from
+ /* z is the new combined partition. Remove the other partition from
the list, and merge the conflicts. */
if (z == p1)
ssa_conflicts_merge (graph, p1, p2);
@@ -1270,11 +1270,11 @@ attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y,
}
-/* Attempt to Coalesce partitions in MAP which occur in the list CL using
+/* Attempt to Coalesce partitions in MAP which occur in the list CL using
GRAPH. Debug output is sent to DEBUG if it is non-NULL. */
static void
-coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl,
+coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl,
FILE *debug)
{
int x = 0, y = 0;
@@ -1285,7 +1285,7 @@ coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl,
edge_iterator ei;
/* First, coalesce all the copies across abnormal edges. These are not placed
- in the coalesce list because they do not need to be sorted, and simply
+ in the coalesce list because they do not need to be sorted, and simply
consume extra memory/compilation time in large programs. */
FOR_EACH_BB (bb)
@@ -1432,14 +1432,14 @@ coalesce_ssa_name (void)
dump_coalesce_list (dump_file, cl);
}
- /* First, coalesce all live on entry variables to their base variable.
+ /* First, coalesce all live on entry variables to their base variable.
This will ensure the first use is coming from the correct location. */
if (dump_file && (dump_flags & TDF_DETAILS))
dump_var_map (dump_file, map);
/* Now coalesce everything in the list. */
- coalesce_partitions (map, graph, cl,
+ coalesce_partitions (map, graph, cl,
((dump_flags & TDF_DETAILS) ? dump_file
: NULL));
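
The coalescing driver touched above sorts candidate pairs by cost and merges two partitions only when the conflict graph has no edge between them. A compressed standalone model of that loop (union-find plus a full conflict matrix, as the comments describe; merging of the conflict rows after a successful coalesce is omitted for brevity, and nothing here is GCC code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_PARTITIONS 4

struct pair { int p1, p2, cost; };

static bool conflict[NUM_PARTITIONS][NUM_PARTITIONS];
static int partition_of[NUM_PARTITIONS];

/* Union-find representative with path compression.  */
static int
find (int x)
{
  if (partition_of[x] != x)
    partition_of[x] = find (partition_of[x]);
  return partition_of[x];
}

/* Sort so the most profitable (highest cost) pair is handled first.  */
static int
compare_pairs (const void *a, const void *b)
{
  return ((const struct pair *) b)->cost - ((const struct pair *) a)->cost;
}

int
main (void)
{
  struct pair cl[] = { { 0, 1, 10 }, { 2, 3, 30 }, { 1, 2, 20 } };
  int i;

  for (i = 0; i < NUM_PARTITIONS; i++)
    partition_of[i] = i;
  conflict[1][2] = conflict[2][1] = true;   /* 1 and 2 are live together.  */

  qsort (cl, 3, sizeof (struct pair), compare_pairs);
  for (i = 0; i < 3; i++)
    {
      int a = find (cl[i].p1), b = find (cl[i].p2);

      /* Merge only when the representatives differ and do not conflict.
         (Merging the conflict rows afterwards is left out here.)  */
      if (a != b && !conflict[a][b])
        {
          partition_of[b] = a;
          printf ("coalesced %d and %d (cost %d)\n",
                  cl[i].p1, cl[i].p2, cl[i].cost);
        }
    }
  return 0;
}
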
diff --git a/gcc/tree-ssa-copy.c b/gcc/tree-ssa-copy.c
index 986ad454e74..4b8d0b9660b 100644
--- a/gcc/tree-ssa-copy.c
+++ b/gcc/tree-ssa-copy.c
@@ -73,7 +73,7 @@ may_propagate_copy (tree dest, tree orig)
if (TREE_CODE (dest) == SSA_NAME
&& SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
return false;
-
+
/* Do not copy between types for which we *do* need a conversion. */
if (!useless_type_conversion_p (type_d, type_o))
return false;
@@ -351,7 +351,7 @@ get_last_copy_of (tree var)
/* Traverse COPY_OF starting at VAR until we get to the last
link in the chain. Since it is possible to have cycles in PHI
nodes, the copy-of chain may also contain cycles.
-
+
To avoid infinite loops and to avoid traversing lengthy copy-of
chains, we artificially limit the maximum number of chains we are
willing to traverse.
@@ -390,7 +390,7 @@ set_copy_of_val (tree dest, tree first)
{
unsigned int dest_ver = SSA_NAME_VERSION (dest);
tree old_first, old_last, new_last;
-
+
/* Set FIRST to be the first link in COPY_OF[DEST]. If that
changed, return true. */
old_first = copy_of[dest_ver].value;
@@ -430,11 +430,11 @@ dump_copy_of (FILE *file, tree var)
if (TREE_CODE (var) != SSA_NAME)
return;
-
+
visited = sbitmap_alloc (num_ssa_names);
sbitmap_zero (visited);
SET_BIT (visited, SSA_NAME_VERSION (var));
-
+
fprintf (file, " copy-of chain: ");
val = var;
@@ -458,7 +458,7 @@ dump_copy_of (FILE *file, tree var)
fprintf (file, "[COPY]");
else
fprintf (file, "[NOT A COPY]");
-
+
sbitmap_free (visited);
}
@@ -477,7 +477,7 @@ copy_prop_visit_assignment (gimple stmt, tree *result_p)
lhs = gimple_assign_lhs (stmt);
rhs = gimple_assign_rhs1 (stmt);
-
+
gcc_assert (gimple_assign_rhs_code (stmt) == SSA_NAME);
@@ -494,7 +494,7 @@ copy_prop_visit_assignment (gimple stmt, tree *result_p)
copy of RHS's value, not of RHS itself. This avoids keeping
unnecessary copy-of chains (assignments cannot be in a cycle
like PHI nodes), speeding up the propagation process.
- This is different from what we do in copy_prop_visit_phi_node.
+ This is different from what we do in copy_prop_visit_phi_node.
In those cases, we are interested in the copy-of chains. */
*result_p = lhs;
if (set_copy_of_val (*result_p, rhs_val->value))
@@ -820,7 +820,7 @@ fini_copy_prop (void)
{
size_t i;
prop_value_t *tmp;
-
+
/* Set the final copy-of value for each variable by traversing the
copy-of chains. */
tmp = XCNEWVEC (prop_value_t, num_ssa_names);
@@ -858,7 +858,7 @@ fini_copy_prop (void)
/* Main entry point to the copy propagator.
PHIS_ONLY is true if we should only consider PHI nodes as generating
- copy propagation opportunities.
+ copy propagation opportunities.
The algorithm propagates the value COPY-OF using ssa_propagate. For
every variable X_i, COPY-OF(X_i) indicates which variable is X_i created
@@ -881,7 +881,7 @@ fini_copy_prop (void)
Visit #2: a_2 is copy-of x_298. Value changed.
Visit #3: a_5 is copy-of x_298. Value changed.
Visit #4: x_1 is copy-of x_298. Stable state reached.
-
+
When visiting PHI nodes, we only consider arguments that flow
through edges marked executable by the propagation engine. So,
when visiting statement #2 for the first time, we will only look at
@@ -918,7 +918,7 @@ fini_copy_prop (void)
1 x_54 = PHI <x_53, x_52>
2 x_53 = PHI <x_898, x_54>
-
+
Visit #1: x_54 is copy-of x_53 (because x_52 is copy-of x_53)
Visit #2: x_53 is copy-of x_898 (because x_54 is a copy of x_53,
so it is considered irrelevant
@@ -935,7 +935,7 @@ fini_copy_prop (void)
same variable. So, as long as their copy-of chains overlap, we
know that they will be a copy of the same variable, regardless of
which variable that may be).
-
+
Propagation would then proceed as follows (the notation a -> b
means that a is a copy-of b):
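
The copy-of chains discussed in these comments are resolved by walking the COPY_OF links a bounded number of steps, since PHI cycles can make a chain circular. A standalone, array-based sketch of that resolution (not GCC's prop_value_t machinery):

#include <stdio.h>

#define NUM_NAMES 9
#define MAX_STEPS NUM_NAMES

/* copy_of[i] == i means "name i is not a copy of anything".  */
static int copy_of[NUM_NAMES];

/* Follow the chain for at most MAX_STEPS links, as get_last_copy_of
   does, so a cycle cannot cause an infinite loop.  */
static int
last_copy_of (int var)
{
  int steps, cur = var;

  for (steps = 0; steps < MAX_STEPS; steps++)
    {
      if (copy_of[cur] == cur)
        return cur;             /* Reached the head of the chain.  */
      cur = copy_of[cur];
    }
  return var;                   /* Cycle or overly long chain: give up.  */
}

int
main (void)
{
  int i;

  for (i = 0; i < NUM_NAMES; i++)
    copy_of[i] = i;

  /* Model x_54 -> x_53 -> x_898 from the comment above, using the
     small indices 4, 3 and 8.  */
  copy_of[4] = 3;
  copy_of[3] = 8;

  printf ("last copy of 4 is %d\n", last_copy_of (4));   /* Prints 8.  */
  return 0;
}
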
diff --git a/gcc/tree-ssa-copyrename.c b/gcc/tree-ssa-copyrename.c
index ed221c2f3e0..d62a09416d1 100644
--- a/gcc/tree-ssa-copyrename.c
+++ b/gcc/tree-ssa-copyrename.c
@@ -50,22 +50,22 @@ along with GCC; see the file COPYING3. If not see
T.3_5 = <blah>
a_1 = T.3_5
- If this copy couldn't be copy propagated, it could possibly remain in the
- program throughout the optimization phases. After SSA->normal, it would
+ If this copy couldn't be copy propagated, it could possibly remain in the
+ program throughout the optimization phases. After SSA->normal, it would
become:
T.3 = <blah>
a = T.3
-
- Since T.3_5 is distinct from all other SSA versions of T.3, there is no
- fundamental reason why the base variable needs to be T.3, subject to
- certain restrictions. This optimization attempts to determine if we can
+
+ Since T.3_5 is distinct from all other SSA versions of T.3, there is no
+ fundamental reason why the base variable needs to be T.3, subject to
+ certain restrictions. This optimization attempts to determine if we can
change the base variable on copies like this, and result in code such as:
a_5 = <blah>
a_1 = a_5
- This gives the SSA->normal pass a shot at coalescing a_1 and a_5. If it is
+ This gives the SSA->normal pass a shot at coalescing a_1 and a_5. If it is
possible, the copy goes away completely. If it isn't possible, a new temp
will be created for a_5, and you will end up with the exact same code:
@@ -79,8 +79,8 @@ along with GCC; see the file COPYING3. If not see
a_1 = <blah>
<blah2> = a_1
- get turned into
-
+ get turned into
+
T.3_5 = <blah>
a_1 = T.3_5
<blah2> = a_1
@@ -99,7 +99,7 @@ along with GCC; see the file COPYING3. If not see
<blah2> = a_1
which copy propagation would then turn into:
-
+
a_5 = <blah>
<blah2> = a_5
@@ -187,7 +187,7 @@ copy_rename_partition_coalesce (var_map map, tree var1, tree var2, FILE *debug)
ign1 = TREE_CODE (root1) == VAR_DECL && DECL_IGNORED_P (root1);
ign2 = TREE_CODE (root2) == VAR_DECL && DECL_IGNORED_P (root2);
- /* Never attempt to coalesce 2 user variables unless one is an inline
+ /* Never attempt to coalesce 2 user variables unless one is an inline
variable. */
if (!ign1 && !ign2)
{
@@ -195,7 +195,7 @@ copy_rename_partition_coalesce (var_map map, tree var1, tree var2, FILE *debug)
ign2 = true;
else if (DECL_FROM_INLINE (root1))
ign1 = true;
- else
+ else
{
if (debug)
fprintf (debug, " : 2 different USER vars. No coalesce.\n");
@@ -203,7 +203,7 @@ copy_rename_partition_coalesce (var_map map, tree var1, tree var2, FILE *debug)
}
}
- /* If both values have default defs, we can't coalesce. If only one has a
+ /* If both values have default defs, we can't coalesce. If only one has a
tag, make sure that variable is the new root partition. */
if (gimple_default_def (cfun, root1))
{
@@ -236,7 +236,7 @@ copy_rename_partition_coalesce (var_map map, tree var1, tree var2, FILE *debug)
/* Merge the two partitions. */
p3 = partition_union (map->var_partition, p1, p2);
- /* Set the root variable of the partition to the better choice, if there is
+ /* Set the root variable of the partition to the better choice, if there is
one. */
if (!ign2)
replace_ssa_name_symbol (partition_to_var (map, p3), root2);
@@ -246,7 +246,7 @@ copy_rename_partition_coalesce (var_map map, tree var1, tree var2, FILE *debug)
if (debug)
{
fprintf (debug, " --> P%d ", p3);
- print_generic_expr (debug, SSA_NAME_VAR (partition_to_var (map, p3)),
+ print_generic_expr (debug, SSA_NAME_VAR (partition_to_var (map, p3)),
TDF_SLIM);
fprintf (debug, "\n");
}
@@ -256,8 +256,8 @@ copy_rename_partition_coalesce (var_map map, tree var1, tree var2, FILE *debug)
/* This function will make a pass through the IL, and attempt to coalesce any
SSA versions which occur in PHI's or copies. Coalescing is accomplished by
- changing the underlying root variable of all coalesced version. This will
- then cause the SSA->normal pass to attempt to coalesce them all to the same
+ changing the underlying root variable of all coalesced version. This will
+ then cause the SSA->normal pass to attempt to coalesce them all to the same
variable. */
static unsigned int
@@ -324,7 +324,7 @@ rename_ssa_copies (void)
/* Now one more pass to make all elements of a partition share the same
root variable. */
-
+
for (x = 1; x < num_ssa_names; x++)
{
part_var = partition_to_var (map, x);
@@ -357,7 +357,7 @@ gate_copyrename (void)
return flag_tree_copyrename != 0;
}
-struct gimple_opt_pass pass_rename_ssa_copies =
+struct gimple_opt_pass pass_rename_ssa_copies =
{
{
GIMPLE_PASS,
@@ -371,7 +371,7 @@ struct gimple_opt_pass pass_rename_ssa_copies =
PROP_cfg | PROP_ssa, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
- 0, /* todo_flags_start */
+ 0, /* todo_flags_start */
TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */
}
-};
+};
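
The copyrename heuristic above boils down to a choice of root variable: two compiler temporaries can always be merged, a temporary is renamed into a user variable, but two distinct user variables are not merged. A minimal standalone sketch of that decision (names invented; the inline-variable exception in the real code is omitted):

#include <stdbool.h>
#include <stdio.h>

struct var
{
  const char *name;
  bool user;          /* true for a user-visible variable  */
};

/* Pick the root variable for a merged partition, or refuse.  */
static const struct var *
pick_root (const struct var *a, const struct var *b, bool *ok)
{
  *ok = true;
  if (!a->user && !b->user)
    return a;                   /* Both temporaries: either works.      */
  if (a->user && !b->user)
    return a;                   /* Keep the user variable visible.      */
  if (!a->user && b->user)
    return b;
  *ok = false;                  /* Two user variables: no coalesce.     */
  return NULL;
}

int
main (void)
{
  struct var a = { "a", true }, t = { "T.3", false };
  bool ok;
  const struct var *root = pick_root (&a, &t, &ok);

  printf ("%s -> root %s\n", ok ? "coalesced" : "rejected",
          root ? root->name : "(none)");
  return 0;
}
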
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index 056b7b512bb..8669cdfa852 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -4,19 +4,19 @@
Contributed by Ben Elliston <bje@redhat.com>
and Andrew MacLeod <amacleod@redhat.com>
Adapted to use control dependence by Steven Bosscher, SUSE Labs.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -618,7 +618,7 @@ degenerate_phi_p (gimple phi)
/* Propagate necessity using the operands of necessary statements.
Process the uses on each statement in the worklist, and add all
feeding statements which contribute to the calculation of this
- value to the worklist.
+ value to the worklist.
In conservative mode, EL is NULL. */
@@ -626,7 +626,7 @@ static void
propagate_necessity (struct edge_list *el)
{
gimple stmt;
- bool aggressive = (el ? true : false);
+ bool aggressive = (el ? true : false);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\nProcessing worklist:\n");
@@ -694,7 +694,7 @@ propagate_necessity (struct edge_list *el)
else
{
/* Propagate through the operands. Examine all the USE, VUSE and
- VDEF operands in this statement. Mark all the statements
+ VDEF operands in this statement. Mark all the statements
which feed this statement's uses as necessary. */
ssa_op_iter iter;
tree use;
@@ -1071,8 +1071,8 @@ remove_dead_stmt (gimple_stmt_iterator *i, basic_block bb)
}
unlink_stmt_vdef (stmt);
- gsi_remove (i, true);
- release_defs (stmt);
+ gsi_remove (i, true);
+ release_defs (stmt);
}
/* Eliminate unnecessary statements. Any instruction not marked as necessary
@@ -1159,7 +1159,7 @@ eliminate_unnecessary_stmts (void)
print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
fprintf (dump_file, "\n");
}
-
+
gimple_call_set_lhs (stmt, NULL_TREE);
maybe_clean_or_replace_eh_stmt (stmt, stmt);
update_stmt (stmt);
@@ -1404,7 +1404,7 @@ perform_tree_ssa_dce (bool aggressive)
free_edge_list (el);
if (something_changed)
- return (TODO_update_ssa | TODO_cleanup_cfg | TODO_ggc_collect
+ return (TODO_update_ssa | TODO_cleanup_cfg | TODO_ggc_collect
| TODO_remove_unused_locals);
else
return 0;
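
The DCE changes above are formatting-only, but the pass they touch is a mark-and-sweep over statements: side-effecting statements are inherently necessary, necessity propagates through operands via a worklist, and whatever stays unmarked is removed. A standalone sketch of the mark phase over a tiny def-use graph (not GCC code):

#include <stdbool.h>
#include <stdio.h>

#define NUM_STMTS 5

/* For each statement, the statements defining its operands (-1 = none).  */
static const int uses[NUM_STMTS][2] = {
  { -1, -1 },   /* stmt 0: constant load                     */
  { -1, -1 },   /* stmt 1: constant load                     */
  {  0,  1 },   /* stmt 2: adds the results of 0 and 1       */
  { -1, -1 },   /* stmt 3: feeds nothing, so it is dead      */
  {  2, -1 },   /* stmt 4: store (side effect), uses stmt 2  */
};

static const bool inherently_necessary[NUM_STMTS] =
  { false, false, false, false, true };

static bool necessary[NUM_STMTS];
static int worklist[NUM_STMTS], wl_top;

static void
mark_necessary (int s)
{
  if (!necessary[s])
    {
      necessary[s] = true;
      worklist[wl_top++] = s;
    }
}

int
main (void)
{
  int i, j;

  for (i = 0; i < NUM_STMTS; i++)
    if (inherently_necessary[i])
      mark_necessary (i);

  /* Propagate necessity through the operands, as propagate_necessity
     does with its worklist.  */
  while (wl_top > 0)
    {
      int s = worklist[--wl_top];
      for (j = 0; j < 2; j++)
        if (uses[s][j] >= 0)
          mark_necessary (uses[s][j]);
    }

  for (i = 0; i < NUM_STMTS; i++)
    printf ("stmt %d: %s\n", i, necessary[i] ? "kept" : "removed");
  return 0;
}
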
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index db21218629e..dd9fd566785 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -83,7 +83,7 @@ struct cond_equivalence
Computing and storing the edge equivalences instead of creating
them on-demand can save significant amounts of time, particularly
- for pathological cases involving switch statements.
+ for pathological cases involving switch statements.
These structures live for a single iteration of the dominator
optimizer in the edge's AUX field. At the end of an iteration we
@@ -210,7 +210,7 @@ initialize_hash_element (gimple stmt, tree lhs,
enum tree_code subcode = gimple_assign_rhs_code (stmt);
expr->type = NULL_TREE;
-
+
switch (get_gimple_rhs_class (subcode))
{
case GIMPLE_SINGLE_RHS:
@@ -255,7 +255,7 @@ initialize_hash_element (gimple stmt, tree lhs,
if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
expr->ops.call.pure = true;
- else
+ else
expr->ops.call.pure = false;
expr->ops.call.nargs = nargs;
@@ -293,7 +293,7 @@ static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
expr->type = boolean_type_node;
-
+
if (COMPARISON_CLASS_P (cond))
{
expr->kind = EXPR_BINARY;
@@ -415,7 +415,7 @@ hashable_expr_equal_p (const struct hashable_expr *expr0,
return true;
}
-
+
default:
gcc_unreachable ();
}
@@ -473,7 +473,7 @@ iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
val = iterative_hash_expr (expr->ops.call.args[i], val);
}
break;
-
+
default:
gcc_unreachable ();
}
@@ -496,7 +496,7 @@ print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
print_generic_expr (stream, element->lhs, 0);
fprintf (stream, " = ");
}
-
+
switch (element->expr.kind)
{
case EXPR_SINGLE:
@@ -597,7 +597,7 @@ free_all_edge_infos (void)
}
}
-/* Jump threading, redundancy elimination and const/copy propagation.
+/* Jump threading, redundancy elimination and const/copy propagation.
This pass may expose new symbols that need to be renamed into SSA. For
every new symbol exposed, its corresponding bit will be set in
@@ -646,7 +646,7 @@ tree_ssa_dominator_optimize (void)
for jump threading; this may include back edges that are not part of
a single loop. */
mark_dfs_back_edges ();
-
+
/* Recursively walk the dominator tree optimizing statements. */
walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
@@ -720,10 +720,10 @@ tree_ssa_dominator_optimize (void)
/* Free asserted bitmaps and stacks. */
BITMAP_FREE (need_eh_cleanup);
-
+
VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
VEC_free (tree, heap, const_and_copies_stack);
-
+
/* Free the value-handle array. */
threadedge_finalize_values ();
ssa_name_values = NULL;
@@ -737,7 +737,7 @@ gate_dominator (void)
return flag_tree_dom != 0;
}
-struct gimple_opt_pass pass_dominator =
+struct gimple_opt_pass pass_dominator =
{
{
GIMPLE_PASS,
@@ -908,7 +908,7 @@ static void
record_equivalences_from_phis (basic_block bb)
{
gimple_stmt_iterator gsi;
-
+
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
@@ -1094,7 +1094,7 @@ record_cond (struct cond_equivalence *p)
/* Build a cond_equivalence record indicating that the comparison
CODE holds between operands OP0 and OP1. */
-
+
static void
build_and_record_new_cond (enum tree_code code,
tree op0, tree op1,
@@ -1371,7 +1371,7 @@ record_equality (tree x, tree y)
/* Returns true when STMT is a simple iv increment. It detects the
following situation:
-
+
i_1 = phi (..., i_2)
i_2 = i_1 +/- ... */
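A hedged illustration of the pattern described above, written as ordinary C for exposition only (the function and variable names are invented): the loop counter below produces exactly the PHI/increment pair from the comment once in SSA form.

int
count_up (int n)
{
  int i, sum = 0;
  for (i = 0; i < n; i++)   /* i_1 = PHI <0, i_2>;  i_2 = i_1 + 1 */
    sum += i;
  return sum;
}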
@@ -1410,7 +1410,7 @@ simple_iv_increment_p (gimple stmt)
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
- known value for that SSA_NAME (or NULL if no value is known).
+ known value for that SSA_NAME (or NULL if no value is known).
Propagate values from CONST_AND_COPIES into the PHI nodes of the
successors of BB. */
@@ -1846,7 +1846,7 @@ eliminate_redundant_computations (gimple_stmt_iterator* gsi)
}
opt_stats.num_re++;
-
+
if (assigns_var_p
&& !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
cached_lhs = fold_convert (expr_type, cached_lhs);
@@ -1881,7 +1881,7 @@ record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
&& gimple_assign_single_p (stmt))
{
tree rhs = gimple_assign_rhs1 (stmt);
-
+
/* If the RHS of the assignment is a constant or another variable that
may be propagated, register it in the CONST_AND_COPIES table. We
do not need to record unwind data for this, since this is a true
@@ -2031,7 +2031,7 @@ cprop_operand (gimple stmt, use_operand_p op_p)
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
- known value for that SSA_NAME (or NULL if no value is known).
+ known value for that SSA_NAME (or NULL if no value is known).
Propagate values from CONST_AND_COPIES into the uses, vuses and
vdef_ops of STMT. */
@@ -2050,7 +2050,7 @@ cprop_into_stmt (gimple stmt)
}
/* Optimize the statement pointed to by iterator SI.
-
+
We try to perform some simplistic global redundancy elimination and
constant propagation:
@@ -2072,10 +2072,10 @@ optimize_stmt (basic_block bb, gimple_stmt_iterator si)
bool modified_p = false;
old_stmt = stmt = gsi_stmt (si);
-
+
if (gimple_code (stmt) == GIMPLE_COND)
canonicalize_comparison (stmt);
-
+
update_stmt_if_modified (stmt);
opt_stats.num_stmts++;
@@ -2167,7 +2167,7 @@ optimize_stmt (basic_block bb, gimple_stmt_iterator si)
where it goes. If that is the case, then mark the CFG as altered.
This will cause us to later call remove_unreachable_blocks and
- cleanup_tree_cfg when it is safe to do so. It is not safe to
+ cleanup_tree_cfg when it is safe to do so. It is not safe to
clean things up here since removal of edges and such can trigger
the removal of PHI nodes, which in turn can release SSA_NAMEs to
the manager.
@@ -2191,7 +2191,7 @@ optimize_stmt (basic_block bb, gimple_stmt_iterator si)
if (gimple_modified_p (stmt) || modified_p)
{
tree val = NULL;
-
+
update_stmt_if_modified (stmt);
if (gimple_code (stmt) == GIMPLE_COND)
@@ -2259,7 +2259,7 @@ lookup_avail_expr (gimple stmt, bool insert)
if (slot == NULL)
{
free (element);
- return NULL_TREE;
+ return NULL_TREE;
}
if (*slot == NULL)
@@ -2469,7 +2469,7 @@ get_lhs_or_phi_result (gimple stmt)
nodes as well in an effort to pick up secondary optimization
opportunities. */
-static void
+static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
{
/* First verify that propagation is valid and isn't going to move a
@@ -2496,7 +2496,7 @@ propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_name
fprintf (dump_file, "'\n");
}
- /* Walk over every use of LHS and try to replace the use with RHS.
+ /* Walk over every use of LHS and try to replace the use with RHS.
At this point the only reason why such a propagation would not
be successful would be if the use occurs in an ASM_EXPR. */
FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
@@ -2506,7 +2506,7 @@ propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_name
into debug stmts will occur then. */
if (gimple_debug_bind_p (use_stmt))
continue;
-
+
/* It's not always safe to propagate into an ASM_EXPR. */
if (gimple_code (use_stmt) == GIMPLE_ASM
&& ! may_propagate_copy_into_asm (lhs))
@@ -2559,7 +2559,7 @@ propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_name
continue;
}
- /* From this point onward we are propagating into a
+ /* From this point onward we are propagating into a
real statement. Folding may (or may not) be possible,
we may expose new operands, expose dead EH edges,
etc. */
@@ -2680,7 +2680,7 @@ propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_name
}
}
- /* Ensure there is nothing else to do. */
+ /* Ensure there is nothing else to do. */
gcc_assert (!all || has_zero_uses (lhs));
/* If we were able to propagate away all uses of LHS, then
@@ -2804,7 +2804,7 @@ eliminate_degenerate_phis (void)
A set bit indicates that the statement or PHI node which
defines the SSA_NAME should be (re)examined to determine if
it has become a degenerate PHI or trivial const/copy propagation
- opportunity.
+ opportunity.
Experiments have shown we generally get better compilation

time behavior with bitmaps rather than sbitmaps. */
@@ -2882,7 +2882,7 @@ struct gimple_opt_pass pass_phi_only_cprop =
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_cleanup_cfg
- | TODO_dump_func
+ | TODO_dump_func
| TODO_ggc_collect
| TODO_verify_ssa
| TODO_verify_stmts
diff --git a/gcc/tree-ssa-dse.c b/gcc/tree-ssa-dse.c
index 9559b4cb2f5..37a548189a4 100644
--- a/gcc/tree-ssa-dse.c
+++ b/gcc/tree-ssa-dse.c
@@ -45,7 +45,7 @@ along with GCC; see the file COPYING3. If not see
In our SSA + virtual operand world we use immediate uses of virtual
operands to detect dead stores. If a store's virtual definition
is used precisely once by a later store to the same location which
- post dominates the first store, then the first store is dead.
+ post dominates the first store, then the first store is dead.
The single use of the store's virtual definition ensures that
there are no intervening aliased loads and the requirement that
@@ -56,7 +56,7 @@ along with GCC; see the file COPYING3. If not see
It may help to think of this as first moving the earlier store to
the point immediately before the later store. Again, the single
use of the virtual definition and the post-dominance relationship
- ensure that such movement would be safe. Clearly if there are
+ ensure that such movement would be safe. Clearly if there are
back to back stores, then the second is redundant.
Reviewing section 10.7.2 in Morgan's "Building an Optimizing Compiler"
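A minimal, hedged example of the situation the comment describes, written as plain C rather than taken from this patch: the first store's virtual definition has exactly one use, the second store to the same location, which post-dominates it, so the first store is dead.

void
dse_example (int *p)
{
  *p = 1;   /* dead: its VDEF is used only by the store below,        */
  *p = 2;   /* which writes the same location and post-dominates it.  */
}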
@@ -77,7 +77,7 @@ struct dse_global_data
};
/* We allocate a bitmap-per-block for stores which are encountered
- during the scan of that block. This allows us to restore the
+ during the scan of that block. This allows us to restore the
global bitmap of stores when we finish processing a block. */
struct dse_block_local_data
{
@@ -441,7 +441,7 @@ gate_dse (void)
return flag_tree_dse != 0;
}
-struct gimple_opt_pass pass_dse =
+struct gimple_opt_pass pass_dse =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 6ba800d8288..21517959370 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -41,7 +41,7 @@ along with GCC; see the file COPYING3. If not see
when we have a generalized tree combiner.
One class of common cases we handle is forward propagating a single use
- variable into a COND_EXPR.
+ variable into a COND_EXPR.
bb0:
x = a COND b;
@@ -51,13 +51,13 @@ along with GCC; see the file COPYING3. If not see
bb0:
if (a COND b) goto ... else goto ...
-
+
Similarly for the tests (x == 0), (x != 0), (x == 1) and (x != 1).
Or (assuming c1 and c2 are constants):
bb0:
- x = a + c1;
+ x = a + c1;
if (x EQ/NEQ c2) goto ... else goto ...
Will be transformed into:
@@ -66,7 +66,7 @@ along with GCC; see the file COPYING3. If not see
if (a EQ/NEQ (c2 - c1)) goto ... else goto ...
Similarly for x = a - c1.
-
+
Or
bb0:
@@ -331,7 +331,7 @@ remove_prop_source_from_use (tree name, gimple up_to_stmt)
/* Return the rhs of a gimple_assign STMT in a form of a single tree,
converted to type TYPE.
-
+
This should disappear, but is needed so we can combine expressions and use
the fold() interfaces. Long term, we need to develop folding and combine
routines that deal with gimple exclusively . */
@@ -387,14 +387,14 @@ combine_cond_expr_cond (location_t loc, enum tree_code code, tree type,
in GIMPLE_COND statement STMT into the conditional if that simplifies it.
Returns zero if no statement was changed, one if there were
changes and two if cfg_cleanup needs to run.
-
+
This must be kept in sync with forward_propagate_into_cond. */
static int
forward_propagate_into_gimple_cond (gimple stmt)
{
int did_something = 0;
- location_t loc = gimple_location (stmt);
+ location_t loc = gimple_location (stmt);
do {
tree tmp = NULL_TREE;
@@ -590,7 +590,7 @@ forward_propagate_into_cond (gimple_stmt_iterator *gsi_p)
return did_something;
}
-/* We've just substituted an ADDR_EXPR into stmt. Update all the
+/* We've just substituted an ADDR_EXPR into stmt. Update all the
relevant data structures to match. */
static void
@@ -657,7 +657,7 @@ forward_propagate_addr_into_variable_array_index (tree offset,
return false;
/* The RHS of the statement which defines OFFSET must be a
- multiplication of an object by the size of the array elements.
+ multiplication of an object by the size of the array elements.
This implicitly verifies that the size of the array elements
is constant. */
if (gimple_assign_rhs_code (offset_def) == MULT_EXPR
@@ -765,14 +765,14 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
return true;
}
- /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS.
+ /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS.
ADDR_EXPR will not appear on the LHS. */
lhsp = gimple_assign_lhs_ptr (use_stmt);
while (handled_component_p (*lhsp))
lhsp = &TREE_OPERAND (*lhsp, 0);
lhs = *lhsp;
- /* Now see if the LHS node is an INDIRECT_REF using NAME. If so,
+ /* Now see if the LHS node is an INDIRECT_REF using NAME. If so,
propagate the ADDR_EXPR into the use of NAME and fold the result. */
if (TREE_CODE (lhs) == INDIRECT_REF
&& TREE_OPERAND (lhs, 0) == name)
@@ -817,7 +817,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
return res;
}
- /* Now see if the RHS node is an INDIRECT_REF using NAME. If so,
+ /* Now see if the RHS node is an INDIRECT_REF using NAME. If so,
propagate the ADDR_EXPR into the use of NAME and try to
create a VCE and fold the result. */
if (TREE_CODE (rhs) == INDIRECT_REF
@@ -1107,9 +1107,9 @@ forward_propagate_comparison (gimple stmt)
/* If we have lhs = ~x (STMT), look and see if earlier we had x = ~y.
If so, we can change STMT into lhs = y which can later be copy
- propagated. Similarly for negation.
+ propagated. Similarly for negation.
- This could trivially be formulated as a forward propagation
+ This could trivially be formulated as a forward propagation
to immediate uses. However, we already had an implementation
from DOM which used backward propagation via the use-def links.
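A small illustrative C function (an assumption for exposition, not code from this patch) showing the bitwise-not case: after the propagation the final assignment can simply copy y.

int
not_not (int y)
{
  int x = ~y;
  int lhs = ~x;   /* the propagation described above turns this into lhs = y */
  return lhs;
}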
@@ -1372,7 +1372,7 @@ gate_forwprop (void)
return flag_tree_forwprop;
}
-struct gimple_opt_pass pass_forwprop =
+struct gimple_opt_pass pass_forwprop =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-ssa-ifcombine.c b/gcc/tree-ssa-ifcombine.c
index 335b4fd95fc..af9b15421be 100644
--- a/gcc/tree-ssa-ifcombine.c
+++ b/gcc/tree-ssa-ifcombine.c
@@ -657,7 +657,7 @@ gate_ifcombine (void)
return 1;
}
-struct gimple_opt_pass pass_tree_ifcombine =
+struct gimple_opt_pass pass_tree_ifcombine =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c
index 6d2fb32e585..d75edb5a061 100644
--- a/gcc/tree-ssa-live.c
+++ b/gcc/tree-ssa-live.c
@@ -60,7 +60,7 @@ var_map_base_init (var_map map)
int x, num_part, num;
tree var;
var_ann_t ann;
-
+
num = 0;
num_part = num_var_partitions (map);
@@ -163,8 +163,8 @@ delete_var_map (var_map map)
}
-/* This function will combine the partitions in MAP for VAR1 and VAR2. It
- Returns the partition which represents the new partition. If the two
+/* This function will combine the partitions in MAP for VAR1 and VAR2. It
+ Returns the partition which represents the new partition. If the two
partitions cannot be combined, NO_PARTITION is returned. */
int
@@ -175,7 +175,7 @@ var_union (var_map map, tree var1, tree var2)
gcc_assert (TREE_CODE (var1) == SSA_NAME);
gcc_assert (TREE_CODE (var2) == SSA_NAME);
- /* This is independent of partition_to_view. If partition_to_view is
+ /* This is independent of partition_to_view. If partition_to_view is
on, then whichever one of these partitions is absorbed will never have a
dereference into the partition_to_view array any more. */
@@ -196,12 +196,12 @@ var_union (var_map map, tree var1, tree var2)
return p3;
}
-
-/* Compress the partition numbers in MAP such that they fall in the range
+
+/* Compress the partition numbers in MAP such that they fall in the range
0..(num_partitions-1) instead of wherever they turned out during
the partitioning exercise. This removes any references to unused
partitions, thereby allowing bitmaps and other vectors to be much
- denser.
+ denser.
This is implemented such that compaction doesn't affect partitioning.
Ie., once partitions are created and possibly merged, running one
@@ -215,8 +215,8 @@ var_union (var_map map, tree var1, tree var2)
definitions for assignment to program variables. */
-/* Set MAP back to the initial state of having no partition view. Return a
- bitmap which has a bit set for each partition number which is in use in the
+/* Set MAP back to the initial state of having no partition view. Return a
+ bitmap which has a bit set for each partition number which is in use in the
varmap. */
static bitmap
@@ -256,11 +256,11 @@ partition_view_init (var_map map)
/* This routine will finalize the view data for MAP based on the partitions
- set in SELECTED. This is either the same bitmap returned from
+ set in SELECTED. This is either the same bitmap returned from
partition_view_init, or a trimmed down version if some of those partitions
were not desired in this view. SELECTED is freed before returning. */
-static void
+static void
partition_view_fini (var_map map, bitmap selected)
{
bitmap_iterator bi;
@@ -294,7 +294,7 @@ partition_view_fini (var_map map, bitmap selected)
}
-/* Create a partition view which includes all the used partitions in MAP. If
+/* Create a partition view which includes all the used partitions in MAP. If
WANT_BASES is true, create the base variable map as well. */
extern void
@@ -312,8 +312,8 @@ partition_view_normal (var_map map, bool want_bases)
}
-/* Create a partition view in MAP which includes just partitions which occur in
- the bitmap ONLY. If WANT_BASES is true, create the base variable map
+/* Create a partition view in MAP which includes just partitions which occur in
+ the bitmap ONLY. If WANT_BASES is true, create the base variable map
as well. */
extern void
@@ -403,7 +403,7 @@ mark_scope_block_unused (tree scope)
}
/* Look if the block is dead (by possibly eliminating its dead subblocks)
- and return true if so.
+ and return true if so.
Block is declared dead if:
1) No statements are associated with it.
2) Declares no live variables
@@ -430,7 +430,7 @@ remove_unused_scope_block_p (tree scope)
/* Debug info of nested function refers to the block of the
function. We might still call it even if all statements
of the function it was nested into were eliminated.
-
+
TODO: We can actually look into cgraph to see if function
will be output to file. */
if (TREE_CODE (*t) == FUNCTION_DECL)
@@ -463,12 +463,12 @@ remove_unused_scope_block_p (tree scope)
/* When we are not doing full debug info, we however can keep around
only the used variables for cfgexpand's memory packing saving quite
- a lot of memory.
+ a lot of memory.
For sake of -g3, we keep around those vars but we don't count this as
use of block, so innermost block with no used vars and no instructions
can be considered dead. We only want to keep around blocks user can
- breakpoint into and ask about value of optimized out variables.
+ breakpoint into and ask about value of optimized out variables.
Similarly we need to keep around types at least until all variables of
all nested blocks are gone. We track no information on whether given
@@ -572,7 +572,7 @@ remove_unused_scope_block_p (tree scope)
return unused;
}
-/* Mark all VAR_DECLS under *EXPR_P as used, so that they won't be
+/* Mark all VAR_DECLS under *EXPR_P as used, so that they won't be
eliminated during the tree->rtl conversion process. */
static inline void
@@ -849,7 +849,7 @@ new_tree_live_info (var_map map)
/* Free storage for live range info object LIVE. */
-void
+void
delete_tree_live_info (tree_live_info_p live)
{
int x;
@@ -869,12 +869,12 @@ delete_tree_live_info (tree_live_info_p live)
}
-/* Visit basic block BB and propagate any required live on entry bits from
- LIVE into the predecessors. VISITED is the bitmap of visited blocks.
+/* Visit basic block BB and propagate any required live on entry bits from
+ LIVE into the predecessors. VISITED is the bitmap of visited blocks.
TMP is a temporary work bitmap which is passed in to avoid reallocating
it each time. */
-static void
+static void
loe_visit_block (tree_live_info_p live, basic_block bb, sbitmap visited,
bitmap tmp)
{
@@ -894,12 +894,12 @@ loe_visit_block (tree_live_info_p live, basic_block bb, sbitmap visited,
if (pred_bb == ENTRY_BLOCK_PTR)
continue;
/* TMP is variables live-on-entry from BB that aren't defined in the
- predecessor block. This should be the live on entry vars to pred.
+ predecessor block. This should be the live on entry vars to pred.
Note that liveout is the DEFs in a block while live on entry is
being calculated. */
bitmap_and_compl (tmp, loe, live->liveout[pred_bb->index]);
- /* Add these bits to live-on-entry for the pred. if there are any
+ /* Add these bits to live-on-entry for the pred. if there are any
changes, and pred_bb has been visited already, add it to the
revisit stack. */
change = bitmap_ior_into (live_on_entry (live, pred_bb), tmp);
@@ -912,7 +912,7 @@ loe_visit_block (tree_live_info_p live, basic_block bb, sbitmap visited,
}
-/* Using LIVE, fill in all the live-on-entry blocks between the defs and uses
+/* Using LIVE, fill in all the live-on-entry blocks between the defs and uses
of all the variables. */
static void
@@ -999,7 +999,7 @@ set_var_live_on_entry (tree ssa_name, tree_live_info_p live)
basic_block use_bb = gimple_bb (use_stmt);
if (use_bb != def_bb)
add_block = use_bb;
- }
+ }
/* If there was a live on entry use, set the bit. */
if (add_block)
@@ -1040,7 +1040,7 @@ calculate_live_on_exit (tree_live_info_p liveinfo)
{
gimple phi = gsi_stmt (gsi);
for (i = 0; i < gimple_phi_num_args (phi); i++)
- {
+ {
tree t = PHI_ARG_DEF (phi, i);
int p;
@@ -1065,10 +1065,10 @@ calculate_live_on_exit (tree_live_info_p liveinfo)
}
-/* Given partition map MAP, calculate all the live on entry bitmaps for
+/* Given partition map MAP, calculate all the live on entry bitmaps for
each partition. Return a new live info object. */
-tree_live_info_p
+tree_live_info_p
calculate_live_ranges (var_map map)
{
tree var;
@@ -1242,7 +1242,7 @@ verify_live_on_entry (tree_live_info_p live)
fprintf (stderr, " in BB%d, ", tmp->index);
fprintf (stderr, "by:\n");
print_gimple_stmt (stderr, stmt, 0, TDF_SLIM);
- fprintf (stderr, "\nIt is also live-on-entry to entry BB %d",
+ fprintf (stderr, "\nIt is also live-on-entry to entry BB %d",
entry_block);
fprintf (stderr, " So it appears to have multiple defs.\n");
}
@@ -1268,7 +1268,7 @@ verify_live_on_entry (tree_live_info_p live)
else
if (d == var)
{
- /* The only way this var shouldn't be marked live on entry is
+ /* The only way this var shouldn't be marked live on entry is
if it occurs in a PHI argument of the block. */
size_t z;
bool ok = false;
@@ -1289,7 +1289,7 @@ verify_live_on_entry (tree_live_info_p live)
continue;
num++;
print_generic_expr (stderr, var, TDF_SLIM);
- fprintf (stderr, " is not marked live-on-entry to entry BB%d ",
+ fprintf (stderr, " is not marked live-on-entry to entry BB%d ",
entry_block);
fprintf (stderr, "but it is a default def so it should be.\n");
}
diff --git a/gcc/tree-ssa-live.h b/gcc/tree-ssa-live.h
index d7dd10dc0dc..f49f065d69d 100644
--- a/gcc/tree-ssa-live.h
+++ b/gcc/tree-ssa-live.h
@@ -27,19 +27,19 @@ along with GCC; see the file COPYING3. If not see
-/* Used to create the variable mapping when we go out of SSA form.
+/* Used to create the variable mapping when we go out of SSA form.
Mapping from an ssa_name to a partition number is maintained, as well as
partition number to back to ssa_name. A partition can also be represented
- by a non-ssa_name variable. This allows ssa_names and their partition to
+ by a non-ssa_name variable. This allows ssa_names and their partition to
be coalesced with live on entry compiler variables, as well as eventually
- having real compiler variables assigned to each partition as part of the
- final stage of going of of ssa.
+ having real compiler variables assigned to each partition as part of the
+ final stage of going of of ssa.
Non-ssa_names maintain their partition index in the variable annotation.
This data structure also supports "views", which work on a subset of all
- partitions. This allows the coalescer to decide what partitions are
+ partitions. This allows the coalescer to decide what partitions are
interesting to it, and only work with those partitions. Whenever the view
is changed, the partition numbers change, but none of the partition groupings
change. (ie, it is truly a view since it doesn't change anything)
@@ -104,9 +104,9 @@ num_var_partitions (var_map map)
}
-/* Given partition index I from MAP, return the variable which represents that
+/* Given partition index I from MAP, return the variable which represents that
partition. */
-
+
static inline tree
partition_to_var (var_map map, int i)
{
@@ -119,10 +119,10 @@ partition_to_var (var_map map, int i)
}
-/* Given ssa_name VERSION, if it has a partition in MAP, return the var it
+/* Given ssa_name VERSION, if it has a partition in MAP, return the var it
is associated with. Otherwise return NULL. */
-static inline tree
+static inline tree
version_to_var (var_map map, int version)
{
int part;
@@ -131,12 +131,12 @@ version_to_var (var_map map, int version)
part = map->partition_to_view[part];
if (part == NO_PARTITION)
return NULL_TREE;
-
+
return partition_to_var (map, part);
}
-
-/* Given VAR, return the partition number in MAP which contains it.
+
+/* Given VAR, return the partition number in MAP which contains it.
NO_PARTITION is returned if it's not in any partition. */
static inline int
@@ -172,7 +172,7 @@ var_to_partition_to_var (var_map map, tree var)
static inline int
basevar_index (var_map map, int partition)
{
- gcc_assert (partition >= 0
+ gcc_assert (partition >= 0
&& partition <= (int) num_var_partitions (map));
return map->partition_to_base_index[partition];
}
@@ -188,11 +188,11 @@ num_basevars (var_map map)
-/* This routine registers a partition for SSA_VAR with MAP. Any unregistered
- partitions may be filtered out by a view later. */
+/* This routine registers a partition for SSA_VAR with MAP. Any unregistered
+ partitions may be filtered out by a view later. */
static inline void
-register_ssa_partition (var_map map ATTRIBUTE_UNUSED,
+register_ssa_partition (var_map map ATTRIBUTE_UNUSED,
tree ssa_var ATTRIBUTE_UNUSED)
{
#if defined ENABLE_CHECKING
@@ -201,22 +201,22 @@ register_ssa_partition (var_map map ATTRIBUTE_UNUSED,
}
-/* ---------------- live on entry/exit info ------------------------------
+/* ---------------- live on entry/exit info ------------------------------
This structure is used to represent live range information on SSA based
trees. A partition map must be provided, and based on the active partitions,
live-on-entry information and live-on-exit information can be calculated.
- As well, partitions are marked as to whether they are global (live
+ As well, partitions are marked as to whether they are global (live
outside the basic block they are defined in).
- The live-on-entry information is per block. It provide a bitmap for
- each block which has a bit set for each partition that is live on entry to
+ The live-on-entry information is per block. It provide a bitmap for
+ each block which has a bit set for each partition that is live on entry to
that block.
The live-on-exit information is per block. It provides a bitmap for each
block indicating which partitions are live on exit from the block.
- For the purposes of this implementation, we treat the elements of a PHI
+ For the purposes of this implementation, we treat the elements of a PHI
as follows:
Uses in a PHI are considered LIVE-ON-EXIT to the block from which they
@@ -225,9 +225,9 @@ register_ssa_partition (var_map map ATTRIBUTE_UNUSED,
The Def of a PHI node is *not* considered live on entry to the block.
It is considered to be "define early" in the block. Picture it as each
- block having a stmt (or block-preheader) before the first real stmt in
+ block having a stmt (or block-preheader) before the first real stmt in
the block which defines all the variables that are defined by PHIs.
-
+
----------------------------------------------------------------------- */
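A hedged C example of the PHI convention described above (the function is invented for illustration): each PHI argument is live on exit from the block it arrives from, while the PHI result is treated as defined at the very top of the join block rather than live on entry to it.

int
phi_liveness (int c, int x, int y)
{
  int a;
  if (c)
    a = x + 1;   /* a_1: live on exit from this arm */
  else
    a = y - 1;   /* a_2: live on exit from this arm */
  /* Join block: a_3 = PHI <a_1, a_2>.  a_3 is "defined early" here,
     not live on entry to the block.  */
  return a;
}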
@@ -276,7 +276,7 @@ partition_is_global (tree_live_info_p live, int p)
}
-/* Return the bitmap from LIVE representing the live on entry blocks for
+/* Return the bitmap from LIVE representing the live on entry blocks for
partition P. */
static inline bitmap
@@ -306,7 +306,7 @@ live_on_exit (tree_live_info_p live, basic_block bb)
/* Return the partition map which the information in LIVE utilizes. */
-static inline var_map
+static inline var_map
live_var_map (tree_live_info_p live)
{
return live->map;
@@ -316,7 +316,7 @@ live_var_map (tree_live_info_p live)
/* Merge the live on entry information in LIVE for partitions P1 and P2. Place
the result into P1. Clear P2. */
-static inline void
+static inline void
live_merge_and_clear (tree_live_info_p live, int p1, int p2)
{
gcc_assert (live->livein[p1]);
@@ -328,7 +328,7 @@ live_merge_and_clear (tree_live_info_p live, int p1, int p2)
/* Mark partition P as live on entry to basic block BB in LIVE. */
-static inline void
+static inline void
make_live_on_entry (tree_live_info_p live, basic_block bb , int p)
{
bitmap_set_bit (live->livein[bb->index], p);
diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c
index dffaf49ba06..f5aabebf940 100644
--- a/gcc/tree-ssa-loop-ch.c
+++ b/gcc/tree-ssa-loop-ch.c
@@ -1,18 +1,18 @@
/* Loop header copying on trees.
Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -263,7 +263,7 @@ gate_ch (void)
return flag_tree_ch != 0;
}
-struct gimple_opt_pass pass_ch =
+struct gimple_opt_pass pass_ch =
{
{
GIMPLE_PASS,
@@ -278,7 +278,7 @@ struct gimple_opt_pass pass_ch =
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_cleanup_cfg | TODO_dump_func
+ TODO_cleanup_cfg | TODO_dump_func
| TODO_verify_ssa /* todo_flags_finish */
}
};
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 6c6a9f17a1d..1cc56595a9d 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1,19 +1,19 @@
/* Loop invariant motion.
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -251,13 +251,13 @@ clear_lim_data (gimple stmt)
/* Calls CBCK for each index in memory reference ADDR_P. There are two
kinds of situations handled; in each of these cases, the memory reference
and DATA are passed to the callback:
-
+
Access to an array: ARRAY_{RANGE_}REF (base, index). In this case we also
pass the pointer to the index to the callback.
Pointer dereference: INDIRECT_REF (addr). In this case we also pass the
pointer to addr to the callback.
-
+
If the callback returns false, the whole search stops and false is returned.
Otherwise the function returns true after traversing through the whole
reference *ADDR_P. */
@@ -452,14 +452,14 @@ outermost_invariant_loop (tree def, struct loop *loop)
/* DATA is a structure containing information associated with a statement
inside LOOP. DEF is one of the operands of this statement.
-
+
Find the outermost loop enclosing LOOP in that value of DEF is invariant
and record this in DATA->max_loop field. If DEF itself is defined inside
this loop as well (i.e. we need to hoist it out of the loop if we want
to hoist the statement represented by DATA), record the statement in that
DEF is defined to the DATA->depends list. Additionally if ADD_COST is true,
add the cost of the computation of DEF to the DATA->cost.
-
+
If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
static bool
@@ -657,7 +657,7 @@ mem_ref_in_stmt (gimple stmt)
If MUST_PRESERVE_EXEC is true, additionally choose such a loop that
we preserve the fact whether STMT is executed. It also fills other related
information to LIM_DATA (STMT).
-
+
The function returns false if STMT cannot be hoisted outside of the loop it
is defined in, and true otherwise. */
@@ -670,7 +670,7 @@ determine_max_movement (gimple stmt, bool must_preserve_exec)
struct lim_aux_data *lim_data = get_lim_data (stmt);
tree val;
ssa_op_iter iter;
-
+
if (must_preserve_exec)
level = ALWAYS_EXECUTED_IN (bb);
else
@@ -1138,7 +1138,7 @@ force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
return;
gcc_assert (TREE_CODE (op) == SSA_NAME);
-
+
stmt = SSA_NAME_DEF_STMT (op);
if (gimple_nop_p (stmt))
return;
@@ -1764,7 +1764,7 @@ gen_lsm_tmp_name (tree ref)
gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
lsm_tmp_name_add ("_RE");
break;
-
+
case IMAGPART_EXPR:
gen_lsm_tmp_name (TREE_OPERAND (ref, 0));
lsm_tmp_name_add ("_IM");
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index a04466a4027..df48dfd2bc8 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -1,24 +1,24 @@
/* Induction variable canonicalization.
Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This pass detects the loops that iterate a constant number of times,
- adds a canonical induction variable (step -1, tested against 0)
+ adds a canonical induction variable (step -1, tested against 0)
and replaces the exit test. This enables the less powerful rtl
level analysis to use this information.
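A hedged, source-level sketch of the effect described above (the pass works on GIMPLE; body and ivtmp are invented names used only for illustration): the counting-up exit test is replaced by a canonical counter that steps by -1 and is tested against 0.

void body (unsigned i);

/* Before: the exit test compares an increasing counter against 100.  */
void
before (void)
{
  unsigned i;
  for (i = 0; i < 100; i++)
    body (i);
}

/* Conceptually after: a canonical IV counts down to zero.  */
void
after (void)
{
  unsigned i, ivtmp = 100;
  for (i = 0; ivtmp != 0; i++, ivtmp--)   /* step -1, tested against 0 */
    body (i);
}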
@@ -155,7 +155,7 @@ constant_after_peeling (tree op, gimple stmt, struct loop *loop)
if (is_gimple_min_invariant (op))
return true;
-
+
/* We can still fold accesses to constant arrays when index is known. */
if (TREE_CODE (op) != SSA_NAME)
{
@@ -291,7 +291,7 @@ tree_estimate_loop_size (struct loop *loop, edge exit, struct loop_size *size)
fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
size->eliminated_by_peeling, size->last_iteration,
size->last_iteration_eliminated_by_peeling);
-
+
free (body);
}
@@ -323,7 +323,7 @@ estimated_unrolled_size (struct loop_size *size,
}
/* Tries to unroll LOOP completely, i.e. NITER times.
- UL determines which loops we are allowed to unroll.
+ UL determines which loops we are allowed to unroll.
EXIT is the exit of the loop that should be eliminated. */
static bool
@@ -431,9 +431,9 @@ try_unroll_loop_completely (struct loop *loop,
}
/* Adds a canonical induction variable to LOOP if suitable.
- CREATE_IV is true if we may create a new iv. UL determines
+ CREATE_IV is true if we may create a new iv. UL determines
which loops we are allowed to completely unroll. If TRY_EVAL is true, we try
- to determine the number of iterations of a loop by direct evaluation.
+ to determine the number of iterations of a loop by direct evaluation.
Returns true if cfg is changed. */
static bool
@@ -494,7 +494,7 @@ canonicalize_induction_variables (void)
loop_iterator li;
struct loop *loop;
bool changed = false;
-
+
FOR_EACH_LOOP (li, loop, 0)
{
changed |= canonicalize_loop_induction_variables (loop,
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index e89ee0e4ce8..759edb11770 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1,19 +1,19 @@
/* Induction variable optimizations.
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -56,7 +56,7 @@ along with GCC; see the file COPYING3. If not see
4) The trees are transformed to use the new variables, the dead code is
removed.
-
+
All of this is done loop by loop. Doing it globally is theoretically
possible, it might give a better performance and it might enable us
to decide costs more precisely, but getting all the interactions right
@@ -1076,7 +1076,7 @@ find_induction_variables (struct ivopts_data *data)
print_generic_expr (dump_file, niter, TDF_SLIM);
fprintf (dump_file, "\n\n");
};
-
+
fprintf (dump_file, "Induction variables:\n\n");
EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
@@ -1159,7 +1159,7 @@ find_interesting_uses_op (struct ivopts_data *data, tree op)
iv = get_iv (data, op);
if (!iv)
return NULL;
-
+
if (iv->have_use_for)
{
use = iv_use (data, iv->use_id);
@@ -1545,7 +1545,7 @@ may_be_unaligned_p (tree ref, tree step)
|| bitpos % GET_MODE_ALIGNMENT (mode) != 0
|| bitpos % BITS_PER_UNIT != 0)
return true;
-
+
if (!constant_multiple_of (step, al, &mul))
return true;
}
@@ -2097,7 +2097,7 @@ add_candidate_1 (struct ivopts_data *data,
unsigned i;
struct iv_cand *cand = NULL;
tree type, orig_type;
-
+
if (base)
{
orig_type = TREE_TYPE (base);
@@ -2267,7 +2267,7 @@ add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
it. The candidate computation is scheduled on all available positions. */
static void
-add_candidate (struct ivopts_data *data,
+add_candidate (struct ivopts_data *data,
tree base, tree step, bool important, struct iv_use *use)
{
if (ip_normal_pos (data->current_loop))
@@ -2604,7 +2604,7 @@ get_use_iv_cost (struct ivopts_data *data, struct iv_use *use,
return ret;
}
-
+
/* n_map_members is a power of two, so this computes modulo. */
s = cand->id & (use->n_map_members - 1);
for (i = s; i < use->n_map_members; i++)
@@ -2645,7 +2645,7 @@ produce_memory_decl_rtl (tree obj, int *regno)
addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
enum machine_mode address_mode = targetm.addr_space.address_mode (as);
rtx x;
-
+
gcc_assert (obj);
if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
{
@@ -2876,7 +2876,7 @@ get_computation_aff (struct loop *loop,
if (stmt_after_increment (loop, cand, at))
{
aff_tree cstep_aff;
-
+
if (common_type != uutype)
cstep_common = fold_convert (common_type, cstep);
else
@@ -2947,7 +2947,7 @@ add_cost (enum machine_mode mode, bool speed)
cost = 1;
costs[mode] = cost;
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Addition in %s costs %d\n",
GET_MODE_NAME (mode), cost);
@@ -3012,7 +3012,7 @@ multiply_by_cost (HOST_WIDE_INT cst, enum machine_mode mode, bool speed)
gen_int_mode (cst, mode), NULL_RTX, 0);
seq = get_insns ();
end_sequence ();
-
+
cost = seq_cost (seq, speed);
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -3241,7 +3241,7 @@ get_address_cost (bool symbol_present, bool var_present,
base = gen_int_mode (off, address_mode);
else
base = NULL_RTX;
-
+
if (base)
addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, base);
@@ -3293,7 +3293,7 @@ get_address_cost (bool symbol_present, bool var_present,
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Address costs:\n");
-
+
for (i = 0; i < 16; i++)
{
sym_p = i & 1;
@@ -3498,7 +3498,7 @@ force_expr_to_var_cost (tree expr, bool speed)
case MULT_EXPR:
if (cst_and_fits_in_hwi (op0))
cost = new_cost (multiply_by_cost (int_cst_value (op0), mode, speed), 0);
- else if (cst_and_fits_in_hwi (op1))
+ else if (cst_and_fits_in_hwi (op1))
cost = new_cost (multiply_by_cost (int_cst_value (op1), mode, speed), 0);
else
return new_cost (target_spill_cost [speed], 0);
@@ -3553,7 +3553,7 @@ split_address_cost (struct ivopts_data *data,
tree toffset;
enum machine_mode mode;
int unsignedp, volatilep;
-
+
core = get_inner_reference (addr, &bitsize, &bitpos, &toffset, &mode,
&unsignedp, &volatilep, false);
@@ -3576,7 +3576,7 @@ split_address_cost (struct ivopts_data *data,
*var_present = false;
return zero_cost;
}
-
+
*symbol_present = false;
*var_present = true;
return zero_cost;
@@ -3750,7 +3750,7 @@ get_computation_cost_at (struct ivopts_data *data,
if (!constant_multiple_of (ustep, cstep, &rat))
return infinite_cost;
-
+
if (double_int_fits_in_shwi_p (rat))
ratio = double_int_to_shwi (rat);
else
@@ -3761,14 +3761,14 @@ get_computation_cost_at (struct ivopts_data *data,
/* use = ubase + ratio * (var - cbase). If either cbase is a constant
or ratio == 1, it is better to handle this like
-
+
ubase - ratio * cbase + ratio * var
-
+
(also holds in the case ratio == -1, TODO. */
if (cst_and_fits_in_hwi (cbase))
{
- offset = - ratio * int_cst_value (cbase);
+ offset = - ratio * int_cst_value (cbase);
cost = difference_cost (data,
ubase, build_int_cst (utype, 0),
&symbol_present, &var_present, &offset,
@@ -4341,7 +4341,7 @@ determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
if (cand->pos != IP_ORIGINAL
|| DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
cost++;
-
+
/* Prefer not to insert statements into latch unless there are some
already (so that we do not create unnecessary jumps). */
if (cand->pos == IP_END
@@ -4414,7 +4414,7 @@ determine_set_costs (struct ivopts_data *data)
etc.). For now the reserve is a constant 3.
Let I be the number of induction variables.
-
+
-- if U + I + R <= A, the cost is I * SMALL_COST (just not to encourage
make a lot of ivs without a reason).
-- if A - R < U + I <= A, the cost is I * PRES_COST
@@ -4893,7 +4893,7 @@ iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
if (!iv_ca_has_deps (ivs, new_cp))
continue;
-
+
if (!cheaper_cost_pair (new_cp, old_cp))
continue;
@@ -4948,7 +4948,7 @@ iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
continue;
if (!iv_ca_has_deps (ivs, cp))
continue;
-
+
if (!cheaper_cost_pair (cp, new_cp))
continue;
@@ -4969,7 +4969,7 @@ iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
continue;
if (!iv_ca_has_deps (ivs, cp))
continue;
-
+
if (!cheaper_cost_pair (cp, new_cp))
continue;
@@ -5117,7 +5117,7 @@ try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
/* Already tried this. */
if (cand->important && cand->iv->base_object == NULL_TREE)
continue;
-
+
if (iv_ca_cand_used_p (ivs, cand))
continue;
@@ -5179,7 +5179,7 @@ try_improve_iv_set (struct ivopts_data *data, struct iv_ca *ivs)
for (i = 0; i < n_iv_cands (data); i++)
{
cand = iv_cand (data, i);
-
+
if (iv_ca_cand_used_p (ivs, cand))
continue;
@@ -5310,10 +5310,10 @@ create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
/* Rewrite the increment so that it uses var_before directly. */
find_interesting_uses_op (data, cand->var_after)->selected = cand;
-
+
return;
}
-
+
gimple_add_tmp_var (cand->var_before);
add_referenced_var (cand->var_before);
@@ -5606,7 +5606,7 @@ rewrite_use (struct ivopts_data *data, struct iv_use *use, struct iv_cand *cand)
default:
gcc_unreachable ();
}
-
+
update_stmt (use->stmt);
}
@@ -5763,7 +5763,7 @@ tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop)
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Processing loop %d\n", loop->num);
-
+
exit = single_dom_exit (loop);
if (exit)
{
@@ -5807,7 +5807,7 @@ tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop)
/* Create the new induction variables (item 4, part 1). */
create_new_ivs (data, iv_ca);
iv_ca_free (&iv_ca);
-
+
/* Rewrite the uses (item 4, part 2). */
rewrite_uses (data);
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index bc5c3392a0f..7c54c87e74f 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -1,18 +1,18 @@
/* High-level loop manipulation functions.
Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -43,7 +43,7 @@ along with GCC; see the file COPYING3. If not see
It is expected that neither BASE nor STEP are shared with other expressions
(unless the sharing rules allow this). Use VAR as a base var_decl for it
(if NULL, a new temporary will be created). The increment will occur at
- INCR_POS (after it if AFTER is true, before it otherwise). INCR_POS and
+ INCR_POS (after it if AFTER is true, before it otherwise). INCR_POS and
AFTER can be computed using standard_iv_increment_position. The ssa versions
of the variable before and after increment will be stored in VAR_BEFORE and
VAR_AFTER (unless they are NULL). */
@@ -302,11 +302,11 @@ find_uses_to_rename_bb (basic_block bb, bitmap *use_blocks, bitmap need_phis)
for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
find_uses_to_rename_use (bb, PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e),
use_blocks, need_phis);
-
+
for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
find_uses_to_rename_stmt (gsi_stmt (bsi), use_blocks, need_phis);
}
-
+
/* Marks names that are used outside of the loop they are defined in
for rewrite. Records the set of blocks in that the ssa
names are defined to USE_BLOCKS. If CHANGED_BBS is not NULL,
@@ -360,7 +360,7 @@ find_uses_to_rename (bitmap changed_bbs, bitmap *use_blocks, bitmap need_phis)
Looking from the outer loop with the normal SSA form, the first use of k
is not well-behaved, while the second one is an induction variable with
base 99 and step 1.
-
+
If CHANGED_BBS is not NULL, we look for uses outside loops only in
the basic blocks in this set.
@@ -414,7 +414,7 @@ check_loop_closed_ssa_use (basic_block bb, tree use)
{
gimple def;
basic_block def_bb;
-
+
if (TREE_CODE (use) != SSA_NAME || !is_gimple_reg (use))
return;
@@ -818,7 +818,7 @@ scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
If N is number of iterations of the loop and MAY_BE_ZERO is the condition
under that loop exits in the first iteration even if N != 0,
-
+
while (1)
{
x = phi (init, next);
@@ -831,7 +831,7 @@ scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
becomes (with possibly the exit conditions formulated a bit differently,
avoiding the need to create a new iv):
-
+
if (MAY_BE_ZERO || N < FACTOR)
goto rest;
@@ -847,7 +847,7 @@ scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
pre;
post;
N -= FACTOR;
-
+
} while (N >= FACTOR);
rest:
@@ -862,7 +862,7 @@ scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
break;
post;
}
-
+
Before the loop is unrolled, TRANSFORM is called for it (only for the
unrolled loop, but not for its versioned copy). DATA is passed to
TRANSFORM. */
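A concrete, hedged rendering of the pseudo-code above for FACTOR == 2, written as plain C for illustration only (body, left and i are invented names; the real transformation rewrites the loop's exit condition and induction variable in GIMPLE).

void body (unsigned i);

void
unrolled_by_two (unsigned n)
{
  unsigned i = 0, left = n;   /* "N" from the comment above */

  if (left >= 2)
    do
      {
        body (i); i++;
        body (i); i++;
        left -= 2;            /* N -= FACTOR */
      }
    while (left >= 2);        /* while (N >= FACTOR) */

  /* "rest:" -- the remaining 0 or 1 iterations.  */
  for (; left > 0; left--, i++)
    body (i);
}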
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 405f3cafb5f..7dbb8c3fe25 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -1,19 +1,19 @@
/* Functions to determine/estimate number of iterations of a loop.
Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -255,7 +255,7 @@ refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
return;
default:
return;
- }
+ }
mpz_init (offc0);
mpz_init (offc1);
@@ -297,7 +297,7 @@ refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
overflow decreases the appropriate offset by M, and underflow
increases it by M. The above inequality would not necessarily be
true if
-
+
-- VARX + OFFX underflows and VARX + OFFC0 does not, or
VARX + OFFC0 overflows, but VARX + OFFX does not.
This may only happen if OFFX < OFFC0.
@@ -352,7 +352,7 @@ end:
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
The subtraction is considered to be performed in arbitrary precision,
without overflows.
-
+
We do not attempt to be too clever regarding the value ranges of X and
Y; most of the time, they are just integers or ssa names offsetted by
integer. However, we try to use the information contained in the
@@ -650,7 +650,7 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final,
niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter->assumptions, assumption);
}
-
+
c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
@@ -847,7 +847,7 @@ assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
if (!integer_nonzerop (assumption))
niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter->assumptions, assumption);
-
+
iv0->no_overflow = true;
iv1->no_overflow = true;
return true;
@@ -869,10 +869,10 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
/* We are going to compute the number of iterations as
(iv1->base - iv0->base + step - 1) / step, computed in the unsigned
- variant of TYPE. This formula only works if
-
+ variant of TYPE. This formula only works if
+
-step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
-
+
(where MAX is the maximum value of the unsigned variant of TYPE, and
the computations in this formula are performed in full precision
(without overflows).
@@ -882,7 +882,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
and for loops iv0->base < iv1->base - step * i the condition
iv0->base < iv1->base + step, due to loop header copying, which enable us
to prove the lower bound.
-
+
The upper bound is more complicated. Unless the expressions for initial
and final value themselves contain enough information, we usually cannot
derive it from the context. */
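A small worked instance of the formula above, purely illustrative: for a loop running i from 0 up to (but not including) 7 in steps of 3, the iterations are i = 0, 3, 6.

#include <assert.h>

int
main (void)
{
  unsigned base0 = 0, base1 = 7, step = 3;
  /* niter = (iv1->base - iv0->base + step - 1) / step, in the unsigned type.  */
  unsigned niter = (base1 - base0 + step - 1) / step;
  assert (niter == 3);        /* i = 0, 3, 6 */
  return 0;
}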
@@ -920,7 +920,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
if (rolls_p && no_overflow_p)
return;
-
+
type1 = type;
if (POINTER_TYPE_P (type))
type1 = sizetype;
@@ -945,8 +945,8 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
}
/* And then we can compute iv0->base - diff, and compare it with
- iv1->base. */
- mbzl = fold_build2 (MINUS_EXPR, type1,
+ iv1->base. */
+ mbzl = fold_build2 (MINUS_EXPR, type1,
fold_convert (type1, iv0->base), diff);
mbzr = fold_convert (type1, iv1->base);
}
@@ -1020,7 +1020,7 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
or
for (i = iv1->base; i > iv0->base; i--).
-
+
In both cases # of iterations is iv1->base - iv0->base, assuming that
iv1->base >= iv0->base.
@@ -1108,7 +1108,7 @@ number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
value of the type. This we must know anyway, since if it is
equal to this value, the loop rolls forever. We do not check
- this condition for pointer type ivs, as the code cannot rely on
+ this condition for pointer type ivs, as the code cannot rely on
the object to that the pointer points being placed at the end of
the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
not defined for pointers). */
@@ -1183,7 +1183,7 @@ dump_affine_iv (FILE *file, affine_iv *iv)
exited (including possibly non-returning function calls, exceptions, etc.)
-- in this case we can use the information whether the control induction
variables can overflow or not in a more efficient way.
-
+
The results (number of iterations and assumptions as described in
comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
Returns false if it fails to determine number of iterations, true if it
@@ -1280,7 +1280,7 @@ number_of_iterations_cond (struct loop *loop,
niter->max = double_int_zero;
return true;
}
-
+
/* OK, now we know we have a senseful loop. Handle several cases, depending
on what comparison operator is used. */
bound_difference (loop, iv1->base, iv0->base, &bnds);
@@ -1801,7 +1801,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
default:
return false;
}
-
+
op0 = gimple_cond_lhs (stmt);
op1 = gimple_cond_rhs (stmt);
type = TREE_TYPE (op0);
@@ -1809,7 +1809,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
if (TREE_CODE (type) != INTEGER_TYPE
&& !POINTER_TYPE_P (type))
return false;
-
+
if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
return false;
if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
@@ -1862,7 +1862,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
{
const char *wording;
location_t loc = gimple_location (stmt);
-
+
/* We can provide a more specific warning if one of the operator is
constant and the other advances by +1 or -1. */
if (!integer_zerop (iv1.step)
@@ -1874,7 +1874,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
? N_("assuming that the loop is not infinite")
: N_("cannot optimize possibly infinite loops");
else
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop counter does not overflow")
: N_("cannot optimize loop, the loop counter may overflow");
@@ -1976,7 +1976,7 @@ finite_loop_p (struct loop *loop)
loop->num);
return true;
}
-
+
exits = get_loop_exit_edges (loop);
for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
{
@@ -2025,7 +2025,7 @@ chain_of_csts_start (struct loop *loop, tree x)
if (!bb
|| !flow_bb_inside_loop_p (loop, bb))
return NULL;
-
+
if (gimple_code (stmt) == GIMPLE_PHI)
{
if (bb == loop->header)
@@ -2058,7 +2058,7 @@ chain_of_csts_start (struct loop *loop, tree x)
* the initial value of the phi node is constant
* the value of the phi node in the next iteration can be derived from the
value in the current iteration by a chain of operations with constants.
-
+
If such phi node exists, it is returned, otherwise NULL is returned. */
static gimple
@@ -2089,8 +2089,8 @@ get_base_for (struct loop *loop, tree x)
return phi;
}
-/* Given an expression X, then
-
+/* Given an expression X, then
+
* if X is NULL_TREE, we return the constant BASE.
* otherwise X is a SSA name, whose value in the considered loop is derived
by a chain of operations with constant from a result of a phi node in
@@ -2305,7 +2305,7 @@ derive_constant_upper_bound_assign (gimple stmt)
/* Returns a constant upper bound on the value of expression VAL. VAL
is considered to be unsigned. If its type is signed, its value must
be nonnegative. */
-
+
static double_int
derive_constant_upper_bound (tree val)
{
@@ -2319,7 +2319,7 @@ derive_constant_upper_bound (tree val)
/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
whose type is TYPE. The expression is considered to be unsigned. If
its type is signed, its value must be nonnegative. */
-
+
static double_int
derive_constant_upper_bound_ops (tree type, tree op0,
enum tree_code code, tree op1)
@@ -2451,7 +2451,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
return max;
return derive_constant_upper_bound_assign (stmt);
- default:
+ default:
return max;
}
}
@@ -2622,7 +2622,7 @@ array_at_struct_end_p (tree ref)
its declaration. */
if (!base || !INDIRECT_REF_P (base))
return false;
-
+
for (;handled_component_p (ref); ref = parent)
{
parent = TREE_OPERAND (ref, 0);
@@ -2697,7 +2697,7 @@ idx_infer_loop_bounds (tree base, tree *idx, void *dta)
low = array_ref_low_bound (base);
high = array_ref_up_bound (base);
-
+
/* The case of nonconstant bounds could be handled, but it would be
complicated. */
if (TREE_CODE (low) != INTEGER_CST
@@ -2731,7 +2731,7 @@ idx_infer_loop_bounds (tree base, tree *idx, void *dta)
next = fold_binary (PLUS_EXPR, type, low, step);
else
next = fold_binary (PLUS_EXPR, type, high, step);
-
+
if (tree_int_cst_compare (low, next) <= 0
&& tree_int_cst_compare (next, high) <= 0)
return true;
@@ -2851,7 +2851,7 @@ infer_loop_bounds_from_undefined (struct loop *loop)
gimple_stmt_iterator bsi;
basic_block bb;
bool reliable;
-
+
bbs = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
@@ -2931,7 +2931,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
true, true, true);
}
VEC_free (edge, heap, exits);
-
+
infer_loop_bounds_from_undefined (loop);
/* If we have a measured profile, use it to estimate the number of
@@ -3011,7 +3011,7 @@ stmt_dominates_stmt_p (gimple s1, gimple s2)
static bool
n_of_executions_at_most (gimple stmt,
- struct nb_iter_bound *niter_bound,
+ struct nb_iter_bound *niter_bound,
tree niter)
{
double_int bound = niter_bound->bound;
@@ -3027,7 +3027,7 @@ n_of_executions_at_most (gimple stmt,
/* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
times. This means that:
-
+
-- if NITER_BOUND->is_exit is true, then everything before
NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
times, and everything after it at most NITER_BOUND->bound times.
@@ -3087,13 +3087,13 @@ nowrap_type_p (tree type)
enough with respect to the step and initial condition in order to
keep the evolution confined in TYPEs bounds. Return true when the
iv is known to overflow or when the property is not computable.
-
+
USE_OVERFLOW_SEMANTICS is true if this function should assume that
the rules for overflow of the given language apply (e.g., that signed
   arithmetic in C does not overflow). */
bool
-scev_probably_wraps_p (tree base, tree step,
+scev_probably_wraps_p (tree base, tree step,
gimple at_stmt, struct loop *loop,
bool use_overflow_semantics)
{
@@ -3107,7 +3107,7 @@ scev_probably_wraps_p (tree base, tree step,
We used to test for the following situation that frequently appears
during address arithmetics:
-
+
D.1621_13 = (long unsigned intD.4) D.1620_12;
D.1622_14 = D.1621_13 * 8;
D.1623_15 = (doubleD.29 *) D.1622_14;
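As an illustration of what number_of_iterations_exit computes, here is a minimal standalone C sketch (not GCC code; the function name and the no-overflow assumption are mine):

#include <stdio.h>

/* For an exit test `i < bound' where i starts at `base' and advances by
   `step' > 0, the loop body runs ceil ((bound - base) / step) times,
   assuming the arithmetic does not wrap.  */
static unsigned long
niter_lt (unsigned long base, unsigned long bound, unsigned long step)
{
  if (base >= bound)
    return 0;
  return (bound - base + step - 1) / step;
}

int
main (void)
{
  /* for (i = 3; i < 100; i += 7) executes its body 14 times.  */
  printf ("%lu\n", niter_lt (3, 100, 7));
  return 0;
}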
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index 60f5a2f9b0d..2769c04ce0b 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -1,18 +1,18 @@
/* Array prefetching.
Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -99,12 +99,12 @@ along with GCC; see the file COPYING3. If not see
while still within this bound (starting with those with lowest
prefetch_mod, since they are responsible for most of the cache
misses).
-
+
5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
and PREFETCH_BEFORE requirements (within some bounds), and to avoid
prefetching nonaccessed memory.
TODO -- actually implement peeling.
-
+
6) We actually emit the prefetch instructions. ??? Perhaps emit the
prefetch instructions with guards in cases where 5) was not sufficient
to satisfy the constraints?
@@ -114,18 +114,18 @@ along with GCC; see the file COPYING3. If not see
   model has two heuristics:
1. A heuristic that determines whether the given loop has enough CPU
ops that can be overlapped with cache missing memory ops.
- If not, the loop won't benefit from prefetching. This is implemented
- by requirung the ratio between the instruction count and the mem ref
+ If not, the loop won't benefit from prefetching. This is implemented
+ by requiring the ratio between the instruction count and the mem ref
count to be above a certain minimum.
2. A heuristic that disables prefetching in a loop with an unknown trip
- count if the prefetching cost is above a certain limit. The relative
+ count if the prefetching cost is above a certain limit. The relative
prefetching cost is estimated by taking the ratio between the
prefetch count and the total instruction count (this models the I-cache
cost).
The limits used in these heuristics are defined as parameters with
- reasonable default values. Machine-specific default values will be
+ reasonable default values. Machine-specific default values will be
added later.
-
+
Some other TODO:
-- write and use more general reuse analysis (that could be also used
in other cache aimed loop optimizations)
@@ -451,7 +451,7 @@ analyze_ref (struct loop *loop, tree *ref_p, tree *base,
off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
bit_offset = TREE_INT_CST_LOW (off);
gcc_assert (bit_offset % BITS_PER_UNIT == 0);
-
+
*delta += bit_offset / BITS_PER_UNIT;
}
@@ -593,15 +593,15 @@ ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
return (x + by - 1) / by;
}
-/* Given a CACHE_LINE_SIZE and two inductive memory references
- with a common STEP greater than CACHE_LINE_SIZE and an address
- difference DELTA, compute the probability that they will fall
- in different cache lines. DISTINCT_ITERS is the number of
- distinct iterations after which the pattern repeats itself.
+/* Given a CACHE_LINE_SIZE and two inductive memory references
+ with a common STEP greater than CACHE_LINE_SIZE and an address
+ difference DELTA, compute the probability that they will fall
+ in different cache lines. DISTINCT_ITERS is the number of
+ distinct iterations after which the pattern repeats itself.
ALIGN_UNIT is the unit of alignment in bytes. */
static int
-compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
+compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
HOST_WIDE_INT step, HOST_WIDE_INT delta,
unsigned HOST_WIDE_INT distinct_iters,
int align_unit)
@@ -612,7 +612,7 @@ compute_miss_rate (unsigned HOST_WIDE_INT cache_line_size,
total_positions = 0;
miss_positions = 0;
-
+
/* Iterate through all possible alignments of the first
memory reference within its cache line. */
for (align = 0; align < cache_line_size; align += align_unit)
@@ -657,7 +657,7 @@ prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
former. */
if (by_is_before)
ref->prefetch_before = 0;
-
+
return;
}
@@ -711,11 +711,11 @@ prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
return;
}
- /* A more complicated case with step > prefetch_block. First reduce
+ /* A more complicated case with step > prefetch_block. First reduce
the ratio between the step and the cache line size to its simplest
- terms. The resulting denominator will then represent the number of
- distinct iterations after which each address will go back to its
- initial location within the cache line. This computation assumes
+ terms. The resulting denominator will then represent the number of
+ distinct iterations after which each address will go back to its
+ initial location within the cache line. This computation assumes
that PREFETCH_BLOCK is a power of two. */
prefetch_block = PREFETCH_BLOCK;
reduced_prefetch_block = prefetch_block;
@@ -731,7 +731,7 @@ prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
delta %= step;
ref_type = TREE_TYPE (ref->mem);
align_unit = TYPE_ALIGN (ref_type) / 8;
- miss_rate = compute_miss_rate(prefetch_block, step, delta,
+ miss_rate = compute_miss_rate(prefetch_block, step, delta,
reduced_prefetch_block, align_unit);
if (miss_rate <= ACCEPTABLE_MISS_RATE)
{
@@ -744,9 +744,9 @@ prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
/* Try also the following iteration. */
prefetch_before++;
delta = step - delta;
- miss_rate = compute_miss_rate(prefetch_block, step, delta,
+ miss_rate = compute_miss_rate(prefetch_block, step, delta,
reduced_prefetch_block, align_unit);
- if (miss_rate <= ACCEPTABLE_MISS_RATE)
+ if (miss_rate <= ACCEPTABLE_MISS_RATE)
{
if (prefetch_before < ref->prefetch_before)
ref->prefetch_before = prefetch_before;
@@ -1314,7 +1314,7 @@ self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
know its stride. */
while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
ref = TREE_OPERAND (ref, 0);
-
+
if (TREE_CODE (ref) == ARRAY_REF)
{
stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
@@ -1457,7 +1457,7 @@ determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
/* If the dependence cannot be analyzed, assume that there might be
a reuse. */
dist = 0;
-
+
ref->independent_p = false;
refb->independent_p = false;
}
@@ -1525,14 +1525,14 @@ determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
/* Do a cost-benefit analysis to determine if prefetching is profitable
for the current loop given the following parameters:
AHEAD: the iteration ahead distance,
- EST_NITER: the estimated trip count,
+ EST_NITER: the estimated trip count,
NINSNS: estimated number of instructions in the loop,
PREFETCH_COUNT: an estimate of the number of prefetches
MEM_REF_COUNT: total number of memory references in the loop. */
-static bool
-is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
- unsigned ninsns, unsigned prefetch_count,
+static bool
+is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
+ unsigned ninsns, unsigned prefetch_count,
unsigned mem_ref_count)
{
int insn_to_mem_ratio, insn_to_prefetch_ratio;
@@ -1540,41 +1540,41 @@ is_loop_prefetching_profitable (unsigned ahead, HOST_WIDE_INT est_niter,
if (mem_ref_count == 0)
return false;
- /* Prefetching improves performance by overlapping cache missing
- memory accesses with CPU operations. If the loop does not have
- enough CPU operations to overlap with memory operations, prefetching
- won't give a significant benefit. One approximate way of checking
- this is to require the ratio of instructions to memory references to
+ /* Prefetching improves performance by overlapping cache missing
+ memory accesses with CPU operations. If the loop does not have
+ enough CPU operations to overlap with memory operations, prefetching
+ won't give a significant benefit. One approximate way of checking
+ this is to require the ratio of instructions to memory references to
be above a certain limit. This approximation works well in practice.
TODO: Implement a more precise computation by estimating the time
for each CPU or memory op in the loop. Time estimates for memory ops
should account for cache misses. */
- insn_to_mem_ratio = ninsns / mem_ref_count;
+ insn_to_mem_ratio = ninsns / mem_ref_count;
if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
return false;
/* Profitability of prefetching is highly dependent on the trip count.
- For a given AHEAD distance, the first AHEAD iterations do not benefit
- from prefetching, and the last AHEAD iterations execute useless
+ For a given AHEAD distance, the first AHEAD iterations do not benefit
+ from prefetching, and the last AHEAD iterations execute useless
prefetches. So, if the trip count is not large enough relative to AHEAD,
prefetching may cause serious performance degradation. To avoid this
- problem when the trip count is not known at compile time, we
+ problem when the trip count is not known at compile time, we
conservatively skip loops with high prefetching costs. For now, only
- the I-cache cost is considered. The relative I-cache cost is estimated
+ the I-cache cost is considered. The relative I-cache cost is estimated
by taking the ratio between the number of prefetches and the total
number of instructions. Since we are using integer arithmetic, we
- compute the reciprocal of this ratio.
+ compute the reciprocal of this ratio.
TODO: Account for loop unrolling, which may reduce the costs of
- shorter stride prefetches. Note that not accounting for loop
+ shorter stride prefetches. Note that not accounting for loop
unrolling over-estimates the cost and hence gives more conservative
results. */
if (est_niter < 0)
{
- insn_to_prefetch_ratio = ninsns / prefetch_count;
+ insn_to_prefetch_ratio = ninsns / prefetch_count;
return insn_to_prefetch_ratio >= MIN_INSN_TO_PREFETCH_RATIO;
}
-
+
if (est_niter <= (HOST_WIDE_INT) ahead)
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -1626,19 +1626,19 @@ loop_prefetch_arrays (struct loop *loop)
the loop body. */
time = tree_num_loop_insns (loop, &eni_time_weights);
ahead = (PREFETCH_LATENCY + time - 1) / time;
- est_niter = estimated_loop_iterations_int (loop, false);
+ est_niter = estimated_loop_iterations_int (loop, false);
ninsns = tree_num_loop_insns (loop, &eni_size_weights);
unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
est_niter);
if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
+ fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
HOST_WIDE_INT_PRINT_DEC "\n"
- "insn count %d, mem ref count %d, prefetch count %d\n",
- ahead, unroll_factor, est_niter,
- ninsns, mem_ref_count, prefetch_count);
+ "insn count %d, mem ref count %d, prefetch count %d\n",
+ ahead, unroll_factor, est_niter,
+ ninsns, mem_ref_count, prefetch_count);
- if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns,
+ if (!is_loop_prefetching_profitable (ahead, est_niter, ninsns,
prefetch_count, mem_ref_count))
goto fail;
@@ -1693,10 +1693,10 @@ tree_ssa_prefetch_arrays (void)
fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
- fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
- fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
+ fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
+ fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
MIN_INSN_TO_PREFETCH_RATIO);
- fprintf (dump_file, " min insn-to-mem ratio: %d \n",
+ fprintf (dump_file, " min insn-to-mem ratio: %d \n",
PREFETCH_MIN_INSN_TO_MEM_RATIO);
fprintf (dump_file, "\n");
}
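A standalone sketch of the two profitability heuristics documented in this file's comments; the threshold values below are placeholders, not the PARAM defaults the pass actually uses:

#include <stdbool.h>

#define MIN_INSN_TO_MEM_RATIO       3   /* placeholder threshold */
#define MIN_INSN_TO_PREFETCH_RATIO  9   /* placeholder threshold */

static bool
prefetching_profitable_p (long est_niter, unsigned ninsns,
                          unsigned prefetch_count, unsigned mem_ref_count)
{
  /* Heuristic 1: require enough non-memory work per memory reference
     to overlap with the cache misses.  */
  if (mem_ref_count == 0
      || ninsns / mem_ref_count < MIN_INSN_TO_MEM_RATIO)
    return false;

  /* Heuristic 2: with an unknown trip count, require the prefetches to
     be a small fraction of the loop body (the reciprocal of the
     relative I-cache cost described above).  */
  if (est_niter < 0 && prefetch_count != 0)
    return ninsns / prefetch_count >= MIN_INSN_TO_PREFETCH_RATIO;

  return true;
}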
diff --git a/gcc/tree-ssa-loop-unswitch.c b/gcc/tree-ssa-loop-unswitch.c
index 8f200be81c1..5b9ba90469c 100644
--- a/gcc/tree-ssa-loop-unswitch.c
+++ b/gcc/tree-ssa-loop-unswitch.c
@@ -1,18 +1,18 @@
/* Loop unswitching.
Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -216,7 +216,7 @@ tree_unswitch_single_loop (struct loop *loop, int num)
i = 0;
bbs = get_loop_body (loop);
-
+
while (1)
{
/* Find a bb to unswitch on. */
@@ -294,7 +294,7 @@ tree_unswitch_loop (struct loop *loop,
extract_true_false_edges_from_block (unswitch_on, &edge_true, &edge_false);
prob_true = edge_true->probability;
- return loop_version (loop, unshare_expr (cond),
+ return loop_version (loop, unshare_expr (cond),
NULL, prob_true, prob_true,
REG_BR_PROB_BASE - prob_true, false);
}
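The source-level effect of the unswitching that tree_unswitch_single_loop and loop_version implement, sketched by hand (function and variable names are invented):

void
unswitched (int *a, int n, int flag)
{
  int i;

  /* Before: one loop that tests the invariant `flag' every iteration.
     After unswitching, the test is hoisted and the loop duplicated.  */
  if (flag)
    for (i = 0; i < n; i++)
      a[i] = 1;
  else
    for (i = 0; i < n; i++)
      a[i] = 2;
}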
diff --git a/gcc/tree-ssa-loop.c b/gcc/tree-ssa-loop.c
index b497cccccd1..591890ccfd5 100644
--- a/gcc/tree-ssa-loop.c
+++ b/gcc/tree-ssa-loop.c
@@ -1,18 +1,18 @@
/* Loop optimizations over tree-ssa.
Copyright (C) 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -47,7 +47,7 @@ gate_tree_loop (void)
return flag_tree_loop_optimize != 0;
}
-struct gimple_opt_pass pass_tree_loop =
+struct gimple_opt_pass pass_tree_loop =
{
{
GIMPLE_PASS,
@@ -81,8 +81,8 @@ tree_ssa_loop_init (void)
scev_initialize ();
return 0;
}
-
-struct gimple_opt_pass pass_tree_loop_init =
+
+struct gimple_opt_pass pass_tree_loop_init =
{
{
GIMPLE_PASS,
@@ -119,7 +119,7 @@ gate_tree_ssa_loop_im (void)
return flag_tree_loop_im != 0;
}
-struct gimple_opt_pass pass_lim =
+struct gimple_opt_pass pass_lim =
{
{
GIMPLE_PASS,
@@ -155,7 +155,7 @@ gate_tree_ssa_loop_unswitch (void)
return flag_unswitch_loops != 0;
}
-struct gimple_opt_pass pass_tree_unswitch =
+struct gimple_opt_pass pass_tree_unswitch =
{
{
GIMPLE_PASS,
@@ -193,7 +193,7 @@ gate_tree_predictive_commoning (void)
return flag_predictive_commoning != 0;
}
-struct gimple_opt_pass pass_predcom =
+struct gimple_opt_pass pass_predcom =
{
{
GIMPLE_PASS,
@@ -305,7 +305,7 @@ graphite_transforms (void)
static bool
gate_graphite_transforms (void)
{
- /* Enable -fgraphite pass if any one of the graphite optimization flags
+ /* Enable -fgraphite pass if any one of the graphite optimization flags
is turned on. */
if (flag_loop_block || flag_loop_interchange || flag_loop_strip_mine
|| flag_graphite_identity || flag_loop_parallelize_all)
@@ -675,8 +675,8 @@ tree_ssa_loop_done (void)
loop_optimizer_finalize ();
return 0;
}
-
-struct gimple_opt_pass pass_tree_loop_done =
+
+struct gimple_opt_pass pass_tree_loop_done =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 948707eb1a0..2a984a1034e 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -1,18 +1,18 @@
/* Global, SSA-based optimizations using mathematical identities.
Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -312,7 +312,7 @@ insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
recip_def = make_rename_temp (type, "reciptmp");
new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
build_one_cst (type), def);
-
+
if (occ->bb_has_division)
{
/* Case 1: insert before an existing division. */
@@ -418,7 +418,7 @@ execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
count++;
}
}
-
+
/* Do the expensive part only if we can hope to optimize something. */
threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
if (count >= threshold)
@@ -737,7 +737,7 @@ execute_cse_sincos_1 (tree name)
gsi = gsi_for_stmt (use_stmt);
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
- gsi_remove (&gsi, true);
+ gsi_remove (&gsi, true);
}
VEC_free(gimple, heap, stmts);
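What the reciprocal CSE described above aims for, written as hand-made C (only valid under flags such as -freciprocal-math, since it changes rounding; the example is mine, not taken from the pass):

double
recip_cse (double a, double b, double d)
{
  /* Instead of two divisions a / d + b / d, compute the reciprocal
     once ("reciptmp" in the pass) and multiply.  */
  double reciptmp = 1.0 / d;
  return a * reciptmp + b * reciptmp;
}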
diff --git a/gcc/tree-ssa-operands.c b/gcc/tree-ssa-operands.c
index 6af31a437ea..a8e11b1c9ec 100644
--- a/gcc/tree-ssa-operands.c
+++ b/gcc/tree-ssa-operands.c
@@ -35,40 +35,40 @@ along with GCC; see the file COPYING3. If not see
#include "langhooks.h"
#include "ipa-reference.h"
-/* This file contains the code required to manage the operands cache of the
- SSA optimizer. For every stmt, we maintain an operand cache in the stmt
- annotation. This cache contains operands that will be of interest to
- optimizers and other passes wishing to manipulate the IL.
+/* This file contains the code required to manage the operands cache of the
+ SSA optimizer. For every stmt, we maintain an operand cache in the stmt
+ annotation. This cache contains operands that will be of interest to
+ optimizers and other passes wishing to manipulate the IL.
- The operand type are broken up into REAL and VIRTUAL operands. The real
- operands are represented as pointers into the stmt's operand tree. Thus
+ The operand type are broken up into REAL and VIRTUAL operands. The real
+ operands are represented as pointers into the stmt's operand tree. Thus
any manipulation of the real operands will be reflected in the actual tree.
- Virtual operands are represented solely in the cache, although the base
- variable for the SSA_NAME may, or may not occur in the stmt's tree.
+ Virtual operands are represented solely in the cache, although the base
+   variable for the SSA_NAME may or may not occur in the stmt's tree.
Manipulation of the virtual operands will not be reflected in the stmt tree.
- The routines in this file are concerned with creating this operand cache
+ The routines in this file are concerned with creating this operand cache
from a stmt tree.
- The operand tree is the parsed by the various get_* routines which look
- through the stmt tree for the occurrence of operands which may be of
- interest, and calls are made to the append_* routines whenever one is
- found. There are 4 of these routines, each representing one of the
+   The operand tree is then parsed by the various get_* routines which look
+ through the stmt tree for the occurrence of operands which may be of
+ interest, and calls are made to the append_* routines whenever one is
+ found. There are 4 of these routines, each representing one of the
4 types of operands. Defs, Uses, Virtual Uses, and Virtual May Defs.
- The append_* routines check for duplication, and simply keep a list of
+ The append_* routines check for duplication, and simply keep a list of
unique objects for each operand type in the build_* extendable vectors.
- Once the stmt tree is completely parsed, the finalize_ssa_operands()
- routine is called, which proceeds to perform the finalization routine
+ Once the stmt tree is completely parsed, the finalize_ssa_operands()
+ routine is called, which proceeds to perform the finalization routine
on each of the 4 operand vectors which have been built up.
- If the stmt had a previous operand cache, the finalization routines
- attempt to match up the new operands with the old ones. If it's a perfect
- match, the old vector is simply reused. If it isn't a perfect match, then
- a new vector is created and the new operands are placed there. For
- virtual operands, if the previous cache had SSA_NAME version of a
- variable, and that same variable occurs in the same operands cache, then
+ If the stmt had a previous operand cache, the finalization routines
+ attempt to match up the new operands with the old ones. If it's a perfect
+ match, the old vector is simply reused. If it isn't a perfect match, then
+ a new vector is created and the new operands are placed there. For
+ virtual operands, if the previous cache had SSA_NAME version of a
+ variable, and that same variable occurs in the same operands cache, then
the new cache vector will also get the same SSA_NAME.
i.e., if a stmt had a VUSE of 'a_5', and 'a' occurs in the new
@@ -78,7 +78,7 @@ along with GCC; see the file COPYING3. If not see
/* Structure storing statistics on how many call clobbers we have, and
how many where avoided. */
-static struct
+static struct
{
/* Number of call-clobbered ops we attempt to add to calls in
add_call_clobbered_mem_symbols. */
@@ -90,7 +90,7 @@ static struct
/* Number of reads (VUSEs) avoided by using not_read information. */
unsigned int static_read_clobbers_avoided;
-
+
/* Number of write-clobbers avoided because the variable can't escape to
this call. */
unsigned int unescapable_clobbers_avoided;
@@ -109,7 +109,7 @@ static struct
/* By default, operands are loaded. */
#define opf_use 0
-/* Operand is the target of an assignment expression or a
+/* Operand is the target of an assignment expression or a
call-clobbered variable. */
#define opf_def (1 << 0)
@@ -138,7 +138,7 @@ static tree build_vdef;
/* The built VUSE operand. */
static tree build_vuse;
-/* Bitmap obstack for our datastructures that needs to survive across
+/* Bitmap obstack for our data structures that needs to survive across
compilations of multiple functions. */
static bitmap_obstack operands_bitmap_obstack;
@@ -174,7 +174,7 @@ ssa_operands_active (void)
return cfun->gimple_df && gimple_ssa_operands (cfun)->ops_active;
}
-
+
/* Create the VOP variable, an artificial global variable to act as a
representative of all of the virtual operands FUD chain. */
@@ -208,7 +208,7 @@ create_vop_var (void)
In 1k we can fit 25 use operands (or 63 def operands) on a host with
8 byte pointers, that would be 10 statements each with 1 def and 2
uses. */
-
+
#define OP_SIZE_INIT 0
#define OP_SIZE_1 (1024 - sizeof (void *))
#define OP_SIZE_2 (1024 * 4 - sizeof (void *))
@@ -289,7 +289,7 @@ fini_ssa_operands (void)
/* Return memory for an operand of size SIZE. */
-
+
static inline void *
ssa_operand_alloc (unsigned size)
{
@@ -319,7 +319,7 @@ ssa_operand_alloc (unsigned size)
gcc_unreachable ();
}
- ptr = (struct ssa_operand_memory_d *)
+ ptr = (struct ssa_operand_memory_d *)
ggc_alloc (sizeof (void *)
+ gimple_ssa_operands (cfun)->ssa_operand_mem_size);
ptr->next = gimple_ssa_operands (cfun)->operand_memory;
@@ -374,7 +374,7 @@ alloc_use (void)
/* Adds OP to the list of defs after LAST. */
-static inline def_optype_p
+static inline def_optype_p
add_def_op (tree *op, def_optype_p last)
{
def_optype_p new_def;
@@ -529,8 +529,8 @@ finalize_ssa_uses (gimple stmt)
/* Now create nodes for all the new nodes. */
for (new_i = 0; new_i < VEC_length (tree, build_uses); new_i++)
- last = add_use_op (stmt,
- (tree *) VEC_index (tree, build_uses, new_i),
+ last = add_use_op (stmt,
+ (tree *) VEC_index (tree, build_uses, new_i),
last);
/* Now set the stmt's operands. */
@@ -552,7 +552,7 @@ cleanup_build_arrays (void)
/* Finalize all the build vectors, fill the new ones into INFO. */
-
+
static inline void
finalize_ssa_stmt_operands (gimple stmt)
{
@@ -699,11 +699,11 @@ mark_address_taken (tree ref)
/* A subroutine of get_expr_operands to handle INDIRECT_REF,
- ALIGN_INDIRECT_REF and MISALIGNED_INDIRECT_REF.
+ ALIGN_INDIRECT_REF and MISALIGNED_INDIRECT_REF.
STMT is the statement being processed, EXPR is the INDIRECT_REF
that got us here.
-
+
FLAGS is as in get_expr_operands.
RECURSE_ON_BASE should be set to true if we want to continue
@@ -758,9 +758,9 @@ maybe_add_call_vops (gimple stmt)
call-clobbered. */
if (!(call_flags & ECF_NOVOPS))
{
- /* A 'pure' or a 'const' function never call-clobbers anything.
- A 'noreturn' function might, but since we don't return anyway
- there is no point in recording that. */
+ /* A 'pure' or a 'const' function never call-clobbers anything.
+ A 'noreturn' function might, but since we don't return anyway
+ there is no point in recording that. */
if (!(call_flags & (ECF_PURE | ECF_CONST | ECF_NORETURN)))
add_virtual_operand (stmt, opf_def);
else if (!(call_flags & ECF_CONST))
@@ -921,7 +921,7 @@ get_expr_operands (gimple stmt, tree *expr_p, int flags)
gimple_set_has_volatile_ops (stmt, true);
get_expr_operands (stmt, &TREE_OPERAND (expr, 0), flags);
-
+
if (code == COMPONENT_REF)
{
if (TREE_THIS_VOLATILE (TREE_OPERAND (expr, 1)))
@@ -1208,7 +1208,7 @@ verify_imm_links (FILE *f, tree var)
{
if (prev != ptr->prev)
goto error;
-
+
if (ptr->use == NULL)
goto error; /* 2 roots, or SAFE guard node. */
else if (*(ptr->use) != var)
@@ -1246,7 +1246,7 @@ verify_imm_links (FILE *f, tree var)
fprintf (f, " STMT MODIFIED. - <%p> ", (void *)ptr->loc.stmt);
print_gimple_stmt (f, ptr->loc.stmt, 0, TDF_SLIM);
}
- fprintf (f, " IMM ERROR : (use_p : tree - %p:%p)", (void *)ptr,
+ fprintf (f, " IMM ERROR : (use_p : tree - %p:%p)", (void *)ptr,
(void *)ptr->use);
print_generic_expr (f, USE_FROM_PTR (ptr), TDF_SLIM);
fprintf(f, "\n");
diff --git a/gcc/tree-ssa-operands.h b/gcc/tree-ssa-operands.h
index 5a8e02696d4..a435fab0201 100644
--- a/gcc/tree-ssa-operands.h
+++ b/gcc/tree-ssa-operands.h
@@ -42,7 +42,7 @@ struct def_optype_d
typedef struct def_optype_d *def_optype_p;
/* This represents the USE operands of a stmt. */
-struct use_optype_d
+struct use_optype_d
{
struct use_optype_d *next;
struct ssa_use_operand_d use_ptr;
@@ -52,7 +52,7 @@ typedef struct use_optype_d *use_optype_p;
/* This structure represents a variable sized buffer which is allocated by the
operand memory manager. Operands are suballocated out of this block. The
MEM array varies in size. */
-
+
struct GTY((chain_next("%h.next"))) ssa_operand_memory_d {
struct ssa_operand_memory_d *next;
char mem[1];
@@ -70,7 +70,7 @@ struct GTY(()) ssa_operands {
struct def_optype_d * GTY ((skip (""))) free_defs;
struct use_optype_d * GTY ((skip (""))) free_uses;
};
-
+
#define USE_FROM_PTR(PTR) get_use_from_ptr (PTR)
#define DEF_FROM_PTR(PTR) get_def_from_ptr (PTR)
#define SET_USE(USE, V) set_ssa_use_from_ptr (USE, V)
@@ -123,9 +123,9 @@ enum ssa_op_iter_type {
ssa_op_iter_def
};
-/* This structure is used in the operand iterator loops. It contains the
+/* This structure is used in the operand iterator loops. It contains the
items required to determine which operand is retrieved next. During
- optimization, this structure is scalarized, and any unused fields are
+ optimization, this structure is scalarized, and any unused fields are
optimized away, resulting in little overhead. */
typedef struct ssa_operand_iterator_d
@@ -139,7 +139,7 @@ typedef struct ssa_operand_iterator_d
gimple phi_stmt;
} ssa_op_iter;
-/* These flags are used to determine which operands are returned during
+/* These flags are used to determine which operands are returned during
execution of the loop. */
#define SSA_OP_USE 0x01 /* Real USE operands. */
#define SSA_OP_DEF 0x02 /* Real DEF operands. */
@@ -154,7 +154,7 @@ typedef struct ssa_operand_iterator_d
#define SSA_OP_ALL_DEFS (SSA_OP_VIRTUAL_DEFS | SSA_OP_DEF)
#define SSA_OP_ALL_OPERANDS (SSA_OP_ALL_USES | SSA_OP_ALL_DEFS)
-/* This macro executes a loop over the operands of STMT specified in FLAG,
+/* This macro executes a loop over the operands of STMT specified in FLAG,
returning each operand as a 'tree' in the variable TREEVAR. ITER is an
ssa_op_iter structure used to control the loop. */
#define FOR_EACH_SSA_TREE_OPERAND(TREEVAR, STMT, ITER, FLAGS) \
@@ -162,16 +162,16 @@ typedef struct ssa_operand_iterator_d
!op_iter_done (&(ITER)); \
TREEVAR = op_iter_next_tree (&(ITER)))
-/* This macro executes a loop over the operands of STMT specified in FLAG,
- returning each operand as a 'use_operand_p' in the variable USEVAR.
+/* This macro executes a loop over the operands of STMT specified in FLAG,
+ returning each operand as a 'use_operand_p' in the variable USEVAR.
ITER is an ssa_op_iter structure used to control the loop. */
#define FOR_EACH_SSA_USE_OPERAND(USEVAR, STMT, ITER, FLAGS) \
for (USEVAR = op_iter_init_use (&(ITER), STMT, FLAGS); \
!op_iter_done (&(ITER)); \
USEVAR = op_iter_next_use (&(ITER)))
-/* This macro executes a loop over the operands of STMT specified in FLAG,
- returning each operand as a 'def_operand_p' in the variable DEFVAR.
+/* This macro executes a loop over the operands of STMT specified in FLAG,
+ returning each operand as a 'def_operand_p' in the variable DEFVAR.
ITER is an ssa_op_iter structure used to control the loop. */
#define FOR_EACH_SSA_DEF_OPERAND(DEFVAR, STMT, ITER, FLAGS) \
for (DEFVAR = op_iter_init_def (&(ITER), STMT, FLAGS); \
@@ -204,19 +204,19 @@ typedef struct ssa_operand_iterator_d
: op_iter_init_def (&(ITER), STMT, FLAGS)); \
!op_iter_done (&(ITER)); \
(DEFVAR) = op_iter_next_def (&(ITER)))
-
+
/* This macro returns an operand in STMT as a tree if it is the ONLY
operand matching FLAGS. If there are 0 or more than 1 operand matching
FLAGS, then NULL_TREE is returned. */
#define SINGLE_SSA_TREE_OPERAND(STMT, FLAGS) \
single_ssa_tree_operand (STMT, FLAGS)
-
+
/* This macro returns an operand in STMT as a use_operand_p if it is the ONLY
operand matching FLAGS. If there are 0 or more than 1 operand matching
FLAGS, then NULL_USE_OPERAND_P is returned. */
#define SINGLE_SSA_USE_OPERAND(STMT, FLAGS) \
single_ssa_use_operand (STMT, FLAGS)
-
+
/* This macro returns an operand in STMT as a def_operand_p if it is the ONLY
operand matching FLAGS. If there are 0 or more than 1 operand matching
FLAGS, then NULL_DEF_OPERAND_P is returned. */
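A small usage fragment for the iterator macros above (GCC-internal code, not a standalone program; `stmt' and `dump_file' are assumed to be in scope):

  tree var;
  ssa_op_iter iter;

  /* Walk every real USE operand of STMT and dump it.  */
  FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_USE)
    {
      print_generic_expr (dump_file, var, TDF_SLIM);
      fprintf (dump_file, "\n");
    }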
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 3accbbc2bb1..b4a0aea9718 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -207,7 +207,7 @@ tree_ssa_phiopt_worker (bool do_store_elim)
bb_order = blocks_in_phiopt_order ();
n = n_basic_blocks - NUM_FIXED_BLOCKS;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++)
{
gimple cond_stmt, phi;
basic_block bb1, bb2;
@@ -307,7 +307,7 @@ tree_ssa_phiopt_worker (bool do_store_elim)
}
free (bb_order);
-
+
if (do_store_elim)
pointer_set_destroy (nontrap);
/* If the CFG has changed, we should cleanup the CFG. */
@@ -332,12 +332,12 @@ blocks_in_phiopt_order (void)
{
basic_block x, y;
basic_block *order = XNEWVEC (basic_block, n_basic_blocks);
- unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
+ unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
unsigned np, i;
- sbitmap visited = sbitmap_alloc (last_basic_block);
+ sbitmap visited = sbitmap_alloc (last_basic_block);
-#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
-#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
+#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
+#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
sbitmap_zero (visited);
@@ -696,7 +696,7 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
&& operand_equal_for_phi_arg_p (arg_false, larger))
{
/* Case
-
+
if (smaller < larger)
rslt = smaller;
else
@@ -905,7 +905,7 @@ abs_replacement (basic_block cond_bb, basic_block middle_bb,
optimize. */
if (assign == NULL)
return false;
-
+
/* If we got here, then we have found the only executable statement
     in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or
arg1 = -arg0, then we can not optimize. */
@@ -918,7 +918,7 @@ abs_replacement (basic_block cond_bb, basic_block middle_bb,
return false;
rhs = gimple_assign_rhs1 (assign);
-
+
/* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
if (!(lhs == arg0 && rhs == arg1)
&& !(lhs == arg1 && rhs == arg0))
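The shape of source code that minmax_replacement (and, analogously, abs_replacement) rewrites into a single MIN_EXPR/MAX_EXPR, written out by hand:

int
min_of (int a, int b)
{
  int r;

  if (a < b)
    r = a;        /* value on the true edge  */
  else
    r = b;        /* value on the false edge */

  /* The PHI merging the two assignments becomes r = MIN_EXPR <a, b>.  */
  return r;
}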
diff --git a/gcc/tree-ssa-phiprop.c b/gcc/tree-ssa-phiprop.c
index bac2303899f..fcd1d23bac8 100644
--- a/gcc/tree-ssa-phiprop.c
+++ b/gcc/tree-ssa-phiprop.c
@@ -298,7 +298,7 @@ propagate_with_phi (basic_block bb, gimple phi, struct phiprop_d *phivn,
/* Check whether this is a load of *ptr. */
if (!(is_gimple_assign (use_stmt)
- && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
+ && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
&& gimple_assign_rhs_code (use_stmt) == INDIRECT_REF
&& TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == ptr
/* We cannot replace a load that may throw or is volatile. */
@@ -355,7 +355,7 @@ tree_ssa_phiprop (void)
{
VEC(basic_block, heap) *bbs;
struct phiprop_d *phivn;
- bool did_something = false;
+ bool did_something = false;
basic_block bb;
gimple_stmt_iterator gsi;
unsigned i;
@@ -388,7 +388,7 @@ gate_phiprop (void)
return flag_tree_phiprop;
}
-struct gimple_opt_pass pass_phiprop =
+struct gimple_opt_pass pass_phiprop =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 7e44f846590..a3e7ad824b4 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -1156,7 +1156,7 @@ fully_constant_expression (pre_expr e)
}
case tcc_reference:
if (nary->opcode != REALPART_EXPR
- && nary->opcode != IMAGPART_EXPR
+ && nary->opcode != IMAGPART_EXPR
&& nary->opcode != VIEW_CONVERT_EXPR)
return e;
/* Fallthrough. */
@@ -2998,7 +2998,7 @@ create_expression_by_pieces (basic_block block, pre_expr expr,
genop2 = fold_convert (sizetype, genop2);
else
genop2 = fold_convert (TREE_TYPE (nary->op[1]), genop2);
-
+
folded = fold_build2 (nary->opcode, nary->type,
genop1, genop2);
}
@@ -3257,7 +3257,7 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
if (!useless_type_conversion_p (type, TREE_TYPE (constant)))
{
tree builtexpr = fold_convert (type, constant);
- if (!is_gimple_min_invariant (builtexpr))
+ if (!is_gimple_min_invariant (builtexpr))
{
tree forcedexpr = force_gimple_operand (builtexpr,
&stmts, true,
@@ -4589,7 +4589,7 @@ execute_pre (bool do_fre)
remove_dead_inserted_code ();
loop_optimizer_finalize ();
}
-
+
return 0;
}
init_pre (do_fre);
diff --git a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c
index 14912388581..6ef73a11406 100644
--- a/gcc/tree-ssa-propagate.c
+++ b/gcc/tree-ssa-propagate.c
@@ -177,7 +177,7 @@ cfg_blocks_empty_p (void)
/* Add a basic block to the worklist. The block must not be already
in the worklist, and it must not be the ENTRY or EXIT block. */
-static void
+static void
cfg_blocks_add (basic_block bb)
{
bool head = false;
@@ -512,7 +512,7 @@ ssa_prop_init (void)
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
gimple_set_plf (gsi_stmt (si), STMT_IN_SSA_EDGE_WORKLIST, false);
-
+
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
gimple_set_plf (gsi_stmt (si), STMT_IN_SSA_EDGE_WORKLIST, false);
@@ -717,7 +717,7 @@ update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
{
args = VEC_alloc (tree, heap, nargs);
VEC_safe_grow (tree, heap, args, nargs);
-
+
for (i = 0; i < nargs; i++)
VEC_replace (tree, args, i, CALL_EXPR_ARG (expr, i));
}
@@ -799,7 +799,7 @@ ssa_propagate (ssa_prop_visit_stmt_fn visit_stmt,
ssa_prop_init ();
/* Iterate until the worklists are empty. */
- while (!cfg_blocks_empty_p ()
+ while (!cfg_blocks_empty_p ()
|| VEC_length (gimple, interesting_ssa_edges) > 0
|| VEC_length (gimple, varying_ssa_edges) > 0)
{
@@ -943,7 +943,7 @@ replace_phi_args_in (gimple phi, prop_value_t *prop_value)
}
}
}
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
if (!replaced)
@@ -1083,7 +1083,7 @@ substitute_and_fold (prop_value_t *prop_value, ssa_prop_fold_stmt_fn fold_fn)
== GIMPLE_SINGLE_RHS))
{
tree rhs = gimple_assign_rhs1 (stmt);
-
+
if (TREE_CODE (rhs) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (rhs);
}
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 5136aee5d32..5fd5967121d 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -106,34 +106,34 @@ along with GCC; see the file COPYING3. If not see
mergetmp2 = d + e
and put mergetmp2 on the merge worklist.
-
+
so merge worklist = {mergetmp, c, mergetmp2}
-
+
Continue building binary ops of these operations until you have only
one operation left on the worklist.
-
+
So we have
-
+
build binary op
mergetmp3 = mergetmp + c
-
+
worklist = {mergetmp2, mergetmp3}
-
+
mergetmp4 = mergetmp2 + mergetmp3
-
+
worklist = {mergetmp4}
-
+
because we have one operation left, we can now just set the original
statement equal to the result of that operation.
-
+
This will at least expose a + b and d + e to redundancy elimination
as binary operations.
-
+
For extra points, you can reuse the old statements to build the
mergetmps, since you shouldn't run out.
So why don't we do this?
-
+
Because it's expensive, and rarely will help. Most trees we are
reassociating have 3 or less ops. If they have 2 ops, they already
will be written into a nice single binary op. If you have 3 ops, a
@@ -142,15 +142,15 @@ along with GCC; see the file COPYING3. If not see
mergetmp = op1 + op2
newstmt = mergetmp + op3
-
+
instead of
mergetmp = op2 + op3
newstmt = mergetmp + op1
-
+
If all three are of the same rank, you can't expose them all in a
single binary operator anyway, so the above is *still* the best you
can do.
-
+
Thus, this is what we do. When we have three ops left, we check to see
what order to put them in, and call it a day. As a nod to vector sum
reduction, we check if any of the ops are really a phi node that is a
@@ -447,7 +447,7 @@ eliminate_duplicate_pair (enum tree_code opcode,
{
VEC_free (operand_entry_t, heap, *ops);
*ops = NULL;
- add_to_ops_vec (ops, fold_convert (TREE_TYPE (last->op),
+ add_to_ops_vec (ops, fold_convert (TREE_TYPE (last->op),
integer_zero_node));
*all_done = true;
}
@@ -511,7 +511,7 @@ eliminate_plus_minus_pair (enum tree_code opcode,
}
VEC_ordered_remove (operand_entry_t, *ops, i);
- add_to_ops_vec (ops, fold_convert(TREE_TYPE (oe->op),
+ add_to_ops_vec (ops, fold_convert(TREE_TYPE (oe->op),
integer_zero_node));
VEC_ordered_remove (operand_entry_t, *ops, currindex);
reassociate_stats.ops_eliminated ++;
@@ -579,7 +579,7 @@ eliminate_not_pairs (enum tree_code opcode,
oe->op = build_low_bits_mask (TREE_TYPE (oe->op),
TYPE_PRECISION (TREE_TYPE (oe->op)));
- reassociate_stats.ops_eliminated
+ reassociate_stats.ops_eliminated
+= VEC_length (operand_entry_t, *ops) - 1;
VEC_free (operand_entry_t, heap, *ops);
*ops = NULL;
@@ -618,9 +618,9 @@ eliminate_using_constants (enum tree_code opcode,
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found & 0, removing all other ops\n");
- reassociate_stats.ops_eliminated
+ reassociate_stats.ops_eliminated
+= VEC_length (operand_entry_t, *ops) - 1;
-
+
VEC_free (operand_entry_t, heap, *ops);
*ops = NULL;
VEC_safe_push (operand_entry_t, heap, *ops, oelast);
@@ -646,15 +646,15 @@ eliminate_using_constants (enum tree_code opcode,
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found | -1, removing all other ops\n");
- reassociate_stats.ops_eliminated
+ reassociate_stats.ops_eliminated
+= VEC_length (operand_entry_t, *ops) - 1;
-
+
VEC_free (operand_entry_t, heap, *ops);
*ops = NULL;
VEC_safe_push (operand_entry_t, heap, *ops, oelast);
return;
}
- }
+ }
else if (integer_zerop (oelast->op))
{
if (VEC_length (operand_entry_t, *ops) != 1)
@@ -677,8 +677,8 @@ eliminate_using_constants (enum tree_code opcode,
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Found * 0, removing all other ops\n");
-
- reassociate_stats.ops_eliminated
+
+ reassociate_stats.ops_eliminated
+= VEC_length (operand_entry_t, *ops) - 1;
VEC_free (operand_entry_t, heap, *ops);
*ops = NULL;
@@ -1740,14 +1740,14 @@ repropagate_negates (void)
We do this top down because we don't know whether the subtract is
part of a possible chain of reassociation except at the top.
-
+
IE given
d = f + g
c = a + e
b = c - d
q = b - r
k = t - q
-
+
we want to break up k = t - q, but we won't until we've transformed q
= b - r, which won't be broken up until we transform b = c - d.
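The mergetmp walkthrough in the comment above, written as C so the balanced rebuild is visible (temporaries named after the comment's mergetmp*):

int
reassoc_example (int a, int b, int c, int d, int e)
{
  /* Original chain: ((((a + b) + c) + d) + e).  Rebuilt form:  */
  int mergetmp  = a + b;
  int mergetmp2 = d + e;
  int mergetmp3 = mergetmp + c;
  int mergetmp4 = mergetmp2 + mergetmp3;
  return mergetmp4;
}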
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 0a8dcaa36c4..466ca7565b1 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -324,7 +324,7 @@ vn_constant_eq (const void *p1, const void *p2)
}
/* Hash table hash function for vn_constant_t. */
-
+
static hashval_t
vn_constant_hash (const void *p1)
{
@@ -358,11 +358,11 @@ get_or_alloc_constant_value_id (tree constant)
{
void **slot;
vn_constant_t vc = XNEW (struct vn_constant_s);
-
+
vc->hashcode = vn_hash_constant_with_type (constant);
vc->constant = constant;
slot = htab_find_slot_with_hash (constant_to_value_id, vc,
- vc->hashcode, INSERT);
+ vc->hashcode, INSERT);
if (*slot)
{
free (vc);
@@ -379,7 +379,7 @@ get_or_alloc_constant_value_id (tree constant)
bool
value_id_constant_p (unsigned int v)
{
- return bitmap_bit_p (constant_value_ids, v);
+ return bitmap_bit_p (constant_value_ids, v);
}
/* Compare two reference operands P1 and P2 for equality. Return true if
@@ -980,7 +980,7 @@ vn_reference_lookup_1 (vn_reference_t vr, vn_reference_t *vnresult)
*vnresult = (vn_reference_t)*slot;
return ((vn_reference_t)*slot)->result;
}
-
+
return NULL_TREE;
}
@@ -1007,7 +1007,7 @@ vn_reference_lookup_2 (ao_ref *op ATTRIBUTE_UNUSED, tree vuse, void *vr_)
hash, NO_INSERT);
if (slot)
return *slot;
-
+
return NULL;
}
@@ -1336,7 +1336,7 @@ vn_reference_insert_pieces (tree vuse, alias_set_type set, tree type,
slot = htab_find_slot_with_hash (current_info->references, vr1, vr1->hashcode,
INSERT);
-
+
/* At this point we should have all the things inserted that we have
seen before, and we should never try inserting something that
already exists. */
@@ -1417,7 +1417,7 @@ vn_nary_op_eq (const void *p1, const void *p2)
tree
vn_nary_op_lookup_pieces (unsigned int length, enum tree_code code,
tree type, tree op0, tree op1, tree op2,
- tree op3, vn_nary_op_t *vnresult)
+ tree op3, vn_nary_op_t *vnresult)
{
void **slot;
struct vn_nary_op_s vno1;
@@ -1521,7 +1521,7 @@ vn_nary_op_insert_pieces (unsigned int length, enum tree_code code,
tree type, tree op0,
tree op1, tree op2, tree op3,
tree result,
- unsigned int value_id)
+ unsigned int value_id)
{
void **slot;
vn_nary_op_t vno1;
@@ -1549,7 +1549,7 @@ vn_nary_op_insert_pieces (unsigned int length, enum tree_code code,
*slot = vno1;
return vno1;
-
+
}
/* Insert OP into the current hash table with a value number of
@@ -2982,12 +2982,12 @@ init_scc_vn (void)
sccstack = NULL;
constant_to_value_id = htab_create (23, vn_constant_hash, vn_constant_eq,
free);
-
+
constant_value_ids = BITMAP_ALLOC (NULL);
-
+
next_dfs_num = 1;
next_value_id = 1;
-
+
vn_ssa_aux_table = VEC_alloc (vn_ssa_aux_t, heap, num_ssa_names + 1);
/* VEC_alloc doesn't actually grow it to the right size, it just
preallocates the space to do so. */
@@ -3074,7 +3074,7 @@ set_hashtable_value_ids (void)
table. */
FOR_EACH_HTAB_ELEMENT (valid_info->nary,
- vno, vn_nary_op_t, hi)
+ vno, vn_nary_op_t, hi)
{
if (vno->result)
{
@@ -3086,7 +3086,7 @@ set_hashtable_value_ids (void)
}
FOR_EACH_HTAB_ELEMENT (valid_info->phis,
- vp, vn_phi_t, hi)
+ vp, vn_phi_t, hi)
{
if (vp->result)
{
@@ -3098,7 +3098,7 @@ set_hashtable_value_ids (void)
}
FOR_EACH_HTAB_ELEMENT (valid_info->references,
- vr, vn_reference_t, hi)
+ vr, vn_reference_t, hi)
{
if (vr->result)
{
@@ -3119,7 +3119,7 @@ run_scc_vn (bool may_insert_arg)
size_t i;
tree param;
bool changed = true;
-
+
may_insert = may_insert_arg;
init_scc_vn ();
@@ -3151,7 +3151,7 @@ run_scc_vn (bool may_insert_arg)
}
/* Initialize the value ids. */
-
+
for (i = 1; i < num_ssa_names; ++i)
{
tree name = ssa_name (i);
@@ -3165,7 +3165,7 @@ run_scc_vn (bool may_insert_arg)
else if (is_gimple_min_invariant (info->valnum))
info->value_id = get_or_alloc_constant_value_id (info->valnum);
}
-
+
/* Propagate until they stop changing. */
while (changed)
{
@@ -3186,9 +3186,9 @@ run_scc_vn (bool may_insert_arg)
}
}
}
-
+
set_hashtable_value_ids ();
-
+
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Value numbers:\n");
@@ -3214,7 +3214,7 @@ run_scc_vn (bool may_insert_arg)
/* Return the maximum value id we have ever seen. */
unsigned int
-get_max_value_id (void)
+get_max_value_id (void)
{
return next_value_id;
}
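A minimal example of the redundancy that SCC value numbering detects: both additions receive the same value number, so later passes can reuse the first result (the example is mine):

int
vn_example (int a, int b)
{
  int t1 = a + b;
  int t2 = a + b;   /* same value number as t1 */
  return t1 - t2;   /* foldable to 0 once t2 is known equal to t1 */
}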
diff --git a/gcc/tree-ssa-sink.c b/gcc/tree-ssa-sink.c
index be3fb7145fd..6f7d9a4685b 100644
--- a/gcc/tree-ssa-sink.c
+++ b/gcc/tree-ssa-sink.c
@@ -53,7 +53,7 @@ along with GCC; see the file COPYING3. If not see
else
y = *q;
-
+
should become
sinktemp = p;
p = p + 1;
@@ -65,16 +65,16 @@ along with GCC; see the file COPYING3. If not see
y = *q
}
Store copy propagation will take care of the store elimination above.
-
+
2. Sinking using Partial Dead Code Elimination. */
static struct
-{
+{
/* The number of statements sunk down the flowgraph by code sinking. */
int sunk;
-
+
} sink_stats;
@@ -205,7 +205,7 @@ is_hidden_global_store (gimple stmt)
static basic_block
nearest_common_dominator_of_uses (gimple stmt, bool *debug_stmts)
-{
+{
bitmap blocks = BITMAP_ALLOC (NULL);
basic_block commondom;
unsigned int j;
@@ -250,13 +250,13 @@ nearest_common_dominator_of_uses (gimple stmt, bool *debug_stmts)
}
commondom = BASIC_BLOCK (bitmap_first_set_bit (blocks));
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
- commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
+ commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
BASIC_BLOCK (j));
BITMAP_FREE (blocks);
return commondom;
}
-/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
+/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
determine the location to sink the statement to, if any.
Returns true if there is such location; in that case, TOGSI points to the
statement before that STMT should be moved. */
@@ -297,19 +297,19 @@ statement_sink_location (gimple stmt, basic_block frombb,
/* There are a few classes of things we can't or don't move, some because we
don't have code to handle it, some because it's not profitable and some
- because it's not legal.
-
+ because it's not legal.
+
We can't sink things that may be global stores, at least not without
calculating a lot more information, because we may cause it to no longer
be seen by an external routine that needs it depending on where it gets
- moved to.
-
+ moved to.
+
We don't want to sink loads from memory.
We can't sink statements that end basic blocks without splitting the
incoming edge for the sink location to place it there.
- We can't sink statements that have volatile operands.
+ We can't sink statements that have volatile operands.
We don't want to sink dead code, so anything with 0 immediate uses is not
sunk.
@@ -329,7 +329,7 @@ statement_sink_location (gimple stmt, basic_block frombb,
|| (cfun->has_local_explicit_reg_vars
&& TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))) == BLKmode))
return false;
-
+
FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, iter, SSA_OP_ALL_DEFS)
{
tree def = DEF_FROM_PTR (def_p);
@@ -337,14 +337,14 @@ statement_sink_location (gimple stmt, basic_block frombb,
|| SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def))
return false;
}
-
+
FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
{
tree use = USE_FROM_PTR (use_p);
if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use))
return false;
}
-
+
/* If all the immediate uses are not in the same place, find the nearest
common dominator of all the immediate uses. For PHI nodes, we have to
find the nearest common dominator of all of the predecessor blocks, since
@@ -354,16 +354,16 @@ statement_sink_location (gimple stmt, basic_block frombb,
bool debug_stmts = false;
basic_block commondom = nearest_common_dominator_of_uses (stmt,
&debug_stmts);
-
+
if (commondom == frombb)
return false;
/* Our common dominator has to be dominated by frombb in order to be a
trivially safe place to put this statement, since it has multiple
- uses. */
+ uses. */
if (!dominated_by_p (CDI_DOMINATORS, commondom, frombb))
return false;
-
+
/* It doesn't make sense to move to a dominator that post-dominates
frombb, because it means we've just moved it into a path that always
executes if frombb executes, instead of reducing the number of
@@ -417,10 +417,10 @@ statement_sink_location (gimple stmt, basic_block frombb,
/* This will happen when you have
a_3 = PHI <a_13, a_26>
-
- a_26 = VDEF <a_3>
- If the use is a phi, and is in the same bb as the def,
+ a_26 = VDEF <a_3>
+
+ If the use is a phi, and is in the same bb as the def,
we can't sink it. */
if (gimple_bb (use) == frombb)
@@ -449,7 +449,7 @@ sink_code_in_bb (basic_block bb)
edge_iterator ei;
edge e;
bool last = true;
-
+
/* If this block doesn't dominate anything, there can't be any place to sink
the statements to. */
if (first_dom_son (CDI_DOMINATORS, bb) == NULL)
@@ -462,7 +462,7 @@ sink_code_in_bb (basic_block bb)
for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
{
- gimple stmt = gsi_stmt (gsi);
+ gimple stmt = gsi_stmt (gsi);
gimple_stmt_iterator togsi;
if (!statement_sink_location (stmt, bb, &togsi))
@@ -471,7 +471,7 @@ sink_code_in_bb (basic_block bb)
gsi_prev (&gsi);
last = false;
continue;
- }
+ }
if (dump_file)
{
fprintf (dump_file, "Sinking ");
@@ -479,7 +479,7 @@ sink_code_in_bb (basic_block bb)
fprintf (dump_file, " from bb %d to bb %d\n",
bb->index, (gsi_bb (togsi))->index);
}
-
+
/* If this is the end of the basic block, we need to insert at the end
of the basic block. */
if (gsi_end_p (togsi))
@@ -503,7 +503,7 @@ sink_code_in_bb (basic_block bb)
last = false;
if (!gsi_end_p (gsi))
gsi_prev (&gsi);
-
+
}
earlyout:
for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
@@ -512,7 +512,7 @@ sink_code_in_bb (basic_block bb)
{
sink_code_in_bb (son);
}
-}
+}
/* Perform code sinking.
This moves code down the flowgraph when we know it would be
@@ -520,7 +520,7 @@ sink_code_in_bb (basic_block bb)
executions of the statement.
IE given
-
+
a_1 = b + c;
if (<something>)
{
@@ -559,7 +559,7 @@ execute_sink_code (void)
memset (&sink_stats, 0, sizeof (sink_stats));
calculate_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_POST_DOMINATORS);
- sink_code_in_bb (EXIT_BLOCK_PTR);
+ sink_code_in_bb (EXIT_BLOCK_PTR);
statistics_counter_event (cfun, "Sunk statements", sink_stats.sunk);
free_dominance_info (CDI_POST_DOMINATORS);
remove_fake_exit_edges ();
@@ -597,7 +597,7 @@ struct gimple_opt_pass pass_sink_code =
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_update_ssa
+ TODO_update_ssa
| TODO_dump_func
| TODO_ggc_collect
| TODO_verify_ssa /* todo_flags_finish */
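The transformation execute_sink_code performs, at source level, mirroring the a_1 = b + c example in the pass comment above:

int
sink_example (int b, int c, int p)
{
  int x = 0;

  /* `b + c' is computed only on the path that uses it; before sinking
     it was computed unconditionally ahead of the branch.  */
  if (p)
    x = b + c;
  return x;
}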
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index e0a681db2f2..012dc1c9795 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -1444,10 +1444,10 @@ unify_nodes (constraint_graph_t graph, unsigned int to, unsigned int from,
changed_count++;
}
}
-
+
BITMAP_FREE (get_varinfo (from)->solution);
BITMAP_FREE (get_varinfo (from)->oldsolution);
-
+
if (stats.iterations > 0)
{
BITMAP_FREE (get_varinfo (to)->oldsolution);
@@ -2288,10 +2288,10 @@ unite_pointer_equivalences (constraint_graph_t graph)
if (label)
{
int label_rep = graph->pe_rep[label];
-
+
if (label_rep == -1)
continue;
-
+
label_rep = find (label_rep);
if (label_rep >= 0 && unite (label_rep, find (i)))
unify_nodes (graph, label_rep, i, false);
@@ -2368,7 +2368,7 @@ rewrite_constraints (constraint_graph_t graph,
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
-
+
fprintf (dump_file, "%s is a non-pointer variable,"
"ignoring constraint:",
get_varinfo (lhs.var)->name);
@@ -2382,7 +2382,7 @@ rewrite_constraints (constraint_graph_t graph,
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
-
+
fprintf (dump_file, "%s is a non-pointer variable,"
"ignoring constraint:",
get_varinfo (rhs.var)->name);
@@ -4752,7 +4752,7 @@ shared_bitmap_add (bitmap pt_vars)
/* Set bits in INTO corresponding to the variable uids in solution set FROM. */
-static void
+static void
set_uids_in_ptset (bitmap into, bitmap from, struct pt_solution *pt)
{
unsigned int i;
@@ -5424,16 +5424,16 @@ solve_constraints (void)
"substitution\n");
init_graph (VEC_length (varinfo_t, varmap) * 2);
-
+
if (dump_file)
fprintf (dump_file, "Building predecessor graph\n");
build_pred_graph ();
-
+
if (dump_file)
fprintf (dump_file, "Detecting pointer and location "
"equivalences\n");
si = perform_var_substitution (graph);
-
+
if (dump_file)
fprintf (dump_file, "Rewriting constraints and unifying "
"variables\n");
diff --git a/gcc/tree-ssa-ter.c b/gcc/tree-ssa-ter.c
index c35d6336beb..902b1f07f36 100644
--- a/gcc/tree-ssa-ter.c
+++ b/gcc/tree-ssa-ter.c
@@ -36,20 +36,20 @@ along with GCC; see the file COPYING3. If not see
/* Temporary Expression Replacement (TER)
Replace SSA version variables during out-of-ssa with their defining
- expression if there is only one use of the variable.
+ expression if there is only one use of the variable.
This pass is required in order for the RTL expansion pass to see larger
chunks of code. This allows it to make better choices on RTL pattern
selection. When expand is rewritten and merged with out-of-ssa, and
- understands SSA, this should be eliminated.
+ understands SSA, this should be eliminated.
A pass is made through the function, one block at a time. No cross block
information is tracked.
Variables which only have one use, and whose defining stmt is considered
a replaceable expression (see is_replaceable_p) are tracked to see whether
- they can be replaced at their use location.
-
+ they can be replaced at their use location.
+
n_12 = C * 10
a_2 = b_5 + 6
v_9 = a_2 * n_12
@@ -64,16 +64,16 @@ along with GCC; see the file COPYING3. If not see
v = (b + 6) * (C * 10)
-
- This requires ensuring that none of the variables used by the expression
- change between the def point and where it is used. Furthermore, if any
- of the ssa_names used in this expression are themselves replaceable, we
- have to ensure none of that expressions' arguments change either.
- Although SSA_NAMES themselves don't change, this pass is performed after
- coalescing has coalesced different SSA_NAMES together, so there could be a
+
+ This requires ensuring that none of the variables used by the expression
+ change between the def point and where it is used. Furthermore, if any
+ of the ssa_names used in this expression are themselves replaceable, we
+ have to ensure none of that expressions' arguments change either.
+ Although SSA_NAMES themselves don't change, this pass is performed after
+ coalescing has coalesced different SSA_NAMES together, so there could be a
definition of an SSA_NAME which is coalesced with a use that causes a
problem, i.e.,
-
+
PHI b_5 = <b_8(2), b_14(1)>
<...>
a_2 = b_5 + 6
@@ -85,7 +85,7 @@ along with GCC; see the file COPYING3. If not see
The expression b_5 + 6 CANNOT replace the use in the statement defining v_9
because b_8 is in fact killing the value of b_5 since they share a partition
and will be assigned the same memory or register location.
-
+
   TER implements this by stepping through the instructions in a block and
tracking potential expressions for replacement, and the partitions they are
dependent on. Expressions are represented by the SSA_NAME_VERSION of the
@@ -94,8 +94,8 @@ along with GCC; see the file COPYING3. If not see
When a stmt is determined to be a possible replacement expression, the
following steps are taken:
- EXPR_DECL_UID bitmap is allocated and set to the base variable UID of the
- def and any uses in the expression. non-NULL means the expression is being
+ EXPR_DECL_UID bitmap is allocated and set to the base variable UID of the
+ def and any uses in the expression. non-NULL means the expression is being
tracked. The UID's themselves are used to prevent TER substitution into
accumulating sequences, i.e.,
@@ -104,9 +104,9 @@ along with GCC; see the file COPYING3. If not see
x = x + w
etc.
this can result in very large expressions which don't accomplish anything
- see PR tree-optimization/17549.
+ see PR tree-optimization/17549.
- PARTITION_DEPENDENCIES is another bitmap array, and it has a bit set for any
+ PARTITION_DEPENDENCIES is another bitmap array, and it has a bit set for any
partition which is used in the expression. This is primarily used to remove
an expression from the partition kill lists when a decision is made whether
to replace it or not. This is indexed by ssa version number as well, and
@@ -114,18 +114,18 @@ along with GCC; see the file COPYING3. If not see
but they are summarized by an artificial partition called VIRTUAL_PARTITION.
This means a MAY or MUST def will kill *ALL* expressions that are dependent
on a virtual operand.
- Note that the EXPR_DECL_UID and this bitmap represent very similar
+ Note that the EXPR_DECL_UID and this bitmap represent very similar
information, but the info in one is not easy to obtain from the other.
KILL_LIST is yet another bitmap array, this time it is indexed by partition
- number, and represents a list of active expressions which will will no
+   number, and represents a list of active expressions which will no
longer be valid if a definition into this partition takes place.
PARTITION_IN_USE is simply a bitmap which is used to track which partitions
- currently have something in their kill list. This is used at the end of
+ currently have something in their kill list. This is used at the end of
a block to clear out the KILL_LIST bitmaps at the end of each block.
- NEW_REPLACEABLE_DEPENDENCIES is used as a temporary place to store
+ NEW_REPLACEABLE_DEPENDENCIES is used as a temporary place to store
dependencies which will be reused by the current definition. All the uses
on an expression are processed before anything else is done. If a use is
determined to be a replaceable expression AND the current stmt is also going
@@ -137,7 +137,7 @@ along with GCC; see the file COPYING3. If not see
a_2 = b_5 + 6
v_8 = a_2 + c_4
- a_2's expression 'b_5 + 6' is determined to be replaceable at the use
+ a_2's expression 'b_5 + 6' is determined to be replaceable at the use
location. It is dependent on the partition 'b_5' is in. This is cached into
the NEW_REPLACEABLE_DEPENDENCIES bitmap, and when v_8 is examined for
replaceability, it is a candidate, and it is dependent on the partition
@@ -148,14 +148,14 @@ along with GCC; see the file COPYING3. If not see
x_9 = v_8 * 5
x_9 is dependent on partitions b_5, and c_4
-
- if a statement is found which has either of those partitions written to
+
+ if a statement is found which has either of those partitions written to
before x_9 is used, then x_9 itself is NOT replaceable. */
/* Temporary Expression Replacement (TER) table information. */
-typedef struct temp_expr_table_d
+typedef struct temp_expr_table_d
{
var_map map;
bitmap *partition_dependencies; /* Partitions expr is dependent on. */
@@ -213,7 +213,7 @@ new_temp_expr_table (var_map map)
}
-/* Free TER table T. If there are valid replacements, return the expression
+/* Free TER table T. If there are valid replacements, return the expression
vector. */
static bitmap
@@ -259,7 +259,7 @@ version_to_be_replaced_p (temp_expr_table_p tab, int version)
}
-/* Add partition P to the list if partitions VERSION is dependent on. TAB is
+/* Add partition P to the list of partitions VERSION is dependent on.  TAB is
the expression table */
static inline void
@@ -286,10 +286,10 @@ add_to_partition_kill_list (temp_expr_table_p tab, int p, int ver)
}
-/* Remove VER from the partition kill list for P. TAB is the expression
+/* Remove VER from the partition kill list for P. TAB is the expression
table. */
-static inline void
+static inline void
remove_from_partition_kill_list (temp_expr_table_p tab, int p, int version)
{
#ifdef ENABLE_CHECKING
@@ -304,8 +304,8 @@ remove_from_partition_kill_list (temp_expr_table_p tab, int p, int version)
}
-/* Add a dependency between the def of ssa VERSION and VAR. If VAR is
- replaceable by an expression, add a dependence each of the elements of the
+/* Add a dependency between the def of ssa VERSION and VAR. If VAR is
+   replaceable by an expression, add a dependence to each of the elements of the
expression. These are contained in the new_replaceable list. TAB is the
expression table. */
@@ -321,18 +321,18 @@ add_dependence (temp_expr_table_p tab, int version, tree var)
{
if (!bitmap_empty_p (tab->new_replaceable_dependencies))
{
- /* Version will now be killed by a write to any partition the
+ /* Version will now be killed by a write to any partition the
substituted expression would have been killed by. */
EXECUTE_IF_SET_IN_BITMAP (tab->new_replaceable_dependencies, 0, x, bi)
add_to_partition_kill_list (tab, x, version);
- /* Rather than set partition_dependencies and in_use lists bit by
+ /* Rather than set partition_dependencies and in_use lists bit by
bit, simply OR in the new_replaceable_dependencies bits. */
if (!tab->partition_dependencies[version])
tab->partition_dependencies[version] = BITMAP_ALLOC (NULL);
- bitmap_ior_into (tab->partition_dependencies[version],
+ bitmap_ior_into (tab->partition_dependencies[version],
tab->new_replaceable_dependencies);
- bitmap_ior_into (tab->partition_in_use,
+ bitmap_ior_into (tab->partition_in_use,
tab->new_replaceable_dependencies);
/* It is only necessary to add these once. */
bitmap_clear (tab->new_replaceable_dependencies);
@@ -420,7 +420,7 @@ is_replaceable_p (gimple stmt)
return false;
/* Float expressions must go through memory if float-store is on. */
- if (flag_float_store
+ if (flag_float_store
&& FLOAT_TYPE_P (gimple_expr_type (stmt)))
return false;
@@ -442,8 +442,8 @@ is_replaceable_p (gimple stmt)
}
-/* This function will remove the expression for VERSION from replacement
- consideration in table TAB. If FREE_EXPR is true, then remove the
+/* This function will remove the expression for VERSION from replacement
+ consideration in table TAB. If FREE_EXPR is true, then remove the
expression from consideration as well by freeing the decl uid bitmap. */
static void
@@ -467,7 +467,7 @@ finished_with_expr (temp_expr_table_p tab, int version, bool free_expr)
/* Create an expression entry for a replaceable expression. */
-static void
+static void
process_replaceable (temp_expr_table_p tab, gimple stmt)
{
tree var, def, basevar;
@@ -520,7 +520,7 @@ kill_expr (temp_expr_table_p tab, int partition)
{
unsigned version;
- /* Mark every active expr dependent on this var as not replaceable.
+ /* Mark every active expr dependent on this var as not replaceable.
finished_with_expr can modify the bitmap, so we can't execute over it. */
while (tab->kill_list[partition])
{
@@ -534,7 +534,7 @@ kill_expr (temp_expr_table_p tab, int partition)
}
-/* This function kills all expressions in TAB which are dependent on virtual
+/* This function kills all expressions in TAB which are dependent on virtual
partitions. */
static inline void
@@ -555,7 +555,7 @@ mark_replaceable (temp_expr_table_p tab, tree var, bool more_replacing)
  /* Move the dependence list to the pending list.  */
if (more_replacing && tab->partition_dependencies[version])
- bitmap_ior_into (tab->new_replaceable_dependencies,
+ bitmap_ior_into (tab->new_replaceable_dependencies,
tab->partition_dependencies[version]);
finished_with_expr (tab, version, !more_replacing);
@@ -615,8 +615,8 @@ find_replaceable_in_bb (temp_expr_table_p tab, basic_block bb)
}
}
- /* Mark expression as replaceable unless stmt is volatile or the
- def variable has the same root variable as something in the
+ /* Mark expression as replaceable unless stmt is volatile or the
+ def variable has the same root variable as something in the
substitution list. */
if (gimple_has_volatile_ops (stmt) || same_root_var)
finished_with_expr (tab, ver, true);
@@ -624,7 +624,7 @@ find_replaceable_in_bb (temp_expr_table_p tab, basic_block bb)
mark_replaceable (tab, use, stmt_replaceable);
}
}
-
+
/* Next, see if this stmt kills off an active expression. */
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
{
@@ -649,10 +649,10 @@ find_replaceable_in_bb (temp_expr_table_p tab, basic_block bb)
/* This function is the driver routine for replacement of temporary expressions
- in the SSA->normal phase, operating on MAP. If there are replaceable
- expressions, a table is returned which maps SSA versions to the
- expressions they should be replaced with. A NULL_TREE indicates no
- replacement should take place. If there are no replacements at all,
+ in the SSA->normal phase, operating on MAP. If there are replaceable
+ expressions, a table is returned which maps SSA versions to the
+ expressions they should be replaced with. A NULL_TREE indicates no
+ replacement should take place. If there are no replacements at all,
NULL is returned by the function, otherwise an expression vector indexed
by SSA_NAME version numbers. */
@@ -674,7 +674,7 @@ find_replaceable_exprs (var_map map)
ret = free_temp_expr_table (table);
return ret;
-}
+}
/* Dump TER expression table EXPR to file F. */
@@ -709,7 +709,7 @@ debug_ter (FILE *f, temp_expr_table_p t)
unsigned x, y;
bitmap_iterator bi;
- fprintf (f, "\nDumping current state of TER\n virtual partition = %d\n",
+ fprintf (f, "\nDumping current state of TER\n virtual partition = %d\n",
VIRTUAL_PARTITION (t));
if (t->replaceable_expressions)
dump_replaceable_exprs (f, t->replaceable_expressions);
@@ -720,7 +720,7 @@ debug_ter (FILE *f, temp_expr_table_p t)
{
print_generic_expr (stderr, ssa_name (x), TDF_SLIM);
fprintf (f, " dep-parts : ");
- if (t->partition_dependencies[x]
+ if (t->partition_dependencies[x]
&& !bitmap_empty_p (t->partition_dependencies[x]))
{
EXECUTE_IF_SET_IN_BITMAP (t->partition_dependencies[x], 0, y, bi)
@@ -732,7 +732,7 @@ debug_ter (FILE *f, temp_expr_table_p t)
fprintf (stderr, "\n");
}
- bitmap_print (f, t->partition_in_use, "Partitions in use ",
+ bitmap_print (f, t->partition_in_use, "Partitions in use ",
"\npartition KILL lists:\n");
for (x = 0; x <= num_var_partitions (t->map); x++)
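[Editor's note: the long TER comment in this file describes per-expression partition dependencies and per-partition kill lists.  The self-contained sketch below mimics that bookkeeping with plain bitmasks standing in for GCC's bitmaps; all names and sizes are illustrative only, not the pass's actual interfaces.]

#include <stdio.h>

#define MAX_VER 16
#define MAX_PART 16

static unsigned partition_deps[MAX_VER]; /* partitions expr VER depends on  */
static unsigned kill_list[MAX_PART];     /* exprs a write to P invalidates  */
static unsigned replaceable;             /* exprs still up for replacement  */

/* Start tracking expression VER, which reads the partitions in PARTS.  */
static void
track_expr (int ver, unsigned parts)
{
  partition_deps[ver] = parts;
  replaceable |= 1u << ver;
  for (int p = 0; p < MAX_PART; p++)
    if (parts & (1u << p))
      kill_list[p] |= 1u << ver;
}

/* A decision was made about VER; drop it from every kill list it is on.  */
static void
finished_with_expr (int ver)
{
  for (int p = 0; p < MAX_PART; p++)
    if (partition_deps[ver] & (1u << p))
      kill_list[p] &= ~(1u << ver);
}

/* A definition into partition P invalidates every dependent expression.  */
static void
kill_partition (int p)
{
  replaceable &= ~kill_list[p];
  kill_list[p] = 0;
}

int
main (void)
{
  track_expr (2, 1u << 5);   /* a_2 = b_5 + 6 depends on b_5's partition  */
  kill_partition (5);        /* b_8 = ... shares that partition: kills a_2  */
  finished_with_expr (2);
  printf ("a_2 replaceable after kill: %d\n", (int) ((replaceable >> 2) & 1));
  return 0;
}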
diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
index 1bcf2bf1804..e57c18e064b 100644
--- a/gcc/tree-ssa-threadedge.c
+++ b/gcc/tree-ssa-threadedge.c
@@ -180,7 +180,7 @@ record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
}
/* Record temporary equivalences created by PHIs at the target of the
- edge E. Record unwind information for the equivalences onto STACK.
+ edge E. Record unwind information for the equivalences onto STACK.
If a PHI which prevents threading is encountered, then return FALSE
indicating we should not thread this edge, else return TRUE. */
@@ -199,7 +199,7 @@ record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
tree dst = gimple_phi_result (phi);
- /* If the desired argument is not the same as this PHI's result
+ /* If the desired argument is not the same as this PHI's result
and it is set by a PHI in E->dest, then we can not thread
through E->dest. */
if (src != dst
@@ -274,7 +274,7 @@ fold_assignment_stmt (gimple stmt)
Record unwind information for temporary equivalences onto STACK.
Use SIMPLIFY (a pointer to a callback function) to further simplify
- statements using pass specific information.
+ statements using pass specific information.
We might consider marking just those statements which ultimately
feed the COND_EXPR. It's not clear if the overhead of bookkeeping
@@ -372,7 +372,7 @@ record_temporary_equivalences_from_stmts_at_dest (edge e,
/* At this point we have a statement which assigns an RHS to an
SSA_VAR on the LHS. We want to try and simplify this statement
to expose more context sensitive equivalences which in turn may
- allow us to simplify the condition at the end of the loop.
+ allow us to simplify the condition at the end of the loop.
Handle simple copy operations as well as implied copies from
ASSERT_EXPRs. */
@@ -420,7 +420,7 @@ record_temporary_equivalences_from_stmts_at_dest (edge e,
|| (TREE_CODE (cached_lhs) != SSA_NAME
&& !is_gimple_min_invariant (cached_lhs)))
cached_lhs = (*simplify) (stmt, stmt);
-
+
/* Restore the statement's original uses/defs. */
i = 0;
FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
@@ -579,7 +579,7 @@ simplify_control_stmt_condition (edge e,
}
/* We are exiting E->src, see if E->dest ends with a conditional
- jump which has a known value when reached via E.
+ jump which has a known value when reached via E.
Special care is necessary if E is a back edge in the CFG as we
may have already recorded equivalences for E->dest into our
@@ -592,14 +592,14 @@ simplify_control_stmt_condition (edge e,
end with a conditional which is either always true or always
false when reached via the loop backedge. Thus we do not want
to blindly disable threading across a loop backedge.
-
+
DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
to avoid allocating memory.
-
+
HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
the simplified condition with left-hand sides of ASSERT_EXPRs they are
used in.
-
+
STACK is used to undo temporary equivalences created during the walk of
E->dest.
@@ -634,7 +634,7 @@ thread_across_edge (gimple dummy_cond,
goto fail;
}
}
-
+
stmt_count = 0;
/* PHIs create temporary equivalences. */
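[Editor's note: the record/unwind discipline referred to above, where temporary equivalences are pushed while an edge is examined and undone via STACK afterwards, can be illustrated by the minimal stand-alone sketch below; the fixed-size table and integer keys are assumptions made for the example only.]

#include <stdio.h>

#define N_NAMES 8

static int value_of[N_NAMES];          /* current equivalence per name  */

struct unwind_entry { int name; int prev; };
static struct unwind_entry stack[64];
static int stack_top;

/* Record NAME == VALUE, remembering the old binding so it can be undone.  */
static void
record_equiv (int name, int value)
{
  stack[stack_top].name = name;
  stack[stack_top].prev = value_of[name];
  stack_top++;
  value_of[name] = value;
}

/* Pop recorded equivalences until the stack is back at MARK.  */
static void
unwind_to (int mark)
{
  while (stack_top > mark)
    {
      stack_top--;
      value_of[stack[stack_top].name] = stack[stack_top].prev;
    }
}

int
main (void)
{
  int mark = stack_top;
  record_equiv (3, 42);                /* e.g. a PHI arg known along this edge  */
  printf ("inside edge: %d\n", value_of[3]);
  unwind_to (mark);
  printf ("after unwind: %d\n", value_of[3]);
  return 0;
}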
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 62524bb1460..db29b3b0151 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -481,7 +481,7 @@ redirection_block_p (basic_block bb)
|| is_gimple_debug (gsi_stmt (gsi))
|| gimple_nop_p (gsi_stmt (gsi))))
gsi_next (&gsi);
-
+
/* Check if this is an empty block. */
if (gsi_end_p (gsi))
return true;
@@ -909,7 +909,7 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
else
tgt_bb = split_edge (tgt_edge);
}
-
+
if (latch->aux)
{
/* First handle the case latch edge is redirected. */
@@ -951,7 +951,7 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
loop->header = latch->dest;
loop->latch = latch->src;
}
-
+
return true;
fail:
diff --git a/gcc/tree-ssa-uncprop.c b/gcc/tree-ssa-uncprop.c
index 1efea61bae0..8e7715431f4 100644
--- a/gcc/tree-ssa-uncprop.c
+++ b/gcc/tree-ssa-uncprop.c
@@ -53,7 +53,7 @@ struct edge_equivalency
in the CFG.
When complete, each edge that creates an equivalency will have an
- EDGE_EQUIVALENCY structure hanging off the edge's AUX field.
+ EDGE_EQUIVALENCY structure hanging off the edge's AUX field.
The caller is responsible for freeing the AUX fields. */
static void
@@ -157,7 +157,7 @@ associate_equivalences_with_edges (void)
equivalency->rhs = op1;
if (code == EQ_EXPR)
true_edge->aux = equivalency;
- else
+ else
false_edge->aux = equivalency;
}
@@ -358,7 +358,7 @@ record_equiv (tree value, tree equivalence)
free (equiv_hash_elt);
equiv_hash_elt = (struct equiv_hash_elt *) *slot;
-
+
VEC_safe_push (tree, heap, equiv_hash_elt->equivalences, equivalence);
}
@@ -585,7 +585,7 @@ gate_uncprop (void)
return flag_tree_dom != 0;
}
-struct gimple_opt_pass pass_uncprop =
+struct gimple_opt_pass pass_uncprop =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index 7fe94ee944d..b5d60b305af 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -725,7 +725,7 @@ verify_use (basic_block bb, basic_block def_bb, use_operand_p use_p,
err = true;
}
- /* Make sure the use is in an appropriate list by checking the previous
+ /* Make sure the use is in an appropriate list by checking the previous
element to make sure it's the same. */
if (use_p->prev == NULL)
{
@@ -1044,7 +1044,7 @@ verify_ssa (bool check_modified_stmt)
free_dominance_info (CDI_DOMINATORS);
else
set_dom_info_availability (CDI_DOMINATORS, orig_dom_state);
-
+
BITMAP_FREE (names_defined_in_bb);
timevar_pop (TV_TREE_SSA_VERIFY);
return;
@@ -1114,9 +1114,9 @@ void
init_tree_ssa (struct function *fn)
{
fn->gimple_df = GGC_CNEW (struct gimple_df);
- fn->gimple_df->referenced_vars = htab_create_ggc (20, uid_decl_map_hash,
+ fn->gimple_df->referenced_vars = htab_create_ggc (20, uid_decl_map_hash,
uid_decl_map_eq, NULL);
- fn->gimple_df->default_defs = htab_create_ggc (20, uid_ssaname_map_hash,
+ fn->gimple_df->default_defs = htab_create_ggc (20, uid_ssaname_map_hash,
uid_ssaname_map_eq, NULL);
pt_solution_reset (&fn->gimple_df->escaped);
pt_solution_reset (&fn->gimple_df->callused);
@@ -1444,7 +1444,7 @@ useless_type_conversion_p (tree outer_type, tree inner_type)
else if (AGGREGATE_TYPE_P (inner_type)
&& TREE_CODE (inner_type) == TREE_CODE (outer_type))
return false;
-
+
return false;
}
@@ -1494,7 +1494,7 @@ tree_ssa_strip_useless_type_conversions (tree exp)
/* Internal helper for walk_use_def_chains. VAR, FN and DATA are as
described in walk_use_def_chains.
-
+
VISITED is a pointer set used to mark visited SSA_NAMEs to avoid
infinite loops. We used to have a bitmap for this to just mark
SSA versions we had visited. But non-sparse bitmaps are way too
@@ -1552,10 +1552,10 @@ walk_use_def_chains_1 (tree var, walk_use_def_chains_fn fn, void *data,
if (fn (gimple_phi_arg_def (def_stmt, i), def_stmt, data))
return true;
}
-
+
return false;
}
-
+
/* Walk use-def chains starting at the SSA variable VAR. Call
@@ -1563,7 +1563,7 @@ walk_use_def_chains_1 (tree var, walk_use_def_chains_fn fn, void *data,
arguments: VAR, its defining statement (DEF_STMT) and a generic
pointer to whatever state information that FN may want to maintain
(DATA). FN is able to stop the walk by returning true, otherwise
- in order to continue the walk, FN should return false.
+ in order to continue the walk, FN should return false.
Note, that if DEF_STMT is a PHI node, the semantics are slightly
different. The first argument to FN is no longer the original
@@ -1657,7 +1657,7 @@ warn_uninit (tree t, const char *gmsgid, void *data)
/* Do not warn if it can be initialized outside this module. */
if (is_global_var (var))
return;
-
+
location = (context != NULL && gimple_has_location (context))
? gimple_location (context)
: DECL_SOURCE_LOCATION (var);
@@ -1714,7 +1714,7 @@ warn_uninitialized_var (tree *tp, int *walk_subtrees, void *data_)
use_operand_p vuse;
tree op;
- /* If there is not gimple stmt,
+  /* If there is no gimple stmt,
or alias information has not been computed,
then we cannot check VUSE ops. */
if (data->stmt == NULL)
@@ -1729,7 +1729,7 @@ warn_uninitialized_var (tree *tp, int *walk_subtrees, void *data_)
return NULL_TREE;
op = USE_FROM_PTR (vuse);
- if (t != SSA_NAME_VAR (op)
+ if (t != SSA_NAME_VAR (op)
|| !SSA_NAME_IS_DEFAULT_DEF (op))
return NULL_TREE;
/* If this is a VUSE of t and it is the default definition,
@@ -1928,7 +1928,7 @@ execute_update_addresses_taken (bool do_optimize)
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
tree lhs = gimple_get_lhs (stmt);
-
+
/* We may not rewrite TMR_SYMBOL to SSA. */
if (lhs && TREE_CODE (lhs) == TARGET_MEM_REF
&& TMR_SYMBOL (lhs))
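[Editor's note: the walk_use_def_chains contract described above -- the callback receives each variable and its defining statement, returns true to stop, and a visited set guards against cycles -- roughly corresponds to the toy stand-alone walker below.  The def table is invented for illustration and is not GCC's SSA representation.]

#include <stdio.h>
#include <stdbool.h>

#define N_NAMES 6

/* def_ops[v] lists the names used by the statement defining _v (-1: none).
   _5 = _4 + ..., _4 = _2 + _3, _3 = _2 * ..., _2 = _0 + _1.  */
static const int def_ops[N_NAMES][2] = {
  { -1, -1 }, { -1, -1 }, { 0, 1 }, { 2, -1 }, { 2, 3 }, { 4, -1 }
};

typedef bool (*walk_fn) (int var, void *data);

/* Visit VAR and, recursively, everything its definition uses.  VISITED
   plays the role of the pointer set mentioned above; FN may stop the
   walk by returning true.  */
static bool
walk_use_def (int var, unsigned *visited, walk_fn fn, void *data)
{
  if (*visited & (1u << var))
    return false;
  *visited |= 1u << var;
  if (fn (var, data))
    return true;
  for (int i = 0; i < 2; i++)
    if (def_ops[var][i] >= 0
        && walk_use_def (def_ops[var][i], visited, fn, data))
      return true;
  return false;
}

static bool
count_names (int var, void *data)
{
  (void) var;
  ++*(int *) data;
  return false;                        /* keep walking  */
}

int
main (void)
{
  unsigned visited = 0;
  int n = 0;
  walk_use_def (5, &visited, count_names, &n);
  printf ("names feeding _5: %d\n", n);
  return 0;
}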
diff --git a/gcc/tree-ssanames.c b/gcc/tree-ssanames.c
index bb0880260ff..46fa42029ce 100644
--- a/gcc/tree-ssanames.c
+++ b/gcc/tree-ssanames.c
@@ -3,17 +3,17 @@
Free Software Foundation, Inc.
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
-
+
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -30,7 +30,7 @@ along with GCC; see the file COPYING3. If not see
/* Rewriting a function into SSA form can create a huge number of SSA_NAMEs,
many of which may be thrown away shortly after their creation if jumps
- were threaded through PHI nodes.
+ were threaded through PHI nodes.
While our garbage collection mechanisms will handle this situation, it
is extremely wasteful to create nodes and throw them away, especially
@@ -44,7 +44,7 @@ along with GCC; see the file COPYING3. If not see
Right now we maintain our free list on a per-function basis. It may
or may not make sense to maintain the free list for the duration of
- a compilation unit.
+ a compilation unit.
External code should rely solely upon HIGHEST_SSA_VERSION and the
externally defined functions. External code should not know about
@@ -166,7 +166,7 @@ make_ssa_name_fn (struct function *fn, tree var, gimple stmt)
/* We no longer need the SSA_NAME expression VAR, release it so that
- it may be reused.
+ it may be reused.
Note it is assumed that no calls to make_ssa_name will be made
until all uses of the ssa name are released and that the only
@@ -195,7 +195,7 @@ release_ssa_name (tree var)
/* release_ssa_name can be called multiple times on a single SSA_NAME.
However, it should only end up on our free list one time. We
keep a status bit in the SSA_NAME node itself to indicate it has
- been put on the free list.
+ been put on the free list.
Note that once on the freelist you can not reference the SSA_NAME's
defining statement. */
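[Editor's note: the free-list scheme this header comment describes -- recycling released names and using a status bit so a name is put on the list only once -- is roughly the stand-alone pattern below.  The struct layout is illustrative, not SSA_NAME's.]

#include <stdio.h>
#include <stdlib.h>

struct name_node
{
  int version;
  int on_free_list;                /* status bit: already released?  */
  struct name_node *next;          /* free-list link  */
};

static struct name_node *free_list;
static int next_version;

/* Hand out a node, preferring a recycled one over a fresh allocation.  */
static struct name_node *
make_name (void)
{
  struct name_node *n;
  if (free_list)
    {
      n = free_list;
      free_list = n->next;
    }
  else
    n = malloc (sizeof *n);
  n->version = next_version++;
  n->on_free_list = 0;
  n->next = NULL;
  return n;
}

/* Put N back on the free list; tolerate being called more than once.  */
static void
release_name (struct name_node *n)
{
  if (n->on_free_list)
    return;
  n->on_free_list = 1;
  n->next = free_list;
  free_list = n;
}

int
main (void)
{
  struct name_node *a = make_name ();
  release_name (a);
  release_name (a);                /* harmless second release  */
  struct name_node *b = make_name ();
  printf ("recycled: %d\n", a == b);
  return 0;
}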
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 3409ef83259..1fd9094c866 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -443,7 +443,7 @@ build_constructors (gimple swtch)
tree low = CASE_LOW (cs);
pos = CASE_LOW (cs);
- do
+ do
{
constructor_elt *elt;
@@ -473,7 +473,7 @@ constructor_contains_same_values_p (VEC (constructor_elt, gc) *vec)
for (i = 0; i < len; i++)
{
constructor_elt *elt = VEC_index (constructor_elt, vec, i);
-
+
if (!prev)
prev = elt->value;
else if (!operand_equal_p (elt->value, prev, OEP_ONLY_CONST))
diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c
index c29bfc304c5..de2a45e949c 100644
--- a/gcc/tree-tailcall.c
+++ b/gcc/tree-tailcall.c
@@ -65,7 +65,7 @@ along with GCC; see the file COPYING3. If not see
return acc;
}
- To do this, we maintain two accumulators (a_acc and m_acc) that indicate
+ To do this, we maintain two accumulators (a_acc and m_acc) that indicate
when we reach the return x statement, we should return a_acc + x * m_acc
instead. They are initially initialized to 0 and 1, respectively,
so the semantics of the function is obviously preserved. If we are
@@ -79,12 +79,12 @@ along with GCC; see the file COPYING3. If not see
1) Just return x, where x is not in any of the remaining special shapes.
We rewrite this to a gimple equivalent of return m_acc * x + a_acc.
-
+
2) return f (...), where f is the current function, is rewritten in a
classical tail-recursion elimination way, into assignment of arguments
and jump to the start of the function. Values of the accumulators
are unchanged.
-
+
3) return a + m * f(...), where a and m do not depend on call to f.
To preserve the semantics described before we want this to be rewritten
in such a way that we finally return
@@ -211,7 +211,7 @@ independent_of_stmt_p (tree expr, gimple at, gimple_stmt_iterator gsi)
bb->aux = &bb->aux;
while (1)
- {
+ {
at = SSA_NAME_DEF_STMT (expr);
bb = gimple_bb (at);
@@ -270,7 +270,7 @@ process_assignment (gimple stmt, gimple_stmt_iterator call, tree *m,
enum tree_code code = gimple_assign_rhs_code (stmt);
enum gimple_rhs_class rhs_class = get_gimple_rhs_class (code);
tree src_var = gimple_assign_rhs1 (stmt);
-
+
/* See if this is a simple copy operation of an SSA name to the function
result. In that case we may have a simple tail call. Ignore type
conversions that can never produce extra code between the function
@@ -350,7 +350,7 @@ propagate_through_phis (tree var, edge e)
{
basic_block dest = e->dest;
gimple_stmt_iterator gsi;
-
+
for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
@@ -411,7 +411,7 @@ find_tail_calls (basic_block bb, struct tailcall **ret)
return;
}
- /* If the LHS of our call is not just a simple register, we can't
+ /* If the LHS of our call is not just a simple register, we can't
transform this into a tail or sibling call. This situation happens,
in (e.g.) "*p = foo()" where foo returns a struct. In this case
we won't have a temporary here, but we need to carry out the side
@@ -569,7 +569,7 @@ add_successor_phi_arg (edge e, tree var, tree phi_arg)
tree node of the statement's result. */
static tree
-adjust_return_value_with_ops (enum tree_code code, const char *label,
+adjust_return_value_with_ops (enum tree_code code, const char *label,
tree op0, tree op1, gimple_stmt_iterator gsi,
enum gsi_iterator_update update)
{
@@ -590,7 +590,7 @@ adjust_return_value_with_ops (enum tree_code code, const char *label,
return result;
}
-/* Creates a new GIMPLE statement that adjusts the value of accumulator ACC by
+/* Creates a new GIMPLE statement that adjusts the value of accumulator ACC by
the computation specified by CODE and OP1 and insert the statement
at the position specified by GSI as a new statement. Returns new SSA name
of updated accumulator. */
@@ -708,7 +708,7 @@ arg_needs_copy_p (tree param)
if (!is_gimple_reg (param) || !var_ann (param))
return false;
-
+
/* Parameters that are only defined but never used need not be copied. */
def = gimple_default_def (cfun, param);
if (!def)
@@ -893,7 +893,7 @@ create_tailcall_accumulator (const char *label, basic_block bb, tree init)
UNKNOWN_LOCATION);
return PHI_RESULT (phi);
}
-
+
/* Optimizes tail calls in the function, turning the tail recursion
into iteration. */
@@ -953,7 +953,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
set_default_def (param, new_name);
phi = create_phi_node (name, first);
SSA_NAME_DEF_STMT (name) = phi;
- add_phi_arg (phi, new_name, single_pred_edge (first),
+ add_phi_arg (phi, new_name, single_pred_edge (first),
EXPR_LOCATION (param));
}
phis_constructed = true;
@@ -1016,7 +1016,7 @@ execute_tail_calls (void)
return tree_optimize_tail_calls_1 (true);
}
-struct gimple_opt_pass pass_tail_recursion =
+struct gimple_opt_pass pass_tail_recursion =
{
{
GIMPLE_PASS,
@@ -1035,7 +1035,7 @@ struct gimple_opt_pass pass_tail_recursion =
}
};
-struct gimple_opt_pass pass_tail_calls =
+struct gimple_opt_pass pass_tail_calls =
{
{
GIMPLE_PASS,
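[Editor's note: the accumulator transformation outlined at the top of tree-tailcall.c can be seen concretely in a small stand-alone example.  fact_rec has the "return a + m * f (...)" shape; fact_iter is, conceptually, what the pass produces, with a_acc/m_acc initialized to 0 and 1 and the recursive call turned into a jump.  The names and the factorial example are illustrative only.]

#include <stdio.h>

static long
fact_rec (long n)
{
  if (n <= 1)
    return 1;
  return n * fact_rec (n - 1);     /* a = 0, m = n  */
}

static long
fact_iter (long n)
{
  long a_acc = 0, m_acc = 1;       /* initial values preserve the semantics  */
 tail:
  if (n <= 1)
    return a_acc + 1 * m_acc;      /* plain "return x" becomes a_acc + x * m_acc  */
  m_acc *= n;                      /* "return a + m * f (...)": fold m into m_acc  */
  n = n - 1;                       /* assign the argument and loop  */
  goto tail;
}

int
main (void)
{
  printf ("%ld %ld\n", fact_rec (10), fact_iter (10));
  return 0;
}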
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index c13c2750270..abc848537b0 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -1,7 +1,7 @@
-/* Data References Analysis and Manipulation Utilities for Vectorization.
+/* Data References Analysis and Manipulation Utilities for Vectorization.
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
Foundation, Inc.
- Contributed by Dorit Naishlos <dorit@il.ibm.com>
+ Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
@@ -41,19 +41,19 @@ along with GCC; see the file COPYING3. If not see
/* Return the smallest scalar part of STMT.
- This is used to determine the vectype of the stmt. We generally set the
- vectype according to the type of the result (lhs). For stmts whose
+ This is used to determine the vectype of the stmt. We generally set the
+ vectype according to the type of the result (lhs). For stmts whose
result-type is different than the type of the arguments (e.g., demotion,
- promotion), vectype will be reset appropriately (later). Note that we have
+ promotion), vectype will be reset appropriately (later). Note that we have
to visit the smallest datatype in this function, because that determines the
- VF. If the smallest datatype in the loop is present only as the rhs of a
+ VF. If the smallest datatype in the loop is present only as the rhs of a
promotion operation - we'd miss it.
Such a case, where a variable of this datatype does not appear in the lhs
anywhere in the loop, can only occur if it's an invariant: e.g.:
- 'int_x = (int) short_inv', which we'd expect to have been optimized away by
+ 'int_x = (int) short_inv', which we'd expect to have been optimized away by
invariant motion. However, we cannot rely on invariant motion to always take
invariants out of the loop, and so in the case of promotion we also have to
- check the rhs.
+ check the rhs.
LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
types. */
@@ -77,8 +77,8 @@ vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
if (rhs < lhs)
scalar_type = rhs_type;
}
-
- *lhs_size_unit = lhs;
+
+ *lhs_size_unit = lhs;
*rhs_size_unit = rhs;
return scalar_type;
}
@@ -87,7 +87,7 @@ vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
/* Find the place of the data-ref in STMT in the interleaving chain that starts
from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
-int
+int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
gimple next_stmt = first_stmt;
@@ -119,11 +119,11 @@ vect_insert_into_interleaving_chain (struct data_reference *dra,
{
gimple prev, next;
tree next_init;
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
prev = DR_GROUP_FIRST_DR (stmtinfo_b);
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
while (next)
{
next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
@@ -145,8 +145,8 @@ vect_insert_into_interleaving_chain (struct data_reference *dra,
/* Function vect_update_interleaving_chain.
-
- For two data-refs DRA and DRB that are a part of a chain interleaved data
+
+ For two data-refs DRA and DRB that are a part of a chain interleaved data
accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
There are four possible cases:
@@ -171,7 +171,7 @@ static void
vect_update_interleaving_chain (struct data_reference *drb,
struct data_reference *dra)
{
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
tree next_init, init_dra_chain, init_drb_chain;
gimple first_a, first_b;
@@ -196,7 +196,7 @@ vect_update_interleaving_chain (struct data_reference *drb,
return;
}
- /* 3. DRA is a part of a chain and DRB is not. */
+ /* 3. DRA is a part of a chain and DRB is not. */
if (DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
{
gimple old_first_stmt = DR_GROUP_FIRST_DR (stmtinfo_a);
@@ -206,12 +206,12 @@ vect_update_interleaving_chain (struct data_reference *drb,
if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
{
- /* DRB's init is smaller than the init of the stmt previously marked
- as the first stmt of the interleaving chain of DRA. Therefore, we
+ /* DRB's init is smaller than the init of the stmt previously marked
+ as the first stmt of the interleaving chain of DRA. Therefore, we
update FIRST_STMT and put DRB in the head of the list. */
DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
DR_GROUP_NEXT_DR (stmtinfo_b) = old_first_stmt;
-
+
/* Update all the stmts in the list to point to the new FIRST_STMT. */
tmp = old_first_stmt;
while (tmp)
@@ -224,11 +224,11 @@ vect_update_interleaving_chain (struct data_reference *drb,
{
/* Insert DRB in the list of DRA. */
vect_insert_into_interleaving_chain (drb, dra);
- DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a);
+ DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a);
}
return;
}
-
+
/* 4. both DRA and DRB are in some interleaving chains. */
first_a = DR_GROUP_FIRST_DR (stmtinfo_a);
first_b = DR_GROUP_FIRST_DR (stmtinfo_b);
@@ -239,29 +239,29 @@ vect_update_interleaving_chain (struct data_reference *drb,
if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
{
- /* Insert the nodes of DRA chain into the DRB chain.
+ /* Insert the nodes of DRA chain into the DRB chain.
After inserting a node, continue from this node of the DRB chain (don't
      start from the beginning).  */
node = DR_GROUP_FIRST_DR (stmtinfo_a);
- prev = DR_GROUP_FIRST_DR (stmtinfo_b);
+ prev = DR_GROUP_FIRST_DR (stmtinfo_b);
first_stmt = first_b;
}
else
{
- /* Insert the nodes of DRB chain into the DRA chain.
+ /* Insert the nodes of DRB chain into the DRA chain.
After inserting a node, continue from this node of the DRA chain (don't
      start from the beginning).  */
node = DR_GROUP_FIRST_DR (stmtinfo_b);
- prev = DR_GROUP_FIRST_DR (stmtinfo_a);
+ prev = DR_GROUP_FIRST_DR (stmtinfo_a);
first_stmt = first_a;
}
-
+
while (node)
{
node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
- next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
+ next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
while (next)
- {
+ {
next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
if (tree_int_cst_compare (next_init, node_init) > 0)
{
@@ -280,9 +280,9 @@ vect_update_interleaving_chain (struct data_reference *drb,
DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = NULL;
prev = node;
- }
+ }
DR_GROUP_FIRST_DR (vinfo_for_stmt (node)) = first_stmt;
- node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node));
+ node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node));
}
}
@@ -304,12 +304,12 @@ vect_equal_offsets (tree offset1, tree offset2)
if (TREE_CODE (offset1) != TREE_CODE (offset2)
|| !BINARY_CLASS_P (offset1)
- || !BINARY_CLASS_P (offset2))
+ || !BINARY_CLASS_P (offset2))
return false;
-
- res0 = vect_equal_offsets (TREE_OPERAND (offset1, 0),
+
+ res0 = vect_equal_offsets (TREE_OPERAND (offset1, 0),
TREE_OPERAND (offset2, 0));
- res1 = vect_equal_offsets (TREE_OPERAND (offset1, 1),
+ res1 = vect_equal_offsets (TREE_OPERAND (offset1, 1),
TREE_OPERAND (offset2, 1));
return (res0 && res1);
@@ -321,7 +321,7 @@ vect_equal_offsets (tree offset1, tree offset2)
Check if DRA and DRB are a part of interleaving. In case they are, insert
DRA and DRB in an interleaving chain. */
-static bool
+static bool
vect_check_interleaving (struct data_reference *dra,
struct data_reference *drb)
{
@@ -330,26 +330,26 @@ vect_check_interleaving (struct data_reference *dra,
/* Check that the data-refs have same first location (except init) and they
are both either store or load (not load and store). */
if ((DR_BASE_ADDRESS (dra) != DR_BASE_ADDRESS (drb)
- && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
+ && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
|| TREE_CODE (DR_BASE_ADDRESS (drb)) != ADDR_EXPR
- || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
+ || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
!= TREE_OPERAND (DR_BASE_ADDRESS (drb),0)))
|| !vect_equal_offsets (DR_OFFSET (dra), DR_OFFSET (drb))
- || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
+ || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
|| DR_IS_READ (dra) != DR_IS_READ (drb))
return false;
/* Check:
1. data-refs are of the same type
2. their steps are equal
- 3. the step (if greater than zero) is greater than the difference between
+ 3. the step (if greater than zero) is greater than the difference between
data-refs' inits. */
type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
if (type_size_a != type_size_b
|| tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
- || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
+ || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
TREE_TYPE (DR_REF (drb))))
return false;
@@ -359,16 +359,16 @@ vect_check_interleaving (struct data_reference *dra,
if (init_a > init_b)
{
- /* If init_a == init_b + the size of the type * k, we have an interleaving,
+ /* If init_a == init_b + the size of the type * k, we have an interleaving,
and DRB is accessed before DRA. */
diff_mod_size = (init_a - init_b) % type_size_a;
if (step && (init_a - init_b) > step)
- return false;
+ return false;
if (diff_mod_size == 0)
{
- vect_update_interleaving_chain (drb, dra);
+ vect_update_interleaving_chain (drb, dra);
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "Detected interleaving ");
@@ -377,11 +377,11 @@ vect_check_interleaving (struct data_reference *dra,
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
return true;
- }
+ }
}
- else
+ else
{
- /* If init_b == init_a + the size of the type * k, we have an
+ /* If init_b == init_a + the size of the type * k, we have an
interleaving, and DRA is accessed before DRB. */
diff_mod_size = (init_b - init_a) % type_size_a;
@@ -390,7 +390,7 @@ vect_check_interleaving (struct data_reference *dra,
if (diff_mod_size == 0)
{
- vect_update_interleaving_chain (dra, drb);
+ vect_update_interleaving_chain (dra, drb);
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "Detected interleaving ");
@@ -399,9 +399,9 @@ vect_check_interleaving (struct data_reference *dra,
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
return true;
- }
+ }
}
-
+
return false;
}
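[Editor's note: the init/step arithmetic in vect_check_interleaving above -- two same-base references interleave when their INITs differ by a multiple of the element size and by no more than the common step -- reduces to a check like the stand-alone sketch below, with plain integers standing in for GCC's trees.]

#include <stdio.h>

/* Return nonzero if references at byte offsets INIT_A and INIT_B, with
   element size TYPE_SIZE and per-iteration STEP, form an interleaving.  */
static int
interleaving_p (long init_a, long init_b, long type_size, long step)
{
  long lo = init_a < init_b ? init_a : init_b;
  long hi = init_a < init_b ? init_b : init_a;
  long diff = hi - lo;

  if (step && diff > step)
    return 0;                          /* gap larger than one iteration  */
  return diff % type_size == 0;        /* element-aligned offset  */
}

int
main (void)
{
  /* a[2*i] and a[2*i + 1] with 4-byte elements and an 8-byte step.  */
  printf ("%d\n", interleaving_p (0, 4, 4, 8));   /* 1: interleaves  */
  printf ("%d\n", interleaving_p (0, 6, 4, 8));   /* 0: misaligned   */
  return 0;
}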
@@ -485,7 +485,7 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
Return TRUE if there (might) exist a dependence between a memory-reference
DRA and a memory-reference DRB. When versioning for alias may check a
dependence at run-time, return FALSE. */
-
+
static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
loop_vec_info loop_vinfo)
@@ -495,13 +495,13 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
int vectorization_factor = 0;
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
- stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+ stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
lambda_vector dist_v;
unsigned int loop_depth;
-
+
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
{
/* Independent data accesses. */
@@ -517,10 +517,10 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
return false;
-
+
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
- if (loop_vinfo)
+ if (loop_vinfo)
{
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
@@ -530,7 +530,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
fprintf (vect_dump, " and ");
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
-
+
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
}
@@ -552,13 +552,13 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
}
/* Versioning for alias is not yet supported for basic block SLP, and
- dependence distance is unapplicable, hence, in case of known data
+   dependence distance is inapplicable, hence, in case of known data
dependence, basic block vectorization is impossible for now. */
if (!loop_vinfo)
{
if (dra != drb && vect_check_interleaving (dra, drb))
return false;
-
+
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "determined dependence between ");
@@ -567,7 +567,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
}
- return true;
+ return true;
}
/* Loop-based vectorization and known data dependence. */
@@ -582,7 +582,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
}
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
- }
+ }
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
for (i = 0; VEC_iterate (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v); i++)
@@ -609,7 +609,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
}
/* For interleaving, mark that there is a read-write dependency if
- necessary. We check before that one of the data-refs is store. */
+ necessary. We check before that one of the data-refs is store. */
if (DR_IS_READ (dra))
DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
else
@@ -617,15 +617,15 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (DR_IS_READ (drb))
DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
}
-
+
continue;
}
- if (abs (dist) >= vectorization_factor
+ if (abs (dist) >= vectorization_factor
|| (dist > 0 && DDR_REVERSED_P (ddr)))
{
- /* Dependence distance does not create dependence, as far as
- vectorization is concerned, in this case. If DDR_REVERSED_P the
+ /* Dependence distance does not create dependence, as far as
+ vectorization is concerned, in this case. If DDR_REVERSED_P the
order of the data-refs in DDR was reversed (to make distance
vector positive), and the actual distance is negative. */
if (vect_print_dump_info (REPORT_DR_DETAILS))
@@ -649,26 +649,26 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
}
/* Function vect_analyze_data_ref_dependences.
-
+
Examine all the data references in the loop, and make sure there do not
exist any data dependences between them. */
-
+
bool
-vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
+vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
unsigned int i;
VEC (ddr_p, heap) *ddrs = NULL;
struct data_dependence_relation *ddr;
- if (vect_print_dump_info (REPORT_DETAILS))
+ if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_analyze_dependences ===");
-
+
if (loop_vinfo)
ddrs = LOOP_VINFO_DDRS (loop_vinfo);
else
ddrs = BB_VINFO_DDRS (bb_vinfo);
-
+
for (i = 0; VEC_iterate (ddr_p, ddrs, i, ddr); i++)
if (vect_analyze_data_ref_dependence (ddr, loop_vinfo))
return false;
@@ -693,7 +693,7 @@ static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
gimple stmt = DR_STMT (dr);
- stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
tree ref = DR_REF (dr);
@@ -702,13 +702,13 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
bool base_aligned;
tree misalign;
tree aligned_to, alignment;
-
+
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "vect_compute_data_ref_alignment:");
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
-
+
/* Initialize misalignment to unknown. */
SET_DR_MISALIGNMENT (dr, -1);
@@ -727,7 +727,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
{
tree step = DR_STEP (dr);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
-
+
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
{
if (vect_print_dump_info (REPORT_ALIGNMENT))
@@ -758,7 +758,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
return true;
}
- if ((DECL_P (base)
+ if ((DECL_P (base)
&& tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
alignment) >= 0)
|| (TREE_CODE (base_addr) == SSA_NAME
@@ -767,11 +767,11 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
alignment) >= 0))
base_aligned = true;
else
- base_aligned = false;
+ base_aligned = false;
- if (!base_aligned)
+ if (!base_aligned)
{
- /* Do not change the alignment of global variables if
+ /* Do not change the alignment of global variables if
flag_section_anchors is enabled. */
if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
|| (TREE_STATIC (base) && flag_section_anchors))
@@ -783,7 +783,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
}
return true;
}
-
+
/* Force the alignment of the decl.
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
@@ -795,7 +795,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
/* At this point we assume that the base is aligned. */
gcc_assert (base_aligned
- || (TREE_CODE (base) == VAR_DECL
+ || (TREE_CODE (base) == VAR_DECL
&& DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
/* Modulo alignment. */
@@ -827,7 +827,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
Return FALSE if a data reference is found that cannot be vectorized. */
static bool
-vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
+vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
VEC (data_reference_p, heap) *datarefs;
@@ -838,7 +838,7 @@ vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
-
+
for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
if (!vect_compute_data_ref_alignment (dr))
return false;
@@ -939,10 +939,10 @@ vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
if (DR_IS_READ (dr))
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: unsupported unaligned load.");
else
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: unsupported unaligned store.");
}
return false;
@@ -990,7 +990,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
only if natural alignment is reachable through peeling. */
if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
{
- HOST_WIDE_INT elmsize =
+ HOST_WIDE_INT elmsize =
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
if (vect_print_dump_info (REPORT_DETAILS))
{
@@ -1164,7 +1164,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
+ How many accesses will become aligned due to the peeling
- How many accesses will become unaligned due to the peeling,
and the cost of misaligned accesses.
- - The cost of peeling (the extra runtime checks, the increase
+ - The cost of peeling (the extra runtime checks, the increase
in code size).
The scheme we use FORNOW: peel to force the alignment of the first
@@ -1194,7 +1194,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
}
}
- vect_versioning_for_alias_required
+ vect_versioning_for_alias_required
= LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
/* Temporarily, if versioning for alias is required, we disable peeling
@@ -1225,10 +1225,10 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
npeel = nelements - mis;
- /* For interleaved data access every iteration accesses all the
+ /* For interleaved data access every iteration accesses all the
members of the group, therefore we divide the number of iterations
by the group size. */
- stmt_info = vinfo_for_stmt (DR_STMT (dr0));
+ stmt_info = vinfo_for_stmt (DR_STMT (dr0));
if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
npeel /= DR_GROUP_SIZE (stmt_info);
@@ -1256,7 +1256,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
vect_update_misalignment_for_peel (dr, dr0, npeel);
supportable_dr_alignment = vect_supportable_dr_alignment (dr);
SET_DR_MISALIGNMENT (dr, save_misalignment);
-
+
if (!supportable_dr_alignment)
{
do_peeling = false;
@@ -1303,8 +1303,8 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
4) all misaligned data refs with a known misalignment are supported, and
5) the number of runtime alignment checks is within reason. */
- do_versioning =
- flag_tree_vect_loop_version
+ do_versioning =
+ flag_tree_vect_loop_version
&& optimize_loop_nest_for_speed_p (loop)
&& (!loop->inner); /* FORNOW */
@@ -1342,7 +1342,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
stmt = DR_STMT (dr);
vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
gcc_assert (vectype);
-
+
/* The rightmost bits of an aligned address must be zeros.
Construct the mask needed for this test. For example,
GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
@@ -1362,7 +1362,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
DR_STMT (dr));
}
}
-
+
/* Versioning requires at least one misaligned data reference. */
if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
do_versioning = false;
@@ -1413,7 +1413,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
Return FALSE if a data reference is found that cannot be vectorized. */
bool
-vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
+vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1422,7 +1422,7 @@ vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: can't calculate alignment for data ref.");
return false;
}
@@ -1450,16 +1450,16 @@ vect_analyze_group_access (struct data_reference *dr)
HOST_WIDE_INT stride;
bool slp_impossible = false;
- /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
+ /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
interleaving group (including gaps). */
- stride = dr_step / type_size;
+ stride = dr_step / type_size;
/* Not consecutive access is possible only if it is a part of interleaving. */
if (!DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
{
      /* Check if this DR is a part of interleaving, and is a single
element of the group that is accessed in the loop. */
-
+
/* Gaps are supported only for loads. STEP must be a multiple of the type
size. The size of the group must be a power of 2. */
if (DR_IS_READ (dr)
@@ -1572,7 +1572,7 @@ vect_analyze_group_access (struct data_reference *dr)
the type to get COUNT_IN_BYTES. */
count_in_bytes = type_size * count;
- /* Check that the size of the interleaving (including gaps) is not
+ /* Check that the size of the interleaving (including gaps) is not
greater than STEP. */
if (dr_step && dr_step < count_in_bytes + gaps * type_size)
{
@@ -1592,9 +1592,9 @@ vect_analyze_group_access (struct data_reference *dr)
{
slp_impossible = true;
/* There is a gap after the last load in the group. This gap is a
- difference between the stride and the number of elements. When
- there is no gap, this difference should be 0. */
- DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
+ difference between the stride and the number of elements. When
+ there is no gap, this difference should be 0. */
+ DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
}
else
{
@@ -1618,7 +1618,7 @@ vect_analyze_group_access (struct data_reference *dr)
return false;
}
- /* FORNOW: we handle only interleaving that is a power of 2.
+ /* FORNOW: we handle only interleaving that is a power of 2.
We don't fail here if it may be still possible to vectorize the
group using SLP. If not, the size of the group will be checked in
vect_analyze_operations, and the vectorization will fail. */
@@ -1633,12 +1633,12 @@ vect_analyze_group_access (struct data_reference *dr)
if (stride == 0)
stride = count;
-
+
DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
- /* SLP: create an SLP data structure for every interleaving group of
+ /* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyse_slp. */
if (!DR_IS_READ (dr) && !slp_impossible)
{
@@ -1646,7 +1646,7 @@ vect_analyze_group_access (struct data_reference *dr)
VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
stmt);
if (bb_vinfo)
- VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
+ VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
stmt);
}
}
@@ -1672,7 +1672,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
-
+
if (loop_vinfo && !step)
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1682,7 +1682,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
/* Don't allow invariant accesses in loops. */
if (loop_vinfo && dr_step == 0)
- return false;
+ return false;
if (loop && nested_in_vect_loop_p (loop, stmt))
{
@@ -1693,13 +1693,13 @@ vect_analyze_data_ref_access (struct data_reference *dr)
/* For the rest of the analysis we use the outer-loop step. */
step = STMT_VINFO_DR_STEP (stmt_info);
dr_step = TREE_INT_CST_LOW (step);
-
+
if (dr_step == 0)
{
if (vect_print_dump_info (REPORT_ALIGNMENT))
fprintf (vect_dump, "zero step in outer loop.");
if (DR_IS_READ (dr))
- return true;
+ return true;
else
return false;
}
@@ -1805,7 +1805,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
break;
}
}
-
+
if (found)
{
VEC_ordered_remove (ddr_p, ddrs, i);
@@ -1839,7 +1839,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
The general structure of the analysis of data refs in the vectorizer is as
follows:
- 1- vect_analyze_data_refs(loop/bb): call
+ 1- vect_analyze_data_refs(loop/bb): call
compute_data_dependences_for_loop/bb to find and analyze all data-refs
in the loop/bb and their dependences.
2- vect_analyze_dependences(): apply dependence testing using ddrs.
@@ -1849,7 +1849,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
*/
bool
-vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
+vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
struct loop *loop = NULL;
basic_block bb = NULL;
@@ -1860,7 +1860,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");
-
+
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -1886,8 +1886,8 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
gimple stmt;
stmt_vec_info stmt_info;
basic_block bb;
- tree base, offset, init;
-
+ tree base, offset, init;
+
if (!dr || !DR_REF (dr))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
@@ -1921,17 +1921,17 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
base = unshare_expr (DR_BASE_ADDRESS (dr));
offset = unshare_expr (DR_OFFSET (dr));
init = unshare_expr (DR_INIT (dr));
-
+
/* Update DR field in stmt_vec_info struct. */
bb = gimple_bb (stmt);
      /* If the dataref is in an inner-loop of the loop that is considered
for vectorization, we also want to analyze the access relative to
- the outer-loop (DR contains information only relative to the
+ the outer-loop (DR contains information only relative to the
inner-most enclosing loop). We do that by building a reference to the
first location accessed by the inner-loop, and analyze it relative to
- the outer-loop. */
- if (loop && nested_in_vect_loop_p (loop, stmt))
+ the outer-loop. */
+ if (loop && nested_in_vect_loop_p (loop, stmt))
{
tree outer_step, outer_base, outer_init;
HOST_WIDE_INT pbitsize, pbitpos;
@@ -1941,12 +1941,12 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
affine_iv base_iv, offset_iv;
tree dinit;
- /* Build a reference to the first location accessed by the
+ /* Build a reference to the first location accessed by the
inner-loop: *(BASE+INIT). (The first location is actually
BASE+INIT+OFFSET, but we add OFFSET separately later). */
tree inner_base = build_fold_indirect_ref
(fold_build2 (POINTER_PLUS_EXPR,
- TREE_TYPE (base), base,
+ TREE_TYPE (base), base,
fold_convert (sizetype, init)));
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1955,7 +1955,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
print_generic_expr (vect_dump, inner_base, TDF_SLIM);
}
- outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
+ outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
&poffset, &pmode, &punsignedp, &pvolatilep, false);
gcc_assert (outer_base != NULL_TREE);
@@ -1967,7 +1967,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
}
outer_base = build_fold_addr_expr (outer_base);
- if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
+ if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
&base_iv, false))
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1978,7 +1978,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (offset)
{
if (poffset)
- poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
+ poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
poffset);
else
poffset = offset;
@@ -1989,7 +1989,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
offset_iv.base = ssize_int (0);
offset_iv.step = ssize_int (0);
}
- else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
+ else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
&offset_iv, false))
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -2009,11 +2009,11 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
STMT_VINFO_DR_STEP (stmt_info) = outer_step;
/* FIXME: Use canonicalize_base_object_address (base_iv.base); */
- STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
+ STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
STMT_VINFO_DR_INIT (stmt_info) = outer_init;
- STMT_VINFO_DR_OFFSET (stmt_info) =
+ STMT_VINFO_DR_OFFSET (stmt_info) =
fold_convert (ssizetype, offset_iv.base);
- STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
+ STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
size_int (highest_pow2_factor (offset_iv.base));
if (vect_print_dump_info (REPORT_DETAILS))
@@ -2043,12 +2043,12 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
}
STMT_VINFO_DATA_REF (stmt_info) = dr;
-
+
/* Set vectype for STMT. */
scalar_type = TREE_TYPE (DR_REF (dr));
STMT_VINFO_VECTYPE (stmt_info) =
get_vectype_for_scalar_type (scalar_type);
- if (!STMT_VINFO_VECTYPE (stmt_info))
+ if (!STMT_VINFO_VECTYPE (stmt_info))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
@@ -2061,16 +2061,16 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
return false;
}
}
-
+
return true;
}
/* Function vect_get_new_vect_var.
- Returns a name for a new variable. The current naming scheme appends the
- prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
- the name of vectorizer generated variables, and appends that to NAME if
+ Returns a name for a new variable. The current naming scheme appends the
+ prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
+ the name of vectorizer generated variables, and appends that to NAME if
provided. */
tree
@@ -2135,7 +2135,7 @@ vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
if LOOP=j_loop: &in+i*2B (relative to j_loop)
Output:
- 1. Return an SSA_NAME whose value is the address of the memory location of
+ 1. Return an SSA_NAME whose value is the address of the memory location of
the first vector of the data reference.
2. If new_stmt_list is not NULL_TREE after return then the caller must insert
these statement(s) which define the returned SSA_NAME.
@@ -2181,7 +2181,7 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
base_offset = ssize_int (0);
init = ssize_int (0);
base_name = build_fold_indirect_ref (unshare_expr (DR_REF (dr)));
- }
+ }
data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
add_referenced_var (data_ref_base_var);
@@ -2220,11 +2220,11 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
if (TREE_CODE (DR_REF (dr)) == INDIRECT_REF)
addr_base = unshare_expr (TREE_OPERAND (DR_REF (dr), 0));
else
- addr_base = build1 (ADDR_EXPR,
+ addr_base = build1 (ADDR_EXPR,
build_pointer_type (TREE_TYPE (DR_REF (dr))),
unshare_expr (DR_REF (dr)));
}
-
+
vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
vec_stmt = fold_convert (vect_ptr_type, addr_base);
@@ -2247,7 +2247,7 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
/* Function vect_create_data_ref_ptr.
Create a new pointer to vector type (vp), that points to the first location
- accessed in the loop by STMT, along with the def-use update chain to
+ accessed in the loop by STMT, along with the def-use update chain to
appropriately advance the pointer through the loop iterations. Also set
aliasing information for the pointer. This vector pointer is used by the
callers to this function to create a memory reference expression for vector
@@ -2280,11 +2280,11 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
Return the initial_address in INITIAL_ADDRESS.
2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
- update the pointer in each iteration of the loop.
+ update the pointer in each iteration of the loop.
Return the increment stmt that updates the pointer in PTR_INCR.
- 3. Set INV_P to true if the access pattern of the data reference in the
+ 3. Set INV_P to true if the access pattern of the data reference in the
vectorized loop is invariant. Set it to false otherwise.
4. Return the pointer. */
@@ -2318,7 +2318,7 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
tree step;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
-
+
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -2332,21 +2332,21 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
only_init = true;
*ptr_incr = NULL;
}
-
+
/* Check the step (evolution) of the load in LOOP, and record
whether it's invariant. */
if (nested_in_vect_loop)
step = STMT_VINFO_DR_STEP (stmt_info);
else
step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
-
+
if (tree_int_cst_compare (step, size_zero_node) == 0)
*inv_p = true;
else
*inv_p = false;
/* Create an expression for the first address accessed by this load
- in LOOP. */
+ in LOOP. */
base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
if (vect_print_dump_info (REPORT_DETAILS))
@@ -2354,7 +2354,7 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
tree data_ref_base = base_name;
fprintf (vect_dump, "create vector-pointer variable to type: ");
print_generic_expr (vect_dump, vectype, TDF_SLIM);
- if (TREE_CODE (data_ref_base) == VAR_DECL
+ if (TREE_CODE (data_ref_base) == VAR_DECL
|| TREE_CODE (data_ref_base) == ARRAY_REF)
fprintf (vect_dump, " vectorizing an array ref: ");
else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
@@ -2413,7 +2413,7 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
def-use update cycles for the pointer: One relative to the outer-loop
(LOOP), which is what steps (3) and (4) below do. The other is relative
to the inner-loop (which is the inner-most loop containing the dataref),
- and this is done be step (5) below.
+ and this is done be step (5) below.
When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
inner-most loop, and so steps (3),(4) work the same, and step (5) is
@@ -2421,11 +2421,11 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
vp0 = &base_addr;
LOOP: vp1 = phi(vp0,vp2)
- ...
+ ...
...
vp2 = vp1 + step
goto LOOP
-
+
If there is an inner-loop nested in loop, then step (5) will also be
applied, and an additional update in the inner-loop will be created:
@@ -2478,7 +2478,7 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
**/
/* No update in loop is required. */
- if (only_init && (!loop_vinfo || at_loop == loop))
+ if (only_init && (!loop_vinfo || at_loop == loop))
{
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
@@ -2489,7 +2489,7 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
{
/* The step of the vector pointer is the Vector Size. */
tree step = TYPE_SIZE_UNIT (vectype);
- /* One exception to the above is when the scalar step of the load in
+ /* One exception to the above is when the scalar step of the load in
LOOP is zero. In this case the step here is also zero. */
if (*inv_p)
step = size_zero_node;
@@ -2527,7 +2527,7 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
{
standard_iv_increment_position (containing_loop, &incr_gsi,
&insert_after);
- create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
+ create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
containing_loop, &incr_gsi, insert_after, &indx_before_incr,
&indx_after_incr);
incr = gsi_stmt (incr_gsi);
@@ -2542,7 +2542,7 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
if (ptr_incr)
*ptr_incr = incr;
- return indx_before_incr;
+ return indx_before_incr;
}
else
gcc_unreachable ();
@@ -2552,14 +2552,14 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
/* Function bump_vector_ptr
Increment a pointer (to a vector type) by vector-size. If requested,
- i.e. if PTR-INCR is given, then also connect the new increment stmt
+ i.e. if PTR-INCR is given, then also connect the new increment stmt
to the existing def-use update-chain of the pointer, by modifying
the PTR_INCR as illustrated below:
The pointer def-use update-chain before this function:
DATAREF_PTR = phi (p_0, p_2)
....
- PTR_INCR: p_2 = DATAREF_PTR + step
+ PTR_INCR: p_2 = DATAREF_PTR + step
The pointer def-use update-chain after this function:
DATAREF_PTR = phi (p_0, p_2)
@@ -2569,18 +2569,18 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
PTR_INCR: p_2 = NEW_DATAREF_PTR + step
Input:
- DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
+ DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
in the loop.
- PTR_INCR - optional. The stmt that updates the pointer in each iteration of
+ PTR_INCR - optional. The stmt that updates the pointer in each iteration of
the loop. The increment amount across iterations is expected
- to be vector_size.
+ to be vector_size.
BSI - location where the new update stmt is to be placed.
STMT - the original scalar memory-access stmt that is being vectorized.
BUMP - optional. The offset by which to bump the pointer. If not given,
the offset is assumed to be vector_size.
Output: Return NEW_DATAREF_PTR as illustrated above.
-
+
*/
tree
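
Stripped of the SSA machinery, the def-use update-chain described above amounts at run time to a pointer that starts at the base address and moves by one vector size per access, with bump_vector_ptr adding one extra advance inside the loop body. A small standalone sketch (assuming a 16-byte vector of floats; not GCC internals):

#include <stdio.h>

enum { VS = 16 };                        /* assumed vector size in bytes */

int main (void)
{
  float data[16];
  for (int i = 0; i < 16; i++)
    data[i] = (float) i;

  /* DATAREF_PTR = phi (p_0, p_2); ... PTR_INCR: p_2 = DATAREF_PTR + step  */
  char *vp = (char *) data;              /* p_0: the initial address      */
  for (int iter = 0; iter < 4; iter++)
    {
      float *vec = (float *) vp;         /* one "vector" of 4 floats      */
      printf ("iter %d starts at element %g\n", iter, vec[0]);
      vp += VS;                          /* the bump: p_2 = p_1 + step    */
    }
  return 0;
}
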
@@ -2599,7 +2599,7 @@ bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
if (bump)
update = bump;
-
+
incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
dataref_ptr, update);
new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
@@ -2666,11 +2666,11 @@ vect_strided_store_supported (tree vectype)
int mode;
mode = (int) TYPE_MODE (vectype);
-
+
/* Check that the operation is supported. */
- interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
+ interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
vectype, optab_default);
- interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
+ interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
vectype, optab_default);
if (!interleave_high_optab || !interleave_low_optab)
{
@@ -2679,9 +2679,9 @@ vect_strided_store_supported (tree vectype)
return false;
}
- if (optab_handler (interleave_high_optab, mode)->insn_code
+ if (optab_handler (interleave_high_optab, mode)->insn_code
== CODE_FOR_nothing
- || optab_handler (interleave_low_optab, mode)->insn_code
+ || optab_handler (interleave_low_optab, mode)->insn_code
== CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -2696,7 +2696,7 @@ vect_strided_store_supported (tree vectype)
/* Function vect_permute_store_chain.
Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
- a power of 2, generate interleave_high/low stmts to reorder the data
+ a power of 2, generate interleave_high/low stmts to reorder the data
correctly for the stores. Return the final references for stores in
RESULT_CHAIN.
@@ -2706,7 +2706,7 @@ vect_strided_store_supported (tree vectype)
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
- 3rd vec: 16 17 18 19 20 21 22 23
+ 3rd vec: 16 17 18 19 20 21 22 23
4th vec: 24 25 26 27 28 29 30 31
The output sequence should be:
@@ -2718,22 +2718,22 @@ vect_strided_store_supported (tree vectype)
i.e., we interleave the contents of the four vectors in their order.
- We use interleave_high/low instructions to create such output. The input of
+ We use interleave_high/low instructions to create such output. The input of
each interleave_high/low operation is two vectors:
- 1st vec 2nd vec
- 0 1 2 3 4 5 6 7
- the even elements of the result vector are obtained left-to-right from the
- high/low elements of the first vector. The odd elements of the result are
+ 1st vec 2nd vec
+ 0 1 2 3 4 5 6 7
+ the even elements of the result vector are obtained left-to-right from the
+ high/low elements of the first vector. The odd elements of the result are
obtained left-to-right from the high/low elements of the second vector.
The output of interleave_high will be: 0 4 1 5
and of interleave_low: 2 6 3 7
-
+
The permutation is done in log LENGTH stages. In each stage interleave_high
- and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
- where the first argument is taken from the first half of DR_CHAIN and the
- second argument from it's second half.
- In our example,
+ and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
+ where the first argument is taken from the first half of DR_CHAIN and the
+ second argument from it's second half.
+ In our example,
I1: interleave_high (1st vec, 3rd vec)
I2: interleave_low (1st vec, 3rd vec)
@@ -2753,10 +2753,10 @@ vect_strided_store_supported (tree vectype)
I2: 2 10 18 26 3 11 19 27
I3: 4 12 20 28 5 13 21 30
I4: 6 14 22 30 7 15 23 31. */
-
+
bool
-vect_permute_store_chain (VEC(tree,heap) *dr_chain,
- unsigned int length,
+vect_permute_store_chain (VEC(tree,heap) *dr_chain,
+ unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
VEC(tree,heap) **result_chain)
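
To make the interleave network described above concrete, here is a scalar host-side simulation (not the GCC implementation, and ignoring the endianness switch between interleave_high and interleave_low) that applies the two stages to the example chain of four 8-element vectors and prints the documented output order 0 8 16 24 1 9 17 25, ...:

#include <stdio.h>
#include <string.h>

#define NELT 8   /* elements per vector (assumption for the example) */
#define LEN  4   /* number of vectors in the chain                   */

/* interleave_high: interleave the first halves of A and B.  */
static void
interleave_high (const int *a, const int *b, int *out)
{
  for (int i = 0; i < NELT / 2; i++)
    {
      out[2 * i]     = a[i];
      out[2 * i + 1] = b[i];
    }
}

/* interleave_low: interleave the second halves of A and B.  */
static void
interleave_low (const int *a, const int *b, int *out)
{
  for (int i = 0; i < NELT / 2; i++)
    {
      out[2 * i]     = a[NELT / 2 + i];
      out[2 * i + 1] = b[NELT / 2 + i];
    }
}

int main (void)
{
  int chain[LEN][NELT], result[LEN][NELT];
  for (int v = 0; v < LEN; v++)
    for (int i = 0; i < NELT; i++)
      chain[v][i] = v * NELT + i;               /* 0..31 as in the comment */

  /* log2 (LEN) = 2 stages; pair the first half of the chain with the
     second half, emitting the high then the low result for each pair.  */
  for (int stage = 0; stage < 2; stage++)
    {
      for (int j = 0; j < LEN / 2; j++)
        {
          interleave_high (chain[j], chain[j + LEN / 2], result[2 * j]);
          interleave_low  (chain[j], chain[j + LEN / 2], result[2 * j + 1]);
        }
      memcpy (chain, result, sizeof chain);
    }

  for (int v = 0; v < LEN; v++)      /* prints 0 8 16 24 1 9 17 25, ...  */
    {
      for (int i = 0; i < NELT; i++)
        printf ("%d ", chain[v][i]);
      printf ("\n");
    }
  return 0;
}
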
@@ -2768,7 +2768,7 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
int i;
unsigned int j;
enum tree_code high_code, low_code;
-
+
scalar_dest = gimple_assign_lhs (stmt);
/* Check that the operation is supported. */
@@ -2785,9 +2785,9 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
vect2 = VEC_index (tree, dr_chain, j+length/2);
/* Create interleaving stmt:
- in the case of big endian:
- high = interleave_high (vect1, vect2)
- and in the case of little endian:
+ in the case of big endian:
+ high = interleave_high (vect1, vect2)
+ and in the case of little endian:
high = interleave_low (vect1, vect2). */
perm_dest = create_tmp_var (vectype, "vect_inter_high");
DECL_GIMPLE_REG_P (perm_dest) = 1;
@@ -2811,9 +2811,9 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
/* Create interleaving stmt:
in the case of big endian:
- low = interleave_low (vect1, vect2)
+ low = interleave_low (vect1, vect2)
and in the case of little endian:
- low = interleave_high (vect1, vect2). */
+ low = interleave_high (vect1, vect2). */
perm_dest = create_tmp_var (vectype, "vect_inter_low");
DECL_GIMPLE_REG_P (perm_dest) = 1;
add_referenced_var (perm_dest);
@@ -2830,21 +2830,21 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
}
/* Function vect_setup_realignment
-
+
This function is called when vectorizing an unaligned load using
the dr_explicit_realign[_optimized] scheme.
This function generates the following code at the loop prolog:
p = initial_addr;
x msq_init = *(floor(p)); # prolog load
- realignment_token = call target_builtin;
+ realignment_token = call target_builtin;
loop:
x msq = phi (msq_init, ---)
- The stmts marked with x are generated only for the case of
+ The stmts marked with x are generated only for the case of
dr_explicit_realign_optimized.
- The code above sets up a new (vector) pointer, pointing to the first
+ The code above sets up a new (vector) pointer, pointing to the first
location accessed by STMT, and a "floor-aligned" load using that pointer.
It also generates code to compute the "realignment-token" (if the relevant
target hook was defined), and creates a phi-node at the loop-header bb
@@ -2853,10 +2853,10 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
created by the caller to this function).
For the case of dr_explicit_realign_optimized:
- The caller to this function uses the phi-result (msq) to create the
+ The caller to this function uses the phi-result (msq) to create the
realignment code inside the loop, and sets up the missing phi argument,
as follows:
- loop:
+ loop:
msq = phi (msq_init, lsq)
lsq = *(floor(p')); # load in loop
result = realign_load (msq, lsq, realignment_token);
@@ -2873,8 +2873,8 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
a memory location that may be unaligned.
BSI - place where new code is to be inserted.
ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
- is used.
-
+ is used.
+
Output:
REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
target hook, if defined.
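
The explicit-realignment scheme sketched above is essentially "load the two aligned chunks surrounding the unaligned address and stitch the wanted bytes together". The following standalone C model (a sketch assuming 16-byte vectors; it stands in for, and is not, the target builtins) mimics msq/lsq and realign_load with plain memcpy:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define VS 16   /* assumed vector size in bytes */

/* Stand-in for the "realignment token": the byte offset of P in its chunk.  */
static size_t
mask_for_load (const unsigned char *p)
{
  return (uintptr_t) p & (VS - 1);
}

/* result = realign_load (msq, lsq, token): take VS bytes starting at
   TOKEN from the concatenation of the two aligned chunks.  */
static void
realign_load (const unsigned char *msq, const unsigned char *lsq,
              size_t token, unsigned char *result)
{
  unsigned char both[2 * VS];
  memcpy (both, msq, VS);
  memcpy (both + VS, lsq, VS);
  memcpy (result, both + token, VS);
}

int main (void)
{
  static unsigned char buf[64] __attribute__ ((aligned (VS)));
  for (int i = 0; i < 64; i++)
    buf[i] = (unsigned char) i;

  unsigned char *p = buf + 5;                       /* deliberately unaligned */
  unsigned char *floor_p = buf + ((p - buf) & ~(size_t) (VS - 1));
  size_t token = mask_for_load (p);                 /* prolog: compute token  */

  unsigned char msq[VS], lsq[VS], result[VS];
  memcpy (msq, floor_p, VS);                        /* msq_init = *(floor(p)) */
  memcpy (lsq, floor_p + VS, VS);                   /* lsq = next aligned load */
  realign_load (msq, lsq, token, result);

  printf ("%d %d\n", result[0], result[15]);        /* prints 5 20 */
  return 0;
}
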
@@ -3120,7 +3120,7 @@ vect_strided_load_supported (tree vectype)
/* Function vect_permute_load_chain.
Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
- a power of 2, generate extract_even/odd stmts to reorder the input data
+ a power of 2, generate extract_even/odd stmts to reorder the input data
correctly. Return the final references for loads in RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
@@ -3129,14 +3129,14 @@ vect_strided_load_supported (tree vectype)
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
- 3rd vec: 16 17 18 19 20 21 22 23
+ 3rd vec: 16 17 18 19 20 21 22 23
4th vec: 24 25 26 27 28 29 30 31
The output sequence should be:
1st vec: 0 4 8 12 16 20 24 28
2nd vec: 1 5 9 13 17 21 25 29
- 3rd vec: 2 6 10 14 18 22 26 30
+ 3rd vec: 2 6 10 14 18 22 26 30
4th vec: 3 7 11 15 19 23 27 31
i.e., the first output vector should contain the first elements of each
@@ -3144,17 +3144,17 @@ vect_strided_load_supported (tree vectype)
We use extract_even/odd instructions to create such output. The input of each
extract_even/odd operation is two vectors
- 1st vec 2nd vec
- 0 1 2 3 4 5 6 7
+ 1st vec 2nd vec
+ 0 1 2 3 4 5 6 7
- and the output is the vector of extracted even/odd elements. The output of
+ and the output is the vector of extracted even/odd elements. The output of
extract_even will be: 0 2 4 6
and of extract_odd: 1 3 5 7
-
+
The permutation is done in log LENGTH stages. In each stage extract_even and
- extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
- order. In our example,
+ extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
+ order. In our example,
E1: extract_even (1st vec, 2nd vec)
E2: extract_odd (1st vec, 2nd vec)
@@ -3165,18 +3165,18 @@ vect_strided_load_supported (tree vectype)
E1: 0 2 4 6 8 10 12 14
E2: 1 3 5 7 9 11 13 15
- E3: 16 18 20 22 24 26 28 30
+ E3: 16 18 20 22 24 26 28 30
E4: 17 19 21 23 25 27 29 31
In order to proceed and create the correct sequence for the next stage (or
- for the correct output, if the second stage is the last one, as in our
- example), we first put the output of extract_even operation and then the
+ for the correct output, if the second stage is the last one, as in our
+ example), we first put the output of extract_even operation and then the
output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
The input for the second stage is:
1st vec (E1): 0 2 4 6 8 10 12 14
- 2nd vec (E3): 16 18 20 22 24 26 28 30
- 3rd vec (E2): 1 3 5 7 9 11 13 15
+ 2nd vec (E3): 16 18 20 22 24 26 28 30
+ 3rd vec (E2): 1 3 5 7 9 11 13 15
4th vec (E4): 17 19 21 23 25 27 29 31
The output of the second stage:
@@ -3190,12 +3190,12 @@ vect_strided_load_supported (tree vectype)
1st vec (E1): 0 4 8 12 16 20 24 28
2nd vec (E3): 1 5 9 13 17 21 25 29
- 3rd vec (E2): 2 6 10 14 18 22 26 30
+ 3rd vec (E2): 2 6 10 14 18 22 26 30
4th vec (E4): 3 7 11 15 19 23 27 31. */
bool
-vect_permute_load_chain (VEC(tree,heap) *dr_chain,
- unsigned int length,
+vect_permute_load_chain (VEC(tree,heap) *dr_chain,
+ unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
VEC(tree,heap) **result_chain)
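
Analogously to the store case, the load-side permutation can be simulated on the host. This sketch (not the GCC code) runs the two extract_even/extract_odd stages on the example chain and prints the documented de-interleaved order 0 4 8 12 ..., 1 5 9 13 ..., and so on:

#include <stdio.h>
#include <string.h>

#define NELT 8   /* elements per vector (assumption for the example) */
#define LEN  4   /* number of vectors in the chain                   */

/* extract_even: even-indexed elements of the concatenation of A and B.  */
static void
extract_even (const int *a, const int *b, int *out)
{
  for (int i = 0; i < NELT / 2; i++)
    {
      out[i]            = a[2 * i];
      out[NELT / 2 + i] = b[2 * i];
    }
}

/* extract_odd: odd-indexed elements of the concatenation of A and B.  */
static void
extract_odd (const int *a, const int *b, int *out)
{
  for (int i = 0; i < NELT / 2; i++)
    {
      out[i]            = a[2 * i + 1];
      out[NELT / 2 + i] = b[2 * i + 1];
    }
}

int main (void)
{
  int chain[LEN][NELT], result[LEN][NELT];
  for (int v = 0; v < LEN; v++)
    for (int i = 0; i < NELT; i++)
      chain[v][i] = v * NELT + i;               /* 0..31 as in the comment */

  /* log2 (LEN) = 2 stages; evens go to the first half of the result
     chain, odds to the second half, as in the comment above.  */
  for (int stage = 0; stage < 2; stage++)
    {
      for (int j = 0; j < LEN; j += 2)
        {
          extract_even (chain[j], chain[j + 1], result[j / 2]);
          extract_odd  (chain[j], chain[j + 1], result[j / 2 + LEN / 2]);
        }
      memcpy (chain, result, sizeof chain);
    }

  for (int v = 0; v < LEN; v++)      /* 0 4 8 ... 28 / 1 5 9 ... 29 / ... */
    {
      for (int i = 0; i < NELT; i++)
        printf ("%d ", chain[v][i]);
      printf ("\n");
    }
  return 0;
}
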
@@ -3232,8 +3232,8 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
mark_symbols_for_renaming (perm_stmt);
- VEC_replace (tree, *result_chain, j/2, data_ref);
-
+ VEC_replace (tree, *result_chain, j/2, data_ref);
+
/* data_ref = permute_odd (first_data_ref, second_data_ref); */
perm_dest = create_tmp_var (vectype, "vect_perm_odd");
DECL_GIMPLE_REG_P (perm_dest) = 1;
@@ -3273,16 +3273,16 @@ vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
unsigned int i, gap_count;
tree tmp_data_ref;
- /* DR_CHAIN contains input data-refs that are a part of the interleaving.
- RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
+ /* DR_CHAIN contains input data-refs that are a part of the interleaving.
+ RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
vectors, that are ready for vector computation. */
result_chain = VEC_alloc (tree, heap, size);
/* Permute. */
if (!vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain))
return false;
- /* Put a permuted data-ref in the VECTORIZED_STMT field.
- Since we scan the chain starting from it's first node, their order
+ /* Put a permuted data-ref in the VECTORIZED_STMT field.
+ Since we scan the chain starting from it's first node, their order
corresponds the order of data-refs in RESULT_CHAIN. */
next_stmt = first_stmt;
gap_count = 1;
@@ -3298,7 +3298,7 @@ vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
correspond to the gaps.
*/
- if (next_stmt != first_stmt
+ if (next_stmt != first_stmt
&& gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
{
gap_count++;
@@ -3324,11 +3324,11 @@ vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
while (rel_stmt)
{
prev_stmt = rel_stmt;
- rel_stmt =
+ rel_stmt =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
}
- STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
+ STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
new_stmt;
}
}
@@ -3352,7 +3352,7 @@ vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
Returns whether the alignment of a DECL can be forced to be aligned
on ALIGNMENT bit boundary. */
-bool
+bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
if (TREE_CODE (decl) != VAR_DECL)
@@ -3472,7 +3472,7 @@ vect_supportable_dr_alignment (struct data_reference *dr)
bool is_packed = false;
tree type = (TREE_TYPE (DR_REF (dr)));
- if (optab_handler (vec_realign_load_optab, mode)->insn_code !=
+ if (optab_handler (vec_realign_load_optab, mode)->insn_code !=
CODE_FOR_nothing
&& (!targetm.vectorize.builtin_mask_for_load
|| targetm.vectorize.builtin_mask_for_load ()))
@@ -3488,11 +3488,11 @@ vect_supportable_dr_alignment (struct data_reference *dr)
if (!known_alignment_for_access_p (dr))
{
tree ba = DR_BASE_OBJECT (dr);
-
+
if (ba)
is_packed = contains_packed_reference (ba);
}
-
+
if (targetm.vectorize.
builtin_support_vector_misalignment (mode, type,
DR_MISALIGNMENT (dr), is_packed))
@@ -3507,17 +3507,17 @@ vect_supportable_dr_alignment (struct data_reference *dr)
if (!known_alignment_for_access_p (dr))
{
tree ba = DR_BASE_OBJECT (dr);
-
+
if (ba)
is_packed = contains_packed_reference (ba);
}
-
+
if (targetm.vectorize.
- builtin_support_vector_misalignment (mode, type,
+ builtin_support_vector_misalignment (mode, type,
DR_MISALIGNMENT (dr), is_packed))
return dr_unaligned_supported;
}
-
+
/* Unsupported. */
return dr_unaligned_unsupported;
}
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 9ef0239421d..2bb71a137ab 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -3,17 +3,17 @@
Free Software Foundation, Inc.
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
@@ -417,12 +417,12 @@ expand_vector_operations_1 (gimple_stmt_iterator *gsi)
if (TREE_CODE (type) != VECTOR_TYPE)
return;
- if (code == NOP_EXPR
+ if (code == NOP_EXPR
|| code == FLOAT_EXPR
|| code == FIX_TRUNC_EXPR
|| code == VIEW_CONVERT_EXPR)
return;
-
+
gcc_assert (code != CONVERT_EXPR);
/* The signedness is determined from input argument. */
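
For context, the job of this file is to lower vector operations the target cannot do natively into piecewise scalar (or narrower vector) code. A rough model of the piecewise case, using a plain struct as a stand-in for a V4SI value (illustration only, not the pass itself):

#include <stdio.h>

typedef struct { int e[4]; } v4si_model;   /* stand-in for a V4SI value */

/* What the piecewise expansion boils down to for an addition the target
   has no vector instruction for: one scalar operation per element.  */
static v4si_model
add_piecewise (v4si_model a, v4si_model b)
{
  v4si_model c;
  for (int i = 0; i < 4; i++)
    c.e[i] = a.e[i] + b.e[i];
  return c;
}

int main (void)
{
  v4si_model a = { { 1, 2, 3, 4 } }, b = { { 10, 20, 30, 40 } };
  v4si_model c = add_piecewise (a, b);
  printf ("%d %d %d %d\n", c.e[0], c.e[1], c.e[2], c.e[3]);  /* 11 22 33 44 */
  return 0;
}
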
@@ -432,8 +432,8 @@ expand_vector_operations_1 (gimple_stmt_iterator *gsi)
/* Choose between vector shift/rotate by vector and vector shift/rotate by
scalar */
- if (code == LSHIFT_EXPR
- || code == RSHIFT_EXPR
+ if (code == LSHIFT_EXPR
+ || code == RSHIFT_EXPR
|| code == LROTATE_EXPR
|| code == RROTATE_EXPR)
{
@@ -454,7 +454,7 @@ expand_vector_operations_1 (gimple_stmt_iterator *gsi)
else
op = optab_for_tree_code (code, type, optab_default);
- /* For widening/narrowing vector operations, the relevant type is of the
+ /* For widening/narrowing vector operations, the relevant type is of the
arguments, not the widened result. VEC_UNPACK_FLOAT_*_EXPR is
calculated in the same way above. */
if (code == WIDEN_SUM_EXPR
@@ -548,7 +548,7 @@ expand_vector_operations (void)
return 0;
}
-struct gimple_opt_pass pass_lower_vector =
+struct gimple_opt_pass pass_lower_vector =
{
{
GIMPLE_PASS,
@@ -568,7 +568,7 @@ struct gimple_opt_pass pass_lower_vector =
}
};
-struct gimple_opt_pass pass_lower_vector_ssa =
+struct gimple_opt_pass pass_lower_vector_ssa =
{
{
GIMPLE_PASS,
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index c0b15cd98a5..ea1a4d6bcff 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -1,7 +1,7 @@
-/* Vectorizer Specific Loop Manipulations
+/* Vectorizer Specific Loop Manipulations
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
Foundation, Inc.
- Contributed by Dorit Naishlos <dorit@il.ibm.com>
+ Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
@@ -154,7 +154,7 @@ slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
phi nodes, organized in the same order.
case 2: NEW_LOOP was placed after ORIG_LOOP:
- The successor block of NEW_LOOP is the original exit block of
+ The successor block of NEW_LOOP is the original exit block of
ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
We postpone updating these phis to a later stage (when
loop guards are added).
@@ -303,8 +303,8 @@ slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
that change between iterations of a loop (and therefore have a phi-node
at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
- phis for variables that are used out of the loop (and therefore have
- loop-closed exit phis). Some variables may be both updated between
+ phis for variables that are used out of the loop (and therefore have
+ loop-closed exit phis). Some variables may be both updated between
iterations and used after the loop. This is why in loop1_exit_bb we
may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
and exit phis (created by slpeel_update_phi_nodes_for_guard2).
@@ -335,15 +335,15 @@ slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
*/
/* Function slpeel_update_phi_nodes_for_guard1
-
+
Input:
- GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
- DEFS - a bitmap of ssa names to mark new names for which we recorded
- information.
-
+ information.
+
In the context of the overall structure, we have:
- loop1_preheader_bb:
+ loop1_preheader_bb:
guard1 (goto loop1/merge1_bb)
LOOP-> loop1
loop1_exit_bb:
@@ -399,12 +399,12 @@ slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
/* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
of LOOP. Set the two phi args in NEW_PHI for these edges: */
loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
- loop_locus = gimple_phi_arg_location_from_edge (orig_phi,
- EDGE_SUCC (loop->latch,
+ loop_locus = gimple_phi_arg_location_from_edge (orig_phi,
+ EDGE_SUCC (loop->latch,
0));
guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
- guard_locus
- = gimple_phi_arg_location_from_edge (orig_phi,
+ guard_locus
+ = gimple_phi_arg_location_from_edge (orig_phi,
loop_preheader_edge (loop));
add_phi_arg (new_phi, loop_arg, new_exit_e, loop_locus);
@@ -470,10 +470,10 @@ slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
In the context of the overall structure, we have:
- loop1_preheader_bb:
+ loop1_preheader_bb:
guard1 (goto loop1/merge1_bb)
loop1
- loop1_exit_bb:
+ loop1_exit_bb:
guard2 (goto merge1_bb/merge2_bb)
merge1_bb
LOOP-> loop2
@@ -483,7 +483,7 @@ LOOP-> loop2
For each name used out side the loop (i.e - for each name that has an exit
phi in next_bb) we create a new phi in:
- 1. merge2_bb (to account for the edge from guard_bb)
+ 1. merge2_bb (to account for the edge from guard_bb)
2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
if needed (if it wasn't handled by slpeel_update_phis_nodes_for_phi1).
@@ -516,7 +516,7 @@ slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
orig_phi = update_phi;
orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
/* This loop-closed-phi actually doesn't represent a use
- out of the loop - the phi arg is a constant. */
+ out of the loop - the phi arg is a constant. */
if (TREE_CODE (orig_def) != SSA_NAME)
continue;
orig_def_new_name = get_current_def (orig_def);
@@ -541,7 +541,7 @@ slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
new_name2 = get_current_def (get_current_def (orig_name)). */
new_name2 = get_current_def (new_name);
}
-
+
if (is_new_loop)
{
guard_arg = orig_def;
@@ -554,7 +554,7 @@ slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
}
if (new_name2)
guard_arg = new_name2;
-
+
add_phi_arg (new_phi, loop_arg, new_exit_e, UNKNOWN_LOCATION);
add_phi_arg (new_phi, guard_arg, guard_edge, UNKNOWN_LOCATION);
@@ -606,7 +606,7 @@ slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
/* 3.3. GUARD_BB has one incoming edge: */
gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
- add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0),
+ add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0),
UNKNOWN_LOCATION);
/* 3.4. Update phi in successor of GUARD_BB: */
@@ -673,7 +673,7 @@ slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
}
-/* Given LOOP this function generates a new copy of it and puts it
+/* Given LOOP this function generates a new copy of it and puts it
on E which is either the entry or exit of LOOP. */
struct loop *
@@ -683,13 +683,13 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
basic_block *new_bbs, *bbs;
bool at_exit;
bool was_imm_dom;
- basic_block exit_dest;
+ basic_block exit_dest;
gimple phi;
tree phi_arg;
edge exit, new_exit;
gimple_stmt_iterator gsi;
- at_exit = (e == single_exit (loop));
+ at_exit = (e == single_exit (loop));
if (!at_exit && e != loop_preheader_edge (loop))
return NULL;
@@ -711,8 +711,8 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
}
exit_dest = single_exit (loop)->dest;
- was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
- exit_dest) == loop->header ?
+ was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
+ exit_dest) == loop->header ?
true : false);
new_bbs = XNEWVEC (basic_block, loop->num_nodes);
@@ -722,7 +722,7 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
&exit, 1, &new_exit, NULL,
e->src);
- /* Duplicating phi args at exit bbs as coming
+ /* Duplicating phi args at exit bbs as coming
also from exit of duplicated loop. */
for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -738,11 +738,11 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
else
new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);
-
- add_phi_arg (phi, phi_arg, new_loop_exit_edge, locus);
+
+ add_phi_arg (phi, phi_arg, new_loop_exit_edge, locus);
}
- }
-
+ }
+
if (at_exit) /* Add the loop copy at exit. */
{
redirect_edge_and_branch_force (e, new_loop->header);
@@ -756,19 +756,19 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
edge new_exit_e;
edge entry_e = loop_preheader_edge (loop);
basic_block preheader = entry_e->src;
-
- if (!flow_bb_inside_loop_p (new_loop,
+
+ if (!flow_bb_inside_loop_p (new_loop,
EDGE_SUCC (new_loop->header, 0)->dest))
new_exit_e = EDGE_SUCC (new_loop->header, 0);
else
- new_exit_e = EDGE_SUCC (new_loop->header, 1);
+ new_exit_e = EDGE_SUCC (new_loop->header, 1);
redirect_edge_and_branch_force (new_exit_e, loop->header);
PENDING_STMT (new_exit_e) = NULL;
set_immediate_dominator (CDI_DOMINATORS, loop->header,
new_exit_e->src);
- /* We have to add phi args to the loop->header here as coming
+ /* We have to add phi args to the loop->header here as coming
from new_exit_e edge. */
for (gsi = gsi_start_phis (loop->header);
!gsi_end_p (gsi);
@@ -778,8 +778,8 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
if (phi_arg)
add_phi_arg (phi, phi_arg, new_exit_e,
- gimple_phi_arg_location_from_edge (phi, entry_e));
- }
+ gimple_phi_arg_location_from_edge (phi, entry_e));
+ }
redirect_edge_and_branch_force (entry_e, new_loop->header);
PENDING_STMT (entry_e) = NULL;
@@ -795,7 +795,7 @@ slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
/* Given the condition statement COND, put it as the last statement
of GUARD_BB; EXIT_BB is the basic block to skip the loop;
- Assumes that this is the single exit of the guarded loop.
+ Assumes that this is the single exit of the guarded loop.
Returns the skip edge, inserts new stmts on the COND_EXPR_STMT_LIST. */
static edge
@@ -881,19 +881,19 @@ slpeel_verify_cfg_after_peeling (struct loop *first_loop,
after second_loop.
*/
gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);
-
+
/* 1. Verify that one of the successors of first_loop->exit is the preheader
of second_loop. */
-
+
/* The preheader of new_loop is expected to have two predecessors:
first_loop->exit and the block that precedes first_loop. */
- gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
+ gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
&& ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
&& EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
|| (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
&& EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));
-
+
/* Verify that the other successor of first_loop->exit is after the
second_loop. */
/* TODO */
@@ -920,7 +920,7 @@ set_prologue_iterations (basic_block bb_before_first_loop,
gimple cond_stmt;
gimple_seq gimplify_stmt_list = NULL, stmts = NULL;
tree cost_pre_condition = NULL_TREE;
- tree scalar_loop_iters =
+ tree scalar_loop_iters =
unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));
e = single_pred_edge (bb_before_first_loop);
@@ -941,7 +941,7 @@ set_prologue_iterations (basic_block bb_before_first_loop,
e_fallthru = EDGE_SUCC (then_bb, 0);
cost_pre_condition =
- fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
+ fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
build_int_cst (TREE_TYPE (scalar_loop_iters), th));
cost_pre_condition =
force_gimple_operand (cost_pre_condition, &gimplify_stmt_list,
@@ -956,11 +956,11 @@ set_prologue_iterations (basic_block bb_before_first_loop,
gsi = gsi_last_bb (cond_bb);
gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
-
+
var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
"prologue_after_cost_adjust");
add_referenced_var (var);
- prologue_after_cost_adjust_name =
+ prologue_after_cost_adjust_name =
force_gimple_operand (scalar_loop_iters, &stmts, false, var);
gsi = gsi_last_bb (then_bb);
@@ -968,7 +968,7 @@ set_prologue_iterations (basic_block bb_before_first_loop,
gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
newphi = create_phi_node (var, bb_before_first_loop);
- add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru,
+ add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru,
UNKNOWN_LOCATION);
add_phi_arg (newphi, first_niters, e_false, UNKNOWN_LOCATION);
@@ -982,7 +982,7 @@ set_prologue_iterations (basic_block bb_before_first_loop,
that is placed on the entry (exit) edge E of LOOP. After this transformation
we have two loops one after the other - first-loop iterates FIRST_NITERS
times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
- If the cost model indicates that it is profitable to emit a scalar
+ If the cost model indicates that it is profitable to emit a scalar
loop instead of the vector one, then the prolog (epilog) loop will iterate
for the entire unchanged scalar iterations of the loop.
@@ -1002,10 +1002,10 @@ set_prologue_iterations (basic_block bb_before_first_loop,
- TH: cost model profitability threshold of iterations for vectorization.
- CHECK_PROFITABILITY: specify whether cost model check has not occurred
during versioning and hence needs to occur during
- prologue generation or whether cost model check
+ prologue generation or whether cost model check
has not occurred during prologue generation and hence
needs to occur during epilogue generation.
-
+
Output:
The function returns a pointer to the new loop-copy, or NULL if it failed
@@ -1028,8 +1028,8 @@ set_prologue_iterations (basic_block bb_before_first_loop,
*/
static struct loop*
-slpeel_tree_peel_loop_to_edge (struct loop *loop,
- edge e, tree first_niters,
+slpeel_tree_peel_loop_to_edge (struct loop *loop,
+ edge e, tree first_niters,
tree niters, bool update_first_loop_count,
unsigned int th, bool check_profitability,
tree cond_expr, gimple_seq cond_expr_stmt_list)
@@ -1045,12 +1045,12 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
edge exit_e = single_exit (loop);
LOC loop_loc;
tree cost_pre_condition = NULL_TREE;
-
+
if (!slpeel_can_duplicate_loop_p (loop, e))
return NULL;
-
+
/* We have to initialize cfg_hooks. Then, when calling
- cfg_hooks->split_edge, the function tree_split_edge
+ cfg_hooks->split_edge, the function tree_split_edge
is actually called and, when calling cfg_hooks->duplicate_block,
the function tree_duplicate_bb is called. */
gimple_register_cfg_hooks ();
@@ -1069,7 +1069,7 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
orig_exit_bb:
*/
-
+
if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
{
loop_loc = find_loop_location (loop);
@@ -1082,7 +1082,7 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
}
return NULL;
}
-
+
if (e == exit_e)
{
/* NEW_LOOP was placed after LOOP. */
@@ -1193,15 +1193,15 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
if (!update_first_loop_count)
{
pre_condition =
- fold_build2 (LE_EXPR, boolean_type_node, first_niters,
+ fold_build2 (LE_EXPR, boolean_type_node, first_niters,
build_int_cst (TREE_TYPE (first_niters), 0));
if (check_profitability)
{
tree scalar_loop_iters
= unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
(loop_vec_info_for_loop (loop)));
- cost_pre_condition =
- fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
+ cost_pre_condition =
+ fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
build_int_cst (TREE_TYPE (scalar_loop_iters), th));
pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
@@ -1217,7 +1217,7 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
}
}
- /* Prologue peeling. */
+ /* Prologue peeling. */
else
{
if (check_profitability)
@@ -1225,7 +1225,7 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
loop, th);
pre_condition =
- fold_build2 (LE_EXPR, boolean_type_node, first_niters,
+ fold_build2 (LE_EXPR, boolean_type_node, first_niters,
build_int_cst (TREE_TYPE (first_niters), 0));
}
@@ -1266,7 +1266,7 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
bb_between_loops = new_exit_bb;
bb_after_second_loop = split_edge (single_exit (second_loop));
- pre_condition =
+ pre_condition =
fold_build2 (EQ_EXPR, boolean_type_node, first_niters, niters);
skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition, NULL,
bb_after_second_loop, bb_before_first_loop);
@@ -1367,10 +1367,10 @@ vect_build_loop_niters (loop_vec_info loop_vinfo, gimple_seq seq)
and places them at the loop preheader edge or in COND_EXPR_STMT_LIST
if that is non-NULL. */
-static void
-vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
+static void
+vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
tree *ni_name_ptr,
- tree *ratio_mult_vf_name_ptr,
+ tree *ratio_mult_vf_name_ptr,
tree *ratio_name_ptr,
gimple_seq cond_expr_stmt_list)
{
@@ -1389,7 +1389,7 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
pe = loop_preheader_edge (loop);
- /* Generate temporary variable that contains
+ /* Generate temporary variable that contains
number of iterations loop executes. */
ni_name = vect_build_loop_niters (loop_vinfo, cond_expr_stmt_list);
@@ -1414,7 +1414,7 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
gcc_assert (!new_bb);
}
}
-
+
/* Create: ratio_mult_vf = ratio << log2 (vf). */
ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
@@ -1440,20 +1440,20 @@ vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
*ni_name_ptr = ni_name;
*ratio_mult_vf_name_ptr = ratio_mult_vf_name;
*ratio_name_ptr = ratio_name;
-
- return;
+
+ return;
}
/* Function vect_can_advance_ivs_p
- In case the number of iterations that LOOP iterates is unknown at compile
- time, an epilog loop will be generated, and the loop induction variables
- (IVs) will be "advanced" to the value they are supposed to take just before
+ In case the number of iterations that LOOP iterates is unknown at compile
+ time, an epilog loop will be generated, and the loop induction variables
+ (IVs) will be "advanced" to the value they are supposed to take just before
the epilog loop. Here we check that the access function of the loop IVs
and the expression that represents the loop bound are simple enough.
These restrictions will be relaxed in the future. */
-bool
+bool
vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -1516,19 +1516,19 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
}
evolution_part = evolution_part_in_loop_num (access_fn, loop->num);
-
+
if (evolution_part == NULL_TREE)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "No evolution.");
return false;
}
-
- /* FORNOW: We do not transform initial conditions of IVs
+
+ /* FORNOW: We do not transform initial conditions of IVs
which evolution functions are a polynomial of degree >= 2. */
if (tree_is_chrec (evolution_part))
- return false;
+ return false;
}
return true;
@@ -1570,14 +1570,14 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
vect_can_advance_ivs_p). This assumption will be relaxed in the future.
Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
- coming out of LOOP on which the ivs of LOOP are used (this is the path
+ coming out of LOOP on which the ivs of LOOP are used (this is the path
that leads to the epilog loop; other paths skip the epilog loop). This
path starts with the edge UPDATE_E, and its destination (denoted update_bb)
needs to have its phis updated.
*/
static void
-vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
+vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
edge update_e)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -1621,13 +1621,13 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
/* Skip reduction phis. */
if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
- {
+ {
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "reduc phi. skip.");
continue;
- }
+ }
- access_fn = analyze_scalar_evolution (loop, PHI_RESULT (phi));
+ access_fn = analyze_scalar_evolution (loop, PHI_RESULT (phi));
gcc_assert (access_fn);
/* We can end up with an access_fn like
(short int) {(short unsigned int) i_49, +, 1}_1
@@ -1638,13 +1638,13 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
evolution_part =
unshare_expr (evolution_part_in_loop_num (access_fn, loop->num));
gcc_assert (evolution_part != NULL_TREE);
-
+
/* FORNOW: We do not support IVs whose evolution function is a polynomial
of degree >= 2 or exponential. */
gcc_assert (!tree_is_chrec (evolution_part));
step_expr = evolution_part;
- init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
+ init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
loop->num));
init_expr = fold_convert (type, init_expr);
@@ -1652,7 +1652,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
fold_convert (TREE_TYPE (step_expr), niters),
step_expr);
if (POINTER_TYPE_P (TREE_TYPE (init_expr)))
- ni = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (init_expr),
+ ni = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (init_expr),
init_expr,
fold_convert (sizetype, off));
else
@@ -1666,7 +1666,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
last_gsi = gsi_last_bb (exit_bb);
ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var,
true, GSI_SAME_STMT);
-
+
/* Fix phi expressions in the successor bb. */
SET_PHI_ARG_DEF (phi1, update_e->dest_idx, ni_name);
}
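
The arithmetic in this hunk -- ni = init + niters * step, then fixing the exit phi -- is simple enough to check by hand. A minimal sketch of its scalar meaning (not GCC code, with made-up numbers):

#include <stdio.h>

int main (void)
{
  int niters = 8;                     /* iterations covered by the vector loop */
  int init_expr = 5, step_expr = 3;   /* IV takes the values 5, 8, 11, ...     */

  int ni = init_expr + niters * step_expr;      /* value entering the epilog   */
  printf ("epilog starts with iv = %d\n", ni);  /* prints 29                   */
  return 0;
}
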
@@ -1703,17 +1703,17 @@ conservative_cost_threshold (loop_vec_info loop_vinfo,
/* Function vect_do_peeling_for_loop_bound
Peel the last iterations of the loop represented by LOOP_VINFO.
- The peeled iterations form a new epilog loop. Given that the loop now
+ The peeled iterations form a new epilog loop. Given that the loop now
iterates NITERS times, the new epilog loop iterates
NITERS % VECTORIZATION_FACTOR times.
-
- The original loop will later be made to iterate
+
+ The original loop will later be made to iterate
NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO).
COND_EXPR and COND_EXPR_STMT_LIST are combined with a new generated
test. */
-void
+void
vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
tree cond_expr, gimple_seq cond_expr_stmt_list)
{
@@ -1733,7 +1733,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
initialize_original_copy_tables ();
/* Generate the following variables on the preheader of original loop:
-
+
ni_name = number of iteration the original loop executes
ratio = ni_name / vf
ratio_mult_vf_name = ratio * vf */
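
The three preheader values named in the comment above drive the epilogue peel. A standalone sketch (assuming vf = 4; not GCC code) of how they split the iteration space between the vectorized body and the epilog loop:

#include <stdio.h>

int main (void)
{
  const int vf = 4;                       /* assumed vectorization factor */
  int niters = 10;
  int a[10], b[10];
  for (int i = 0; i < niters; i++)
    b[i] = i;

  int ratio = niters / vf;                /* ratio_name                   */
  int ratio_mult_vf = ratio * vf;         /* ratio << log2 (vf)           */

  for (int i = 0; i < ratio_mult_vf; i += vf)   /* "vectorized" body      */
    for (int lane = 0; lane < vf; lane++)
      a[i + lane] = b[i + lane] * 2;

  for (int i = ratio_mult_vf; i < niters; i++)  /* epilog: niters % vf    */
    a[i] = b[i] * 2;

  printf ("%d %d\n", a[8], a[9]);         /* epilog handled 8 and 9: 16 18 */
  return 0;
}
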
@@ -1741,9 +1741,9 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
&ratio_mult_vf_name, ratio,
cond_expr_stmt_list);
- loop_num = loop->num;
+ loop_num = loop->num;
- /* If cost model check not done during versioning and
+ /* If cost model check not done during versioning and
peeling for alignment. */
if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
&& !LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)
@@ -1755,7 +1755,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
/* Get profitability threshold for vectorized loop. */
min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
- th = conservative_cost_threshold (loop_vinfo,
+ th = conservative_cost_threshold (loop_vinfo,
min_profitable_iters);
}
@@ -1781,9 +1781,9 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
else
update_e = EDGE_PRED (preheader, 1);
- /* Update IVs of original loop as if they were advanced
+ /* Update IVs of original loop as if they were advanced
by ratio_mult_vf_name steps. */
- vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
+ vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
/* After peeling we have to reset scalar evolution analyzer. */
scev_reset ();
@@ -1797,7 +1797,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
Set the number of iterations for the loop represented by LOOP_VINFO
to the minimum between LOOP_NITERS (the original iteration count of the loop)
and the misalignment of DR - the data reference recorded in
- LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
+ LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
this loop, the data reference DR will refer to an aligned location.
The following computation is generated:
@@ -1822,7 +1822,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
the number of elements that fit in the vector. Therefore, instead of VF we
use TYPE_VECTOR_SUBPARTS. */
-static tree
+static tree
vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
{
struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
@@ -1844,7 +1844,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
step = DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info)));
- pe = loop_preheader_edge (loop);
+ pe = loop_preheader_edge (loop);
if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
{
@@ -1860,7 +1860,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
else
{
gimple_seq new_stmts = NULL;
- tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
+ tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
&new_stmts, NULL_TREE, loop);
tree ptr_type = TREE_TYPE (start_addr);
tree size = TYPE_SIZE (ptr_type);
@@ -1875,11 +1875,11 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts);
gcc_assert (!new_bb);
-
+
/* Create: byte_misalign = addr & (vectype_size - 1) */
- byte_misalign =
+ byte_misalign =
fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr), vectype_size_minus_1);
-
+
/* Create: elem_misalign = byte_misalign / element_size */
elem_misalign =
fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
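
The two statements above compute the element misalignment of the first access. The step that follows is not visible in this hunk; one common formulation of the resulting prolog iteration count, shown here as an assumption rather than a quote of the GCC code, is (nelements - elem_misalign) & (nelements - 1):

#include <stdio.h>
#include <stdint.h>

int main (void)
{
  const unsigned vectype_size = 16;            /* assumed bytes per vector   */
  const unsigned elem_size = 4;                /* assumed bytes per element  */
  const unsigned nelements = vectype_size / elem_size;

  static float data[32] __attribute__ ((aligned (16)));
  float *start_addr = &data[3];                /* misaligned by 12 bytes     */

  unsigned byte_misalign = (uintptr_t) start_addr & (vectype_size - 1);
  unsigned elem_misalign = byte_misalign / elem_size;     /* = 3             */

  /* Peel until the access is vector-aligned; if it already is, peel 0.  */
  unsigned prolog_iters = (nelements - elem_misalign) & (nelements - 1);

  printf ("prolog iterations: %u\n", prolog_iters);        /* prints 1       */
  return 0;
}
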
@@ -1915,7 +1915,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
gcc_assert (!new_bb);
}
- return iters_name;
+ return iters_name;
}
@@ -1923,14 +1923,14 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
NITERS iterations were peeled from LOOP. DR represents a data reference
in LOOP. This function updates the information recorded in DR to
- account for the fact that the first NITERS iterations had already been
+ account for the fact that the first NITERS iterations had already been
executed. Specifically, it updates the OFFSET field of DR. */
static void
vect_update_init_of_dr (struct data_reference *dr, tree niters)
{
tree offset = DR_OFFSET (dr);
-
+
niters = fold_build2 (MULT_EXPR, sizetype,
fold_convert (sizetype, niters),
fold_convert (sizetype, DR_STEP (dr)));
@@ -1942,9 +1942,9 @@ vect_update_init_of_dr (struct data_reference *dr, tree niters)
/* Function vect_update_inits_of_drs
- NITERS iterations were peeled from the loop represented by LOOP_VINFO.
- This function updates the information recorded for the data references in
- the loop to account for the fact that the first NITERS iterations had
+ NITERS iterations were peeled from the loop represented by LOOP_VINFO.
+ This function updates the information recorded for the data references in
+ the loop to account for the fact that the first NITERS iterations had
already been executed. Specifically, it updates the initial_condition of
the access_function of all the data_references in the loop. */
@@ -1988,7 +1988,7 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo)
ni_name = vect_build_loop_niters (loop_vinfo, NULL);
niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo, ni_name);
-
+
/* Get profitability threshold for vectorized loop. */
min_profitable_iters = LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo);
@@ -2221,7 +2221,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
/* Create expression
((store_ptr_0 + store_segment_length_0) < load_ptr_0)
|| (load_ptr_0 + load_segment_length_0) < store_ptr_0))
- &&
+ &&
...
&&
((store_ptr_n + store_segment_length_n) < load_ptr_n)
@@ -2276,7 +2276,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
}
- part_cond_expr =
+ part_cond_expr =
fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
fold_build2 (LT_EXPR, boolean_type_node,
fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (addr_base_a),
@@ -2288,7 +2288,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
addr_base_b,
segment_length_b),
addr_base_a));
-
+
if (*cond_expr)
*cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
*cond_expr, part_cond_expr);
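
Each part_cond_expr built here is the textbook "segments do not overlap" test quoted at the top of this function. A self-contained C version of one such test (illustration only; the segment lengths and addresses are made up):

#include <stdio.h>

/* One runtime alias check: the accesses cannot overlap if one segment
   ends before the other begins.  */
static int
segments_disjoint (const char *store_ptr, long store_len,
                   const char *load_ptr, long load_len)
{
  return (store_ptr + store_len < load_ptr)
         || (load_ptr + load_len < store_ptr);
}

int main (void)
{
  char buf[64];
  /* A 16-byte store at buf[0] versus a 16-byte load at buf[32]: safe,
     so the vectorized version may run.  Prints 1.  */
  printf ("%d\n", segments_disjoint (buf, 16, buf + 32, 16));
  /* The same store versus a load at buf[8]: possible overlap, so the
     scalar version must run instead.  Prints 0.  */
  printf ("%d\n", segments_disjoint (buf, 16, buf + 8, 16));
  return 0;
}
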
@@ -2303,7 +2303,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
/* Function vect_loop_versioning.
-
+
If the loop has data references that may or may not be aligned or/and
has data reference relations whose independence was not proven then
two versions of the loop need to be generated, one which is vectorized
@@ -2311,11 +2311,11 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
loops is executed. The test checks for the alignment of all of the
data references that may or may not be aligned. An additional
sequence of runtime tests is generated for each pairs of DDRs whose
- independence was not proven. The vectorized version of loop is
- executed only if both alias and alignment tests are passed.
-
+ independence was not proven. The vectorized version of loop is
+ executed only if both alias and alignment tests are passed.
+
The test generated to check which version of loop is executed
- is modified to also check for profitability as indicated by the
+ is modified to also check for profitability as indicated by the
cost model initially.
The versioning precondition(s) are placed in *COND_EXPR and
@@ -2348,7 +2348,7 @@ vect_loop_versioning (loop_vec_info loop_vinfo, bool do_versioning,
min_profitable_iters);
*cond_expr =
- fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
+ fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
build_int_cst (TREE_TYPE (scalar_loop_iters), th));
*cond_expr = force_gimple_operand (*cond_expr, cond_expr_stmt_list,
@@ -2378,14 +2378,14 @@ vect_loop_versioning (loop_vec_info loop_vinfo, bool do_versioning,
prob, prob, REG_BR_PROB_BASE - prob, true);
free_original_copy_tables();
- /* Loop versioning violates an assumption we try to maintain during
+ /* Loop versioning violates an assumption we try to maintain during
vectorization - that the loop exit block has a single predecessor.
After versioning, the exit block of both loop versions is the same
basic block (i.e. it has two predecessors). Just in order to simplify
following transformations in the vectorizer, we fix this situation
here by adding a new (empty) block on the exit-edge of the loop,
with the proper loop-exit phis to maintain loop-closed-form. */
-
+
merge_bb = single_exit (loop)->dest;
gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
new_exit_bb = split_edge (single_exit (loop));
@@ -2398,10 +2398,10 @@ vect_loop_versioning (loop_vec_info loop_vinfo, bool do_versioning,
new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
new_exit_bb);
arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
- add_phi_arg (new_phi, arg, new_exit_e,
+ add_phi_arg (new_phi, arg, new_exit_e,
gimple_phi_arg_location_from_edge (orig_phi, e));
SET_PHI_ARG_DEF (orig_phi, e->dest_idx, PHI_RESULT (new_phi));
- }
+ }
/* End loop-exit-fixes after versioning. */
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 55b9fb2bf99..9c42376b19f 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -1,7 +1,7 @@
/* Loop Vectorization
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
Foundation, Inc.
- Contributed by Dorit Naishlos <dorit@il.ibm.com> and
+ Contributed by Dorit Naishlos <dorit@il.ibm.com> and
Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
@@ -43,7 +43,7 @@ along with GCC; see the file COPYING3. If not see
/* Loop Vectorization Pass.
- This pass tries to vectorize loops.
+ This pass tries to vectorize loops.
For example, the vectorizer transforms the following simple loop:
@@ -282,10 +282,10 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
if (STMT_VINFO_VECTYPE (stmt_info))
{
- /* The only case when a vectype had been already set is for stmts
+ /* The only case when a vectype had been already set is for stmts
that contain a dataref, or for "pattern-stmts" (stmts generated
by the vectorizer to represent/replace a certain idiom). */
- gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
+ gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
|| is_pattern_stmt_p (stmt_info));
vectype = STMT_VINFO_VECTYPE (stmt_info);
}
@@ -294,7 +294,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
&& !is_pattern_stmt_p (stmt_info));
- scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
+ scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
&dummy);
if (vect_print_dump_info (REPORT_DETAILS))
{
@@ -307,7 +307,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
{
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: unsupported data-type ");
print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
}
@@ -415,7 +415,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");
/* First - identify all inductions. Reduction detection assumes that all the
- inductions have been identified, therefore, this order must not be
+ inductions have been identified, therefore, this order must not be
changed. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -446,9 +446,9 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
}
if (!access_fn
- || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
+ || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
{
- VEC_safe_push (gimple, heap, worklist, phi);
+ VEC_safe_push (gimple, heap, worklist, phi);
continue;
}
@@ -468,7 +468,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
bool nested_cycle;
if (vect_print_dump_info (REPORT_DETAILS))
- {
+ {
fprintf (vect_dump, "Analyze phi: ");
print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
}
@@ -477,7 +477,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
- reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle,
+ reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle,
&double_reduc);
if (reduc_stmt)
{
@@ -490,7 +490,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_double_reduction_def;
}
- else
+ else
{
if (nested_cycle)
{
@@ -524,7 +524,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
/* Function vect_analyze_scalar_cycles.
Examine the cross iteration def-use cycles of scalar variables, by
- analyzing the loop-header PHIs of scalar variables; Classify each
+ analyzing the loop-header PHIs of scalar variables; Classify each
cycle as one of the following: invariant, induction, reduction, unknown.
We do that for the loop represented by LOOP_VINFO, and also to its
inner-loop, if exists.
@@ -555,7 +555,7 @@ vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
1. When vectorized, they are executed in the same order as in the original
scalar loop, so we can't change the order of computation when
vectorizing them.
- 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
+ 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
current checks are too strict. */
if (loop->inner)
@@ -855,13 +855,13 @@ vect_analyze_loop_form (struct loop *loop)
fprintf (vect_dump, "=== vect_analyze_loop_form ===");
/* Different restrictions apply when we are considering an inner-most loop,
- vs. an outer (nested) loop.
+ vs. an outer (nested) loop.
(FORNOW. May want to relax some of these restrictions in the future). */
if (!loop->inner)
{
- /* Inner-most loop. We currently require that the number of BBs is
- exactly 2 (the header and latch). Vectorizable inner-most loops
+ /* Inner-most loop. We currently require that the number of BBs is
+ exactly 2 (the header and latch). Vectorizable inner-most loops
look like this:
(pre-header)
@@ -892,7 +892,7 @@ vect_analyze_loop_form (struct loop *loop)
edge backedge, entryedge;
/* Nested loop. We currently require that the loop is doubly-nested,
- contains a single inner loop, and the number of BBs is exactly 5.
+ contains a single inner loop, and the number of BBs is exactly 5.
Vectorizable outer-loops look like this:
(pre-header)
@@ -902,7 +902,7 @@ vect_analyze_loop_form (struct loop *loop)
inner-loop |
| |
tail ------+
- |
+ |
(exit-bb)
The inner-loop has the properties expected of inner-most loops
@@ -934,7 +934,7 @@ vect_analyze_loop_form (struct loop *loop)
return NULL;
}
- if (loop->num_nodes != 5)
+ if (loop->num_nodes != 5)
{
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
fprintf (vect_dump, "not vectorized: control flow in loop.");
@@ -943,14 +943,14 @@ vect_analyze_loop_form (struct loop *loop)
}
gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
- backedge = EDGE_PRED (innerloop->header, 1);
+ backedge = EDGE_PRED (innerloop->header, 1);
entryedge = EDGE_PRED (innerloop->header, 0);
if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
{
backedge = EDGE_PRED (innerloop->header, 0);
- entryedge = EDGE_PRED (innerloop->header, 1);
+ entryedge = EDGE_PRED (innerloop->header, 1);
}
-
+
if (entryedge->src != loop->header
|| !single_exit (innerloop)
|| single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
@@ -964,8 +964,8 @@ vect_analyze_loop_form (struct loop *loop)
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Considering outer-loop vectorization.");
}
-
- if (!single_exit (loop)
+
+ if (!single_exit (loop)
|| EDGE_COUNT (loop->header->preds) != 2)
{
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
@@ -1023,11 +1023,11 @@ vect_analyze_loop_form (struct loop *loop)
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
}
-
- if (!number_of_iterations)
+
+ if (!number_of_iterations)
{
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
- fprintf (vect_dump,
+ fprintf (vect_dump,
"not vectorized: number of iterations cannot be computed.");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
@@ -1125,11 +1125,11 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
(i.e. a phi in the tail of the outer-loop).
FORNOW: we currently don't support the case that these phis
are not used in the outerloop (unless it is double reduction,
- i.e., this phi is vect_reduction_def), cause this case
+ i.e., this phi is vect_reduction_def), cause this case
requires to actually do something here. */
if ((!STMT_VINFO_RELEVANT_P (stmt_info)
|| STMT_VINFO_LIVE_P (stmt_info))
- && STMT_VINFO_DEF_TYPE (stmt_info)
+ && STMT_VINFO_DEF_TYPE (stmt_info)
!= vect_double_reduction_def)
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1191,7 +1191,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
if (STMT_VINFO_RELEVANT_P (stmt_info) && !PURE_SLP_STMT (stmt_info))
/* STMT needs both SLP and loop-based vectorization. */
only_slp_in_loop = false;
- }
+ }
} /* bbs */
/* All operations in the loop are either irrelevant (deal with loop
@@ -1323,7 +1323,7 @@ vect_analyze_loop (struct loop *loop)
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "===== analyze_loop_nest =====");
- if (loop_outer (loop)
+ if (loop_outer (loop)
&& loop_vec_info_for_loop (loop_outer (loop))
&& LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
{
@@ -1396,7 +1396,7 @@ vect_analyze_loop (struct loop *loop)
return NULL;
}
- /* Analyze data dependences between the data-refs in the loop.
+ /* Analyze data dependences between the data-refs in the loop.
FORNOW: fail at the first data dependence that we encounter. */
ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL);
@@ -1539,29 +1539,29 @@ report_vect_op (gimple stmt, const char *msg)
a1 = phi < a0, a2 >
a3 = ...
a2 = operation (a3, a1)
-
+
such that:
- 1. operation is commutative and associative and it is safe to
+ 1. operation is commutative and associative and it is safe to
change the order of the computation (if CHECK_REDUCTION is true)
2. no uses for a2 in the loop (a2 is used out of the loop)
3. no uses of a1 in the loop besides the reduction operation.
Condition 1 is tested here.
- Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
+ Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
- (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
- nested cycles, if CHECK_REDUCTION is false.
+ (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
+ nested cycles, if CHECK_REDUCTION is false.
(3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
reductions:
a1 = phi < a0, a2 >
inner loop (def of a3)
- a2 = phi < a3 >
+ a2 = phi < a3 >
*/
gimple
-vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
+vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
bool check_reduction, bool *double_reduc)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
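At the source level, the def-use cycle described in the comment above is just an accumulation loop. A minimal illustrative example (names invented), with s playing the role of a1/a2 and the loaded value the role of a3:

int
sum_array (const int *x, int n)
{
  int s = 0;                  /* a0: value entering the loop-header phi          */
  for (int i = 0; i < n; i++)
    s = s + x[i];             /* a2 = operation (a3, a1): PLUS is commutative
                                 and associative, so the order may be changed    */
  return s;                   /* the only use of the result is outside the loop  */
}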
@@ -1582,7 +1582,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
/* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
otherwise, we assume outer loop vectorization. */
- gcc_assert ((check_reduction && loop == vect_loop)
+ gcc_assert ((check_reduction && loop == vect_loop)
|| (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
name = PHI_RESULT (phi);
@@ -1673,15 +1673,15 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
return NULL;
}
- def1 = SSA_NAME_DEF_STMT (op1);
- if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
+ def1 = SSA_NAME_DEF_STMT (op1);
+ if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& loop->inner
&& flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
&& is_gimple_assign (def1))
{
if (vect_print_dump_info (REPORT_DETAILS))
report_vect_op (def_stmt, "detected double reduction: ");
-
+
*double_reduc = true;
return def_stmt;
}
@@ -1691,7 +1691,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
code = gimple_assign_rhs_code (def_stmt);
- if (check_reduction
+ if (check_reduction
&& (!commutative_tree_code (code) || !associative_tree_code (code)))
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1699,7 +1699,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
return NULL;
}
- if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
+ if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
{
if (code != COND_EXPR)
{
@@ -1714,8 +1714,8 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
{
op4 = TREE_OPERAND (op3, 1);
op3 = TREE_OPERAND (op3, 0);
- }
-
+ }
+
op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);
@@ -1775,17 +1775,17 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
return NULL;
}
- /* Check that it's ok to change the order of the computation.
+ /* Check that it's ok to change the order of the computation.
Generally, when vectorizing a reduction we change the order of the
computation. This may change the behavior of the program in some
- cases, so we need to check that this is ok. One exception is when
+ cases, so we need to check that this is ok. One exception is when
vectorizing an outer-loop: the inner-loop is executed sequentially,
and therefore vectorizing reductions in the inner-loop during
outer-loop vectorization is safe. */
/* CHECKME: check for !flag_finite_math_only too? */
if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
- && check_reduction)
+ && check_reduction)
{
/* Changing the order of operations changes the semantics. */
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1804,7 +1804,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
{
/* Changing the order of operations changes the semantics. */
if (vect_print_dump_info (REPORT_DETAILS))
- report_vect_op (def_stmt,
+ report_vect_op (def_stmt,
"reduction: unsafe fixed-point math optimization: ");
return NULL;
}
@@ -1819,7 +1819,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
if (TREE_CODE (op2) == SSA_NAME)
def2 = SSA_NAME_DEF_STMT (op2);
- if (code != COND_EXPR
+ if (code != COND_EXPR
&& (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1835,10 +1835,10 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
&& (code == COND_EXPR
|| (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
&& (is_gimple_assign (def1)
- || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
+ || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
== vect_induction_def
|| (gimple_code (def1) == GIMPLE_PHI
- && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
+ && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def1)))))))
{
@@ -1853,7 +1853,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
== vect_induction_def
|| (gimple_code (def2) == GIMPLE_PHI
- && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
+ && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def2)))))))
{
@@ -1921,7 +1921,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
if (!flag_vect_cost_model)
{
if (vect_print_dump_info (REPORT_COST))
- fprintf (vect_dump, "cost model disabled.");
+ fprintf (vect_dump, "cost model disabled.");
return 0;
}
@@ -2021,7 +2021,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
+ TARG_COND_NOT_TAKEN_BRANCH_COST);
}
- else
+ else
{
if (byte_misalign)
{
@@ -2048,10 +2048,10 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
}
- else
+ else
{
int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
- peel_iters_prologue = niters < peel_iters_prologue ?
+ peel_iters_prologue = niters < peel_iters_prologue ?
niters : peel_iters_prologue;
peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
}
@@ -2142,9 +2142,9 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
}
- /* Calculate number of iterations required to make the vector version
+ /* Calculate number of iterations required to make the vector version
profitable, relative to the loop bodies only. The following condition
- must hold true:
+ must hold true:
SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
where
SIC = scalar iteration cost, VIC = vector iteration cost,
@@ -2199,7 +2199,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
min_profitable_iters);
}
- min_profitable_iters =
+ min_profitable_iters =
min_profitable_iters < vf ? vf : min_profitable_iters;
/* Because the condition we create is:
@@ -2210,21 +2210,21 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, " Profitability threshold = %d\n",
min_profitable_iters);
-
+
return min_profitable_iters;
}
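The threshold returned just above follows from solving that inequality for niters. A minimal sketch under the obvious reading of the abbreviations (SOC/VOC the scalar/vector costs outside the loop body, PL/EP the prologue/epilogue peel counts, VF the vectorization factor), ignoring the rounding and the degenerate non-positive denominator that the real code must handle:

/* SIC * n + SOC > VIC * (n - PL - EP) / VF + VOC
   n * (SIC * VF - VIC) > (VOC - SOC) * VF - VIC * (PL + EP)
   n > ((VOC - SOC) * VF - VIC * (PL + EP)) / (SIC * VF - VIC)   */

static int
min_profitable_niters_sketch (int sic, int vic, int soc, int voc,
                              int pl, int ep, int vf)
{
  return ((voc - soc) * vf - vic * (pl + ep)) / (sic * vf - vic);
}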
-/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
+/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
functions. Design better to avoid maintenance issues. */
-
-/* Function vect_model_reduction_cost.
- Models cost for a reduction operation, including the vector ops
+/* Function vect_model_reduction_cost.
+
+ Models cost for a reduction operation, including the vector ops
generated within the strip-mine loop, the initial definition before
the loop, and the epilogue code that must be generated. */
-static bool
+static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
int ncopies)
{
@@ -2270,11 +2270,11 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
}
return false;
}
-
+
mode = TYPE_MODE (vectype);
orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
- if (!orig_stmt)
+ if (!orig_stmt)
orig_stmt = STMT_VINFO_STMT (stmt_info);
code = gimple_assign_rhs_code (orig_stmt);
@@ -2291,7 +2291,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
{
if (reduc_code != ERROR_MARK)
outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
- else
+ else
{
int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
tree bitsize =
@@ -2308,7 +2308,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
/* Final reduction via vector shifts and the reduction operator. Also
requires scalar extract. */
outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
- + TARG_VEC_TO_SCALAR_COST);
+ + TARG_VEC_TO_SCALAR_COST);
else
/* Use extracts and reduction op for final reduction. For N elements,
we have N extracts and N-1 reduction ops. */
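A scalar model of the shift-based scheme for a four-element vector makes the 2 * exact_log2 (nelements) count concrete (illustrative only; the real epilog emits vector shift and add statements on the vectype):

int
reduce_by_shifts_sketch (const int v[4])
{
  int a[4], b[4];

  for (int i = 0; i < 4; i++)                 /* round 1: shift by 2 elements, add */
    a[i] = v[i] + (i + 2 < 4 ? v[i + 2] : 0);
  for (int i = 0; i < 4; i++)                 /* round 2: shift by 1 element, add  */
    b[i] = a[i] + (i + 1 < 4 ? a[i + 1] : 0);

  return b[0];                                /* one final scalar extract          */
}

Two rounds of one shift plus one add each, plus the extract, match the cost added above.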
@@ -2338,7 +2338,7 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
/* prologue cost for vec_init and vec_step. */
STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
-
+
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
"outside_cost = %d .", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
@@ -2355,7 +2355,7 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
Output:
Return a vector variable, initialized with the first VF values of
the induction variable. E.g., for an iv with IV_PHI='X' and
- evolution S, for a vector of 4 units, we want to return:
+ evolution S, for a vector of 4 units, we want to return:
[X, X + S, X + 2*S, X + 3*S]. */
static tree
@@ -2365,7 +2365,7 @@ get_initial_def_for_induction (gimple iv_phi)
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
- tree vectype;
+ tree vectype;
int nunits;
edge pe = loop_preheader_edge (loop);
struct loop *iv_loop;
@@ -2438,7 +2438,7 @@ get_initial_def_for_induction (gimple iv_phi)
/* iv_loop is nested in the loop to be vectorized. init_expr had already
been created during vectorization of previous stmts; We obtain it from
the STMT_VINFO_VEC_STMT of the defining stmt. */
- tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
+ tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
loop_preheader_edge (iv_loop));
vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
}
@@ -2533,12 +2533,12 @@ get_initial_def_for_induction (gimple iv_phi)
vec_def = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, vec_def);
gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
- set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
+ set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
NULL));
/* Set the arguments of the phi node: */
add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
+ add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
UNKNOWN_LOCATION);
@@ -2547,7 +2547,7 @@ get_initial_def_for_induction (gimple iv_phi)
more than one vector stmt - i.e - we need to "unroll" the
vector stmt by a factor VF/nunits. For more details see documentation
in vectorizable_operation. */
-
+
if (ncopies > 1)
{
stmt_vec_info prev_stmt_vinfo;
@@ -2579,7 +2579,7 @@ get_initial_def_for_induction (gimple iv_phi)
set_vinfo_for_stmt (new_stmt,
new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
- prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
+ prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
}
}
@@ -2596,7 +2596,7 @@ get_initial_def_for_induction (gimple iv_phi)
break;
}
}
- if (exit_phi)
+ if (exit_phi)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
/* FORNOW. Currently not supporting the case that an inner-loop induction
@@ -2667,11 +2667,11 @@ get_initial_def_for_induction (gimple iv_phi)
FORNOW, we are using the 'adjust in epilog' scheme, because this way the
initialization vector is simpler (same element in all entries), if
ADJUSTMENT_DEF is not NULL, and Option2 otherwise.
-
+
A cost model should help decide between these two schemes. */
tree
-get_initial_def_for_reduction (gimple stmt, tree init_val,
+get_initial_def_for_reduction (gimple stmt, tree init_val,
tree *adjustment_def)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
@@ -2685,7 +2685,7 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
tree init_def;
tree t = NULL_TREE;
int i;
- bool nested_in_vect_loop = false;
+ bool nested_in_vect_loop = false;
tree init_value;
REAL_VALUE_TYPE real_init_val = dconst0;
int int_init_val = 0;
@@ -2710,8 +2710,8 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
&& (def_stmt = SSA_NAME_DEF_STMT (init_val))
&& gimple_code (def_stmt) == GIMPLE_PHI
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
- && vinfo_for_stmt (def_stmt)
- && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
+ && vinfo_for_stmt (def_stmt)
+ && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_double_reduction_def)
{
*adjustment_def = NULL;
@@ -2738,12 +2738,12 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
case BIT_XOR_EXPR:
case MULT_EXPR:
case BIT_AND_EXPR:
- /* ADJUSTMENT_DEF is NULL when called from
+ /* ADJUSTMENT_DEF is NULL when called from
vect_create_epilog_for_reduction to vectorize double reduction. */
if (adjustment_def)
{
if (nested_in_vect_loop)
- *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
+ *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
NULL);
else
*adjustment_def = init_val;
@@ -2760,7 +2760,7 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
else
def_for_init = build_int_cst (scalar_type, int_init_val);
- /* Create a vector of '0' or '1' except the first element. */
+ /* Create a vector of '0' or '1' except the first element. */
for (i = nunits - 2; i >= 0; --i)
t = tree_cons (NULL_TREE, def_for_init, t);
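As a concrete illustration of the two initialization schemes (invented numbers: a sum reduction with initial value 10 and nunits == 4), the adjust-in-epilog scheme starts every lane from the neutral element and adds the 10 back after the final reduction, while Option2 folds it into the first element of the initial vector. A rough scalar model:

int
init_scheme_sketch (const int *x, int n)
{
  int init = 10;
  int lane[4] = { 0, 0, 0, 0 };   /* adjust-in-epilog: all lanes start from 0;
                                     Option2 would start from { 10, 0, 0, 0 }
                                     and skip the adjustment below.            */
  for (int i = 0; i < n; i++)
    lane[i % 4] += x[i];          /* four partial sums, as in the vector loop  */

  int sum = lane[0] + lane[1] + lane[2] + lane[3];
  return sum + init;              /* the epilog adjustment                     */
}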
@@ -2810,11 +2810,11 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
/* Function vect_create_epilog_for_reduction
-
+
Create code at the loop-epilog to finalize the result of a reduction
- computation.
-
- VECT_DEF is a vector of partial results.
+ computation.
+
+ VECT_DEF is a vector of partial results.
REDUC_CODE is the tree-code for the epilog reduction.
NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
number of elements that we can fit in a vectype (nunits). In this case
@@ -2823,23 +2823,23 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
in vectorizable_operation.
STMT is the scalar reduction stmt that is being vectorized.
REDUCTION_PHI is the phi-node that carries the reduction computation.
- REDUC_INDEX is the index of the operand in the right hand side of the
+ REDUC_INDEX is the index of the operand in the right hand side of the
statement that is defined by REDUCTION_PHI.
DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
This function:
- 1. Creates the reduction def-use cycle: sets the arguments for
+ 1. Creates the reduction def-use cycle: sets the arguments for
REDUCTION_PHI:
The loop-entry argument is the vectorized initial-value of the reduction.
The loop-latch argument is VECT_DEF - the vector of partial sums.
2. "Reduces" the vector of partial results VECT_DEF into a single result,
- by applying the operation specified by REDUC_CODE if available, or by
+ by applying the operation specified by REDUC_CODE if available, or by
other means (whole-vector shifts or a scalar loop).
- The function also creates a new phi node at the loop exit to preserve
+ The function also creates a new phi node at the loop exit to preserve
loop-closed form, as illustrated below.
-
+
The flow at the entry to this function:
-
+
loop:
vec_def = phi <null, null> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
@@ -2854,7 +2854,7 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
loop:
vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
- s_loop = scalar_stmt # (scalar) STMT
+ s_loop = scalar_stmt # (scalar) STMT
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
@@ -2870,7 +2870,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
int ncopies,
enum tree_code reduc_code,
gimple reduction_phi,
- int reduc_index,
+ int reduc_index,
bool double_reduc)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
@@ -2890,7 +2890,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
gimple epilog_stmt = NULL;
tree new_scalar_dest, new_dest;
gimple exit_phi;
- tree bitsize, bitpos, bytesize;
+ tree bitsize, bitpos, bytesize;
enum tree_code code = gimple_assign_rhs_code (stmt);
tree adjustment_def;
tree vec_initial_def, def;
@@ -2905,18 +2905,18 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
VEC(gimple,heap) *phis = NULL;
enum vect_def_type dt = vect_unknown_def_type;
int j, i;
-
+
if (nested_in_vect_loop_p (loop, stmt))
{
outer_loop = loop;
loop = loop->inner;
nested_in_vect_loop = true;
}
-
+
switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
{
case GIMPLE_SINGLE_RHS:
- gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
+ gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
== ternary_op);
reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
break;
@@ -2924,7 +2924,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
reduction_op = gimple_assign_rhs1 (stmt);
break;
case GIMPLE_BINARY_RHS:
- reduction_op = reduc_index ?
+ reduction_op = reduc_index ?
gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
break;
default:
@@ -2936,7 +2936,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
mode = TYPE_MODE (vectype);
/*** 1. Create the reduction def-use cycle ***/
-
+
/* For the case of reduction, vect_get_vec_def_for_operand returns
the scalar def before the loop, that defines the initial value
of the reduction variable. */
@@ -2948,7 +2948,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
for (j = 0; j < ncopies; j++)
{
/* 1.1 set the loop-entry arg of the reduction-phi: */
- add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop),
+ add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop),
UNKNOWN_LOCATION);
/* 1.2 set the loop-latch arg for the reduction-phi: */
@@ -2978,9 +2978,9 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
Step 1 can be accomplished using one the following three schemes:
(scheme 1) using reduc_code, if available.
(scheme 2) using whole-vector shifts, if available.
- (scheme 3) using a scalar loop. In this case steps 1+2 above are
+ (scheme 3) using a scalar loop. In this case steps 1+2 above are
combined.
-
+
The overall epilog code looks like this:
s_out0 = phi <s_loop> # original EXIT_PHI
@@ -3017,14 +3017,14 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
exit_gsi = gsi_after_labels (exit_bb);
- /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
+ /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
(i.e. when reduc_code is not available) and in the final adjustment
code (if needed). Also get the original scalar reduction variable as
- defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
- represents a reduction pattern), the tree-code and scalar-def are
- taken from the original stmt that the pattern-stmt (STMT) replaces.
+ defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
+ represents a reduction pattern), the tree-code and scalar-def are
+ taken from the original stmt that the pattern-stmt (STMT) replaces.
Otherwise (it is a regular reduction) - the tree-code and scalar-def
- are taken from STMT. */
+ are taken from STMT. */
orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
if (!orig_stmt)
@@ -3055,7 +3055,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
/* In case this is a reduction in an inner-loop while vectorizing an outer
loop - we don't need to extract a single scalar result at the end of the
inner-loop (unless it is double reduction, i.e., the use of reduction is
- outside the outer-loop). The final vector of partial results will be used
+ outside the outer-loop). The final vector of partial results will be used
in the vectorized outer-loop, or reduced to a scalar result at the end of
the outer-loop. */
if (nested_in_vect_loop && !double_reduc)
@@ -3140,7 +3140,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
bit_offset /= 2)
{
tree bitpos = size_int (bit_offset);
-
+
epilog_stmt = gimple_build_assign_with_ops (shift_code, vec_dest,
new_temp, bitpos);
new_name = make_ssa_name (vec_dest, epilog_stmt);
@@ -3160,10 +3160,10 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
{
tree rhs;
- /*** Case 3: Create:
+ /*** Case 3: Create:
s = extract_field <v_out2, 0>
- for (offset = element_size;
- offset < vector_size;
+ for (offset = element_size;
+ offset < vector_size;
offset += element_size;)
{
Create: s' = extract_field <v_out2, offset>
@@ -3181,15 +3181,15 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
-
+
for (bit_offset = element_bitsize;
bit_offset < vec_size_in_bits;
bit_offset += element_bitsize)
- {
+ {
tree bitpos = bitsize_int (bit_offset);
tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
bitpos);
-
+
epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_name);
@@ -3209,7 +3209,7 @@ vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
/* 2.4 Extract the final scalar result. Create:
s_out3 = extract_field <v_out2, bitpos> */
-
+
if (extract_scalar_result)
{
tree rhs;
@@ -3269,7 +3269,7 @@ vect_finalize_reduction:
/* Replace uses of s_out0 with uses of s_out3:
Find the loop-closed-use at the loop exit of the original scalar result.
- (The reduction result is expected to have two immediate uses - one at the
+ (The reduction result is expected to have two immediate uses - one at the
latch block, and one at the loop exit). */
phis = VEC_alloc (gimple, heap, 10);
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
@@ -3294,35 +3294,35 @@ vect_finalize_reduction:
/* FORNOW. Currently not supporting the case that an inner-loop
reduction is not used in the outer-loop (but only outside the
outer-loop), unless it is double reduction. */
- gcc_assert ((STMT_VINFO_RELEVANT_P (stmt_vinfo)
+ gcc_assert ((STMT_VINFO_RELEVANT_P (stmt_vinfo)
&& !STMT_VINFO_LIVE_P (stmt_vinfo)) || double_reduc);
epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
- set_vinfo_for_stmt (epilog_stmt,
- new_stmt_vec_info (epilog_stmt, loop_vinfo,
+ set_vinfo_for_stmt (epilog_stmt,
+ new_stmt_vec_info (epilog_stmt, loop_vinfo,
NULL));
if (adjustment_def)
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
- if (!double_reduc
+ if (!double_reduc
|| STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_double_reduction_def)
continue;
- /* Handle double reduction:
+ /* Handle double reduction:
stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
stmt2: s3 = phi <s1, s4> - (regular) reduction phi (inner loop)
stmt3: s4 = use (s3) - (regular) reduction stmt (inner loop)
stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
- At that point the regular reduction (stmt2 and stmt3) is already
+ At that point the regular reduction (stmt2 and stmt3) is already
vectorized, as well as the exit phi node, stmt4.
Here we vectorize the phi node of double reduction, stmt1, and
update all relevant statements. */
- /* Go through all the uses of s2 to find double reduction phi node,
+ /* Go through all the uses of s2 to find double reduction phi node,
i.e., stmt1 above. */
orig_name = PHI_RESULT (exit_phi);
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
@@ -3337,13 +3337,13 @@ vect_finalize_reduction:
if (gimple_code (use_stmt) != GIMPLE_PHI
|| gimple_phi_num_args (use_stmt) != 2
|| !use_stmt_vinfo
- || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
+ || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
!= vect_double_reduction_def
|| bb->loop_father != outer_loop)
continue;
- /* Create vector phi node for double reduction:
- vs1 = phi <vs0, vs2>
+ /* Create vector phi node for double reduction:
+ vs1 = phi <vs0, vs2>
vs1 was created previously in this function by a call to
vect_get_vec_def_for_operand and is stored in vec_initial_def;
vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
@@ -3351,22 +3351,22 @@ vect_finalize_reduction:
/* Create vector phi node. */
vect_phi = create_phi_node (vec_initial_def, bb);
- new_phi_vinfo = new_stmt_vec_info (vect_phi,
+ new_phi_vinfo = new_stmt_vec_info (vect_phi,
loop_vec_info_for_loop (outer_loop), NULL);
set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
- /* Create vs0 - initial def of the double reduction phi. */
- preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
- loop_preheader_edge (outer_loop));
+ /* Create vs0 - initial def of the double reduction phi. */
+ preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
+ loop_preheader_edge (outer_loop));
init_def = get_initial_def_for_reduction (stmt, preheader_arg,
NULL);
vect_phi_init = vect_init_vector (use_stmt, init_def, vectype,
NULL);
-
+
/* Update phi node arguments with vs0 and vs2. */
- add_phi_arg (vect_phi, vect_phi_init,
+ add_phi_arg (vect_phi, vect_phi_init,
loop_preheader_edge (outer_loop), UNKNOWN_LOCATION);
- add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
+ add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
if (vect_print_dump_info (REPORT_DETAILS))
{
@@ -3378,12 +3378,12 @@ vect_finalize_reduction:
/* Replace the use, i.e., set the correct vs1 in the regular
reduction phi node. FORNOW, NCOPIES is always 1, so the loop
- is redundant. */
+ is redundant. */
use = reduction_phi;
for (j = 0; j < ncopies; j++)
{
edge pr_edge = loop_preheader_edge (loop);
- SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
+ SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
}
}
@@ -3397,7 +3397,7 @@ vect_finalize_reduction:
}
VEC_free (gimple, heap, phis);
-}
+}
/* Function vectorizable_reduction.
@@ -3407,18 +3407,18 @@ vect_finalize_reduction:
stmt to replace it, put it in VEC_STMT, and insert it at GSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise.
- This function also handles reduction idioms (patterns) that have been
+ This function also handles reduction idioms (patterns) that have been
recognized in advance during vect_pattern_recog. In this case, STMT may be
of this form:
X = pattern_expr (arg0, arg1, ..., X)
and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original
sequence that had been detected and replaced by the pattern-stmt (STMT).
-
+
In some cases of reduction patterns, the type of the reduction variable X is
different than the type of the other arguments of STMT.
In such cases, the vectype that is used when transforming STMT into a vector
stmt is different than the vectype that is used to determine the
- vectorization factor, because it consists of a different number of elements
+ vectorization factor, because it consists of a different number of elements
than the actual number of elements that are being operated upon in parallel.
For example, consider an accumulation of shorts into an int accumulator.
@@ -3515,7 +3515,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
&& STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
return false;
- /* 2. Has this been recognized as a reduction pattern?
+ /* 2. Has this been recognized as a reduction pattern?
Check if STMT represents a pattern that has been recognized
in earlier analysis stages. For stmts that represent a pattern,
@@ -3530,7 +3530,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
}
-
+
/* 3. Check the operands of the operation. The first operands are defined
inside the loop body. The last operand is the reduction variable,
which is defined by the loop-header-phi. */
@@ -3571,12 +3571,12 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
scalar_dest = gimple_assign_lhs (stmt);
scalar_type = TREE_TYPE (scalar_dest);
- if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
+ if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
&& !SCALAR_FLOAT_TYPE_P (scalar_type))
return false;
/* All uses but the last are expected to be defined in the loop.
- The last use is the reduction variable. In case of nested cycle this
+ The last use is the reduction variable. In case of nested cycle this
assumption is not true: we use reduc_index to record the index of the
reduction variable. */
for (i = 0; i < op_type-1; i++)
@@ -3603,27 +3603,27 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
}
}
- is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
+ is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, NULL, &def_stmt,
&def, &dt);
gcc_assert (is_simple_use);
gcc_assert (dt == vect_reduction_def
|| dt == vect_nested_cycle
- || ((dt == vect_internal_def || dt == vect_external_def
+ || ((dt == vect_internal_def || dt == vect_external_def
|| dt == vect_constant_def || dt == vect_induction_def)
- && nested_cycle && found_nested_cycle_def));
+ && nested_cycle && found_nested_cycle_def));
if (!found_nested_cycle_def)
reduc_def_stmt = def_stmt;
gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
- if (orig_stmt)
- gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
- reduc_def_stmt,
- !nested_cycle,
+ if (orig_stmt)
+ gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
+ reduc_def_stmt,
+ !nested_cycle,
&dummy));
else
- gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
+ gcc_assert (stmt == vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
!nested_cycle, &dummy));
-
+
if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
return false;
@@ -3692,24 +3692,24 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
STMT: int_acc = widen_sum <short_a, int_acc>
This means that:
- 1. The tree-code that is used to create the vector operation in the
- epilog code (that reduces the partial results) is not the
- tree-code of STMT, but is rather the tree-code of the original
- stmt from the pattern that STMT is replacing. I.e, in the example
- above we want to use 'widen_sum' in the loop, but 'plus' in the
+ 1. The tree-code that is used to create the vector operation in the
+ epilog code (that reduces the partial results) is not the
+ tree-code of STMT, but is rather the tree-code of the original
+ stmt from the pattern that STMT is replacing. I.e, in the example
+ above we want to use 'widen_sum' in the loop, but 'plus' in the
epilog.
2. The type (mode) we use to check available target support
- for the vector operation to be created in the *epilog*, is
- determined by the type of the reduction variable (in the example
+ for the vector operation to be created in the *epilog*, is
+ determined by the type of the reduction variable (in the example
above we'd check this: plus_optab[vect_int_mode]).
However the type (mode) we use to check available target support
for the vector operation to be created *inside the loop*, is
determined by the type of the other arguments to STMT (in the
example we'd check this: widen_sum_optab[vect_short_mode]).
-
- This is contrary to "regular" reductions, in which the types of all
- the arguments are the same as the type of the reduction variable.
- For "regular" reductions we can therefore use the same vector type
+
+ This is contrary to "regular" reductions, in which the types of all
+ the arguments are the same as the type of the reduction variable.
+ For "regular" reductions we can therefore use the same vector type
(and also the same tree-code) when generating the epilog code and
when generating the code inside the loop. */
@@ -3757,7 +3757,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
epilog_reduc_code = ERROR_MARK;
if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
{
- reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype,
+ reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype,
optab_default);
if (!reduc_optab)
{
@@ -3768,12 +3768,12 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
}
if (reduc_optab
- && optab_handler (reduc_optab, vec_mode)->insn_code
+ && optab_handler (reduc_optab, vec_mode)->insn_code
== CODE_FOR_nothing)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "reduc op not supported by target.");
-
+
epilog_reduc_code = ERROR_MARK;
}
}
@@ -3795,7 +3795,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
return false;
}
-
+
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
@@ -3859,7 +3859,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
/* Create the reduction-phi that defines the reduction-operand. */
new_phi = create_phi_node (vec_dest, loop->header);
- set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo,
+ set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo,
NULL));
/* Get the vector def for the reduction variable from the phi
node. */
@@ -3877,19 +3877,19 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
/* Handle uses. */
if (j == 0)
{
- loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
+ loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
stmt, NULL);
if (op_type == ternary_op)
{
if (reduc_index == 0)
- loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
+ loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
NULL);
else
- loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
+ loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
NULL);
}
- /* Get the vector def for the reduction variable from the phi
+ /* Get the vector def for the reduction variable from the phi
node. */
first_phi = new_phi;
}
@@ -3919,15 +3919,15 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
else
{
if (reduc_index == 0)
- expr = build3 (code, vectype, reduc_def, loop_vec_def0,
+ expr = build3 (code, vectype, reduc_def, loop_vec_def0,
loop_vec_def1);
- else
+ else
{
if (reduc_index == 1)
- expr = build3 (code, vectype, loop_vec_def0, reduc_def,
+ expr = build3 (code, vectype, loop_vec_def0, reduc_def,
loop_vec_def1);
else
- expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
+ expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
reduc_def);
}
}
@@ -3936,7 +3936,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
-
+
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
else
@@ -4045,7 +4045,7 @@ vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
/* Function vectorizable_live_operation.
- STMT computes a value that is used outside the loop. Check if
+ STMT computes a value that is used outside the loop. Check if
it can be supported. */
bool
@@ -4061,7 +4061,7 @@ vectorizable_live_operation (gimple stmt,
tree op;
tree def;
gimple def_stmt;
- enum vect_def_type dt;
+ enum vect_def_type dt;
enum tree_code code;
enum gimple_rhs_class rhs_class;
@@ -4216,8 +4216,8 @@ vect_transform_loop (loop_vec_info loop_vinfo)
split_edge (loop_preheader_edge (loop));
/* FORNOW: the vectorizer supports only loops which body consist
- of one basic block (header + empty latch). When the vectorizer will
- support more involved loop forms, the order by which the BBs are
+ of one basic block (header + empty latch). When the vectorizer will
+ support more involved loop forms, the order by which the BBs are
traversed need to be reconsidered. */
for (i = 0; i < nbbs; i++)
@@ -4268,7 +4268,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
{
fprintf (vect_dump, "------>vectorizing statement: ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
- }
+ }
stmt_info = vinfo_for_stmt (stmt);
@@ -4321,7 +4321,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
continue;
}
}
-
+
/* -------- vectorize statement ------------ */
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "transform statement.");
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 3a77e858a57..b2c1de2c049 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -39,7 +39,7 @@ along with GCC; see the file COPYING3. If not see
#include "toplev.h"
/* Function prototypes */
-static void vect_pattern_recog_1
+static void vect_pattern_recog_1
(gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
static bool widened_name_p (tree, gimple, tree *, gimple *);
@@ -60,7 +60,7 @@ static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
Check whether NAME, an ssa-name used in USE_STMT,
is a result of a type-promotion, such that:
DEF_STMT: NAME = NOP (name0)
- where the type of name0 (HALF_TYPE) is smaller than the type of NAME.
+ where the type of name0 (HALF_TYPE) is smaller than the type of NAME.
*/
static bool
@@ -102,7 +102,7 @@ widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
|| (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
return false;
- if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
+ if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
&dt))
return false;
@@ -139,10 +139,10 @@ vect_recog_temp_ssa_var (tree type, gimple stmt)
[S6 prod = (TYPE2) prod; #optional]
S7 sum_1 = prod + sum_0;
- where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
- same size of 'TYPE1' or bigger. This is a special case of a reduction
+ where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
+ same size of 'TYPE1' or bigger. This is a special case of a reduction
computation.
-
+
Input:
* LAST_STMT: A stmt from which the pattern search begins. In the example,
@@ -186,13 +186,13 @@ vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
type = gimple_expr_type (last_stmt);
- /* Look for the following pattern
+ /* Look for the following pattern
DX = (TYPE1) X;
DY = (TYPE1) Y;
- DPROD = DX * DY;
+ DPROD = DX * DY;
DDPROD = (TYPE2) DPROD;
sum_1 = DDPROD + sum_0;
- In which
+ In which
- DX is double the size of X
- DY is double the size of Y
- DX, DY, DPROD all have the same type
@@ -254,10 +254,10 @@ vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
prod_type = half_type;
stmt = SSA_NAME_DEF_STMT (oprnd0);
- /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi
+ /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi
inside the loop (in case we are analyzing an outer-loop). */
if (!is_gimple_assign (stmt))
- return NULL;
+ return NULL;
stmt_vinfo = vinfo_for_stmt (stmt);
gcc_assert (stmt_vinfo);
if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
@@ -303,12 +303,12 @@ vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
half_type = TREE_TYPE (oprnd00);
*type_in = half_type;
*type_out = type;
-
+
/* Pattern detected. Create a stmt to be used to replace the pattern: */
var = vect_recog_temp_ssa_var (type, NULL);
rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1),
pattern_stmt = gimple_build_assign (var, rhs);
-
+
if (vect_print_dump_info (REPORT_DETAILS))
{
fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
@@ -321,7 +321,7 @@ vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
return pattern_stmt;
}
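A source loop of the shape this pattern targets may help; in this sketch (invented names) short plays the role of 'type' and int the role of TYPE1/TYPE2:

int
dot_prod (const short *x, const short *y, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    {
      int prod = (int) x[i] * (int) y[i];   /* S3-S5: widen both operands, multiply */
      sum += prod;                          /* S7: accumulate into the wider type   */
    }
  return sum;
}

When the pattern is recognized, the multiply-and-accumulate is replaced by the single DOT_PROD_EXPR <x_t, y_t, sum_0> statement built just above.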
-
+
/* Function vect_recog_widen_mult_pattern
Try to find the following pattern:
@@ -354,8 +354,8 @@ vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
*/
static gimple
-vect_recog_widen_mult_pattern (gimple last_stmt,
- tree *type_in,
+vect_recog_widen_mult_pattern (gimple last_stmt,
+ tree *type_in,
tree *type_out)
{
gimple def_stmt0, def_stmt1;
@@ -516,7 +516,7 @@ vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
!= NULL_TREE)
{
var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
- gimple_call_set_lhs (stmt, var);
+ gimple_call_set_lhs (stmt, var);
return stmt;
}
}
@@ -530,7 +530,7 @@ vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
Try to find the following pattern:
- type x_t;
+ type x_t;
TYPE x_T, sum = init;
loop:
sum_0 = phi <init, sum_1>
@@ -538,7 +538,7 @@ vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
S2 x_T = (TYPE) x_t;
S3 sum_1 = x_T + sum_0;
- where type 'TYPE' is at least double the size of type 'type', i.e - we're
+ where type 'TYPE' is at least double the size of type 'type', i.e - we're
summing elements of type 'type' into an accumulator of type 'TYPE'. This is
a special case of a reduction computation.
@@ -546,9 +546,9 @@ vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
* LAST_STMT: A stmt from which the pattern search begins. In the example,
when this function is called with S3, the pattern {S2,S3} will be detected.
-
+
Output:
-
+
* TYPE_IN: The type of the input arguments to the pattern.
* TYPE_OUT: The type of the output of this pattern.
@@ -557,12 +557,12 @@ vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
stmts that constitute the pattern. In this case it will be:
WIDEN_SUM <x_t, sum_0>
- Note: The widening-sum idiom is a widening reduction pattern that is
+ Note: The widening-sum idiom is a widening reduction pattern that is
vectorized without preserving all the intermediate results. It
- produces only N/2 (widened) results (by summing up pairs of
- intermediate results) rather than all N results. Therefore, we
- cannot allow this pattern when we want to get all the results and in
- the correct order (as is the case when this computation is in an
+ produces only N/2 (widened) results (by summing up pairs of
+ intermediate results) rather than all N results. Therefore, we
+ cannot allow this pattern when we want to get all the results and in
+ the correct order (as is the case when this computation is in an
inner-loop nested in an outer-loop that is being vectorized). */
static gimple
@@ -637,7 +637,7 @@ vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
}
-/* Function vect_pattern_recog_1
+/* Function vect_pattern_recog_1
Input:
PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
@@ -645,18 +645,18 @@ vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
STMT: A stmt from which the pattern search should start.
If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
- expression that computes the same functionality and can be used to
- replace the sequence of stmts that are involved in the pattern.
+ expression that computes the same functionality and can be used to
+ replace the sequence of stmts that are involved in the pattern.
Output:
- This function checks if the expression returned by PATTERN_RECOG_FUNC is
- supported in vector form by the target. We use 'TYPE_IN' to obtain the
- relevant vector type. If 'TYPE_IN' is already a vector type, then this
+ This function checks if the expression returned by PATTERN_RECOG_FUNC is
+ supported in vector form by the target. We use 'TYPE_IN' to obtain the
+ relevant vector type. If 'TYPE_IN' is already a vector type, then this
indicates that target support had already been checked by PATTERN_RECOG_FUNC.
If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
to the available target pattern.
- This function also does some bookkeeping, as explained in the documentation
+ This function also does some bookkeeping, as explained in the documentation
for vect_recog_pattern. */
static void
@@ -674,12 +674,12 @@ vect_pattern_recog_1 (
pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
if (!pattern_stmt)
- return;
-
- if (VECTOR_MODE_P (TYPE_MODE (type_in)))
- {
- /* No need to check target support (already checked by the pattern
- recognition function). */
+ return;
+
+ if (VECTOR_MODE_P (TYPE_MODE (type_in)))
+ {
+ /* No need to check target support (already checked by the pattern
+ recognition function). */
pattern_vectype = type_in;
}
else
@@ -716,16 +716,16 @@ vect_pattern_recog_1 (
/* Found a vectorizable pattern. */
if (vect_print_dump_info (REPORT_DETAILS))
{
- fprintf (vect_dump, "pattern recognized: ");
+ fprintf (vect_dump, "pattern recognized: ");
print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
}
-
+
/* Mark the stmts that are involved in the pattern. */
gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
set_vinfo_for_stmt (pattern_stmt,
new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
pattern_stmt_info = vinfo_for_stmt (pattern_stmt);
-
+
STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
@@ -764,7 +764,7 @@ vect_pattern_recog_1 (
- fill in the STMT_VINFO fields as follows:
in_pattern_p related_stmt vec_stmt
- S1: a_i = .... - - -
+ S1: a_i = .... - - -
S2: a_2 = ..use(a_i).. - - -
S3: a_1 = ..use(a_2).. - - -
> S6: a_new = .... - S4 -
@@ -780,7 +780,7 @@ vect_pattern_recog_1 (
If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
(because they are marked as irrelevant). It will vectorize S6, and record
- a pointer to the new vector stmt VS6 both from S6 (as usual), and also
+ a pointer to the new vector stmt VS6 both from S6 (as usual), and also
from S4. We do that so that when we get to vectorizing stmts that use the
def of S4 (like S5 that uses a_0), we'll know where to take the relevant
vector-def from. S4 will be skipped, and S5 will be vectorized as usual:
@@ -798,7 +798,7 @@ vect_pattern_recog_1 (
DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
elsewhere), and we'll end up with:
- VS6: va_new = ....
+ VS6: va_new = ....
VS5: ... = ..vuse(va_new)..
If vectorization does not succeed, DCE will clean S6 away (its def is
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 1017847a5f1..fe88e1d6d48 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -1,7 +1,7 @@
/* SLP - Basic Block Vectorization
Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
Foundation, Inc.
- Contributed by Dorit Naishlos <dorit@il.ibm.com>
+ Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
@@ -71,12 +71,12 @@ vect_free_slp_tree (slp_tree node)
if (SLP_TREE_LEFT (node))
vect_free_slp_tree (SLP_TREE_LEFT (node));
-
+
if (SLP_TREE_RIGHT (node))
vect_free_slp_tree (SLP_TREE_RIGHT (node));
-
+
VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node));
-
+
if (SLP_TREE_VEC_STMTS (node))
VEC_free (gimple, heap, SLP_TREE_VEC_STMTS (node));
@@ -101,12 +101,12 @@ vect_free_slp_instance (slp_instance instance)
static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
- slp_tree slp_node, gimple stmt,
+ slp_tree slp_node, gimple stmt,
VEC (gimple, heap) **def_stmts0,
VEC (gimple, heap) **def_stmts1,
enum vect_def_type *first_stmt_dt0,
enum vect_def_type *first_stmt_dt1,
- tree *first_stmt_def0_type,
+ tree *first_stmt_def0_type,
tree *first_stmt_def1_type,
tree *first_stmt_const_oprnd,
int ncopies_for_cost,
@@ -117,11 +117,11 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
tree def;
gimple def_stmt;
enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
- stmt_vec_info stmt_info =
+ stmt_vec_info stmt_info =
vinfo_for_stmt (VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0));
enum gimple_rhs_class rhs_class;
struct loop *loop = NULL;
-
+
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -132,11 +132,11 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
oprnd = gimple_op (stmt, i + 1);
- if (!vect_is_simple_use (oprnd, loop_vinfo, bb_vinfo, &def_stmt, &def,
+ if (!vect_is_simple_use (oprnd, loop_vinfo, bb_vinfo, &def_stmt, &def,
&dt[i])
|| (!def_stmt && dt[i] != vect_constant_def))
{
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
fprintf (vect_dump, "Build SLP failed: can't find def for ");
print_generic_expr (vect_dump, oprnd, TDF_SLIM);
@@ -216,7 +216,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
/* Store. */
vect_model_store_cost (stmt_info, ncopies_for_cost, dt[0], slp_node);
}
-
+
else
{
if (!*first_stmt_dt1 && i == 1)
@@ -227,13 +227,13 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
*first_stmt_def1_type = TREE_TYPE (def);
else
{
- /* We assume that the stmt contains only one constant
+ /* We assume that the stmt contains only one constant
operand. We fail otherwise, to be on the safe side. */
if (*first_stmt_const_oprnd)
{
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
fprintf (vect_dump, "Build SLP failed: two constant "
- "oprnds in stmt");
+ "oprnds in stmt");
return false;
}
*first_stmt_const_oprnd = oprnd;
@@ -241,23 +241,23 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
}
else
{
- /* Not first stmt of the group, check that the def-stmt/s match
+ /* Not first stmt of the group, check that the def-stmt/s match
the def-stmt/s of the first stmt. */
- if ((i == 0
+ if ((i == 0
&& (*first_stmt_dt0 != dt[i]
|| (*first_stmt_def0_type && def
&& *first_stmt_def0_type != TREE_TYPE (def))))
- || (i == 1
+ || (i == 1
&& (*first_stmt_dt1 != dt[i]
|| (*first_stmt_def1_type && def
- && *first_stmt_def1_type != TREE_TYPE (def))))
- || (!def
- && TREE_TYPE (*first_stmt_const_oprnd)
+ && *first_stmt_def1_type != TREE_TYPE (def))))
+ || (!def
+ && TREE_TYPE (*first_stmt_const_oprnd)
!= TREE_TYPE (oprnd)))
- {
- if (vect_print_dump_info (REPORT_SLP))
+ {
+ if (vect_print_dump_info (REPORT_SLP))
fprintf (vect_dump, "Build SLP failed: different types ");
-
+
return false;
}
}
@@ -269,7 +269,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
case vect_constant_def:
case vect_external_def:
break;
-
+
case vect_internal_def:
if (i == 0)
VEC_safe_push (gimple, heap, *def_stmts0, def_stmt);
@@ -279,7 +279,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
default:
/* FORNOW: Not supported. */
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
fprintf (vect_dump, "Build SLP failed: illegal type of def ");
print_generic_expr (vect_dump, def, TDF_SLIM);
@@ -294,12 +294,12 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
/* Recursively build an SLP tree starting from NODE.
- Fail (and return FALSE) if def-stmts are not isomorphic, require data
- permutation or are of unsupported types of operation. Otherwise, return
+ Fail (and return FALSE) if def-stmts are not isomorphic, require data
+ permutation or are of unsupported types of operation. Otherwise, return
TRUE. */
static bool
-vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
+vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
slp_tree *node, unsigned int group_size,
int *inside_cost, int *outside_cost,
int ncopies_for_cost, unsigned int *max_nunits,
@@ -335,7 +335,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
/* For every stmt in NODE find its def stmt/s. */
for (i = 0; VEC_iterate (gimple, stmts, i, stmt); i++)
{
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
fprintf (vect_dump, "Build SLP for ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
@@ -344,17 +344,17 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
lhs = gimple_get_lhs (stmt);
if (lhs == NULL_TREE)
{
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
fprintf (vect_dump,
"Build SLP failed: not GIMPLE_ASSIGN nor GIMPLE_CALL");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
-
+
return false;
}
- scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
+ scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
@@ -365,7 +365,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
}
return false;
}
-
+
ncopies = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
if (ncopies != 1)
{
@@ -376,11 +376,11 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (bb_vinfo)
return false;
}
-
+
/* In case of multiple types we need to detect the smallest type. */
if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
*max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
-
+
if (is_gimple_call (stmt))
rhs_code = CALL_EXPR;
else
@@ -391,7 +391,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
first_stmt_code = rhs_code;
- /* Shift arguments should be equal in all the packed stmts for a
+ /* Shift arguments should be equal in all the packed stmts for a
vector shift with scalar shift operand. */
if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
|| rhs_code == LROTATE_EXPR
@@ -442,26 +442,26 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
&& (first_stmt_code != REALPART_EXPR
|| rhs_code != IMAGPART_EXPR))
{
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
- fprintf (vect_dump,
+ fprintf (vect_dump,
"Build SLP failed: different operation in stmt ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
-
+
return false;
}
-
- if (need_same_oprnds
+
+ if (need_same_oprnds
&& !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
{
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
- fprintf (vect_dump,
+ fprintf (vect_dump,
"Build SLP failed: different shift arguments in ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
-
+
return false;
}
}
@@ -472,11 +472,11 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (REFERENCE_CLASS_P (lhs))
{
/* Store. */
- if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node,
- stmt, &def_stmts0, &def_stmts1,
- &first_stmt_dt0,
- &first_stmt_dt1,
- &first_stmt_def0_type,
+ if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node,
+ stmt, &def_stmts0, &def_stmts1,
+ &first_stmt_dt0,
+ &first_stmt_dt1,
+ &first_stmt_def0_type,
&first_stmt_def1_type,
&first_stmt_const_oprnd,
ncopies_for_cost,
@@ -498,7 +498,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"loads have gaps ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
-
+
return false;
}
@@ -517,9 +517,9 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
return false;
}
-
+
first_load = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt));
-
+
if (first_load == stmt)
{
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
@@ -532,15 +532,15 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"unaligned load ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
-
+
return false;
}
-
+
/* Analyze costs (for the first stmt in the group). */
vect_model_load_cost (vinfo_for_stmt (stmt),
ncopies_for_cost, *node);
}
-
+
/* Store the place of this load in the interleaving chain. In
case that permutation is needed we later decide if a specific
permutation is supported. */
@@ -548,9 +548,9 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
first_load);
if (load_place != i)
permutation = true;
-
+
VEC_safe_push (int, heap, *load_permutation, load_place);
-
+
/* We stop the tree when we reach a group of loads. */
stop_recursion = true;
continue;
@@ -561,7 +561,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
{
/* Not strided load. */
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
fprintf (vect_dump, "Build SLP failed: not strided load ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
@@ -575,7 +575,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (TREE_CODE_CLASS (rhs_code) != tcc_binary
&& TREE_CODE_CLASS (rhs_code) != tcc_unary)
{
- if (vect_print_dump_info (REPORT_SLP))
+ if (vect_print_dump_info (REPORT_SLP))
{
fprintf (vect_dump, "Build SLP failed: operation");
fprintf (vect_dump, " unsupported ");
@@ -585,11 +585,11 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
return false;
}
- /* Find the def-stmts. */
+ /* Find the def-stmts. */
if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node, stmt,
&def_stmts0, &def_stmts1,
- &first_stmt_dt0, &first_stmt_dt1,
- &first_stmt_def0_type,
+ &first_stmt_dt0, &first_stmt_dt1,
+ &first_stmt_def0_type,
&first_stmt_def1_type,
&first_stmt_const_oprnd,
ncopies_for_cost,
@@ -599,7 +599,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
}
/* Add the costs of the node to the overall instance costs. */
- *inside_cost += SLP_TREE_INSIDE_OF_LOOP_COST (*node);
+ *inside_cost += SLP_TREE_INSIDE_OF_LOOP_COST (*node);
*outside_cost += SLP_TREE_OUTSIDE_OF_LOOP_COST (*node);
/* Strided loads were reached - stop the recursion. */
@@ -607,14 +607,14 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
if (permutation)
{
- VEC_safe_push (slp_tree, heap, *loads, *node);
- *inside_cost += TARG_VEC_PERMUTE_COST * group_size;
+ VEC_safe_push (slp_tree, heap, *loads, *node);
+ *inside_cost += TARG_VEC_PERMUTE_COST * group_size;
}
return true;
}
- /* Create SLP_TREE nodes for the definition node/s. */
+ /* Create SLP_TREE nodes for the definition node/s. */
if (first_stmt_dt0 == vect_internal_def)
{
slp_tree left_node = XNEW (struct _slp_tree);
@@ -624,12 +624,12 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
SLP_TREE_RIGHT (left_node) = NULL;
SLP_TREE_OUTSIDE_OF_LOOP_COST (left_node) = 0;
SLP_TREE_INSIDE_OF_LOOP_COST (left_node) = 0;
- if (!vect_build_slp_tree (loop_vinfo, bb_vinfo, &left_node, group_size,
- inside_cost, outside_cost, ncopies_for_cost,
+ if (!vect_build_slp_tree (loop_vinfo, bb_vinfo, &left_node, group_size,
+ inside_cost, outside_cost, ncopies_for_cost,
max_nunits, load_permutation, loads,
vectorization_factor))
return false;
-
+
SLP_TREE_LEFT (*node) = left_node;
}
@@ -647,7 +647,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
max_nunits, load_permutation, loads,
vectorization_factor))
return false;
-
+
SLP_TREE_RIGHT (*node) = right_node;
}
@@ -668,7 +668,7 @@ vect_print_slp_tree (slp_tree node)
for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
{
fprintf (vect_dump, "\n\tstmt %d ", i);
- print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+ print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
fprintf (vect_dump, "\n");
@@ -677,9 +677,9 @@ vect_print_slp_tree (slp_tree node)
}
-/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
- If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
- J). Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
+/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
+ If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
+ J). Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
stmts in NODE are to be marked. */
static void
@@ -715,7 +715,7 @@ vect_mark_slp_stmts_relevant (slp_tree node)
for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
{
stmt_info = vinfo_for_stmt (stmt);
- gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
+ gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
|| STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
}
@@ -725,7 +725,7 @@ vect_mark_slp_stmts_relevant (slp_tree node)
}
-/* Check if the permutation required by the SLP INSTANCE is supported.
+/* Check if the permutation required by the SLP INSTANCE is supported.
Reorganize the SLP nodes stored in SLP_INSTANCE_LOADS if needed. */
static bool
@@ -737,17 +737,17 @@ vect_supported_slp_permutation_p (slp_instance instance)
VEC (slp_tree, heap) *sorted_loads = NULL;
int index;
slp_tree *tmp_loads = NULL;
- int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j;
+ int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j;
slp_tree load;
-
- /* FORNOW: The only supported loads permutation is loads from the same
+
+ /* FORNOW: The only supported loads permutation is loads from the same
location in all the loads in the node, when the data-refs in
- nodes of LOADS constitute an interleaving chain.
+ nodes of LOADS constitute an interleaving chain.
Sort the nodes according to the order of accesses in the chain. */
tmp_loads = (slp_tree *) xmalloc (sizeof (slp_tree) * group_size);
- for (i = 0, j = 0;
- VEC_iterate (int, SLP_INSTANCE_LOAD_PERMUTATION (instance), i, index)
- && VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), j, load);
+ for (i = 0, j = 0;
+ VEC_iterate (int, SLP_INSTANCE_LOAD_PERMUTATION (instance), i, index)
+ && VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), j, load);
i += group_size, j++)
{
gimple scalar_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (load), 0);
@@ -760,14 +760,14 @@ vect_supported_slp_permutation_p (slp_instance instance)
"permutation ");
print_gimple_stmt (vect_dump, scalar_stmt, 0, TDF_SLIM);
}
-
+
free (tmp_loads);
- return false;
+ return false;
}
tmp_loads[index] = load;
}
-
+
sorted_loads = VEC_alloc (slp_tree, heap, group_size);
for (i = 0; i < group_size; i++)
VEC_safe_push (slp_tree, heap, sorted_loads, tmp_loads[i]);
@@ -808,8 +808,8 @@ vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
fprintf (vect_dump, "%d ", next);
}
- /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
- GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
+ /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
+ GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
well. */
if (VEC_length (int, load_permutation)
!= (unsigned int) (group_size * group_size))
@@ -829,44 +829,44 @@ vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
}
prev = next;
- }
+ }
}
if (supported && i == group_size * group_size
&& vect_supported_slp_permutation_p (slp_instn))
return true;
- return false;
+ return false;
}
-/* Find the first load in the loop that belongs to INSTANCE.
+/* Find the first load in the loop that belongs to INSTANCE.
When loads are in several SLP nodes, there can be a case in which the first
- load does not appear in the first SLP node to be transformed, causing
+ load does not appear in the first SLP node to be transformed, causing
incorrect order of statements. Since we generate all the loads together,
they must be inserted before the first load of the SLP instance and not
before the first load of the first node of the instance. */
-static gimple
-vect_find_first_load_in_slp_instance (slp_instance instance)
+static gimple
+vect_find_first_load_in_slp_instance (slp_instance instance)
{
int i, j;
slp_tree load_node;
gimple first_load = NULL, load;
- for (i = 0;
- VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), i, load_node);
+ for (i = 0;
+ VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), i, load_node);
i++)
- for (j = 0;
+ for (j = 0;
VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (load_node), j, load);
j++)
first_load = get_earlier_stmt (load, first_load);
-
+
return first_load;
}
/* Analyze an SLP instance starting from a group of strided stores. Call
- vect_build_slp_tree to build a tree of packed stmts if possible.
+ vect_build_slp_tree to build a tree of packed stmts if possible.
Return FALSE if it's impossible to SLP any stmt in the loop. */
static bool
@@ -884,7 +884,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
unsigned int max_nunits = 0;
VEC (int, heap) *load_permutation;
VEC (slp_tree, heap) *loads;
-
+
scalar_type = TREE_TYPE (DR_REF (STMT_VINFO_DATA_REF (
vinfo_for_stmt (stmt))));
vectype = get_vectype_for_scalar_type (scalar_type);
@@ -914,11 +914,11 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (vect_print_dump_info (REPORT_SLP))
fprintf (vect_dump, "Build SLP failed: unrolling required in basic"
" block SLP");
-
+
return false;
}
- /* Create a node (a root of the SLP tree) for the packed strided stores. */
+ /* Create a node (a root of the SLP tree) for the packed strided stores. */
SLP_TREE_SCALAR_STMTS (node) = VEC_alloc (gimple, heap, group_size);
next = stmt;
/* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
@@ -939,17 +939,17 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
GROUP_SIZE / NUNITS otherwise. */
ncopies_for_cost = unrolling_factor * group_size / nunits;
-
- load_permutation = VEC_alloc (int, heap, group_size * group_size);
- loads = VEC_alloc (slp_tree, heap, group_size);
+
+ load_permutation = VEC_alloc (int, heap, group_size * group_size);
+ loads = VEC_alloc (slp_tree, heap, group_size);
/* Build the tree for the SLP instance. */
- if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
- &inside_cost, &outside_cost, ncopies_for_cost,
- &max_nunits, &load_permutation, &loads,
+ if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
+ &inside_cost, &outside_cost, ncopies_for_cost,
+ &max_nunits, &load_permutation, &loads,
vectorization_factor))
{
- /* Create a new SLP instance. */
+ /* Create a new SLP instance. */
new_instance = XNEW (struct _slp_instance);
SLP_INSTANCE_TREE (new_instance) = node;
SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
@@ -958,7 +958,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (max_nunits > nunits)
unrolling_factor = least_common_multiple (max_nunits, group_size)
/ group_size;
-
+
SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (new_instance) = outside_cost;
SLP_INSTANCE_INSIDE_OF_LOOP_COST (new_instance) = inside_cost;
@@ -968,7 +968,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (VEC_length (slp_tree, loads))
{
if (!vect_supported_load_permutation_p (new_instance, group_size,
- load_permutation))
+ load_permutation))
{
if (vect_print_dump_info (REPORT_SLP))
{
@@ -988,13 +988,13 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (new_instance));
if (loop_vinfo)
- VEC_safe_push (slp_instance, heap,
- LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
+ VEC_safe_push (slp_instance, heap,
+ LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
new_instance);
else
VEC_safe_push (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo),
new_instance);
-
+
if (vect_print_dump_info (REPORT_SLP))
vect_print_slp_tree (node);
@@ -1006,7 +1006,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
vect_free_slp_tree (node);
VEC_free (int, heap, load_permutation);
VEC_free (slp_tree, heap, loads);
-
+
return false;
}
@@ -1029,12 +1029,12 @@ vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
strided_stores = LOOP_VINFO_STRIDED_STORES (loop_vinfo);
else
strided_stores = BB_VINFO_STRIDED_STORES (bb_vinfo);
-
+
for (i = 0; VEC_iterate (gimple, strided_stores, i, store); i++)
if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, store))
ok = true;
- if (bb_vinfo && !ok)
+ if (bb_vinfo && !ok)
{
if (vect_print_dump_info (REPORT_SLP))
fprintf (vect_dump, "Failed to SLP the basic block.");
@@ -1066,8 +1066,8 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
- /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
- call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
+ /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
+ call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
loop-based vectorization. Such stmts will be marked as HYBRID. */
vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
decided_to_slp++;
@@ -1075,8 +1075,8 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
- if (decided_to_slp && vect_print_dump_info (REPORT_SLP))
- fprintf (vect_dump, "Decided to SLP %d instances. Unrolling factor %d",
+ if (decided_to_slp && vect_print_dump_info (REPORT_SLP))
+ fprintf (vect_dump, "Decided to SLP %d instances. Unrolling factor %d",
decided_to_slp, unrolling_factor);
}
@@ -1128,7 +1128,7 @@ vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
/* Create and initialize a new bb_vec_info struct for BB, as well as
stmt_vec_info structs for all the stmts in it. */
-
+
static bb_vec_info
new_bb_vec_info (basic_block bb)
{
@@ -1227,7 +1227,7 @@ vect_slp_analyze_operations (bb_vec_info bb_vinfo)
for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); )
{
- if (!vect_slp_analyze_node_operations (bb_vinfo,
+ if (!vect_slp_analyze_node_operations (bb_vinfo,
SLP_INSTANCE_TREE (instance)))
{
vect_free_slp_instance (instance);
@@ -1235,8 +1235,8 @@ vect_slp_analyze_operations (bb_vec_info bb_vinfo)
}
else
i++;
- }
-
+ }
+
if (!VEC_length (slp_instance, slp_instances))
return false;
@@ -1280,13 +1280,13 @@ vect_slp_analyze_bb (basic_block bb)
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: unhandled data-ref in basic "
"block.\n");
-
+
destroy_bb_vec_info (bb_vinfo);
return NULL;
}
ddrs = BB_VINFO_DDRS (bb_vinfo);
- if (!VEC_length (ddr_p, ddrs))
+ if (!VEC_length (ddr_p, ddrs))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: not enough data-refs in basic "
@@ -1301,17 +1301,17 @@ vect_slp_analyze_bb (basic_block bb)
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: bad data alignment in basic "
"block.\n");
-
+
destroy_bb_vec_info (bb_vinfo);
return NULL;
}
-
+
if (!vect_analyze_data_ref_dependences (NULL, bb_vinfo))
{
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: unhandled data dependence in basic"
" block.\n");
-
+
destroy_bb_vec_info (bb_vinfo);
return NULL;
}
@@ -1321,7 +1321,7 @@ vect_slp_analyze_bb (basic_block bb)
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: unhandled data access in basic "
"block.\n");
-
+
destroy_bb_vec_info (bb_vinfo);
return NULL;
}
@@ -1347,7 +1347,7 @@ vect_slp_analyze_bb (basic_block bb)
destroy_bb_vec_info (bb_vinfo);
return NULL;
}
-
+
slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
/* Mark all the statements that we want to vectorize as pure SLP and
@@ -1356,7 +1356,7 @@ vect_slp_analyze_bb (basic_block bb)
{
vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
- }
+ }
if (!vect_slp_analyze_operations (bb_vinfo))
{
@@ -1374,11 +1374,11 @@ vect_slp_analyze_bb (basic_block bb)
}
-/* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
+/* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
the number of created vector stmts depends on the unrolling factor). However,
the actual number of vector stmts for every SLP node depends on VF which is
set later in vect_analyze_operations(). Hence, SLP costs should be updated.
- In this function we assume that the inside costs calculated in
+ In this function we assume that the inside costs calculated in
vect_model_xxx_cost are linear in ncopies. */
void
@@ -1393,13 +1393,13 @@ vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
/* We assume that costs are linear in ncopies. */
- SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance) *= vf
- / SLP_INSTANCE_UNROLLING_FACTOR (instance);
+ SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance) *= vf
+ / SLP_INSTANCE_UNROLLING_FACTOR (instance);
}
-/* For constant and loop invariant defs of SLP_NODE this function returns
- (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
+/* For constant and loop invariant defs of SLP_NODE this function returns
+ (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
OP_NUM determines if we gather defs for operand 0 or operand 1 of the scalar
stmts. NUMBER_OF_VECTORS is the number of vector defs to create. */
@@ -1441,7 +1441,7 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
}
else
{
- vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
+ vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
gcc_assert (vector_type);
constant_p = false;
}
@@ -1449,7 +1449,7 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
nunits = TYPE_VECTOR_SUBPARTS (vector_type);
/* NUMBER_OF_COPIES is the number of times we need to use the same values in
- created vectors. It is greater than 1 if unrolling is performed.
+ created vectors. It is greater than 1 if unrolling is performed.
For example, we have two scalar operands, s1 and s2 (e.g., group of
strided accesses of size two), while NUNITS is four (i.e., four scalars
@@ -1457,13 +1457,13 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
will be 2).
- If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
+ If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
containing the operands.
For example, NUNITS is four as before, and the group size is 8
(s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
{s5, s6, s7, s8}. */
-
+
number_of_copies = least_common_multiple (nunits, group_size) / group_size;
number_of_places_left_in_vector = nunits;
@@ -1475,7 +1475,7 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
op = gimple_assign_rhs1 (stmt);
else
op = gimple_op (stmt, op_num + 1);
-
+
/* Create 'vect_ = {op0,op1,...,opn}'. */
t = tree_cons (NULL_TREE, op, t);
@@ -1496,7 +1496,7 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
}
}
- /* Since the vectors are created in the reverse order, we should invert
+ /* Since the vectors are created in the reverse order, we should invert
them. */
vec_num = VEC_length (tree, voprnds);
for (j = vec_num - 1; j >= 0; j--)
@@ -1508,8 +1508,8 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
VEC_free (tree, heap, voprnds);
/* In case that VF is greater than the unrolling factor needed for the SLP
- group of stmts, NUMBER_OF_VECTORS to be created is greater than
- NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
+ group of stmts, NUMBER_OF_VECTORS to be created is greater than
+ NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
to replicate the vectors. */
while (number_of_vectors > VEC_length (tree, *vec_oprnds))
{
@@ -1542,15 +1542,15 @@ vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds)
}
-/* Get vectorized definitions for SLP_NODE.
- If the scalar definitions are loop invariants or constants, collect them and
+/* Get vectorized definitions for SLP_NODE.
+ If the scalar definitions are loop invariants or constants, collect them and
call vect_get_constant_vectors() to create vector stmts.
Otherwise, the def-stmts must be already vectorized and the vectorized stmts
must be stored in the LEFT/RIGHT node of SLP_NODE, and we call
- vect_get_slp_vect_defs() to retrieve them.
+ vect_get_slp_vect_defs() to retrieve them.
If VEC_OPRNDS1 is NULL, don't get vector defs for the second operand (from
- the right node. This is used when the second operand must remain scalar. */
-
+ the right node. This is used when the second operand must remain scalar. */
+
void
vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0,
VEC (tree,heap) **vec_oprnds1)
@@ -1558,12 +1558,12 @@ vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0,
gimple first_stmt;
enum tree_code code;
int number_of_vects;
- HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
+ HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
/* The number of vector defs is determined by the number of vector statements
in the node from which we get those statements. */
- if (SLP_TREE_LEFT (slp_node))
+ if (SLP_TREE_LEFT (slp_node))
number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_LEFT (slp_node));
else
{
@@ -1619,22 +1619,22 @@ vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0,
}
-/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
+/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
building a vector of type MASK_TYPE from it) and two input vectors placed in
DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
shifting by STRIDE elements of DR_CHAIN for every copy.
(STRIDE is the number of vectorized stmts for NODE divided by the number of
- copies).
+ copies).
VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
the created stmts must be inserted. */
static inline void
-vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
- int *mask_array, int mask_nunits,
+vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
+ int *mask_array, int mask_nunits,
tree mask_element_type, tree mask_type,
- int first_vec_indx, int second_vec_indx,
- gimple_stmt_iterator *gsi, slp_tree node,
- tree builtin_decl, tree vectype,
+ int first_vec_indx, int second_vec_indx,
+ gimple_stmt_iterator *gsi, slp_tree node,
+ tree builtin_decl, tree vectype,
VEC(tree,heap) *dr_chain,
int ncopies, int vect_stmts_counter)
{
@@ -1654,11 +1654,11 @@ vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
group_size = VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node));
stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
- dr_chain_size = VEC_length (tree, dr_chain);
+ dr_chain_size = VEC_length (tree, dr_chain);
- /* Initialize the vect stmts of NODE to properly insert the generated
+ /* Initialize the vect stmts of NODE to properly insert the generated
stmts later. */
- for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node));
+ for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node));
i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), NULL);
@@ -1681,8 +1681,8 @@ vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
gimple_call_set_lhs (perm_stmt, data_ref);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- /* Store the vector statement in NODE. */
- VEC_replace (gimple, SLP_TREE_VEC_STMTS (node),
+ /* Store the vector statement in NODE. */
+ VEC_replace (gimple, SLP_TREE_VEC_STMTS (node),
stride * i + vect_stmts_counter, perm_stmt);
first_vec_indx += stride;
@@ -1695,16 +1695,16 @@ vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
}
-/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
+/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
return in CURRENT_MASK_ELEMENT its equivalent in target specific
- representation. Check that the mask is valid and return FALSE if not.
+ representation. Check that the mask is valid and return FALSE if not.
Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
the next vector, i.e., the current first vector is not needed. */
-
+
static bool
-vect_get_mask_element (gimple stmt, int first_mask_element, int m,
+vect_get_mask_element (gimple stmt, int first_mask_element, int m,
int mask_nunits, bool only_one_vec, int index,
- int *mask, int *current_mask_element,
+ int *mask, int *current_mask_element,
bool *need_next_vector)
{
int i;
@@ -1739,7 +1739,7 @@ vect_get_mask_element (gimple stmt, int first_mask_element, int m,
if (needs_first_vector || mask_fixed)
{
/* We either need the first vector too or have already moved to the
- next vector. In both cases, this permutation needs three
+ next vector. In both cases, this permutation needs three
vectors. */
if (vect_print_dump_info (REPORT_DETAILS))
{
@@ -1830,32 +1830,32 @@ vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
/* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
unrolling factor. */
- orig_vec_stmts_num = group_size *
+ orig_vec_stmts_num = group_size *
SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
if (orig_vec_stmts_num == 1)
only_one_vec = true;
- /* Number of copies is determined by the final vectorization factor
+ /* Number of copies is determined by the final vectorization factor
relatively to SLP_NODE_INSTANCE unrolling factor. */
- ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
+ ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
- /* Generate permutation masks for every NODE. Number of masks for each NODE
- is equal to GROUP_SIZE.
- E.g., we have a group of three nodes with three loads from the same
- location in each node, and the vector size is 4. I.e., we have a
- a0b0c0a1b1c1... sequence and we need to create the following vectors:
+ /* Generate permutation masks for every NODE. Number of masks for each NODE
+ is equal to GROUP_SIZE.
+ E.g., we have a group of three nodes with three loads from the same
+ location in each node, and the vector size is 4. I.e., we have a
+ a0b0c0a1b1c1... sequence and we need to create the following vectors:
for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
...
The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9} (in target
scpecific type, e.g., in bytes for Altivec.
- The last mask is illegal since we assume two operands for permute
+ The last mask is illegal since we assume two operands for permute
operation, and the mask element values can't be outside that range. Hence,
the last mask must be converted into {2,5,5,5}.
- For the first two permutations we need the first and the second input
+ For the first two permutations we need the first and the second input
vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
- we need the second and the third vectors: {b1,c1,a2,b2} and
+ we need the second and the third vectors: {b1,c1,a2,b2} and
{c2,a3,b3,c3}. */
for (i = 0;
@@ -1880,13 +1880,13 @@ vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
first_mask_element = (i + j * group_size) * scale;
for (m = 0; m < scale; m++)
{
- if (!vect_get_mask_element (stmt, first_mask_element, m,
+ if (!vect_get_mask_element (stmt, first_mask_element, m,
mask_nunits, only_one_vec, index, mask,
&current_mask_element, &need_next_vector))
return false;
mask[index++] = current_mask_element;
- }
+ }
if (index == mask_nunits)
{
@@ -1903,15 +1903,15 @@ vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
SLP_TREE_SCALAR_STMTS (node), scalar_index++);
vect_create_mask_and_perm (stmt, next_scalar_stmt,
- mask, mask_nunits, mask_element_type, mask_type,
- first_vec_index, second_vec_index, gsi, node,
- builtin_decl, vectype, dr_chain, ncopies,
+ mask, mask_nunits, mask_element_type, mask_type,
+ first_vec_index, second_vec_index, gsi, node,
+ builtin_decl, vectype, dr_chain, ncopies,
vect_stmts_counter++);
}
- }
- }
- }
- }
+ }
+ }
+ }
+ }
free (mask);
return true;
@@ -1941,7 +1941,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
vectorization_factor);
vect_schedule_slp_instance (SLP_TREE_RIGHT (node), instance,
vectorization_factor);
-
+
stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
stmt_info = vinfo_for_stmt (stmt);
@@ -1984,7 +1984,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
{
fprintf (vect_dump, "------>vectorizing SLP node starting from: ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
- }
+ }
/* Loads should be inserted before the first load. */
if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
@@ -1993,7 +1993,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
else
si = gsi_for_stmt (stmt);
-
+
is_store = vect_transform_stmt (stmt, &si, &strided_store, node, instance);
if (is_store)
{
@@ -2026,12 +2026,12 @@ vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
- }
+ }
else
{
slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
vf = 1;
- }
+ }
for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
{
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index fb4a5bf5dd7..5c12697f252 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -1,7 +1,7 @@
/* Statement Analysis and Transformation for Vectorization
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
Foundation, Inc.
- Contributed by Dorit Naishlos <dorit@il.ibm.com>
+ Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
@@ -62,7 +62,7 @@ vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
{
gimple pattern_stmt;
- /* This is the last stmt in a sequence that was detected as a
+ /* This is the last stmt in a sequence that was detected as a
pattern that can potentially be vectorized. Don't mark the stmt
as relevant/live because it's not going to be vectorized.
Instead mark the pattern-stmt that replaces it. */
@@ -120,9 +120,9 @@ vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
*live_p = false;
/* cond stmt other than loop exit cond. */
- if (is_ctrl_stmt (stmt)
- && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
- != loop_exit_ctrl_vec_info_type)
+ if (is_ctrl_stmt (stmt)
+ && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
+ != loop_exit_ctrl_vec_info_type)
*relevant = vect_used_in_scope;
/* changing memory. */
@@ -162,9 +162,9 @@ vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
}
-/* Function exist_non_indexing_operands_for_use_p
+/* Function exist_non_indexing_operands_for_use_p
- USE is one of the uses attached to STMT. Check if USE is
+ USE is one of the uses attached to STMT. Check if USE is
used in STMT for anything other than indexing an array. */
static bool
@@ -186,7 +186,7 @@ exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
(This should have been verified in analyze_data_refs).
'var' in the second case corresponds to a def, not a use,
- so USE cannot correspond to any operands that are not used
+ so USE cannot correspond to any operands that are not used
for array indexing.
Therefore, all we need to check is if STMT falls into the
@@ -207,12 +207,12 @@ exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
}
-/*
+/*
Function process_use.
Inputs:
- a USE in STMT in a loop represented by LOOP_VINFO
- - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
+ - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
that defined USE. This is done by calling mark_relevant and passing it
the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
@@ -223,17 +223,17 @@ exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
Exceptions:
- case 1: If USE is used only for address computations (e.g. array indexing),
- which does not need to be directly vectorized, then the liveness/relevance
+ which does not need to be directly vectorized, then the liveness/relevance
of the respective DEF_STMT is left unchanged.
- - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
- skip DEF_STMT cause it had already been processed.
+ - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
+ skip DEF_STMT cause it had already been processed.
- case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
be modified accordingly.
Return true if everything is as expected. Return false otherwise. */
static bool
-process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
+process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -244,13 +244,13 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
gimple def_stmt;
enum vect_def_type dt;
- /* case 1: we are only interested in uses that need to be vectorized. Uses
+ /* case 1: we are only interested in uses that need to be vectorized. Uses
that are used for address computation are not considered relevant. */
if (!exist_non_indexing_operands_for_use_p (use, stmt))
return true;
if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
- {
+ {
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
return false;
@@ -267,10 +267,10 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
return true;
}
- /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
- DEF_STMT must have already been processed, because this should be the
- only way that STMT, which is a reduction-phi, was put in the worklist,
- as there should be no other uses for DEF_STMT in the loop. So we just
+ /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
+ DEF_STMT must have already been processed, because this should be the
+ only way that STMT, which is a reduction-phi, was put in the worklist,
+ as there should be no other uses for DEF_STMT in the loop. So we just
check that everything is as expected, and we are done. */
dstmt_vinfo = vinfo_for_stmt (def_stmt);
bb = gimple_bb (stmt);
@@ -285,7 +285,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
- gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
+ gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
|| STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
return true;
}
@@ -324,7 +324,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
default:
gcc_unreachable ();
- }
+ }
}
/* case 3b: inner-loop stmt defining an outer-loop stmt:
@@ -342,7 +342,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
switch (relevant)
{
case vect_unused_in_scope:
- relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
+ relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
|| STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
vect_used_in_outer_by_reduction : vect_unused_in_scope;
break;
@@ -408,7 +408,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
bb = bbs[i];
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
- {
+ {
phi = gsi_stmt (si);
if (vect_print_dump_info (REPORT_DETAILS))
{
@@ -426,7 +426,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
fprintf (vect_dump, "init: stmt relevant? ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
- }
+ }
if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
vect_mark_relevant (&worklist, stmt, relevant, live_p);
@@ -446,8 +446,8 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
- /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
- (DEF_STMT) as relevant/irrelevant and live/dead according to the
+ /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
+ (DEF_STMT) as relevant/irrelevant and live/dead according to the
liveness and relevance properties of STMT. */
stmt_vinfo = vinfo_for_stmt (stmt);
relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
@@ -463,9 +463,9 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
live_p = false
relevant = vect_used_by_reduction
This is because we distinguish between two kinds of relevant stmts -
- those that are used by a reduction computation, and those that are
- (also) used by a regular computation. This allows us later on to
- identify stmts that are used solely by a reduction, and therefore the
+ those that are used by a reduction computation, and those that are
+ (also) used by a regular computation. This allows us later on to
+ identify stmts that are used solely by a reduction, and therefore the
order of the results that they produce does not have to be kept. */
def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
@@ -492,9 +492,9 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
return false;
}
- live_p = false;
+ live_p = false;
break;
-
+
case vect_nested_cycle:
if (tmp_relevant != vect_unused_in_scope
&& tmp_relevant != vect_used_in_outer_by_reduction
@@ -507,9 +507,9 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
return false;
}
- live_p = false;
- break;
-
+ live_p = false;
+ break;
+
case vect_double_reduction_def:
if (tmp_relevant != vect_unused_in_scope
&& tmp_relevant != vect_used_by_reduction)
@@ -522,12 +522,12 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
}
live_p = false;
- break;
+ break;
default:
break;
}
-
+
FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
{
tree op = USE_FROM_PTR (use_p);
@@ -571,14 +571,14 @@ cost_for_stmt (gimple stmt)
}
}
-/* Function vect_model_simple_cost.
+/* Function vect_model_simple_cost.
- Models cost for simple operations, i.e. those that only emit ncopies of a
+ Models cost for simple operations, i.e. those that only emit ncopies of a
single op. Right now, this does not account for multiple insns that could
be generated for the single vector op. We will handle that shortly. */
void
-vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
+vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
enum vect_def_type *dt, slp_tree slp_node)
{
int i;
@@ -594,9 +594,9 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
for (i = 0; i < 2; i++)
{
if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
- outside_cost += TARG_SCALAR_TO_VEC_COST;
+ outside_cost += TARG_SCALAR_TO_VEC_COST;
}
-
+
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
"outside_cost = %d .", inside_cost, outside_cost);
@@ -607,8 +607,8 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
}
-/* Function vect_cost_strided_group_size
-
+/* Function vect_cost_strided_group_size
+
For strided load or store, return the group_size only if it is the first
load or store of a group, else return 1. This ensures that group size is
only returned once per group. */
@@ -631,7 +631,7 @@ vect_cost_strided_group_size (stmt_vec_info stmt_info)
has the overhead of the strided access attributed to it. */
void
-vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
+vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
enum vect_def_type dt, slp_tree slp_node)
{
int group_size;
@@ -645,18 +645,18 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
outside_cost = TARG_SCALAR_TO_VEC_COST;
/* Strided access? */
- if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
+ if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
group_size = vect_cost_strided_group_size (stmt_info);
/* Not a strided access. */
else
group_size = 1;
- /* Is this an access in a group of stores, which provide strided access?
+ /* Is this an access in a group of stores, which provide strided access?
If so, add in the cost of the permutes. */
- if (group_size > 1)
+ if (group_size > 1)
{
/* Uses a high and low interleave operation for each needed permute. */
- inside_cost = ncopies * exact_log2(group_size) * group_size
+ inside_cost = ncopies * exact_log2(group_size) * group_size
* TARG_VEC_STMT_COST;
if (vect_print_dump_info (REPORT_COST))
@@ -682,12 +682,12 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
Models cost for loads. In the case of strided accesses, the last access
has the overhead of the strided access attributed to it. Since unaligned
- accesses are supported for loads, we also account for the costs of the
+ accesses are supported for loads, we also account for the costs of the
access scheme chosen. */
void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
-
+
{
int group_size;
int alignment_support_cheme;
@@ -715,9 +715,9 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
alignment_support_cheme = vect_supportable_dr_alignment (first_dr);
- /* Is this an access in a group of loads providing strided access?
+ /* Is this an access in a group of loads providing strided access?
If so, add in the cost of the permutes. */
- if (group_size > 1)
+ if (group_size > 1)
{
/* Uses an even and odd extract operations for each needed permute. */
inside_cost = ncopies * exact_log2(group_size) * group_size
@@ -792,7 +792,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
default:
gcc_unreachable ();
}
-
+
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
"outside_cost = %d .", inside_cost, outside_cost);
@@ -808,7 +808,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
Insert a new stmt (INIT_STMT) that initializes a new vector variable with
the vector elements of VECTOR_VAR. Place the initialization at BSI if it
is not NULL. Otherwise, place the initialization at the loop preheader.
- Return the DEF of INIT_STMT.
+ Return the DEF of INIT_STMT.
It will be used in the vectorization of STMT. */
tree
@@ -822,9 +822,9 @@ vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
edge pe;
tree new_temp;
basic_block new_bb;
-
+
new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
- add_referenced_var (new_var);
+ add_referenced_var (new_var);
init_stmt = gimple_build_assign (new_var, vector_var);
new_temp = make_ssa_name (new_var, init_stmt);
gimple_assign_set_lhs (init_stmt, new_temp);
@@ -834,14 +834,14 @@ vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
else
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
-
+
if (loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
if (nested_in_vect_loop_p (loop, stmt))
loop = loop->inner;
-
+
pe = loop_preheader_edge (loop);
new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
gcc_assert (!new_bb);
@@ -907,7 +907,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
print_generic_expr (vect_dump, op, TDF_SLIM);
}
- is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
+ is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
&dt);
gcc_assert (is_simple_use);
if (vect_print_dump_info (REPORT_DETAILS))
@@ -932,7 +932,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
gcc_assert (vector_type);
- if (scalar_def)
+ if (scalar_def)
*scalar_def = op;
/* Create 'vect_cst_ = {cst,cst,...,cst}' */
@@ -954,7 +954,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
gcc_assert (vector_type);
nunits = TYPE_VECTOR_SUBPARTS (vector_type);
- if (scalar_def)
+ if (scalar_def)
*scalar_def = def;
/* Create 'vec_inv = {inv,inv,..,inv}' */
@@ -974,7 +974,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
/* Case 3: operand is defined inside the loop. */
case vect_internal_def:
{
- if (scalar_def)
+ if (scalar_def)
*scalar_def = NULL/* FIXME tuples: def_stmt*/;
/* Get the def from the vectorized stmt. */
@@ -998,7 +998,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
struct loop *loop;
gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
- loop = (gimple_bb (def_stmt))->loop_father;
+ loop = (gimple_bb (def_stmt))->loop_father;
/* Get the def before the loop */
op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
@@ -1026,32 +1026,32 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
/* Function vect_get_vec_def_for_stmt_copy
- Return a vector-def for an operand. This function is used when the
- vectorized stmt to be created (by the caller to this function) is a "copy"
- created in case the vectorized result cannot fit in one vector, and several
- copies of the vector-stmt are required. In this case the vector-def is
+ Return a vector-def for an operand. This function is used when the
+ vectorized stmt to be created (by the caller to this function) is a "copy"
+ created in case the vectorized result cannot fit in one vector, and several
+ copies of the vector-stmt are required. In this case the vector-def is
retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
- of the stmt that defines VEC_OPRND.
+ of the stmt that defines VEC_OPRND.
DT is the type of the vector def VEC_OPRND.
Context:
In case the vectorization factor (VF) is bigger than the number
of elements that can fit in a vectype (nunits), we have to generate
more than one vector stmt to vectorize the scalar stmt. This situation
- arises when there are multiple data-types operated upon in the loop; the
+ arises when there are multiple data-types operated upon in the loop; the
smallest data-type determines the VF, and as a result, when vectorizing
stmts operating on wider types we need to create 'VF/nunits' "copies" of the
vector stmt (each computing a vector of 'nunits' results, and together
- computing 'VF' results in each iteration). This function is called when
+ computing 'VF' results in each iteration). This function is called when
vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
which VF=16 and nunits=4, so the number of copies required is 4):
scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
-
+
S1: x = load VS1.0: vx.0 = memref0 VS1.1
VS1.1: vx.1 = memref1 VS1.2
VS1.2: vx.2 = memref2 VS1.3
- VS1.3: vx.3 = memref3
+ VS1.3: vx.3 = memref3
S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
VSnew.1: vz1 = vx.1 + ... VSnew.2
@@ -1060,19 +1060,19 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
The vectorization of S1 is explained in vectorizable_load.
The vectorization of S2:
- To create the first vector-stmt out of the 4 copies - VSnew.0 -
- the function 'vect_get_vec_def_for_operand' is called to
+ To create the first vector-stmt out of the 4 copies - VSnew.0 -
+ the function 'vect_get_vec_def_for_operand' is called to
get the relevant vector-def for each operand of S2. For operand x it
returns the vector-def 'vx.0'.
- To create the remaining copies of the vector-stmt (VSnew.j), this
- function is called to get the relevant vector-def for each operand. It is
- obtained from the respective VS1.j stmt, which is recorded in the
+ To create the remaining copies of the vector-stmt (VSnew.j), this
+ function is called to get the relevant vector-def for each operand. It is
+ obtained from the respective VS1.j stmt, which is recorded in the
STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
- For example, to obtain the vector-def 'vx.1' in order to create the
- vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
- Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
+ For example, to obtain the vector-def 'vx.1' in order to create the
+ vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
+ Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
and return its def ('vx.1').
Overall, to create the above sequence this function will be called 3 times:
@@ -1108,8 +1108,8 @@ vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
stmt. See vect_get_vec_def_for_stmt_copy() for details. */
static void
-vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
- VEC(tree,heap) **vec_oprnds0,
+vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
+ VEC(tree,heap) **vec_oprnds0,
VEC(tree,heap) **vec_oprnds1)
{
tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
@@ -1139,14 +1139,14 @@ vect_get_vec_defs (tree op0, tree op1, gimple stmt,
{
tree vec_oprnd;
- *vec_oprnds0 = VEC_alloc (tree, heap, 1);
- vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
+ *vec_oprnds0 = VEC_alloc (tree, heap, 1);
+ vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
if (op1)
{
- *vec_oprnds1 = VEC_alloc (tree, heap, 1);
- vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
+ *vec_oprnds1 = VEC_alloc (tree, heap, 1);
+ vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
}
}
@@ -1169,7 +1169,7 @@ vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
- set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
+ set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
bb_vinfo));
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1208,8 +1208,8 @@ vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
/* Function vectorizable_call.
- Check if STMT performs a function call that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT performs a function call that can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at BSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
@@ -1236,7 +1236,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
/* FORNOW: unsupported in basic block SLP. */
gcc_assert (loop_vinfo);
-
+
if (!STMT_VINFO_RELEVANT_P (stmt_info))
return false;
@@ -1477,7 +1477,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
/* Function vect_gen_widened_results_half
Create a vector stmt whose code, type, number of arguments, and result
- variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
+ variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
In the case that CODE is a CALL_EXPR, this means that a call to DECL
needs to be created (DECL is a function-decl of a target-builtin).
@@ -1489,40 +1489,40 @@ vect_gen_widened_results_half (enum tree_code code,
tree vec_oprnd0, tree vec_oprnd1, int op_type,
tree vec_dest, gimple_stmt_iterator *gsi,
gimple stmt)
-{
+{
gimple new_stmt;
- tree new_temp;
-
- /* Generate half of the widened result: */
- if (code == CALL_EXPR)
- {
- /* Target specific support */
+ tree new_temp;
+
+ /* Generate half of the widened result: */
+ if (code == CALL_EXPR)
+ {
+ /* Target specific support */
if (op_type == binary_op)
new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
else
new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
- }
- else
+ }
+ else
{
- /* Generic support */
- gcc_assert (op_type == TREE_CODE_LENGTH (code));
+ /* Generic support */
+ gcc_assert (op_type == TREE_CODE_LENGTH (code));
if (op_type != binary_op)
vec_oprnd1 = NULL;
new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
vec_oprnd1);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
- }
+ }
vect_finish_stmt_generation (stmt, new_stmt, gsi);
return new_stmt;
}
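A caller-side note for the helper above: it is invoked once per half of the widened
result, with the two codes (or builtin decls) that supportable_widening_operation
reported. The operand names are illustrative; the same pattern appears in
vect_create_vectorized_promotion_stmts further down:

   new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                              op_type, vec_dest, gsi, stmt);
   new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                              op_type, vec_dest, gsi, stmt);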
-/* Check if STMT performs a conversion operation, that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+/* Check if STMT performs a conversion operation, that can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at BSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
@@ -1563,7 +1563,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
/* FORNOW: unsupported in basic block SLP. */
gcc_assert (loop_vinfo);
-
+
if (!STMT_VINFO_RELEVANT_P (stmt_info))
return false;
@@ -1624,7 +1624,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
this, so we can safely override NCOPIES with 1 here. */
if (slp_node)
ncopies = 1;
-
+
/* Sanity check: make sure that at least one copy of the vectorized stmt
needs to be generated. */
gcc_assert (ncopies >= 1);
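   For context, a hedged sketch of how NCOPIES is typically derived in these
   vectorizable_* routines before this assertion; the macros are the ones used
   nearby, but the exact expression should be read as an approximation:

      nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
      ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
      if (slp_node)
        ncopies = 1;   /* multiple types are already handled by SLP, see above */
      gcc_assert (ncopies >= 1);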
@@ -1659,7 +1659,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
/* FORNOW: SLP not supported. */
if (STMT_SLP_TYPE (stmt_info))
- return false;
+ return false;
}
if (!vec_stmt) /* transformation not required. */
@@ -1685,14 +1685,14 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
for (j = 0; j < ncopies; j++)
{
if (j == 0)
- vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
+ vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
else
vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
builtin_decl =
targetm.vectorize.builtin_conversion (code, integral_type);
for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
- {
+ {
/* Arguments are ready. create the new vector stmt. */
new_stmt = gimple_build_call (builtin_decl, 1, vop0);
new_temp = make_ssa_name (vec_dest, new_stmt);
@@ -1726,7 +1726,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
/* Generate first half of the widened result: */
new_stmt
- = vect_gen_widened_results_half (code1, decl1,
+ = vect_gen_widened_results_half (code1, decl1,
vec_oprnd0, vec_oprnd1,
unary_op, vec_dest, gsi, stmt);
if (j == 0)
@@ -1784,14 +1784,14 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
}
if (vec_oprnds0)
- VEC_free (tree, heap, vec_oprnds0);
+ VEC_free (tree, heap, vec_oprnds0);
return true;
}
/* Function vectorizable_assignment.
- Check if STMT performs an assignment (copy) that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT performs an assignment (copy) that can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at BSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
@@ -1886,15 +1886,15 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
if (slp_node)
VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt);
}
-
- VEC_free (tree, heap, vec_oprnds);
+
+ VEC_free (tree, heap, vec_oprnds);
return true;
}
/* Function vectorizable_operation.
- Check if STMT performs a binary or unary operation that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT performs a binary or unary operation that can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at BSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
@@ -1998,7 +1998,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
if (op_type == binary_op)
{
op1 = gimple_assign_rhs2 (stmt);
- if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
+ if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
&dt[1]))
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -2115,13 +2115,13 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
/* Handle def. */
vec_dest = vect_create_destination_var (scalar_dest, vectype);
- /* Allocate VECs for vector operands. In case of SLP, vector operands are
+ /* Allocate VECs for vector operands. In case of SLP, vector operands are
created in the previous stages of the recursion, so no allocation is
needed, except for the case of shift with scalar shift argument. In that
case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
- In case of loop-based vectorization we allocate VECs of size 1. We
- allocate VEC_OPRNDS1 only in case of binary operation. */
+ In case of loop-based vectorization we allocate VECs of size 1. We
+ allocate VEC_OPRNDS1 only in case of binary operation. */
if (!slp_node)
{
vec_oprnds0 = VEC_alloc (tree, heap, 1);
@@ -2129,7 +2129,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
vec_oprnds1 = VEC_alloc (tree, heap, 1);
}
else if (scalar_shift_arg)
- vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
+ vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
@@ -2192,9 +2192,9 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (op_type == binary_op && scalar_shift_arg)
{
- /* Vector shl and shr insn patterns can be defined with scalar
- operand 2 (shift operand). In this case, use constant or loop
- invariant op1 directly, without extending it to vector mode
+ /* Vector shl and shr insn patterns can be defined with scalar
+ operand 2 (shift operand). In this case, use constant or loop
+ invariant op1 directly, without extending it to vector mode
first. */
optab_op2_mode = insn_data[icode].operand[2].mode;
if (!VECTOR_MODE_P (optab_op2_mode))
@@ -2207,23 +2207,23 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
/* Store vec_oprnd1 for every vector stmt to be created
for SLP_NODE. We check during the analysis that all the
- shift arguments are the same.
- TODO: Allow different constants for different vector
- stmts generated for an SLP instance. */
+ shift arguments are the same.
+ TODO: Allow different constants for different vector
+ stmts generated for an SLP instance. */
for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
}
}
}
-
- /* vec_oprnd1 is available if operand 1 should be of a scalar-type
- (a special case for certain kind of vector shifts); otherwise,
+
+ /* vec_oprnd1 is available if operand 1 should be of a scalar-type
+ (a special case for certain kind of vector shifts); otherwise,
operand 1 should be of a vector type (the usual case). */
if (op_type == binary_op && !vec_oprnd1)
- vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
+ vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
slp_node);
else
- vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+ vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
slp_node);
}
else
@@ -2261,14 +2261,14 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
/* Get vectorized definitions for loop-based vectorization. For the first
- operand we call vect_get_vec_def_for_operand() (with OPRND containing
- scalar operand), and for the rest we get a copy with
+ operand we call vect_get_vec_def_for_operand() (with OPRND containing
+ scalar operand), and for the rest we get a copy with
vect_get_vec_def_for_stmt_copy() using the previous vector definition
(stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
The vectors are collected into VEC_OPRNDS. */
static void
-vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
+vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
{
tree vec_oprnd;
@@ -2276,7 +2276,7 @@ vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
/* Get first vector operand. */
/* All the vector operands except the very first one (that is scalar oprnd)
are stmt copies. */
- if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
+ if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
else
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
@@ -2286,18 +2286,18 @@ vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
/* Get second vector operand. */
vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
-
+
*oprnd = vec_oprnd;
- /* For conversion in multiple steps, continue to get operands
+ /* For conversion in multiple steps, continue to get operands
recursively. */
if (multi_step_cvt)
- vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
+ vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
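A usage sketch for the helper above, mirroring the call site in
vectorizable_type_demotion shown further down: for a conversion needing
MULTI_STEP_CVT intermediate steps it collects 2 * 2^MULTI_STEP_CVT vector defs,
two per invocation:

   vec_oprnds0 = VEC_alloc (tree, heap,
                   (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
   vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
                             vect_pow2 (multi_step_cvt) - 1);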
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
- For multi-step conversions store the resulting vectors and call the function
+ For multi-step conversions store the resulting vectors and call the function
recursively. */
static void
@@ -2313,7 +2313,7 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
gimple new_stmt;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- vec_dest = VEC_pop (tree, vec_dsts);
+ vec_dest = VEC_pop (tree, vec_dsts);
for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
{
@@ -2327,10 +2327,10 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
if (multi_step_cvt)
/* Store the resulting vector for next recursive call. */
- VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
+ VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
else
{
- /* This is the last step of the conversion sequence. Store the
+ /* This is the last step of the conversion sequence. Store the
vectors in SLP_NODE or in vector info of the scalar statement
(or in STMT_VINFO_RELATED_STMT chain). */
if (slp_node)
@@ -2348,7 +2348,7 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
}
/* For multi-step demotion operations we first generate demotion operations
- from the source type to the intermediate types, and then combine the
+ from the source type to the intermediate types, and then combine the
results (stored in VEC_OPRNDS) in demotion operation to the destination
type. */
if (multi_step_cvt)
@@ -2356,7 +2356,7 @@ vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
/* At each level of recursion we have have of the operands we had at the
previous level. */
VEC_truncate (tree, *vec_oprnds, (i+1)/2);
- vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
+ vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
stmt, vec_dsts, gsi, slp_node,
code, prev_stmt_info);
}
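   As an assumed worked example of the recursion above (not taken from the sources):
   a two-step int -> short -> char demotion starts with four int vectors, packs them
   pairwise into two short vectors, and packs those into one char vector, so
   VEC_OPRNDS halves at every level. VEC_PACK below stands for whichever narrowing
   code supportable_narrowing_operation selected:

      level 0:  vi.0  vi.1  vi.2  vi.3                   (four int vectors)
      level 1:  vs.0 = VEC_PACK <vi.0, vi.1>    vs.1 = VEC_PACK <vi.2, vi.3>
      level 2:  vc.0 = VEC_PACK <vs.0, vs.1>             (one char vector)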
@@ -2475,25 +2475,25 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
ncopies);
- /* In case of multi-step demotion, we first generate demotion operations to
- the intermediate types, and then from that types to the final one.
+ /* In case of multi-step demotion, we first generate demotion operations to
+ the intermediate types, and then from that types to the final one.
We create vector destinations for the intermediate type (TYPES) received
- from supportable_narrowing_operation, and store them in the correct order
+ from supportable_narrowing_operation, and store them in the correct order
for future use in vect_create_vectorized_demotion_stmts(). */
if (multi_step_cvt)
vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
else
vec_dsts = VEC_alloc (tree, heap, 1);
-
+
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
VEC_quick_push (tree, vec_dsts, vec_dest);
if (multi_step_cvt)
{
- for (i = VEC_length (tree, interm_types) - 1;
+ for (i = VEC_length (tree, interm_types) - 1;
VEC_iterate (tree, interm_types, i, intermediate_type); i--)
{
- vec_dest = vect_create_destination_var (scalar_dest,
+ vec_dest = vect_create_destination_var (scalar_dest,
intermediate_type);
VEC_quick_push (tree, vec_dsts, vec_dest);
}
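   A worked example of the ordering above, assuming a single intermediate type
   (int -> short -> char, so TYPES = {short}); CHAR_DEST and SHORT_DEST are
   illustrative names:

      VEC_quick_push (tree, vec_dsts, char_dest);   /* final type, pushed first  */
      VEC_quick_push (tree, vec_dsts, short_dest);  /* intermediate, pushed last */
      /* VEC_pop in vect_create_vectorized_demotion_stmts therefore returns
         short_dest for the first (int->short) step and char_dest for the
         final (short->char) step.  */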
@@ -2509,21 +2509,21 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
{
/* Handle uses. */
if (slp_node)
- vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
+ vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
else
{
VEC_free (tree, heap, vec_oprnds0);
vec_oprnds0 = VEC_alloc (tree, heap,
(multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
- vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
+ vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
vect_pow2 (multi_step_cvt) - 1);
}
/* Arguments are ready. Create the new vector stmts. */
tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
- vect_create_vectorized_demotion_stmts (&vec_oprnds0,
+ vect_create_vectorized_demotion_stmts (&vec_oprnds0,
multi_step_cvt, stmt, tmp_vec_dsts,
- gsi, slp_node, code1,
+ gsi, slp_node, code1,
&prev_stmt_info);
}
@@ -2538,7 +2538,7 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
- and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
+ and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
the resulting vectors and call the function recursively. */
static void
@@ -2548,7 +2548,7 @@ vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
VEC (tree, heap) *vec_dsts,
gimple_stmt_iterator *gsi,
slp_tree slp_node, enum tree_code code1,
- enum tree_code code2, tree decl1,
+ enum tree_code code2, tree decl1,
tree decl2, int op_type,
stmt_vec_info *prev_stmt_info)
{
@@ -2569,7 +2569,7 @@ vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
vop1 = NULL_TREE;
/* Generate the two halves of promotion operation. */
- new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
+ new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
op_type, vec_dest, gsi, stmt);
new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
op_type, vec_dest, gsi, stmt);
@@ -2614,7 +2614,7 @@ vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
if (multi_step_cvt)
{
- /* For multi-step promotion operation we first generate we call the
+ /* For multi-step promotion operation we first generate we call the
function recurcively for every stage. We start from the input type,
create promotion operations to the intermediate types, and then
create promotions to the output type. */
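   An assumed illustration of that multi-step case (char -> short -> int, one
   intermediate type): each stage widens every input vector into a low and a high
   half, so one char vector expands to two short vectors and then to four int
   vectors; the names are only for the example:

      stage 1:  vs.lo = widen_lo <vc>       vs.hi = widen_hi <vc>
      stage 2:  vi.0 = widen_lo <vs.lo>     vi.1 = widen_hi <vs.lo>
                vi.2 = widen_lo <vs.hi>     vi.3 = widen_hi <vs.hi>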
@@ -2627,7 +2627,7 @@ vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
prev_stmt_info);
}
}
-
+
/* Function vectorizable_type_promotion
@@ -2649,7 +2649,7 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
tree decl1 = NULL_TREE, decl2 = NULL_TREE;
- int op_type;
+ int op_type;
tree def;
gimple def_stmt;
enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
@@ -2664,10 +2664,10 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
int multi_step_cvt = 0;
VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
-
+
/* FORNOW: not supported by basic block SLP vectorization. */
gcc_assert (loop_vinfo);
-
+
if (!STMT_VINFO_RELEVANT_P (stmt_info))
return false;
@@ -2765,10 +2765,10 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
ncopies);
/* Handle def. */
- /* In case of multi-step promotion, we first generate promotion operations
+ /* In case of multi-step promotion, we first generate promotion operations
to the intermediate types, and then from that types to the final one.
- We store vector destination in VEC_DSTS in the correct order for
- recursive creation of promotion operations in
+ We store vector destination in VEC_DSTS in the correct order for
+ recursive creation of promotion operations in
vect_create_vectorized_promotion_stmts(). Vector destinations are created
according to TYPES recieved from supportable_widening_operation(). */
if (multi_step_cvt)
@@ -2789,10 +2789,10 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
VEC_quick_push (tree, vec_dsts, vec_dest);
}
}
-
+
if (!slp_node)
{
- vec_oprnds0 = VEC_alloc (tree, heap,
+ vec_oprnds0 = VEC_alloc (tree, heap,
(multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
if (op_type == binary_op)
vec_oprnds1 = VEC_alloc (tree, heap, 1);
@@ -2836,7 +2836,7 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
/* Arguments are ready. Create the new vector stmts. */
tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
- multi_step_cvt, stmt,
+ multi_step_cvt, stmt,
tmp_vec_dsts,
gsi, slp_node, code1, code2,
decl1, decl2, op_type,
@@ -2856,9 +2856,9 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
/* Function vectorizable_store.
- Check if STMT defines a non scalar data-ref (array/pointer/structure) that
- can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT defines a non scalar data-ref (array/pointer/structure) that
+ can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at BSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
@@ -2949,7 +2949,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* The scalar rhs type needs to be trivially convertible to the vector
component type. This should always be the case. */
if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
- {
+ {
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
return false;
@@ -2971,7 +2971,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (!vect_strided_store_supported (vectype)
&& !PURE_SLP_STMT (stmt_info) && !slp)
return false;
-
+
if (first_stmt == stmt)
{
/* STMT is the leader of the group. Check the operands of all the
@@ -2981,7 +2981,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
gcc_assert (gimple_assign_single_p (next_stmt));
op = gimple_assign_rhs1 (next_stmt);
- if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
+ if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt))
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -3014,7 +3014,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* We vectorize all the stmts of the interleaving group when we
reach the last stmt in the group. */
- if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
+ if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
< DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
&& !slp)
{
@@ -3031,14 +3031,14 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
else
vec_num = group_size;
}
- else
+ else
{
first_stmt = stmt;
first_dr = dr;
group_size = vec_num = 1;
first_stmt_vinfo = stmt_info;
}
-
+
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
@@ -3051,7 +3051,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e - we need to "unroll" the
- vector stmt by a factor VF/nunits. For more details see documentation in
+ vector stmt by a factor VF/nunits. For more details see documentation in
vect_get_vec_def_for_copy_stmt. */
/* In case of interleaving (non-unit strided access):
@@ -3075,7 +3075,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
...
-
+
And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
(the order of the data-refs in the output of vect_permute_store_chain
corresponds to the order of scalar stmts in the interleaving chain - see
@@ -3084,7 +3084,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
In case of both multiple types and interleaving, above vector stores and
permutation stmts are created for every copy. The result vector stmts are
put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
- STMT_VINFO_RELATED_STMT for the next copies.
+ STMT_VINFO_RELATED_STMT for the next copies.
*/
prev_stmt_info = NULL;
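   To make the permutation above concrete, an assumed example with group_size = 2
   and nunits = 4; which of the two interleave codes produces which half is target
   dependent, but together they restore the scalar store order:

      vx0 = { a0, a1, a2, a3 }        vx3 = { b0, b1, b2, b3 }
      interleave <vx0, vx3>  ->  { a0, b0, a1, b1 }  and  { a2, b2, a3, b3 },
      stored back to back as a0, b0, a1, b1, a2, b2, a3, b3.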
@@ -3104,29 +3104,29 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
else
{
- /* For interleaved stores we collect vectorized defs for all the
- stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
- used as an input to vect_permute_store_chain(), and OPRNDS as
+ /* For interleaved stores we collect vectorized defs for all the
+ stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
+ used as an input to vect_permute_store_chain(), and OPRNDS as
an input to vect_get_vec_def_for_stmt_copy() for the next copy.
If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
- next_stmt = first_stmt;
+ next_stmt = first_stmt;
for (i = 0; i < group_size; i++)
{
- /* Since gaps are not supported for interleaved stores,
- GROUP_SIZE is the exact number of stmts in the chain.
- Therefore, NEXT_STMT can't be NULL_TREE. In case that
- there is no interleaving, GROUP_SIZE is 1, and only one
+ /* Since gaps are not supported for interleaved stores,
+ GROUP_SIZE is the exact number of stmts in the chain.
+ Therefore, NEXT_STMT can't be NULL_TREE. In case that
+ there is no interleaving, GROUP_SIZE is 1, and only one
iteration of the loop will be executed. */
gcc_assert (next_stmt
&& gimple_assign_single_p (next_stmt));
op = gimple_assign_rhs1 (next_stmt);
- vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
+ vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
NULL);
- VEC_quick_push(tree, dr_chain, vec_oprnd);
- VEC_quick_push(tree, oprnds, vec_oprnd);
+ VEC_quick_push(tree, dr_chain, vec_oprnd);
+ VEC_quick_push(tree, oprnds, vec_oprnd);
next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
}
}
@@ -3134,16 +3134,16 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* We should have catched mismatched types earlier. */
gcc_assert (useless_type_conversion_p (vectype,
TREE_TYPE (vec_oprnd)));
- dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
- &dummy, &ptr_incr, false,
+ dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
+ &dummy, &ptr_incr, false,
&inv_p);
gcc_assert (bb_vinfo || !inv_p);
}
- else
+ else
{
- /* For interleaved stores we created vectorized defs for all the
- defs stored in OPRNDS in the previous iteration (previous copy).
- DR_CHAIN is then used as an input to vect_permute_store_chain(),
+ /* For interleaved stores we created vectorized defs for all the
+ defs stored in OPRNDS in the previous iteration (previous copy).
+ DR_CHAIN is then used as an input to vect_permute_store_chain(),
and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
next copy.
If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
@@ -3151,19 +3151,19 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
for (i = 0; i < group_size; i++)
{
op = VEC_index (tree, oprnds, i);
- vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
+ vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
&dt);
- vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
+ vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
VEC_replace(tree, dr_chain, i, vec_oprnd);
VEC_replace(tree, oprnds, i, vec_oprnd);
}
- dataref_ptr =
+ dataref_ptr =
bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
}
if (strided_store)
{
- result_chain = VEC_alloc (tree, heap, group_size);
+ result_chain = VEC_alloc (tree, heap, group_size);
/* Permute. */
if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
&result_chain))
@@ -3181,7 +3181,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (slp)
vec_oprnd = VEC_index (tree, vec_oprnds, i);
else if (strided_store)
- /* For strided stores vectorized defs are interleaved in
+ /* For strided stores vectorized defs are interleaved in
vect_permute_store_chain(). */
vec_oprnd = VEC_index (tree, result_chain, i);
@@ -3207,7 +3207,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (slp)
continue;
-
+
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
else
@@ -3220,19 +3220,19 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
}
- VEC_free (tree, heap, dr_chain);
- VEC_free (tree, heap, oprnds);
+ VEC_free (tree, heap, dr_chain);
+ VEC_free (tree, heap, oprnds);
if (result_chain)
- VEC_free (tree, heap, result_chain);
+ VEC_free (tree, heap, result_chain);
return true;
}
/* vectorizable_load.
- Check if STMT reads a non scalar data-ref (array/pointer/structure) that
- can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ Check if STMT reads a non scalar data-ref (array/pointer/structure) that
+ can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at BSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
@@ -3244,7 +3244,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
tree vec_dest = NULL;
tree data_ref = NULL;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
- stmt_vec_info prev_stmt_info;
+ stmt_vec_info prev_stmt_info;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
@@ -3347,7 +3347,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* The vector component type needs to be trivially convertible to the
scalar lhs. This should always be the case. */
if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
- {
+ {
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
return false;
@@ -3444,8 +3444,8 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
S1: x = load - VS1_0
S2: z = x + 1 - -
- See in documentation in vect_get_vec_def_for_stmt_copy for how the
- information we recorded in RELATED_STMT field is used to vectorize
+ See in documentation in vect_get_vec_def_for_stmt_copy for how the
+ information we recorded in RELATED_STMT field is used to vectorize
stmt S2. */
/* In case of interleaving (non-unit strided access):
@@ -3455,7 +3455,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
S3: x1 = &base + 1
S4: x3 = &base + 3
- Vectorized loads are created in the order of memory accesses
+ Vectorized loads are created in the order of memory accesses
starting from the access of the first stmt of the chain:
VS1: vx0 = &base
@@ -3476,7 +3476,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
The generation of permutation stmts and recording them in
STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
- In case of both multiple types and interleaving, the vector loads and
+ In case of both multiple types and interleaving, the vector loads and
permutation stmts above are created for every copy. The result vector stmts
are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
STMT_VINFO_RELATED_STMT for the next copies. */
@@ -3493,7 +3493,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
Otherwise, the data reference is potentially unaligned on a target that
- does not support unaligned accesses (dr_explicit_realign_optimized) -
+ does not support unaligned accesses (dr_explicit_realign_optimized) -
then generate the following code, in which the data in each iteration is
obtained by two vector loads, one from the previous iteration, and one
from the current iteration:
@@ -3542,15 +3542,15 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
prev_stmt_info = NULL;
for (j = 0; j < ncopies; j++)
- {
+ {
/* 1. Create the vector pointer update chain. */
if (j == 0)
dataref_ptr = vect_create_data_ref_ptr (first_stmt,
- at_loop, offset,
- &dummy, &ptr_incr, false,
+ at_loop, offset,
+ &dummy, &ptr_incr, false,
&inv_p);
else
- dataref_ptr =
+ dataref_ptr =
bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
for (i = 0; i < vec_num; i++)
@@ -3584,7 +3584,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (compute_in_loop)
msq = vect_setup_realignment (first_stmt, gsi,
&realignment_token,
- dr_explicit_realign,
+ dr_explicit_realign,
dataref_ptr, NULL);
data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
@@ -3661,9 +3661,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* CHECKME: bitpos depends on endianess? */
bitpos = bitsize_zero_node;
- vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
+ vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
bitsize, bitpos);
- vec_dest =
+ vec_dest =
vect_create_destination_var (scalar_dest, NULL_TREE);
new_stmt = gimple_build_assign (vec_dest, vec_inv);
new_temp = make_ssa_name (vec_dest, new_stmt);
@@ -3708,7 +3708,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (strided_load)
{
if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
- return false;
+ return false;
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
VEC_free (tree, heap, dr_chain);
@@ -3732,7 +3732,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
/* Function vect_is_simple_cond.
-
+
Input:
LOOP - the loop that is being vectorized.
COND - Condition that is checked for simple use.
@@ -3756,7 +3756,7 @@ vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
if (TREE_CODE (lhs) == SSA_NAME)
{
gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
- if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
+ if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
&dt))
return false;
}
@@ -3767,7 +3767,7 @@ vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
if (TREE_CODE (rhs) == SSA_NAME)
{
gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
- if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
+ if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
&dt))
return false;
}
@@ -3780,9 +3780,9 @@ vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
/* vectorizable_condition.
- Check if STMT is conditional modify expression that can be vectorized.
- If VEC_STMT is also passed, vectorize the STMT: create a vectorized
- stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
+ Check if STMT is conditional modify expression that can be vectorized.
+ If VEC_STMT is also passed, vectorize the STMT: create a vectorized
+ stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
at GSI.
When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
@@ -3814,7 +3814,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
/* FORNOW: unsupported in basic block SLP. */
gcc_assert (loop_vinfo);
-
+
gcc_assert (ncopies >= 1);
if (ncopies > 1)
return false; /* FORNOW */
@@ -3832,7 +3832,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
return false;
/* FORNOW: not yet supported. */
- if (STMT_VINFO_LIVE_P (stmt_info))
+ if (STMT_VINFO_LIVE_P (stmt_info))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "value used after loop.");
@@ -3865,11 +3865,11 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
if (TREE_CODE (then_clause) == SSA_NAME)
{
gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
- if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
+ if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
&then_def_stmt, &def, &dt))
return false;
}
- else if (TREE_CODE (then_clause) != INTEGER_CST
+ else if (TREE_CODE (then_clause) != INTEGER_CST
&& TREE_CODE (then_clause) != REAL_CST
&& TREE_CODE (then_clause) != FIXED_CST)
return false;
@@ -3881,7 +3881,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
&else_def_stmt, &def, &dt))
return false;
}
- else if (TREE_CODE (else_clause) != INTEGER_CST
+ else if (TREE_CODE (else_clause) != INTEGER_CST
&& TREE_CODE (else_clause) != REAL_CST
&& TREE_CODE (else_clause) != FIXED_CST)
return false;
@@ -3889,7 +3889,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
vec_mode = TYPE_MODE (vectype);
- if (!vec_stmt)
+ if (!vec_stmt)
{
STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
@@ -3902,9 +3902,9 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
vec_dest = vect_create_destination_var (scalar_dest, vectype);
/* Handle cond expr. */
- vec_cond_lhs =
+ vec_cond_lhs =
vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
- vec_cond_rhs =
+ vec_cond_rhs =
vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
if (reduc_index == 1)
vec_then_clause = reduc_def;
@@ -3916,16 +3916,16 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
/* Arguments are ready. Create the new vector stmt. */
- vec_compare = build2 (TREE_CODE (cond_expr), vectype,
+ vec_compare = build2 (TREE_CODE (cond_expr), vectype,
vec_cond_lhs, vec_cond_rhs);
- vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
+ vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
vec_compare, vec_then_clause, vec_else_clause);
*vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
new_temp = make_ssa_name (vec_dest, *vec_stmt);
gimple_assign_set_lhs (*vec_stmt, new_temp);
vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
-
+
return true;
}
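For reference, the statement built above has the following shape, with illustrative
operand names: a per-element select between the vectorized THEN and ELSE values,
driven by the vectorized comparison:

   vcond = VEC_COND_EXPR < va OP vb, vthen, velse >;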
@@ -3937,7 +3937,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
- enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
+ enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
bool ok;
HOST_WIDE_INT dummy;
tree scalar_type, vectype;
@@ -3949,22 +3949,22 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
}
if (gimple_has_volatile_ops (stmt))
- {
+ {
if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
fprintf (vect_dump, "not vectorized: stmt has volatile operands");
return false;
}
-
- /* Skip stmts that do not need to be vectorized. In loops this is expected
+
+ /* Skip stmts that do not need to be vectorized. In loops this is expected
to include:
- the COND_EXPR which is the loop exit condition
- any LABEL_EXPRs in the loop
- - computations that are used only for array indexing or loop control.
+ - computations that are used only for array indexing or loop control.
In basic blocks we only analyze statements that are a part of some SLP
instance, therefore, all the statements are relevant. */
- if (!STMT_VINFO_RELEVANT_P (stmt_info)
+ if (!STMT_VINFO_RELEVANT_P (stmt_info)
&& !STMT_VINFO_LIVE_P (stmt_info))
{
if (vect_print_dump_info (REPORT_DETAILS))
@@ -4032,7 +4032,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
}
ok = true;
- if (!bb_vinfo
+ if (!bb_vinfo
&& (STMT_VINFO_RELEVANT_P (stmt_info)
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
@@ -4052,7 +4052,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
|| vectorizable_assignment (stmt, NULL, NULL, node)
|| vectorizable_load (stmt, NULL, NULL, node, NULL)
|| vectorizable_store (stmt, NULL, NULL, node));
- }
+ }
if (!ok)
{
@@ -4062,7 +4062,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
fprintf (vect_dump, "supported: ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
-
+
return false;
}
@@ -4083,15 +4083,15 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
fprintf (vect_dump, "supported: ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
-
+
return false;
}
if (!PURE_SLP_STMT (stmt_info))
{
- /* Groups of strided accesses whose size is not a power of 2 are not
- vectorizable yet using loop-vectorization. Therefore, if this stmt
- feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
+ /* Groups of strided accesses whose size is not a power of 2 are not
+ vectorizable yet using loop-vectorization. Therefore, if this stmt
+ feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
loop-based vectorized), the loop cannot be vectorized. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
&& exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
@@ -4107,7 +4107,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
return false;
}
}
-
+
return true;
}
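A small illustration of the power-of-2 restriction above: exact_log2 returns -1 for
sizes that are not powers of two, so a strided group of, say, three accesses that
also feeds non-SLP statements makes the loop non-vectorizable here:

   exact_log2 (4) == 2     /* power of two: acceptable      */
   exact_log2 (3) == -1    /* not a power of two: reject    */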
@@ -4118,7 +4118,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
- bool *strided_store, slp_tree slp_node,
+ bool *strided_store, slp_tree slp_node,
slp_instance slp_node_instance)
{
bool is_store = false;
@@ -4161,7 +4161,7 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
break;
case load_vec_info_type:
- done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
+ done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
slp_node_instance);
gcc_assert (done);
break;
@@ -4217,7 +4217,7 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
&& STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
&& (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
- || STMT_VINFO_RELEVANT (stmt_info) ==
+ || STMT_VINFO_RELEVANT (stmt_info) ==
vect_used_in_outer_by_reduction))
{
struct loop *innerloop = LOOP_VINFO_LOOP (
@@ -4265,9 +4265,9 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
/* STMT was inserted by the vectorizer to replace a computation idiom.
- ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
- computed this idiom. We need to record a pointer to VEC_STMT in
- the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
+ ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
+ computed this idiom. We need to record a pointer to VEC_STMT in
+ the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
documentation of vect_pattern_recog. */
if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
{
@@ -4277,11 +4277,11 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
}
}
- return is_store;
+ return is_store;
}
-/* Remove a group of stores (for SLP or interleaving), free their
+/* Remove a group of stores (for SLP or interleaving), free their
stmt_vec_info. */
void
@@ -4308,7 +4308,7 @@ vect_remove_stores (gimple first_stmt)
Create and initialize a new stmt_vec_info struct for STMT. */
stmt_vec_info
-new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
+new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
stmt_vec_info res;
@@ -4446,40 +4446,40 @@ get_vectype_for_scalar_type (tree scalar_type)
DEF - the defining stmt in case OPERAND is an SSA_NAME.
Returns whether a stmt with OPERAND can be vectorized.
- For loops, supportable operands are constants, loop invariants, and operands
- that are defined by the current iteration of the loop. Unsupportable
- operands are those that are defined by a previous iteration of the loop (as
+ For loops, supportable operands are constants, loop invariants, and operands
+ that are defined by the current iteration of the loop. Unsupportable
+ operands are those that are defined by a previous iteration of the loop (as
is the case in reduction/induction computations).
For basic blocks, supportable operands are constants and bb invariants.
For now, operands defined outside the basic block are not supported. */
bool
-vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
+vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo, gimple *def_stmt,
tree *def, enum vect_def_type *dt)
-{
+{
basic_block bb;
stmt_vec_info stmt_vinfo;
struct loop *loop = NULL;
-
+
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
*def_stmt = NULL;
*def = NULL_TREE;
-
+
if (vect_print_dump_info (REPORT_DETAILS))
{
fprintf (vect_dump, "vect_is_simple_use: operand ");
print_generic_expr (vect_dump, operand, TDF_SLIM);
}
-
+
if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
{
*dt = vect_constant_def;
return true;
}
-
+
if (is_gimple_min_invariant (operand))
{
*def = operand;
@@ -4493,14 +4493,14 @@ vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
fprintf (vect_dump, "non-associatable copy.");
operand = TREE_OPERAND (operand, 0);
}
-
+
if (TREE_CODE (operand) != SSA_NAME)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "not ssa-name.");
return false;
}
-
+
*def_stmt = SSA_NAME_DEF_STMT (operand);
if (*def_stmt == NULL)
{
@@ -4528,7 +4528,7 @@ vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
if ((loop && !flow_bb_inside_loop_p (loop, bb))
|| (!loop && bb != BB_VINFO_BB (bb_vinfo))
- || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
+ || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
*dt = vect_external_def;
else
{
@@ -4573,26 +4573,26 @@ vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
/* Function supportable_widening_operation
- Check whether an operation represented by the code CODE is a
- widening operation that is supported by the target platform in
+ Check whether an operation represented by the code CODE is a
+ widening operation that is supported by the target platform in
vector form (i.e., when operating on arguments of type VECTYPE).
-
+
Widening operations we currently support are NOP (CONVERT), FLOAT
and WIDEN_MULT. This function checks if these operations are supported
by the target platform either directly (via vector tree-codes), or via
target builtins.
Output:
- - CODE1 and CODE2 are codes of vector operations to be used when
- vectorizing the operation, if available.
+ - CODE1 and CODE2 are codes of vector operations to be used when
+ vectorizing the operation, if available.
- DECL1 and DECL2 are decls of target builtin functions to be used
when vectorizing the operation, if available. In this case,
- CODE1 and CODE2 are CALL_EXPR.
+ CODE1 and CODE2 are CALL_EXPR.
- MULTI_STEP_CVT determines the number of required intermediate steps in
case of multi-step conversion (like char->short->int - in that case
MULTI_STEP_CVT will be 1).
- - INTERM_TYPES contains the intermediate type required to perform the
- widening operation (short in the above example). */
+ - INTERM_TYPES contains the intermediate type required to perform the
+ widening operation (short in the above example). */
bool
supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
@@ -4613,23 +4613,23 @@ supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
enum tree_code c1, c2;
/* The result of a vectorized widening operation usually requires two vectors
- (because the widened results do not fit int one vector). The generated
- vector results would normally be expected to be generated in the same
+ (because the widened results do not fit int one vector). The generated
+ vector results would normally be expected to be generated in the same
order as in the original scalar computation, i.e. if 8 results are
generated in each vector iteration, they are to be organized as follows:
- vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
+ vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
- However, in the special case that the result of the widening operation is
+ However, in the special case that the result of the widening operation is
used in a reduction computation only, the order doesn't matter (because
- when vectorizing a reduction we change the order of the computation).
+ when vectorizing a reduction we change the order of the computation).
Some targets can take advantage of this and generate more efficient code.
For example, targets like Altivec, that support widen_mult using a sequence
of {mult_even,mult_odd} generate the following vectors:
vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
When vectorizing outer-loops, we execute the inner-loop sequentially
- (each vectorized inner-loop iteration contributes to VF outer-loop
- iterations in parallel). We therefore don't allow to change the order
+ (each vectorized inner-loop iteration contributes to VF outer-loop
+ iterations in parallel). We therefore don't allow to change the order
of the computation in the inner-loop during outer-loop vectorization. */
if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
@@ -4726,7 +4726,7 @@ supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
== CODE_FOR_nothing)
return false;
- /* Check if it's a multi-step conversion that can be done using intermediate
+ /* Check if it's a multi-step conversion that can be done using intermediate
types. */
if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
|| insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
@@ -4738,10 +4738,10 @@ supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
if (!CONVERT_EXPR_CODE_P (code))
return false;
-
+
*code1 = c1;
*code2 = c2;
-
+
/* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
intermediate steps in promotion sequence. We try MAX_INTERM_CVT_STEPS
to get to NARROW_VECTYPE, and fail if we do not. */
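   A caller-side sketch of the interface documented at the top of this function, as
   used by vectorizable_type_promotion; the parameter order follows this vintage's
   prototype but should be read as an approximation rather than the exact call:

      if (!supportable_widening_operation (code, stmt, vectype, &decl1, &decl2,
                                           &code1, &code2, &multi_step_cvt,
                                           &interm_types))
        return false;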
@@ -4761,7 +4761,7 @@ supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
|| (icode2 = optab2->handlers[(int) prev_mode].insn_code)
== CODE_FOR_nothing
|| insn_data[icode2].operand[0].mode != intermediate_mode
- || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
+ || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
== CODE_FOR_nothing
|| (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
== CODE_FOR_nothing)
@@ -4789,22 +4789,22 @@ supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
/* Function supportable_narrowing_operation
- Check whether an operation represented by the code CODE is a
- narrowing operation that is supported by the target platform in
+ Check whether an operation represented by the code CODE is a
+ narrowing operation that is supported by the target platform in
vector form (i.e., when operating on arguments of type VECTYPE).
-
+
Narrowing operations we currently support are NOP (CONVERT) and
FIX_TRUNC. This function checks if these operations are supported by
the target platform directly via vector tree-codes.
Output:
- - CODE1 is the code of a vector operation to be used when
- vectorizing the operation, if available.
+ - CODE1 is the code of a vector operation to be used when
+ vectorizing the operation, if available.
- MULTI_STEP_CVT determines the number of required intermediate steps in
case of multi-step conversion (like int->short->char - in that case
MULTI_STEP_CVT will be 1).
- INTERM_TYPES contains the intermediate type required to perform the
- narrowing operation (short in the above example). */
+ narrowing operation (short in the above example). */
bool
supportable_narrowing_operation (enum tree_code code,
@@ -4850,7 +4850,7 @@ supportable_narrowing_operation (enum tree_code code,
return false;
vec_mode = TYPE_MODE (vectype);
- if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
+ if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
== CODE_FOR_nothing)
return false;
@@ -4871,13 +4871,13 @@ supportable_narrowing_operation (enum tree_code code,
intermediate_mode = insn_data[icode1].operand[0].mode;
intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
TYPE_UNSIGNED (prev_type));
- interm_optab = optab_for_tree_code (c1, intermediate_type,
+ interm_optab = optab_for_tree_code (c1, intermediate_type,
optab_default);
- if (!interm_optab
+ if (!interm_optab
|| (icode1 = optab1->handlers[(int) prev_mode].insn_code)
== CODE_FOR_nothing
|| insn_data[icode1].operand[0].mode != intermediate_mode
- || (icode1
+ || (icode1
= interm_optab->handlers[(int) intermediate_mode].insn_code)
== CODE_FOR_nothing)
return false;
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index 90abbd76665..71cf8e5358c 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -1,7 +1,7 @@
/* Vectorizer
Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
Foundation, Inc.
- Contributed by Dorit Naishlos <dorit@il.ibm.com>
+ Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
@@ -21,21 +21,21 @@ along with GCC; see the file COPYING3. If not see
/* Loop and basic block vectorizer.
- This file contains drivers for the three vectorizers:
- (1) loop vectorizer (inter-iteration parallelism),
+ This file contains drivers for the three vectorizers:
+ (1) loop vectorizer (inter-iteration parallelism),
(2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
vectorizer)
(3) BB vectorizer (out-of-loops), aka SLP
-
+
The rest of the vectorizer's code is organized as follows:
- - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
- used by drivers (1) and (2).
- - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
- drivers (1) and (2).
- - tree-vect-slp.c - BB vectorization specific analysis and transformation,
+ - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
+ used by drivers (1) and (2).
+ - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
+ drivers (1) and (2).
+ - tree-vect-slp.c - BB vectorization specific analysis and transformation,
used by drivers (2) and (3).
- tree-vect-stmts.c - statements analysis and transformation (used by all).
- - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
+ - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
manipulations (used by all).
- tree-vect-patterns.c - vectorizable code patterns detector (used by all)
@@ -73,7 +73,7 @@ along with GCC; see the file COPYING3. If not see
/* vect_dump will be set to stderr or dump_file if exist. */
FILE *vect_dump;
-/* vect_verbosity_level set to an invalid value
+/* vect_verbosity_level set to an invalid value
to mark that it's uninitialized. */
static enum verbosity_levels vect_verbosity_level = MAX_VERBOSITY_LEVEL;
static enum verbosity_levels user_vect_verbosity_level = MAX_VERBOSITY_LEVEL;
@@ -100,7 +100,7 @@ vect_set_verbosity_level (const char *val)
if (vl < MAX_VERBOSITY_LEVEL)
user_vect_verbosity_level = (enum verbosity_levels) vl;
else
- user_vect_verbosity_level
+ user_vect_verbosity_level
= (enum verbosity_levels) (MAX_VERBOSITY_LEVEL - 1);
}
@@ -125,9 +125,9 @@ vect_set_dump_settings (bool slp)
vect_verbosity_level = user_vect_verbosity_level;
/* Ignore user defined verbosity if dump flags require higher level of
verbosity. */
- if (dump_file)
+ if (dump_file)
{
- if (((dump_flags & TDF_DETAILS)
+ if (((dump_flags & TDF_DETAILS)
&& vect_verbosity_level >= REPORT_DETAILS)
|| ((dump_flags & TDF_STATS)
&& vect_verbosity_level >= REPORT_UNVECTORIZED_LOCATIONS))
@@ -135,8 +135,8 @@ vect_set_dump_settings (bool slp)
}
else
{
- /* If there is no dump file, print to stderr in case of loop
- vectorization. */
+ /* If there is no dump file, print to stderr in case of loop
+ vectorization. */
if (!slp)
vect_dump = stderr;
@@ -174,7 +174,7 @@ vect_print_dump_info (enum verbosity_levels vl)
DECL_SOURCE_FILE (current_function_decl),
DECL_SOURCE_LINE (current_function_decl));
else
- fprintf (vect_dump, "\n%s:%d: note: ",
+ fprintf (vect_dump, "\n%s:%d: note: ",
LOC_FILE (vect_location), LOC_LINE (vect_location));
return true;
@@ -182,7 +182,7 @@ vect_print_dump_info (enum verbosity_levels vl)
/* Function vectorize_loops.
-
+
Entry point to loop vectorization phase. */
unsigned
@@ -207,8 +207,8 @@ vectorize_loops (void)
/* ----------- Analyze loops. ----------- */
- /* If some loop was duplicated, it gets bigger number
- than all previously defined loops. This fact allows us to run
+ /* If some loop was duplicated, it gets bigger number
+ than all previously defined loops. This fact allows us to run
only over initial loops skipping newly generated ones. */
FOR_EACH_LOOP (li, loop, 0)
if (optimize_loop_nest_for_speed_p (loop))
@@ -255,7 +255,7 @@ vectorize_loops (void)
return num_vectorized_loops > 0 ? TODO_cleanup_cfg : 0;
}
-
+
/* Entry point to basic block SLP phase. */
@@ -289,9 +289,9 @@ execute_vect_slp (void)
static bool
gate_vect_slp (void)
{
- /* Apply SLP either if the vectorizer is on and the user didn't specify
+ /* Apply SLP either if the vectorizer is on and the user didn't specify
whether to run SLP or not, or if the SLP flag was set by the user. */
- return ((flag_tree_vectorize != 0 && flag_tree_slp_vectorize != 0)
+ return ((flag_tree_vectorize != 0 && flag_tree_slp_vectorize != 0)
|| flag_tree_slp_vectorize == 1);
}
@@ -313,7 +313,7 @@ struct gimple_opt_pass pass_slp_vectorize =
TODO_ggc_collect
| TODO_verify_ssa
| TODO_dump_func
- | TODO_update_ssa
+ | TODO_update_ssa
| TODO_verify_stmts /* todo_flags_finish */
}
};
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 31e9c185005..b7c6316f9c6 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -1,5 +1,5 @@
/* Vectorizer
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free
Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
@@ -98,9 +98,9 @@ typedef struct _slp_tree {
VEC (gimple, heap) *stmts;
/* Vectorized stmt/s. */
VEC (gimple, heap) *vec_stmts;
- /* Number of vector stmts that are created to replace the group of scalar
- stmts. It is calculated during the transformation phase as the number of
- scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
+ /* Number of vector stmts that are created to replace the group of scalar
+ stmts. It is calculated during the transformation phase as the number of
+ scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
divided by vector size. */
unsigned int vec_stmts_size;
/* Vectorization costs associated with SLP node. */
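A minimal sketch of the arithmetic described for vec_stmts_size above, with made-up numbers; the field itself is filled in during the transformation phase.

/* Illustrative sketch only: vec_stmts_size = GROUP_SIZE * VF / vector size.
   With a hypothetical group of 8 scalar stmts, VF == 4 and 4 elements per
   vector, the group is replaced by 8 * 4 / 4 == 8 vector stmts.  */
static unsigned
example_vec_stmts_size (unsigned group_size, unsigned vf, unsigned nunits)
{
  return group_size * vf / nunits;
}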
@@ -127,13 +127,13 @@ typedef struct _slp_instance {
unsigned int unrolling_factor;
/* Vectorization costs associated with SLP instance. */
- struct
+ struct
{
int outside_of_loop; /* Statements generated outside loop. */
int inside_of_loop; /* Statements generated inside loop. */
} cost;
- /* Loads permutation relatively to the stores, NULL if there is no
+ /* Load permutation relative to the stores, NULL if there is no
permutation. */
VEC (int, heap) *load_permutation;
@@ -182,12 +182,12 @@ typedef struct _loop_vec_info {
tree num_iters_unchanged;
/* Minimum number of iterations below which vectorization is expected to
- not be profitable (as estimated by the cost model).
+ not be profitable (as estimated by the cost model).
-1 indicates that vectorization will not be profitable.
FORNOW: This field is an int. Will be a tree in the future, to represent
- values unknown at compile time. */
- int min_profitable_iters;
-
+ values unknown at compile time. */
+ int min_profitable_iters;
+
/* Is the loop vectorizable? */
bool vectorizable;
@@ -235,7 +235,7 @@ typedef struct _loop_vec_info {
of the loop. */
VEC(slp_instance, heap) *slp_instances;
- /* The unrolling factor needed to SLP the loop. In case of that pure SLP is
+ /* The unrolling factor needed to SLP the loop. In case pure SLP is
applied to the loop, i.e., no unrolling is needed, this is 1. */
unsigned slp_unrolling_factor;
} *loop_vec_info;
@@ -284,18 +284,18 @@ loop_vec_info_for_loop (struct loop *loop)
static inline bool
nested_in_vect_loop_p (struct loop *loop, gimple stmt)
{
- return (loop->inner
+ return (loop->inner
&& (loop->inner == (gimple_bb (stmt))->loop_father));
}
typedef struct _bb_vec_info {
basic_block bb;
- /* All interleaving chains of stores in the basic block, represented by the
+ /* All interleaving chains of stores in the basic block, represented by the
first stmt in the chain. */
VEC(gimple, heap) *strided_stores;
- /* All SLP instances in the basic block. This is a subset of the set of
+ /* All SLP instances in the basic block. This is a subset of the set of
STRIDED_STORES of the basic block. */
VEC(slp_instance, heap) *slp_instances;
@@ -337,7 +337,7 @@ enum stmt_vec_info_type {
loop_exit_ctrl_vec_info_type
};
-/* Indicates whether/how a variable is used in the scope of loop/basic
+/* Indicates whether/how a variable is used in the scope of loop/basic
block. */
enum vect_relevant {
vect_unused_in_scope = 0,
@@ -349,33 +349,33 @@ enum vect_relevant {
vect_used_in_outer,
/* defs that feed computations that end up (only) in a reduction. These
- defs may be used by non-reduction stmts, but eventually, any
- computations/values that are affected by these defs are used to compute
- a reduction (i.e. don't get stored to memory, for example). We use this
- to identify computations that we can change the order in which they are
+ defs may be used by non-reduction stmts, but eventually, any
+ computations/values that are affected by these defs are used to compute
+ a reduction (i.e. don't get stored to memory, for example). We use this
+ to identify computations that we can change the order in which they are
computed. */
vect_used_by_reduction,
- vect_used_in_scope
+ vect_used_in_scope
};
/* The type of vectorization that can be applied to the stmt: regular loop-based
vectorization; pure SLP - the stmt is a part of SLP instances and does not
have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
a part of SLP instance and also must be loop-based vectorized, since it has
- uses outside SLP sequences.
-
- In the loop context the meanings of pure and hybrid SLP are slightly
- different. By saying that pure SLP is applied to the loop, we mean that we
- exploit only intra-iteration parallelism in the loop; i.e., the loop can be
- vectorized without doing any conceptual unrolling, cause we don't pack
- together stmts from different iterations, only within a single iteration.
- Loop hybrid SLP means that we exploit both intra-iteration and
+ uses outside SLP sequences.
+
+ In the loop context the meanings of pure and hybrid SLP are slightly
+ different. By saying that pure SLP is applied to the loop, we mean that we
+ exploit only intra-iteration parallelism in the loop; i.e., the loop can be
+ vectorized without doing any conceptual unrolling, because we don't pack
+ together stmts from different iterations, only within a single iteration.
+ Loop hybrid SLP means that we exploit both intra-iteration and
inter-iteration parallelism (e.g., number of elements in the vector is 4
- and the slp-group-size is 2, in which case we don't have enough parallelism
- within an iteration, so we obtain the rest of the parallelism from subsequent
+ and the slp-group-size is 2, in which case we don't have enough parallelism
+ within an iteration, so we obtain the rest of the parallelism from subsequent
iterations by unrolling the loop by 2). */
-enum slp_vect_type {
+enum slp_vect_type {
loop_vect = 0,
pure_slp,
hybrid
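A rough sketch of the unrolling factor from the example in the comment above (4 vector elements, SLP group size 2, unroll by 2); it assumes the group size divides the number of vector elements evenly, whereas the in-tree computation is more general.

/* Illustrative sketch only: conceptual unrolling for hybrid SLP, assuming
   group_size evenly divides the number of vector elements.
   nunits == 4, group_size == 2  =>  unroll by 2.  */
static unsigned
example_slp_unrolling_factor (unsigned nunits, unsigned group_size)
{
  return nunits / group_size;
}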
@@ -413,7 +413,7 @@ typedef struct _stmt_vec_info {
/** The following is relevant only for stmts that contain a non-scalar
- data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
+ data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
at most one such data-ref. **/
/* Information about the data-ref (access function, etc),
@@ -431,14 +431,14 @@ typedef struct _stmt_vec_info {
/* Stmt is part of some pattern (computation idiom) */
bool in_pattern_p;
- /* Used for various bookkeeping purposes, generally holding a pointer to
- some other stmt S that is in some way "related" to this stmt.
+ /* Used for various bookkeeping purposes, generally holding a pointer to
+ some other stmt S that is in some way "related" to this stmt.
Current use of this field is:
- If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
- true): S is the "pattern stmt" that represents (and replaces) the
- sequence of stmts that constitutes the pattern. Similarly, the
- related_stmt of the "pattern stmt" points back to this stmt (which is
- the last stmt in the original sequence of stmts that constitutes the
+ If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
+ true): S is the "pattern stmt" that represents (and replaces) the
+ sequence of stmts that constitutes the pattern. Similarly, the
+ related_stmt of the "pattern stmt" points back to this stmt (which is
+ the last stmt in the original sequence of stmts that constitutes the
pattern). */
gimple related_stmt;
@@ -470,7 +470,7 @@ typedef struct _stmt_vec_info {
bool read_write_dep;
/* Vectorization costs associated with statement. */
- struct
+ struct
{
int outside_of_loop; /* Statements generated outside loop. */
int inside_of_loop; /* Statements generated inside loop. */
@@ -478,7 +478,7 @@ typedef struct _stmt_vec_info {
/* Whether the stmt is SLPed, loop-based vectorized, or both. */
enum slp_vect_type slp_type;
-
+
/* The bb_vec_info with respect to which STMT is vectorized. */
bb_vec_info bb_vinfo;
} *stmt_vec_info;
@@ -558,7 +558,7 @@ typedef struct _stmt_vec_info {
#endif
/* Cost of any vector operation, excluding load, store or vector to scalar
- operation. */
+ operation. */
#ifndef TARG_VEC_STMT_COST
#define TARG_VEC_STMT_COST 1
#endif
@@ -683,25 +683,25 @@ is_loop_header_bb_p (basic_block bb)
return false;
}
-static inline void
-stmt_vinfo_set_inside_of_loop_cost (stmt_vec_info stmt_info, slp_tree slp_node,
+static inline void
+stmt_vinfo_set_inside_of_loop_cost (stmt_vec_info stmt_info, slp_tree slp_node,
int cost)
{
if (slp_node)
SLP_TREE_INSIDE_OF_LOOP_COST (slp_node) = cost;
else
STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = cost;
-}
+}
-static inline void
-stmt_vinfo_set_outside_of_loop_cost (stmt_vec_info stmt_info, slp_tree slp_node,
+static inline void
+stmt_vinfo_set_outside_of_loop_cost (stmt_vec_info stmt_info, slp_tree slp_node,
int cost)
{
if (slp_node)
SLP_TREE_OUTSIDE_OF_LOOP_COST (slp_node) = cost;
else
STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = cost;
-}
+}
static inline int
vect_pow2 (int x)
@@ -743,7 +743,7 @@ extern LOC vect_loop_location;
/* Function prototypes. */
/*-----------------------------------------------------------------*/
-/* Simple loop peeling and versioning utilities for vectorizer's purposes -
+/* Simple loop peeling and versioning utilities for vectorizer's purposes -
in tree-vect-loop-manip.c. */
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
@@ -759,13 +759,13 @@ extern tree get_vectype_for_scalar_type (tree);
extern bool vect_is_simple_use (tree, loop_vec_info, bb_vec_info, gimple *,
tree *, enum vect_def_type *);
extern bool supportable_widening_operation (enum tree_code, gimple, tree,
- tree *, tree *, enum tree_code *,
- enum tree_code *, int *,
+ tree *, tree *, enum tree_code *,
+ enum tree_code *, int *,
VEC (tree, heap) **);
extern bool supportable_narrowing_operation (enum tree_code, const_gimple,
- tree, enum tree_code *, int *,
+ tree, enum tree_code *, int *,
VEC (tree, heap) **);
-extern stmt_vec_info new_stmt_vec_info (gimple stmt, loop_vec_info,
+extern stmt_vec_info new_stmt_vec_info (gimple stmt, loop_vec_info,
bb_vec_info);
extern void free_stmt_vec_info (gimple stmt);
extern tree vectorizable_function (gimple, tree, tree);
@@ -786,9 +786,9 @@ extern bool vect_transform_stmt (gimple, gimple_stmt_iterator *,
bool *, slp_tree, slp_instance);
extern void vect_remove_stores (gimple);
extern bool vect_analyze_stmt (gimple, bool *, slp_tree);
-extern bool vectorizable_condition (gimple, gimple_stmt_iterator *, gimple *,
+extern bool vectorizable_condition (gimple, gimple_stmt_iterator *, gimple *,
tree, int);
-
+
/* In tree-vect-data-refs.c. */
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
@@ -803,7 +803,7 @@ extern bool vect_analyze_data_ref_accesses (loop_vec_info, bb_vec_info);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_analyze_data_refs (loop_vec_info, bb_vec_info);
extern tree vect_create_data_ref_ptr (gimple, struct loop *, tree, tree *,
- gimple *, bool, bool *);
+ gimple *, bool, bool *);
extern tree bump_vector_ptr (tree, gimple, gimple_stmt_iterator *, gimple, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_strided_store_supported (tree);
@@ -811,7 +811,7 @@ extern bool vect_strided_load_supported (tree);
extern bool vect_permute_store_chain (VEC(tree,heap) *,unsigned int, gimple,
gimple_stmt_iterator *, VEC(tree,heap) **);
extern tree vect_setup_realignment (gimple, gimple_stmt_iterator *, tree *,
- enum dr_alignment_support, tree,
+ enum dr_alignment_support, tree,
struct loop **);
extern bool vect_permute_load_chain (VEC(tree,heap) *,unsigned int, gimple,
gimple_stmt_iterator *, VEC(tree,heap) **);
@@ -843,7 +843,7 @@ extern int vect_min_worthwhile_factor (enum tree_code);
/* In tree-vect-slp.c. */
extern void vect_free_slp_instance (slp_instance);
extern bool vect_transform_slp_perm_load (gimple, VEC (tree, heap) *,
- gimple_stmt_iterator *, int,
+ gimple_stmt_iterator *, int,
slp_instance, bool);
extern bool vect_schedule_slp (loop_vec_info, bb_vec_info);
extern void vect_update_slp_costs_according_to_vf (loop_vec_info);
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 5adc0da84da..6e6070937c1 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -623,7 +623,7 @@ abs_extent_range (value_range_t *vr, tree min, tree max)
}
-/* Return value range information for VAR.
+/* Return value range information for VAR.
If we have no values ranges recorded (ie, VRP is not running), then
return NULL. Otherwise create an empty range if none existed for VAR. */
@@ -985,11 +985,11 @@ valid_value_p (tree expr)
|| TREE_CODE (expr) == MINUS_EXPR)
return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
&& TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
-
+
return is_gimple_min_invariant (expr);
}
-/* Return
+/* Return
1 if VAL < VAL2
0 if !(VAL < VAL2)
-2 if those are incomparable. */
@@ -1035,7 +1035,7 @@ operand_less_p (tree val, tree val2)
}
/* Compare two values VAL1 and VAL2. Return
-
+
-2 if VAL1 and VAL2 cannot be compared at compile-time,
-1 if VAL1 < VAL2,
0 if VAL1 == VAL2,
@@ -1073,7 +1073,7 @@ compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
tree n1, c1, n2, c2;
enum tree_code code1, code2;
-
+
/* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
same name, return -2. */
@@ -1209,7 +1209,7 @@ compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
/* First see if VAL1 and VAL2 are not the same. */
if (val1 == val2 || operand_equal_p (val1, val2, 0))
return 0;
-
+
/* If VAL1 is a lower address than VAL2, return -1. */
if (operand_less_p (val1, val2) == 1)
return -1;
@@ -1270,7 +1270,7 @@ compare_values (tree val1, tree val2)
This also applies to value_ranges_intersect_p and
range_includes_zero_p. The semantics of VR_RANGE and
VR_ANTI_RANGE should be encoded here, but that also means
- adapting the users of these functions to the new semantics.
+ adapting the users of these functions to the new semantics.
Benchmark compile/20001226-1.c compilation time after changing this
function. */
@@ -1295,8 +1295,8 @@ value_inside_range (tree val, value_range_t * vr)
/* Return true if value ranges VR0 and VR1 have a non-empty
- intersection.
-
+ intersection.
+
Benchmark compile/20001226-1.c compilation time after changing this
function.
*/
@@ -1542,7 +1542,7 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
The only situation in which we can build a valid
anti-range is when LIMIT_VR is a single-valued range
- (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
+ (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
if (limit_vr
&& limit_vr->type == VR_RANGE
@@ -1736,7 +1736,7 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
there are three cases to consider.
- 1. The VR_ANTI_RANGE range is completely within the
+ 1. The VR_ANTI_RANGE range is completely within the
VR_RANGE and the endpoints of the ranges are
different. In that case the resulting range
should be whichever range is more precise.
@@ -2823,7 +2823,7 @@ extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
set_value_range_to_varying (vr);
return;
}
-
+
/* ABS_EXPR may flip the range around, if the original range
included negative values. */
if (is_overflow_infinity (vr0.min))
@@ -2862,7 +2862,7 @@ extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
/* If a VR_ANTI_RANGEs contains zero, then we have
~[-INF, min(MIN, MAX)]. */
if (vr0.type == VR_ANTI_RANGE)
- {
+ {
if (range_includes_zero_p (&vr0))
{
/* Take the lower of the two values. */
@@ -3040,7 +3040,7 @@ extract_range_from_comparison (value_range_t *vr, enum tree_code code,
{
bool sop = false;
tree val;
-
+
val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
NULL);
@@ -3309,7 +3309,7 @@ vrp_var_may_overflow (tree var, gimple stmt)
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
-
+
- Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
all the values in the ranges.
@@ -3466,7 +3466,7 @@ compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
/* Otherwise, we don't know. */
return NULL_TREE;
}
-
+
gcc_unreachable ();
}
@@ -3726,7 +3726,7 @@ build_assert_expr_for (tree cond, tree v)
if (COMPARISON_CLASS_P (cond))
{
- tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
+ tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
assertion = gimple_build_assign (n, a);
}
else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
@@ -3935,11 +3935,11 @@ register_new_assert_for (tree name, tree expr,
registered location for A. If we are doing an edge insertion,
assume that A will be inserted at E->DEST. Note that this is not
necessarily true.
-
+
If E is a critical edge, it will be split. But even if E is
split, the new block will dominate the same set of blocks that
E->DEST dominates.
-
+
The reverse, however, is not true, blocks dominated by E->DEST
will not be dominated by the new block created to split E. So,
if the insertion location is on a critical edge, we will not use
@@ -4222,7 +4222,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
/* OP is an operand of a truth value expression which is known to have
a particular value. Register any asserts for OP and for any
- operands in OP's defining statement.
+ operands in OP's defining statement.
If CODE is EQ_EXPR, then we want to register OP is zero (false),
if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
@@ -4241,7 +4241,7 @@ register_edge_assert_for_1 (tree op, enum tree_code code,
return false;
/* We know that OP will have a zero or nonzero value. If OP is used
- more than once go ahead and register an assert for OP.
+ more than once go ahead and register an assert for OP.
The FOUND_IN_SUBGRAPH support is not helpful in this situation as
it will always be set for OP (because OP is used in a COND_EXPR in
@@ -4302,7 +4302,7 @@ register_edge_assert_for_1 (tree op, enum tree_code code,
code, e, bsi);
}
else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
- {
+ {
/* Recurse through the type conversion. */
retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
code, e, bsi);
@@ -4557,7 +4557,7 @@ find_switch_asserts (basic_block bb, gimple last)
If a statement produces a useful assertion A for name N_i, then the
list of assertions already generated for N_i is scanned to
determine if A is actually needed.
-
+
If N_i already had the assertion A at a location dominating the
current location, then nothing needs to be done. Otherwise, the
new location for A is recorded instead.
@@ -4601,7 +4601,7 @@ find_switch_asserts (basic_block bb, gimple last)
4- If BB does not end in a conditional expression, then we recurse
into BB's dominator children.
-
+
At the end of the recursive traversal, every SSA name will have a
list of locations where ASSERT_EXPRs should be added. When a new
location for name N is found, it is registered by calling
@@ -4678,7 +4678,7 @@ find_assert_locations_1 (basic_block bb, sbitmap live)
{
tree t = op;
gimple def_stmt = SSA_NAME_DEF_STMT (t);
-
+
while (is_gimple_assign (def_stmt)
&& gimple_assign_rhs_code (def_stmt) == NOP_EXPR
&& TREE_CODE
@@ -5054,7 +5054,7 @@ search_for_addr_array (tree t, location_t location)
if (gimple_code (g) != GIMPLE_ASSIGN)
return;
- if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
+ if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
!= GIMPLE_SINGLE_RHS)
return;
@@ -5063,11 +5063,11 @@ search_for_addr_array (tree t, location_t location)
/* We are only interested in addresses of ARRAY_REF's. */
- if (TREE_CODE (t) != ADDR_EXPR)
+ if (TREE_CODE (t) != ADDR_EXPR)
return;
/* Check each ARRAY_REFs in the reference chain. */
- do
+ do
{
if (TREE_CODE (t) == ARRAY_REF)
check_array_ref (location, t, true /*ignore_off_by_one*/);
@@ -5080,7 +5080,7 @@ search_for_addr_array (tree t, location_t location)
/* walk_tree() callback that checks if *TP is
an ARRAY_REF inside an ADDR_EXPR (in which an array
subscript one outside the valid range is allowed). Call
- check_array_ref for each ARRAY_REF found. The location is
+ check_array_ref for each ARRAY_REF found. The location is
passed in DATA. */
static tree
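The comment above permits a subscript one outside the valid range when the ARRAY_REF sits under an ADDR_EXPR; a small C illustration of that distinction:

/* Illustrative only: taking the one-past-the-end address is valid C,
   actually reading that element is the out-of-range access being checked.  */
int a[4];
int *one_past_end (void) { return &a[4]; }  /* address only: allowed */
int out_of_range (void)  { return a[4]; }   /* element access: flagged */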
@@ -5188,21 +5188,21 @@ check_all_array_refs (void)
/* Convert range assertion expressions into the implied copies and
copy propagate away the copies. Doing the trivial copy propagation
here avoids the need to run the full copy propagation pass after
- VRP.
-
+ VRP.
+
FIXME, this will eventually lead to copy propagation removing the
names that had useful range information attached to them. For
instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
then N_i will have the range [3, +INF].
-
+
However, by converting the assertion into the implied copy
operation N_i = N_j, we will then copy-propagate N_j into the uses
of N_i and lose the range information. We may want to hold on to
ASSERT_EXPRs a little while longer as the ranges could be used in
things like jump threading.
-
+
The problem with keeping ASSERT_EXPRs around is that passes after
- VRP need to handle them appropriately.
+ VRP need to handle them appropriately.
Another approach would be to make the range information a first
class property of the SSA_NAME so that it can be queried from
@@ -5247,7 +5247,7 @@ remove_range_assertions (void)
/* And finally, remove the copy, it is not needed. */
gsi_remove (&si, true);
- release_defs (stmt);
+ release_defs (stmt);
}
else
gsi_next (&si);
@@ -5395,7 +5395,7 @@ vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
return SSA_PROP_NOT_INTERESTING;
}
-
+
/* Every other statement produces no useful ranges. */
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
set_value_range_to_varying (get_value_range (def));
@@ -5753,7 +5753,7 @@ vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
else
location = gimple_location (stmt);
- warning_at (location, OPT_Wtype_limits,
+ warning_at (location, OPT_Wtype_limits,
integer_zerop (ret)
? G_("comparison always false "
"due to limited range of data type")
@@ -5787,7 +5787,7 @@ vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
fprintf (dump_file, "\nVisiting conditional with predicate: ");
print_gimple_stmt (dump_file, stmt, 0, 0);
fprintf (dump_file, "\nWith known ranges\n");
-
+
FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
{
fprintf (dump_file, "\t");
@@ -5801,7 +5801,7 @@ vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
/* Compute the value of the predicate COND by checking the known
ranges of each of its operands.
-
+
Note that we cannot evaluate all the equivalent ranges here
because those ranges may not yet be final and with the current
propagation strategy, we cannot determine when the value ranges
@@ -5948,7 +5948,7 @@ find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
&& min_take_default
&& max_take_default)
{
- /* Only the default case label reached.
+ /* Only the default case label reached.
Return an empty range. */
*min_idx = 1;
*max_idx = 0;
@@ -6780,7 +6780,7 @@ simplify_cond_using_ranges (gimple stmt)
&& is_gimple_min_invariant (op1))
{
value_range_t *vr = get_value_range (op0);
-
+
/* If we have range information for OP0, then we might be
able to simplify this conditional. */
if (vr->type == VR_RANGE)
@@ -6881,7 +6881,7 @@ simplify_switch_using_ranges (gimple stmt)
i = 1;
j = 0;
}
- else
+ else
{
j = i;
}
@@ -7026,7 +7026,7 @@ fold_predicate_in (gimple_stmt_iterator *si)
{
if (assignment_p)
val = fold_convert (gimple_expr_type (stmt), val);
-
+
if (dump_file)
{
fprintf (dump_file, "Folding predicate ");
@@ -7067,7 +7067,7 @@ vrp_fold_stmt (gimple_stmt_iterator *si)
}
/* Stack of dest,src equivalency pairs that need to be restored after
- each attempt to thread a block's incoming edge to an outgoing edge.
+ each attempt to thread a block's incoming edge to an outgoing edge.
A NULL entry is used to mark the end of pairs which need to be
restored. */
@@ -7107,7 +7107,7 @@ simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
Unlike DOM, we do not iterate VRP if jump threading was successful.
While iterating may expose new opportunities for VRP, it is expected
those opportunities would be very limited and the compile time cost
- to expose those opportunities would be significant.
+ to expose those opportunities would be significant.
As jump threading opportunities are discovered, they are registered
for later realization. */
@@ -7302,7 +7302,7 @@ vrp_finalize (void)
4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
5 endif
6 if (q_2)
-
+
In the code above, pointer p_5 has range [q_2, q_2], but from the
code we can also determine that p_5 cannot be NULL and, if q_2 had
a non-varying range, p_5's range should also be compatible with it.
@@ -7316,7 +7316,7 @@ vrp_finalize (void)
between names so that we can take advantage of information from
multiple ranges when doing final replacement. Note that this
equivalency relation is transitive but not symmetric.
-
+
In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
cannot assert that q_2 is equivalent to p_5 because q_2 may be used
in contexts where that assertion does not hold (e.g., in line 6).
diff --git a/gcc/tree.c b/gcc/tree.c
index a40c36ecf73..dc4820981ed 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -196,13 +196,13 @@ static GTY ((if_marked ("ggc_marked_p"), param_is (union tree_node)))
/* General tree->tree mapping structure for use in hash tables. */
-static GTY ((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
+static GTY ((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
htab_t debug_expr_for_decl;
-static GTY ((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
+static GTY ((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
htab_t value_expr_for_decl;
-static GTY ((if_marked ("tree_priority_map_marked_p"),
+static GTY ((if_marked ("tree_priority_map_marked_p"),
param_is (struct tree_priority_map)))
htab_t init_priority_for_decl;
@@ -273,7 +273,7 @@ static inline enum tree_node_structure_enum
tree_node_structure_for_code (enum tree_code code)
{
switch (TREE_CODE_CLASS (code))
- {
+ {
case tcc_declaration:
{
switch (code)
@@ -367,7 +367,7 @@ initialize_tree_contains_struct (void)
MARK_TS_COMMON (C); \
tree_contains_struct[C][TS_DECL_MINIMAL] = 1; \
} while (0)
-
+
#define MARK_TS_DECL_COMMON(C) \
do { \
MARK_TS_DECL_MINIMAL (C); \
@@ -543,7 +543,7 @@ init_ttree (void)
int_cst_hash_table = htab_create_ggc (1024, int_cst_hash_hash,
int_cst_hash_eq, NULL);
-
+
int_cst_node = make_node (INTEGER_CST);
cl_option_hash_table = htab_create_ggc (64, cl_option_hash_hash,
@@ -584,7 +584,7 @@ decl_assembler_name_equal (tree decl, const_tree asmname)
decl_str = IDENTIFIER_POINTER (decl_asmname);
asmname_str = IDENTIFIER_POINTER (asmname);
-
+
/* If the target assembler name was set by the user, things are trickier.
We have a leading '*' to begin with. After that, it's arguable what
@@ -844,7 +844,7 @@ make_node_stat (enum tree_code code MEM_STAT_DECL)
break;
}
break;
-
+
default:
gcc_unreachable ();
}
@@ -986,7 +986,7 @@ copy_node_stat (tree node MEM_STAT_DECL)
but the optimizer should catch that. */
TYPE_SYMTAB_POINTER (t) = 0;
TYPE_SYMTAB_ADDRESS (t) = 0;
-
+
/* Do not copy the values cache. */
if (TYPE_CACHED_VALUES_P(t))
{
@@ -1187,7 +1187,7 @@ build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
TREE_INT_CST_LOW (t) = low;
TREE_INT_CST_HIGH (t) = hi;
TREE_TYPE (t) = type;
-
+
TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
}
}
@@ -1456,7 +1456,7 @@ build_string (int len, const char *str)
#ifdef GATHER_STATISTICS
tree_node_counts[(int) c_kind]++;
tree_node_sizes[(int) c_kind] += length;
-#endif
+#endif
s = ggc_alloc_tree (length);
@@ -4206,7 +4206,7 @@ free_lang_data_in_type (tree type)
}
}
}
-
+
/* Remove members that are not actually FIELD_DECLs from the field
list of an aggregate. These occur in C++. */
if (RECORD_OR_UNION_TYPE_P (type))
@@ -4218,7 +4218,7 @@ free_lang_data_in_type (tree type)
to be removed, we cannot set its TREE_CHAIN to NULL.
Otherwise, we would not be able to find all the other fields
in the other instances of this TREE_TYPE.
-
+
This was causing an ICE in testsuite/g++.dg/lto/20080915.C. */
prev = NULL_TREE;
member = TYPE_FIELDS (type);
@@ -4454,7 +4454,7 @@ free_lang_data_in_decl (tree decl)
else if (TREE_CODE (decl) == TYPE_DECL)
{
DECL_INITIAL (decl) = NULL_TREE;
-
+
/* DECL_CONTEXT is overloaded as DECL_FIELD_CONTEXT for
FIELD_DECLs, which should be preserved. Otherwise,
we shouldn't be concerned with source-level lexical
@@ -4988,7 +4988,7 @@ free_lang_data (void)
}
-struct simple_ipa_opt_pass pass_ipa_free_lang_data =
+struct simple_ipa_opt_pass pass_ipa_free_lang_data =
{
{
SIMPLE_IPA_PASS,
@@ -5023,10 +5023,10 @@ is_attribute_with_length_p (const char *attr, int attr_len, const_tree ident)
if (TREE_CODE (ident) != IDENTIFIER_NODE)
return 0;
-
+
p = IDENTIFIER_POINTER (ident);
ident_len = IDENTIFIER_LENGTH (ident);
-
+
if (ident_len == attr_len
&& strcmp (attr, p) == 0)
return 1;
@@ -5208,12 +5208,12 @@ merge_dllimport_decl_attributes (tree old, tree new_tree)
marked dllimport and a definition appears later, then the object
is not dllimport'd. We also remove a `new' dllimport if the old list
contains dllexport: dllexport always overrides dllimport, regardless
- of the order of declaration. */
+ of the order of declaration. */
if (!VAR_OR_FUNCTION_DECL_P (new_tree))
delete_dllimport_p = 0;
else if (DECL_DLLIMPORT_P (new_tree)
&& lookup_attribute ("dllexport", DECL_ATTRIBUTES (old)))
- {
+ {
DECL_DLLIMPORT_P (new_tree) = 0;
warning (OPT_Wattributes, "%q+D already declared with dllexport attribute: "
"dllimport ignored", new_tree);
@@ -5239,7 +5239,7 @@ merge_dllimport_decl_attributes (tree old, tree new_tree)
}
/* Let an inline definition silently override the external reference,
- but otherwise warn about attribute inconsistency. */
+ but otherwise warn about attribute inconsistency. */
else if (TREE_CODE (new_tree) == VAR_DECL
|| !DECL_DECLARED_INLINE_P (new_tree))
warning (OPT_Wattributes, "%q+D redeclared without dllimport attribute: "
@@ -5250,11 +5250,11 @@ merge_dllimport_decl_attributes (tree old, tree new_tree)
a = merge_attributes (DECL_ATTRIBUTES (old), DECL_ATTRIBUTES (new_tree));
- if (delete_dllimport_p)
+ if (delete_dllimport_p)
{
tree prev, t;
- const size_t attr_len = strlen ("dllimport");
-
+ const size_t attr_len = strlen ("dllimport");
+
/* Scan the list for dllimport and delete it. */
for (prev = NULL_TREE, t = a; t; prev = t, t = TREE_CHAIN (t))
{
@@ -5335,7 +5335,7 @@ handle_dll_attribute (tree * pnode, tree name, tree args, int flags,
any damage. */
if (is_dllimport)
{
- /* Honor any target-specific overrides. */
+ /* Honor any target-specific overrides. */
if (!targetm.valid_dllimport_attribute_p (node))
*no_add_attrs = true;
@@ -5343,7 +5343,7 @@ handle_dll_attribute (tree * pnode, tree name, tree args, int flags,
&& DECL_DECLARED_INLINE_P (node))
{
warning (OPT_Wattributes, "inline function %q+D declared as "
- " dllimport: attribute ignored", node);
+ " dllimport: attribute ignored", node);
*no_add_attrs = true;
}
/* Like MS, treat definition of dllimported variables and
@@ -5400,7 +5400,7 @@ handle_dll_attribute (tree * pnode, tree name, tree args, int flags,
if (DECL_VISIBILITY_SPECIFIED (node)
&& DECL_VISIBILITY (node) != VISIBILITY_DEFAULT)
error ("%qE implies default visibility, but %qD has already "
- "been declared with a different visibility",
+ "been declared with a different visibility",
name, node);
DECL_VISIBILITY (node) = VISIBILITY_DEFAULT;
DECL_VISIBILITY_SPECIFIED (node) = 1;
@@ -5486,7 +5486,7 @@ build_qualified_type (tree type, int type_quals)
else
/* T is its own canonical type. */
TYPE_CANONICAL (t) = t;
-
+
}
return t;
@@ -5501,7 +5501,7 @@ tree
build_distinct_type_copy (tree type)
{
tree t = copy_node (type);
-
+
TYPE_POINTER_TO (t) = 0;
TYPE_REFERENCE_TO (t) = 0;
@@ -5539,7 +5539,7 @@ build_variant_type_copy (tree type)
/* Since we're building a variant, assume that it is a non-semantic
variant. This also propagates TYPE_STRUCTURAL_EQUALITY_P. */
TYPE_CANONICAL (t) = TYPE_CANONICAL (type);
-
+
/* Add the new type to the chain of variants of TYPE. */
TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (m);
TYPE_NEXT_VARIANT (m) = t;
@@ -5646,7 +5646,7 @@ decl_init_priority_insert (tree decl, priority_type priority)
gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
h = decl_priority_info (decl);
h->init = priority;
-}
+}
/* Set the finalization priority for DECL to PRIORITY. */
@@ -5658,7 +5658,7 @@ decl_fini_priority_insert (tree decl, priority_type priority)
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
h = decl_priority_info (decl);
h->fini = priority;
-}
+}
/* Print out the statistics for the DECL_DEBUG_EXPR hash table. */
@@ -5684,7 +5684,7 @@ print_value_expr_statistics (void)
/* Lookup a debug expression for FROM, and return it if we find one. */
-tree
+tree
decl_debug_expr_lookup (tree from)
{
struct tree_map *h, in;
@@ -5711,11 +5711,11 @@ decl_debug_expr_insert (tree from, tree to)
h->to = to;
loc = htab_find_slot_with_hash (debug_expr_for_decl, h, h->hash, INSERT);
*(struct tree_map **) loc = h;
-}
+}
/* Lookup a value expression for FROM, and return it if we find one. */
-tree
+tree
decl_value_expr_lookup (tree from)
{
struct tree_map *h, in;
@@ -5782,7 +5782,7 @@ type_hash_eq (const void *va, const void *vb)
TYPE_ATTRIBUTES (b->type))
|| TYPE_ALIGN (a->type) != TYPE_ALIGN (b->type)
|| TYPE_MODE (a->type) != TYPE_MODE (b->type)
- || (TREE_CODE (a->type) != COMPLEX_TYPE
+ || (TREE_CODE (a->type) != COMPLEX_TYPE
&& TYPE_NAME (a->type) != TYPE_NAME (b->type)))
return 0;
@@ -6612,7 +6612,7 @@ iterative_hash_expr (const_tree t, hashval_t val)
else
{
gcc_assert (IS_EXPR_CODE_CLASS (tclass));
-
+
val = iterative_hash_object (code, val);
/* Don't hash the type, that can lead to having nodes which
@@ -6793,7 +6793,7 @@ build_reference_type_for_mode (tree to_type, enum machine_mode mode,
if (TYPE_STRUCTURAL_EQUALITY_P (to_type))
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (TYPE_CANONICAL (to_type) != to_type)
- TYPE_CANONICAL (t)
+ TYPE_CANONICAL (t)
= build_reference_type_for_mode (TYPE_CANONICAL (to_type),
mode, can_alias_all);
@@ -7027,7 +7027,7 @@ build_array_type (tree elt_type, tree index_type)
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (TYPE_CANONICAL (elt_type) != elt_type
|| (index_type && TYPE_CANONICAL (index_type) != index_type))
- TYPE_CANONICAL (t)
+ TYPE_CANONICAL (t)
= build_array_type (TYPE_CANONICAL (elt_type),
index_type ? TYPE_CANONICAL (index_type) : NULL);
}
@@ -7048,7 +7048,7 @@ strip_array_types (tree type)
}
/* Computes the canonical argument types from the argument type list
- ARGTYPES.
+ ARGTYPES.
Upon return, *ANY_STRUCTURAL_P will be true iff either it was true
on entry to this function, or if any of the ARGTYPES are
@@ -7062,14 +7062,14 @@ strip_array_types (tree type)
canonical argument list is unneeded (i.e., *ANY_STRUCTURAL_P is
true) or would not differ from ARGTYPES. */
-static tree
-maybe_canonicalize_argtypes(tree argtypes,
+static tree
+maybe_canonicalize_argtypes(tree argtypes,
bool *any_structural_p,
bool *any_noncanonical_p)
{
tree arg;
bool any_noncanonical_argtypes_p = false;
-
+
for (arg = argtypes; arg && !(*any_structural_p); arg = TREE_CHAIN (arg))
{
if (!TREE_VALUE (arg) || TREE_VALUE (arg) == error_mark_node)
@@ -7153,7 +7153,7 @@ build_function_type (tree value_type, tree arg_types)
/* Set up the canonical type. */
any_structural_p = TYPE_STRUCTURAL_EQUALITY_P (value_type);
any_noncanonical_p = TYPE_CANONICAL (value_type) != value_type;
- canon_argtypes = maybe_canonicalize_argtypes (arg_types,
+ canon_argtypes = maybe_canonicalize_argtypes (arg_types,
&any_structural_p,
&any_noncanonical_p);
if (any_structural_p)
@@ -7161,7 +7161,7 @@ build_function_type (tree value_type, tree arg_types)
else if (any_noncanonical_p)
TYPE_CANONICAL (t) = build_function_type (TYPE_CANONICAL (value_type),
canon_argtypes);
-
+
if (!COMPLETE_TYPE_P (t))
layout_type (t);
return t;
@@ -7227,8 +7227,8 @@ build_function_type_skip_args (tree orig_type, bitmap args_to_skip)
return new_type;
}
-/* Build variant of function type ORIG_TYPE skipping ARGS_TO_SKIP.
-
+/* Build variant of function type ORIG_TYPE skipping ARGS_TO_SKIP.
+
Arguments from DECL_ARGUMENTS list can't be removed now, since they are
linked by TREE_CHAIN directly. It is caller responsibility to eliminate
them when they are being duplicated (i.e. copy_arguments_for_versioning). */
@@ -7366,7 +7366,7 @@ build_method_type_directly (tree basetype,
if (any_structural_p)
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (any_noncanonical_p)
- TYPE_CANONICAL (t)
+ TYPE_CANONICAL (t)
= build_method_type_directly (TYPE_CANONICAL (basetype),
TYPE_CANONICAL (rettype),
canon_argtypes);
@@ -7422,7 +7422,7 @@ build_offset_type (tree basetype, tree type)
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (TYPE_CANONICAL (TYPE_MAIN_VARIANT (basetype)) != basetype
|| TYPE_CANONICAL (type) != type)
- TYPE_CANONICAL (t)
+ TYPE_CANONICAL (t)
= build_offset_type (TYPE_CANONICAL (TYPE_MAIN_VARIANT (basetype)),
TYPE_CANONICAL (type));
}
@@ -7459,7 +7459,7 @@ build_complex_type (tree component_type)
if (TYPE_STRUCTURAL_EQUALITY_P (component_type))
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (TYPE_CANONICAL (component_type) != component_type)
- TYPE_CANONICAL (t)
+ TYPE_CANONICAL (t)
= build_complex_type (TYPE_CANONICAL (component_type));
}
@@ -7888,7 +7888,7 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max)
}
}
- if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
+ if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
&& TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
mpz_set_double_int (max, tree_to_double_int (TYPE_MAX_VALUE (type)),
TYPE_UNSIGNED (type));
@@ -8337,7 +8337,7 @@ tree_check_failed (const_tree node, const char *file,
while ((code = va_arg (args, int)))
{
const char *prefix = length ? " or " : "expected ";
-
+
strcpy (tmp + length, prefix);
length += strlen (prefix);
strcpy (tmp + length, tree_code_name[code]);
@@ -8514,9 +8514,9 @@ static const char *ts_enum_names[] = {
whether CODE contains the tree structure identified by EN. */
void
-tree_contains_struct_check_failed (const_tree node,
+tree_contains_struct_check_failed (const_tree node,
const enum tree_node_structure_enum en,
- const char *file, int line,
+ const char *file, int line,
const char *function)
{
internal_error
@@ -8586,7 +8586,7 @@ make_vector_type (tree innertype, int nunits, enum machine_mode mode)
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (TYPE_CANONICAL (innertype) != innertype
|| mode != VOIDmode)
- TYPE_CANONICAL (t)
+ TYPE_CANONICAL (t)
= make_vector_type (TYPE_CANONICAL (innertype), nunits, VOIDmode);
layout_type (t);
@@ -8846,7 +8846,7 @@ build_common_tree_nodes_2 (int short_double)
/* Decimal float types. */
dfloat32_type_node = make_node (REAL_TYPE);
- TYPE_PRECISION (dfloat32_type_node) = DECIMAL32_TYPE_SIZE;
+ TYPE_PRECISION (dfloat32_type_node) = DECIMAL32_TYPE_SIZE;
layout_type (dfloat32_type_node);
SET_TYPE_MODE (dfloat32_type_node, SDmode);
dfloat32_ptr_type_node = build_pointer_type (dfloat32_type_node);
@@ -8858,7 +8858,7 @@ build_common_tree_nodes_2 (int short_double)
dfloat64_ptr_type_node = build_pointer_type (dfloat64_type_node);
dfloat128_type_node = make_node (REAL_TYPE);
- TYPE_PRECISION (dfloat128_type_node) = DECIMAL128_TYPE_SIZE;
+ TYPE_PRECISION (dfloat128_type_node) = DECIMAL128_TYPE_SIZE;
layout_type (dfloat128_type_node);
SET_TYPE_MODE (dfloat128_type_node, TDmode);
dfloat128_ptr_type_node = build_pointer_type (dfloat128_type_node);
@@ -8931,7 +8931,7 @@ build_common_tree_nodes_2 (int short_double)
declare the type to be __builtin_va_list. */
if (TREE_CODE (t) != RECORD_TYPE)
t = build_variant_type_copy (t);
-
+
va_list_type_node = t;
}
}
@@ -9119,7 +9119,7 @@ build_common_builtin_nodes (void)
/* Complex multiplication and division. These are handled as builtins
rather than optabs because emit_library_call_value doesn't support
- complex. Further, we can do slightly better with folding these
+ complex. Further, we can do slightly better with folding these
beasties if the real and complex parts of the arguments are separate. */
{
int mode;
@@ -9176,7 +9176,7 @@ tree
reconstruct_complex_type (tree type, tree bottom)
{
tree inner, outer;
-
+
if (TREE_CODE (type) == POINTER_TYPE)
{
inner = reconstruct_complex_type (TREE_TYPE (type), bottom);
@@ -9204,8 +9204,8 @@ reconstruct_complex_type (tree type, tree bottom)
inner = reconstruct_complex_type (TREE_TYPE (type), bottom);
/* The build_method_type_directly() routine prepends 'this' to argument list,
so we must compensate by getting rid of it. */
- outer
- = build_method_type_directly
+ outer
+ = build_method_type_directly
(TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (type))),
inner,
TREE_CHAIN (TYPE_ARG_TYPES (type)));
@@ -9363,7 +9363,7 @@ build_omp_clause (location_t loc, enum omp_clause_code code)
tree_node_counts[(int) omp_clause_kind]++;
tree_node_sizes[(int) omp_clause_kind] += size;
#endif
-
+
return t;
}
@@ -9695,7 +9695,7 @@ signed_or_unsigned_type_for (int unsignedp, tree type)
if (!INTEGRAL_TYPE_P (t) || TYPE_UNSIGNED (t) == unsignedp)
return t;
-
+
return lang_hooks.types.type_for_size (TYPE_PRECISION (t), unsignedp);
}
@@ -9841,7 +9841,7 @@ operand_equal_for_phi_arg_p (const_tree arg0, const_tree arg1)
}
/* Returns number of zeros at the end of binary representation of X.
-
+
??? Use ffs if available? */
tree
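A scalar sketch of the operation described above (zeros at the end of the binary representation, i.e. trailing zero bits); the routine here operates on and returns tree constants rather than plain integers.

/* Illustrative only: trailing-zero count on a plain unsigned value.  */
static int
example_num_ending_zeros (unsigned long x)
{
  int n = 0;
  if (x == 0)
    return (int) (sizeof x * 8);  /* every bit position is zero */
  while ((x & 1) == 0)
    {
      n++;
      x >>= 1;
    }
  return n;
}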
@@ -10179,7 +10179,7 @@ walk_tree_1 (tree *tp, walk_tree_fn func, void *data,
that are directly gimplified in gimplify_type_sizes in order for the
mark/copy-if-shared/unmark machinery of the gimplifier to work with
variable-sized types.
-
+
Note that DECLs get walked as part of processing the BIND_EXPR. */
if (TREE_CODE (DECL_EXPR_DECL (*tp)) == TYPE_DECL)
{
diff --git a/gcc/tree.def b/gcc/tree.def
index 01d9b3060de..60ac1ade865 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -1,6 +1,6 @@
/* This file contains the definitions and documentation for the
tree codes used in GCC.
- Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998, 2000, 2001, 2004, 2005,
+ Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998, 2000, 2001, 2004, 2005,
2006, 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GCC.
@@ -115,7 +115,7 @@ DEFTREECODE (BLOCK, "block", tcc_exceptional, 0)
macros in tree.h. Changing the order will degrade the speed of the
compiler. OFFSET_TYPE, ENUMERAL_TYPE, BOOLEAN_TYPE, INTEGER_TYPE,
REAL_TYPE, POINTER_TYPE. */
-
+
/* An offset is a pointer relative to an object.
The TREE_TYPE field is the type of the object at the offset.
The TYPE_OFFSET_BASETYPE points to the node for the type of object
@@ -157,7 +157,7 @@ DEFTREECODE (REAL_TYPE, "real_type", tcc_type, 0)
macros in tree.h. Changing the order will degrade the speed of the
compiler. POINTER_TYPE, REFERENCE_TYPE. Note that this range
overlaps the previous range of ordered types. */
-
+
/* All pointer-to-x types have code POINTER_TYPE.
The TREE_TYPE points to the node for the type pointed to. */
DEFTREECODE (POINTER_TYPE, "pointer_type", tcc_type, 0)
@@ -174,7 +174,7 @@ DEFTREECODE (REFERENCE_TYPE, "reference_type", tcc_type, 0)
/* The ordering of the following codes is optimized for the checking
macros in tree.h. Changing the order will degrade the speed of the
compiler. COMPLEX_TYPE, VECTOR_TYPE, ARRAY_TYPE. */
-
+
/* Complex number types. The TREE_TYPE field is the data type
of the real and imaginary parts. It must be of scalar
arithmetic type, not including pointer type. */
@@ -189,7 +189,7 @@ DEFTREECODE (VECTOR_TYPE, "vector_type", tcc_type, 0)
macros in tree.h. Changing the order will degrade the speed of the
compiler. ARRAY_TYPE, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE.
Note that this range overlaps the previous range. */
-
+
/* Types of arrays. Special fields:
TREE_TYPE Type of an array element.
TYPE_DOMAIN Type to index by.
@@ -226,7 +226,7 @@ DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", tcc_type, 0)
/* The ordering of the following codes is optimized for the checking
macros in tree.h. Changing the order will degrade the speed of the
compiler. VOID_TYPE, FUNCTION_TYPE, METHOD_TYPE. */
-
+
/* The void type in C */
DEFTREECODE (VOID_TYPE, "void_type", tcc_type, 0)
@@ -494,7 +494,7 @@ DEFTREECODE (TARGET_EXPR, "target_expr", tcc_expression, 4)
Operand 1 must have the same type as the entire expression, unless
it unconditionally throws an exception, in which case it should
have VOID_TYPE. The same constraints apply to operand 2. The
- condition in operand 0 must be of integral type.
+ condition in operand 0 must be of integral type.
In cfg gimple, if you do not have a selection expression, operands
1 and 2 are NULL. The operands are then taken from the cfg edges. */
@@ -921,7 +921,7 @@ DEFTREECODE (STATEMENT_LIST, "statement_list", tcc_exceptional, 0)
/* Predicate assertion. Artificial expression generated by the optimizers
to keep track of predicate values. This expression may only appear on
the RHS of assignments.
-
+
Given X = ASSERT_EXPR <Y, EXPR>, the optimizers can infer
two things:
@@ -954,8 +954,8 @@ DEFTREECODE (WITH_SIZE_EXPR, "with_size_expr", tcc_expression, 2)
If OFF > 0, the last VS - OFF elements of vector OP0 are concatenated to
the first OFF elements of the vector OP1.
If OFF == 0, then the returned vector is OP1.
- On different targets OFF may take different forms; It can be an address, in
- which case its low log2(VS)-1 bits define the offset, or it can be a mask
+ On different targets OFF may take different forms; it can be an address, in
+ which case its low log2(VS)-1 bits define the offset, or it can be a mask
generated by the builtin targetm.vectorize.mask_for_load_builtin_decl. */
DEFTREECODE (REALIGN_LOAD_EXPR, "realign_load", tcc_expression, 3)
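A scalar model of the semantics spelled out above, assuming a 4-element vector (VS == 4) and treating OFF as a plain element count rather than an address or mask:

/* Illustrative only: with OFF > 0 the result is the last VS - OFF elements
   of OP0 followed by the first OFF elements of OP1; with OFF == 0 it is OP1.  */
static void
example_realign_load (int dst[4], const int v0[4], const int v1[4], int off)
{
  int vs = 4, i;
  if (off == 0)
    {
      for (i = 0; i < vs; i++)
        dst[i] = v1[i];
      return;
    }
  for (i = 0; i < vs; i++)
    dst[i] = i < vs - off ? v0[off + i] : v1[i - (vs - off)];
}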
@@ -1046,12 +1046,12 @@ DEFTREECODE (OMP_ATOMIC, "omp_atomic", tcc_statement, 2)
/* OpenMP clauses. */
DEFTREECODE (OMP_CLAUSE, "omp_clause", tcc_exceptional, 0)
-/* Reduction operations.
+/* Reduction operations.
Operations that take a vector of elements and "reduce" it to a scalar
result (e.g. summing the elements of the vector, finding the minimum over
the vector elements, etc).
Operand 0 is a vector; the first element in the vector has the result.
- Operand 1 is a vector. */
+ Operand 1 is a vector. */
DEFTREECODE (REDUC_MAX_EXPR, "reduc_max_expr", tcc_unary, 1)
DEFTREECODE (REDUC_MIN_EXPR, "reduc_min_expr", tcc_unary, 1)
DEFTREECODE (REDUC_PLUS_EXPR, "reduc_plus_expr", tcc_unary, 1)
@@ -1062,7 +1062,7 @@ DEFTREECODE (REDUC_PLUS_EXPR, "reduc_plus_expr", tcc_unary, 1)
twice the size of t1. DOT_PROD_EXPR(arg1,arg2,arg3) is equivalent to:
tmp = WIDEN_MULT_EXPR(arg1, arg2);
arg3 = PLUS_EXPR (tmp, arg3);
- or:
+ or:
tmp = WIDEN_MULT_EXPR(arg1, arg2);
arg3 = WIDEN_SUM_EXPR (tmp, arg3); */
DEFTREECODE (DOT_PROD_EXPR, "dot_prod_expr", tcc_expression, 3)
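As a concrete reading of the equivalence given above, a scalar model of the widening multiply-accumulate DOT_PROD_EXPR performs, with short standing in for t1 and long for t2:

/* Illustrative only: widen each product to the accumulator type and sum.  */
static long
example_dot_prod (const short a[4], const short b[4], long acc)
{
  int i;
  for (i = 0; i < 4; i++)
    acc += (long) a[i] * (long) b[i];
  return acc;
}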
@@ -1075,7 +1075,7 @@ DEFTREECODE (DOT_PROD_EXPR, "dot_prod_expr", tcc_expression, 3)
the first argument from type t1 to type t2, and then summing it
with the second argument. */
DEFTREECODE (WIDEN_SUM_EXPR, "widen_sum_expr", tcc_binary, 2)
-
+
/* Widening multiplication.
The two arguments are of type t1.
The result is of type t2, such that t2 is at least twice
diff --git a/gcc/tree.h b/gcc/tree.h
index 6fbac3aa2c7..8656a3c672b 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -1254,7 +1254,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
#define TREE_THIS_NOTRAP(NODE) ((NODE)->base.nothrow_flag)
/* In a VAR_DECL, PARM_DECL or FIELD_DECL, or any kind of ..._REF node,
- nonzero means it may not be the lhs of an assignment.
+ nonzero means it may not be the lhs of an assignment.
Nonzero in a FUNCTION_DECL means this function should be treated
as "const" function (can only read its arguments). */
#define TREE_READONLY(NODE) (NON_TYPE_CHECK (NODE)->base.readonly_flag)
@@ -2077,7 +2077,7 @@ extern enum machine_mode vector_type_mode (const_tree);
/* The "canonical" type for this type node, which can be used to
compare the type for equality with another type. If two types are
equal (based on the semantics of the language), then they will have
- equivalent TYPE_CANONICAL entries.
+ equivalent TYPE_CANONICAL entries.
As a special case, if TYPE_CANONICAL is NULL_TREE, then it cannot
be used for comparison against other types. Instead, the type is
@@ -2912,12 +2912,12 @@ struct GTY(()) tree_parm_decl {
/* A replaceable function is one which may be replaced at link-time
with an entirely different definition, provided that the
replacement has the same type. For example, functions declared
- with __attribute__((weak)) on most systems are replaceable.
+ with __attribute__((weak)) on most systems are replaceable.
COMDAT functions are not replaceable, since all definitions of the
function must be equivalent. It is important that COMDAT functions
not be treated as replaceable so that use of C++ template
- instantiations is not penalized.
+ instantiations is not penalized.
For example, DECL_REPLACEABLE is used to determine whether or not a
function (including a template instantiation) which is not
@@ -3048,7 +3048,7 @@ extern void decl_fini_priority_insert (tree, priority_type);
(VAR_DECL_CHECK (NODE)->decl_with_vis.init_priority_p)
/* For a VAR_DECL or FUNCTION_DECL the initialization priority of
- NODE. */
+ NODE. */
#define DECL_INIT_PRIORITY(NODE) \
(decl_init_priority_lookup (NODE))
/* Set the initialization priority for NODE to VAL. */
@@ -4952,7 +4952,7 @@ extern tree fold_call_stmt (gimple, bool);
extern tree gimple_fold_builtin_snprintf_chk (gimple, tree, enum built_in_function);
extern tree make_range (tree, int *, tree *, tree *, bool *);
extern tree build_range_check (location_t, tree, tree, int, tree, tree);
-extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int,
+extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int,
tree, tree);
extern void set_builtin_user_assembler_name (tree decl, const char *asmspec);
@@ -5332,7 +5332,7 @@ void init_inline_once (void);
/* In ipa-reference.c. Used for parsing attributes of asm code. */
extern GTY(()) tree memory_identifier_string;
-/* Compute the number of operands in an expression node NODE. For
+/* Compute the number of operands in an expression node NODE. For
tcc_vl_exp nodes like CALL_EXPRs, this is stored in the node itself,
otherwise it is looked up from the node's code. */
static inline int
diff --git a/gcc/treestruct.def b/gcc/treestruct.def
index c56524d708c..af7c80956b3 100644
--- a/gcc/treestruct.def
+++ b/gcc/treestruct.def
@@ -1,5 +1,5 @@
/* This file contains the definitions for the tree structure
- enumeration used in GCC.
+ enumeration used in GCC.
Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
@@ -19,12 +19,12 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* The format of this file is
+/* The format of this file is
DEFTREESTRUCT(enumeration value, printable name).
Each enumeration value should correspond with a single member of
- union tree_node.
+ union tree_node.
These enumerator values are used in order to distinguish members of
union tree_node for garbage collection purposes, as well as
diff --git a/gcc/unwind-compat.c b/gcc/unwind-compat.c
index d9e41d1a72c..5b41f24688d 100644
--- a/gcc/unwind-compat.c
+++ b/gcc/unwind-compat.c
@@ -192,7 +192,7 @@ extern void __libunwind_Unwind_SetGR
void
_Unwind_SetGR (struct _Unwind_Context *context, int index,
- _Unwind_Word val)
+ _Unwind_Word val)
{
__libunwind_Unwind_SetGR (context, index, val);
}
diff --git a/gcc/unwind-dw2-fde-glibc.c b/gcc/unwind-dw2-fde-glibc.c
index ad6ecd50d59..b8a7312f501 100644
--- a/gcc/unwind-dw2-fde-glibc.c
+++ b/gcc/unwind-dw2-fde-glibc.c
@@ -204,13 +204,13 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr)
}
goto found;
}
-
+
last_cache_entry = cache_entry;
/* Exit early if we found an unused entry. */
if ((cache_entry->pc_low | cache_entry->pc_high) == 0)
break;
if (cache_entry->link != NULL)
- prev_cache_entry = cache_entry;
+ prev_cache_entry = cache_entry;
}
}
else
@@ -236,7 +236,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr)
if (size < offsetof (struct dl_phdr_info, dlpi_phnum)
+ sizeof (info->dlpi_phnum))
return -1;
-
+
/* See if PC falls into one of the loaded segments. Find the eh_frame
segment at the same time. */
for (n = info->dlpi_phnum; --n >= 0; phdr++)
@@ -257,7 +257,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr)
else if (phdr->p_type == PT_DYNAMIC)
p_dynamic = phdr;
}
-
+
if (!match)
return 0;
@@ -397,7 +397,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr)
{
_Unwind_Ptr func;
unsigned int encoding = get_fde_encoding (data->ret);
-
+
read_encoded_value_with_base (encoding,
base_from_cb_data (encoding, data),
data->ret->pc_begin, &func);
diff --git a/gcc/unwind-dw2.c b/gcc/unwind-dw2.c
index 2208f17dc1d..28373c20bd8 100644
--- a/gcc/unwind-dw2.c
+++ b/gcc/unwind-dw2.c
@@ -404,7 +404,7 @@ extract_cie_info (const struct dwarf_cie *cie, struct _Unwind_Context *context,
else if (aug[0] == 'P')
{
_Unwind_Ptr personality;
-
+
p = read_encoded_value (context, *p, p + 1, &personality);
fs->personality = (_Unwind_Personality_Fn) personality;
aug += 1;
@@ -672,7 +672,7 @@ execute_stack_op (const unsigned char *op_ptr, const unsigned char *op_end,
/* Unary operations. */
gcc_assert (stack_elt);
stack_elt -= 1;
-
+
result = stack[stack_elt];
switch (op)
@@ -749,7 +749,7 @@ execute_stack_op (const unsigned char *op_ptr, const unsigned char *op_end,
_Unwind_Word first, second;
gcc_assert (stack_elt >= 2);
stack_elt -= 2;
-
+
second = stack[stack_elt];
first = stack[stack_elt + 1];
@@ -822,7 +822,7 @@ execute_stack_op (const unsigned char *op_ptr, const unsigned char *op_end,
case DW_OP_bra:
gcc_assert (stack_elt);
stack_elt -= 1;
-
+
offset = read_2s (op_ptr);
op_ptr += 2;
if (stack[stack_elt] != 0)
@@ -902,7 +902,7 @@ execute_cfa_program (const unsigned char *insn_ptr,
case DW_CFA_set_loc:
{
_Unwind_Ptr pc;
-
+
insn_ptr = read_encoded_value (context, fs->fde_encoding,
insn_ptr, &pc);
fs->pc = (void *) pc;
@@ -1164,7 +1164,7 @@ uw_frame_state_for (struct _Unwind_Context *context, _Unwind_FrameState *fs)
if (fs->lsda_encoding != DW_EH_PE_omit)
{
_Unwind_Ptr lsda;
-
+
aug = read_encoded_value (context, fs->lsda_encoding, aug, &lsda);
context->lsda = (void *) lsda;
}
@@ -1248,7 +1248,7 @@ _Unwind_SetSpColumn (struct _Unwind_Context *context, void *cfa,
_Unwind_SpTmp *tmp_sp)
{
int size = dwarf_reg_size_table[__builtin_dwarf_sp_column ()];
-
+
if (size == sizeof(_Unwind_Ptr))
tmp_sp->ptr = (_Unwind_Ptr) cfa;
else
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index 1da60e1a02a..5b92f744860 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -617,7 +617,7 @@ gimple_divmod_fixed_value (gimple stmt, tree value, int prob, gcov_type count,
e13->count = all - count;
remove_edge (e23);
-
+
e24 = make_edge (bb2, bb4, EDGE_FALLTHRU);
e24->probability = REG_BR_PROB_BASE;
e24->count = count;
@@ -649,7 +649,7 @@ gimple_divmod_fixed_value_transform (gimple_stmt_iterator *si)
return false;
code = gimple_assign_rhs_code (stmt);
-
+
if (code != TRUNC_DIV_EXPR && code != TRUNC_MOD_EXPR)
return false;
@@ -703,7 +703,7 @@ gimple_divmod_fixed_value_transform (gimple_stmt_iterator *si)
/* Generate code for transformation 2 (with parent gimple assign STMT and
probability of taking the optimal path PROB, which is equivalent to COUNT/ALL
- within roundoff error). This generates the result into a temp and returns
+ within roundoff error). This generates the result into a temp and returns
the temp; it does not replace or alter the original STMT. */
static tree
gimple_mod_pow2 (gimple stmt, int prob, gcov_type count, gcov_type all)
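A hedged source-level sketch of what this mod-by-power-of-2 specialization amounts to, assuming the profiled common case is a power-of-two divisor; as the comment above says, the generated result goes into a temp and the original stmt is kept as the fallback.

/* Illustrative only: when profiling shows B is usually a power of two,
   the modulo can be done with a mask on that path.  */
static unsigned
example_mod_pow2 (unsigned a, unsigned b)
{
  if (b != 0 && (b & (b - 1)) == 0)  /* b is a power of two: hot path */
    return a & (b - 1);
  return a % b;                      /* cold fallback */
}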
@@ -772,7 +772,7 @@ gimple_mod_pow2 (gimple stmt, int prob, gcov_type count, gcov_type all)
e13->count = all - count;
remove_edge (e23);
-
+
e24 = make_edge (bb2, bb4, EDGE_FALLTHRU);
e24->probability = REG_BR_PROB_BASE;
e24->count = count;
@@ -803,7 +803,7 @@ gimple_mod_pow2_value_transform (gimple_stmt_iterator *si)
return false;
code = gimple_assign_rhs_code (stmt);
-
+
if (code != TRUNC_MOD_EXPR || !TYPE_UNSIGNED (lhs_type))
return false;
@@ -851,8 +851,8 @@ gimple_mod_pow2_value_transform (gimple_stmt_iterator *si)
NCOUNTS the number of cases to support. Currently only NCOUNTS==0 or 1 is
supported and this is built into this interface. The probabilities of taking
the optimal paths are PROB1 and PROB2, which are equivalent to COUNT1/ALL and
- COUNT2/ALL respectively within roundoff error). This generates the
- result into a temp and returns the temp; it does not replace or alter
+ COUNT2/ALL respectively within roundoff error). This generates the
+ result into a temp and returns the temp; it does not replace or alter
the original STMT. */
/* FIXME: Generalize the interface to handle NCOUNTS > 1. */
@@ -911,7 +911,7 @@ gimple_mod_subtract (gimple stmt, int prob1, int prob2, int ncounts,
e12 = split_block (bb, bb1end);
bb2 = e12->dest;
bb2->count = all - count1;
-
+
if (ncounts) /* Assumed to be 0 or 1. */
{
e23 = split_block (bb2, bb2end);
@@ -974,7 +974,7 @@ gimple_mod_subtract_transform (gimple_stmt_iterator *si)
return false;
code = gimple_assign_rhs_code (stmt);
-
+
if (code != TRUNC_MOD_EXPR || !TYPE_UNSIGNED (lhs_type))
return false;
@@ -1051,7 +1051,7 @@ static struct cgraph_node** pid_map = NULL;
/* Initialize map of pids (pid -> cgraph node) */
-static void
+static void
init_pid_map (void)
{
struct cgraph_node *n;
@@ -1087,7 +1087,7 @@ find_func_by_pid (int pid)
*/
static gimple
-gimple_ic (gimple icall_stmt, struct cgraph_node *direct_call,
+gimple_ic (gimple icall_stmt, struct cgraph_node *direct_call,
int prob, gcov_type count, gcov_type all)
{
gimple dcall_stmt, load_stmt, cond_stmt;
@@ -1107,7 +1107,7 @@ gimple_ic (gimple icall_stmt, struct cgraph_node *direct_call,
load_stmt = gimple_build_assign (tmpv, tmp);
gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
- tmp = fold_convert (optype, build_addr (direct_call->decl,
+ tmp = fold_convert (optype, build_addr (direct_call->decl,
current_function_decl));
load_stmt = gimple_build_assign (tmp1, tmp);
gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
@@ -1142,7 +1142,7 @@ gimple_ic (gimple icall_stmt, struct cgraph_node *direct_call,
e_ci->count = all - count;
remove_edge (e_di);
-
+
e_dj = make_edge (dcall_bb, join_bb, EDGE_FALLTHRU);
e_dj->probability = REG_BR_PROB_BASE;
e_dj->count = count;
@@ -1185,7 +1185,7 @@ gimple_ic_transform (gimple stmt)
tree callee;
gimple modify;
struct cgraph_node *direct_call;
-
+
if (gimple_code (stmt) != GIMPLE_CALL)
return false;
@@ -1207,7 +1207,7 @@ gimple_ic_transform (gimple stmt)
return false;
bb_all = gimple_bb (stmt)->count;
- /* The order of CHECK_COUNTER calls is important -
+ /* The order of CHECK_COUNTER calls is important -
since check_counter can correct the third parameter
and we want to make count <= all <= bb_all. */
if ( check_counter (stmt, "ic", &all, &bb_all, bb_all)
@@ -1276,7 +1276,7 @@ interesting_stringop_to_profile_p (tree fndecl, gimple call, int *size_arg)
}
/* Convert stringop (..., vcall_size)
- into
+ into
if (vcall_size == icall_size)
stringop (..., icall_size);
else
@@ -1343,7 +1343,7 @@ gimple_stringop_fixed_value (gimple vcall_stmt, tree icall_size, int prob,
e_cv->count = all - count;
remove_edge (e_iv);
-
+
e_ij = make_edge (icall_bb, join_bb, EDGE_FALLTHRU);
e_ij->probability = REG_BR_PROB_BASE;
e_ij->count = count;
@@ -1442,7 +1442,7 @@ gimple_stringops_transform (gimple_stmt_iterator *gsi)
print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
}
gimple_stringop_fixed_value (stmt, tree_val, prob, count, all);
-
+
return true;
}
@@ -1501,7 +1501,7 @@ struct value_prof_hooks {
/* Identify and exploit properties of values that are hard to analyze
statically. See value-prof.c for more detail. */
- bool (*value_profile_transformations) (void);
+ bool (*value_profile_transformations) (void);
};
/* Find values inside STMT for that we want to measure histograms for
@@ -1562,8 +1562,8 @@ gimple_divmod_values_to_profile (gimple stmt, histogram_values *values)
}
}
-/* Find calls inside STMT for that we want to measure histograms for
- indirect/virtual call optimization. */
+/* Find calls inside STMT for that we want to measure histograms for
+ indirect/virtual call optimization. */
static void
gimple_indirect_call_to_profile (gimple stmt, histogram_values *values)
@@ -1578,7 +1578,7 @@ gimple_indirect_call_to_profile (gimple stmt, histogram_values *values)
VEC_reserve (histogram_value, heap, *values, 3);
- VEC_quick_push (histogram_value, *values,
+ VEC_quick_push (histogram_value, *values,
gimple_alloc_histogram_value (cfun, HIST_TYPE_INDIR_CALL,
stmt, callee));
@@ -1650,7 +1650,7 @@ gimple_find_values_to_profile (histogram_values *values)
FOR_EACH_BB (bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
gimple_values_to_profile (gsi_stmt (gsi), values);
-
+
for (i = 0; VEC_iterate (histogram_value, *values, i, hist); i++)
{
switch (hist->type)
diff --git a/gcc/value-prof.h b/gcc/value-prof.h
index fc914adcd61..907cd785600 100644
--- a/gcc/value-prof.h
+++ b/gcc/value-prof.h
@@ -30,7 +30,7 @@ enum hist_type
always constant. */
HIST_TYPE_CONST_DELTA, /* Tries to identify the (almost) always constant
difference between two evaluations of a value. */
- HIST_TYPE_INDIR_CALL, /* Tries to identify the function that is (almost)
+ HIST_TYPE_INDIR_CALL, /* Tries to identify the function that is (almost)
called in indirect call */
HIST_TYPE_AVERAGE, /* Compute average value (sum of all values). */
HIST_TYPE_IOR /* Used to compute expected alignment. */
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 5cc8ced3956..8267df825ee 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -407,12 +407,12 @@ static int vars_copy_1 (void **, void *);
static void vars_copy (htab_t, htab_t);
static tree var_debug_decl (tree);
static void var_reg_set (dataflow_set *, rtx, enum var_init_status, rtx);
-static void var_reg_delete_and_set (dataflow_set *, rtx, bool,
+static void var_reg_delete_and_set (dataflow_set *, rtx, bool,
enum var_init_status, rtx);
static void var_reg_delete (dataflow_set *, rtx, bool);
static void var_regno_delete (dataflow_set *, int);
static void var_mem_set (dataflow_set *, rtx, enum var_init_status, rtx);
-static void var_mem_delete_and_set (dataflow_set *, rtx, bool,
+static void var_mem_delete_and_set (dataflow_set *, rtx, bool,
enum var_init_status, rtx);
static void var_mem_delete (dataflow_set *, rtx, bool);
@@ -521,7 +521,7 @@ stack_adjust_offset_pre_post (rtx pattern, HOST_WIDE_INT *pre,
/* We handle only adjustments by constant amount. */
gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS &&
CONST_INT_P (val));
-
+
if (code == PRE_MODIFY)
*pre -= INTVAL (val);
else
@@ -1367,7 +1367,7 @@ get_init_value (dataflow_set *set, rtx loc, decl_or_value dv)
part. */
static void
-var_reg_delete_and_set (dataflow_set *set, rtx loc, bool modify,
+var_reg_delete_and_set (dataflow_set *set, rtx loc, bool modify,
enum var_init_status initialized, rtx set_src)
{
tree decl = REG_EXPR (loc);
@@ -1465,7 +1465,7 @@ var_mem_decl_set (dataflow_set *set, rtx loc, enum var_init_status initialized,
Adjust the address first if it is stack pointer based. */
static void
-var_mem_set (dataflow_set *set, rtx loc, enum var_init_status initialized,
+var_mem_set (dataflow_set *set, rtx loc, enum var_init_status initialized,
rtx set_src)
{
tree decl = MEM_EXPR (loc);
@@ -1483,7 +1483,7 @@ var_mem_set (dataflow_set *set, rtx loc, enum var_init_status initialized,
Adjust the address first if it is stack pointer based. */
static void
-var_mem_delete_and_set (dataflow_set *set, rtx loc, bool modify,
+var_mem_delete_and_set (dataflow_set *set, rtx loc, bool modify,
enum var_init_status initialized, rtx set_src)
{
tree decl = MEM_EXPR (loc);
@@ -1687,7 +1687,7 @@ val_resolve (dataflow_set *set, rtx val, rtx loc, rtx insn)
VAR_INIT_STATUS_INITIALIZED, NULL_RTX, INSERT);
}
-/* Initialize dataflow set SET to be empty.
+/* Initialize dataflow set SET to be empty.
VARS_SIZE is the initial size of hash table VARS. */
static void
@@ -1758,7 +1758,7 @@ variable_union_info_cmp_pos (const void *n1, const void *n2)
if (i1->pos != i2->pos)
return i1->pos - i2->pos;
-
+
return (i1->pos_dst - i2->pos_dst);
}
@@ -4128,8 +4128,8 @@ track_expr_p (tree expr, bool need_rtl)
decl_rtl = DECL_RTL_IF_SET (expr);
if (!decl_rtl && need_rtl)
return 0;
-
- /* If this expression is really a debug alias of some other declaration, we
+
+ /* If this expression is really a debug alias of some other declaration, we
don't need to track this expression if the ultimate declaration is
ignored. */
realdecl = expr;
@@ -4143,7 +4143,7 @@ track_expr_p (tree expr, bool need_rtl)
}
/* Do not track EXPR if REALDECL it should be ignored for debugging
- purposes. */
+ purposes. */
if (DECL_IGNORED_P (realdecl))
return 0;
@@ -5116,14 +5116,14 @@ find_src_set_src (dataflow_set *set, rtx src)
{
found = false;
for (i = 0; i < var->n_var_parts && !found; i++)
- for (nextp = var->var_part[i].loc_chain; nextp && !found;
+ for (nextp = var->var_part[i].loc_chain; nextp && !found;
nextp = nextp->next)
if (rtx_equal_p (nextp->loc, src))
{
set_src = nextp->set_src;
found = true;
}
-
+
}
}
@@ -6513,7 +6513,7 @@ emit_note_insn_var_location (void **varp, void *data)
= gen_rtx_EXPR_LIST (VOIDmode, loc[0], GEN_INT (offsets[0]));
NOTE_VAR_LOCATION (note) = gen_rtx_VAR_LOCATION (VOIDmode, decl,
- expr_list,
+ expr_list,
(int) initialized);
}
else if (n_var_parts)
@@ -6527,7 +6527,7 @@ emit_note_insn_var_location (void **varp, void *data)
parallel = gen_rtx_PARALLEL (VOIDmode,
gen_rtvec_v (n_var_parts, loc));
NOTE_VAR_LOCATION (note) = gen_rtx_VAR_LOCATION (VOIDmode, decl,
- parallel,
+ parallel,
(int) initialized);
}
@@ -7095,7 +7095,7 @@ static void
vt_add_function_parameters (void)
{
tree parm;
-
+
for (parm = DECL_ARGUMENTS (current_function_decl);
parm; parm = TREE_CHAIN (parm))
{
diff --git a/gcc/varasm.c b/gcc/varasm.c
index c9953d128da..af50b5b2d18 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -215,7 +215,7 @@ prefix_name (const char *prefix, tree name)
unsigned plen = strlen (prefix);
unsigned nlen = strlen (IDENTIFIER_POINTER (name));
char *toname = (char *) alloca (plen + nlen + 1);
-
+
memcpy (toname, prefix, plen);
memcpy (toname + plen, IDENTIFIER_POINTER (name), nlen + 1);
@@ -238,19 +238,19 @@ tree
default_emutls_var_fields (tree type, tree *name ATTRIBUTE_UNUSED)
{
tree word_type_node, field, next_field;
-
+
field = build_decl (UNKNOWN_LOCATION,
FIELD_DECL, get_identifier ("__templ"), ptr_type_node);
DECL_CONTEXT (field) = type;
next_field = field;
-
+
field = build_decl (UNKNOWN_LOCATION,
FIELD_DECL, get_identifier ("__offset"),
ptr_type_node);
DECL_CONTEXT (field) = type;
TREE_CHAIN (field) = next_field;
next_field = field;
-
+
word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
field = build_decl (UNKNOWN_LOCATION,
FIELD_DECL, get_identifier ("__align"),
@@ -258,7 +258,7 @@ default_emutls_var_fields (tree type, tree *name ATTRIBUTE_UNUSED)
DECL_CONTEXT (field) = type;
TREE_CHAIN (field) = next_field;
next_field = field;
-
+
field = build_decl (UNKNOWN_LOCATION,
FIELD_DECL, get_identifier ("__size"), word_type_node);
DECL_CONTEXT (field) = type;
@@ -300,7 +300,7 @@ static tree
get_emutls_init_templ_addr (tree decl)
{
tree name, to;
-
+
if (targetm.emutls.register_common && !DECL_INITIAL (decl)
&& !DECL_SECTION_NAME (decl))
return null_pointer_node;
@@ -324,7 +324,7 @@ get_emutls_init_templ_addr (tree decl)
DECL_IGNORED_P (to) = 1;
DECL_CONTEXT (to) = DECL_CONTEXT (decl);
DECL_SECTION_NAME (to) = DECL_SECTION_NAME (decl);
-
+
DECL_WEAK (to) = DECL_WEAK (decl);
if (DECL_ONE_ONLY (decl))
{
@@ -464,7 +464,7 @@ emutls_finish (void)
htab_traverse_noresize (emutls_htab, emutls_common_1, &body);
if (body == NULL_TREE)
return;
-
+
cgraph_build_static_cdtor ('I', body, DEFAULT_INIT_PRIORITY);
}
}
@@ -1509,7 +1509,7 @@ default_stabs_asm_out_destructor (rtx symbol ATTRIBUTE_UNUSED,
}
/* Write the address of the entity given by SYMBOL to SEC. */
-void
+void
assemble_addr_to_section (rtx symbol, section *sec)
{
switch_to_section (sec);
@@ -1540,7 +1540,7 @@ default_named_section_asm_out_destructor (rtx symbol, int priority)
section *sec;
if (priority != DEFAULT_INIT_PRIORITY)
- sec = get_cdtor_priority_section (priority,
+ sec = get_cdtor_priority_section (priority,
/*constructor_p=*/false);
else
sec = get_section (".dtors", SECTION_WRITE, NULL);
@@ -1580,7 +1580,7 @@ default_named_section_asm_out_constructor (rtx symbol, int priority)
section *sec;
if (priority != DEFAULT_INIT_PRIORITY)
- sec = get_cdtor_priority_section (priority,
+ sec = get_cdtor_priority_section (priority,
/*constructor_p=*/true);
else
sec = get_section (".ctors", SECTION_WRITE, NULL);
@@ -2032,27 +2032,27 @@ default_emutls_var_init (tree to, tree decl, tree proxy)
constructor_elt *elt;
tree type = TREE_TYPE (to);
tree field = TYPE_FIELDS (type);
-
+
elt = VEC_quick_push (constructor_elt, v, NULL);
elt->index = field;
elt->value = fold_convert (TREE_TYPE (field), DECL_SIZE_UNIT (decl));
-
+
elt = VEC_quick_push (constructor_elt, v, NULL);
field = TREE_CHAIN (field);
elt->index = field;
elt->value = build_int_cst (TREE_TYPE (field),
DECL_ALIGN_UNIT (decl));
-
+
elt = VEC_quick_push (constructor_elt, v, NULL);
field = TREE_CHAIN (field);
elt->index = field;
elt->value = null_pointer_node;
-
+
elt = VEC_quick_push (constructor_elt, v, NULL);
field = TREE_CHAIN (field);
elt->index = field;
elt->value = proxy;
-
+
return build_constructor (type, v);
}
@@ -2806,7 +2806,7 @@ static void maybe_output_constant_def_contents (struct constant_descriptor_tree
/* Constant pool accessor function. */
-htab_t
+htab_t
constant_pool_htab (void)
{
return const_desc_htab;
@@ -4498,7 +4498,7 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align)
else if (TREE_CODE (exp) == INTEGER_CST)
exp = build_int_cst_wide (saved_type, TREE_INT_CST_LOW (exp),
TREE_INT_CST_HIGH (exp));
-
+
}
/* Eliminate any conversions since we'll be outputting the underlying
@@ -4722,7 +4722,7 @@ output_constructor_array_range (oc_local_state *local)
unsigned int align2
= min_align (local->align, fieldsize * BITS_PER_UNIT);
-
+
for (index = lo_index; index <= hi_index; index++)
{
/* Output the element's initial value. */
@@ -4730,7 +4730,7 @@ output_constructor_array_range (oc_local_state *local)
assemble_zeros (fieldsize);
else
output_constant (local->val, fieldsize, align2);
-
+
/* Count its size. */
local->total_bytes += fieldsize;
}
@@ -4751,12 +4751,12 @@ output_constructor_regular_field (oc_local_state *local)
if (local->index != NULL_TREE)
fieldpos = (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (local->val)), 1)
- * ((tree_low_cst (local->index, 0)
+ * ((tree_low_cst (local->index, 0)
- tree_low_cst (local->min_index, 0))));
- else if (local->field != NULL_TREE)
+ else if (local->field != NULL_TREE)
fieldpos = int_byte_position (local->field);
else
- fieldpos = 0;
+ fieldpos = 0;
/* Output any buffered-up bit-fields preceding this element. */
if (local->byte_buffer_in_use)
@@ -4765,7 +4765,7 @@ output_constructor_regular_field (oc_local_state *local)
local->total_bytes++;
local->byte_buffer_in_use = false;
}
-
+
/* Advance to offset of this element.
Note no alignment needed in an array, since that is guaranteed
if each element has the proper size. */
@@ -4776,7 +4776,7 @@ output_constructor_regular_field (oc_local_state *local)
assemble_zeros (fieldpos - local->total_bytes);
local->total_bytes = fieldpos;
}
-
+
/* Find the alignment of this element. */
align2 = min_align (local->align, BITS_PER_UNIT * fieldpos);
@@ -4784,7 +4784,7 @@ output_constructor_regular_field (oc_local_state *local)
if (local->field)
{
fieldsize = 0;
-
+
/* If this is an array with an unspecified upper bound,
the initializer determines the size. */
/* ??? This ought to only checked if DECL_SIZE_UNIT is NULL,
@@ -4809,7 +4809,7 @@ output_constructor_regular_field (oc_local_state *local)
}
else
fieldsize = int_size_in_bytes (TREE_TYPE (local->type));
-
+
/* Output the element's initial value. */
if (local->val == NULL_TREE)
assemble_zeros (fieldsize);
@@ -4836,38 +4836,38 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
HOST_WIDE_INT relative_index
= (!local->field
? (local->index
- ? (tree_low_cst (local->index, 0)
+ ? (tree_low_cst (local->index, 0)
- tree_low_cst (local->min_index, 0))
: local->last_relative_index + 1)
: 0);
-
+
/* Bit position of this element from the start of the containing
constructor. */
HOST_WIDE_INT constructor_relative_ebitpos
= (local->field
- ? int_bit_position (local->field)
+ ? int_bit_position (local->field)
: ebitsize * relative_index);
-
+
/* Bit position of this element from the start of a possibly ongoing
outer byte buffer. */
HOST_WIDE_INT byte_relative_ebitpos
= ((outer ? outer->bit_offset : 0) + constructor_relative_ebitpos);
- /* From the start of a possibly ongoing outer byte buffer, offsets to
+ /* From the start of a possibly ongoing outer byte buffer, offsets to
the first bit of this element and to the first bit past the end of
this element. */
HOST_WIDE_INT next_offset = byte_relative_ebitpos;
HOST_WIDE_INT end_offset = byte_relative_ebitpos + ebitsize;
-
+
local->last_relative_index = relative_index;
-
+
if (local->val == NULL_TREE)
local->val = integer_zero_node;
-
+
while (TREE_CODE (local->val) == VIEW_CONVERT_EXPR
|| TREE_CODE (local->val) == NON_LVALUE_EXPR)
local->val = TREE_OPERAND (local->val, 0);
-
+
if (TREE_CODE (local->val) != INTEGER_CST
&& TREE_CODE (local->val) != CONSTRUCTOR)
{
@@ -4886,7 +4886,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
local->total_bytes++;
local->byte_buffer_in_use = false;
}
-
+
/* If still not at proper byte, advance to there. */
if (next_offset / BITS_PER_UNIT != local->total_bytes)
{
@@ -4895,7 +4895,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
local->total_bytes = next_offset / BITS_PER_UNIT;
}
}
-
+
/* Set up the buffer if necessary. */
if (!local->byte_buffer_in_use)
{
@@ -4903,7 +4903,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
if (ebitsize > 0)
local->byte_buffer_in_use = true;
}
-
+
/* If this is nested constructor, recurse passing the bit offset and the
pending data, then retrieve the new pending data afterwards. */
if (TREE_CODE (local->val) == CONSTRUCTOR)
@@ -4917,10 +4917,10 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
local->byte = output_state.byte;
return;
}
-
+
/* Otherwise, we must split the element into pieces that fall within
separate bytes, and combine each byte with previous or following
- bit-fields. */
+ bit-fields. */
while (next_offset < end_offset)
{
int this_time;
@@ -4928,7 +4928,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
HOST_WIDE_INT value;
HOST_WIDE_INT next_byte = next_offset / BITS_PER_UNIT;
HOST_WIDE_INT next_bit = next_offset % BITS_PER_UNIT;
-
+
/* Advance from byte to byte
within this element when necessary. */
while (next_byte != local->total_bytes)
@@ -4937,7 +4937,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
local->total_bytes++;
local->byte = 0;
}
-
+
/* Number of bits we can process at once
(all part of the same byte). */
this_time = MIN (end_offset - next_offset,
@@ -4948,7 +4948,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
first (of the bits that are significant)
and put them into bytes from the most significant end. */
shift = end_offset - next_offset - this_time;
-
+
/* Don't try to take a bunch of bits that cross
the word boundary in the INTEGER_CST. We can
only select bits from the LOW or HIGH part
@@ -4959,7 +4959,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
this_time = shift + this_time - HOST_BITS_PER_WIDE_INT;
shift = HOST_BITS_PER_WIDE_INT;
}
-
+
/* Now get the bits from the appropriate constant word. */
if (shift < HOST_BITS_PER_WIDE_INT)
value = TREE_INT_CST_LOW (local->val);
@@ -4969,7 +4969,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
value = TREE_INT_CST_HIGH (local->val);
shift -= HOST_BITS_PER_WIDE_INT;
}
-
+
/* Get the result. This works only when:
1 <= this_time <= HOST_BITS_PER_WIDE_INT. */
local->byte |= (((value >> shift)
@@ -4983,7 +4983,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
and pack them starting at the least significant
bits of the bytes. */
shift = next_offset - byte_relative_ebitpos;
-
+
/* Don't try to take a bunch of bits that cross
the word boundary in the INTEGER_CST. We can
only select bits from the LOW or HIGH part
@@ -4991,7 +4991,7 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
if (shift < HOST_BITS_PER_WIDE_INT
&& shift + this_time > HOST_BITS_PER_WIDE_INT)
this_time = (HOST_BITS_PER_WIDE_INT - shift);
-
+
/* Now get the bits from the appropriate constant word. */
if (shift < HOST_BITS_PER_WIDE_INT)
value = TREE_INT_CST_LOW (local->val);
@@ -5001,14 +5001,14 @@ output_constructor_bitfield (oc_local_state *local, oc_outer_state *outer)
value = TREE_INT_CST_HIGH (local->val);
shift -= HOST_BITS_PER_WIDE_INT;
}
-
+
/* Get the result. This works only when:
1 <= this_time <= HOST_BITS_PER_WIDE_INT. */
local->byte |= (((value >> shift)
& (((HOST_WIDE_INT) 2 << (this_time - 1)) - 1))
<< next_bit);
}
-
+
next_offset += this_time;
local->byte_buffer_in_use = true;
}
@@ -5044,7 +5044,7 @@ output_constructor (tree exp, unsigned HOST_WIDE_INT size,
if (TREE_CODE (local.type) == ARRAY_TYPE
&& TYPE_DOMAIN (local.type) != NULL_TREE)
local.min_index = TYPE_MIN_VALUE (TYPE_DOMAIN (local.type));
-
+
gcc_assert (HOST_BITS_PER_WIDE_INT >= BITS_PER_UNIT);
/* As CE goes through the elements of the constant, FIELD goes through the
@@ -5105,7 +5105,7 @@ output_constructor (tree exp, unsigned HOST_WIDE_INT size,
&& (local.field == NULL_TREE
|| !CONSTRUCTOR_BITFIELD_P (local.field)))
output_constructor_regular_field (&local);
-
+
/* For a true bitfield or part of an outer one. */
else
output_constructor_bitfield (&local, outer);
@@ -5129,7 +5129,7 @@ output_constructor (tree exp, unsigned HOST_WIDE_INT size,
local.total_bytes = local.size;
}
}
-
+
return local.total_bytes;
}
@@ -6347,7 +6347,7 @@ default_unique_section (tree decl, int reloc)
/* If we're using one_only, then there needs to be a .gnu.linkonce
prefix to the section name. */
linkonce = one_only ? ".gnu.linkonce" : "";
-
+
string = ACONCAT ((linkonce, prefix, ".", name, NULL));
DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
diff --git a/gcc/varpool.c b/gcc/varpool.c
index 90a9ace0ff9..157755b195e 100644
--- a/gcc/varpool.c
+++ b/gcc/varpool.c
@@ -30,7 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "hashtab.h"
#include "ggc.h"
#include "timevar.h"
-#include "debug.h"
+#include "debug.h"
#include "target.h"
#include "output.h"
#include "gimple.h"
@@ -57,21 +57,21 @@ struct varpool_node *varpool_nodes;
/* Queue of cgraph nodes scheduled to be lowered and output.
The queue is maintained via mark_needed_node, linked via node->next_needed
- pointer.
+ pointer.
LAST_NEEDED_NODE points to the end of queue, so it can be
maintained in forward order. GTY is needed to make it friendly to
PCH.
-
+
During compilation we construct the queue of needed variables
twice: first time it is during cgraph construction, second time it is at the
end of compilation in VARPOOL_REMOVE_UNREFERENCED_DECLS so we can avoid
optimized out variables being output.
-
- Each variable is thus first analyzed and then later possibly output.
+
+ Each variable is thus first analyzed and then later possibly output.
FIRST_UNANALYZED_NODE points to first node in queue that was not analyzed
yet and is moved via VARPOOL_ANALYZE_PENDING_DECLS. */
-
+
struct varpool_node *varpool_nodes_queue;
static GTY(()) struct varpool_node *varpool_last_needed_node;
static GTY(()) struct varpool_node *varpool_first_unanalyzed_node;
diff --git a/gcc/vec.c b/gcc/vec.c
index 3e60580992d..078bcc63653 100644
--- a/gcc/vec.c
+++ b/gcc/vec.c
@@ -33,7 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "toplev.h"
#include "hashtab.h"
-struct vec_prefix
+struct vec_prefix
{
unsigned num;
unsigned alloc;
@@ -190,10 +190,10 @@ calculate_allocation (const struct vec_prefix *pfx, int reserve, bool exact)
/* If there's no prefix, and we've not requested anything, then we
will create a NULL vector. */
return 0;
-
+
/* We must have run out of room. */
gcc_assert (alloc - num < (unsigned) reserve);
-
+
if (exact)
/* Exact size. */
alloc = num + reserve;
@@ -208,7 +208,7 @@ calculate_allocation (const struct vec_prefix *pfx, int reserve, bool exact)
else
/* Grow slower when large. */
alloc = (alloc * 3 / 2);
-
+
/* If this is still too small, set it to the right size. */
if (alloc < num + reserve)
alloc = num + reserve;
@@ -228,19 +228,19 @@ vec_gc_o_reserve_1 (void *vec, int reserve, size_t vec_offset, size_t elt_size,
{
struct vec_prefix *pfx = (struct vec_prefix *) vec;
unsigned alloc = calculate_allocation (pfx, reserve, exact);
-
+
if (!alloc)
{
if (pfx)
ggc_free (pfx);
return NULL;
}
-
+
vec = ggc_realloc_stat (vec, vec_offset + alloc * elt_size PASS_MEM_STAT);
((struct vec_prefix *)vec)->alloc = alloc;
if (!pfx)
((struct vec_prefix *)vec)->num = 0;
-
+
return vec;
}
@@ -316,7 +316,7 @@ vec_heap_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
if (vec)
free_overhead (pfx);
#endif
-
+
vec = xrealloc (vec, vec_offset + alloc * elt_size);
((struct vec_prefix *)vec)->alloc = alloc;
if (!pfx)
@@ -326,7 +326,7 @@ vec_heap_o_reserve_1 (void *vec, int reserve, size_t vec_offset,
register_overhead ((struct vec_prefix *)vec,
vec_offset + alloc * elt_size PASS_MEM_STAT);
#endif
-
+
return vec;
}
diff --git a/gcc/vec.h b/gcc/vec.h
index d16fdaacbb4..e8bdde8851c 100644
--- a/gcc/vec.h
+++ b/gcc/vec.h
@@ -86,12 +86,12 @@ along with GCC; see the file COPYING3. If not see
when the type is defined, and is therefore part of the type. If
you need both gc'd and heap allocated versions, you still must have
*exactly* one definition of the common non-memory managed base vector.
-
+
If you need to directly manipulate a vector, then the 'address'
accessor will return the address of the start of the vector. Also
the 'space' predicate will tell you whether there is spare capacity
in the vector. You will not normally need to use these two functions.
-
+
Vector types are defined using a DEF_VEC_{O,P,I}(TYPEDEF) macro, to
get the non-memory allocation version, and then a
DEF_VEC_ALLOC_{O,P,I}(TYPEDEF,ALLOC) macro to get memory managed
@@ -204,10 +204,10 @@ along with GCC; see the file COPYING3. If not see
/* Use these to determine the required size and initialization of a
vector embedded within another structure (as the final member).
-
+
size_t VEC_T_embedded_size(int reserve);
void VEC_T_embedded_init(VEC(T) *v, int reserve);
-
+
These allow the caller to perform the memory allocation. */
#define VEC_embedded_size(T,N) (VEC_OP(T,base,embedded_size)(N))
@@ -222,7 +222,7 @@ along with GCC; see the file COPYING3. If not see
#define VEC_copy(T,A,V) (VEC_OP(T,A,copy)(VEC_BASE(V) MEM_STAT_INFO))
/* Determine if a vector has additional capacity.
-
+
int VEC_T_space (VEC(T) *v,int reserve)
If V has space for RESERVE additional entries, return nonzero. You
@@ -260,7 +260,7 @@ along with GCC; see the file COPYING3. If not see
T *VEC_T_quick_push (VEC(T) *v, T obj); // Integer
T *VEC_T_quick_push (VEC(T) *v, T obj); // Pointer
T *VEC_T_quick_push (VEC(T) *v, T *obj); // Object
-
+
Push a new element onto the end, returns a pointer to the slot
filled in. For object vectors, the new value can be NULL, in which
case NO initialization is performed. There must
@@ -273,7 +273,7 @@ along with GCC; see the file COPYING3. If not see
T *VEC_T_A_safe_push (VEC(T,A) *&v, T obj); // Integer
T *VEC_T_A_safe_push (VEC(T,A) *&v, T obj); // Pointer
T *VEC_T_A_safe_push (VEC(T,A) *&v, T *obj); // Object
-
+
Push a new element onto the end, returns a pointer to the slot
filled in. For object vectors, the new value can be NULL, in which
case NO initialization is performed. Reallocates V, if needed. */
@@ -293,7 +293,7 @@ along with GCC; see the file COPYING3. If not see
/* Truncate to specific length
void VEC_T_truncate (VEC(T) *v, unsigned len);
-
+
Set the length as specified. The new length must be less than or
equal to the current length. This is an O(1) operation. */
@@ -324,7 +324,7 @@ along with GCC; see the file COPYING3. If not see
T VEC_T_replace (VEC(T) *v, unsigned ix, T val); // Integer
T VEC_T_replace (VEC(T) *v, unsigned ix, T val); // Pointer
T *VEC_T_replace (VEC(T) *v, unsigned ix, T *val); // Object
-
+
Replace the IXth element of V with a new value, VAL. For pointer
vectors returns the original value. For object vectors returns a
pointer to the new value. For object vectors the new value can be
@@ -338,7 +338,7 @@ along with GCC; see the file COPYING3. If not see
T *VEC_T_quick_insert (VEC(T) *v, unsigned ix, T val); // Integer
T *VEC_T_quick_insert (VEC(T) *v, unsigned ix, T val); // Pointer
T *VEC_T_quick_insert (VEC(T) *v, unsigned ix, T *val); // Object
-
+
Insert an element, VAL, at the IXth position of V. Return a pointer
to the slot created. For vectors of object, the new value can be
NULL, in which case no initialization of the inserted slot takes
@@ -351,7 +351,7 @@ along with GCC; see the file COPYING3. If not see
T *VEC_T_A_safe_insert (VEC(T,A) *&v, unsigned ix, T val); // Integer
T *VEC_T_A_safe_insert (VEC(T,A) *&v, unsigned ix, T val); // Pointer
T *VEC_T_A_safe_insert (VEC(T,A) *&v, unsigned ix, T *val); // Object
-
+
Insert an element, VAL, at the IXth position of V. Return a pointer
to the slot created. For vectors of object, the new value can be
NULL, in which case no initialization of the inserted slot takes
@@ -359,12 +359,12 @@ along with GCC; see the file COPYING3. If not see
#define VEC_safe_insert(T,A,V,I,O) \
(VEC_OP(T,A,safe_insert)(&(V),I,O VEC_CHECK_INFO MEM_STAT_INFO))
-
+
/* Remove element retaining order
T VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Integer
T VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Pointer
void VEC_T_ordered_remove (VEC(T) *v, unsigned ix); // Object
-
+
Remove an element from the IXth position of V. Ordering of
remaining elements is preserved. For pointer vectors returns the
removed object. This is an O(N) operation due to a memmove. */
@@ -376,7 +376,7 @@ along with GCC; see the file COPYING3. If not see
T VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Integer
T VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Pointer
void VEC_T_unordered_remove (VEC(T) *v, unsigned ix); // Object
-
+
Remove an element from the IXth position of V. Ordering of
remaining elements is destroyed. For pointer vectors returns the
removed object. This is an O(1) operation. */
@@ -386,7 +386,7 @@ along with GCC; see the file COPYING3. If not see
/* Remove a block of elements
void VEC_T_block_remove (VEC(T) *v, unsigned ix, unsigned len);
-
+
Remove LEN elements starting at the IXth. Ordering is retained.
This is an O(1) operation. */
@@ -402,17 +402,17 @@ along with GCC; see the file COPYING3. If not see
#define VEC_address(T,V) (VEC_OP(T,base,address)(VEC_BASE(V)))
/* Find the first index in the vector not less than the object.
- unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
+ unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
bool (*lessthan) (const T, const T)); // Integer
- unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
+ unsigned VEC_T_lower_bound (VEC(T) *v, const T val,
bool (*lessthan) (const T, const T)); // Pointer
unsigned VEC_T_lower_bound (VEC(T) *v, const T *val,
bool (*lessthan) (const T*, const T*)); // Object
-
+
Find the first position in which VAL could be inserted without
changing the ordering of V. LESSTHAN is a function that returns
true if the first argument is strictly less than the second. */
-
+
#define VEC_lower_bound(T,V,O,LT) \
(VEC_OP(T,base,lower_bound)(VEC_BASE(V),O,LT VEC_CHECK_INFO))
@@ -440,7 +440,7 @@ void vec_heap_free (void *);
#define VEC_CHECK_INFO ,__FILE__,__LINE__,__FUNCTION__
#define VEC_CHECK_DECL ,const char *file_,unsigned line_,const char *function_
#define VEC_CHECK_PASS ,file_,line_,function_
-
+
#define VEC_ASSERT(EXPR,OP,T,A) \
(void)((EXPR) ? 0 : (VEC_ASSERT_FAIL(OP,VEC(T,A)), 0))
@@ -461,7 +461,7 @@ extern void vec_assert_fail (const char *, const char * VEC_CHECK_DECL)
#define VEC(T,A) VEC_##T##_##A
#define VEC_OP(T,A,OP) VEC_##T##_##A##_##OP
-/* Base of vector type, not user visible. */
+/* Base of vector type, not user visible. */
#define VEC_T(T,B) \
typedef struct VEC(T,B) \
{ \
diff --git a/gcc/vmsdbgout.c b/gcc/vmsdbgout.c
index 74285af56c6..2ad6ed23e99 100644
--- a/gcc/vmsdbgout.c
+++ b/gcc/vmsdbgout.c
@@ -1741,7 +1741,7 @@ vmsdbgout_finish (const char *main_input_filename ATTRIBUTE_UNUSED)
#define MAXPATH 256
/* descrip.h doesn't have everything ... */
-typedef struct fibdef* __fibdef_ptr32 __attribute__ (( mode (SI) ));
+typedef struct fibdef* __fibdef_ptr32 __attribute__ (( mode (SI) ));
struct dsc$descriptor_fib
{
unsigned int fib$l_len;
diff --git a/gcc/web.c b/gcc/web.c
index 2c5f93e3f62..27e7f919fe2 100644
--- a/gcc/web.c
+++ b/gcc/web.c
@@ -81,7 +81,7 @@ unionfind_root (struct web_entry *element)
return element;
}
-/* Union sets.
+/* Union sets.
Return true if FIRST and SECOND points to the same web entry structure and
nothing is done. Otherwise, return false. */
@@ -253,7 +253,7 @@ replace_ref (df_ref ref, rtx reg)
return;
if (dump_file)
fprintf (dump_file, "Updating insn %i (%i->%i)\n",
- uid, REGNO (oldreg), REGNO (reg));
+ uid, REGNO (oldreg), REGNO (reg));
*loc = reg;
df_insn_rescan (DF_REF_INSN (ref));
}
@@ -386,7 +386,7 @@ struct rtl_opt_pass pass_web =
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_df_finish | TODO_verify_rtl_sharing |
+ TODO_df_finish | TODO_verify_rtl_sharing |
TODO_dump_func /* todo_flags_finish */
}
};
diff --git a/gcc/xcoffout.c b/gcc/xcoffout.c
index 0f13486ad1e..f00aee00e5e 100644
--- a/gcc/xcoffout.c
+++ b/gcc/xcoffout.c
@@ -145,7 +145,7 @@ static const struct xcoff_type_number xcoff_type_numbers[] = {
/* ??? Should also handle built-in C++ and Obj-C types. There perhaps
aren't any that C doesn't already have. */
-};
+};
/* Returns an XCOFF fundamental type number for DECL (assumed to be a
TYPE_DECL), or 0 if dbxout.c should assign a type number normally. */