author     Bruce Momjian <bruce@momjian.us>  2014-05-06 11:26:24 -0400
committer  Bruce Momjian <bruce@momjian.us>  2014-05-06 11:26:24 -0400
commit     99058cfc634ad59fa80f802f96c334be313faefb (patch)
tree       d8bb3de774338ac58eb73df3320aee02e82f7d86
parent     3ada1fab8bf9ba001fcb095d66914f3f1f34711f (diff)
download   postgresql-99058cfc634ad59fa80f802f96c334be313faefb.tar.gz
Remove tabs after spaces in C comments
This was not changed in HEAD, but will be done later as part of a
pgindent run.  Future pgindent runs will also do this.

Report by Tom Lane

Backpatch through all supported branches, but not HEAD
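For illustration only (not part of the commit): the whitespace being removed
is a tab that immediately follows a space inside a C comment, so that, for
example, "pg_type.<space><tab>This" in the old text becomes
"pg_type.<space><space>This" in the hunks below.  The following minimal C
sketch performs that transformation; it is a hypothetical stand-in, not the
pgindent tooling actually used, and unlike pgindent it does not restrict
itself to comments.

    #include <stdio.h>

    /*
     * Hypothetical filter: copy stdin to stdout, turning every tab that
     * immediately follows a space into a space.  A space-tab pair after a
     * sentence-ending period thus becomes two spaces, which is the spacing
     * the hunks below converge on.  Runs of tabs after a space collapse
     * the same way, one space per tab.
     */
    int
    main(void)
    {
        int     c;
        int     prev = '\0';

        while ((c = getchar()) != EOF)
        {
            if (c == '\t' && prev == ' ')
                c = ' ';        /* drop the tab-after-space */
            putchar(c);
            prev = c;
        }
        return 0;
    }

Invoked as "filter < old.c > new.c"; the actual change was generated
mechanically and applied across all supported back branches.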
-rw-r--r--  contrib/btree_gist/btree_interval.c | 2
-rw-r--r--  contrib/cube/cube.c | 4
-rw-r--r--  contrib/dblink/dblink.c | 4
-rw-r--r--  contrib/earthdistance/earthdistance.c | 2
-rw-r--r--  contrib/intarray/_int_gist.c | 4
-rw-r--r--  contrib/ltree/ltree_op.c | 2
-rw-r--r--  contrib/oid2name/oid2name.c | 2
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c | 2
-rw-r--r--  contrib/pgcrypto/crypt-des.c | 2
-rw-r--r--  contrib/pgcrypto/crypt-gensalt.c | 2
-rw-r--r--  contrib/pgcrypto/fortuna.c | 8
-rw-r--r--  contrib/pgcrypto/fortuna.h | 2
-rw-r--r--  contrib/pgcrypto/imath.c | 6
-rw-r--r--  contrib/pgcrypto/imath.h | 2
-rw-r--r--  contrib/pgcrypto/internal-sha2.c | 2
-rw-r--r--  contrib/pgcrypto/internal.c | 2
-rw-r--r--  contrib/pgcrypto/mbuf.c | 2
-rw-r--r--  contrib/pgcrypto/mbuf.h | 2
-rw-r--r--  contrib/pgcrypto/md5.c | 2
-rw-r--r--  contrib/pgcrypto/md5.h | 2
-rw-r--r--  contrib/pgcrypto/openssl.c | 2
-rw-r--r--  contrib/pgcrypto/pgcrypto.c | 2
-rw-r--r--  contrib/pgcrypto/pgcrypto.h | 2
-rw-r--r--  contrib/pgcrypto/pgp-armor.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-cfb.c | 4
-rw-r--r--  contrib/pgcrypto/pgp-compress.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-decrypt.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-encrypt.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-info.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-mpi-internal.c | 4
-rw-r--r--  contrib/pgcrypto/pgp-mpi-openssl.c | 4
-rw-r--r--  contrib/pgcrypto/pgp-mpi.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-pgsql.c | 6
-rw-r--r--  contrib/pgcrypto/pgp-pubdec.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-pubenc.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-pubkey.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-s2k.c | 2
-rw-r--r--  contrib/pgcrypto/pgp.c | 2
-rw-r--r--  contrib/pgcrypto/pgp.h | 2
-rw-r--r--  contrib/pgcrypto/px-crypt.c | 2
-rw-r--r--  contrib/pgcrypto/px-crypt.h | 2
-rw-r--r--  contrib/pgcrypto/px-hmac.c | 2
-rw-r--r--  contrib/pgcrypto/px.c | 2
-rw-r--r--  contrib/pgcrypto/px.h | 2
-rw-r--r--  contrib/pgcrypto/random.c | 2
-rw-r--r--  contrib/pgcrypto/rijndael.c | 6
-rw-r--r--  contrib/pgcrypto/rijndael.h | 4
-rw-r--r--  contrib/pgcrypto/sha1.c | 2
-rw-r--r--  contrib/pgcrypto/sha1.h | 2
-rw-r--r--  contrib/pgcrypto/sha2.c | 2
-rw-r--r--  contrib/pgcrypto/sha2.h | 2
-rw-r--r--  contrib/pgstattuple/pgstattuple.c | 2
-rw-r--r--  contrib/seg/seg.c | 4
-rw-r--r--  contrib/spi/moddatetime.c | 2
-rw-r--r--  contrib/spi/timetravel.c | 6
-rw-r--r--  contrib/sslinfo/sslinfo.c | 2
-rw-r--r--  contrib/vacuumlo/vacuumlo.c | 2
-rw-r--r--  src/backend/access/common/heaptuple.c | 10
-rw-r--r--  src/backend/access/common/indextuple.c | 6
-rw-r--r--  src/backend/access/common/printtup.c | 2
-rw-r--r--  src/backend/access/common/reloptions.c | 6
-rw-r--r--  src/backend/access/common/tupconvert.c | 2
-rw-r--r--  src/backend/access/common/tupdesc.c | 2
-rw-r--r--  src/backend/access/gin/ginfast.c | 6
-rw-r--r--  src/backend/access/gin/ginget.c | 8
-rw-r--r--  src/backend/access/gin/ginscan.c | 6
-rw-r--r--  src/backend/access/gist/gist.c | 2
-rw-r--r--  src/backend/access/gist/gistsplit.c | 10
-rw-r--r--  src/backend/access/gist/gistutil.c | 6
-rw-r--r--  src/backend/access/gist/gistvacuum.c | 2
-rw-r--r--  src/backend/access/hash/hash.c | 6
-rw-r--r--  src/backend/access/hash/hashfunc.c | 10
-rw-r--r--  src/backend/access/hash/hashovfl.c | 16
-rw-r--r--  src/backend/access/hash/hashpage.c | 20
-rw-r--r--  src/backend/access/hash/hashsearch.c | 2
-rw-r--r--  src/backend/access/hash/hashsort.c | 4
-rw-r--r--  src/backend/access/hash/hashutil.c | 4
-rw-r--r--  src/backend/access/heap/heapam.c | 70
-rw-r--r--  src/backend/access/heap/hio.c | 14
-rw-r--r--  src/backend/access/heap/pruneheap.c | 14
-rw-r--r--  src/backend/access/heap/rewriteheap.c | 14
-rw-r--r--  src/backend/access/heap/syncscan.c | 6
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 6
-rw-r--r--  src/backend/access/index/genam.c | 4
-rw-r--r--  src/backend/access/index/indexam.c | 16
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 22
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 30
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 28
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 22
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 40
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 8
-rw-r--r--  src/backend/access/transam/clog.c | 8
-rw-r--r--  src/backend/access/transam/multixact.c | 54
-rw-r--r--  src/backend/access/transam/slru.c | 14
-rw-r--r--  src/backend/access/transam/subtrans.c | 4
-rw-r--r--  src/backend/access/transam/transam.c | 4
-rw-r--r--  src/backend/access/transam/twophase.c | 12
-rw-r--r--  src/backend/access/transam/varsup.c | 8
-rw-r--r--  src/backend/access/transam/xact.c | 54
-rw-r--r--  src/backend/access/transam/xlog.c | 68
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 14
-rw-r--r--  src/backend/catalog/aclchk.c | 18
-rw-r--r--  src/backend/catalog/catalog.c | 4
-rw-r--r--  src/backend/catalog/dependency.c | 28
-rw-r--r--  src/backend/catalog/heap.c | 32
-rw-r--r--  src/backend/catalog/index.c | 74
-rw-r--r--  src/backend/catalog/indexing.c | 2
-rw-r--r--  src/backend/catalog/namespace.c | 40
-rw-r--r--  src/backend/catalog/pg_constraint.c | 2
-rw-r--r--  src/backend/catalog/pg_depend.c | 4
-rw-r--r--  src/backend/catalog/pg_operator.c | 4
-rw-r--r--  src/backend/catalog/pg_proc.c | 10
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 16
-rw-r--r--  src/backend/catalog/pg_type.c | 2
-rw-r--r--  src/backend/catalog/storage.c | 2
-rw-r--r--  src/backend/catalog/toasting.c | 2
-rw-r--r--  src/backend/commands/aggregatecmds.c | 2
-rw-r--r--  src/backend/commands/alter.c | 2
-rw-r--r--  src/backend/commands/analyze.c | 42
-rw-r--r--  src/backend/commands/async.c | 14
-rw-r--r--  src/backend/commands/cluster.c | 22
-rw-r--r--  src/backend/commands/comment.c | 4
-rw-r--r--  src/backend/commands/copy.c | 32
-rw-r--r--  src/backend/commands/dbcommands.c | 12
-rw-r--r--  src/backend/commands/define.c | 2
-rw-r--r--  src/backend/commands/explain.c | 4
-rw-r--r--  src/backend/commands/foreigncmds.c | 6
-rw-r--r--  src/backend/commands/functioncmds.c | 10
-rw-r--r--  src/backend/commands/indexcmds.c | 32
-rw-r--r--  src/backend/commands/opclasscmds.c | 6
-rw-r--r--  src/backend/commands/operatorcmds.c | 2
-rw-r--r--  src/backend/commands/portalcmds.c | 8
-rw-r--r--  src/backend/commands/prepare.c | 4
-rw-r--r--  src/backend/commands/proclang.c | 2
-rw-r--r--  src/backend/commands/schemacmds.c | 4
-rw-r--r--  src/backend/commands/sequence.c | 10
-rw-r--r--  src/backend/commands/tablecmds.c | 62
-rw-r--r--  src/backend/commands/tablespace.c | 16
-rw-r--r--  src/backend/commands/trigger.c | 40
-rw-r--r--  src/backend/commands/typecmds.c | 20
-rw-r--r--  src/backend/commands/user.c | 8
-rw-r--r--  src/backend/commands/vacuum.c | 52
-rw-r--r--  src/backend/commands/vacuumlazy.c | 8
-rw-r--r--  src/backend/commands/variable.c | 16
-rw-r--r--  src/backend/commands/view.c | 6
-rw-r--r--  src/backend/executor/execAmi.c | 6
-rw-r--r--  src/backend/executor/execCurrent.c | 2
-rw-r--r--  src/backend/executor/execJunk.c | 2
-rw-r--r--  src/backend/executor/execMain.c | 26
-rw-r--r--  src/backend/executor/execProcnode.c | 4
-rw-r--r--  src/backend/executor/execQual.c | 48
-rw-r--r--  src/backend/executor/execTuples.c | 14
-rw-r--r--  src/backend/executor/execUtils.c | 14
-rw-r--r--  src/backend/executor/functions.c | 16
-rw-r--r--  src/backend/executor/nodeAgg.c | 32
-rw-r--r--  src/backend/executor/nodeAppend.c | 4
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c | 4
-rw-r--r--  src/backend/executor/nodeFunctionscan.c | 2
-rw-r--r--  src/backend/executor/nodeHash.c | 14
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 8
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 6
-rw-r--r--  src/backend/executor/nodeLimit.c | 2
-rw-r--r--  src/backend/executor/nodeMaterial.c | 2
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 14
-rw-r--r--  src/backend/executor/nodeRecursiveunion.c | 2
-rw-r--r--  src/backend/executor/nodeResult.c | 2
-rw-r--r--  src/backend/executor/nodeSetOp.c | 6
-rw-r--r--  src/backend/executor/nodeSubplan.c | 10
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 4
-rw-r--r--  src/backend/executor/nodeUnique.c | 2
-rw-r--r--  src/backend/executor/nodeValuesscan.c | 2
-rw-r--r--  src/backend/executor/nodeWindowAgg.c | 20
-rw-r--r--  src/backend/executor/nodeWorktablescan.c | 2
-rw-r--r--  src/backend/executor/spi.c | 10
-rw-r--r--  src/backend/executor/tstoreReceiver.c | 2
-rw-r--r--  src/backend/lib/stringinfo.c | 4
-rw-r--r--  src/backend/libpq/auth.c | 8
-rw-r--r--  src/backend/libpq/be-secure.c | 4
-rw-r--r--  src/backend/libpq/hba.c | 4
-rw-r--r--  src/backend/libpq/md5.c | 2
-rw-r--r--  src/backend/libpq/pqcomm.c | 4
-rw-r--r--  src/backend/libpq/pqformat.c | 2
-rw-r--r--  src/backend/libpq/pqsignal.c | 4
-rw-r--r--  src/backend/main/main.c | 8
-rw-r--r--  src/backend/nodes/bitmapset.c | 4
-rw-r--r--  src/backend/nodes/copyfuncs.c | 6
-rw-r--r--  src/backend/nodes/equalfuncs.c | 12
-rw-r--r--  src/backend/nodes/list.c | 4
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 12
-rw-r--r--  src/backend/nodes/outfuncs.c | 6
-rw-r--r--  src/backend/nodes/read.c | 10
-rw-r--r--  src/backend/nodes/readfuncs.c | 14
-rw-r--r--  src/backend/nodes/tidbitmap.c | 16
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 6
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 16
-rw-r--r--  src/backend/optimizer/path/clausesel.c | 20
-rw-r--r--  src/backend/optimizer/path/costsize.c | 50
-rw-r--r--  src/backend/optimizer/path/equivclass.c | 36
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 54
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 10
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 12
-rw-r--r--  src/backend/optimizer/path/orindxpath.c | 10
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 22
-rw-r--r--  src/backend/optimizer/path/tidpath.c | 4
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 28
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 36
-rw-r--r--  src/backend/optimizer/plan/planagg.c | 8
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 14
-rw-r--r--  src/backend/optimizer/plan/planner.c | 48
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 20
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 38
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 28
-rw-r--r--  src/backend/optimizer/prep/prepqual.c | 10
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 14
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 14
-rw-r--r--  src/backend/optimizer/util/clauses.c | 50
-rw-r--r--  src/backend/optimizer/util/joininfo.c | 2
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 12
-rw-r--r--  src/backend/optimizer/util/plancat.c | 8
-rw-r--r--  src/backend/optimizer/util/predtest.c | 20
-rw-r--r--  src/backend/optimizer/util/relnode.c | 10
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c | 8
-rw-r--r--  src/backend/optimizer/util/tlist.c | 2
-rw-r--r--  src/backend/optimizer/util/var.c | 14
-rw-r--r--  src/backend/parser/analyze.c | 12
-rw-r--r--  src/backend/parser/kwlookup.c | 2
-rw-r--r--  src/backend/parser/parse_agg.c | 12
-rw-r--r--  src/backend/parser/parse_clause.c | 28
-rw-r--r--  src/backend/parser/parse_coerce.c | 36
-rw-r--r--  src/backend/parser/parse_cte.c | 12
-rw-r--r--  src/backend/parser/parse_expr.c | 16
-rw-r--r--  src/backend/parser/parse_func.c | 20
-rw-r--r--  src/backend/parser/parse_node.c | 8
-rw-r--r--  src/backend/parser/parse_oper.c | 4
-rw-r--r--  src/backend/parser/parse_relation.c | 10
-rw-r--r--  src/backend/parser/parse_target.c | 16
-rw-r--r--  src/backend/parser/parse_type.c | 10
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 8
-rw-r--r--  src/backend/parser/parser.c | 2
-rw-r--r--  src/backend/port/darwin/system.c | 2
-rw-r--r--  src/backend/port/dynloader/darwin.c | 2
-rw-r--r--  src/backend/port/dynloader/freebsd.c | 2
-rw-r--r--  src/backend/port/dynloader/netbsd.c | 2
-rw-r--r--  src/backend/port/dynloader/openbsd.c | 2
-rw-r--r--  src/backend/port/posix_sema.c | 2
-rw-r--r--  src/backend/port/sysv_sema.c | 14
-rw-r--r--  src/backend/port/sysv_shmem.c | 10
-rw-r--r--  src/backend/port/win32_shmem.c | 2
-rw-r--r--  src/backend/postmaster/autovacuum.c | 42
-rw-r--r--  src/backend/postmaster/bgwriter.c | 24
-rw-r--r--  src/backend/postmaster/pgarch.c | 4
-rw-r--r--  src/backend/postmaster/pgstat.c | 26
-rw-r--r--  src/backend/postmaster/postmaster.c | 60
-rw-r--r--  src/backend/postmaster/syslogger.c | 12
-rw-r--r--  src/backend/postmaster/walwriter.c | 6
-rw-r--r--  src/backend/regex/regc_color.c | 2
-rw-r--r--  src/backend/regex/regc_cvec.c | 2
-rw-r--r--  src/backend/regex/regc_lex.c | 2
-rw-r--r--  src/backend/regex/regc_locale.c | 6
-rw-r--r--  src/backend/regex/regc_nfa.c | 6
-rw-r--r--  src/backend/regex/regcomp.c | 8
-rw-r--r--  src/backend/regex/rege_dfa.c | 2
-rw-r--r--  src/backend/regex/regerror.c | 2
-rw-r--r--  src/backend/regex/regexec.c | 2
-rw-r--r--  src/backend/regex/regfree.c | 2
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 6
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 22
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 18
-rw-r--r--  src/backend/storage/buffer/buf_init.c | 4
-rw-r--r--  src/backend/storage/buffer/buf_table.c | 4
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 38
-rw-r--r--  src/backend/storage/buffer/freelist.c | 10
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 4
-rw-r--r--  src/backend/storage/file/buffile.c | 6
-rw-r--r--  src/backend/storage/file/fd.c | 22
-rw-r--r--  src/backend/storage/freespace/freespace.c | 4
-rw-r--r--  src/backend/storage/freespace/fsmpage.c | 4
-rw-r--r--  src/backend/storage/ipc/ipc.c | 10
-rw-r--r--  src/backend/storage/ipc/ipci.c | 4
-rw-r--r--  src/backend/storage/ipc/pmsignal.c | 8
-rw-r--r--  src/backend/storage/ipc/procarray.c | 18
-rw-r--r--  src/backend/storage/ipc/shmem.c | 16
-rw-r--r--  src/backend/storage/ipc/shmqueue.c | 2
-rw-r--r--  src/backend/storage/ipc/sinval.c | 14
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 16
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 2
-rw-r--r--  src/backend/storage/lmgr/deadlock.c | 18
-rw-r--r--  src/backend/storage/lmgr/lmgr.c | 8
-rw-r--r--  src/backend/storage/lmgr/lock.c | 16
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 8
-rw-r--r--  src/backend/storage/lmgr/proc.c | 28
-rw-r--r--  src/backend/storage/lmgr/s_lock.c | 6
-rw-r--r--  src/backend/storage/lmgr/spin.c | 2
-rw-r--r--  src/backend/storage/page/bufpage.c | 6
-rw-r--r--  src/backend/storage/smgr/md.c | 24
-rw-r--r--  src/backend/tcop/fastpath.c | 10
-rw-r--r--  src/backend/tcop/postgres.c | 30
-rw-r--r--  src/backend/tcop/pquery.c | 14
-rw-r--r--  src/backend/tsearch/ts_locale.c | 4
-rw-r--r--  src/backend/tsearch/ts_typanalyze.c | 4
-rw-r--r--  src/backend/tsearch/ts_utils.c | 8
-rw-r--r--  src/backend/utils/adt/acl.c | 16
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 22
-rw-r--r--  src/backend/utils/adt/arrayutils.c | 2
-rw-r--r--  src/backend/utils/adt/char.c | 2
-rw-r--r--  src/backend/utils/adt/date.c | 6
-rw-r--r--  src/backend/utils/adt/datetime.c | 12
-rw-r--r--  src/backend/utils/adt/datum.c | 2
-rw-r--r--  src/backend/utils/adt/domains.c | 6
-rw-r--r--  src/backend/utils/adt/float.c | 6
-rw-r--r--  src/backend/utils/adt/format_type.c | 8
-rw-r--r--  src/backend/utils/adt/formatting.c | 2
-rw-r--r--  src/backend/utils/adt/geo_selfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/inet_net_ntop.c | 2
-rw-r--r--  src/backend/utils/adt/int.c | 30
-rw-r--r--  src/backend/utils/adt/int8.c | 44
-rw-r--r--  src/backend/utils/adt/like.c | 4
-rw-r--r--  src/backend/utils/adt/misc.c | 2
-rw-r--r--  src/backend/utils/adt/nabstime.c | 4
-rw-r--r--  src/backend/utils/adt/network.c | 14
-rw-r--r--  src/backend/utils/adt/numeric.c | 40
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 6
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 6
-rw-r--r--  src/backend/utils/adt/pseudotypes.c | 6
-rw-r--r--  src/backend/utils/adt/regexp.c | 8
-rw-r--r--  src/backend/utils/adt/regproc.c | 12
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 8
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 8
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 34
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 84
-rw-r--r--  src/backend/utils/adt/timestamp.c | 18
-rw-r--r--  src/backend/utils/adt/varchar.c | 4
-rw-r--r--  src/backend/utils/adt/varlena.c | 30
-rw-r--r--  src/backend/utils/adt/xml.c | 32
-rw-r--r--  src/backend/utils/cache/catcache.c | 20
-rw-r--r--  src/backend/utils/cache/inval.c | 36
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 8
-rw-r--r--  src/backend/utils/cache/plancache.c | 16
-rw-r--r--  src/backend/utils/cache/relcache.c | 50
-rw-r--r--  src/backend/utils/cache/syscache.c | 4
-rw-r--r--  src/backend/utils/cache/typcache.c | 4
-rw-r--r--  src/backend/utils/error/elog.c | 42
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 6
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 20
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 10
-rw-r--r--  src/backend/utils/hash/dynahash.c | 24
-rw-r--r--  src/backend/utils/init/flatfiles.c | 10
-rw-r--r--  src/backend/utils/init/miscinit.c | 30
-rw-r--r--  src/backend/utils/init/postinit.c | 8
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 2
-rw-r--r--  src/backend/utils/mb/mbutils.c | 12
-rw-r--r--  src/backend/utils/mb/wstrcmp.c | 2
-rw-r--r--  src/backend/utils/mb/wstrncmp.c | 2
-rw-r--r--  src/backend/utils/misc/guc.c | 28
-rw-r--r--  src/backend/utils/misc/ps_status.c | 6
-rw-r--r--  src/backend/utils/misc/tzparser.c | 2
-rw-r--r--  src/backend/utils/mmgr/aset.c | 16
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 16
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 12
-rw-r--r--  src/backend/utils/resowner/resowner.c | 8
-rw-r--r--  src/backend/utils/sort/logtape.c | 30
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 66
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 20
-rw-r--r--  src/backend/utils/time/combocid.c | 2
-rw-r--r--  src/backend/utils/time/snapmgr.c | 6
-rw-r--r--  src/backend/utils/time/tqual.c | 18
-rw-r--r--  src/bin/initdb/initdb.c | 8
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c | 2
-rw-r--r--  src/bin/pg_dump/common.c | 4
-rw-r--r--  src/bin/pg_dump/dumputils.c | 12
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 16
-rw-r--r--  src/bin/pg_dump/pg_backup_db.c | 4
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 42
-rw-r--r--  src/bin/pg_dump/pg_dump.h | 6
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c | 16
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 6
-rw-r--r--  src/bin/pg_resetxlog/pg_resetxlog.c | 4
-rw-r--r--  src/bin/psql/common.c | 2
-rw-r--r--  src/bin/psql/copy.c | 2
-rw-r--r--  src/bin/psql/describe.c | 2
-rw-r--r--  src/bin/psql/input.c | 4
-rw-r--r--  src/bin/psql/large_obj.c | 2
-rw-r--r--  src/bin/psql/mainloop.c | 2
-rw-r--r--  src/bin/psql/mbprint.c | 2
-rw-r--r--  src/bin/psql/print.c | 6
-rw-r--r--  src/bin/psql/settings.h | 2
-rw-r--r--  src/bin/psql/stringutils.c | 10
-rw-r--r--  src/bin/psql/tab-complete.c | 6
-rw-r--r--  src/bin/scripts/common.c | 4
-rw-r--r--  src/include/access/attnum.h | 2
-rw-r--r--  src/include/access/gin.h | 2
-rw-r--r--  src/include/access/hash.h | 2
-rw-r--r--  src/include/access/htup.h | 24
-rw-r--r--  src/include/access/itup.h | 2
-rw-r--r--  src/include/access/nbtree.h | 24
-rw-r--r--  src/include/access/reloptions.h | 2
-rw-r--r--  src/include/access/skey.h | 4
-rw-r--r--  src/include/access/transam.h | 2
-rw-r--r--  src/include/access/tupdesc.h | 2
-rw-r--r--  src/include/access/tupmacs.h | 4
-rw-r--r--  src/include/access/tuptoaster.h | 2
-rw-r--r--  src/include/access/xlog.h | 6
-rw-r--r--  src/include/access/xlog_internal.h | 4
-rw-r--r--  src/include/access/xlogdefs.h | 8
-rw-r--r--  src/include/c.h | 18
-rw-r--r--  src/include/catalog/catversion.h | 2
-rw-r--r--  src/include/catalog/dependency.h | 2
-rw-r--r--  src/include/catalog/namespace.h | 2
-rw-r--r--  src/include/catalog/pg_attrdef.h | 2
-rw-r--r--  src/include/catalog/pg_attribute.h | 4
-rw-r--r--  src/include/catalog/pg_authid.h | 2
-rw-r--r--  src/include/catalog/pg_constraint.h | 6
-rw-r--r--  src/include/catalog/pg_control.h | 4
-rw-r--r--  src/include/catalog/pg_description.h | 6
-rw-r--r--  src/include/catalog/pg_largeobject.h | 2
-rw-r--r--  src/include/catalog/pg_opclass.h | 6
-rw-r--r--  src/include/catalog/pg_proc.h | 4
-rw-r--r--  src/include/catalog/pg_rewrite.h | 2
-rw-r--r--  src/include/catalog/pg_shdepend.h | 2
-rw-r--r--  src/include/catalog/pg_shdescription.h | 4
-rw-r--r--  src/include/catalog/pg_statistic.h | 24
-rw-r--r--  src/include/catalog/pg_trigger.h | 2
-rw-r--r--  src/include/catalog/pg_ts_dict.h | 2
-rw-r--r--  src/include/catalog/pg_ts_template.h | 2
-rw-r--r--  src/include/catalog/pg_type.h | 12
-rw-r--r--  src/include/commands/comment.h | 2
-rw-r--r--  src/include/commands/vacuum.h | 4
-rw-r--r--  src/include/executor/executor.h | 2
-rw-r--r--  src/include/executor/hashjoin.h | 4
-rw-r--r--  src/include/executor/spi_priv.h | 2
-rw-r--r--  src/include/executor/tuptable.h | 12
-rw-r--r--  src/include/fmgr.h | 16
-rw-r--r--  src/include/funcapi.h | 2
-rw-r--r--  src/include/lib/stringinfo.h | 4
-rw-r--r--  src/include/libpq/libpq-be.h | 8
-rw-r--r--  src/include/libpq/pqcomm.h | 2
-rw-r--r--  src/include/mb/pg_wchar.h | 4
-rw-r--r--  src/include/miscadmin.h | 16
-rw-r--r--  src/include/nodes/execnodes.h | 22
-rw-r--r--  src/include/nodes/nodes.h | 4
-rw-r--r--  src/include/nodes/params.h | 4
-rw-r--r--  src/include/nodes/parsenodes.h | 38
-rw-r--r--  src/include/nodes/plannodes.h | 20
-rw-r--r--  src/include/nodes/primnodes.h | 52
-rw-r--r--  src/include/nodes/relation.h | 62
-rw-r--r--  src/include/nodes/tidbitmap.h | 2
-rw-r--r--  src/include/nodes/value.h | 2
-rw-r--r--  src/include/parser/parse_node.h | 4
-rw-r--r--  src/include/pg_config_manual.h | 14
-rw-r--r--  src/include/pgstat.h | 2
-rw-r--r--  src/include/port.h | 2
-rw-r--r--  src/include/port/linux.h | 2
-rw-r--r--  src/include/port/win32.h | 4
-rw-r--r--  src/include/portability/instr_time.h | 4
-rw-r--r--  src/include/postgres.h | 6
-rw-r--r--  src/include/postgres_ext.h | 2
-rw-r--r--  src/include/postmaster/syslogger.h | 2
-rw-r--r--  src/include/regex/regcustom.h | 2
-rw-r--r--  src/include/regex/regex.h | 2
-rw-r--r--  src/include/regex/regguts.h | 10
-rw-r--r--  src/include/snowball/header.h | 2
-rw-r--r--  src/include/storage/block.h | 2
-rw-r--r--  src/include/storage/buf_internals.h | 6
-rw-r--r--  src/include/storage/bufpage.h | 8
-rw-r--r--  src/include/storage/ipc.h | 2
-rw-r--r--  src/include/storage/itemid.h | 2
-rw-r--r--  src/include/storage/itemptr.h | 2
-rw-r--r--  src/include/storage/lock.h | 16
-rw-r--r--  src/include/storage/pg_sema.h | 2
-rw-r--r--  src/include/storage/pg_shmem.h | 2
-rw-r--r--  src/include/storage/pos.h | 2
-rw-r--r--  src/include/storage/proc.h | 4
-rw-r--r--  src/include/storage/relfilenode.h | 2
-rw-r--r--  src/include/storage/s_lock.h | 2
-rw-r--r--  src/include/storage/sinval.h | 4
-rw-r--r--  src/include/storage/sinvaladt.h | 2
-rw-r--r--  src/include/storage/smgr.h | 4
-rw-r--r--  src/include/tcop/dest.h | 12
-rw-r--r--  src/include/tcop/tcopdebug.h | 2
-rw-r--r--  src/include/utils/acl.h | 4
-rw-r--r--  src/include/utils/catcache.h | 8
-rw-r--r--  src/include/utils/datetime.h | 2
-rw-r--r--  src/include/utils/elog.h | 4
-rw-r--r--  src/include/utils/errcodes.h | 2
-rw-r--r--  src/include/utils/guc.h | 4
-rw-r--r--  src/include/utils/hsearch.h | 2
-rw-r--r--  src/include/utils/inet.h | 2
-rw-r--r--  src/include/utils/memutils.h | 6
-rw-r--r--  src/include/utils/palloc.h | 8
-rw-r--r--  src/include/utils/pg_crc.h | 2
-rw-r--r--  src/include/utils/plancache.h | 2
-rw-r--r--  src/include/utils/portal.h | 4
-rw-r--r--  src/include/utils/rel.h | 2
-rw-r--r--  src/include/utils/relcache.h | 2
-rw-r--r--  src/include/utils/resowner.h | 2
-rw-r--r--  src/include/utils/selfuncs.h | 2
-rw-r--r--  src/include/utils/timestamp.h | 2
-rw-r--r--  src/include/utils/tqual.h | 2
-rw-r--r--  src/include/utils/tuplesort.h | 2
-rw-r--r--  src/include/utils/tuplestore.h | 2
-rw-r--r--  src/interfaces/ecpg/include/sqlca.h | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt.h | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/interval.c | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/numeric.c | 2
-rw-r--r--  src/interfaces/ecpg/preproc/parser.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/preproc-init.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-array.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-code100.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-copystdout.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-define.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dynalloc.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dynalloc2.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dyntest.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-indicators.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-alloc.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-descriptor.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-prep.c | 2
-rw-r--r--  src/interfaces/libpq/fe-auth.c | 2
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 24
-rw-r--r--  src/interfaces/libpq/fe-exec.c | 26
-rw-r--r--  src/interfaces/libpq/fe-lobj.c | 2
-rw-r--r--  src/interfaces/libpq/fe-misc.c | 4
-rw-r--r--  src/interfaces/libpq/fe-protocol2.c | 8
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 24
-rw-r--r--  src/interfaces/libpq/fe-secure.c | 8
-rw-r--r--  src/interfaces/libpq/libpq-fe.h | 4
-rw-r--r--  src/interfaces/libpq/pqexpbuffer.c | 8
-rw-r--r--  src/interfaces/libpq/pqexpbuffer.h | 8
-rw-r--r--  src/pl/plperl/plperl.c | 2
-rw-r--r--  src/pl/plpgsql/src/pl_comp.c | 12
-rw-r--r--  src/pl/plpgsql/src/pl_exec.c | 14
-rw-r--r--  src/pl/plpgsql/src/pl_funcs.c | 6
-rw-r--r--  src/pl/plpgsql/src/plpgsql.h | 4
-rw-r--r--  src/pl/plpython/plpython.c | 4
-rw-r--r--  src/pl/tcl/pltcl.c | 2
-rw-r--r--  src/port/chklocale.c | 2
-rw-r--r--  src/port/crypt.c | 26
-rw-r--r--  src/port/exec.c | 2
-rw-r--r--  src/port/getaddrinfo.c | 2
-rw-r--r--  src/port/getopt.c | 2
-rw-r--r--  src/port/getopt_long.c | 2
-rw-r--r--  src/port/inet_aton.c | 4
-rw-r--r--  src/port/memcmp.c | 2
-rw-r--r--  src/port/path.c | 4
-rw-r--r--  src/port/qsort.c | 2
-rw-r--r--  src/port/qsort_arg.c | 2
-rw-r--r--  src/port/snprintf.c | 6
-rw-r--r--  src/port/strlcat.c | 2
-rw-r--r--  src/port/strlcpy.c | 4
-rw-r--r--  src/port/strtol.c | 4
-rw-r--r--  src/port/strtoul.c | 2
-rw-r--r--  src/port/thread.c | 4
-rw-r--r--  src/port/unsetenv.c | 2
-rw-r--r--  src/test/regress/pg_regress.c | 10
-rw-r--r--  src/timezone/localtime.c | 12
-rw-r--r--  src/timezone/pgtz.c | 12
-rw-r--r--  src/tutorial/complex.c | 2
561 files changed, 2793 insertions(+), 2793 deletions(-)
diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c
index b136847923..04301c2b37 100644
--- a/contrib/btree_gist/btree_interval.c
+++ b/contrib/btree_gist/btree_interval.c
@@ -82,7 +82,7 @@ intr2num(const Interval *i)
/*
* INTERVALSIZE should be the actual size-on-disk of an Interval, as shown
- * in pg_type. This might be less than sizeof(Interval) if the compiler
+ * in pg_type. This might be less than sizeof(Interval) if the compiler
* insists on adding alignment padding at the end of the struct.
*/
#define INTERVALSIZE 16
diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c
index 5fe1109d1b..afbfadc1d4 100644
--- a/contrib/cube/cube.c
+++ b/contrib/cube/cube.c
@@ -564,7 +564,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
rt_cube_size(datum_r, &size_r);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -580,7 +580,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
{
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index a48949334d..f59ef295c1 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -636,7 +636,7 @@ dblink_fetch(PG_FUNCTION_ARGS)
"the specified FROM clause rowtype")));
/*
- * fast track when no results. We could exit earlier, but then we'd
+ * fast track when no results. We could exit earlier, but then we'd
* not report error if the result tuple type is wrong.
*/
if (funcctx->max_calls < 1)
@@ -2257,7 +2257,7 @@ dblink_security_check(PGconn *conn, remoteConn *rconn)
}
/*
- * For non-superusers, insist that the connstr specify a password. This
+ * For non-superusers, insist that the connstr specify a password. This
* prevents a password from being picked up from .pgpass, a service file,
* the environment, etc. We don't want the postgres user's passwords
* to be accessible to non-superusers.
diff --git a/contrib/earthdistance/earthdistance.c b/contrib/earthdistance/earthdistance.c
index 4dce1f828e..8aca5d708b 100644
--- a/contrib/earthdistance/earthdistance.c
+++ b/contrib/earthdistance/earthdistance.c
@@ -91,7 +91,7 @@ geo_distance_internal(Point *pt1, Point *pt2)
* distance between the points in miles on earth's surface
*
* If float8 is passed-by-value, the oldstyle version-0 calling convention
- * is unportable, so we use version-1. However, if it's passed-by-reference,
+ * is unportable, so we use version-1. However, if it's passed-by-reference,
* continue to use oldstyle. This is just because we'd like earthdistance
* to serve as a canary for any unintentional breakage of version-0 functions
* with float8 results.
diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c
index 29e08eda66..cbca511759 100644
--- a/contrib/intarray/_int_gist.c
+++ b/contrib/intarray/_int_gist.c
@@ -487,7 +487,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -505,7 +505,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
diff --git a/contrib/ltree/ltree_op.c b/contrib/ltree/ltree_op.c
index 2e6d5367d8..8f898d07c1 100644
--- a/contrib/ltree/ltree_op.c
+++ b/contrib/ltree/ltree_op.c
@@ -612,7 +612,7 @@ ltreeparentsel(PG_FUNCTION_ARGS)
/*
* If the histogram is large enough, see what fraction of it the
* constant is "<@" to, and assume that's representative of the
- * non-MCV population. Otherwise use the default selectivity for the
+ * non-MCV population. Otherwise use the default selectivity for the
* non-MCV population.
*/
selec = histogram_selectivity(&vardata, &contproc,
diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c
index 9d896b84f5..0d66ada91f 100644
--- a/contrib/oid2name/oid2name.c
+++ b/contrib/oid2name/oid2name.c
@@ -430,7 +430,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
}
/*
- * Dump all databases. There are no system objects to worry about.
+ * Dump all databases. There are no system objects to worry about.
*/
void
sql_exec_dumpalldbs(PGconn *conn, struct options * opts)
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 85960700ac..8caf12e76c 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -791,7 +791,7 @@ pgss_memsize(void)
* caller must hold an exclusive lock on pgss->lock
*
* Note: despite needing exclusive lock, it's not an error for the target
- * entry to already exist. This is because pgss_store releases and
+ * entry to already exist. This is because pgss_store releases and
* reacquires lock after failing to find a match; so someone else could
* have made the entry while we waited to get exclusive lock.
*/
diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c
index 0337bfcd4b..beb8702ba8 100644
--- a/contrib/pgcrypto/crypt-des.c
+++ b/contrib/pgcrypto/crypt-des.c
@@ -29,7 +29,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/crypt-gensalt.c b/contrib/pgcrypto/crypt-gensalt.c
index b2ec3da828..9db9163e71 100644
--- a/contrib/pgcrypto/crypt-gensalt.c
+++ b/contrib/pgcrypto/crypt-gensalt.c
@@ -9,7 +9,7 @@
* entirely in crypt_blowfish.c.
*
* Put bcrypt generator also here as crypt-blowfish.c
- * may not be compiled always. -- marko
+ * may not be compiled always. -- marko
*/
#include "postgres.h"
diff --git a/contrib/pgcrypto/fortuna.c b/contrib/pgcrypto/fortuna.c
index 91d461c96f..3f4cdcdc65 100644
--- a/contrib/pgcrypto/fortuna.c
+++ b/contrib/pgcrypto/fortuna.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -53,7 +53,7 @@
/*
* There is some confusion about whether and how to carry forward
- * the state of the pools. Seems like original Fortuna does not
+ * the state of the pools. Seems like original Fortuna does not
* do it, resetting hash after each request. I guess expecting
* feeding to happen more often that requesting. This is absolutely
* unsuitable for pgcrypto, as nothing asynchronous happens here.
@@ -77,7 +77,7 @@
* How many pools.
*
* Original Fortuna uses 32 pools, that means 32'th pool is
- * used not earlier than in 13th year. This is a waste in
+ * used not earlier than in 13th year. This is a waste in
* pgcrypto, as we have very low-frequancy seeding. Here
* is preferable to have all entropy usable in reasonable time.
*
@@ -296,7 +296,7 @@ reseed(FState *st)
}
/*
- * Pick a random pool. This uses key bytes as random source.
+ * Pick a random pool. This uses key bytes as random source.
*/
static unsigned
get_rand_pool(FState *st)
diff --git a/contrib/pgcrypto/fortuna.h b/contrib/pgcrypto/fortuna.h
index b4d7064dec..9b578a8621 100644
--- a/contrib/pgcrypto/fortuna.h
+++ b/contrib/pgcrypto/fortuna.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c
index 35dc652d43..4fbfc9bd2e 100644
--- a/contrib/pgcrypto/imath.c
+++ b/contrib/pgcrypto/imath.c
@@ -21,7 +21,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
@@ -211,7 +211,7 @@ static int s_vcmp(mp_int a, int v);
static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
-/* Unsigned magnitude subtraction. Assumes dc is big enough. */
+/* Unsigned magnitude subtraction. Assumes dc is big enough. */
static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
@@ -2275,7 +2275,7 @@ mp_error_string(mp_result res)
/* }}} */
/*------------------------------------------------------------------------*/
-/* Private functions for internal use. These make assumptions. */
+/* Private functions for internal use. These make assumptions. */
/* {{{ s_alloc(num) */
diff --git a/contrib/pgcrypto/imath.h b/contrib/pgcrypto/imath.h
index 09d0e3e818..00cc799816 100644
--- a/contrib/pgcrypto/imath.h
+++ b/contrib/pgcrypto/imath.h
@@ -20,7 +20,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
diff --git a/contrib/pgcrypto/internal-sha2.c b/contrib/pgcrypto/internal-sha2.c
index acbb50930b..5e45582a88 100644
--- a/contrib/pgcrypto/internal-sha2.c
+++ b/contrib/pgcrypto/internal-sha2.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/internal.c b/contrib/pgcrypto/internal.c
index c17066894a..91223d3fb8 100644
--- a/contrib/pgcrypto/internal.c
+++ b/contrib/pgcrypto/internal.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c
index 7355e76e96..6131314392 100644
--- a/contrib/pgcrypto/mbuf.c
+++ b/contrib/pgcrypto/mbuf.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/mbuf.h b/contrib/pgcrypto/mbuf.h
index aa2b5596ee..1c2c64220d 100644
--- a/contrib/pgcrypto/mbuf.h
+++ b/contrib/pgcrypto/mbuf.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/md5.c b/contrib/pgcrypto/md5.c
index 8083d1f280..9c53ceb02c 100644
--- a/contrib/pgcrypto/md5.c
+++ b/contrib/pgcrypto/md5.c
@@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/md5.h b/contrib/pgcrypto/md5.h
index eb7c620b48..94e459f440 100644
--- a/contrib/pgcrypto/md5.h
+++ b/contrib/pgcrypto/md5.h
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index f4338050a7..082ca3cc63 100644
--- a/contrib/pgcrypto/openssl.c
+++ b/contrib/pgcrypto/openssl.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgcrypto.c b/contrib/pgcrypto/pgcrypto.c
index 796344f866..beb7b43351 100644
--- a/contrib/pgcrypto/pgcrypto.c
+++ b/contrib/pgcrypto/pgcrypto.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgcrypto.h b/contrib/pgcrypto/pgcrypto.h
index b011b06062..8f8554665b 100644
--- a/contrib/pgcrypto/pgcrypto.h
+++ b/contrib/pgcrypto/pgcrypto.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c
index 7963ccc329..95386e3706 100644
--- a/contrib/pgcrypto/pgp-armor.c
+++ b/contrib/pgcrypto/pgp-armor.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-cfb.c b/contrib/pgcrypto/pgp-cfb.c
index e3418e2d8c..8a9b5d3794 100644
--- a/contrib/pgcrypto/pgp-cfb.c
+++ b/contrib/pgcrypto/pgp-cfb.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -90,7 +90,7 @@ pgp_cfb_free(PGP_CFB *ctx)
}
/*
- * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
+ * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
*/
static int
mix_encrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
diff --git a/contrib/pgcrypto/pgp-compress.c b/contrib/pgcrypto/pgp-compress.c
index 2b7d7e861f..73ed58e8f2 100644
--- a/contrib/pgcrypto/pgp-compress.c
+++ b/contrib/pgcrypto/pgp-compress.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c
index e598198dc0..a9e1c6dd17 100644
--- a/contrib/pgcrypto/pgp-decrypt.c
+++ b/contrib/pgcrypto/pgp-decrypt.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-encrypt.c b/contrib/pgcrypto/pgp-encrypt.c
index 47f524e090..4b6afab2cc 100644
--- a/contrib/pgcrypto/pgp-encrypt.c
+++ b/contrib/pgcrypto/pgp-encrypt.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-info.c b/contrib/pgcrypto/pgp-info.c
index a51a553236..c1c7d90cea 100644
--- a/contrib/pgcrypto/pgp-info.c
+++ b/contrib/pgcrypto/pgp-info.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c
index 283946b17d..fb65d47604 100644
--- a/contrib/pgcrypto/pgp-mpi-internal.c
+++ b/contrib/pgcrypto/pgp-mpi-internal.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -147,7 +147,7 @@ bn_to_mpi(mpz_t *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'. Following
* algorihm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm.
*
diff --git a/contrib/pgcrypto/pgp-mpi-openssl.c b/contrib/pgcrypto/pgp-mpi-openssl.c
index f2b25de090..ad6ee3e43d 100644
--- a/contrib/pgcrypto/pgp-mpi-openssl.c
+++ b/contrib/pgcrypto/pgp-mpi-openssl.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -82,7 +82,7 @@ bn_to_mpi(BIGNUM *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'. Following
* algorihm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm.
*
diff --git a/contrib/pgcrypto/pgp-mpi.c b/contrib/pgcrypto/pgp-mpi.c
index 62b0cea9ee..e0910f6ea4 100644
--- a/contrib/pgcrypto/pgp-mpi.c
+++ b/contrib/pgcrypto/pgp-mpi.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c
index b3f7ca165e..af3339efc9 100644
--- a/contrib/pgcrypto/pgp-pgsql.c
+++ b/contrib/pgcrypto/pgp-pgsql.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -93,7 +93,7 @@ add_block_entropy(PX_MD *md, text *data)
}
/*
- * Mix user data into RNG. It is for user own interests to have
+ * Mix user data into RNG. It is for user own interests to have
* RNG state shuffled.
*/
static void
@@ -310,7 +310,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
}
/*
- * Find next word. Handle ',' and '=' as words. Skip whitespace.
+ * Find next word. Handle ',' and '=' as words. Skip whitespace.
* Put word info into res_p, res_len.
* Returns ptr to next word.
*/
diff --git a/contrib/pgcrypto/pgp-pubdec.c b/contrib/pgcrypto/pgp-pubdec.c
index cb32708fee..fd62e3dd32 100644
--- a/contrib/pgcrypto/pgp-pubdec.c
+++ b/contrib/pgcrypto/pgp-pubdec.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-pubenc.c b/contrib/pgcrypto/pgp-pubenc.c
index be9476ac18..d0775cd6c3 100644
--- a/contrib/pgcrypto/pgp-pubenc.c
+++ b/contrib/pgcrypto/pgp-pubenc.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-pubkey.c b/contrib/pgcrypto/pgp-pubkey.c
index 0e2cf09e25..7a9247322e 100644
--- a/contrib/pgcrypto/pgp-pubkey.c
+++ b/contrib/pgcrypto/pgp-pubkey.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-s2k.c b/contrib/pgcrypto/pgp-s2k.c
index 76a5cf891c..211fde04f5 100644
--- a/contrib/pgcrypto/pgp-s2k.c
+++ b/contrib/pgcrypto/pgp-s2k.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp.c b/contrib/pgcrypto/pgp.c
index 23f6552dce..345112bffe 100644
--- a/contrib/pgcrypto/pgp.c
+++ b/contrib/pgcrypto/pgp.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h
index 7860d830c4..92122de130 100644
--- a/contrib/pgcrypto/pgp.h
+++ b/contrib/pgcrypto/pgp.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/px-crypt.c b/contrib/pgcrypto/px-crypt.c
index 2243279972..21837e86f4 100644
--- a/contrib/pgcrypto/px-crypt.c
+++ b/contrib/pgcrypto/px-crypt.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/px-crypt.h b/contrib/pgcrypto/px-crypt.h
index c2460cb9f9..32628aa6bb 100644
--- a/contrib/pgcrypto/px-crypt.h
+++ b/contrib/pgcrypto/px-crypt.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/px-hmac.c b/contrib/pgcrypto/px-hmac.c
index 61e5b0ea3d..a1a57fcd2c 100644
--- a/contrib/pgcrypto/px-hmac.c
+++ b/contrib/pgcrypto/px-hmac.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c
index 59ec32c505..3994fdb145 100644
--- a/contrib/pgcrypto/px.c
+++ b/contrib/pgcrypto/px.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h
index 4137898874..d4e2456d1b 100644
--- a/contrib/pgcrypto/px.h
+++ b/contrib/pgcrypto/px.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/random.c b/contrib/pgcrypto/random.c
index b22e029d2c..e5a2c64693 100644
--- a/contrib/pgcrypto/random.c
+++ b/contrib/pgcrypto/random.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/rijndael.c b/contrib/pgcrypto/rijndael.c
index cf9eca91dc..eac641dffb 100644
--- a/contrib/pgcrypto/rijndael.c
+++ b/contrib/pgcrypto/rijndael.c
@@ -7,12 +7,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
-/* programme of the US National Institute of Standards and Technology. */
+/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
-/* that the originators of the algorithm place on its exploitation. */
+/* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */
@@ -188,7 +188,7 @@ gen_tabs(void)
/* rijndael specification is in big endian format with */
/* bit 0 as the most significant bit. In the remainder */
/* of the specification the bits are numbered from the */
- /* least significant end of a byte. */
+ /* least significant end of a byte. */
for (i = 0; i < 256; ++i)
{
diff --git a/contrib/pgcrypto/rijndael.h b/contrib/pgcrypto/rijndael.h
index e4c4229170..636332a309 100644
--- a/contrib/pgcrypto/rijndael.h
+++ b/contrib/pgcrypto/rijndael.h
@@ -8,12 +8,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
-/* programme of the US National Institute of Standards and Technology. */
+/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
-/* that the originators of the algorithm place on its exploitation. */
+/* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */
diff --git a/contrib/pgcrypto/sha1.c b/contrib/pgcrypto/sha1.c
index 45339a1235..612fd254d5 100644
--- a/contrib/pgcrypto/sha1.c
+++ b/contrib/pgcrypto/sha1.c
@@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/sha1.h b/contrib/pgcrypto/sha1.h
index ae830fccad..4377ef8e0c 100644
--- a/contrib/pgcrypto/sha1.h
+++ b/contrib/pgcrypto/sha1.h
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/sha2.c b/contrib/pgcrypto/sha2.c
index 1938c43ee5..a30bb97111 100644
--- a/contrib/pgcrypto/sha2.c
+++ b/contrib/pgcrypto/sha2.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/sha2.h b/contrib/pgcrypto/sha2.h
index 8d593c60e5..4922743201 100644
--- a/contrib/pgcrypto/sha2.h
+++ b/contrib/pgcrypto/sha2.h
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c
index 02b8e08367..0518f87e4d 100644
--- a/contrib/pgstattuple/pgstattuple.c
+++ b/contrib/pgstattuple/pgstattuple.c
@@ -296,7 +296,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
/*
* To avoid physically reading the table twice, try to do the
- * free-space scan in parallel with the heap scan. However,
+ * free-space scan in parallel with the heap scan. However,
* heap_getnext may find no tuples on a given page, so we cannot
* simply examine the pages returned by the heap scan.
*/
diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c
index 18215969fa..e2c7b2cdd8 100644
--- a/contrib/seg/seg.c
+++ b/contrib/seg/seg.c
@@ -381,7 +381,7 @@ gseg_picksplit(GistEntryVector *entryvec,
rt_seg_size(datum_r, &size_r);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -397,7 +397,7 @@ gseg_picksplit(GistEntryVector *entryvec,
{
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
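
The hunks above touch gseg_picksplit's commentary: items not fixed by the
split vector go to whichever page needs the least enlargement to hold them.
A minimal standalone sketch of that placement rule, using an invented Seg
type and helper rather than the contrib/seg definitions:

    #include <stdio.h>

    typedef struct { float lo, hi; } Seg;  /* invented stand-in type */

    /* Growth of union u's extent if segment s were merged into it. */
    static float
    enlargement(Seg u, Seg s)
    {
        float lo = u.lo < s.lo ? u.lo : s.lo;
        float hi = u.hi > s.hi ? u.hi : s.hi;

        return (hi - lo) - (u.hi - u.lo);
    }

    int
    main(void)
    {
        Seg left = {0.0f, 1.0f}, right = {5.0f, 6.0f}, item = {4.5f, 5.5f};

        /* The item lands on the side whose union grows least. */
        printf("item goes %s\n",
               enlargement(left, item) <= enlargement(right, item)
               ? "left" : "right");        /* prints "right" here */
        return 0;
    }
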
diff --git a/contrib/spi/moddatetime.c b/contrib/spi/moddatetime.c
index 0b4d3ba351..aba2a5571b 100644
--- a/contrib/spi/moddatetime.c
+++ b/contrib/spi/moddatetime.c
@@ -89,7 +89,7 @@ moddatetime(PG_FUNCTION_ARGS)
/*
* This is where we check to see if the field we are supposed to update
- * even exists. The above function must return -1 if name not found?
+ * even exists. The above function must return -1 if name not found?
*/
if (attnum < 0)
ereport(ERROR,
diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c
index 577767d81f..03291fae9d 100644
--- a/contrib/spi/timetravel.c
+++ b/contrib/spi/timetravel.c
@@ -47,17 +47,17 @@ static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans);
/*
* timetravel () --
- * 1. IF an update affects tuple with stop_date eq INFINITY
+ * 1. IF an update affects tuple with stop_date eq INFINITY
* then form (and return) new tuple with start_date eq current date
* and stop_date eq INFINITY [ and update_user eq current user ]
* and all other column values as in new tuple, and insert tuple
* with old data and stop_date eq current date
* ELSE - skip update of tuple.
- * 2. IF a delete affects tuple with stop_date eq INFINITY
+ * 2. IF a delete affects tuple with stop_date eq INFINITY
* then insert the same tuple with stop_date eq current date
* [ and delete_user eq current user ]
* ELSE - skip deletion of tuple.
- * 3. On INSERT, if start_date is NULL then current date will be
+ * 3. On INSERT, if start_date is NULL then current date will be
* inserted, if stop_date is NULL then INFINITY will be inserted.
* [ and insert_user eq current user, update_user and delete_user
* eq NULL ]
diff --git a/contrib/sslinfo/sslinfo.c b/contrib/sslinfo/sslinfo.c
index a8145653c0..1ae2069533 100644
--- a/contrib/sslinfo/sslinfo.c
+++ b/contrib/sslinfo/sslinfo.c
@@ -104,7 +104,7 @@ ssl_client_serial(PG_FUNCTION_ARGS)
* current database encoding if possible. Any invalid characters are
* replaced by question marks.
*
- * Parameter: str - OpenSSL ASN1_STRING structure. Memory management
+ * Parameter: str - OpenSSL ASN1_STRING structure. Memory management
* of this structure is the responsibility of the caller.
*
* Returns Datum, which can be directly returned from a C language SQL
diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c
index b26b42cf7a..aa1abaf2be 100644
--- a/contrib/vacuumlo/vacuumlo.c
+++ b/contrib/vacuumlo/vacuumlo.c
@@ -241,7 +241,7 @@ vacuumlo(char *database, struct _param * param)
PQclear(res);
/*
- * Run the actual deletes in a single transaction. Note that this would
+ * Run the actual deletes in a single transaction. Note that this would
* be a bad idea in pre-7.1 Postgres releases (since rolling back a table
* delete used to cause problems), but it should be safe now.
*/
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 38f770f92b..6a00326d4a 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -21,7 +21,7 @@
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -427,7 +427,7 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
@@ -494,7 +494,7 @@ nocachegetattr(HeapTuple tuple,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
@@ -590,7 +590,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
/*
* cmin and cmax are now both aliases for the same field, which
- * can in fact also be a combo command id. XXX perhaps we should
+ * can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
@@ -750,7 +750,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
len += data_len;
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
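
The "one chunk" remark above means the HeapTupleData management struct and
the tuple body come from a single palloc0, with t_data pointing just past
the header. A sketch of that layout (backend-context code, illustrative of
the technique rather than a quotation of heap_form_tuple; tuple and len are
assumed to be in scope as in that function):

    /* One allocation covers the management struct plus the tuple body... */
    tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
    /* ...and the body begins HEAPTUPLESIZE bytes into the chunk. */
    tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
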
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index e106e64fd3..72c15f9ee3 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
- * on outside storage. This should be improved someday.
+ * on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
@@ -314,7 +314,7 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
@@ -381,7 +381,7 @@ nocache_index_getattr(IndexTuple tup,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 13a09dd9b3..7440d31c74 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -166,7 +166,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 5cc66c3434..9685259bbc 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -432,7 +432,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
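
A hypothetical validator in the style this contract requires (backend-context
code; the option name and accepted values are invented, not an actual
reloption):

    /* Must raise an error on bad input; the default must itself pass. */
    static void
    validate_mode_option(char *value)
    {
        if (value == NULL ||
            (strcmp(value, "fast") != 0 && strcmp(value, "safe") != 0))
            elog(ERROR, "invalid value for \"mode\" option: \"%s\"",
                 value ? value : "(null)");
    }

add_string_reloption would then be passed this function pointer together
with a default, such as "safe", that satisfies it.
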
@@ -499,7 +499,7 @@ add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val,
* Note that this is not responsible for determining whether the options
* are valid, but it does check that namespaces for all the options given are
* listed in validnsps. The NULL namespace is always valid and need not be
- * explicitly listed. Passing a NULL pointer means that only the NULL
+ * explicitly listed. Passing a NULL pointer means that only the NULL
* namespace is valid.
*
* Both oldOptions and the result are text arrays (or NULL for "default"),
@@ -772,7 +772,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index f25cd860f3..2806d6a8ff 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -5,7 +5,7 @@
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 01e8589c31..83c22224d9 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -507,7 +507,7 @@ TupleDescInitEntry(TupleDesc desc,
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 71f6cb3741..0edd88e404 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -662,7 +662,7 @@ processPendingPage(BuildAccumulator *accum, DatumArray *da,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -715,7 +715,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -809,7 +809,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* guarantees that inserted row(s) will not continue on the next page.
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 0802d689ef..a468bc0bb5 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -1293,10 +1293,10 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
@@ -1368,10 +1368,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index f23e96ab2c..0ee5ce9118 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -201,7 +201,7 @@ newScanKey(IndexScanDesc scan)
{
/*
* extractQueryFn signals that nothing can match, so we can just
- * set isVoidRes flag. No need to examine any more keys.
+ * set isVoidRes flag. No need to examine any more keys.
*/
so->isVoidRes = true;
break;
@@ -210,9 +210,9 @@ newScanKey(IndexScanDesc scan)
if (entryValues == NULL || nEntryValues == 0)
{
/*
- * extractQueryFn signals that everything matches. This would
+ * extractQueryFn signals that everything matches. This would
* require a full scan, which we can't do, but perhaps there is
- * another scankey that provides a restriction to use. So we keep
+ * another scankey that provides a restriction to use. So we keep
* going and check only at the end.
*/
continue;
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 5eadb2a286..1c123787b9 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -191,7 +191,7 @@ gistbuildCallback(Relation index,
/*
* Since we already have the index relation locked, we call gistdoinsert
* directly. Normal access method calls dispatch through gistinsert,
- * which locks the relation for write. This is the right thing to do if
+ * which locks the relation for write. This is the right thing to do if
* you're inserting single tups, but not when you're initializing the
* whole index at once.
*
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index d4dae502f6..8c4e14e4d5 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -72,7 +72,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns. In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -161,7 +161,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len. This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
@@ -194,7 +194,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need to consider only columns starting
+ * the previously-computed union keys. We need to consider only columns starting
* at attno.
*/
static void
@@ -292,7 +292,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
/*
* There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side. We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
@@ -426,7 +426,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
sv->spl_rdatum = v->spl_rattr[attno];
/*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing. Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2(&giststate->picksplitFn[attno],
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 1f35d4ee75..f224d40d25 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -448,7 +448,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty. Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
@@ -464,7 +464,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far. The next iteration of this loop
* will compare the next column.
*/
}
@@ -631,7 +631,7 @@ gistcheckpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 0433b47ae1..1a45124663 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -528,7 +528,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
stats->std.estimated_count = info->estimated_count;
/*
- * XXX the above is wrong if index is partial. Would it be OK to just
+ * XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index ead2821f5c..5772531bf6 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -76,7 +76,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
- * tuples by (expected) bucket number. However, such a sort is useless
+ * tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
@@ -510,7 +510,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
@@ -641,7 +641,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 3242e2713e..66628736a1 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -11,7 +11,7 @@
* $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.59 2009/06/11 14:48:53 momjian Exp $
*
* NOTES
- * These functions are stored in pg_amproc. For each operator class
+ * These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
@@ -163,7 +163,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
@@ -241,7 +241,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
- * direction from the goal of parallelism. I did what I could. Rotates
+ * direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
@@ -275,7 +275,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but it does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
- * bits (affecting 32 bits is enough). The original hash function had
+ * bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@@ -296,7 +296,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
- * Returns a uint32 value. Every bit of the key affects every bit of
+ * Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
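
The comments above contrast mix() with final() and explain why rotates beat
shifts here. A compilable sketch of the rotate-and-mix idea; the rotation
counts follow Bob Jenkins' public-domain lookup3 mix(), which these comments
describe, but treat the exact constants as illustrative rather than a copy
of the backend's code:

    #include <stdint.h>
    #include <stdio.h>

    /* Rotate left; kinder to the top and bottom bits than a shift. */
    #define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

    /* Reversibly mix three 32-bit states. */
    #define mix(a, b, c) \
    do { \
        a -= c;  a ^= rot(c, 4);   c += b; \
        b -= a;  b ^= rot(a, 6);   a += c; \
        c -= b;  c ^= rot(b, 8);   b += a; \
        a -= c;  a ^= rot(c, 16);  c += b; \
        b -= a;  b ^= rot(a, 19);  a += c; \
        c -= b;  c ^= rot(b, 4);   b += a; \
    } while (0)

    int
    main(void)
    {
        uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 42;

        mix(a, b, c);
        printf("%08x %08x %08x\n", a, b, c);
        return 0;
    }
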
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 71e31e1cb9..1fd59942b3 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -81,7 +81,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@@ -90,12 +90,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@@ -158,7 +158,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@@ -254,7 +254,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
- * is the one we return to the caller. Both of these are correctly
+ * is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
@@ -285,7 +285,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -314,7 +314,7 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -495,7 +495,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
- * Initialize a new bitmap page. The metapage has a write-lock upon
+ * Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 600b0128fb..ea68a3abd3 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -52,7 +52,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@@ -139,7 +139,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@@ -347,7 +347,7 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
@@ -380,7 +380,7 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
- * calls to occur. This ensures that the smgr level has the right idea of
+ * calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
@@ -516,9 +516,9 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Note: deadlock should be impossible here. Our own backend could only be
* holding bucket sharelocks due to stopped indexscans; those will not
* block other holders of the page-zero lock, who are only interested in
- * acquiring bucket sharelocks themselves. Exclusive bucket locks are
+ * acquiring bucket sharelocks themselves. Exclusive bucket locks are
* only taken here and in hashbulkdelete, and neither of these operations
- * needs any additional locks to complete. (If, due to some flaw in this
+ * needs any additional locks to complete. (If, due to some flaw in this
* reasoning, we manage to deadlock anyway, it's okay to error out; the
* index will be left in a consistent state.)
*/
@@ -560,7 +560,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
@@ -618,7 +618,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
@@ -656,7 +656,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
+ * date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
@@ -897,7 +897,7 @@ _hash_splitbucket(Relation rel,
/*
* We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 153ac6926c..db46942df4 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -251,7 +251,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
- * false. Else, return true and set the hashso_curpos for the
+ * false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 00e7dc5f5d..dc8ab59d96 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
- * bucket splits may occur during the index build. In that case we'd
+ * bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@@ -52,7 +52,7 @@ _h_spoolinit(Relation index, uint32 num_buckets)
hspool->index = index;
/*
- * Determine the bitmask for hash code values. Since there are currently
+ * Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
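
The hunk ends before the computation it refers to, so for orientation: one
way to build such a mask is to round num_buckets up to a power of two and
subtract one. A standalone sketch with an invented helper (the backend uses
its own log2-style routine):

    #include <stdint.h>
    #include <stdio.h>

    /* Smallest k such that 2^k >= n (invented stand-in helper). */
    static uint32_t
    ceil_log2(uint32_t n)
    {
        uint32_t k = 0;

        while (((uint32_t) 1 << k) < n)
            k++;
        return k;
    }

    int
    main(void)
    {
        uint32_t num_buckets = 10;
        uint32_t mask = (((uint32_t) 1) << ceil_log2(num_buckets)) - 1;

        /* 0xf here: hash_code & mask selects the bucket range. */
        printf("mask = 0x%x\n", mask);
        return 0;
    }
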
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index ba1d6fbbf5..40fd398167 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
- * existing hash keys in the page. This is the appropriate place to start
+ * existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
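
The contract described above (first entry with hashkey >= hash_value, else
the max offset plus one) is a classic lower-bound binary search. A plain-C,
array-based sketch of the same invariant; the backend version walks page
item offsets instead:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Index of the first key >= target, or n if no such key exists. */
    static size_t
    lower_bound_u32(const uint32_t *keys, size_t n, uint32_t target)
    {
        size_t lo = 0, hi = n;            /* answer lies in [lo, hi] */

        while (lo < hi)
        {
            size_t mid = lo + (hi - lo) / 2;

            if (keys[mid] < target)
                lo = mid + 1;             /* answer is right of mid */
            else
                hi = mid;                 /* mid might be the answer */
        }
        return lo;
    }

    int
    main(void)
    {
        uint32_t keys[] = {3, 7, 7, 12, 40};

        printf("%zu\n", lower_bound_u32(keys, 5, 7));    /* prints 1 */
        printf("%zu\n", lower_bound_u32(keys, 5, 100));  /* prints 5 */
        return 0;
    }
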
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 89cf249447..9dcbe15658 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -106,7 +106,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
- * might go into pages we already scanned. To guarantee consistent
+ * might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
@@ -114,7 +114,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
@@ -243,7 +243,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1640,7 +1640,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
- * show the latest version of the row. Note that it will not be changed
+ * show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@@ -1758,7 +1758,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously. Hence callers should look
* only at XMAX_INVALID.
@@ -1831,7 +1831,7 @@ FreeBulkInsertState(BulkInsertState bistate)
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -1852,7 +1852,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If the object id of this tuple has already been assigned, trust the
- * caller. There are a couple of ways this can happen. At initial db
+ * caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
@@ -2138,10 +2138,10 @@ l1:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to delete
+ * other subxacts of this backend. It is legal for us to delete
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
- * exclusive). We don't bother changing the on-disk hint bits
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -2205,7 +2205,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@@ -2306,7 +2306,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -2402,7 +2402,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on a
- * different page. But we must compute the list before obtaining buffer
+ * different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
@@ -2490,10 +2490,10 @@ l2:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to update
+ * other subxacts of this backend. It is legal for us to update
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
- * exclusive). We don't bother changing the on-disk hint bits
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -2583,7 +2583,7 @@ l2:
* If the toaster needs to be activated, OR if the new tuple will not fit
* on the same page as the old, then we need to release the content lock
* (but not the pin!) on the old tuple's buffer while we are off doing
- * TOAST and/or table-file-extension work. We must mark the old tuple to
+ * TOAST and/or table-file-extension work. We must mark the old tuple to
* show that it's already being updated, else other processes may try to
* update it themselves.
*
@@ -2648,7 +2648,7 @@ l2:
* there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
- * buffer locks on both old and new pages. To avoid deadlock against
+ * buffer locks on both old and new pages. To avoid deadlock against
* some other backend trying to get the same two locks in the other
* order, we must be consistent about the order we get the locks in.
* We use the rule "lock the lower-numbered page of the relation
@@ -2696,7 +2696,7 @@ l2:
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
- * has enough space for the new tuple. If they are the same buffer, only
+ * has enough space for the new tuple. If they are the same buffer, only
* one pin is held.
*/
@@ -2704,7 +2704,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
- * to do a HOT update. Check if any of the index columns have been
+ * to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
@@ -2722,13 +2722,13 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
- * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(page, xid);
@@ -2892,7 +2892,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
+ * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
* single heap_deform_tuple call on each tuple, instead? But that doesn't
* work for system columns ...
*/
@@ -2915,7 +2915,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
- * same logical value. But we should be OK as long as there are no false
+ * same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@@ -2971,7 +2971,7 @@ HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -3053,7 +3053,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* waiter gets the tuple, potentially leading to indefinite starvation of
* some waiters. The possibility of share-locking makes the problem much
* worse --- a steady stream of share-lockers can easily block an exclusive
- * locker forever. To provide more reliable semantics about who gets a
+ * locker forever. To provide more reliable semantics about who gets a
* tuple-level lock first, we use the standard lock manager. The protocol
* for waiting for a tuple-level lock is really
* LockTuple()
@@ -3061,7 +3061,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* mark tuple as locked by me
* UnlockTuple()
* When there are multiple waiters, arbitration of who is to get the lock next
- * is provided by LockTuple(). However, at most one tuple-level lock will
+ * is provided by LockTuple(). However, at most one tuple-level lock will
* be held or awaited per backend at any time, so we don't risk overflow
* of the lock table. Note that incoming share-lockers are required to
* do LockTuple as well, if there is any conflict, to ensure that they don't
@@ -3203,7 +3203,7 @@ l3:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to lock the
+ * other subxacts of this backend. It is legal for us to lock the
* tuple in either case, however. We don't bother changing the
* on-disk hint bits since we are about to overwrite the xmax
* altogether.
@@ -3361,7 +3361,7 @@ l3:
/*
* Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
* as running, but it finished before
- * TransactionIdIsInProgress() got to run. Treat it like
+ * TransactionIdIsInProgress() got to run. Treat it like
* there's no locker in the tuple.
*/
}
@@ -3397,8 +3397,8 @@ l3:
MarkBufferDirty(*buffer);
/*
- * XLOG stuff. You might think that we don't need an XLOG record because
- * there is no state change worth restoring after a crash. You would be
+ * XLOG stuff. You might think that we don't need an XLOG record because
+ * there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@@ -3460,7 +3460,7 @@ l3:
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
- * of this function in Postgres are extremely limited. Nonetheless we
+ * of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@@ -3614,7 +3614,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* When we release shared lock, it's possible for someone else to change
* xmax before we get the lock back, so repeat the check after acquiring
- * exclusive lock. (We don't need this pushup for xmin, because only
+ * exclusive lock. (We don't need this pushup for xmin, because only
* VACUUM could be interested in changing an existing tuple's xmin, and
* there's only one VACUUM allowed on a table at a time.)
*/
@@ -3755,7 +3755,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
- * If we reached end of scan, rs_inited will now be false. We must
+ * If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@@ -3866,7 +3866,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-freeze operation. Caller must already
+ * Perform XLogInsert for a heap-freeze operation. Caller must already
* have modified the buffer and marked it dirty.
*/
XLogRecPtr
@@ -3909,7 +3909,7 @@ log_heap_freeze(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-update operation. Caller must already
+ * Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@@ -4032,7 +4032,7 @@ log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
* for writing the page to disk after calling this routine.
*
* Note: all current callers build pages in private memory and write them
- * directly to smgr, rather than using bufmgr. Therefore there is no need
+ * directly to smgr, rather than using bufmgr. Therefore there is no need
* to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
* the critical section.
*
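
A rough sketch of the pattern this comment describes, assuming the page lives only in private memory until written; every name below is an invented stand-in, not the real smgr/XLOG API:

    #include <stdlib.h>

    #define BLCKSZ 8192
    typedef unsigned long RecPtr;

    static RecPtr xlog_insert_page_image(const void *pg, long len) { (void) pg; (void) len; return 1; }
    static void   smgr_write(long blkno, const void *pg)           { (void) blkno; (void) pg; }

    static void write_freshly_built_page(long blkno)
    {
        char *page = calloc(1, BLCKSZ);   /* private memory; bufmgr never sees it */

        /* ... fill the page with tuples ... */

        /* Whole-page WAL record: no buffer ID is passed and no
         * MarkBufferDirty is needed, since no shared buffer is involved. */
        RecPtr lsn = xlog_insert_page_image(page, BLCKSZ);
        (void) lsn;                       /* real code would stamp the page LSN */

        smgr_write(blkno, page);          /* caller handles writing/fsync */
        free(page);
    }
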
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 7ed8612357..d3771a5862 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -116,7 +116,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
- * enough there). In that case, the page will be pinned and locked only once.
+ * enough there). In that case, the page will be pinned and locked only once.
*
* We normally use FSM to help us find free space. However,
* if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
@@ -133,7 +133,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
- * insertions into the same relation. This keeps a pin on the current
+ * insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@@ -186,7 +186,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
- * on, as cached in the BulkInsertState or relcache entry. If that
+ * on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
* loop around and retry multiple times. (To insure this isn't an infinite
@@ -218,7 +218,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
@@ -279,7 +279,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
- * any) and prepare to look elsewhere. We don't care which order we
+ * any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
@@ -321,7 +321,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* XXX This does an lseek - rather expensive - but at the moment it is the
- * only way to accurately determine how many blocks are in a relation. Is
+ * only way to accurately determine how many blocks are in a relation. Is
* it worth keeping an accurate file length in shared memory someplace,
* rather than relying on the kernel to do it for us?
*/
@@ -341,7 +341,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Release the file-extension lock; it's now OK for someone else to extend
- * the relation some more. Note that we cannot release this lock before
+ * the relation some more. Note that we cannot release this lock before
* we have buffer lock on the new page, or we risk a race condition
* against vacuumlazy.c --- see comments therein.
*/
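
The retry logic the comments above describe might look roughly like this; a sketch with invented stubs (fsm_search, fsm_report, free_space), not the hio.c source:

    #include <stddef.h>

    typedef long BlkNo;
    #define INVALID_BLK (-1L)

    static BlkNo  cached_target = INVALID_BLK;        /* BulkInsertState/relcache stand-in */
    static BlkNo  fsm_search(size_t need)             { (void) need; return INVALID_BLK; }
    static void   fsm_report(BlkNo b, size_t actual)  { (void) b; (void) actual; }
    static BlkNo  nblocks(void)                       { return 0; }
    static size_t free_space(BlkNo b)                 { (void) b; return 0; }
    static BlkNo  extend_by_one_page(void)            { return nblocks(); }

    static BlkNo get_page_for_tuple(size_t len)
    {
        BlkNo target = cached_target;

        if (target == INVALID_BLK)
            target = fsm_search(len);       /* FSM info may be stale */
        if (target == INVALID_BLK && nblocks() > 0)
            target = nblocks() - 1;         /* FSM knows nothing: try last page */

        while (target != INVALID_BLK)
        {
            size_t avail = free_space(target);
            if (avail >= len)
                return target;              /* found room */
            fsm_report(target, avail);      /* correct the FSM so the retry */
            target = fsm_search(len);       /* loop cannot spin forever */
        }
        return extend_by_one_page();        /* give up and extend the relation */
    }
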
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 71ea689d0e..6dc67e3894 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -92,7 +92,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer. It's
* unlikely to be *seriously* wrong, though, since reading either pd_lower
- * or pd_upper is probably atomic. Avoiding taking a lock seems more
+ * or pd_upper is probably atomic. Avoiding taking a lock seems more
* important than sometimes getting a wrong answer in what is after all
* just a heuristic estimate.
*/
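
A minimal sketch of that lockless heuristic, assuming 16-bit loads are atomic; the header type is an invented stand-in:

    #include <stddef.h>

    typedef struct { unsigned short pd_lower, pd_upper; } PageHdr;

    /* Each 16-bit read is assumed atomic, but the pair may be mutually
     * inconsistent, so the answer can be slightly bogus (acceptable for
     * a heuristic). */
    static size_t unlocked_free_space(volatile PageHdr *ph)
    {
        unsigned short lower = ph->pd_lower;
        unsigned short upper = ph->pd_upper;
        return (upper > lower) ? (size_t) (upper - lower) : 0;
    }

    static int worth_trying_to_prune(volatile PageHdr *ph, size_t threshold)
    {
        return unlocked_free_space(ph) < threshold;
    }
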
@@ -134,7 +134,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*
* If redirect_move is set, we remove redirecting line pointers by
* updating the root line pointer to point directly to the first non-dead
- * tuple in the chain. NOTE: eliminating the redirect changes the first
+ * tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple's effective CTID, and is therefore unsafe except within VACUUM FULL.
* The only reason we support this capability at all is that by using it,
* VACUUM FULL need not cope with LP_REDIRECT items at all; which seems a
@@ -333,8 +333,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made. Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
@@ -381,7 +381,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
- * any pruning occurs. So we have to be able to reap them
+ * any pruning occurs. So we have to be able to reap them
* separately from chain-pruning. (Note that
* HeapTupleHeaderIsHotUpdated will never return true for an
* XMIN_INVALID tuple, so this code will work even when there were
@@ -562,7 +562,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/*
* If the root entry had been a normal tuple, we are deleting it, so
- * count it in the result. But changing a redirect (even to DEAD
+ * count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
@@ -692,7 +692,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void
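
The deferred bookkeeping described in that comment could be sketched like this (stub types; the array bound is illustrative, not the real MaxHeapTuplesPerPage derivation):

    typedef unsigned short OffNum;
    #define MAX_TUPLES_PER_PAGE 291    /* illustrative for 8K pages */

    typedef struct
    {
        int    nredirected, ndead, nunused;
        OffNum redirected[MAX_TUPLES_PER_PAGE * 2];   /* (from, to) pairs */
        OffNum nowdead[MAX_TUPLES_PER_PAGE];
        OffNum nowunused[MAX_TUPLES_PER_PAGE];
    } PruneSketch;

    /* Nothing on the page changes here; the changes are applied later,
     * inside a critical section, from these arrays. */
    static void record_redirect(PruneSketch *p, OffNum from, OffNum to)
    {
        p->redirected[p->nredirected * 2]     = from;
        p->redirected[p->nredirected * 2 + 1] = to;
        p->nredirected++;
    }

    static void record_dead(PruneSketch *p, OffNum off)   { p->nowdead[p->ndead++] = off; }
    static void record_unused(PruneSketch *p, OffNum off) { p->nowunused[p->nunused++] = off; }
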
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 4a8fe8d36e..35cae4f664 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
- * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
- * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
- * is encountered. That helps to keep the memory usage down. At the end,
+ * is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
- * for deadness using OldestXmin is not exact. In such a case we might
+ * for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
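
To make the A-before-B and B-before-A cases concrete, here is a toy, array-backed sketch; the real code uses hash tables, and every name below is invented:

    typedef struct { unsigned blk; unsigned short off; } Tid;
    static int tid_eq(Tid a, Tid b) { return a.blk == b.blk && a.off == b.off; }

    #define SLOTS 64                          /* invented bound */
    static struct { Tid old_ctid; int used; } unresolved[SLOTS];          /* A waiting for B */
    static struct { Tid old_tid, new_tid; int used; } old_to_new[SLOTS];  /* B seen first */

    /* Tuple A points (via its old ctid) at B.  If B was already written,
     * the mapping yields B's new TID and A can be emitted at once, and
     * the entry is dropped to keep memory use down; otherwise A is
     * parked in unresolved[] until B shows up. */
    static int resolve_a(Tid a_points_to, Tid *b_new_tid_out)
    {
        for (int i = 0; i < SLOTS; i++)
            if (old_to_new[i].used && tid_eq(old_to_new[i].old_tid, a_points_to))
            {
                *b_new_tid_out = old_to_new[i].new_tid;
                old_to_new[i].used = 0;       /* remove as soon as resolved */
                return 1;
            }
        for (int i = 0; i < SLOTS; i++)
            if (!unresolved[i].used)
            {
                unresolved[i].old_ctid = a_points_to;
                unresolved[i].used = 1;
                break;
            }
        return 0;                             /* A waits for B */
    }
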
@@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
- * in the whole table. Note that if we do fail halfway through a CLUSTER,
+ * in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@@ -339,7 +339,7 @@ rewrite_heap_tuple(RewriteState state,
* very-old xmin or xmax, so that future VACUUM effort can be saved.
*
* Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
- * be given a pointer to a tuple in a disk buffer. It happens though that
+ * be given a pointer to a tuple in a disk buffer. It happens though that
* we can get the right things to happen by passing InvalidBuffer for the
* buffer.
*/
@@ -543,7 +543,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
- * Insert a tuple to the new relation. This has to track heap_insert
+ * Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c
index 6d7c42bcea..d58c6180a8 100644
--- a/src/backend/access/heap/syncscan.c
+++ b/src/backend/access/heap/syncscan.c
@@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
- * to keep them synchronized to reduce the overall I/O needed. The goal is
+ * to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
- * tables in progress at any time. Therefore we just keep the scan positions
+ * tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
@@ -245,7 +245,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
- * so that number is passed in rather than computing it again. The result
+ * so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
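
A toy version of that small LRU of scan positions, with a fixed array in place of the shared-memory list; all names are invented:

    typedef struct { unsigned relid; long blkno; int used; } ScanLoc;
    #define NLOCS 8                        /* "a few large scans at a time" */
    static ScanLoc locs[NLOCS];

    /* Report where our scan is, so other backends can read right behind us. */
    static void report_location(unsigned relid, long blkno)
    {
        for (int i = 0; i < NLOCS; i++)
            if (locs[i].used && locs[i].relid == relid)
            {
                locs[i].blkno = blkno;
                return;
            }
        locs[0] = (ScanLoc) { relid, blkno, 1 };   /* evict a slot (LRU in real code) */
    }

    /* Join an in-progress scan if any; result < relnblocks, as promised above. */
    static long get_start_location(unsigned relid, long relnblocks)
    {
        for (int i = 0; i < NLOCS; i++)
            if (locs[i].used && locs[i].relid == relid && locs[i].blkno < relnblocks)
                return locs[i].blkno;
        return 0;                                  /* no valid location found */
    }
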
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 8bf6ccb7d8..e0d8f2802e 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -545,7 +545,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
- * PLAIN storage). If necessary, we'll push it out as a new
+ * PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@@ -686,7 +686,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
- * inline. But skip this if there's no toast table to push them to.
+ * inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@@ -1310,7 +1310,7 @@ toast_save_datum(Relation rel, Datum value, int options)
heap_insert(toastrel, toasttup, mycid, options, NULL);
/*
- * Create the index entry. We cheat a little here by not using
+ * Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
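
The externalize-until-it-fits loop might be sketched as follows, with invented helpers standing in for heap_compute_data_size and friends:

    static size_t tuple_size(void)              { return 0; }   /* stub */
    static int    find_largest_candidate(void)  { return -1; }  /* attr no, or -1 */
    static void   push_to_toast_table(int attr) { (void) attr; }
    static int    have_toast_table = 1;

    static void shrink_tuple(size_t maxDataLen)
    {
        /* skip entirely if there's no toast table to push values to */
        while (have_toast_table && tuple_size() > maxDataLen)
        {
            int attr = find_largest_candidate();   /* storage 'x'/'e', still inline */
            if (attr < 0)
                break;                             /* nothing left to externalize */
            push_to_toast_table(attr);
        }
    }
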
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 5662bb0108..692ff4c229 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -41,7 +41,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
- * routine does that. (We can't do it in the AM because index_endscan
+ * routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@@ -336,7 +336,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
- * Currently we do not support non-index-based scans here. (In principle
+ * Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 32623965c7..813ebe9d78 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -126,7 +126,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@@ -287,7 +287,7 @@ index_beginscan_internal(Relation indexRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
- * cannot change). To restart the scan without changing keys, pass NULL
+ * cannot change). To restart the scan without changing keys, pass NULL
* for the key array.
*
* Note that this is also called when first starting an indexscan;
@@ -375,7 +375,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -400,7 +400,7 @@ index_restrpos(IndexScanDesc scan)
* index_getnext - get the next heap tuple from a scan
*
* The result is the next heap tuple satisfying the scan keys and the
- * snapshot, or NULL if no more matching tuples exist. On success,
+ * snapshot, or NULL if no more matching tuples exist. On success,
* the buffer containing the heap tuple is pinned (the pin will be dropped
* at the next index_getnext or index_endscan).
*
@@ -438,7 +438,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
- * earlier member. Must still hold pin on current heap page.
+ * earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@@ -556,7 +556,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* The xmin should match the previous xmax value, else chain is
- * broken. (Note: this test is not optional because it protects
+ * broken. (Note: this test is not optional because it protects
* us against the case where the prior chain member's xmax aborted
* since we looked at it.)
*/
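
A self-contained sketch of the chain-validity rule above (invented types; the real code compares TransactionIds with TransactionIdEquals):

    typedef unsigned TxId;
    typedef struct { TxId xmin, xmax; int next; } TupSketch;   /* next: -1 ends chain */

    /* Follow a HOT chain, validating each hop.  The equality test is not
     * optional: if the prior member's xmax aborted after we looked at it,
     * the slot we step to could already hold an unrelated tuple. */
    static int walk_hot_chain(const TupSketch *tuples, int start)
    {
        int cur = start;
        while (tuples[cur].next >= 0)
        {
            int nxt = tuples[cur].next;
            if (tuples[nxt].xmin != tuples[cur].xmax)
                break;                 /* chain is broken; stop here */
            cur = nxt;
        }
        return cur;                    /* last validated member */
    }
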
@@ -758,7 +758,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -792,7 +792,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index db70993d96..f544a72fc7 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -25,7 +25,7 @@
* Although any negative int32 (except INT_MIN) is acceptable for reporting
* "<", and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
- * That could overflow and give the wrong answer. Also, one must not
+ * That could overflow and give the wrong answer. Also, one must not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
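
This hazard is easy to demonstrate standalone; the following toy program (not PostgreSQL code) shows why "a - b" is unsafe for 32-bit keys:

    #include <limits.h>
    #include <stdio.h>

    /* Deliberately buggy: the subtraction overflows for 32-bit inputs
     * (undefined behavior, typically wrapping to the wrong sign). */
    static int bad_cmp(int a, int b)  { return a - b; }

    /* Safe pattern: returns only -1/0/+1, so callers may negate freely
     * (and INT_MIN, whose negation overflows, is never returned). */
    static int good_cmp(int a, int b) { return (a < b) ? -1 : (a > b) ? 1 : 0; }

    int main(void)
    {
        int a = INT_MAX, b = -1;                   /* a > b: both should be positive */
        printf("bad_cmp:  %d\n", bad_cmp(a, b));   /* wrong sign on typical hardware */
        printf("good_cmp: %d\n", good_cmp(a, b));  /* +1 */
        return 0;
    }
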
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index e8cdadfebd..fd17476b76 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -112,7 +112,7 @@ top:
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be the
* right place for the key we want to insert. In this case, we need to
- * move right in the tree. See Lehman and Yao for an excruciatingly
+ * move right in the tree. See Lehman and Yao for an excruciatingly
* precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE);
@@ -169,7 +169,7 @@ top:
* is the first tuple on the next page.
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
- * we must wait for to see if it commits a conflicting tuple. If an actual
+ * we must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just ereport().
*/
static TransactionId
@@ -361,7 +361,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* If the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -373,7 +373,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
- * where the new tuple could be inserted. The caller should hold an
+ * where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@@ -438,7 +438,7 @@ _bt_findinsertloc(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
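
As a sketch, the "get tired" rule amounts to the loop below (stubs invented; per the comment, the stop probability is 0.01, implemented in the real code as a random() threshold):

    #include <stdlib.h>

    static int page_has_room(void) { return 0; }   /* stub */
    static int step_right(void)    { return 0; }   /* stub: 0 when no right sibling */

    static void find_page_among_equal_keys(void)
    {
        while (!page_has_room())
        {
            if (rand() % 100 == 0)     /* "got tired": accept a page split */
                break;
            if (!step_right())
                break;                 /* rightmost page for this key range */
        }
    }
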
@@ -539,7 +539,7 @@ _bt_findinsertloc(Relation rel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* The locking interactions in this code are critical. You should
@@ -1018,7 +1018,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@@ -1240,7 +1240,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -1476,7 +1476,7 @@ _bt_checksplitloc(FindSplitData *state,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1499,7 +1499,7 @@ _bt_insert_parent(Relation rel,
* Here we have to do something Lehman and Yao don't talk about: deal with
* a root split and construction of a new root. If our stack is empty
* then we have just split a node on what had been the root level when we
- * descended the tree. If it was still the root then we perform a
+ * descended the tree. If it was still the root then we perform a
* new-root construction. If it *wasn't* the root anymore, search to find
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
@@ -1649,7 +1649,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
* order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 9fa29776df..8cfdcf24a0 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -12,7 +12,7 @@
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.113.2.1 2010/08/29 19:33:29 tgl Exp $
*
* NOTES
- * Postgres btree pages look like ordinary relation pages. The opaque
+ * Postgres btree pages look like ordinary relation pages. The opaque
* data at high addresses includes pointers to left and right siblings
* and flag data describing page state. The first page in a btree, page
* zero, is special -- it stores meta-information describing the tree.
@@ -56,7 +56,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
@@ -74,7 +74,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@@ -191,7 +191,7 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
- * over again. (Is that really true? But it's hardly worth trying
+ * over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
@@ -256,7 +256,7 @@ _bt_getroot(Relation rel, int access)
CacheInvalidateRelcache(rel);
/*
- * swap root write lock for read lock. There is no danger of anyone
+ * swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
@@ -324,7 +324,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -423,7 +423,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
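
Sketched with an invented header struct, the all-zero defense reduces to a pd_upper test (PageIsNew in the real headers checks pd_upper == 0, which a valid initialized page never has):

    typedef struct { unsigned short pd_lower, pd_upper; } PageHdr;

    static int page_is_new(const PageHdr *ph) { return ph->pd_upper == 0; }

    static int check_page_sketch(const PageHdr *ph)
    {
        if (page_is_new(ph))
            return 0;   /* real code: ereport(ERROR, ... zero page ...) */
        return 1;       /* header already validated on read */
    }
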
@@ -449,7 +449,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
- * blkno == P_NEW means to get an unallocated index page. The page
+ * blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@@ -480,7 +480,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check that
- * the page is still free. (For example, an already-free page could
+ * the page is still free. (For example, an already-free page could
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
@@ -647,7 +647,7 @@ _bt_page_recyclable(Page page)
/*
* Delete item(s) from a btree page.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -704,7 +704,7 @@ _bt_delitems(Relation rel, Buffer buf,
/*
* The target-offsets array is not in the buffer, but pretend that it
- * is. When XLogInsert stores the whole buffer, the offsets array
+ * is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
@@ -869,7 +869,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it, check
+ * We can never delete rightmost pages nor root pages. While at it, check
* that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
@@ -941,7 +941,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
- * facilities not available in recovery mode). Instead, just set
+ * facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
@@ -976,7 +976,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
- * if so, give up. (Right now, that should never happen, since page
+ * if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
@@ -1005,7 +1005,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
lbuf = InvalidBuffer;
/*
- * Next write-lock the target page itself. It should be okay to take just
+ * Next write-lock the target page itself. It should be okay to take just
* a write lock not a superexclusive lock, since no scans would stop on an
* empty page.
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 1ffc91f0de..12da48b430 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -151,7 +151,7 @@ btbuild(PG_FUNCTION_ARGS)
/*
* If we are reindexing a pre-existing index, it is critical to send out a
* relcache invalidation SI message to ensure all backends re-read the
- * index metapage. We expect that the caller will ensure that happens
+ * index metapage. We expect that the caller will ensure that happens
* (typically as a side effect of updating index stats, but it must happen
* even if the stats don't change!)
*/
@@ -258,11 +258,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
- * Yes, remember it for later. (We'll deal with all such tuples
+ * Yes, remember it for later. (We'll deal with all such tuples
* at once right before leaving the index page.) The test for
* numKilled overrun is not just paranoia: if the caller reverses
* direction in the indexscan then the same item might get entered
- * multiple times. It's not worth trying to optimize that, so we
+ * multiple times. It's not worth trying to optimize that, so we
* don't detect it, but instead just forget any excess entries.
*/
if (so->killedItems == NULL)
@@ -814,7 +814,7 @@ restart:
LockBufferForCleanup(buf);
/*
- * Check whether we need to recurse back to earlier pages. What we
+ * Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.
@@ -913,7 +913,7 @@ restart:
* since there can be no other transactions scanning the index. Note
* that we will only recycle the current page and not any parent pages
* that _bt_pagedel might have recursed to; this seems reasonable in
- * the name of simplicity. (Trying to do otherwise would mean we'd
+ * the name of simplicity. (Trying to do otherwise would mean we'd
* have to sort the list of recyclable pages we're building.)
*/
if (ndel && info->vacuum_full)
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 78b4700062..adb13cee23 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -49,7 +49,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
@@ -226,7 +226,7 @@ _bt_moveright(Relation rel,
* (or leaf keys > given scankey when nextkey is true).
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -358,7 +358,7 @@ _bt_compare(Relation rel,
/*
* The scan key is set up with the attribute number associated with each
* term in the key. It is important that, if the index is multi-key, the
- * scan contain the first k key attributes, and that they be in order. If
+ * scan contain the first k key attributes, and that they be in order. If
* you think about how multi-key ordering works, you'll understand why
* this is.
*
@@ -397,7 +397,7 @@ _bt_compare(Relation rel,
/*
* The sk_func needs to be passed the index value as left arg and
* the sk_argument as right arg (they might be of different
- * types). Since it is convenient for callers to think of
+ * types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item, we have
* to flip the sign of the comparison result. (Unless it's a DESC
* column, in which case we *don't* flip the sign.)
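
A compact sketch of the sign-flip rule, with an invented flag and comparison stand-in:

    #define DESC_FLAG 0x1                   /* stand-in for SK_BT_DESC */

    /* Support-function stand-in: compares (index datum, search key). */
    static int support_cmp(int datum, int key)
    {
        return (datum < key) ? -1 : (datum > key) ? 1 : 0;
    }

    /* Callers think of the result as (search key vs datum), so the sign
     * is flipped; on a DESC column the index order is itself reversed,
     * and the two flips cancel. */
    static int compare_one_column(int datum, int key, unsigned flags)
    {
        int result = support_cmp(datum, key);

        if (!(flags & DESC_FLAG))
            result = -result;
        return result;
    }
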
@@ -425,7 +425,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@@ -478,7 +478,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
- * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
+ * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@@ -641,7 +641,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
+ * scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
@@ -758,7 +758,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Find first item >= scankey, then back up one to arrive at last
- * item < scankey. (Note: this positioning strategy is only used
+ * item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
*/
@@ -807,7 +807,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTGreaterEqualStrategyNumber:
/*
- * Find first item >= scankey. (This is only used for forward
+ * Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
@@ -878,7 +878,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed. (If
+ * the last item on this page. Adjust the starting offset if needed. (If
* this results in an offset before the first item or after the last one,
* _bt_readpage will report no items found, and then we'll step to the
* next page as needed.)
@@ -1161,7 +1161,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
* than the walk-right case because of the possibility that the page
* to our left splits while we are in flight to it, plus the
* possibility that the page we were on gets deleted after we leave
- * it. See nbtree/README for details.
+ * it. See nbtree/README for details.
*/
for (;;)
{
@@ -1255,7 +1255,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
- * because half-dead pages are still in the sibling chain. Caller
+ * because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
@@ -1281,7 +1281,7 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page (there
+ * It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the deleted
* one's keyspace, so stepping left from it will take us where we
* want to be.
@@ -1325,7 +1325,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index d45b699fa2..29832ad528 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -7,7 +7,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -42,11 +42,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
- * steps of the build. After completing the index build, we can just sync
+ * steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
- * just after the index build. However, it is clearly not sufficient if the
+ * just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
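
The logging decision could be sketched as below; the names are invented stand-ins for the archiving test and smgrimmedsync():

    static int  wal_archiving_active(void)               { return 0; }  /* stub GUC test */
    static void wal_log_page(long blkno, const void *pg) { (void) blkno; (void) pg; }
    static void sync_whole_file(void)                    { /* smgrimmedsync() stand-in */ }

    static void finish_index_build(long nblocks, const void *pages[])
    {
        if (wal_archiving_active())
        {
            /* PITR/replication in use: another machine must be able to
             * reconstruct the index, so log the completed pages */
            for (long b = 0; b < nblocks; b++)
                wal_log_page(b, pages[b]);
        }
        else
            sync_whole_file();   /* like a CHECKPOINT just after the build */
    }
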
@@ -87,7 +87,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -156,7 +156,7 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead)
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
- * unique index actually requires two BTSpool objects. We expect that the
+ * unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
*/
@@ -295,7 +295,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
}
/*
- * Now write the page. We say isTemp = true even if it's not a temp
+ * Now write the page. We say isTemp = true even if it's not a temp
* index, because there's no need for smgr to schedule an fsync for this
* write; we'll do it ourselves before ending the build.
*/
@@ -421,14 +421,14 @@ _bt_sortaddtup(Page page,
* A leaf page being built looks like:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp0 linp1 linp2 ... |
+ * | PageHeaderData | linp0 linp1 linp2 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
* | ^ last |
* | |
* +-------------+------------------------------------+
- * | | itemN ... |
+ * | | itemN ... |
* +-------------+------------------+-----------------+
* | ... item3 item2 item1 | "special space" |
* +--------------------------------+-----------------+
@@ -489,9 +489,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
"or use full text indexing.")));
/*
- * Check to see if page is "full". It's definitely full if the item won't
+ * Check to see if page is "full". It's definitely full if the item won't
* fit. Otherwise, compare to the target freespace derived from the
- * fillfactor. However, we must put at least two items on each page, so
+ * fillfactor. However, we must put at least two items on each page, so
* disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
@@ -564,7 +564,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
}
/*
- * Write out the old page. We never need to touch it again, so we can
+ * Write out the old page. We never need to touch it again, so we can
* free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 1649307251..e53c223918 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -94,7 +94,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@@ -164,7 +164,7 @@ _bt_freestack(BTStack stack)
* _bt_preprocess_keys() -- Preprocess scan keys
*
* The caller-supplied search-type keys (in scan->keyData[]) are copied to
- * so->keyData[] with possible transformation. scan->numberOfKeys is
+ * so->keyData[] with possible transformation. scan->numberOfKeys is
* the number of input keys, so->numberOfKeys gets the number of output
* keys (possibly less, never greater).
*
@@ -175,7 +175,7 @@ _bt_freestack(BTStack stack)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover how many scan keys
- * must be satisfied to continue the scan. It also attempts to eliminate
+ * must be satisfied to continue the scan. It also attempts to eliminate
* redundant keys and detect contradictory keys. (If the index opfamily
* provides incomplete sets of cross-type operators, we may fail to detect
* redundant or contradictory keys, but we can survive that.)
@@ -207,7 +207,7 @@ _bt_freestack(BTStack stack)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -267,12 +267,12 @@ _bt_preprocess_keys(IndexScanDesc scan)
{
/*
* We treat all btree operators as strict (even if they're not so
- * marked in pg_proc). This means that it is impossible for an
+ * marked in pg_proc). This means that it is impossible for an
* operator condition with a NULL comparison constant to succeed, and
* we can reject it right away.
*
* However, we now also support "x IS NULL" clauses as search
- * conditions, so in that case keep going. The planner has not filled
+ * conditions, so in that case keep going. The planner has not filled
* in any particular strategy in this case, so set it to
* BTEqualStrategyNumber --- we can treat IS NULL as an equality
* operator for purposes of search strategy.
@@ -426,7 +426,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@@ -545,7 +545,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
+ * may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -686,7 +686,7 @@ _bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns
@@ -698,7 +698,7 @@ _bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
@@ -877,7 +877,7 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
@@ -890,8 +890,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQFWD) &&
@@ -981,7 +981,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
- * one of the "must match" subset. On a forward scan,
+ * one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQBKWD) &&
@@ -994,7 +994,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1012,7 +1012,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
+ * But it can never match. If all the earlier row comparison
* columns are required for the scan direction, we can stop the
* scan, because there can't be another tuple that will succeed.
*/
@@ -1076,7 +1076,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Tuple fails this qual. If it's a required qual for the current
* scan direction, then we can conclude no further tuples will pass,
- * either. Note we have to look at the deciding column, not
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1102,7 +1102,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DEAD status (which is only a hint).
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@@ -1188,8 +1188,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index d132d6bdee..d1771e0424 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -126,7 +126,7 @@ forget_matching_deletion(RelFileNode node, BlockNumber delblk)
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
- * eye on. Is it worth changing just on general principles? See also the
+ * eye on. Is it worth changing just on general principles? See also the
* notes in btree_xlog_split().
*/
static void
@@ -177,7 +177,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
pageop->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) metapg)->pd_lower =
@@ -359,7 +359,7 @@ btree_xlog_split(bool onleft, bool isroot,
* Reconstruct left (original) sibling if needed. Note that this code
* ensures that the items remaining on the left page are in the correct
* item number order, but it does not reproduce the physical order they
- * would have had. Is this worth changing? See also _bt_restore_page().
+ * would have had. Is this worth changing? See also _bt_restore_page().
*/
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
@@ -379,7 +379,7 @@ btree_xlog_split(bool onleft, bool isroot,
/*
* Remove the items from the left page that were copied to the
- * right page. Also remove the old high key, if any. (We must
+ * right page. Also remove the old high key, if any. (We must
* remove everything before trying to insert any items, else
* we risk not having enough space.)
*/
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 8544725abb..0348c5155a 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -11,15 +11,15 @@
* log can be broken into relatively small, independent segments.
*
* XLOG interactions: this module generates an XLOG record whenever a new
- * CLOG page is initialized to zeroes. Other writes of CLOG come from
+ * CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. For synchronous
+ * on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
- * XLOG that far and satisfy the WAL rule. We don't have to worry about this
+ * XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
@@ -104,7 +104,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* in the tree of xid. In various cases nsubxids may be zero.
*
* lsn must be the WAL location of the commit record when recording an async
- * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
+ * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
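
The async-commit rule in the header comment above boils down to a per-page LSN high-water mark: remember the largest commit-record LSN that touched each buffered CLOG page, and flush XLOG at least that far before the page itself is written. A minimal sketch with invented names (the real bookkeeping lives in slru.c's shared state):

#include <stdint.h>

typedef uint64_t SketchLSN;             /* stand-in for XLogRecPtr */

#define NUM_PAGE_BUFFERS 8
static SketchLSN page_max_lsn[NUM_PAGE_BUFFERS];

/* on async commit: record the commit record's LSN for the page's buffer */
static void
note_async_commit(int slotno, SketchLSN commit_lsn)
{
    if (commit_lsn > page_max_lsn[slotno])
        page_max_lsn[slotno] = commit_lsn;
}

/* before writing the buffer out, satisfy "write xlog before data" */
static void
write_clog_page(int slotno, void (*xlog_flush)(SketchLSN))
{
    xlog_flush(page_max_lsn[slotno]);   /* flush WAL through that LSN */
    /* ... then physically write the page ... */
}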
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 025219e092..7462a4ed99 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -4,15 +4,15 @@
* PostgreSQL multi-transaction-log manager
*
* The pg_multixact manager is a pg_clog-like manager that stores an array
- * of TransactionIds for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. A share-locked tuple stores a
+ * of TransactionIds for each MultiXactId. It is a fundamental part of the
+ * shared-row-lock implementation. A share-locked tuple stores a
* MultiXactId in its Xmax, and a transaction that needs to wait for the
* tuple to be unlocked can sleep on the potentially-several TransactionIds
* that compose the MultiXactId.
*
* We use two SLRU areas, one for storing the offsets at which the data
* starts for each MultiXactId in the other one. This trick allows us to
- * store variable length arrays of TransactionIds. (We could alternatively
+ * store variable length arrays of TransactionIds. (We could alternatively
* use one area containing counts and TransactionIds, with valid MultiXactId
* values pointing at slots containing counts; but that way seems less robust
* since it would get completely confused if someone inquired about a bogus
@@ -32,7 +32,7 @@
*
* Like clog.c, and unlike subtrans.c, we have to preserve state across
* crashes and ensure that MXID and offset numbering increases monotonically
- * across a crash. We do this in the same way as it's done for transaction
+ * across a crash. We do this in the same way as it's done for transaction
* IDs: the WAL record is guaranteed to contain evidence of every MXID we
* could need to worry about, and we just make sure that at the end of
* replay, the next-MXID and next-offset counters are at least as large as
@@ -63,13 +63,13 @@
/*
- * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
+ * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* used everywhere else in Postgres.
*
* Note: because both MultiXactOffsets and TransactionIds are 32 bits and
* wrap around at 0xFFFFFFFF, MultiXact page numbering also wraps around at
* 0xFFFFFFFF/MULTIXACT_*_PER_PAGE, and segment numbering at
- * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
+ * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
* explicit notice of that fact in this module, except when comparing segment
* and page numbers in TruncateMultiXact
* (see MultiXact{Offset,Member}PagePrecedes).
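
Plugging in the defaults makes the wraparound points concrete: with BLCKSZ = 8192 and 4-byte offsets, a page holds 2048 entries, so offset page numbering wraps at 0xFFFFFFFF/2048 and segment numbering 32 pages later. (The comment's SLRU_SEGMENTS_PER_PAGE appears to mean slru.c's SLRU_PAGES_PER_SEGMENT, defined as 32 further down in this patch.) A quick check of the arithmetic:

#include <stdio.h>

#define BLCKSZ 8192
#define MULTIXACT_OFFSETS_PER_PAGE (BLCKSZ / 4) /* 4-byte MultiXactOffset */
#define SLRU_PAGES_PER_SEGMENT 32

int
main(void)
{
    unsigned last_page = 0xFFFFFFFFu / MULTIXACT_OFFSETS_PER_PAGE;
    unsigned last_segment = last_page / SLRU_PAGES_PER_SEGMENT;

    /* prints: pages wrap at 2097151, segments at 65535 */
    printf("pages wrap at %u, segments at %u\n", last_page, last_segment);
    return 0;
}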
@@ -100,7 +100,7 @@ static SlruCtlData MultiXactMemberCtlData;
#define MultiXactMemberCtl (&MultiXactMemberCtlData)
/*
- * MultiXact state shared across all backends. All this state is protected
+ * MultiXact state shared across all backends. All this state is protected
* by MultiXactGenLock. (We also use MultiXactOffsetControlLock and
* MultiXactMemberControlLock to guard accesses to the two sets of SLRU
* buffers. For concurrency's sake, we avoid holding more than one of these
@@ -343,7 +343,7 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid)
/*
* Determine which of the members of the MultiXactId are still running,
* and use them to create a new one. (Removing dead members is just an
- * optimization, but a useful one. Note we have the same race condition
+ * optimization, but a useful one. Note we have the same race condition
* here as above: j could be 0 at the end of the loop.)
*/
newMembers = (TransactionId *)
@@ -408,7 +408,7 @@ MultiXactIdIsRunning(MultiXactId multi)
/*
* This could be made faster by having another entry point in procarray.c,
- * walking the PGPROC array only once for all the members. But in most
+ * walking the PGPROC array only once for all the members. But in most
* cases nmembers should be small enough that it doesn't much matter.
*/
for (i = 0; i < nmembers; i++)
@@ -527,7 +527,7 @@ MultiXactIdSetOldestMember(void)
* The value to set is the oldest of nextMXact and all the valid per-backend
* OldestMemberMXactId[] entries. Because of the locking we do, we can be
* certain that no subsequent call to MultiXactIdSetOldestMember can set
- * an OldestMemberMXactId[] entry older than what we compute here. Therefore
+ * an OldestMemberMXactId[] entry older than what we compute here. Therefore
* there is no live transaction, now or later, that can be a member of any
* MultiXactId older than the OldestVisibleMXactId we compute here.
*/
@@ -698,7 +698,7 @@ CreateMultiXactId(int nxids, TransactionId *xids)
* heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
* an XLOG record that must follow ours. The normal LSN interlock between
* the data page and that XLOG record will ensure that our XLOG record
- * reaches disk first. If the SLRU members/offsets data reaches disk
+ * reaches disk first. If the SLRU members/offsets data reaches disk
* sooner than the XLOG record, we do not care because we'll overwrite it
* with zeroes unless the XLOG record is there too; see notes at top of
* this file.
@@ -805,7 +805,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
* GetNewMultiXactId
* Get the next MultiXactId.
*
- * Also, reserve the needed amount of space in the "members" area. The
+ * Also, reserve the needed amount of space in the "members" area. The
* starting offset of the reserved space is returned in *offset.
*
* This may generate XLOG records for expansion of the offsets and/or members
@@ -870,7 +870,7 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* until after file extension has succeeded!
*
* We don't care about MultiXactId wraparound here; it will be handled by
- * the next iteration. But note that nextMXact may be InvalidMultiXactId
+ * the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
* be prepared to deal with that. Similarly, nextOffset may be zero, but
* we won't use that as the actual start offset of the next multixact.
@@ -934,7 +934,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* SLRU data if we did try to examine it.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. We just
+ * seen, it implies undetected ID wraparound has occurred. We just
* silently assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
@@ -950,7 +950,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Acquire the shared lock just long enough to grab the current counter
- * values. We may need both nextMXact and nextOffset; see below.
+ * values. We may need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -968,12 +968,12 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter as
+ * and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there is
- * no next one to look at. In this case the nextOffset value we just
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in: that
@@ -984,11 +984,11 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
- * have this case. We sleep for a bit and try again.
+ * have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
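
The corner cases above collapse into a small amount of control flow: use nextOffset when this is the newest multixact, otherwise read the next multixact's starting offset, retrying while it is still zero. A condensed sketch, with read_offset() standing in for the offsets-SLRU lookup, the sleep reduced to a comment, and the offset-one ambiguity of case 3 elided:

#include <stdint.h>

/* stand-in for reading multixact 'multi's starting offset from the SLRU */
extern uint32_t read_offset(uint32_t multi);

static uint32_t
find_members_end(uint32_t multi, uint32_t next_mxact, uint32_t next_offset)
{
    if (multi + 1 == next_mxact)
        return next_offset;             /* case 1: we are the latest one */

    for (;;)
    {
        uint32_t next_start = read_offset(multi + 1);

        if (next_start != 0)
            return next_start;          /* normal case: next one's start */

        /* case 2: next multixact still being filled in -- sleep, retry */
    }
}

/* member count is then: find_members_end(...) - our own starting offset */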
@@ -1401,7 +1401,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info,
/*
* Initialization of shared memory for MultiXact. We use two SLRU areas,
- * thus double memory. Also, reserve space for the shared MultiXactState
+ * thus double memory. Also, reserve space for the shared MultiXactState
* struct and the per-backend MultiXactId arrays (two of those, too).
*/
Size
@@ -1461,7 +1461,7 @@ MultiXactShmemInit(void)
/*
* This func must be called ONCE on system install. It creates the initial
- * MultiXact segments. (The MultiXacts directories are assumed to have been
+ * MultiXact segments. (The MultiXacts directories are assumed to have been
* created by initdb, and MultiXactShmemInit must have been called already.)
*/
void
@@ -1534,7 +1534,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog)
* This must be called ONCE during postmaster or standalone-backend startup.
*
* StartupXLOG has already established nextMXact/nextOffset by calling
- * MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we
+ * MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact. Note that we
* may already have replayed WAL data into the SLRU files.
*
* We don't need any locks here, really; the SLRU locks are taken
@@ -1558,7 +1558,7 @@ StartupMultiXact(void)
MultiXactOffsetCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current offsets page. See notes in
+ * Zero out the remainder of the current offsets page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MultiXactIdToOffsetEntry(multi);
@@ -1588,7 +1588,7 @@ StartupMultiXact(void)
MultiXactMemberCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current members page. See notes in
+ * Zero out the remainder of the current members page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MXOffsetToMemberEntry(offset);
@@ -1661,7 +1661,7 @@ CheckPointMultiXact(void)
/*
* Truncate the SLRU files. This could be done at any time, but
- * checkpoint seems a reasonable place for it. There is one exception: if
+ * checkpoint seems a reasonable place for it. There is one exception: if
* we are called during xlog recovery, then shared->latest_page_number
* isn't valid (because StartupMultiXact hasn't been called yet) and so
* SimpleLruTruncate would get confused. It seems best not to risk
@@ -1794,7 +1794,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
* Remove all MultiXactOffset and MultiXactMember segments before the oldest
* ones still of interest.
*
- * This is called only during checkpoints. We assume no more than one
+ * This is called only during checkpoints. We assume no more than one
* backend does this at a time.
*
* XXX do we have any issues with needing to checkpoint here?
@@ -1855,7 +1855,7 @@ TruncateMultiXact(void)
return;
/*
- * We need to determine where to truncate MultiXactMember. If we found a
+ * We need to determine where to truncate MultiXactMember. If we found a
* valid oldest MultiXactId, read its starting offset; otherwise we use
* the nextOffset value we saved above.
*/
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 5bf91c8aa5..97e27247ec 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
- * must be held to examine or modify any shared state. A process that is
+ * must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@@ -34,7 +34,7 @@
* could have happened while we didn't have the lock).
*
* As with the regular buffer manager, it is possible for another process
- * to re-dirty a page that is currently being written out. This is handled
+ * to re-dirty a page that is currently being written out. This is handled
* by re-setting the page's page_dirty flag.
*
*
@@ -73,7 +73,7 @@
* segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
*
* Note: this file currently assumes that segment file names will be four
- * hex digits. This sets a lower bound on the segment size (64K transactions
+ * hex digits. This sets a lower bound on the segment size (64K transactions
* for 32-bit TransactionIds).
*/
#define SLRU_PAGES_PER_SEGMENT 32
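
The 64K lower bound quoted above is just name-format arithmetic: four hex digits allow at most 0x10000 distinct segment files, so covering the full 32-bit TransactionId space forces each segment to span at least 2^32 / 2^16 transactions. As a quick check:

#include <stdio.h>

int
main(void)
{
    unsigned long long xid_space = 1ULL << 32;      /* 32-bit TransactionIds */
    unsigned long long max_segments = 1ULL << 16;   /* "0000" .. "FFFF" */

    /* minimum transactions per segment: prints 65536 */
    printf("%llu\n", xid_space / max_segments);
    return 0;
}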
@@ -113,7 +113,7 @@ typedef struct SlruFlushData
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict. The
+ * worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
@@ -499,7 +499,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
- * the write). However, we *do* attempt a fresh write even if the page
+ * the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
@@ -597,7 +597,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* In a crash-and-restart situation, it's possible for us to receive
* commands to set the commit status of transactions whose bits are in
* already-truncated segments of the commit log (see notes in
- * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
+ * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
* where the file doesn't exist, and return zeroes instead.
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@@ -1128,7 +1128,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
- * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
+ * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 9c74e995db..5e5b638752 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -5,7 +5,7 @@
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
- * nested transactions implementation. A main transaction has a parent
+ * nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
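
Because pg_subtrans stores only child-to-parent links, resolving a subtransaction to its top-level ancestor is an upward walk that stops when the parent is InvalidTransactionId. A minimal sketch of that walk, with the real subtrans.c lookup declared here as a stand-in:

#include <stdint.h>

typedef uint32_t TransactionId;
#define InvalidTransactionId ((TransactionId) 0)

/* stand-in for subtrans.c's parent lookup */
extern TransactionId SubTransGetParent(TransactionId xid);

/* climb child-to-parent links until we reach the main transaction */
static TransactionId
topmost_parent(TransactionId xid)
{
    TransactionId parent = SubTransGetParent(xid);

    while (parent != InvalidTransactionId)
    {
        xid = parent;                   /* a main xact's parent is Invalid */
        parent = SubTransGetParent(xid);
    }
    return xid;
}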
@@ -186,7 +186,7 @@ SUBTRANSShmemInit(void)
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
- * since slru.c would create it on first write anyway. But we may as well
+ * since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 43f7c54d1d..85254d788e 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -149,7 +149,7 @@ TransactionIdDidCommit(TransactionId transactionId)
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
- * zeroed. Since this case should not happen under normal conditions, it
+ * zeroed. Since this case should not happen under normal conditions, it
* seems reasonable to emit a WARNING for it.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
@@ -305,7 +305,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2)
{
/*
* If either ID is a permanent XID then we can just do unsigned
- * comparison. If both are normal, do a modulo-2^32 comparison.
+ * comparison. If both are normal, do a modulo-2^32 comparison.
*/
int32 diff;
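
The modulo-2^32 comparison above is the standard wraparound-safe trick: subtract the two XIDs in unsigned 32-bit arithmetic and test the sign of the result reinterpreted as signed, so each XID "precedes" the 2^31 IDs ahead of it on the circle. A self-contained sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* wraparound-safe "id1 precedes id2" for normal (non-permanent) XIDs */
static bool
xid_precedes(uint32_t id1, uint32_t id2)
{
    int32_t diff = (int32_t) (id1 - id2);   /* modulo-2^32 distance */

    return diff < 0;
}

int
main(void)
{
    /* 4294967290 "precedes" 5 once the counter has wrapped around */
    printf("%d\n", xid_precedes(4294967290u, 5u));  /* prints 1 */
    return 0;
}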
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 3be0a38cb2..f7f1a81deb 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -425,7 +425,7 @@ LockGXact(const char *gid, Oid user)
/*
* Note: it probably would be possible to allow committing from
* another database; but at the moment NOTIFY is known not to work and
- * there may be some other issues as well. Hence disallow until
+ * there may be some other issues as well. Hence disallow until
* someone gets motivated to make it work.
*/
if (MyDatabaseId != gxact->proc.databaseId)
@@ -983,7 +983,7 @@ EndPrepare(GlobalTransaction gxact)
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
- * the write --- then, WAL replay should repair the inconsistency. The
+ * the write --- then, WAL replay should repair the inconsistency. The
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
@@ -1021,7 +1021,7 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks MyProc
+ * Mark the prepared transaction as valid. As soon as xact.c marks MyProc
* as not running our XID (which it will do immediately after this
* function returns), others can commit/rollback the xact.
*
@@ -1241,7 +1241,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it can be recycled
- * properly later. It is still locked by our XID so it won't go away yet.
+ * properly later. It is still locked by our XID so it won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
*/
@@ -1435,7 +1435,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
- * to open its state file. We handle this by special-casing ENOENT
+ * to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
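
Special-casing ENOENT as described above is a small pattern: attempt the open, and treat a missing file as benign only after re-checking that the GXACT has indeed gone away. A sketch under those assumptions (gxact_still_valid() is invented for illustration):

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>

/* invented stand-in: re-verify the gxact under the appropriate lock */
extern bool gxact_still_valid(const char *gid);

static int
open_state_file(const char *path, const char *gid)
{
    int fd = open(path, O_RDONLY);

    if (fd < 0 && errno == ENOENT && !gxact_still_valid(gid))
        return -2;                  /* benign: GXACT was deleted under us */

    return fd;                      /* >= 0 is success; -1 a real error */
}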
@@ -1515,7 +1515,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* We throw away any prepared xacts with main XID beyond nextXid --- if any
* are present, it suggests that the DBA has done a PITR recovery to an
- * earlier point in time without cleaning out pg_twophase. We dare not
+ * earlier point in time without cleaning out pg_twophase. We dare not
* try to recover such prepared xacts since they likely depend on database
* state that doesn't exist now.
*
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index e3895ba8c3..c91d4bb294 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -114,20 +114,20 @@ GetNewTransactionId(bool isSubXact)
/*
* Now advance the nextXid counter. This must not happen until after we
* have successfully completed ExtendCLOG() --- if that routine fails, we
- * want the next incoming transaction to try it again. We cannot assign
+ * want the next incoming transaction to try it again. We cannot assign
* more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
* We must store the new XID into the shared ProcArray before releasing
- * XidGenLock. This ensures that every active XID older than
+ * XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
- * might see a partially-set xid here. But holding both locks at once
+ * might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
* Note that readers of PGPROC xid fields should be careful to fetch the
@@ -238,7 +238,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
/*
* We'll start complaining loudly when we get within 10M transactions of
- * the stop point. This is kind of arbitrary, but if you let your gas
+ * the stop point. This is kind of arbitrary, but if you let your gas
* gauge get down to 1% of full, would you be looking for the next gas
* station? We need to be fairly liberal about this number because there
* are lots of scenarios where most transactions are done by automatic
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 8491716932..aaa96e29a0 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -478,7 +478,7 @@ GetCurrentSubTransactionId(void)
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
- * for read-only purposes (ie, as a snapshot validity cutoff). See
+ * for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
CommandId
@@ -565,7 +565,7 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
/*
* We always say that BootstrapTransactionId is "not my transaction ID"
- * even when it is (ie, during bootstrap). Along with the fact that
+ * even when it is (ie, during bootstrap). Along with the fact that
* transam.c always treats BootstrapTransactionId as already committed,
* this causes the tqual.c routines to see all tuples as committed, which
* is what we need during bootstrap. (Bootstrap mode only inserts tuples,
@@ -706,7 +706,7 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
- * insulate AbortTransaction from out-of-memory scenarios. Like
+ * insulate AbortTransaction from out-of-memory scenarios. Like
* ErrorContext, we set it up with slow growth rate and a nonzero minimum
* size, so that space will be reserved immediately.
*/
@@ -809,7 +809,7 @@ AtSubStart_ResourceOwner(void)
Assert(s->parent != NULL);
/*
- * Create a resource owner for the subtransaction. We make it a child of
+ * Create a resource owner for the subtransaction. We make it a child of
* the immediate parent's resource owner.
*/
s->curTransactionOwner =
@@ -829,7 +829,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*
* This is exported only to support an ugly hack in VACUUM FULL.
*/
@@ -869,7 +869,7 @@ RecordTransactionCommit(void)
/*
* If we didn't create XLOG entries, we're done here; otherwise we
- * should flush those entries the same as a commit record. (An
+ * should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
* assigned is a sequence advance record due to nextval() --- we want
* to flush that to disk before reporting commit.)
@@ -890,7 +890,7 @@ RecordTransactionCommit(void)
BufmgrCommit();
/*
- * Mark ourselves as within our "commit critical section". This
+ * Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
* pg_clog. Without this, it is possible for the checkpoint to set
* REDO after the XLOG record but fail to flush the pg_clog update to
@@ -898,7 +898,7 @@ RecordTransactionCommit(void)
* crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
- * RecordTransactionAbort. That's because loss of a transaction abort
+ * RecordTransactionAbort. That's because loss of a transaction abort
* is noncritical; the presumption would be that it aborted, anyway.
*
* It's safe to change the inCommit flag of our own backend without
@@ -943,7 +943,7 @@ RecordTransactionCommit(void)
* Check if we want to commit asynchronously. If the user has set
* synchronous_commit = off, and we're not doing cleanup of any non-temp
* rels nor committing any command that wanted to force sync commit, then
- * we can defer flushing XLOG. (We must not allow asynchronous commit if
+ * we can defer flushing XLOG. (We must not allow asynchronous commit if
* there are any non-temp tables to be deleted, because we might delete
* the files before the COMMIT record is flushed to disk. We do allow
* asynchronous commit if all to-be-deleted tables are temporary though,
@@ -1178,7 +1178,7 @@ AtSubCommit_childXids(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@@ -1195,7 +1195,7 @@ RecordTransactionAbort(bool isSubXact)
/*
* If we haven't been assigned an XID, nobody will care whether we aborted
- * or not. Hence, we're done in that case. It does not matter if we have
+ * or not. Hence, we're done in that case. It does not matter if we have
* rels to delete (note that this routine is not responsible for actually
* deleting 'em). We cannot have any child XIDs, either.
*/
@@ -1211,7 +1211,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
- * crash would be that we aborted, anyway. For the same reason, we don't
+ * crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@@ -1367,7 +1367,7 @@ AtSubAbort_childXids(void)
/*
* We keep the child-XID arrays in TopTransactionContext (see
- * AtSubCommit_childXids). This means we'd better free the array
+ * AtSubCommit_childXids). This means we'd better free the array
* explicitly at abort to avoid leakage.
*/
if (s->childXids != NULL)
@@ -1517,7 +1517,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
- * Advertise it in the proc array. We assume assignment of
+ * Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@@ -1905,7 +1905,7 @@ PrepareTransaction(void)
XactLastRecEnd.xrecoff = 0;
/*
- * Let others know about no transaction in progress by me. This has to be
+ * Let others know about no transaction in progress by me. This has to be
* done *after* the prepared transaction has been marked valid, else
* someone may think it is unlocked and recyclable.
*/
@@ -1914,7 +1914,7 @@ PrepareTransaction(void)
/*
* This is all post-transaction cleanup. Note that if an error is raised
* here, it's too late to abort the transaction. This should be just
- * noncritical resource releasing. See notes in CommitTransaction.
+ * noncritical resource releasing. See notes in CommitTransaction.
*/
CallXactCallbacks(XACT_EVENT_PREPARE);
@@ -2078,7 +2078,7 @@ AbortTransaction(void)
ProcArrayEndTransaction(MyProc, latestXid);
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering. We can skip all of it if the transaction failed before
* creating a resource owner.
*/
@@ -2311,7 +2311,7 @@ CommitTransactionCommand(void)
/*
* Here we were in a perfectly good transaction block but the user
- * told us to ROLLBACK anyway. We have to abort the transaction
+ * told us to ROLLBACK anyway. We have to abort the transaction
* and then clean up.
*/
case TBLOCK_ABORT_PENDING:
@@ -2331,7 +2331,7 @@ CommitTransactionCommand(void)
/*
* We were just issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
+ * Start a subtransaction. (DefineSavepoint already did
* PushTransaction, so as to have someplace to put the SUBBEGIN
* state.)
*/
@@ -2517,7 +2517,7 @@ AbortCurrentTransaction(void)
break;
/*
- * Here, we failed while trying to COMMIT. Clean up the
+ * Here, we failed while trying to COMMIT. Clean up the
* transaction and return to idle state (we do not want to stay in
* the transaction).
*/
@@ -2579,7 +2579,7 @@ AbortCurrentTransaction(void)
/*
* If we failed while trying to create a subtransaction, clean up
- * the broken subtransaction and abort the parent. The same
+ * the broken subtransaction and abort the parent. The same
* applies if we get a failure while ending a subtransaction.
*/
case TBLOCK_SUBBEGIN:
@@ -3108,7 +3108,7 @@ UserAbortTransactionBlock(void)
break;
/*
- * We are inside a subtransaction. Mark everything up to top
+ * We are inside a subtransaction. Mark everything up to top
* level as exitable.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3240,7 +3240,7 @@ ReleaseSavepoint(List *options)
break;
/*
- * We are in a non-aborted subtransaction. This is the only valid
+ * We are in a non-aborted subtransaction. This is the only valid
* case.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3296,7 +3296,7 @@ ReleaseSavepoint(List *options)
/*
* Mark "commit pending" all subtransactions up to the target
- * subtransaction. The actual commits will happen when control gets to
+ * subtransaction. The actual commits will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -3394,7 +3394,7 @@ RollbackToSavepoint(List *options)
/*
* Mark "abort pending" all subtransactions up to the target
- * subtransaction. The actual aborts will happen when control gets to
+ * subtransaction. The actual aborts will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -3793,7 +3793,7 @@ CommitSubTransaction(void)
CommandCounterIncrement();
/*
- * Prior to 8.4 we marked subcommit in clog at this point. We now only
+ * Prior to 8.4 we marked subcommit in clog at this point. We now only
* perform that step, if required, as part of the atomic update of the
* whole transaction tree at top level commit or abort.
*/
@@ -4232,7 +4232,7 @@ TransStateAsString(TransState state)
/*
* xactGetCommittedChildren
*
- * Gets the list of committed children of the current transaction. The return
+ * Gets the list of committed children of the current transaction. The return
* value is the number of child transactions. *ptr is set to point to an
* array of TransactionIds. The array is allocated in TopTransactionContext;
* the caller should *not* pfree() it (this is a change from pre-8.4 code!).
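
The calling convention spelled out above (count returned, array handed back through the pointer, memory owned by TopTransactionContext) implies a usage pattern like the following sketch; the local declarations stand in for what xact.h provides:

#include <stdint.h>

typedef uint32_t TransactionId;

/* declared in xact.h; returns child count and passes the array via *ptr */
extern int xactGetCommittedChildren(TransactionId **ptr);

static void
walk_committed_children(void)
{
    TransactionId *children;
    int nchildren = xactGetCommittedChildren(&children);

    for (int i = 0; i < nchildren; i++)
    {
        /* use children[i]; the array lives in TopTransactionContext */
    }
    /* per the comment above: do NOT pfree(children) */
}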
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 2a99923687..c3e37c00e1 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -82,7 +82,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it. This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
- * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
+ * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
@@ -185,7 +185,7 @@ static bool recoveryStopAfter;
*
* expectedTLIs: an integer list of recoveryTargetTLI and the TLIs of
* its known parents, newest first (so recoveryTargetTLI is always the
- * first list member). Only these TLIs are expected to be seen in the WAL
+ * first list member). Only these TLIs are expected to be seen in the WAL
* segments we read, and indeed only these TLIs will be considered as
* candidate WAL files to open at all.
*
@@ -213,9 +213,9 @@ XLogRecPtr XactLastRecEnd = {0, 0};
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
- * hold the Insert lock). See XLogInsert for details. We are also allowed
+ * hold the Insert lock). See XLogInsert for details. We are also allowed
* to update from XLogCtl->Insert.RedoRecPtr if we hold the info_lck;
* see GetRedoRecPtr. A freshly spawned backend obtains the value during
* InitXLOGAccess.
@@ -247,7 +247,7 @@ static XLogRecPtr RedoRecPtr;
* without needing to grab info_lck as well.
*
* XLogCtl->Insert.LogwrtResult may lag behind the reality of the other two,
- * but is updated when convenient. Again, it exists for the convenience of
+ * but is updated when convenient. Again, it exists for the convenience of
* code that is already holding WALInsertLock but not the other locks.
*
* The unshared LogwrtResult may lag behind any or all of these, and again
@@ -1730,7 +1730,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
{
/*
* Could get here without iterating above loop, in which case we might
- * have no open file or the wrong one. However, we do not need to
+ * have no open file or the wrong one. However, we do not need to
* fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN &&
@@ -1994,9 +1994,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those. Thus, if async commits
- * are not being used, we will flush complete blocks only. We can guarantee
+ * are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
+ * one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
* at the end of the buffer ring; this makes a difference only with very high
* load or long wal_writer_delay, but imposes one extra cycle for the worst
* case for async commits.)
@@ -2147,7 +2147,7 @@ XLogNeedsFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted). On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock while moving file into
@@ -2217,11 +2217,11 @@ XLogFileInit(uint32 log, uint32 seg,
errmsg("could not create file \"%s\": %m", tmppath)));
/*
- * Zero-fill the file. We have to do this the hard way to ensure that all
+ * Zero-fill the file. We have to do this the hard way to ensure that all
* the file space has really been allocated --- on platforms that allow
* "holes" in files, just seeking to the end doesn't allocate intermediate
* space. This way, we know that we have all the space and (after the
- * fsync below) that all the indirect blocks are down on disk. Therefore,
+ * fsync below) that all the indirect blocks are down on disk. Therefore,
* fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
* log file.
*
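
The "hard way" amounts to writing zeroes across the entire segment so every block is physically allocated before the final fsync. A stripped-down sketch, with error handling trimmed and 16 MB standing in for the configured XLOG segment size:

#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define SEG_SIZE (16 * 1024 * 1024)     /* stand-in for XLogSegSize */
#define ZBUF_SIZE 8192

/* write zeroes over the whole file so no unallocated "holes" remain */
static int
zero_fill_segment(int fd)
{
    char zbuf[ZBUF_SIZE];

    memset(zbuf, 0, sizeof(zbuf));
    for (int written = 0; written < SEG_SIZE; written += ZBUF_SIZE)
    {
        if (write(fd, zbuf, ZBUF_SIZE) != (ssize_t) ZBUF_SIZE)
            return -1;                  /* caller reports errno */
    }
    return 0;                           /* caller must still fsync(fd) */
}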
@@ -2309,7 +2309,7 @@ XLogFileInit(uint32 log, uint32 seg,
* a different timeline)
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
@@ -2544,7 +2544,7 @@ XLogFileRead(uint32 log, uint32 seg, int emode)
* the timelines listed in expectedTLIs.
*
* We expect curFileTLI on entry to be the TLI of the preceding file in
- * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
+ * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
* to go backwards; this prevents us from picking up the wrong file when a
* parent timeline extends to higher segment numbers than the child we
* want to read.
@@ -2612,7 +2612,7 @@ XLogFileClose(void)
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * the OS to release any cached pages. But do not do so if WAL archiving
+ * the OS to release any cached pages. But do not do so if WAL archiving
* is active, because archiver process could use the cache to read the WAL
* segment. Also, don't bother with it if we are using O_DIRECT, since
* the kernel is presumably not caching in that case.
@@ -3064,7 +3064,7 @@ RemoveOldXlogFiles(uint32 log, uint32 seg, XLogRecPtr endptr)
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that we
+ * deciding whether a segment is still needed. This ensures that we
* won't prematurely remove a segment from a parent timeline. We could
* probably be a little more proactive about removing segments of
* non-parent timelines, but that would be a whole lot more
@@ -3260,7 +3260,7 @@ CleanupBackupHistory(void)
* ignoring them as already applied, but that's not a huge drawback.
*
* If 'cleanup' is true, a cleanup lock is used when restoring blocks.
- * Otherwise, a normal exclusive lock is used. At the moment, that's just
+ * Otherwise, a normal exclusive lock is used. At the moment, that's just
* pro forma, because there can't be any regular backends in the system
* during recovery. The 'cleanup' argument applies to all backup blocks
* in the WAL record, that suffices for now.
@@ -3778,7 +3778,7 @@ next_record_is_invalid:;
* Check whether the xlog header of a page just read in looks valid.
*
* This is just a convenience subroutine to avoid duplicated code in
- * ReadRecord. It's not intended for use from anywhere else.
+ * ReadRecord. It's not intended for use from anywhere else.
*/
static bool
ValidXLOGHeader(XLogPageHeader hdr, int emode)
@@ -3894,7 +3894,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode)
* Try to read a timeline's history file.
*
* If successful, return the list of component TLIs (the given TLI followed by
- * its ancestor TLIs). If we can't find the history file, assume that the
+ * its ancestor TLIs). If we can't find the history file, assume that the
* timeline has no parents, and return a list of just the specified timeline
* ID.
*/
@@ -4059,7 +4059,7 @@ findNewestTimeLine(TimeLineID startTLI)
* endTLI et al: ID of the last used WAL file, for annotation purposes
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
@@ -4228,7 +4228,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control. WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -4955,7 +4955,7 @@ readRecoveryCommandFile(void)
/*
* If user specified recovery_target_timeline, validate it or compute the
- * "latest" value. We can't do this until after we've gotten the restore
+ * "latest" value. We can't do this until after we've gotten the restore
* command and set InArchiveRecovery, because we need to fetch timeline
* history files from the archive.
*/
@@ -5015,7 +5015,7 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
* the existing xlog segment (if any) with the archival version. This is
* because whatever is in XLOGDIR is very possibly older than what we have
* from the archives, since it could have come from restoring a PGDATA
- * backup. In any case, the archival version certainly is more
+ * backup. In any case, the archival version certainly is more
* descriptive of what our current database state is, because that is what
* we replayed from.
*
@@ -5725,8 +5725,8 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we are doing an archive recovery, we always assign a new ID. This
- * handles a couple of issues. If we stopped short of the end of WAL
+ * If we are doing an archive recovery, we always assign a new ID. This
+ * handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
* current last segment is problematic because it may result in trying to
@@ -5774,7 +5774,7 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in. The last block is indeed the
+ * record spans, not the one it starts in. The last block is indeed the
* one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - XLOG_BLCKSZ) % XLogSegSize);
@@ -5805,7 +5805,7 @@ StartupXLOG(void)
* Write.curridx must point to the *next* page (see XLogWrite()).
*
* Note: it might seem we should do AdvanceXLInsertBuffer() here, but
- * this is sufficient. The first actual attempt to insert a log
+ * this is sufficient. The first actual attempt to insert a log
* record will advance the insert state.
*/
XLogCtl->Write.curridx = NextBufIdx(0);
@@ -6460,7 +6460,7 @@ CreateCheckPoint(int flags)
/*
* If this isn't a shutdown or forced checkpoint, and we have not inserted
* any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
+ * checkpoint. The idea here is to avoid inserting duplicate checkpoints
* when the system is idle. That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
@@ -6726,9 +6726,9 @@ CreateCheckPoint(int flags)
/*
* Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction. No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). During recovery, though, we mustn't do this because
+ * in subtrans.c). During recovery, though, we mustn't do this because
* StartupSUBTRANS hasn't been called yet.
*/
if (!RecoveryInProgress())
@@ -6956,7 +6956,7 @@ XLogPutNextOid(Oid nextOid)
* We need not flush the NEXTOID record immediately, because any of the
* just-allocated OIDs could only reach disk as part of a tuple insert or
* update that would have its own XLOG record that must follow the NEXTOID
- * record. Therefore, the standard buffer LSN interlock applied to those
+ * record. Therefore, the standard buffer LSN interlock applied to those
* records will ensure no such OID reaches disk before the NEXTOID record
* does.
*
@@ -7323,7 +7323,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
* during an on-line backup even if not doing so at other times, because
* it's quite possible for the backup dump to obtain a "torn" (partially
* written) copy of a database page if it reads the page concurrently with
- * our write to the same page. This can be fixed as long as the first
+ * our write to the same page. This can be fixed as long as the first
* write to the page in the WAL sequence is a full-page write. Hence, we
* turn on forcePageWrites and then force a CHECKPOINT, to ensure there
* are no dirty pages in shared memory that might get dumped while the
@@ -7364,7 +7364,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) 0);
{
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs will
* have different checkpoint positions and hence different history
* file names, even if nothing happened in between.
@@ -7829,7 +7829,7 @@ pg_xlogfile_name(PG_FUNCTION_ARGS)
*
* If we see a backup_label during recovery, we assume that we are recovering
* from a backup dump file, and we therefore roll forward from the checkpoint
- * identified by the label file, NOT what pg_control says. This avoids the
+ * identified by the label file, NOT what pg_control says. This avoids the
* problem that pg_control might have been archived one or more checkpoints
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
@@ -8037,7 +8037,7 @@ startupproc_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 0f2942e0ba..0a124ed762 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -94,7 +94,7 @@ static int strtable_end = -1; /* Tells us last occupied string space */
* pg_type is created.
*
* XXX several of these input/output functions do catalog scans
- * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some
+ * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some
* order dependencies in the catalog creation process.
*/
struct typinfo
@@ -558,7 +558,7 @@ bootstrap_signals(void)
}
/*
- * Begin shutdown of an auxiliary process. This is approximately the equivalent
+ * Begin shutdown of an auxiliary process. This is approximately the equivalent
* of ShutdownPostgres() in postinit.c. We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
* but we do need to make sure we've released any LWLocks we are holding.
@@ -867,7 +867,7 @@ cleanup(void)
* and not an OID at all, until the first reference to a type not known in
* TypInfo[]. At that point it will read and cache pg_type in the Typ array,
* and subsequently return a real OID (and set the global pointer Ap to
- * point at the found row in Typ). So caller must check whether Typ is
+ * point at the found row in Typ). So caller must check whether Typ is
* still NULL to determine what the return value is!
* ----------------
*/
@@ -1118,7 +1118,7 @@ CompHash(char *str, int len)
* FindStr
*
* This routine looks for the specified string in the hash
- * table. It returns a pointer to the hash node found,
+ * table. It returns a pointer to the hash node found,
* or NULL if the string is not in the table.
* ----------------
*/
@@ -1213,9 +1213,9 @@ AddStr(char *str, int strlength, int mderef)
*
* At bootstrap time, we define a bunch of indexes on system catalogs.
* We postpone actually building the indexes until just before we're
- * finished with initialization, however. This is because the indexes
+ * finished with initialization, however. This is because the indexes
* themselves have catalog entries, and those have to be included in the
- * indexes on those catalogs. Doing it in two phases is the simplest
+ * indexes on those catalogs. Doing it in two phases is the simplest
* way of making sure the indexes have the right contents at the end.
*/
void
@@ -1228,7 +1228,7 @@ index_register(Oid heap,
/*
* XXX mao 10/31/92 -- don't gc index reldescs, associated info at
- * bootstrap time. we'll declare the indexes now, but want to create them
+ * bootstrap time. we'll declare the indexes now, but want to create them
* later.
*/
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 958172c8ef..b15a8304e6 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -238,7 +238,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
/*
* Restrict the operation to what we can actually grant or revoke, and
- * issue a warning if appropriate. (For REVOKE this isn't quite what the
+ * issue a warning if appropriate. (For REVOKE this isn't quite what the
* spec says to do: the spec seems to want a warning only if no privilege
* bits actually change in the ACL. In practice that behavior seems much
* too noisy, as well as inconsistent with the GRANT case.)
@@ -457,7 +457,7 @@ ExecuteGrantStmt(GrantStmt *stmt)
/*
* ExecGrantStmt_oids
*
- * "Internal" entrypoint for granting and revoking privileges. This is
+ * "Internal" entrypoint for granting and revoking privileges. This is
* exported for pg_shdepend.c to use in revoking privileges when dropping
* a role.
*/
@@ -836,7 +836,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname,
* If the updated ACL is empty, we can set attacl to null, and maybe even
* avoid an update of the pg_attribute row. This is worth testing because
* we'll come through here multiple times for any relation-level REVOKE,
- * even if there were never any column GRANTs. Note we are assuming that
+ * even if there were never any column GRANTs. Note we are assuming that
* the "default" ACL state for columns is empty.
*/
if (ACL_NUM(new_acl) > 0)
@@ -961,7 +961,7 @@ ExecGrant_Relation(InternalGrant *istmt)
{
/*
* Mention the object name because the user needs to know
- * which operations succeeded. This is required because
+ * which operations succeeded. This is required because
* WARNING allows the command to continue.
*/
ereport(WARNING,
@@ -990,7 +990,7 @@ ExecGrant_Relation(InternalGrant *istmt)
/*
* Set up array in which we'll accumulate any column privilege bits
- * that need modification. The array is indexed such that entry [0]
+ * that need modification. The array is indexed such that entry [0]
* corresponds to FirstLowInvalidHeapAttributeNumber.
*/
num_col_privileges = pg_class_tuple->relnatts - FirstLowInvalidHeapAttributeNumber + 1;
@@ -2311,7 +2311,7 @@ pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, Oid roleid,
*
* Note: this considers only privileges granted specifically on the column.
* It is caller's responsibility to take relation-level privileges into account
- * as appropriate. (For the same reason, we have no special case for
+ * as appropriate. (For the same reason, we have no special case for
* superuser-ness here.)
*/
AclMode
@@ -2429,12 +2429,12 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
/*
* Deny anyone permission to update a system catalog unless
- * pg_authid.rolcatupdate is set. (This is to let superusers protect
+ * pg_authid.rolcatupdate is set. (This is to let superusers protect
* themselves from themselves.) Also allow it if allowSystemTableMods.
*
* As of 7.4 we have some updatable system views; those shouldn't be
* protected in this way. Assume the view rules can take care of
- * themselves. ACL_USAGE is if we ever have system sequences.
+ * themselves. ACL_USAGE is if we ever have system sequences.
*/
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE)) &&
IsSystemClass(classForm) &&
@@ -3004,7 +3004,7 @@ pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode,
ReleaseSysCache(classTuple);
/*
- * Initialize result in case there are no non-dropped columns. We want to
+ * Initialize result in case there are no non-dropped columns. We want to
* report failure in such cases for either value of 'how'.
*/
result = ACLCHECK_NO_PRIV;
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 42371d5137..a4e9db8d92 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -283,7 +283,7 @@ IsReservedName(const char *name)
*
* Hard-wiring this list is pretty grotty, but we really need it so that
* we can compute the locktag for a relation (and then lock it) without
- * having already read its pg_class entry. If we try to retrieve relisshared
+ * having already read its pg_class entry. If we try to retrieve relisshared
* from pg_class with no pre-existing lock, there is a race condition against
* anyone who is concurrently committing a change to the pg_class entry:
* since we read system catalog entries under SnapshotNow, it's possible
@@ -350,7 +350,7 @@ IsSharedRelation(Oid relationId)
* Since the OID is not immediately inserted into the table, there is a
* race condition here; but a problem could occur only if someone else
* managed to cycle through 2^32 OIDs and generate the same OID before we
- * finish inserting our row. This seems unlikely to be a problem. Note
+ * finish inserting our row. This seems unlikely to be a problem. Note
* that if we had to *commit* the row to end the race condition, the risk
* would be rather higher; therefore we use SnapshotDirty in the test,
* so that we will see uncommitted rows.
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 3e13401ab8..e1d2f9bfc6 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -207,7 +207,7 @@ performDeletion(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object. (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object);
@@ -333,7 +333,7 @@ performMultipleDeletions(const ObjectAddresses *objects,
/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
- * specified object, though not the object itself. Behavior is always
+ * specified object, though not the object itself. Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@@ -355,7 +355,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object. (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object);
@@ -407,7 +407,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
*
* For every object that depends on the starting object, acquire a deletion
* lock on the object, add it to targetObjects (if not already there),
- * and recursively find objects that depend on it. An object's dependencies
+ * and recursively find objects that depend on it. An object's dependencies
* will be placed into targetObjects before the object itself; this means
* that the finished list's order represents a safe deletion order.
*
@@ -460,7 +460,7 @@ findDependentObjects(const ObjectAddress *object,
* will not break a loop at an internal dependency: if we enter the loop
* at an "owned" object we will switch and start at the "owning" object
* instead. We could probably hack something up to avoid breaking at an
- * auto dependency, too, if we had to. However there are no known cases
+ * auto dependency, too, if we had to. However there are no known cases
* where that would be necessary.
*/
for (stackptr = stack; stackptr; stackptr = stackptr->next)
@@ -546,7 +546,7 @@ findDependentObjects(const ObjectAddress *object,
/*
* This object is part of the internal implementation of
- * another object. We have three cases:
+ * another object. We have three cases:
*
* 1. At the outermost recursion level, disallow the DROP. (We
* just ereport here, rather than proceeding, since no other
@@ -748,7 +748,7 @@ findDependentObjects(const ObjectAddress *object,
systable_endscan(scan);
/*
- * Finally, we can add the target object to targetObjects. Be careful to
+ * Finally, we can add the target object to targetObjects. Be careful to
* include any flags that were passed back down to us from inner recursion
* levels.
*/
@@ -803,7 +803,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings. The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
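[Editor's note: a trivial sketch of the reporting split described above — the client-visible detail is capped at MAX_REPORTED_DEPS while the server log gets everything. The counts are made up.]

#include <stdio.h>

#define MAX_REPORTED_DEPS 100

int
main(void)
{
    int     ndeps = 250;        /* pretend this many dependent objects */
    int     i, reported = 0;

    for (i = 0; i < ndeps; i++)
    {
        if (reported < MAX_REPORTED_DEPS)
            reported++;         /* would append to the client detail string */
        /* the full description would always go to the server log */
    }
    printf("reported %d of %d to client\n", reported, ndeps);
    return 0;
}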
@@ -834,7 +834,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
if (extra->flags & (DEPFLAG_AUTO | DEPFLAG_INTERNAL))
{
/*
- * auto-cascades are reported at DEBUG2, not msglevel. We don't
+ * auto-cascades are reported at DEBUG2, not msglevel. We don't
* try to combine them with the regular message because the
* results are too confusing when client_min_messages and
* log_min_messages are different.
@@ -952,7 +952,7 @@ deleteOneObject(const ObjectAddress *object, Relation depRel)
/*
* First remove any pg_depend records that link from this object to
- * others. (Any records linking to this object should be gone already.)
+ * others. (Any records linking to this object should be gone already.)
*
* When dropping a whole object (subId = 0), remove all pg_depend records
* for its sub-objects too.
@@ -987,7 +987,7 @@ deleteOneObject(const ObjectAddress *object, Relation depRel)
systable_endscan(scan);
/*
- * Delete shared dependency references related to this object. Again, if
+ * Delete shared dependency references related to this object. Again, if
* subId = 0, remove records for sub-objects too.
*/
deleteSharedDependencyRecordsFor(object->classId, object->objectId,
@@ -1218,7 +1218,7 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
- * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
+ * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* range table. An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
@@ -1388,7 +1388,7 @@ find_expr_references_walker(Node *node,
/*
* If it's a regclass or similar literal referring to an existing
- * object, add a reference to that object. (Currently, only the
+ * object, add a reference to that object. (Currently, only the
* regclass and regconfig cases have any likely use, but we may as
* well handle all the OID-alias datatypes consistently.)
*/
@@ -1915,7 +1915,7 @@ object_address_present_add_flags(const ObjectAddress *object,
{
/*
* We get here if we find a need to delete a column after
- * having already decided to drop its whole table. Obviously
+ * having already decided to drop its whole table. Obviously
* we no longer need to drop the column. But don't plaster
* its flags on the table.
*/
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 70a32011f4..961763bf08 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -21,7 +21,7 @@
* the old heap_create_with_catalog, amcreate, and amdestroy.
* those routines will soon call these routines using the function
* manager,
- * just like the poorly named "NewXXX" routines do. The
+ * just like the poorly named "NewXXX" routines do. The
* "New" routines are all going to die soon, once and for all!
* -cim 1/13/91
*
@@ -177,7 +177,7 @@ SystemAttributeDefinition(AttrNumber attno, bool relhasoids)
/*
* If the given name is a system attribute name, return a Form_pg_attribute
- * pointer for a prototype definition. If not, return NULL.
+ * pointer for a prototype definition. If not, return NULL.
*/
Form_pg_attribute
SystemAttributeByName(const char *attname, bool relhasoids)
@@ -513,7 +513,7 @@ CheckAttributeType(const char *attname, Oid atttypid,
* Caller has already opened and locked pg_attribute. new_attribute is the
* attribute to insert (but we ignore its attacl, if indeed it has one).
*
- * indstate is the index state for CatalogIndexInsert. It can be passed as
+ * indstate is the index state for CatalogIndexInsert. It can be passed as
* NULL, in which case we'll fetch the necessary info. (Don't do this when
* inserting multiple attributes, because it's a tad more expensive.)
*
@@ -720,7 +720,7 @@ InsertPgClassTuple(Relation pg_class_desc,
tup = heap_form_tuple(RelationGetDescr(pg_class_desc), values, nulls);
/*
- * The new tuple must have the oid already chosen for the rel. Sure would
+ * The new tuple must have the oid already chosen for the rel. Sure would
* be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tup, new_rel_oid);
@@ -969,7 +969,7 @@ heap_create_with_catalog(const char *relname,
/*
* Decide whether to create an array type over the relation's rowtype. We
* do not create any array types for system catalogs (ie, those made
- * during initdb). We create array types for regular relations, views,
+ * during initdb). We create array types for regular relations, views,
* and composite types ... but not, eg, for toast tables or sequences.
*/
if (IsUnderPostmaster && (relkind == RELKIND_RELATION ||
@@ -1121,8 +1121,8 @@ heap_create_with_catalog(const char *relname,
* RelationRemoveInheritance
*
* Formerly, this routine checked for child relations and aborted the
- * deletion if any were found. Now we rely on the dependency mechanism
- * to check for or delete child relations. By the time we get here,
+ * deletion if any were found. Now we rely on the dependency mechanism
+ * to check for or delete child relations. By the time we get here,
* there are no children and we need only remove any pg_inherits rows
* linking this relation to its parent(s).
*/
@@ -1368,7 +1368,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum,
/*
* RemoveAttrDefaultById
*
- * Remove a pg_attrdef entry specified by OID. This is the guts of
+ * Remove a pg_attrdef entry specified by OID. This is the guts of
* attribute-default removal. Note it should be called via performDeletion,
* not directly.
*/
@@ -1729,7 +1729,7 @@ StoreConstraints(Relation rel, List *cooked_constraints)
/*
* Deparsing of constraint expressions will fail unless the just-created
- * pg_attribute tuples for this relation are made visible. So, bump the
+ * pg_attribute tuples for this relation are made visible. So, bump the
* command counter. CAUTION: this will cause a relcache entry rebuild.
*/
CommandCounterIncrement();
@@ -1779,7 +1779,7 @@ StoreConstraints(Relation rel, List *cooked_constraints)
* the default and constraint expressions added to the relation.
*
* NB: caller should have opened rel with AccessExclusiveLock, and should
- * hold that lock till end of transaction. Also, we assume the caller has
+ * hold that lock till end of transaction. Also, we assume the caller has
* done a CommandCounterIncrement if necessary to make the relation's catalog
* tuples visible.
*/
@@ -1921,7 +1921,7 @@ AddRelationNewConstraints(Relation rel,
checknames = lappend(checknames, ccname);
/*
- * Check against pre-existing constraints. If we are allowed to
+ * Check against pre-existing constraints. If we are allowed to
* merge with an existing constraint, there's no more to do here.
* (We omit the duplicate constraint from the result, which is
* what ATAddCheckConstraint wants.)
@@ -1937,7 +1937,7 @@ AddRelationNewConstraints(Relation rel,
* column constraint and "tab_check" for a table constraint. We
* no longer have any info about the syntactic positioning of the
* constraint phrase, so we approximate this by seeing whether the
- * expression references more than one column. (If the user
+ * expression references more than one column. (If the user
* played by the rules, the result is the same...)
*
* Note: pull_var_clause() doesn't descend into sublinks, but we
@@ -2317,7 +2317,7 @@ RemoveStatistics(Oid relid, AttrNumber attnum)
* with the heap relation to zero tuples.
*
* The routine will truncate and then reconstruct the indexes on
- * the specified relation. Caller must hold exclusive lock on rel.
+ * the specified relation. Caller must hold exclusive lock on rel.
*/
static void
RelationTruncateIndexes(Relation heapRelation)
@@ -2357,7 +2357,7 @@ RelationTruncateIndexes(Relation heapRelation)
* This routine deletes all data within all the specified relations.
*
* This is not transaction-safe! There is another, transaction-safe
- * implementation in commands/tablecmds.c. We now use this only for
+ * implementation in commands/tablecmds.c. We now use this only for
* ON COMMIT truncation of temporary tables, where it doesn't matter.
*/
void
@@ -2447,7 +2447,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
return;
/*
- * Otherwise, must scan pg_constraint. We make one pass with all the
+ * Otherwise, must scan pg_constraint. We make one pass with all the
* relations considered; if this finds nothing, then all is well.
*/
dependents = heap_truncate_find_FKs(oids);
@@ -2508,7 +2508,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
* behavior to change depending on chance locations of rows in pg_constraint.)
*
* Note: caller should already have appropriate lock on all rels mentioned
- * in relationIds. Since adding or dropping an FK requires exclusive lock
+ * in relationIds. Since adding or dropping an FK requires exclusive lock
* on both rels, this ensures that the answer will be stable.
*/
List *
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 4d7918d5a0..5f228bfd00 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -251,7 +251,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/*
* We do not yet have the correct relation OID for the index, so just
- * set it invalid for now. InitializeAttributeOids() will fix it
+ * set it invalid for now. InitializeAttributeOids() will fix it
* later.
*/
to->attrelid = InvalidOid;
@@ -477,7 +477,7 @@ UpdateIndexRelation(Oid indexoid,
* heapRelationId: OID of table to build index on
* indexRelationName: what it says
* indexRelationId: normally, pass InvalidOid to let this routine
- * generate an OID for the index. During bootstrap this may be
+ * generate an OID for the index. During bootstrap this may be
* nonzero to specify a preselected OID.
* indexInfo: same info executor uses to insert into the index
* accessMethodObjectId: OID of index AM to use
@@ -490,7 +490,7 @@ UpdateIndexRelation(Oid indexoid,
* allow_system_table_mods: allow table to be a system catalog
* skip_build: true to skip the index_build() step for the moment; caller
* must do it later (typically via reindex_index())
- * concurrent: if true, do not lock the table against writers. The index
+ * concurrent: if true, do not lock the table against writers. The index
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
*
@@ -990,7 +990,7 @@ index_drop(Oid indexId)
*
* IndexInfo stores the information about the index that's needed by
* FormIndexDatum, which is used for both index_build() and later insertion
- * of individual index tuples. Normally we build an IndexInfo for an index
+ * of individual index tuples. Normally we build an IndexInfo for an index
* just once per command, and then use it for (potentially) many tuples.
* ----------------
*/
@@ -1045,7 +1045,7 @@ BuildIndexInfo(Relation index)
* context must point to the heap tuple passed in.
*
* Notice we don't actually call index_form_tuple() here; we just prepare
- * its input arrays values[] and isnull[]. This is because the index AM
+ * its input arrays values[] and isnull[]. This is because the index AM
* may wish to alter the data before storage.
* ----------------
*/
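[Editor's note: a sketch of why the values[]/isnull[] arrays are handed over instead of a formed tuple — an index AM gets a chance to transform the data first. The callback and doubling transform are hypothetical illustrations, not any real AM's behavior.]

#include <stdbool.h>
#include <stdio.h>

#define INDEX_MAX_KEYS 32       /* illustrative; real value is configurable */

typedef unsigned long Datum;

/* Hypothetical AM hook: may rewrite the datums before storage. */
static void
am_transform(Datum *values, bool *isnull, int natts)
{
    int     i;

    for (i = 0; i < natts; i++)
        if (!isnull[i])
            values[i] = values[i] * 2;  /* stand-in for, e.g., key extraction */
}

int
main(void)
{
    Datum   values[INDEX_MAX_KEYS] = {7, 0};
    bool    isnull[INDEX_MAX_KEYS] = {false, true};

    /* FormIndexDatum would have filled values[]/isnull[]; the AM massages
     * them before anything like index_form_tuple() runs. */
    am_transform(values, isnull, 2);
    printf("first key is now %lu\n", values[0]);
    return 0;
}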
@@ -1111,7 +1111,7 @@ FormIndexDatum(IndexInfo *indexInfo,
* index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX
*
* This routine updates the pg_class row of either an index or its parent
- * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
+ * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
* to ensure we can do all the necessary work in just one update.
*
* hasindex: set relhasindex to this value
@@ -1124,7 +1124,7 @@ FormIndexDatum(IndexInfo *indexInfo,
*
* NOTE: an important side-effect of this operation is that an SI invalidation
* message is sent out to all backends --- including me --- causing relcache
- * entries to be flushed or updated with the new data. This must happen even
+ * entries to be flushed or updated with the new data. This must happen even
* if we find that no change is needed in the pg_class row. When updating
* a heap entry, this ensures that other backends find out about the new
* index. When updating an index, it's important because some index AMs
@@ -1162,13 +1162,13 @@ index_update_stats(Relation rel, bool hasindex, bool isprimary,
* 4. Even with just a single CREATE INDEX, there's a risk factor because
* someone else might be trying to open the rel while we commit, and this
* creates a race condition as to whether he will see both or neither of
- * the pg_class row versions as valid. Again, a non-transactional update
+ * the pg_class row versions as valid. Again, a non-transactional update
* avoids the risk. It is indeterminate which state of the row the other
* process will see, but it doesn't matter (if he's only taking
* AccessShareLock, then it's not critical that he see relhasindex true).
*
* It is safe to use a non-transactional update even though our
- * transaction could still fail before committing. Setting relhasindex
+ * transaction could still fail before committing. Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
* it), and of course the relpages and reltuples counts are correct (or at
* least more so than the old values) regardless.
@@ -1177,7 +1177,7 @@ index_update_stats(Relation rel, bool hasindex, bool isprimary,
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * Make a copy of the tuple to update. Normally we use the syscache, but
+ * Make a copy of the tuple to update. Normally we use the syscache, but
* we can't rely on that during bootstrap or while reindexing pg_class
* itself.
*/
@@ -1298,7 +1298,7 @@ setNewRelfilenode(Relation relation, TransactionId freezeXid)
NULL);
/*
- * Find the pg_class tuple for the given relation. This is not used
+ * Find the pg_class tuple for the given relation. This is not used
* during bootstrap, so okay to use heap_update always.
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
@@ -1351,7 +1351,7 @@ setNewRelfilenode(Relation relation, TransactionId freezeXid)
* index_build - invoke access-method-specific index build procedure
*
* On entry, the index's catalog entries are valid, and its physical disk
- * file has been created but is empty. We call the AM-specific build
+ * file has been created but is empty. We call the AM-specific build
* procedure to fill in the index contents. We then update the pg_class
* entries of the index and heap relation as needed, using statistics
* returned by ambuild as well as data passed by the caller.
@@ -1433,7 +1433,7 @@ index_build(Relation heapRelation,
* Therefore, this code path can only be taken during non-concurrent
* CREATE INDEX. Thus the fact that heap_update will set the pg_index
* tuple's xmin doesn't matter, because that tuple was created in the
- * current transaction anyway. That also means we don't need to worry
+ * current transaction anyway. That also means we don't need to worry
* about any concurrent readers of the tuple; no other transaction can see
* it yet.
*/
@@ -1497,8 +1497,8 @@ index_build(Relation heapRelation,
* build procedure does whatever cleanup is needed; in particular, it should
* close the heap and index relations.
*
- * The total count of heap tuples is returned. This is for updating pg_class
- * statistics. (It's annoying not to be able to do that here, but we can't
+ * The total count of heap tuples is returned. This is for updating pg_class
+ * statistics. (It's annoying not to be able to do that here, but we can't
* do it until after the relation is closed.) Note that the index AM itself
* must keep track of the number of index tuples; we don't do so here because
* the AM might reject some of the tuples for its own reasons, such as being
@@ -1539,7 +1539,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -1558,7 +1558,7 @@ IndexBuildHeapScan(Relation heapRelation,
* SnapshotAny because we must retrieve all tuples and do our own time
* qual checks (because we have to index RECENTLY_DEAD tuples). In a
* concurrent build, we take a regular MVCC snapshot and index whatever's
- * live according to that. During bootstrap we just use SnapshotNow.
+ * live according to that. During bootstrap we just use SnapshotNow.
*/
if (IsBootstrapProcessingMode())
{
@@ -1668,7 +1668,7 @@ IndexBuildHeapScan(Relation heapRelation,
* building it, and may need to see such tuples.)
*
* However, if it was HOT-updated then we must only index
- * the live tuple at the end of the HOT-chain. Since this
+ * the live tuple at the end of the HOT-chain. Since this
* breaks semantics for pre-existing snapshots, mark the
* index as unusable for them.
*/
@@ -1692,7 +1692,7 @@ IndexBuildHeapScan(Relation heapRelation,
* followed by CREATE INDEX within a transaction.) An
* exception occurs when reindexing a system catalog,
* because we often release lock on system catalogs before
- * committing. In that case we wait for the inserting
+ * committing. In that case we wait for the inserting
* transaction to finish and check again. (We could do
* that on user tables too, but since the case is not
* expected it seems better to throw an error.)
@@ -1824,7 +1824,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* You'd think we should go ahead and build the index tuple here, but
- * some index AMs want to do further processing on the data first. So
+ * some index AMs want to do further processing on the data first. So
* pass the values[] and isnull[] arrays, instead.
*/
@@ -1881,11 +1881,11 @@ IndexBuildHeapScan(Relation heapRelation,
* We do a concurrent index build by first inserting the catalog entry for the
* index via index_create(), marking it not indisready and not indisvalid.
* Then we commit our transaction and start a new one, then we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* honor its constraints on HOT updates; so while existing HOT-chains might
* be broken with respect to the index, no currently live tuple will have an
- * incompatible HOT update done to it. We now build the index normally via
+ * incompatible HOT update done to it. We now build the index normally via
* index_build(), while holding a weak lock that allows concurrent
* insert/update/delete. Also, we index only tuples that are valid
* as of the start of the scan (see IndexBuildHeapScan), whereas a normal
@@ -1899,13 +1899,13 @@ IndexBuildHeapScan(Relation heapRelation,
*
* Next, we mark the index "indisready" (but still not "indisvalid") and
* commit the second transaction and start a third. Again we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* insert their new tuples into it. We then take a new reference snapshot
* which is passed to validate_index(). Any tuples that are valid according
* to this snap, but are not in the index, must be added to the index.
* (Any tuples committed live after the snap will be inserted into the
- * index by their originating transaction. Any tuples committed dead before
+ * index by their originating transaction. Any tuples committed dead before
* the snap need not be indexed, because we will wait out all transactions
* that might care about them before we mark the index valid.)
*
@@ -1914,7 +1914,7 @@ IndexBuildHeapScan(Relation heapRelation,
* ever say "delete it". (This should be faster than a plain indexscan;
* also, not all index AMs support full-index indexscan.) Then we sort the
* TIDs, and finally scan the table doing a "merge join" against the TID list
- * to see which tuples are missing from the index. Thus we will ensure that
+ * to see which tuples are missing from the index. Thus we will ensure that
* all tuples valid according to the reference snapshot are in the index.
*
* Building a unique index this way is tricky: we might try to insert a
@@ -1930,7 +1930,7 @@ IndexBuildHeapScan(Relation heapRelation,
* were alive at the time of the reference snapshot are gone; this is
* necessary to be sure there are none left with a serializable snapshot
* older than the reference (and hence possibly able to see tuples we did
- * not index). Then we mark the index "indisvalid" and commit. Subsequent
+ * not index). Then we mark the index "indisvalid" and commit. Subsequent
* transactions will be able to use it for queries.
*
* Doing two full table scans is a brute-force strategy. We could try to be
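[Editor's note: the long comment above walks through a multi-phase protocol; here is a compact phase-by-phase restatement as a runnable sketch. Every step name is a descriptive placeholder, not a real function.]

#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

int
main(void)
{
    step("create catalog entry: !indisready, !indisvalid");
    step("commit; wait for all table-modifying xacts to end");
    step("build index under weak lock from an MVCC snapshot");
    step("set indisready; commit; wait for writers again");
    step("take reference snapshot; validate_index(): collect index TIDs,");
    step("  sort them, merge-join against the heap, insert missing tuples");
    step("wait out snapshots older than the reference snapshot");
    step("set indisvalid; commit");
    return 0;
}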
@@ -1956,7 +1956,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
indexRelation = index_open(indexId, RowExclusiveLock);
/*
- * Fetch info needed for index_insert. (You might think this should be
+ * Fetch info needed for index_insert. (You might think this should be
* passed in from DefineIndex, but its copy is long gone due to having
* been built in a previous transaction.)
*/
@@ -2074,7 +2074,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2123,7 +2123,7 @@ validate_index_heapscan(Relation heapRelation,
* visit the live tuples in order by their offsets, but the root
* offsets that we need to compare against the index contents might be
* ordered differently. So we might have to "look back" within the
- * tuplesort output, but only within the current page. We handle that
+ * tuplesort output, but only within the current page. We handle that
* by keeping a bool array in_index[] showing all the
* already-passed-over tuplesort output TIDs of the current page. We
* clear that array here, when advancing onto a new heap page.
@@ -2204,7 +2204,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* For the current heap tuple, extract all the attributes we use
- * in this index, and note which are null. This also performs
+ * in this index, and note which are null. This also performs
* evaluation of any expressions needed.
*/
FormIndexDatum(indexInfo,
@@ -2226,7 +2226,7 @@ validate_index_heapscan(Relation heapRelation,
* for a uniqueness check on the whole HOT-chain. That is, the
* tuple we have here could be dead because it was already
* HOT-updated, and if so the updating transaction will not have
- * thought it should insert index entries. The index AM will
+ * thought it should insert index entries. The index AM will
* check the whole HOT-chain and correctly detect a conflict if
* there is one.
*/
@@ -2316,7 +2316,7 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action)
/*
* IndexGetRelation: given an index's relation OID, get the OID of the
- * relation it is an index on. Uses the system cache.
+ * relation it is an index on. Uses the system cache.
*/
Oid
IndexGetRelation(Oid indexId)
@@ -2355,7 +2355,7 @@ reindex_index(Oid indexId)
bool index_bad;
/*
- * Open and lock the parent heap relation. ShareLock is sufficient since
+ * Open and lock the parent heap relation. ShareLock is sufficient since
* we only need to be sure no schema or data changes are going on.
*/
heapId = IndexGetRelation(indexId);
@@ -2388,7 +2388,7 @@ reindex_index(Oid indexId)
* it the normal transaction-safe way.
*
* Since inplace processing isn't crash-safe, we only allow it in a
- * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases,
+ * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases,
* the caller should have detected this.)
*/
inplace = iRel->rd_rel->relisshared;
@@ -2517,7 +2517,7 @@ reindex_relation(Oid relid, bool toast_too)
ListCell *indexId;
/*
- * Open and lock the relation. ShareLock is sufficient since we only need
+ * Open and lock the relation. ShareLock is sufficient since we only need
* to prevent schema and data changes in it.
*/
rel = heap_open(relid, ShareLock);
@@ -2535,7 +2535,7 @@ reindex_relation(Oid relid, bool toast_too)
* reindex_index will attempt to update the pg_class rows for the relation
* and index. If we are processing pg_class itself, we want to make sure
* that the updates do not try to insert index entries into indexes we
- * have not processed yet. (When we are trying to recover from corrupted
+ * have not processed yet. (When we are trying to recover from corrupted
* indexes, that could easily cause a crash.) We can accomplish this
* because CatalogUpdateIndexes will use the relcache's index list to know
* which indexes to update. We just force the index list to be only the
@@ -2544,7 +2544,7 @@ reindex_relation(Oid relid, bool toast_too)
* It is okay to not insert entries into the indexes we have not processed
* yet because all of this is transaction-safe. If we fail partway
* through, the updated rows are dead and it doesn't matter whether they
- * have index entries. Also, a new pg_class index will be created with an
+ * have index entries. Also, a new pg_class index will be created with an
* entry for its own pg_class row because we do setNewRelfilenode() before
* we do index_build().
*
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index b707972231..00d5e0ca0b 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -144,7 +144,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple)
* CatalogUpdateIndexes - do all the indexing work for a new catalog tuple
*
* This is a convenience routine for the common case where we only need
- * to insert or update a single tuple in a system catalog. Avoid using it for
+ * to insert or update a single tuple in a system catalog. Avoid using it for
* multiple tuples, since opening the indexes and building the index info
* structures is moderately expensive.
*/
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 2b6d95807d..01d2e3fce5 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -61,10 +61,10 @@
* when we are obeying an override search path spec that says not to use the
* temp namespace, or the temp namespace is included in the explicit list.)
*
- * 2. The system catalog namespace is always searched. If the system
+ * 2. The system catalog namespace is always searched. If the system
* namespace is present in the explicit path then it will be searched in
* the specified order; otherwise it will be searched after TEMP tables and
- * *before* the explicit list. (It might seem that the system namespace
+ * *before* the explicit list. (It might seem that the system namespace
* should be implicitly last, but this behavior appears to be required by
* SQL99. Also, this provides a way to search the system namespace first
* without thereby making it the default creation target namespace.)
@@ -82,7 +82,7 @@
* to refer to the current backend's temp namespace. This is usually also
* ignorable if the temp namespace hasn't been set up, but there's a special
* case: if "pg_temp" appears first then it should be the default creation
- * target. We kluge this case a little bit so that the temp namespace isn't
+ * target. We kluge this case a little bit so that the temp namespace isn't
* set up until the first attempt to create something in it. (The reason for
* klugery is that we can't create the temp namespace outside a transaction,
* but initial GUC processing of search_path happens outside a transaction.)
@@ -93,7 +93,7 @@
* In bootstrap mode, the search path is set equal to "pg_catalog", so that
* the system namespace is the only one searched or inserted into.
* initdb is also careful to set search_path to "pg_catalog" for its
- * post-bootstrap standalone backend runs. Otherwise the default search
+ * post-bootstrap standalone backend runs. Otherwise the default search
* path is determined by GUC. The factory default path contains the PUBLIC
* namespace (if it exists), preceded by the user's personal namespace
* (if one exists).
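[Editor's note: a minimal sketch of the implicit-ordering rules quoted above — temp namespace first unless listed, system catalog after temp but before the explicit entries unless listed. The schema names are placeholders.]

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *explicit_path[] = {"myschema", "public"};
    int         n = 2, i;
    bool        has_catalog = false, has_temp = false;

    for (i = 0; i < n; i++)
    {
        if (strcmp(explicit_path[i], "pg_catalog") == 0)
            has_catalog = true;
        if (strcmp(explicit_path[i], "pg_temp") == 0)
            has_temp = true;
    }

    /* temp namespace is searched first unless explicitly listed */
    if (!has_temp)
        printf("pg_temp\n");
    /* system catalog goes after temp but before the explicit entries */
    if (!has_catalog)
        printf("pg_catalog\n");
    for (i = 0; i < n; i++)
        printf("%s\n", explicit_path[i]);
    return 0;
}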
@@ -157,13 +157,13 @@ static List *overrideStack = NIL;
/*
* myTempNamespace is InvalidOid until and unless a TEMP namespace is set up
* in a particular backend session (this happens when a CREATE TEMP TABLE
- * command is first executed). Thereafter it's the OID of the temp namespace.
+ * command is first executed). Thereafter it's the OID of the temp namespace.
*
* myTempToastNamespace is the OID of the namespace for my temp tables' toast
- * tables. It is set when myTempNamespace is, and is InvalidOid before that.
+ * tables. It is set when myTempNamespace is, and is InvalidOid before that.
*
* myTempNamespaceSubID shows whether we've created the TEMP namespace in the
- * current subtransaction. The flag propagates up the subtransaction tree,
+ * current subtransaction. The flag propagates up the subtransaction tree,
* so the main transaction will correctly recognize the flag if all
* intermediate subtransactions commit. When it is InvalidSubTransactionId,
* we either haven't made the TEMP namespace yet, or have successfully
@@ -573,7 +573,7 @@ TypeIsVisible(Oid typid)
* and the returned nvargs will always be zero.
*
* If expand_defaults is true, functions that could match after insertion of
- * default argument values will also be retrieved. In this case the returned
+ * default argument values will also be retrieved. In this case the returned
* structs could have nargs > passed-in nargs, and ndargs is set to the number
* of additional args (which can be retrieved from the function's
* proargdefaults entry).
@@ -791,7 +791,7 @@ FuncnameGetCandidates(List *names, int nargs,
if (prevResult)
{
/*
- * We have a match with a previous result. Decide which one
+ * We have a match with a previous result. Decide which one
* to keep, or mark it ambiguous if we can't decide. The
* logic here is preference > 0 means prefer the old result,
* preference < 0 means prefer the new, preference = 0 means
@@ -1059,7 +1059,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright)
* identical entries in later namespaces.
*
* The returned items always have two args[] entries --- one or the other
- * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
+ * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
*/
FuncCandidateList
OpernameGetCandidates(List *names, char oprkind)
@@ -1966,7 +1966,7 @@ TSConfigGetCfgid(List *names, bool failOK)
/*
* TSConfigIsVisible
* Determine whether a text search configuration (identified by OID)
- * is visible in the current search path. Visible means "would be found
+ * is visible in the current search path. Visible means "would be found
* by searching for the unqualified text search configuration name".
*/
bool
@@ -2431,7 +2431,7 @@ GetTempNamespaceBackendId(Oid namespaceId)
/*
* GetTempToastNamespace - get the OID of my temporary-toast-table namespace,
- * which must already be assigned. (This is only used when creating a toast
+ * which must already be assigned. (This is only used when creating a toast
* table for a temp table, so we must have already done InitTempTableNamespace)
*/
Oid
@@ -2523,7 +2523,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -2764,7 +2764,7 @@ recomputeNamespacePath(void)
}
/*
- * Remember the first member of the explicit list. (Note: this is
+ * Remember the first member of the explicit list. (Note: this is
* nominally wrong if temp_missing, but we need it anyway to distinguish
* explicit from implicit mention of pg_catalog.)
*/
@@ -2774,7 +2774,7 @@ recomputeNamespacePath(void)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -2829,7 +2829,7 @@ InitTempTableNamespace(void)
/*
* First, do permission check to see if we are authorized to make temp
- * tables. We use a nonstandard error message here since "databasename:
+ * tables. We use a nonstandard error message here since "databasename:
* permission denied" might be a tad cryptic.
*
* Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask;
@@ -3096,7 +3096,7 @@ assign_search_path(const char *newval, bool doit, GucSource source)
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the individual names. Must accept the list on faith.
+ * cannot verify the individual names. Must accept the list on faith.
*/
if (source >= PGC_S_INTERACTIVE && IsTransactionState())
{
@@ -3107,7 +3107,7 @@ assign_search_path(const char *newval, bool doit, GucSource source)
* for USAGE rights, either; should we?
*
* When source == PGC_S_TEST, we are checking the argument of an ALTER
- * DATABASE SET or ALTER USER SET command. It could be that the
+ * DATABASE SET or ALTER USER SET command. It could be that the
* intended use of the search path is for some other database, so we
* should not error out if it mentions schemas not present in the
* current database. We reduce the message to NOTICE instead.
@@ -3216,7 +3216,7 @@ fetch_search_path(bool includeImplicit)
/*
* If the temp namespace should be first, force it to exist. This is so
* that callers can trust the result to reflect the actual default
- * creation namespace. It's a bit bogus to do this here, since
+ * creation namespace. It's a bit bogus to do this here, since
* current_schema() is supposedly a stable function without side-effects,
* but the alternatives seem worse.
*/
@@ -3238,7 +3238,7 @@ fetch_search_path(bool includeImplicit)
/*
* Fetch the active search path into a caller-allocated array of OIDs.
- * Returns the number of path entries. (If this is more than sarray_len,
+ * Returns the number of path entries. (If this is more than sarray_len,
* then the data didn't fit and is not all stored.)
*
* The returned list always includes the implicitly-prepended namespaces,
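[Editor's note: a sketch of the truncation contract described above — fill up to sarray_len entries but return the full count, so the caller can detect that the data didn't fit. The function and the fake OIDs are hypothetical.]

#include <stdio.h>

static int
fetch_search_path_array_sketch(unsigned int *sarray, int sarray_len)
{
    static const unsigned int path[] = {11, 2200, 16384};   /* fake OIDs */
    int         npath = 3, i;

    for (i = 0; i < npath && i < sarray_len; i++)
        sarray[i] = path[i];
    return npath;               /* may exceed sarray_len: data didn't fit */
}

int
main(void)
{
    unsigned int buf[2];
    int         n = fetch_search_path_array_sketch(buf, 2);

    if (n > 2)
        printf("only first 2 of %d entries stored\n", n);
    return 0;
}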
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 73b9519b31..efc14cda3e 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -36,7 +36,7 @@
* Create a constraint table entry.
*
* Subsidiary records (such as triggers or indexes to implement the
- * constraint) are *not* created here. But we do make dependency links
+ * constraint) are *not* created here. But we do make dependency links
* from the constraint to the things it depends on.
*/
Oid
diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c
index 9867cc2c67..fde2a8c24c 100644
--- a/src/backend/catalog/pg_depend.c
+++ b/src/backend/catalog/pg_depend.c
@@ -47,7 +47,7 @@ recordDependencyOn(const ObjectAddress *depender,
/*
* Record multiple dependencies (of the same kind) for a single dependent
- * object. This has a little less overhead than recording each separately.
+ * object. This has a little less overhead than recording each separately.
*/
void
recordMultipleDependencies(const ObjectAddress *depender,
@@ -324,7 +324,7 @@ isObjectPinned(const ObjectAddress *object, Relation rel)
* Detect whether a sequence is marked as "owned" by a column
*
* An ownership marker is an AUTO dependency from the sequence to the
- * column. If we find one, store the identity of the owning column
+ * column. If we find one, store the identity of the owning column
* into *tableId and *colId and return TRUE; else return FALSE.
*
* Note: if there's more than one such pg_depend entry then you get
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 90c4acc9b2..8f22e46a2c 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -311,7 +311,7 @@ OperatorShellMake(const char *operatorName,
* specify operators that do not exist. For example, if operator
* "op" is being defined, the negator operator "negop" and the
* commutator "commop" can also be defined without specifying
- * any information other than their names. Since in order to
+ * any information other than their names. Since in order to
* add "op" to the PG_OPERATOR catalog, all the Oid's for these
* operators must be placed in the fields of "op", a forward
* declaration is done on the commutator and negator operators.
@@ -429,7 +429,7 @@ OperatorCreate(const char *operatorName,
operatorName);
/*
- * Set up the other operators. If they do not currently exist, create
+ * Set up the other operators. If they do not currently exist, create
* shells in order to get ObjectId's.
*/
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index e2e11163e9..b810969980 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -152,7 +152,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
- * is polymorphic. Also, do not allow return type INTERNAL unless at
+ * is polymorphic. Also, do not allow return type INTERNAL unless at
* least one input argument is INTERNAL.
*/
for (i = 0; i < parameterCount; i++)
@@ -178,7 +178,7 @@ ProcedureCreate(const char *procedureName,
/*
* We don't bother to distinguish input and output params here, so
* if there is, say, just an input INTERNAL param then we will
- * still set internalOutParam. This is OK since we don't really
+ * still set internalOutParam. This is OK since we don't really
* care.
*/
switch (allParams[i])
@@ -575,7 +575,7 @@ ProcedureCreate(const char *procedureName,
/*
* Set per-function configuration parameters so that the validation is
- * done with the environment the function expects. However, if
+ * done with the environment the function expects. However, if
* check_function_bodies is off, we don't do this, because that would
* create dump ordering hazards that pg_dump doesn't know how to deal
* with. (For example, a SET clause might refer to a not-yet-created
@@ -836,7 +836,7 @@ sql_function_parse_error_callback(void *arg)
/*
* Adjust a syntax error occurring inside the function body of a CREATE
* FUNCTION command. This can be used by any function validator, not only
- * for SQL-language functions. It is assumed that the syntax error position
+ * for SQL-language functions. It is assumed that the syntax error position
* is initially relative to the function body string (as passed in). If
* possible, we adjust the position to reference the original CREATE command;
* if we can't manage that, we set up an "internal query" syntax error instead.
@@ -967,7 +967,7 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
/*
* This implementation handles backslashes and doubled quotes in the
- * string literal. It does not handle the SQL syntax for literals
+ * string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
* We do the comparison a character at a time, not a byte at a time, so
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 98248ac3ee..e2aef05c0f 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -155,7 +155,7 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner)
* shdepChangeDep
*
* Update shared dependency records to account for an updated referenced
- * object. This is an internal workhorse for operations such as changing
+ * object. This is an internal workhorse for operations such as changing
* an object's owner.
*
* There must be no more than one existing entry for the given dependent
@@ -304,7 +304,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
* was previously granted some rights to the object.
*
* This step is analogous to aclnewowner's removal of duplicate entries
- * in the ACL. We have to do it to handle this scenario:
+ * in the ACL. We have to do it to handle this scenario:
* A grants some rights on an object to B
* ALTER OWNER changes the object's owner to B
* ALTER OWNER changes the object's owner to C
@@ -329,7 +329,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
* Helper for updateAclDependencies.
*
* Takes two Oid arrays and returns elements from the first not found in the
- * second. We assume both arrays are sorted and de-duped, and that the
+ * second. We assume both arrays are sorted and de-duped, and that the
* second array does not contain any values not found in the first.
*
* NOTE: Both input arrays are pfreed.
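[Editor's note: a sketch of the set difference under exactly the stated preconditions — both arrays sorted and de-duped, the second a subset of the first. A single merge pass suffices; the pfree bookkeeping is omitted here.]

#include <stdio.h>

typedef unsigned int Oid;

static int
oid_array_diff(const Oid *list1, int n1, const Oid *list2, int n2, Oid *result)
{
    int     i, j = 0, k = 0;

    for (i = 0; i < n1; i++)
    {
        if (j < n2 && list1[i] == list2[j])
            j++;                /* present in both: skip */
        else
            result[k++] = list1[i];
    }
    return k;
}

int
main(void)
{
    Oid     a[] = {10, 20, 30, 40};
    Oid     b[] = {20, 40};
    Oid     out[4];
    int     n = oid_array_diff(a, 4, b, 2, out), i;

    for (i = 0; i < n; i++)
        printf("%u\n", out[i]);
    return 0;
}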
@@ -513,7 +513,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings. The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
@@ -594,7 +594,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
bool stored = false;
/*
- * XXX this info is kept on a simple List. Maybe it's not good
+ * XXX this info is kept on a simple List. Maybe it's not good
* for performance, but using a hash table seems needlessly
* complex. The expected number of databases is not high anyway,
* I suppose.
@@ -831,7 +831,7 @@ shdepAddDependency(Relation sdepRel,
/*
* Make sure the object doesn't go away while we record the dependency on
- * it. DROP routines should lock the object exclusively before they check
+ * it. DROP routines should lock the object exclusively before they check
* shared dependencies.
*/
shdepLockAndCheckObject(refclassId, refobjId);
@@ -984,7 +984,7 @@ shdepLockAndCheckObject(Oid classId, Oid objectId)
/*
* Currently, this routine need not support any other shared
- * object types besides roles. If we wanted to record explicit
+ * object types besides roles. If we wanted to record explicit
* dependencies on databases or tablespaces, we'd need code along
* these lines:
*/
@@ -1115,7 +1115,7 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
/*
* shdepDropOwned
*
- * Drop the objects owned by any one of the given RoleIds. If a role has
+ * Drop the objects owned by any one of the given RoleIds. If a role has
* access to an object, the grant will be removed as well (but the object
* will not, of course).
*
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index bc797fd408..beac6a679d 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -370,7 +370,7 @@ TypeCreate(Oid newTypeOid,
if (HeapTupleIsValid(tup))
{
/*
- * check that the type is not already defined. It may exist as a
+ * check that the type is not already defined. It may exist as a
* shell type, however.
*/
if (((Form_pg_type) GETSTRUCT(tup))->typisdefined)
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 2e279c7ed8..699ac4d17d 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -34,7 +34,7 @@
* that have been created or deleted in the current transaction. When
* a relation is created, we create the physical file immediately, but
* remember it so that we can delete the file again if the current
- * transaction is aborted. Conversely, a deletion request is NOT
+ * transaction is aborted. Conversely, a deletion request is NOT
* executed immediately, but is just entered in the list. When and if
* the transaction commits, we can delete the physical file.
*
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index b284cd23aa..415a362c0e 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -312,7 +312,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
}
/*
- * Check to see whether the table needs a TOAST table. It does only if
+ * Check to see whether the table needs a TOAST table. It does only if
* (1) there are any toastable attributes, and (2) the maximum length
* of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
* create a toast table for something like "f1 varchar(20)".)
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 461f81005c..11170fcdd7 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -173,7 +173,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
*
* transtype can't be a pseudo-type, since we need to be able to store
* values of the transtype. However, we can allow polymorphic transtype
- * in some cases (AggregateCreate will check). Also, we allow "internal"
+ * in some cases (AggregateCreate will check). Also, we allow "internal"
* for functions that want to pass pointers to private data structures;
* but allow that only to superusers, since you could crash the system (or
* worse) by connecting up incompatible internal-using functions in an
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index ccd5c4a702..1e8bb98b14 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -34,7 +34,7 @@
/*
- * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
+ * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
* type, the function appropriate to that type is executed.
*/
void
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 2bb1fd791f..813a7be0b0 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -142,7 +142,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
anl_context = CurrentMemoryContext;
/*
- * Check for user-requested abort. Note we want this to be inside a
+ * Check for user-requested abort. Note we want this to be inside a
* transaction, so xact.c doesn't issue useless WARNING.
*/
CHECK_FOR_INTERRUPTS();
@@ -293,7 +293,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
/*
* Open all indexes of the relation, and see if there are any analyzable
- * columns in the indexes. We do not analyze index columns if there was
+ * columns in the indexes. We do not analyze index columns if there was
* an explicit column list in the ANALYZE command, however.
*/
vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
@@ -355,7 +355,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
/*
* Determine how many rows we need to sample, using the worst case from
- * all analyzable columns. We use a lower bound of 100 rows to avoid
+ * all analyzable columns. We use a lower bound of 100 rows to avoid
* possible overflow in Vitter's algorithm. (Note: that will also be
* the target in the corner case where there are no analyzable columns.)
*/
@@ -384,7 +384,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
&totalrows, &totaldeadrows);
/*
- * Compute the statistics. Temporary results during the calculations for
+ * Compute the statistics. Temporary results during the calculations for
* each column are stored in a child context. The calc routines are
* responsible to make sure that whatever they store into the VacAttrStats
* structure is allocated in anl_context.
@@ -425,7 +425,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
/*
* Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns. (If there are stats in
+ * previous statistics for the target columns. (If there are stats in
* pg_statistic for columns we didn't process, we leave them alone.)
*/
update_attstats(relid, attr_cnt, vacattrstats);
@@ -719,7 +719,7 @@ examine_attribute(Relation onerel, int attnum)
return NULL;
/*
- * Create the VacAttrStats struct. Note that we only have a copy of the
+ * Create the VacAttrStats struct. Note that we only have a copy of the
* fixed fields of the pg_attribute tuple.
*/
stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
@@ -748,7 +748,7 @@ examine_attribute(Relation onerel, int attnum)
}
/*
- * Call the type-specific typanalyze function. If none is specified, use
+ * Call the type-specific typanalyze function. If none is specified, use
* std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
@@ -824,7 +824,7 @@ BlockSampler_Next(BlockSampler bs)
* If we are to skip, we should advance t (hence decrease K), and
* repeat the same probabilistic test for the next block. The naive
* implementation thus requires a random_fract() call for each block
- * number. But we can reduce this to one random_fract() call per
+ * number. But we can reduce this to one random_fract() call per
* selected block, by noting that each time the while-test succeeds,
* we can reinterpret V as a uniform random number in the range 0 to p.
* Therefore, instead of choosing a new V, we just adjust p to be
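[Editor's note: the naive form of the block sampler discussed above — keep block t with probability K/(N - t), where K is the number of samples still needed, drawing one random number per block. The actual code applies the V-reuse trick the comment describes to draw only once per selected block; this sketch keeps the simple per-block draw for clarity, with made-up N and K.]

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    int     N = 100,            /* hypothetical relation size in blocks */
            K = 10;             /* sample size wanted */
    int     t;

    srand(42);
    for (t = 0; t < N && K > 0; t++)
    {
        double  u = (double) rand() / ((double) RAND_MAX + 1);

        if (u * (N - t) < K)    /* select with probability K/(N - t) */
        {
            printf("sample block %d\n", t);
            K--;
        }
    }
    return 0;
}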
@@ -950,7 +950,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
/*
* We ignore unused and redirect line pointers. DEAD line
* pointers should be counted as dead, because we need vacuum to
- * run to get rid of them. Note that this rule agrees with the
+ * run to get rid of them. Note that this rule agrees with the
* way that heap_page_prune() counts things.
*/
if (!ItemIdIsNormal(itemid))
@@ -995,7 +995,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* is the safer option.
*
* A special case is that the inserting transaction might
- * be our own. In this case we should count and sample
+ * be our own. In this case we should count and sample
* the row, to accommodate users who load a table and
* analyze it in one transaction. (pgstat_report_analyze
* has to adjust the numbers we send to the stats
@@ -1037,7 +1037,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
/*
* The first targrows sample rows are simply copied into the
* reservoir. Then we start replacing tuples in the sample
- * until we reach the end of the relation. This algorithm is
+ * until we reach the end of the relation. This algorithm is
* from Jeff Vitter's paper (see full citation below). It
* works by repeatedly computing the number of tuples to skip
* before selecting a tuple, which replaces a randomly chosen
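[Editor's note: a minimal reservoir-sampling sketch of the scheme described above — the first targrows tuples fill the reservoir, then each new tuple replaces a random slot with decreasing probability. Vitter's algorithm computes how many tuples to skip between replacements, which is faster; the per-tuple form below is equivalent in distribution. All sizes are made up.]

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    int     targrows = 5;
    int     reservoir[5];
    int     t, seen = 0;

    srand(42);
    for (t = 0; t < 1000; t++)          /* pretend 1000 tuples stream by */
    {
        seen++;
        if (seen <= targrows)
            reservoir[seen - 1] = t;    /* fill the reservoir first */
        else
        {
            int     slot = rand() % seen;       /* uniform in [0, seen) */

            if (slot < targrows)
                reservoir[slot] = t;    /* replace a random victim */
        }
    }
    for (t = 0; t < targrows; t++)
        printf("sampled tuple %d\n", reservoir[t]);
    return 0;
}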
@@ -1265,7 +1265,7 @@ compare_rows(const void *a, const void *b)
* Statistics are stored in several places: the pg_class row for the
* relation has stats about the whole relation, and there is a
* pg_statistic row for each (non-system) attribute that has ever
- * been analyzed. The pg_class values are updated by VACUUM, not here.
+ * been analyzed. The pg_class values are updated by VACUUM, not here.
*
* pg_statistic rows are just added or updated normally. This means
* that pg_statistic will probably contain some deleted rows at the
@@ -1665,7 +1665,7 @@ compute_minimal_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
@@ -1782,7 +1782,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* We assume (not very reliably!) that all the multiply-occurring
* values are reflected in the final track[] list, and the other
* nonnull values all appeared but once. (XXX this usually
- * results in a drastic overestimate of ndistinct. Can we do
+ * results in a drastic overestimate of ndistinct. Can we do
* any better?)
*----------
*/
@@ -1819,7 +1819,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample.
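[Editor's note: the cutoff rule quoted above, worked as a tiny sketch — keep a value as a most-common value only if its sample count is at least 25% above the average count per distinct value, and at least 2. The sample numbers are invented.]

#include <stdio.h>

int
main(void)
{
    double  samplerows = 30000.0;       /* hypothetical sample size */
    double  ndistinct = 1000.0;         /* estimated distinct values */
    double  avgcount = samplerows / ndistinct;
    double  mincount = avgcount * 1.25; /* 25% above average */

    if (mincount < 2)
        mincount = 2;                   /* at least 2 instances */
    printf("store values with count >= %.1f\n", mincount);
    return 0;
}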
@@ -1984,7 +1984,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
@@ -2030,7 +2030,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* accumulate ordering-correlation statistics.
*
* To determine which are most common, we first have to count the
- * number of duplicates of each value. The duplicates are adjacent in
+ * number of duplicates of each value. The duplicates are adjacent in
* the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
@@ -2039,7 +2039,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* that are adjacent in the sorted order; otherwise it could not know
* that it's ordered the pair correctly.) We exploit this by having
* compare_scalars remember the highest tupno index that each
- * ScalarItem has been found equal to. At the end of the sort, a
+ * ScalarItem has been found equal to. At the end of the sort, a
* ScalarItem's tupnoLink will still point to itself if and only if it
* is the last item of its group of duplicates (since the group will
* be ordered by tupno).
@@ -2159,7 +2159,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample. Also, we won't suppress values
@@ -2314,7 +2314,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* The object of this loop is to copy the first and last values[]
- * entries along with evenly-spaced values in between. So the
+ * entries along with evenly-spaced values in between. So the
* i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But
* computing that subscript directly risks integer overflow when
* the stats target is more than a couple thousand. Instead we
@@ -2425,7 +2425,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* qsort_arg comparator for sorting ScalarItems
*
* Aside from sorting the items, we update the tupnoLink[] array
- * whenever two ScalarItems are found to contain equal datums. The array
+ * whenever two ScalarItems are found to contain equal datums. The array
* is indexed by tupno; for each ScalarItem, it contains the highest
* tupno that that item's datum has been found to be equal to. This allows
* us to avoid additional comparisons in compute_scalar_stats().
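The analyze.c hunks above quote the comments around its row sampler, which is taken from Jeff Vitter's reservoir-sampling paper. For readers unfamiliar with the technique, here is a minimal sketch of the basic reservoir idea (Algorithm R; all names here are invented). The real code implements the faster skip-based Algorithm Z from the same paper, but the invariant is the same: once t rows have been seen, each row sits in the reservoir with probability targrows/t.

	/* Hedged sketch of plain reservoir sampling, not the analyze.c code. */
	#include <stdio.h>
	#include <stdlib.h>

	static void
	reservoir_sample(const int *rows, int nrows, int *sample, int targrows)
	{
		int			t;

		/* The first targrows rows are simply copied into the reservoir. */
		for (t = 0; t < targrows && t < nrows; t++)
			sample[t] = rows[t];

		/* Then row t replaces a random slot with probability targrows/(t+1). */
		for (; t < nrows; t++)
		{
			int			k = rand() % (t + 1);	/* uniform in [0, t] */

			if (k < targrows)
				sample[k] = rows[t];
		}
	}

	int
	main(void)
	{
		int			rows[1000];
		int			sample[10];
		int			i;

		for (i = 0; i < 1000; i++)
			rows[i] = i;
		srand(42);
		reservoir_sample(rows, 1000, sample, 10);
		for (i = 0; i < 10; i++)
			printf("%d ", sample[i]);
		printf("\n");
		return 0;
	}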
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index bfe4527514..801839c91f 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -53,7 +53,7 @@
* transaction.
*
* Like NOTIFY, LISTEN and UNLISTEN just add the desired action to a list
- * of pending actions. If we reach transaction commit, the changes are
+ * of pending actions. If we reach transaction commit, the changes are
* applied to pg_listener just before executing any pending NOTIFYs. This
* method is necessary because to avoid race conditions, we must hold lock
* on pg_listener from when we insert a new listener tuple until we commit.
@@ -133,8 +133,8 @@ static List *upperPendingActions = NIL; /* list of upper-xact lists */
/*
* State for outbound notifies consists of a list of all relnames NOTIFYed
- * in the current transaction. We do not actually perform a NOTIFY until
- * and unless the transaction commits. pendingNotifies is NIL if no
+ * in the current transaction. We do not actually perform a NOTIFY until
+ * and unless the transaction commits. pendingNotifies is NIL if no
* NOTIFYs have been done in the current transaction.
*
* The list is kept in CurTransactionContext. In subtransactions, each
@@ -636,7 +636,7 @@ Send_Notify(Relation lRel)
* If someone has already notified this listener, we don't bother
* modifying the table, but we do still send a SIGUSR2 signal,
* just in case that backend missed the earlier signal for some
- * reason. It's OK to send the signal first, because the other
+ * reason. It's OK to send the signal first, because the other
* guy can't read pg_listener until we unlock it.
*/
if (kill(listenerPID, SIGUSR2) < 0)
@@ -791,7 +791,7 @@ NotifyInterruptHandler(SIGNAL_ARGS)
int save_errno = errno;
/*
- * Note: this is a SIGNAL HANDLER. You must be very wary what you do
+ * Note: this is a SIGNAL HANDLER. You must be very wary what you do
* here. Some helpful soul had this routine sprinkled with TPRINTFs, which
* would likely lead to corruption of stdio buffers if they were ever
* turned on.
@@ -1071,7 +1071,7 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID)
pq_endmessage(&buf);
/*
- * NOTE: we do not do pq_flush() here. For a self-notify, it will
+ * NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
* ProcessIncomingNotify will do it after finding all the notifies.
*/
@@ -1124,7 +1124,7 @@ notify_twophase_postcommit(TransactionId xid, uint16 info,
/*
* Set up to issue the NOTIFY at the end of my own current transaction.
* (XXX this has some issues if my own transaction later rolls back, or if
- * there is any significant delay before I commit. OK for now because we
+ * there is any significant delay before I commit. OK for now because we
* disallow COMMIT PREPARED inside a transaction block.)
*/
Async_Notify((char *) recdata);
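The async.c comments above all turn on one pattern: LISTEN/UNLISTEN/NOTIFY actions are merely queued during the transaction and only acted on if it commits. A schematic of that pattern follows, assuming an invented queue_notify()/at_commit_notify() API; the real lists live in CurTransactionContext, and commit signals listener backends rather than printing.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	typedef struct PendingNotify
	{
		char	   *relname;
		struct PendingNotify *next;
	} PendingNotify;

	/* NULL if no NOTIFYs have been issued in the current transaction. */
	static PendingNotify *pendingNotifies = NULL;

	static void
	queue_notify(const char *relname)
	{
		PendingNotify *n = malloc(sizeof(PendingNotify));

		n->relname = strdup(relname);	/* strdup is POSIX, not ISO C */
		n->next = pendingNotifies;
		pendingNotifies = n;
	}

	/* Only now, at commit, do the queued NOTIFYs take effect. */
	static void
	at_commit_notify(void)
	{
		PendingNotify *n = pendingNotifies;

		while (n != NULL)
		{
			PendingNotify *next = n->next;

			printf("NOTIFY %s\n", n->relname);	/* stand-in for signaling */
			free(n->relname);
			free(n);
			n = next;
		}
		pendingNotifies = NULL;
	}

	/* On abort, the queued actions are simply thrown away. */
	static void
	at_abort_notify(void)
	{
		pendingNotifies = NULL;		/* leak-free cleanup omitted in this toy */
	}

	int
	main(void)
	{
		queue_notify("orders");
		queue_notify("shipments");
		at_commit_notify();		/* a rollback would call at_abort_notify() */
		return 0;
	}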
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index ff2a5fb60a..4aaad1e504 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -88,7 +88,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation to be specified without index. In that case,
+ * We also allow a relation to be specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -209,7 +209,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
+ * Build the list of relations to cluster. Note that this lives in
* cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -245,7 +245,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
*
* This clusters the table by creating a new, clustered table and
* swapping the relfilenodes of the new table and the old table, so
- * the OID of the original table is preserved. Thus we do not lose
+ * the OID of the original table is preserved. Thus we do not lose
* GRANT, inheritance, or references to this table (this was a bug
* in releases thru 7.3).
*
@@ -264,7 +264,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck, bool verbose)
/*
* We grab exclusive access to the target rel and index for the duration
- * of the transaction. (This is redundant for the single-transaction
+ * of the transaction. (This is redundant for the single-transaction
* case, since cluster() already did it.) The index lock is taken inside
* check_index_is_clusterable.
*/
@@ -299,7 +299,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck, bool verbose)
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
- * remote temp tables by name. There is another check in
+ * remote temp tables by name. There is another check in
* check_index_is_clusterable which is redundant, but we leave it for
* extra safety.
*/
@@ -382,7 +382,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
/*
* Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
+ * every row of the relation). We could relax this by making a separate
* seqscan pass over the table to copy the missing rows, but that seems
* expensive and tedious.
*/
@@ -592,7 +592,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
/*
* Create the new heap, using a temporary name in the same namespace as
- * the existing table. NOTE: there is some risk of collision with user
+ * the existing table. NOTE: there is some risk of collision with user
* relnames. Working around this seems more trouble than it's worth; in
* particular, we can't create the new heap in a different namespace from
* the old, or we will have problems with the TEMP status of temp tables.
@@ -633,7 +633,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
/*
* Rebuild each index on the relation (but not the toast table, which is
- * all-new at this point). We do not need CommandCounterIncrement()
+ * all-new at this point). We do not need CommandCounterIncrement()
* because reindex_relation does it.
*
* Note: because index_build is called via reindex_relation, it will never
@@ -646,9 +646,9 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
/*
* At this point, everything is kosher except that the toast table's name
- * corresponds to the temporary table. The name is irrelevant to the
+ * corresponds to the temporary table. The name is irrelevant to the
* backend because it's referenced by OID, but users looking at the
- * catalogs could be confused. Rename it to prevent this problem.
+ * catalogs could be confused. Rename it to prevent this problem.
*
* Note no lock required on the relation, because we already hold an
* exclusive lock on it.
@@ -1130,7 +1130,7 @@ swap_relation_files(Oid r1, Oid r2, TransactionId frozenXid)
}
/*
- * Blow away the old relcache entries now. We need this kluge because
+ * Blow away the old relcache entries now. We need this kluge because
* relcache.c keeps a link to the smgr relation for the physical file, and
* that will be out of date as soon as we do CommandCounterIncrement.
* Whichever of the rels is the second to be cleared during cache
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index b6a90a248b..55959d6c96 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -540,7 +540,7 @@ CommentRelation(int objtype, List *relname, char *comment)
* such as a table's column. The routine will check security
* restrictions and then attempt to look up the specified
* attribute. If successful, a comment is added/dropped, else an
- * ereport() exception is thrown. The parameters are the relation
+ * ereport() exception is thrown. The parameters are the relation
* and attribute names, and the comment
*/
static void
@@ -1403,7 +1403,7 @@ CommentLargeObject(List *qualname, char *comment)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid OID
+ * constants by the lexer. Accept these if they are valid OID
* strings.
*/
loid = DatumGetObjectId(DirectFunctionCall1(oidin,
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index eca25b1f64..9f634f1d00 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -151,7 +151,7 @@ typedef struct CopyStateData
/*
* Finally, raw_buf holds raw data read from the data source (file or
- * client connection). CopyReadLine parses this data sufficiently to
+ * client connection). CopyReadLine parses this data sufficiently to
* locate line boundaries, then transfers the data to line_buf and
* converts it. Note: we guarantee that there is a \0 at
* raw_buf[raw_buf_len].
@@ -178,7 +178,7 @@ typedef struct
* function call overhead in tight COPY loops.
*
* We must use "if (1)" because the usual "do {...} while(0)" wrapper would
- * prevent the continue/break processing from working. We end the "if (1)"
+ * prevent the continue/break processing from working. We end the "if (1)"
* with "else ((void) 0)" to ensure the "if" does not unintentionally match
* any "else" in the calling code, and to avoid any compiler warnings about
* empty statements. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
@@ -474,7 +474,7 @@ CopySendEndOfRow(CopyState cstate)
* CopyGetData reads data from the source (file or frontend)
*
* We attempt to read at least minread, and at most maxread, bytes from
- * the source. The actual number of bytes read is returned; if this is
+ * the source. The actual number of bytes read is returned; if this is
* less than minread, EOF was detected.
*
* Note: when copying from the frontend, we expect a proper EOF mark per
@@ -691,7 +691,7 @@ CopyLoadRawBuf(CopyState cstate)
* we also support copying the output of an arbitrary SELECT query.
*
* If <pipe> is false, transfer is between the table and the file named
- * <filename>. Otherwise, transfer is between the table and our regular
+ * <filename>. Otherwise, transfer is between the table and our regular
* input/output stream. The latter could be either stdin/stdout or a
* socket, depending on whether we're running under Postmaster control.
*
@@ -1030,7 +1030,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
errmsg("COPY (SELECT) WITH OIDS is not supported")));
/*
- * Run parse analysis and rewrite. Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
@@ -1337,7 +1337,7 @@ CopyTo(CopyState cstate)
* Create a temporary memory context that we can reset once per row to
* recover palloc'd memory. This avoids any problems with leaks inside
* datatype output routines, and should be faster than retail pfree's
- * anyway. (We don't need a whole econtext as CopyFrom does.)
+ * anyway. (We don't need a whole econtext as CopyFrom does.)
*/
cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY TO",
@@ -2080,7 +2080,7 @@ CopyFrom(CopyState cstate)
/*
* Now compute and insert any defaults available for the columns not
- * provided by the input data. Anything not processed here or above
+ * provided by the input data. Anything not processed here or above
* will remain NULL.
*/
for (i = 0; i < num_defaults; i++)
@@ -2195,7 +2195,7 @@ CopyFrom(CopyState cstate)
* server encoding.
*
* Result is true if read was terminated by EOF, false if terminated
- * by newline. The terminating newline or EOF marker is not included
+ * by newline. The terminating newline or EOF marker is not included
* in the final value of line_buf.
*/
static bool
@@ -2349,7 +2349,7 @@ CopyReadLineText(CopyState cstate)
* of read-ahead and avoid the many calls to
* IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
* does not allow us to read too far ahead or we might read into the
- * next data, so we read-ahead only as far as we know we can. One
+ * next data, so we read-ahead only as far as we know we can. One
* optimization would be to read-ahead four bytes here if
* cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
* considering the size of the buffer.
@@ -2359,7 +2359,7 @@ CopyReadLineText(CopyState cstate)
REFILL_LINEBUF;
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
@@ -2417,7 +2417,7 @@ CopyReadLineText(CopyState cstate)
/*
* Updating the line count for embedded CR and/or LF chars is
* necessarily a little fragile - this test is probably about the
- * best we can do. (XXX it's arguable whether we should do this
+ * best we can do. (XXX it's arguable whether we should do this
* at all --- is cur_lineno a physical or logical count?)
*/
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
@@ -2596,7 +2596,7 @@ CopyReadLineText(CopyState cstate)
* after a backslash is special, so we skip over that second
* character too. If we didn't do that, \\. would be
* considered an end-of-copy marker, while in non-CSV mode it is a
- * literal backslash followed by a period. In CSV mode,
+ * literal backslash followed by a period. In CSV mode,
* backslashes are not special, so we want to process the
* character after the backslash just like a normal character,
* so we don't increment in those cases.
@@ -2668,7 +2668,7 @@ GetDecimalFromHex(char hex)
* null_print is the null marker string. Note that this is compared to
* the pre-de-escaped input string.
*
- * The return value is the number of fields actually read. (We error out
+ * The return value is the number of fields actually read. (We error out
* if this would exceed maxfields, which is the length of fieldvals[].)
*/
static int
@@ -2698,7 +2698,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into fieldvals[].
*/
@@ -2926,7 +2926,7 @@ CopyReadAttributesCSV(CopyState cstate, int maxfields, char **fieldvals)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into fieldvals[].
*/
@@ -3139,7 +3139,7 @@ CopyAttributeOutText(CopyState cstate, char *string)
/*
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
- * are infrequent. To avoid overhead from calling CopySendData once per
+ * are infrequent. To avoid overhead from calling CopySendData once per
* character, we dump out all characters between escaped characters in a
* single call. The loop invariant is that the data from "start" to "ptr"
* can be sent literally, but hasn't yet been.
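One copy.c comment above documents why its buffer-refill macros are wrapped in "if (1) ... else ((void) 0)" instead of the usual "do {...} while (0)": the macro body must be able to continue or break the caller's loop, which a do/while wrapper would capture. A self-contained illustration, with an invented macro (the real macros refill COPY's raw buffer):

	#include <stdio.h>

	/* "continue" here must act on the *caller's* loop, so do/while is out. */
	#define SKIP_IF_ODD(x) \
		if (1) \
		{ \
			if ((x) % 2 != 0) \
				continue; \
		} else ((void) 0)

	int
	main(void)
	{
		int			i;

		for (i = 0; i < 6; i++)
		{
			SKIP_IF_ODD(i);		/* trailing semicolon matches the else */
			printf("%d\n", i);	/* prints 0, 2, 4 */
		}
		return 0;
	}

The dangling "else ((void) 0)" both swallows the caller's semicolon and keeps the macro's "if" from pairing with an unrelated "else" at the call site.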
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index b61e5cf780..9b549d1036 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -263,7 +263,7 @@ createdb(const CreatedbStmt *stmt)
* To create a database, must have createdb privilege and must be able to
* become the target role (this does not imply that the target role itself
* must have createdb privilege). The latter provision guards against
- * "giveaway" attacks. Note that a superuser will always have both of
+ * "giveaway" attacks. Note that a superuser will always have both of
* these privileges a fortiori.
*/
if (!have_createdb_privilege())
@@ -447,7 +447,7 @@ createdb(const CreatedbStmt *stmt)
/*
* If we are trying to change the default tablespace of the template,
* we require that the template not have any files in the new default
- * tablespace. This is necessary because otherwise the copied
+ * tablespace. This is necessary because otherwise the copied
* database would contain pg_class rows that refer to its default
* tablespace both explicitly (by OID) and implicitly (as zero), which
* would cause problems. For example another CREATE DATABASE using
@@ -483,7 +483,7 @@ createdb(const CreatedbStmt *stmt)
}
/*
- * Check for db name conflict. This is just to give a more friendly error
+ * Check for db name conflict. This is just to give a more friendly error
* message than "unique index violation". There's a race condition but
* we're willing to accept the less friendly message in that case.
*/
@@ -598,7 +598,7 @@ createdb(const CreatedbStmt *stmt)
*
* Inconsistency of this sort is inherent to all SnapshotNow scans, unless
* some lock is held to prevent concurrent updates of the rows being
- * sought. There should be a generic fix for that, but in the meantime
+ * sought. There should be a generic fix for that, but in the meantime
* it's worth fixing this case in particular because we are doing very
* heavyweight operations within the scan, so that the elapsed time for
* the scan is vastly longer than for most other catalog scans. That
@@ -1164,7 +1164,7 @@ movedb(const char *dbname, const char *tblspcname)
/*
* Use an ENSURE block to make sure we remove the debris if the copy fails
- * (eg, due to out-of-disk-space). This is not a 100% solution, because
+ * (eg, due to out-of-disk-space). This is not a 100% solution, because
* of the possibility of failure during transaction commit, but it should
* handle most scenarios.
*/
@@ -1733,7 +1733,7 @@ get_db_info(const char *name, LOCKMODE lockmode,
LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);
/*
- * And now, re-fetch the tuple by OID. If it's still there and still
+ * And now, re-fetch the tuple by OID. If it's still there and still
* the same name, we win; else, drop the lock and loop back to try
* again.
*/
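get_db_info()'s comment above sketches a lookup-then-lock-then-recheck loop: resolve the name to an OID, take the lock (possibly blocking for a while), then re-fetch by OID to see whether the name still maps to it. The same shape in miniature, with toy stand-ins for the catalog and the lock calls (the real code scans pg_database and uses LockSharedObject()):

	#include <stdio.h>
	#include <string.h>

	typedef unsigned int Oid;
	#define InvalidOid ((Oid) 0)

	/* Toy stand-in for the catalog: one database that could be renamed. */
	static struct
	{
		Oid			oid;
		char		name[64];
	}			fake_catalog = {16384, "mydb"};

	static Oid
	lookup_oid_by_name(const char *name)
	{
		return strcmp(fake_catalog.name, name) == 0 ? fake_catalog.oid : InvalidOid;
	}

	static void lock_dbobject(Oid oid)   { (void) oid; }	/* no-ops here */
	static void unlock_dbobject(Oid oid) { (void) oid; }

	/* Re-fetch by OID: is it still there, and still the same name? */
	static int
	still_named(Oid oid, const char *name)
	{
		return fake_catalog.oid == oid && strcmp(fake_catalog.name, name) == 0;
	}

	static Oid
	get_db_oid_locked(const char *name)
	{
		for (;;)
		{
			Oid			oid = lookup_oid_by_name(name);

			if (oid == InvalidOid)
				return InvalidOid;	/* no such database */

			lock_dbobject(oid);		/* blocks here in the real system */
			if (still_named(oid, name))
				return oid;			/* we win: locked and still current */

			unlock_dbobject(oid);	/* renamed/dropped meanwhile; retry */
		}
	}

	int
	main(void)
	{
		printf("locked oid %u\n", get_db_oid_locked("mydb"));
		return 0;
	}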
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 009dcfd17d..a04b4e7ad2 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -187,7 +187,7 @@ defGetInt64(DefElem *def)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid int8
+ * constants by the lexer. Accept these if they are valid int8
* strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 54f82e6dad..11129751c5 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -93,11 +93,11 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
getParamListTypes(params, &param_types, &num_params);
/*
- * Run parse analysis and rewrite. Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we make
- * a preliminary copy of the source querytree. This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
* the case that the EXPLAIN is in a portal or plpgsql function and is
* executed repeatedly. (See also the same hack in DECLARE CURSOR and
* PREPARE.) XXX FIXME someday.
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index dbd75cc951..e796446355 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -76,7 +76,7 @@ optionListToArray(List *options)
/*
- * Transform a list of DefElem into text array format. This is substantially
+ * Transform a list of DefElem into text array format. This is substantially
* the same thing as optionListToArray(), except we recognize SET/ADD/DROP
* actions for modifying an existing list of options, which is passed in
* Datum form as oldOptions. Also, if fdwvalidator isn't InvalidOid
@@ -119,7 +119,7 @@ transformGenericOptions(Oid catalogId,
/*
* It is possible to perform multiple SET/DROP actions on the same
- * option. The standard permits this, as long as the options to be
+ * option. The standard permits this, as long as the options to be
* added are unique. Note that an unspecified action is taken to be
* ADD.
*/
@@ -472,7 +472,7 @@ AlterForeignDataWrapper(AlterFdwStmt *stmt)
/*
* It could be that the options for the FDW, SERVER and USER MAPPING
- * are no longer valid with the new validator. Warn about this.
+ * are no longer valid with the new validator. Warn about this.
*/
if (stmt->validator)
ereport(WARNING,
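transformGenericOptions()'s comments above describe folding SET/ADD/DROP actions into an existing option list, with an unspecified action treated as ADD. A toy version over a fixed-size array follows; the exact error cases shown (ADD of an existing option, SET or DROP of a missing one) are this sketch's reading of the rules, and the real code operates on List/DefElem nodes and reports through ereport():

	#include <stdio.h>
	#include <string.h>

	#define MAX_OPTS 16

	typedef struct { char key[32]; char val[32]; } Option;
	typedef enum { OPT_ADD, OPT_SET, OPT_DROP } OptAction;

	static int
	find_opt(Option *opts, int n, const char *key)
	{
		int			i;

		for (i = 0; i < n; i++)
			if (strcmp(opts[i].key, key) == 0)
				return i;
		return -1;
	}

	/* Returns the new count, or -1 on the error cases noted above. */
	static int
	apply_action(Option *opts, int n, OptAction act,
				 const char *key, const char *val)
	{
		int			i = find_opt(opts, n, key);

		switch (act)
		{
			case OPT_ADD:
				if (i >= 0 || n >= MAX_OPTS)
					return -1;		/* duplicate option (or toy overflow) */
				strcpy(opts[n].key, key);
				strcpy(opts[n].val, val);
				return n + 1;
			case OPT_SET:
				if (i < 0)
					return -1;		/* option not found */
				strcpy(opts[i].val, val);
				return n;
			case OPT_DROP:
				if (i < 0)
					return -1;		/* option not found */
				opts[i] = opts[n - 1];	/* order not preserved in this toy */
				return n - 1;
		}
		return -1;
	}

	int
	main(void)
	{
		Option		opts[MAX_OPTS];
		int			n = 0;
		int			i;

		n = apply_action(opts, n, OPT_ADD, "host", "db1.example.com");
		n = apply_action(opts, n, OPT_SET, "host", "db2.example.com");
		for (i = 0; i < n; i++)
			printf("%s = %s\n", opts[i].key, opts[i].val);
		return 0;
	}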
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index db22a28888..1c968826d9 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -75,7 +75,7 @@ static void AlterFunctionOwner_internal(Relation rel, HeapTuple tup,
* allow a shell type to be used, or even created if the specified return type
* doesn't exist yet. (Without this, there's no way to define the I/O procs
* for a new type.) But SQL function creation won't cope, so error out if
- * the target language is SQL. (We do this here, not in the SQL-function
+ * the target language is SQL. (We do this here, not in the SQL-function
* validator, so as not to produce a NOTICE and then an ERROR for the same
* condition.)
*/
@@ -389,7 +389,7 @@ examine_parameter_list(List *parameters, Oid languageOid,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
- * raise a duplicate-clause error. (We don't try to detect duplicate
+ * raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@@ -685,7 +685,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName,
{
/*
* For "C" language, store the file name in probin and, when given,
- * the link symbol name in prosrc. If link symbol is omitted,
+ * the link symbol name in prosrc. If link symbol is omitted,
* substitute procedure name. We also allow link symbol to be
* specified as "-", since that was the habit in PG versions before
* 8.4, and there might be dump files out there that don't translate
@@ -1557,7 +1557,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Restricting the volatility of a cast function may or may not be a
* good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -1621,7 +1621,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* We know that composite, enum and array types are never binary-
- * compatible with each other. They all have OIDs embedded in them.
+ * compatible with each other. They all have OIDs embedded in them.
*
* Theoretically you could build a user-defined base type that is
* binary-compatible with a composite, enum, or array type. But we
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index b781d40fce..e8ae3ceb12 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -235,7 +235,7 @@ DefineIndex(Oid relationId,
}
/*
- * Force shared indexes into the pg_global tablespace. This is a bit of a
+ * Force shared indexes into the pg_global tablespace. This is a bit of a
* hack but seems simpler than marking them in the BKI commands.
*/
if (rel->rd_rel->relisshared)
@@ -376,7 +376,7 @@ DefineIndex(Oid relationId,
{
/*
* This shouldn't happen during CREATE TABLE, but can happen
- * during ALTER TABLE. Keep message in sync with
+ * during ALTER TABLE. Keep message in sync with
* transformIndexConstraints() in parser/parse_utilcmd.c.
*/
ereport(ERROR,
@@ -464,7 +464,7 @@ DefineIndex(Oid relationId,
* transactions. That will prevent them from making incompatible HOT
* updates. The new index will be marked not indisready and not
* indisvalid, so that no one else tries to either insert into it or use
- * it for queries. We pass skip_build = true to prevent the build.
+ * it for queries. We pass skip_build = true to prevent the build.
*/
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
@@ -500,8 +500,8 @@ DefineIndex(Oid relationId,
* Now we must wait until no running transaction could have the table open
* with the old list of indexes. To do this, inquire which xacts
* currently would conflict with ShareLock on the table -- ie, which ones
- * have a lock that permits writing the table. Then wait for each of
- * these xacts to commit or abort. Note we do not need to worry about
+ * have a lock that permits writing the table. Then wait for each of
+ * these xacts to commit or abort. Note we do not need to worry about
* xacts that open the table for writing after this point; they will see
* the new index when they open it.
*
@@ -512,7 +512,7 @@ DefineIndex(Oid relationId,
* error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
- * check for that. Also, prepared xacts are not reported, which is fine
+ * check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
@@ -529,7 +529,7 @@ DefineIndex(Oid relationId,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
- * deciding HOT-safety though. This arrangement ensures that no new HOT
+ * deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
@@ -595,7 +595,7 @@ DefineIndex(Oid relationId,
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. Beware! There might still be snapshots in
+ * to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
@@ -630,7 +630,7 @@ DefineIndex(Oid relationId,
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted just
+ * interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots. Obtain a list of VXIDs
* of such transactions, and wait for them individually.
@@ -645,7 +645,7 @@ DefineIndex(Oid relationId,
*
* We can also exclude autovacuum processes and processes running manual
* lazy VACUUMs, because they won't be fazed by missing index entries
- * either. (Manual ANALYZEs, however, can't be excluded because they
+ * either. (Manual ANALYZEs, however, can't be excluded because they
* might be within transactions that are going to do arbitrary operations
* later.)
*
@@ -755,7 +755,7 @@ CheckMutability(Expr *expr)
* Checks that the given partial-index predicate is valid.
*
* This used to also constrain the form of the predicate to forms that
- * indxpath.c could do something with. However, that seems overly
+ * indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario
* any evaluatable predicate will work. So accept any predicate here
@@ -955,7 +955,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* 2000/07/30
*
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
- * too for awhile. I'm starting to think we need a better approach. tgl
+ * too for awhile. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
@@ -1027,7 +1027,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
NameListToString(opclass), accessMethodName)));
/*
- * Verify that the index operator class accepts this datatype. Note we
+ * Verify that the index operator class accepts this datatype. Note we
* will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
@@ -1048,7 +1048,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* GetDefaultOpClass
*
* Given the OIDs of a datatype and an access method, find the default
- * operator class, if any. Returns InvalidOid if there is none.
+ * operator class, if any. Returns InvalidOid if there is none.
*/
Oid
GetDefaultOpClass(Oid type_id, Oid am_id)
@@ -1143,7 +1143,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* Create a name for an implicitly created index, sequence, constraint, etc.
*
* The parameters are typically: the original table name, the original field
- * name, and a "type" string (such as "seq" or "pkey"). The field name
+ * name, and a "type" string (such as "seq" or "pkey"). The field name
* and/or type can be NULL if not relevant.
*
* The result is a palloc'd string.
@@ -1151,7 +1151,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* The basic result we want is "name1_name2_label", omitting "_name2" or
* "_label" when those parameters are NULL. However, we must generate
* a name with less than NAMEDATALEN characters! So, we truncate one or
- * both names if necessary to make a short-enough string. The label part
+ * both names if necessary to make a short-enough string. The label part
* is never truncated (so it had better be reasonably short).
*
* The caller is responsible for checking uniqueness of the generated
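The naming comment above ("name1_name2_label", truncated to fit NAMEDATALEN, label never truncated) can be made concrete. A hedged sketch, assuming the default NAMEDATALEN of 64 and shortening whichever name is currently longer; unlike the real makeObjectName(), it ignores multibyte character boundaries:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define NAMEDATALEN 64		/* result must fit in NAMEDATALEN-1 bytes */

	static char *
	make_object_name(const char *name1, const char *name2, const char *label)
	{
		size_t		len1 = strlen(name1);
		size_t		len2 = name2 ? strlen(name2) : 0;
		size_t		lenl = label ? strlen(label) : 0;	/* never truncated */
		size_t		extra = (name2 ? 1 : 0) + (label ? 1 : 0);	/* underscores */
		char	   *buf;

		/* Shorten the longer of the two names until the whole thing fits. */
		while (len1 + len2 + lenl + extra >= NAMEDATALEN)
		{
			if (len1 >= len2)
				len1--;
			else
				len2--;
		}

		buf = malloc(len1 + len2 + lenl + extra + 1);
		sprintf(buf, "%.*s%s%.*s%s%s",
				(int) len1, name1,
				name2 ? "_" : "", (int) len2, name2 ? name2 : "",
				label ? "_" : "", label ? label : "");
		return buf;
	}

	int
	main(void)
	{
		char	   *n = make_object_name("orders", "customer_id", "fkey");

		printf("%s\n", n);		/* orders_customer_id_fkey */
		free(n);
		return 0;
	}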
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 811e84ad6f..5bd7fc0d6b 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -311,7 +311,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
- * require ownership of the functions. It's also not entirely clear what
+ * require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
@@ -600,7 +600,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);
/*
- * Create dependencies for the opclass proper. Note: we do not create a
+ * Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@@ -2164,7 +2164,7 @@ AlterOpFamilyOwner_oid(Oid opfamilyOid, Oid newOwnerId)
}
/*
- * The first parameter is pg_opfamily, opened and suitably locked. The second
+ * The first parameter is pg_opfamily, opened and suitably locked. The second
* parameter is a copy of the tuple from pg_opfamily we want to modify.
*/
static void
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index daa8549e49..3af0d28d0f 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -188,7 +188,7 @@ DefineOperator(List *names, List *parameters)
functionOid = LookupFuncName(functionName, nargs, typeId, false);
/*
- * We require EXECUTE rights for the function. This isn't strictly
+ * We require EXECUTE rights for the function. This isn't strictly
* necessary, since EXECUTE will be checked at any attempted use of the
* operator, but it seems like a good idea anyway.
*/
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index d6c1a580b7..879f6aa367 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,7 +4,7 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
*
@@ -89,7 +89,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
/*----------
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case we
+ * memory context. We want to pass down the parameter values in case we
* had a command like
* DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
* This will have been parsed using the outer parameter set and the
@@ -106,7 +106,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
- * so. Also, we disallow scrolling for FOR UPDATE cursors.
+ * so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -354,7 +354,7 @@ PersistHoldablePortal(Portal portal)
ExecutorRewind(queryDesc);
/*
- * Change the destination to output to the tuplestore. Note we tell
+ * Change the destination to output to the tuplestore. Note we tell
* the tuplestore receiver to detoast all data passed through it.
*/
queryDesc->dest = CreateDestReceiver(DestTuplestore);
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index c30ce35904..276dacdeb2 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -428,7 +428,7 @@ InitQueryHashTable(void)
* copy.
*
* Exception: commandTag is presumed to be a pointer to a constant string,
- * or possibly NULL, so it need not be copied. Note that commandTag should
+ * or possibly NULL, so it need not be copied. Note that commandTag should
* be NULL only if the original query (before rewriting) was empty.
*/
void
@@ -542,7 +542,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt)
/*
* Given a prepared statement that returns tuples, extract the query
- * targetlist. Returns NIL if the statement doesn't have a determinable
+ * targetlist. Returns NIL if the statement doesn't have a determinable
* targetlist.
*
* Note: this is pretty ugly, but since it's only used in corner cases like
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index afb61981ce..c2e90f08d3 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -227,7 +227,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER. (This is probably obsolete and removable?)
*/
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 4cc83eb018..9cff0aa00e 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -65,7 +65,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
* To create a schema, must have schema-create privilege on the current
* database and must be able to become the target role (this does not
* imply that the target role itself must have create-schema privilege).
- * The latter provision guards against "giveaway" attacks. Note that a
+ * The latter provision guards against "giveaway" attacks. Note that a
* superuser will always have both of these privileges a fortiori.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
@@ -113,7 +113,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
- * references. Note that the result is still a list of raw parsetrees ---
+ * references. Note that the result is still a list of raw parsetrees ---
* we cannot, in general, run parse analysis on one statement until we
* have actually executed the prior ones.
*/
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index cae981effe..5141e83a85 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -59,7 +59,7 @@ typedef struct sequence_magic
* rely on the relcache, since it's only, well, a cache, and may decide to
* discard entries.)
*
- * XXX We use linear search to find pre-existing SeqTable entries. This is
+ * XXX We use linear search to find pre-existing SeqTable entries. This is
* good when only a small number of sequences are touched in a session, but
* would suck with many different sequences. Perhaps use a hashtable someday.
*/
@@ -232,8 +232,8 @@ DefineSequence(CreateSeqStmt *seq)
* Two special hacks here:
*
* 1. Since VACUUM does not process sequences, we have to force the tuple
- * to have xmin = FrozenTransactionId now. Otherwise it would become
- * invisible to SELECTs after 2G transactions. It is okay to do this
+ * to have xmin = FrozenTransactionId now. Otherwise it would become
+ * invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*
@@ -490,7 +490,7 @@ nextval_internal(Oid relid)
}
/*
- * Decide whether we should emit a WAL log record. If so, force up the
+ * Decide whether we should emit a WAL log record. If so, force up the
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
@@ -859,7 +859,7 @@ setval3_oid(PG_FUNCTION_ARGS)
* Open the sequence and acquire AccessShareLock if needed
*
* If we haven't touched the sequence already in this transaction,
- * we need to acquire AccessShareLock. We arrange for the lock to
+ * we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
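The sequence.c comment above flags its linear-search session cache as a known trade-off: cheap when a session touches only a few sequences, O(n) per lookup otherwise. The shape of such a cache, reduced to a toy (the entry contents are invented; real SeqTable entries also carry cached sequence state):

	#include <stdio.h>
	#include <stdlib.h>

	typedef unsigned int Oid;

	typedef struct SeqTableEntry
	{
		Oid			relid;
		struct SeqTableEntry *next;
	} SeqTableEntry;

	static SeqTableEntry *seqtab = NULL;

	static SeqTableEntry *
	get_seq_entry(Oid relid)
	{
		SeqTableEntry *e;

		/* Linear search; a hash table would win with many sequences. */
		for (e = seqtab; e != NULL; e = e->next)
			if (e->relid == relid)
				return e;

		/* Not seen before in this session: add to the front of the list. */
		e = calloc(1, sizeof(SeqTableEntry));
		e->relid = relid;
		e->next = seqtab;
		seqtab = e;
		return e;
	}

	int
	main(void)
	{
		SeqTableEntry *a = get_seq_entry(16400);
		SeqTableEntry *b = get_seq_entry(16400);

		printf("same entry: %s\n", a == b ? "yes" : "no");
		return 0;
	}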
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 1edf6c7553..a1ae344ef6 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -449,7 +449,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
&inheritOids, &old_constraints, &parentOidCount);
/*
- * Create a tuple descriptor from the relation schema. Note that this
+ * Create a tuple descriptor from the relation schema. Note that this
* deals with column names, types, and NOT NULL constraints, but not
* default values or CHECK constraints; we handle those below.
*/
@@ -538,7 +538,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
CommandCounterIncrement();
/*
- * Open the new relation and acquire exclusive lock on it. This isn't
+ * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't see
* the new rel anyway until we commit), but it keeps the lock manager from
* complaining about deadlock risks.
@@ -830,10 +830,10 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * In CASCADE mode, suck in all referencing relations as well. This
+ * In CASCADE mode, suck in all referencing relations as well. This
* requires multiple iterations to find indirectly-dependent relations. At
* each phase, we need to exclusive-lock new rels before looking for their
- * dependencies, else we might miss something. Also, we check each rel as
+ * dependencies, else we might miss something. Also, we check each rel as
* soon as we open it, to avoid a faux pas such as holding lock for a long
* time on a rel we have no permissions for.
*/
@@ -959,7 +959,7 @@ ExecuteTruncate(TruncateStmt *stmt)
/*
* Create a new empty storage file for the relation, and assign it as
- * the relfilenode value. The old storage file is scheduled for
+ * the relfilenode value. The old storage file is scheduled for
* deletion at commit.
*/
setNewRelfilenode(rel, RecentXmin);
@@ -1029,7 +1029,7 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
+ * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
@@ -1367,7 +1367,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/*
* Now copy the CHECK constraints of this parent, adjusting attnos
- * using the completed newattno[] map. Identically named constraints
+ * using the completed newattno[] map. Identically named constraints
* are merged if possible, else we throw error.
*/
if (constr && constr->num_check > 0)
@@ -1422,7 +1422,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
+ * commit. That will prevent someone else from deleting or ALTERing
* the parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -1892,7 +1892,7 @@ renameatt(Oid myrelid,
/*
* Scan through index columns to see if there's any simple index
- * entries for this attribute. We ignore expressional entries.
+ * entries for this attribute. We ignore expressional entries.
*/
indextup = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
@@ -2040,7 +2040,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, Oid namespaceId)
newrelname)));
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup is OK
+ * Update pg_class tuple with new relname. (Scribbling on reltup is OK
* because it's a copy...)
*/
namestrcpy(&(relform->relname), newrelname);
@@ -2093,7 +2093,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, Oid namespaceId)
* We also reject these commands if there are any pending AFTER trigger events
* for the rel. This is certainly necessary for the rewriting variants of
* ALTER TABLE, because they don't preserve tuple TIDs and so the pending
- * events would try to fetch the wrong tuples. It might be overly cautious
+ * events would try to fetch the wrong tuples. It might be overly cautious
* in other cases, but again it seems better to err on the side of paranoia.
*
* REINDEX calls this with "rel" referencing the index to be rebuilt; here
@@ -2136,23 +2136,23 @@ CheckTableNotInUse(Relation rel, const char *stmt)
* 3. Scan table(s) to check new constraints, and optionally recopy
* the data into new table(s).
* Phase 3 is not performed unless one or more of the subcommands requires
- * it. The intention of this design is to allow multiple independent
+ * it. The intention of this design is to allow multiple independent
* updates of the table schema to be performed with only one pass over the
* data.
*
- * ATPrepCmd performs phase 1. A "work queue" entry is created for
+ * ATPrepCmd performs phase 1. A "work queue" entry is created for
* each table to be affected (there may be multiple affected tables if the
* commands traverse a table inheritance hierarchy). Also we do preliminary
* validation of the subcommands, including parse transformation of those
* expressions that need to be evaluated with respect to the old table
* schema.
*
- * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
+ * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
* phases 2 and 3 normally do no explicit recursion, since phase 1 already
* did it --- although some subcommands have to recurse in phase 2 instead.)
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
- * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
+ * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
* lists, one for each logical "pass" of phase 2.
*
* ATRewriteTables performs phase 3 for those tables that need it.
@@ -2453,7 +2453,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/*
* ATRewriteCatalogs
*
- * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
+ * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
* dispatched in a "safe" execution order (designed to avoid unnecessary
* conflicts).
*/
@@ -3628,7 +3628,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
* returned by AddRelationNewConstraints, so that the right thing happens
* when a datatype's default applies.
*
- * We skip this step completely for views. For a view, we can only get
+ * We skip this step completely for views. For a view, we can only get
* here from CREATE OR REPLACE VIEW, which historically doesn't set up
* defaults, not even for domain-typed columns. And in any case we
* mustn't invoke Phase 3 on a view, since it has no storage.
@@ -3711,7 +3711,7 @@ add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid)
/*
* ALTER TABLE SET WITH OIDS
*
- * Basically this is an ADD COLUMN for the special OID column. We have
+ * Basically this is an ADD COLUMN for the special OID column. We have
* to cons up a ColumnDef node because the ADD COLUMN code needs one.
*/
static void
@@ -4090,7 +4090,7 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue)
*
* DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism,
* because we have to decide at runtime whether to recurse or not depending
- * on whether attinhcount goes to zero or not. (We can't check this in a
+ * on whether attinhcount goes to zero or not. (We can't check this in a
* static pre-pass because it won't handle multiple inheritance situations
* correctly.)
*/
@@ -4515,7 +4515,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for ATExecAddConstraint. Must already hold exclusive
+ * Subroutine for ATExecAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity checks for it.
* We do permissions checks here, however.
*/
@@ -4640,7 +4640,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* Note that we have to be careful about the difference between the actual
* PK column type and the opclass' declared input type, which might be
- * only binary-compatible with it. The declared opcintype is the right
+ * only binary-compatible with it. The declared opcintype is the right
* thing to probe pg_amop with.
*/
if (numfks != numpks)
@@ -4854,10 +4854,10 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Also return the index OID and index opclasses of the
+ * for the pkrel. Also return the index OID and index opclasses of the
* index supporting the primary key.
*
- * All parameters except pkrel are output parameters. Also, the function
+ * All parameters except pkrel are output parameters. Also, the function
* return value is the number of attributes in the primary key.
*
* Used when the column list in the REFERENCES specification is omitted.
@@ -5644,7 +5644,7 @@ ATPrepAlterColumnType(List **wqueue,
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However, if we are
+ * The recursion case is handled by ATSimpleRecursion. However, if we are
* told not to recurse, there had better not be any child tables; else the
* alter would put them out of step.
*/
@@ -5709,7 +5709,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
*
* We remove any implicit coercion steps at the top level of the old
* default expression; this has been agreed to satisfy the principle of
- * least surprise. (The conversion to the new column type should act like
+ * least surprise. (The conversion to the new column type should act like
* it started from what the user sees as the stored expression, and the
* implicit coercions aren't going to be shown.)
*/
@@ -5738,7 +5738,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to save
+ * performed all the individual ALTER TYPE operations. We have to save
* the info before executing ALTER TYPE, though, else the deparser will
* get confused.
*
@@ -5947,7 +5947,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
heap_close(depRel, RowExclusiveLock);
/*
- * Here we go --- change the recorded column type. (Note heapTup is a
+ * Here we go --- change the recorded column type. (Note heapTup is a
* copy of the syscache entry, so okay to scribble on.)
*/
attTup->atttypid = targettype;
@@ -6014,7 +6014,7 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
/*
* Re-parse the index and constraint definitions, and attach them to the
- * appropriate work queue entries. We do this before dropping because in
+ * appropriate work queue entries. We do this before dropping because in
* the case of a FOREIGN KEY constraint, we might not yet have exclusive
* lock on the table the constraint is attached to, and we need to get
* that before dropping. It's safe because the parser won't actually look
@@ -6939,7 +6939,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
log_newpage(&dst->smgr_rnode, forkNum, blkno, page);
/*
- * Now write the page. We say isTemp = true even if it's not a temp
+ * Now write the page. We say isTemp = true even if it's not a temp
* rel, because there's no need for smgr to schedule an fsync for this
* write; we'll do it ourselves below.
*/
@@ -7099,7 +7099,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
MergeConstraintsIntoExisting(child_rel, parent_rel);
/*
- * OK, it looks valid. Make the catalog entries that show inheritance.
+ * OK, it looks valid. Make the catalog entries that show inheritance.
*/
StoreCatalogInheritance1(RelationGetRelid(child_rel),
RelationGetRelid(parent_rel),
@@ -8032,7 +8032,7 @@ AtEOXact_on_commit_actions(bool isCommit)
* Post-subcommit or post-subabort cleanup for ON COMMIT management.
*
* During subabort, we can immediately remove entries created during this
- * subtransaction. During subcommit, just relabel entries marked during
+ * subtransaction. During subcommit, just relabel entries marked during
* this subtransaction as being the parent's responsibility.
*/
void
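The long comment above lays out ALTER TABLE's three phases: file each subcommand under a pass in phase 1, execute the passes in order in phase 2 (so, for example, DROP COLUMN runs before ADD COLUMN), and scan or rewrite the table at most once in phase 3. A toy of the pass-based dispatch, with subcommands reduced to tagged ints and all names invented:

	#include <stdio.h>

	#define AT_PASS_DROP	0
	#define AT_PASS_ADD		1
	#define AT_PASS_MISC	2
	#define AT_NUM_PASSES	3
	#define MAX_CMDS		8

	typedef struct
	{
		int			cmds[AT_NUM_PASSES][MAX_CMDS];
		int			ncmds[AT_NUM_PASSES];
	} WorkQueueEntry;

	/* Phase 1: validate (omitted) and file the subcommand under its pass. */
	static void
	at_prep_cmd(WorkQueueEntry *tab, int pass, int cmd)
	{
		tab->cmds[pass][tab->ncmds[pass]++] = cmd;
	}

	/* Phase 2: run every pass in order, so e.g. DROPs precede ADDs. */
	static void
	at_rewrite_catalogs(WorkQueueEntry *tab)
	{
		int			pass,
					i;

		for (pass = 0; pass < AT_NUM_PASSES; pass++)
			for (i = 0; i < tab->ncmds[pass]; i++)
				printf("pass %d: executing subcommand %d\n",
					   pass, tab->cmds[pass][i]);
	}

	int
	main(void)
	{
		WorkQueueEntry tab = {0};

		/* The user wrote ADD first, but the DROP still executes first. */
		at_prep_cmd(&tab, AT_PASS_ADD, 101);
		at_prep_cmd(&tab, AT_PASS_DROP, 102);
		at_rewrite_catalogs(&tab);
		/* Phase 3 (table scan/rewrite) would follow here only if needed. */
		return 0;
	}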
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 069b456d0d..30284bfab1 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -28,7 +28,7 @@
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* provision that a zero in pg_class.reltablespace means the database's
- * default tablespace. Without this, CREATE DATABASE would have to go in
+ * default tablespace. Without this, CREATE DATABASE would have to go in
* and munge the system catalogs of the new database.
*
*
@@ -300,7 +300,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
recordDependencyOnOwner(TableSpaceRelationId, tablespaceoid, ownerId);
/*
- * Attempt to coerce target directory to safe permissions. If this fails,
+ * Attempt to coerce target directory to safe permissions. If this fails,
* it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
@@ -319,7 +319,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
location)));
/*
- * Create the PG_VERSION file in the target directory. This has several
+ * Create the PG_VERSION file in the target directory. This has several
* purposes: to make sure we can write in the directory, to prevent
* someone from creating another tablespace pointing at the same directory
* (the emptiness check above will fail), and to label tablespace
@@ -472,7 +472,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
* Not all files deleted? However, there can be lingering empty files
* in the directories, left behind by for example DROP TABLE, that
* have been scheduled for deletion at next checkpoint (see comments
- * in mdunlink() for details). We could just delete them immediately,
+ * in mdunlink() for details). We could just delete them immediately,
* but we can't tell them apart from important data files that we
* mustn't delete. So instead, we force a checkpoint which will clean
* out any lingering files, and try again.
@@ -564,7 +564,7 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo)
*
* If redo is true then ENOENT is a likely outcome here, and we allow it
* to pass without comment. In normal operation we still allow it, but
- * with a warning. This is because even though ProcessUtility disallows
+ * with a warning. This is because even though ProcessUtility disallows
* DROP TABLESPACE in a transaction block, it's possible that a previous
* DROP failed and rolled back after removing the tablespace directories
* and symlink. We want to allow a new DROP attempt to succeed at
@@ -919,7 +919,7 @@ assign_default_tablespace(const char *newval, bool doit, GucSource source)
{
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the name. Must accept the value on faith.
+ * cannot verify the name. Must accept the value on faith.
*/
if (IsTransactionState())
{
@@ -1022,7 +1022,7 @@ assign_temp_tablespaces(const char *newval, bool doit, GucSource source)
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the individual names. Must accept the list on faith.
+ * cannot verify the individual names. Must accept the list on faith.
* Fortunately, there's then also no need to pass the data to fd.c.
*/
if (IsTransactionState())
@@ -1303,7 +1303,7 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
char *linkloc;
/*
- * Attempt to coerce target directory to safe permissions. If this
+ * Attempt to coerce target directory to safe permissions. If this
* fails, it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index a042588a1a..fa041867ad 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -78,7 +78,7 @@ static void AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event,
*
* constraintOid, if nonzero, says that this trigger is being created
* internally to implement that constraint. A suitable pg_depend entry will
- * be made to link the trigger to that constraint. constraintOid is zero when
+ * be made to link the trigger to that constraint. constraintOid is zero when
* executing a user-entered CREATE TRIGGER command.
*
* If checkPermissions is true we require ACL_TRIGGER permissions on the
@@ -185,7 +185,7 @@ CreateTrigger(CreateTrigStmt *stmt, Oid relOid, Oid refRelOid,
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we see a
+ * We allow OPAQUE just so we can load old dump files. When we see a
* trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -207,7 +207,7 @@ CreateTrigger(CreateTrigStmt *stmt, Oid relOid, Oid refRelOid,
* references one of the built-in RI_FKey trigger functions, assume it is
* from a dump of a pre-7.3 foreign key constraint, and take steps to
* convert this legacy representation into a regular foreign key
- * constraint. Ugly, but necessary for loading old dump files.
+ * constraint. Ugly, but necessary for loading old dump files.
*/
if (stmt->isconstraint && !OidIsValid(constraintOid) &&
list_length(stmt->args) >= 6 &&
@@ -411,7 +411,7 @@ CreateTrigger(CreateTrigStmt *stmt, Oid relOid, Oid refRelOid,
{
/*
* It's for a constraint, so make it an internal dependency of the
- * constraint. We can skip depending on the relations, as there'll be
+ * constraint. We can skip depending on the relations, as there'll be
* an indirect dependency via the constraint.
*/
referenced.classId = ConstraintRelationId;
@@ -451,7 +451,7 @@ CreateTrigger(CreateTrigStmt *stmt, Oid relOid, Oid refRelOid,
* full-fledged foreign key constraints.
*
* The conversion is complex because a pre-7.3 foreign key involved three
- * separate triggers, which were reported separately in dumps. While the
+ * separate triggers, which were reported separately in dumps. While the
* single trigger on the referencing table adds no new information, we need
* to know the trigger functions of both of the triggers on the referenced
* table to build the constraint declaration. Also, due to lack of proper
@@ -2290,7 +2290,7 @@ typedef SetConstraintStateData *SetConstraintState;
* Per-trigger-event data
*
* The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
- * status bits and one or two tuple CTIDs. Each event record also has an
+ * status bits and one or two tuple CTIDs. Each event record also has an
* associated AfterTriggerSharedData that is shared across all instances
* of similar events within a "chunk".
*
@@ -2304,7 +2304,7 @@ typedef SetConstraintStateData *SetConstraintState;
* Although this is mutable state, we can keep it in AfterTriggerSharedData
* because all instances of the same type of event in a given event list will
* be fired at the same time, if they were queued between the same firing
- * cycles. So we need only ensure that ats_firing_id is zero when attaching
+ * cycles. So we need only ensure that ats_firing_id is zero when attaching
* a new event to an existing AfterTriggerSharedData record.
*/
typedef uint32 TriggerFlags;
@@ -2351,7 +2351,7 @@ typedef struct AfterTriggerEventDataOneCtid
/*
* To avoid palloc overhead, we keep trigger events in arrays in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). The space between CHUNK_DATA_START and freeptr is occupied by
+ * array). The space between CHUNK_DATA_START and freeptr is occupied by
* AfterTriggerEventData records; the space between endfree and endptr is
* occupied by AfterTriggerSharedData records.
*/
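
The two-frontier chunk layout this comment describes can be illustrated with a small standalone C sketch (not PostgreSQL code; Chunk, chunk_alloc_event, and chunk_alloc_shared are invented names): event records are carved upward from the start of the buffer, shared records downward from the end, and the chunk is full when the two frontiers would cross.

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 1024

typedef struct Chunk
{
    char   *freeptr;            /* next free byte at the low end */
    char   *endfree;            /* first used byte at the high end */
    char   *endptr;             /* one past the end of data[] */
    char    data[CHUNK_SIZE];   /* events grow up, shared records grow down */
} Chunk;

static Chunk *
chunk_create(void)
{
    Chunk  *c = malloc(sizeof(Chunk));

    if (c == NULL)
        return NULL;
    c->freeptr = c->data;
    c->endptr = c->data + CHUNK_SIZE;
    c->endfree = c->endptr;
    return c;
}

/* Carve an event record of 'size' bytes from the low end, or return NULL. */
static void *
chunk_alloc_event(Chunk *c, size_t size)
{
    void   *p;

    if ((size_t) (c->endfree - c->freeptr) < size)
        return NULL;            /* the two frontiers would cross */
    p = c->freeptr;
    c->freeptr += size;
    return p;
}

/* Carve a shared record of 'size' bytes from the high end, or return NULL. */
static void *
chunk_alloc_shared(Chunk *c, size_t size)
{
    if ((size_t) (c->endfree - c->freeptr) < size)
        return NULL;
    c->endfree -= size;
    return c->endfree;
}

int
main(void)
{
    Chunk  *c = chunk_create();
    int     npairs = 0;

    if (c == NULL)
        return 1;
    while (chunk_alloc_event(c, 16) != NULL &&
           chunk_alloc_shared(c, 32) != NULL)
        npairs++;
    printf("placed %d event/shared pairs in one %d-byte chunk\n",
           npairs, CHUNK_SIZE);
    free(c);
    return 0;
}

The design point is the same as in the comment: both record kinds share one allocation, so there is no per-record palloc overhead and a chunk is released in one step.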
@@ -2393,7 +2393,7 @@ typedef struct AfterTriggerEventList
*
* firing_counter is incremented for each call of afterTriggerInvokeEvents.
* We mark firable events with the current firing cycle's ID so that we can
- * tell which ones to work on. This ensures sane behavior if a trigger
+ * tell which ones to work on. This ensures sane behavior if a trigger
* function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
* only fire those events that weren't already scheduled for firing.
*
@@ -2401,7 +2401,7 @@ typedef struct AfterTriggerEventList
* This is saved and restored across failed subtransactions.
*
* events is the current list of deferred events. This is global across
- * all subtransactions of the current transaction. In a subtransaction
+ * all subtransactions of the current transaction. In a subtransaction
* abort, we know that the events added by the subtransaction are at the
* end of the list, so it is relatively easy to discard them. The event
* list chunks themselves are stored in event_cxt.
@@ -2429,12 +2429,12 @@ typedef struct AfterTriggerEventList
* which we similarly use to clean up at subtransaction abort.
*
* firing_stack is a stack of copies of subtransaction-start-time
- * firing_counter. We use this to recognize which deferred triggers were
+ * firing_counter. We use this to recognize which deferred triggers were
* fired (or marked for firing) within an aborted subtransaction.
*
* We use GetCurrentTransactionNestLevel() to determine the correct array
* index in these stacks. maxtransdepth is the number of allocated entries in
- * each stack. (By not keeping our own stack pointer, we can avoid trouble
+ * each stack. (By not keeping our own stack pointer, we can avoid trouble
* in cases where errors during subxact abort cause multiple invocations
* of AfterTriggerEndSubXact() at the same nesting depth.)
*/
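
The save/restore protocol in this comment can be approximated in a few lines of standalone C (illustrative only; begin_subxact_sketch, abort_subxact_sketch, and the flat array of firing ids are invented, and the real code keeps per-event status bits rather than zeroing ids): push the firing counter at subtransaction start, and on abort treat any event marked with a firing id at or above the saved value as having been fired inside the aborted subtransaction.

#include <stdio.h>

#define MAX_DEPTH 16

static unsigned firing_counter = 1;
static unsigned firing_stack[MAX_DEPTH];
static int  nest_depth = 0;

static void
begin_subxact_sketch(void)
{
    firing_stack[nest_depth++] = firing_counter;
}

/*
 * On abort, any event whose firing id is >= the counter saved at
 * subtransaction start was fired (or marked for firing) inside the
 * aborted subtransaction, so its marking must be undone.
 */
static void
abort_subxact_sketch(unsigned *event_firing_ids, int nevents)
{
    unsigned    saved = firing_stack[--nest_depth];
    int         i;

    for (i = 0; i < nevents; i++)
        if (event_firing_ids[i] >= saved)
            event_firing_ids[i] = 0;    /* make it look unfired again */
}

int
main(void)
{
    unsigned    events[3] = {0, 0, 0};

    events[0] = firing_counter++;   /* fired before the subxact */
    begin_subxact_sketch();
    events[1] = firing_counter++;   /* fired inside the subxact */
    abort_subxact_sketch(events, 3);
    printf("event ids after abort: %u %u %u\n",
           events[0], events[1], events[2]);    /* prints: 1 0 0 */
    return 0;
}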
@@ -2702,7 +2702,7 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events,
* single trigger function.
*
* Frequently, this will be fired many times in a row for triggers of
- * a single relation. Therefore, we cache the open relation and provide
+ * a single relation. Therefore, we cache the open relation and provide
* fmgr lookup cache space at the caller level. (For triggers fired at
* the end of a query, we can even piggyback on the executor's state.)
*
@@ -3219,7 +3219,7 @@ AfterTriggerFireDeferred(void)
}
/*
- * Run all the remaining triggers. Loop until they are all gone, in case
+ * Run all the remaining triggers. Loop until they are all gone, in case
* some trigger queues more for us to do.
*/
while (afterTriggerMarkEvents(events, NULL, false))
@@ -3282,7 +3282,7 @@ AfterTriggerBeginSubXact(void)
int my_level = GetCurrentTransactionNestLevel();
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* shouldn't happen?)
*/
if (afterTriggers == NULL)
@@ -3361,7 +3361,7 @@ AfterTriggerEndSubXact(bool isCommit)
CommandId subxact_firing_id;
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* unneeded)
*/
if (afterTriggers == NULL)
@@ -3493,7 +3493,7 @@ SetConstraintStateCopy(SetConstraintState origstate)
}
/*
- * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
+ * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
* pointer to the state object (it will change if we have to repalloc).
*/
static SetConstraintState
@@ -3599,7 +3599,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema. If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
if (constraint->schemaname)
@@ -3766,7 +3766,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* Make sure a snapshot has been established in case trigger
- * functions need one. Note that we avoid setting a snapshot if
+ * functions need one. Note that we avoid setting a snapshot if
* we don't find at least one trigger that has to be fired now.
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
* ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
@@ -3826,7 +3826,7 @@ AfterTriggerPendingOnRel(Oid relid)
AfterTriggerShared evtshared = GetTriggerSharedData(event);
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
+ * We can ignore completed events. (Even if a DONE flag is rolled
* back by subxact abort, it's OK because the effects of the TRUNCATE
* or whatever must get rolled back too.)
*/
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index f791c53fe4..706c8a6be0 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -490,8 +490,8 @@ DefineType(List *names, List *parameters)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
/*
- * Check permissions on functions. We choose to require the creator/owner
- * of a type to also own the underlying functions. Since creating a type
+ * Check permissions on functions. We choose to require the creator/owner
+ * of a type to also own the underlying functions. Since creating a type
* is tantamount to granting public execute access on the functions, the
* minimum sane check would be for execute-with-grant-option. But we
* don't have a way to make the type go away if the grant option is
@@ -781,7 +781,7 @@ DefineDomain(CreateDomainStmt *stmt)
get_namespace_name(domainNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
@@ -1107,7 +1107,7 @@ DefineEnum(CreateEnumStmt *stmt)
get_namespace_name(enumNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
@@ -1725,7 +1725,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's a
+ * Okay to update pg_type row. We can scribble on typTup because it's a
* copy.
*/
typTup->typnotnull = notNull;
@@ -1897,7 +1897,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
/*
* Since all other constraint types throw errors, this must be a check
- * constraint. First, process the constraint expression and add an entry
+ * constraint. First, process the constraint expression and add an entry
* to pg_constraint.
*/
@@ -2125,7 +2125,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
+ * Okay, add column to result. We store the columns in column-number
* order; this is just a hack to improve predictability of regression
* test output ...
*/
@@ -2213,7 +2213,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE in
- * the expression. Note that it will appear to have the type of the base
+ * the expression. Note that it will appear to have the type of the base
* type, not the domain. This seems correct since within the check
* expression, we should not assume the input value can be considered a
* member of the domain.
@@ -2593,7 +2593,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
/*
* If it's a composite type, invoke ATExecChangeOwner so that we fix
- * up the pg_class entry properly. That will call back to
+ * up the pg_class entry properly. That will call back to
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
@@ -2716,7 +2716,7 @@ AlterTypeNamespace(List *names, const char *newschema)
* Caller must have already checked privileges.
*
* The function automatically recurses to process the type's array type,
- * if any. isImplicitArray should be TRUE only when doing this internal
+ * if any. isImplicitArray should be TRUE only when doing this internal
* recursion (outside callers must never try to move an array type directly).
*
* If errorOnTableType is TRUE, the function errors out if the type is
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 180d9c709e..23dc6481a3 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -938,7 +938,7 @@ DropRole(DropRoleStmt *stmt)
ReleaseSysCache(tuple);
/*
- * Remove role from the pg_auth_members table. We have to remove all
+ * Remove role from the pg_auth_members table. We have to remove all
* tuples that show it as either a role or a member.
*
* XXX what about grantor entries? Maybe we should do one heap scan.
@@ -1036,7 +1036,7 @@ RenameRole(const char *oldname, const char *newname)
* XXX Client applications probably store the session user somewhere, so
* renaming it could cause confusion. On the other hand, there may not be
* an actual problem besides a little confusion, so think about this and
- * decide. Same for SET ROLE ... we don't restrict renaming the current
+ * decide. Same for SET ROLE ... we don't restrict renaming the current
* effective userid, though.
*/
@@ -1302,7 +1302,7 @@ AddRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
@@ -1450,7 +1450,7 @@ DelRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index dbf10401b6..43fdac88b5 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -4,7 +4,7 @@
* The postgres vacuum cleaner.
*
* This file includes the "full" version of VACUUM, as well as control code
- * used by all three of full VACUUM, lazy VACUUM, and ANALYZE. See
+ * used by all three of full VACUUM, lazy VACUUM, and ANALYZE. See
* vacuumlazy.c and analyze.c for the rest of the code for the latter two.
*
*
@@ -314,7 +314,7 @@ vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast,
* certainly not be the desired behavior. (This only applies to VACUUM
* FULL, though. We could in theory run lazy VACUUM inside a transaction
* block, but we choose to disallow that case because we'd rather commit
- * as soon as possible after finishing the vacuum. This is mainly so that
+ * as soon as possible after finishing the vacuum. This is mainly so that
* we can let go the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
@@ -604,9 +604,9 @@ vacuum_set_xid_limits(int freeze_min_age,
TransactionId safeLimit;
/*
- * We can always ignore processes running lazy vacuum. This is because we
+ * We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
+ * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
* ignore it. In theory it could be problematic to ignore lazy vacuums on
* a full vacuum, but keep in mind that only one vacuum process can be
* working on a particular table at any time, and that each vacuum is
@@ -832,7 +832,7 @@ vac_update_relstats(Relation relation,
/*
* If we have discovered that there are no indexes, then there's no
- * primary key either. This could be done more thoroughly...
+ * primary key either. This could be done more thoroughly...
*/
if (!hasindex)
{
@@ -882,7 +882,7 @@ vac_update_relstats(Relation relation,
* advance pg_database.datfrozenxid, also try to truncate pg_clog.
*
* We violate transaction semantics here by overwriting the database's
- * existing pg_database tuple with the new value. This is reasonably
+ * existing pg_database tuple with the new value. This is reasonably
* safe since the new value is correct whether or not this transaction
* commits. As with vac_update_relstats, this avoids leaving dead tuples
* behind after a VACUUM.
@@ -988,7 +988,7 @@ vac_update_datfrozenxid(void)
* Also update the XID wrap limit info maintained by varsup.c.
*
* The passed XID is simply the one I just wrote into my pg_database
- * entry. It's used to initialize the "min" calculation.
+ * entry. It's used to initialize the "min" calculation.
*
* This routine is shared by full and lazy VACUUM. Note that it's
* only invoked when we've managed to change our DB's datfrozenxid
@@ -1079,7 +1079,7 @@ vac_truncate_clog(TransactionId frozenXID)
* vacuum_rel() -- vacuum one heap relation
*
* Doing one heap at a time incurs extra overhead, since we need to
- * check that the heap exists again just before we vacuum it. The
+ * check that the heap exists again just before we vacuum it. The
* reason that we do this is so that vacuuming can be spread across
* many small transactions. Otherwise, two-phase locking would require
* us to lock the entire database during one pass of the vacuum cleaner.
@@ -1137,7 +1137,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
}
/*
- * Check for user-requested abort. Note we want this to be inside a
+ * Check for user-requested abort. Note we want this to be inside a
* transaction, so xact.c doesn't issue useless WARNING.
*/
CHECK_FOR_INTERRUPTS();
@@ -1169,7 +1169,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
*
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's not
- * a shared relation). pg_class_ownercheck includes the superuser case.
+ * a shared relation). pg_class_ownercheck includes the superuser case.
*
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
@@ -1290,7 +1290,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good, because
+ * "analyze" will not get done on the toast table. This is good, because
* the toaster always uses hardcoded index access and statistics are
* totally unimportant for toast relations.
*/
@@ -1498,10 +1498,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* dirty. To ensure that invalid data doesn't get written to disk, we
* must take exclusive buffer lock wherever we potentially modify
* pages. In fact, we insist on cleanup lock so that we can safely
- * call heap_page_prune(). (This might be overkill, since the
+ * call heap_page_prune(). (This might be overkill, since the
* bgwriter pays no attention to individual tuples, but on the other
* hand it's unlikely that the bgwriter has this particular page
- * pinned at this instant. So violating the coding rule would buy us
+ * pinned at this instant. So violating the coding rule would buy us
* little anyway.)
*/
LockBufferForCleanup(buf);
@@ -1619,7 +1619,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* live tuples have XMIN_COMMITTED set --- see comments in
* repair_frag()'s walk-along-page loop. Use of async
* commit may prevent HeapTupleSatisfiesVacuum from
- * setting the bit for a recently committed tuple. Rather
+ * setting the bit for a recently committed tuple. Rather
* than trying to handle this corner case, we just give up
* and don't shrink.
*/
@@ -1764,7 +1764,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Here we are building a temporary copy of the page with dead
- * tuples removed. Below we will apply
+ * tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
* determine how much space will be available after removal of
* dead tuples. But note we are NOT changing the real page
@@ -1828,7 +1828,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Add the page to vacuum_pages if it requires reaping, and add it to
* fraged_pages if it has a useful amount of free space. "Useful"
- * means enough for a minimal-sized tuple. But we don't know that
+ * means enough for a minimal-sized tuple. But we don't know that
* accurately near the start of the relation, so add pages
* unconditionally if they have >= BLCKSZ/10 free space. Also
* forcibly add pages with no live tuples, to avoid confusing the
@@ -2242,7 +2242,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* xmin xact completes.
*
* To be on the safe side, we abandon the repair_frag process if
- * we cannot find the parent tuple in vtlinks. This may be overly
+ * we cannot find the parent tuple in vtlinks. This may be overly
* conservative; AFAICS it would be safe to move the chain.
*
* Also, because we distinguish DEAD and RECENTLY_DEAD tuples
@@ -2347,7 +2347,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
/*
- * Must check for DEAD or MOVED_IN tuple, too. This could
+ * Must check for DEAD or MOVED_IN tuple, too. This could
* potentially update hint bits, so we'd better hold the
* buffer content lock.
*/
@@ -2520,7 +2520,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (chain_move_failed)
{
/*
- * Undo changes to offsets_used state. We don't bother
+ * Undo changes to offsets_used state. We don't bother
* cleaning up the amount-free state, since we're not
* going to do any further tuple motion.
*/
@@ -2798,7 +2798,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* that VACUUM FULL always uses sync commit, too.) The transaction
* continues to be shown as running in the ProcArray.
*
- * XXX This desperately needs to be revisited. Any failure after this
+ * XXX This desperately needs to be revisited. Any failure after this
* point will result in a PANIC "cannot abort transaction nnn, it was
* already committed"! As a precaution, we prevent cancel interrupts
* after this point to mitigate this problem; caller is responsible for
@@ -3535,7 +3535,7 @@ scan_index(Relation indrel, double num_tuples)
pg_rusage_show(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's OK
+ * Check for tuple count mismatch. If the index is partial, then it's OK
* for it to have fewer tuples than the heap; else we got trouble.
*/
if (!stats->estimated_count &&
@@ -3614,7 +3614,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
pg_rusage_show(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's OK
+ * Check for tuple count mismatch. If the index is partial, then it's OK
* for it to have fewer tuples than the heap; else we got trouble.
*/
if (!stats->estimated_count &&
@@ -3763,7 +3763,7 @@ vpage_insert(VacPageList vacpagelist, VacPage vpnew)
/*
* vac_bsearch: just like standard C library routine bsearch(),
* except that we first test to see whether the target key is outside
- * the range of the table entries. This case is handled relatively slowly
+ * the range of the table entries. This case is handled relatively slowly
* by the normal binary search algorithm (ie, no faster than any other key)
* but it occurs often enough in VACUUM to be worth optimizing.
*/
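
Since the comment spells out exactly how vac_bsearch differs from bsearch(), a minimal standalone rendering of the idea is easy to give (an illustration, not the PostgreSQL function; range_checked_bsearch and cmp_int are invented names): test the key against the two ends of the sorted table first, then fall through to an ordinary binary search.

#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int     ia = *(const int *) a;
    int     ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

/* Like bsearch(), but reject out-of-range keys in O(1) first. */
static void *
range_checked_bsearch(const void *key, const void *base, size_t nmemb,
                      size_t size, int (*cmp) (const void *, const void *))
{
    const char *cbase = base;

    if (nmemb == 0)
        return NULL;
    if (cmp(key, cbase) < 0 ||
        cmp(key, cbase + (nmemb - 1) * size) > 0)
        return NULL;            /* outside the table's range */
    return bsearch(key, base, nmemb, size, cmp);
}

int
main(void)
{
    int     table[] = {2, 4, 8, 16, 32};
    int     keys[] = {1, 8, 100};
    size_t  i;

    for (i = 0; i < 3; i++)
    {
        int    *hit = range_checked_bsearch(&keys[i], table, 5,
                                            sizeof(int), cmp_int);

        printf("%d -> %s\n", keys[i], hit ? "found" : "not found");
    }
    return 0;
}

Keys 1 and 100 never enter the binary search at all, which is the common case the comment says VACUUM wants to make cheap.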
@@ -3854,7 +3854,7 @@ vac_cmp_vtlinks(const void *left, const void *right)
/*
* Open all the vacuumable indexes of the given relation, obtaining the
- * specified kind of lock on each. Return an array of Relation pointers for
+ * specified kind of lock on each. Return an array of Relation pointers for
* the indexes into *Irel, and the number of indexes into *nindexes.
*
* We consider an index vacuumable if it is marked insertable (IndexIsReady).
@@ -3904,7 +3904,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode,
}
/*
- * Release the resources acquired by vac_open_indexes. Optionally release
+ * Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em).
*/
void
@@ -3967,7 +3967,7 @@ PageGetFreeSpaceWithFillFactor(Relation relation, Page page)
{
/*
* It is correct to use PageGetExactFreeSpace() here, *not*
- * PageGetHeapFreeSpace(). This is because (a) we do our own, exact
+ * PageGetHeapFreeSpace(). This is because (a) we do our own, exact
* accounting for whether line pointers must be added, and (b) we will
* recycle any LP_DEAD line pointers before starting to add rows to a
* page, but that may not have happened yet at the time this function is
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index bcd99efc93..0ef21b38e1 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -13,7 +13,7 @@
* We are willing to use at most maintenance_work_mem memory space to keep
* track of dead tuples. We initially allocate an array of TIDs of that size,
* with an upper limit that depends on table size (this limit ensures we don't
- * allocate a huge area uselessly for vacuuming small tables). If the array
+ * allocate a huge area uselessly for vacuuming small tables). If the array
* threatens to overflow, we suspend the heap scan phase and perform a pass of
* index cleanup and page compaction, then resume the heap scan with an empty
* TID array.
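
The suspend-and-resume cycle described in this header comment amounts to a bounded buffer with a flush step. A toy standalone C sketch of that control flow (record_dead_tuple, flush_dead_tuples, and MAX_DEAD are invented stand-ins for the real TID array and its maintenance_work_mem-derived limit):

#include <stdio.h>

#define MAX_DEAD 8              /* stands in for the memory-based limit */

static int dead[MAX_DEAD];
static int ndead = 0;

/* Stands in for the index-cleanup + page-compaction pass. */
static void
flush_dead_tuples(void)
{
    printf("cleanup pass over %d dead tuples\n", ndead);
    ndead = 0;                  /* resume the scan with an empty array */
}

static void
record_dead_tuple(int tid)
{
    if (ndead >= MAX_DEAD)
        flush_dead_tuples();    /* array would overflow: clean up first */
    dead[ndead++] = tid;
}

int
main(void)
{
    int     tid;

    /* Pretend every third tuple in a 30-tuple heap scan is dead. */
    for (tid = 0; tid < 30; tid++)
        if (tid % 3 == 0)
            record_dead_tuple(tid);
    if (ndead > 0)
        flush_dead_tuples();    /* final pass for the remainder */
    return 0;
}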
@@ -798,8 +798,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
- * its post-compaction free space. If not, then we're done with this
- * page, so remember its free space as-is. (This path will always be
+ * its post-compaction free space. If not, then we're done with this
+ * page, so remember its free space as-is. (This path will always be
* taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
@@ -1127,7 +1127,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
/*
* Note: once we have truncated, we *must* keep the exclusive lock until
- * commit. The sinval message won't be sent until commit, and other
+ * commit. The sinval message won't be sent until commit, and other
* backends must see it and reset their rd_targblock values before they
* can safely access the table again.
*/
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index cbed164237..0903695e2b 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -183,7 +183,7 @@ assign_datestyle(const char *value, bool doit, GucSource source)
return value;
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return. GUC wants it malloc'd.
*/
result = (char *) malloc(32);
if (!result)
@@ -268,7 +268,7 @@ assign_timezone(const char *value, bool doit, GucSource source)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport(ERROR), which is not desirable for GUC. We did what we
+ * ereport(ERROR), which is not desirable for GUC. We did what we
* could to guard against this in flatten_set_variable_args, but a
* string coming in from postgresql.conf might contain anything.
*/
@@ -335,7 +335,7 @@ assign_timezone(const char *value, bool doit, GucSource source)
*
* During GUC initialization, since the timezone library isn't set
* up yet, pg_get_timezone_name will return NULL and we will leave
- * the setting as UNKNOWN. If this isn't overridden from the
+ * the setting as UNKNOWN. If this isn't overridden from the
* config file then pg_timezone_initialize() will eventually
* select a default value from the environment.
*/
@@ -391,7 +391,7 @@ assign_timezone(const char *value, bool doit, GucSource source)
return value;
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return. GUC wants it malloc'd.
*/
if (HasCTZSet)
{
@@ -467,7 +467,7 @@ assign_log_timezone(const char *value, bool doit, GucSource source)
*
* During GUC initialization, since the timezone library isn't set up
* yet, pg_get_timezone_name will return NULL and we will leave the
- * setting as UNKNOWN. If this isn't overridden from the config file
+ * setting as UNKNOWN. If this isn't overridden from the config file
* then pg_timezone_initialize() will eventually select a default
* value from the environment.
*/
@@ -521,7 +521,7 @@ assign_log_timezone(const char *value, bool doit, GucSource source)
return value;
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return. GUC wants it malloc'd.
*/
result = strdup(value);
@@ -656,7 +656,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source)
/*
* Note: if we are in startup phase then SetClientEncoding may not be able
- * to really set the encoding. In this case we will assume that the
+ * to really set the encoding. In this case we will assume that the
* encoding is okay, and InitializeClientEncoding() will fix things once
* initialization is complete.
*/
@@ -684,7 +684,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source)
* limit on names, so we can tell whether we're being passed an initial
* role name or a saved/restored value. (NOTE: we rely on guc.c to have
* properly truncated any incoming value, but not to truncate already-stored
- * values. See GUC_IS_NAME processing.)
+ * values. See GUC_IS_NAME processing.)
*/
extern char *session_authorization_string; /* in guc.c */
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index b06a48b7ab..da1b8bce76 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -343,11 +343,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
*rt_entry2;
/*
- * Make a copy of the given parsetree. It's not so much that we don't
+ * Make a copy of the given parsetree. It's not so much that we don't
* want to scribble on our input, it's that the parser has a bad habit of
* outputting multiple links to the same subtree for constructs like
* BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a
- * Var node twice. copyObject will expand any multiply-referenced subtree
+ * Var node twice. copyObject will expand any multiply-referenced subtree
* into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -443,7 +443,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
/*
* If the user didn't explicitly ask for a temporary view, check whether
- * we need one implicitly. We allow TEMP to be inserted automatically as
+ * we need one implicitly. We allow TEMP to be inserted automatically as
* long as the CREATE command is consistent with that --- no explicit
* schema name.
*/
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index cd23d8d8b6..1980b0fff9 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -63,7 +63,7 @@ static bool IndexSupportsBackwardScan(Oid indexid);
* needs access to variables of the current outer tuple. (The handling of
* this parameter is currently pretty inconsistent: some callers pass NULL
* and some pass down their parent's value; so don't rely on it in other
- * situations. It'd probably be better to remove the whole thing and use
+ * situations. It'd probably be better to remove the whole thing and use
* the generalized parameter mechanism instead.)
*/
void
@@ -295,7 +295,7 @@ ExecMarkPos(PlanState *node)
*
* NOTE: the semantics of this are that the first ExecProcNode following
* the restore operation will yield the same tuple as the first one following
- * the mark operation. It is unspecified what happens to the plan node's
+ * the mark operation. It is unspecified what happens to the plan node's
* result TupleTableSlot. (In most cases the result slot is unchanged by
* a restore, but the node may choose to clear it or to load it with the
* restored-to tuple.) Hence the caller should discard any previously
@@ -370,7 +370,7 @@ ExecSupportsMarkRestore(NodeTag plantype)
/*
* T_Result only supports mark/restore if it has a child plan that
* does, so we do not have enough information to give a really
- * correct answer. However, for current uses it's enough to
+ * correct answer. However, for current uses it's enough to
* always say "false", because this routine is not asked about
* gating Result plans, only base-case Results.
*/
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index d9ecc973e1..153a70839a 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -138,7 +138,7 @@ execCurrentOf(CurrentOfExpr *cexpr,
/*
* This table didn't produce the cursor's current row; some other
- * inheritance child of the same parent must have. Signal caller to
+ * inheritance child of the same parent must have. Signal caller to
* do nothing on this table.
*/
return false;
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index a4c77f0ad3..42206e49d7 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -54,7 +54,7 @@
*
* Initialize the Junk filter.
*
- * The source targetlist is passed in. The output tuple descriptor is
+ * The source targetlist is passed in. The output tuple descriptor is
* built from the non-junk tlist entries, plus the passed specification
* of whether to include room for an OID or not.
* An optional resultSlot can be passed as well.
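
In the simplest view, a junk filter of the kind this header describes reduces to keeping only the non-junk targetlist entries. A standalone toy sketch (TleSketch and build_clean_list are invented; the real code builds a TupleDesc from the entries and later strips junk attributes from each tuple):

#include <stdbool.h>
#include <stdio.h>

typedef struct TleSketch
{
    const char *name;           /* column name */
    bool        resjunk;        /* true if the entry is junk */
} TleSketch;

/* Copy the non-junk entries of 'src' into 'dst'; return how many were kept. */
static int
build_clean_list(const TleSketch *src, int n, TleSketch *dst)
{
    int     nkept = 0;
    int     i;

    for (i = 0; i < n; i++)
        if (!src[i].resjunk)
            dst[nkept++] = src[i];
    return nkept;
}

int
main(void)
{
    TleSketch   tlist[] = {
        {"id", false}, {"ctid", true}, {"name", false}
    };
    TleSketch   clean[3];
    int         n = build_clean_list(tlist, 3, clean);
    int         i;

    for (i = 0; i < n; i++)
        printf("kept: %s\n", clean[i].name);
    return 0;
}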
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 53df597552..05f2f1359f 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -125,7 +125,7 @@ static void intorel_destroy(DestReceiver *self);
* query plan
*
* Takes a QueryDesc previously created by CreateQueryDesc (it's not real
- * clear why we bother to separate the two functions, but...). The tupDesc
+ * clear why we bother to separate the two functions, but...). The tupDesc
* field of the QueryDesc is filled in to describe the tuples that will be
* returned, and the internal fields (estate and planstate) are set up.
*
@@ -483,7 +483,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
* userid to check as: current user unless we have a setuid indication.
*
* Note: GetUserId() is presently fast enough that there's no harm in
- * calling it separately for each RTE. If that stops being true, we could
+ * calling it separately for each RTE. If that stops being true, we could
* call it once in ExecCheckRTPerms and pass the userid down from there.
* But for now, no need for the extra clutter.
*/
@@ -1070,7 +1070,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
econtext = CreateExprContext(estate);
/*
- * Build a projection for each result rel. Note that any SubPlans in
+ * Build a projection for each result rel. Note that any SubPlans in
* the RETURNING lists get attached to the topmost plan node.
*/
Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
@@ -1267,7 +1267,7 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList)
* if so it doesn't matter which one we pick.) However, it is sometimes
* necessary to fire triggers on other relations; this happens mainly when an
* RI update trigger queues additional triggers on other relations, which will
- * be processed in the context of the outer query. For efficiency's sake,
+ * be processed in the context of the outer query. For efficiency's sake,
* we want to have a ResultRelInfo for those triggers too; that can avoid
* repeated re-opening of the relation. (It also provides a way for EXPLAIN
* ANALYZE to report the runtimes of such triggers.) So we make additional
@@ -1925,7 +1925,7 @@ ExecDelete(ItemPointer tupleid,
*
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
* the row to be deleted is visible to that snapshot, and throw a can't-
- * serialize error if not. This is a special-case behavior needed for
+ * serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
ldelete:;
@@ -1990,7 +1990,7 @@ ldelete:;
{
/*
* We have to put the target tuple into a slot, which means first we
- * gotta fetch it. We can use the trigger tuple slot.
+ * gotta fetch it. We can use the trigger tuple slot.
*/
TupleTableSlot *slot = estate->es_trig_tuple_slot;
HeapTupleData deltuple;
@@ -2019,7 +2019,7 @@ ldelete:;
* note: we can't run UPDATE queries with transactions
* off because UPDATEs are actually INSERTs and our
* scan will mistakenly loop forever, updating the tuple
- * it just inserted.. This should be fixed but until it
+ * it just inserted.. This should be fixed but until it
* is, we don't want to get stuck in an infinite loop
* which corrupts your database..
* ----------------------------------------------------------------
@@ -2088,7 +2088,7 @@ lreplace:;
*
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
* the row to be updated is visible to that snapshot, and throw a can't-
- * serialize error if not. This is a special-case behavior needed for
+ * serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
result = heap_update(resultRelationDesc, tupleid, tuple,
@@ -2141,7 +2141,7 @@ lreplace:;
* Note: instead of having to update the old index tuples associated with
* the heap tuple, all we do is form and insert new index tuples. This is
* because UPDATEs are actually DELETEs and INSERTs, and index tuple
- * deletion is done later by VACUUM (see notes in ExecDelete). All we do
+ * deletion is done later by VACUUM (see notes in ExecDelete). All we do
* here is insert new index tuples. -cim 9/27/89
*/
@@ -2371,7 +2371,7 @@ EvalPlanQual(EState *estate, Index rti,
{
/*
* If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies that
+ * recycled and reused for an unrelated tuple. This implies that
* the latest version of the row was deleted, so we need do
* nothing. (Should be safe to examine xmin without getting
* buffer's content lock, since xmin never changes in an existing
@@ -2479,7 +2479,7 @@ EvalPlanQual(EState *estate, Index rti,
*tid = tuple.t_self;
/*
- * Need to run a recheck subquery. Find or create a PQ stack entry.
+ * Need to run a recheck subquery. Find or create a PQ stack entry.
*/
epq = estate->es_evalPlanQual;
endNode = true;
@@ -2716,7 +2716,7 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
/*
* Each epqstate must have its own es_evTupleNull state, but all the stack
- * entries share es_evTuple state. This allows sub-rechecks to inherit
+ * entries share es_evTuple state. This allows sub-rechecks to inherit
* the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
@@ -2766,7 +2766,7 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
*
* This is a cut-down version of ExecutorEnd(); basically we want to do most
* of the normal cleanup, but *not* close result relations (which we are
- * just sharing from the outer query). We do, however, have to close any
+ * just sharing from the outer query). We do, however, have to close any
* trigger target relations that got opened, since those are not shared.
*/
static void
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 1b1dd91f2a..69a9d3b288 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -52,7 +52,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
- * and so forth until the entire plan is initialized. The result
+ * and so forth until the entire plan is initialized. The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@@ -642,7 +642,7 @@ ExecCountSlotsNode(Plan *node)
* at 'node'.
*
* After this operation, the query plan will not be able to
- * processed any further. This should be called only after
+ * processed any further. This should be called only after
* the query plan has been fully executed.
* ----------------------------------------------------------------
*/
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index d5e828967f..97c7d175d3 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -26,7 +26,7 @@
* ExecProject() is used to make tuple projections. Rather then
* trying to speed it up, the execution plan should be pre-processed
* to facilitate attribute sharing between nodes wherever possible,
- * instead of doing needless copying. -cim 5/31/91
+ * instead of doing needless copying. -cim 5/31/91
*
* During expression evaluation, we check_stack_depth only in
* ExecMakeFunctionResult (and substitute routines) rather than at every
@@ -196,7 +196,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
*
* Note: for notational simplicity we declare these functions as taking the
* specific type of ExprState that they work on. This requires casting when
- * assigning the function pointer in ExecInitExpr. Be careful that the
+ * assigning the function pointer in ExecInitExpr. Be careful that the
* function signature is declared correctly, because the cast suppresses
* automatic checking!
*
@@ -231,7 +231,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
* The caller should already have switched into the temporary memory
* context econtext->ecxt_per_tuple_memory. The convenience entry point
* ExecEvalExprSwitchContext() is provided for callers who don't prefer to
- * do the switch in an outer loop. We do not do the switch in these routines
+ * do the switch in an outer loop. We do not do the switch in these routines
* because it'd be a waste of cycles during nested expression evaluation.
* ----------------------------------------------------------------
*/
@@ -434,7 +434,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
/*
* For assignment to varlena arrays, we handle a NULL original array
* by substituting an empty (zero-dimensional) array; insertion of the
- * new element will result in a singleton array value. It does not
+ * new element will result in a singleton array value. It does not
* matter whether the new element is NULL.
*/
if (*isNull)
@@ -815,11 +815,11 @@ ExecEvalWholeRowVar(WholeRowVarExprState *wrvstate, ExprContext *econtext,
* We really only care about numbers of attributes and data types.
* Also, we can ignore type mismatch on columns that are dropped in
* the destination type, so long as (1) the physical storage matches
- * or (2) the actual column value is NULL. Case (1) is helpful in
+ * or (2) the actual column value is NULL. Case (1) is helpful in
* some cases involving out-of-date cached plans, while case (2) is
* expected behavior in situations such as an INSERT into a table with
* dropped columns (the planner typically generates an INT4 NULL
- * regardless of the dropped column type). If we find a dropped
+ * regardless of the dropped column type). If we find a dropped
* column and cannot verify that case (1) holds, we have to use
* ExecEvalWholeRowSlow to check (2) for each row.
*/
@@ -1414,7 +1414,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* ExecPrepareTuplestoreResult
*
* Subroutine for ExecMakeFunctionResult: prepare to extract rows from a
- * tuplestore function result. We must set up a funcResultSlot (unless
+ * tuplestore function result. We must set up a funcResultSlot (unless
* already done in a previous call cycle) and verify that the function
* returned the expected tuple descriptor.
*/
@@ -1459,7 +1459,7 @@ ExecPrepareTuplestoreResult(FuncExprState *fcache,
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (resultDesc)
@@ -1608,7 +1608,7 @@ restart:
* For non-set-returning functions, we just use a local-variable
* FunctionCallInfoData. For set-returning functions we keep the callinfo
* record in fcache->setArgs so that it can survive across multiple
- * value-per-call invocations. (The reason we don't just do the latter
+ * value-per-call invocations. (The reason we don't just do the latter
* all the time is that plpgsql expects to be able to use simple
* expression trees re-entrantly. Which might not be a good idea, but the
* penalty for not doing so is high.)
@@ -1658,7 +1658,7 @@ restart:
if (fcache->func.fn_retset || hasSetArg)
{
/*
- * We need to return a set result. Complain if caller not ready to
+ * We need to return a set result. Complain if caller not ready to
* accept one.
*/
if (isDone == NULL)
@@ -1980,7 +1980,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
/*
* Normally the passed expression tree will be a FuncExprState, since the
* grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set then
+ * function reference. However, if the function doesn't return set then
* the planner might have replaced the function call via constant-folding
* or inlining. So if we see any other kind of expression node, execute
* it via the general ExecEvalExpr() code; the only difference is that we
@@ -2015,7 +2015,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*
* Note: ideally, we'd do this in the per-tuple context, but then the
* argument values would disappear when we reset the context in the
- * inner loop. So do it in caller context. Perhaps we should make a
+ * inner loop. So do it in caller context. Perhaps we should make a
* separate context just to hold the evaluated arguments?
*/
fcinfo.flinfo = &(fcache->func);
@@ -2101,7 +2101,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Can't do anything very useful with NULL rowtype values. For a
* function returning set, we consider this a protocol violation
* (but another alternative would be to just ignore the result and
- * "continue" to get another row). For a function not returning
+ * "continue" to get another row). For a function not returning
* set, we fall out of the loop; we'll cons up an all-nulls result
* row below.
*/
@@ -2235,7 +2235,7 @@ no_function_result:
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (rsinfo.setDesc)
@@ -2415,7 +2415,7 @@ ExecEvalDistinct(FuncExprState *fcache,
*
* Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean,
* and we combine the results across all array elements using OR and AND
- * (for ANY and ALL respectively). Of course we short-circuit as soon as
+ * (for ANY and ALL respectively). Of course we short-circuit as soon as
* the result is known.
*/
static Datum
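
The OR/AND combination rule with short-circuiting that the comment above describes fits in a few lines of standalone C (scalar_op_array and op_lt are invented names, and this toy version omits the SQL NULL handling the real evaluator must also do):

#include <stdbool.h>
#include <stdio.h>

typedef bool (*op_fn) (int scalar, int elem);

static bool
op_lt(int scalar, int elem)
{
    return scalar < elem;
}

static bool
scalar_op_array(int scalar, op_fn op, const int *arr, int n, bool use_or)
{
    bool    result = !use_or;   /* identity: false for OR, true for AND */
    int     i;

    for (i = 0; i < n; i++)
    {
        result = op(scalar, arr[i]);
        if (use_or && result)
            return true;        /* ANY: one true element decides it */
        if (!use_or && !result)
            return false;       /* ALL: one false element decides it */
    }
    return result;
}

int
main(void)
{
    int     arr[] = {1, 5, 9};

    printf("3 < ANY: %d\n", scalar_op_array(3, op_lt, arr, 3, true));   /* 1 */
    printf("3 < ALL: %d\n", scalar_op_array(3, op_lt, arr, 3, false));  /* 0 */
    return 0;
}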
@@ -2600,7 +2600,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
* qualification to conjunctive normal form. If we ever get
* an AND to evaluate, we can be sure that it's not a top-level
* clause in the qualification, but appears lower (as a function
- * argument, for example), or in the target list. Not that you
+ * argument, for example), or in the target list. Not that you
* need to know this, mind you...
* ----------------------------------------------------------------
*/
@@ -2731,7 +2731,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
/* ----------------------------------------------------------------
* ExecEvalConvertRowtype
*
- * Evaluate a rowtype coercion operation. This may require
+ * Evaluate a rowtype coercion operation. This may require
* rearranging field positions.
* ----------------------------------------------------------------
*/
@@ -2926,7 +2926,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
/*
* if we have a true test, then we return the result, since the case
- * statement is satisfied. A NULL result from the test is not
+ * statement is satisfied. A NULL result from the test is not
* considered true.
*/
if (DatumGetBool(clause_value) && !*isNull)
@@ -3140,7 +3140,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
* If all items were null or empty arrays, return an empty array;
* otherwise, if some were and some weren't, raise error. (Note: we
* must special-case this somehow to avoid trying to generate a 1-D
- * array formed from empty arrays. It's not ideal...)
+ * array formed from empty arrays. It's not ideal...)
*/
if (haveempty)
{
@@ -4301,7 +4301,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* ExecInitExpr: prepare an expression tree for execution
*
* This function builds and returns an ExprState tree paralleling the given
- * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
+ * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
* for execution. Because the Expr tree itself is read-only as far as
* ExecInitExpr and ExecEvalExpr are concerned, several different executions
* of the same plan tree can occur concurrently.
@@ -4312,9 +4312,9 @@ ExecEvalExprSwitchContext(ExprState *expression,
*
* Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to the
* lists of such nodes held by the parent PlanState. Otherwise, we do very
- * little initialization here other than building the state-node tree. Any
+ * little initialization here other than building the state-node tree. Any
* nontrivial work associated with initializing runtime info for a node should
- * happen during the first actual evaluation of that node. (This policy lets
+ * happen during the first actual evaluation of that node. (This policy lets
* us avoid work if the node is never actually evaluated.)
*
* Note: there is no ExecEndExpr function; we assume that any resource
@@ -5099,7 +5099,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Evaluate the qual conditions one at a time. If we find a FALSE result,
+ * Evaluate the qual conditions one at a time. If we find a FALSE result,
* we can stop evaluating and return FALSE --- the AND result must be
* FALSE. Also, if we find a NULL result when resultForNull is FALSE, we
* can stop and return FALSE --- the AND result must be FALSE or NULL in
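
The implicit-AND rule in this comment, including the resultForNull twist, can be shown in a tiny standalone sketch (TriBool and exec_qual_sketch are invented; the executor works with Datums and per-tuple memory contexts instead): a FALSE condition ends evaluation immediately, and a NULL condition does too unless the caller asked for NULL to count as success.

#include <stdbool.h>
#include <stdio.h>

typedef enum {TV_FALSE, TV_TRUE, TV_NULL} TriBool;

static bool
exec_qual_sketch(const TriBool *conds, int n, bool resultForNull)
{
    int     i;

    for (i = 0; i < n; i++)
    {
        if (conds[i] == TV_FALSE)
            return false;       /* AND result must be FALSE */
        if (conds[i] == TV_NULL && !resultForNull)
            return false;       /* NULL counts as failure here */
    }
    return true;                /* no condition failed */
}

int
main(void)
{
    TriBool q[] = {TV_TRUE, TV_NULL, TV_TRUE};

    printf("strict:  %d\n", exec_qual_sketch(q, 3, false));     /* 0 */
    printf("lenient: %d\n", exec_qual_sketch(q, 3, true));      /* 1 */
    return 0;
}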
@@ -5258,7 +5258,7 @@ ExecTargetList(List *targetlist,
else
{
/*
- * We have some done and some undone sets. Restart the done ones
+ * We have some done and some undone sets. Restart the done ones
* so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 97bd26a854..2cff2daada 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* execTuples.c
- * Routines dealing with the executor tuple tables. These are used to
+ * Routines dealing with the executor tuple tables. These are used to
* ensure that the executor frees copies of tuples (made by
* ExecTargetList) properly.
*
@@ -276,7 +276,7 @@ ExecDropSingleTupleTableSlot(TupleTableSlot *slot)
* ExecAllocTableSlot
*
* This routine is used to reserve slots in the table for
- * use by the various plan nodes. It is expected to be
+ * use by the various plan nodes. It is expected to be
* called by the node init routines (ex: ExecInitNestLoop)
* once per slot needed by the node. Not all nodes need
* slots (some just pass tuples around).
@@ -329,7 +329,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
ExecClearTuple(slot);
/*
- * Release any old descriptor. Also release old Datum/isnull arrays if
+ * Release any old descriptor. Also release old Datum/isnull arrays if
* present (we don't bother to check if they could be re-used).
*/
if (slot->tts_tupleDescriptor)
@@ -379,7 +379,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
* Another case where it is 'false' is when the referenced tuple is held
* in a tuple table slot belonging to a lower-level executor Proc node.
* In this case the lower-level slot retains ownership and responsibility
- * for eventually releasing the tuple. When this method is used, we must
+ * for eventually releasing the tuple. When this method is used, we must
* be certain that the upper-level Proc node will lose interest in the tuple
* sooner than the lower-level one does! If you're not certain, copy the
* lower-level tuple with heap_copytuple and let the upper-level table
@@ -718,7 +718,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot)
* Fetch the slot's minimal physical tuple.
*
* If the slot contains a virtual tuple, we convert it to minimal
- * physical form. The slot retains ownership of the minimal tuple.
+ * physical form. The slot retains ownership of the minimal tuple.
* If it contains a regular tuple we convert to minimal form and store
* that in addition to the regular tuple (not instead of, because
* callers may hold pointers to Datums within the regular tuple).
@@ -897,7 +897,7 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
* ExecInit{Result,Scan,Extra}TupleSlot
*
* These are convenience routines to initialize the specified slot
- * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
+ * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
* is used for initializing special-purpose slots.
* --------------------------------
*/
@@ -1206,7 +1206,7 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values)
* code would have no way to obtain a tupledesc for the tuple.
*
* Note that if we do build a new tuple, it's palloc'd in the current
- * memory context. Beware of code that changes context between the initial
+ * memory context. Beware of code that changes context between the initial
* heap_form_tuple/etc call and calling HeapTuple(Header)GetDatum.
*
* For performance-critical callers, it could be worthwhile to take extra
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 3af803d936..f3b07f280a 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -439,7 +439,7 @@ FreeExprContext(ExprContext *econtext, bool isCommit)
* ReScanExprContext
*
* Reset an expression context in preparation for a rescan of its
- * plan node. This requires calling any registered shutdown callbacks,
+ * plan node. This requires calling any registered shutdown callbacks,
* since any partially complete set-returning-functions must be canceled.
*
* Note we make no assumption about the caller's memory context.
@@ -480,7 +480,7 @@ MakePerTupleExprContext(EState *estate)
/* ----------------
* ExecAssignExprContext
*
- * This initializes the ps_ExprContext field. It is only necessary
+ * This initializes the ps_ExprContext field. It is only necessary
* to do this for nodes which use ExecQual or ExecProject
* because those routines require an econtext. Other nodes that
* don't have to evaluate expressions don't need to do this.
@@ -526,7 +526,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
/*
* ExecTypeFromTL needs the parse-time representation of the tlist, not a
- * list of ExprStates. This is good because some plan nodes don't bother
+ * list of ExprStates. This is good because some plan nodes don't bother
* to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
@@ -554,7 +554,7 @@ ExecGetResultType(PlanState *planstate)
* the given tlist should be a list of ExprState nodes, not Expr nodes.
*
* inputDesc can be NULL, but if it is not, we check to see whether simple
- * Vars in the tlist match the descriptor. It is important to provide
+ * Vars in the tlist match the descriptor. It is important to provide
* inputDesc for relation-scan plan nodes, as a cross check that the relation
* hasn't been changed since the plan was made. At higher levels of a plan,
* there is no need to recheck.
@@ -756,7 +756,7 @@ ExecAssignProjectionInfo(PlanState *planstate,
*
* However ... there is no particular need to do it during ExecEndNode,
* because FreeExecutorState will free any remaining ExprContexts within
- * the EState. Letting FreeExecutorState do it allows the ExprContexts to
+ * the EState. Letting FreeExecutorState do it allows the ExprContexts to
* be freed in reverse order of creation, rather than order of creation as
* will happen if we delete them here, which saves O(N^2) work in the list
* cleanup inside FreeExprContext.
@@ -776,7 +776,7 @@ ExecFreeExprContext(PlanState *planstate)
* the following scan type support functions are for
* those nodes which are stubborn and return tuples in
* their Scan tuple slot instead of their Result tuple
- * slot.. luck fur us, these nodes do not do projections
+ * slot.  Lucky for us, these nodes do not do projections
* so we don't have to worry about getting the ProjectionInfo
* right for them... -cim 6/3/91
* ----------------------------------------------------------------
@@ -1125,7 +1125,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
isnull);
/*
- * The index AM does the rest. Note we suppress unique-index checks
+ * The index AM does the rest. Note we suppress unique-index checks
* if we are being called from VACUUM, since VACUUM may need to move
* dead tuples that have the same keys as live ones.
*/
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index b5fa130579..82b894e31e 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -44,7 +44,7 @@ typedef struct
} DR_sqlfunction;
/*
- * We have an execution_state record for each query in a function. Each
+ * We have an execution_state record for each query in a function. Each
* record contains a plantree for its query. If the query is currently in
* F_EXEC_RUN state then there's a QueryDesc too.
*/
@@ -331,7 +331,7 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK)
* any polymorphic arguments.
*
* Note: we set fcache->returnsTuple according to whether we are returning
- * the whole tuple result or just a single column. In the latter case we
+ * the whole tuple result or just a single column. In the latter case we
* clear returnsTuple because we need not act different from the scalar
* result case, even if it's a rowtype column. (However, we have to force
* lazy eval mode in that case; otherwise we'd need extra code to expand
@@ -571,7 +571,7 @@ postquel_get_single_result(TupleTableSlot *slot,
/*
* Set up to return the function value. For pass-by-reference datatypes,
* be sure to allocate the result in resultcontext, not the current memory
- * context (which has query lifespan). We can't leave the data in the
+ * context (which has query lifespan). We can't leave the data in the
* TupleTableSlot because we intend to clear the slot before returning.
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
@@ -714,7 +714,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
* Break from loop if we didn't shut down (implying we got a
* lazily-evaluated row). Otherwise we'll press on till the whole
* function is done, relying on the tuplestore to keep hold of the
- * data to eventually be returned. This is necessary since an
+ * data to eventually be returned. This is necessary since an
* INSERT/UPDATE/DELETE RETURNING that sets the result might be
* followed by additional rule-inserted commands, and we want to
* finish doing all those commands before we return anything.
@@ -770,7 +770,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
else if (fcache->lazyEval)
{
/*
- * We are done with a lazy evaluation. Clean up.
+ * We are done with a lazy evaluation. Clean up.
*/
tuplestore_clear(fcache->tstore);
@@ -794,8 +794,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
else
{
/*
- * We are done with a non-lazy evaluation. Return whatever is in
- * the tuplestore. (It is now caller's responsibility to free the
+ * We are done with a non-lazy evaluation. Return whatever is in
+ * the tuplestore. (It is now caller's responsibility to free the
* tuplestore when done.)
*/
rsi->returnMode = SFRM_Materialize;
@@ -900,7 +900,7 @@ sql_exec_error_callback(void *arg)
/*
* Try to determine where in the function we failed. If there is a query
- * with non-null QueryDesc, finger it. (We check this rather than looking
+ * with non-null QueryDesc, finger it. (We check this rather than looking
* for F_EXEC_RUN state, so that errors during ExecutorStart or
* ExecutorEnd are blamed on the appropriate query; see postquel_start and
* postquel_end.)
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index a852428bbe..73379eeb60 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -19,7 +19,7 @@
* The agg's first input type and transtype must be the same in this case!
*
* If transfunc is marked "strict" then NULL input_values are skipped,
- * keeping the previous transvalue. If transfunc is not strict then it
+ * keeping the previous transvalue. If transfunc is not strict then it
* is called for every input tuple and must deal with NULL initcond
* or NULL input_values for itself.
*
@@ -53,7 +53,7 @@
* pass-by-ref inputs, but in the aggregate case we know the left input is
* either the initial transition value or a previous function result, and
* in either case its value need not be preserved. See int8inc() for an
- * example. Notice that advance_transition_function() is coded to avoid a
+ * example. Notice that advance_transition_function() is coded to avoid a
* data copy step when the previous transition value pointer is returned.
* Also, some transition functions make use of the aggcontext to store
* working state.
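For illustration, the copy-avoidance convention described above (return the modified first input so the caller can skip the copy step when it sees pointer equality) in a minimal standalone C sketch; the AvgState type and avg_step name are invented here, not PostgreSQL code:

    #include <stdio.h>

    typedef struct AvgState { double sum; long count; } AvgState;

    /* Transition step in the int8inc() style described above: scribble on
     * the existing state and hand back the same pointer, so the caller
     * can skip its copy-and-free cycle when it sees pointer equality. */
    static AvgState *
    avg_step(AvgState *state, double next)
    {
        state->sum += next;
        state->count += 1;
        return state;               /* same pointer as the first input */
    }

    int
    main(void)
    {
        AvgState st = {0.0, 0};
        double   vals[] = {1.0, 2.0, 4.0};

        for (int i = 0; i < 3; i++)
            avg_step(&st, vals[i]);
        printf("avg = %g\n", st.sum / st.count);    /* avg = 2.33333 */
        return 0;
    }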
@@ -189,7 +189,7 @@ typedef struct AggStatePerGroupData
/*
* Note: noTransValue initially has the same value as transValueIsNull,
- * and if true both are cleared to false at the same time. They are not
+ * and if true both are cleared to false at the same time. They are not
* the same though: if transfn later returns a NULL, we want to keep that
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
@@ -199,7 +199,7 @@ typedef struct AggStatePerGroupData
/*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
- * distinct set of GROUP BY column values. We compute the hash key from
+ * distinct set of GROUP BY column values. We compute the hash key from
* the GROUP BY columns.
*/
typedef struct AggHashEntryData *AggHashEntry;
@@ -384,7 +384,7 @@ advance_transition_function(AggState *aggstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
@@ -408,7 +408,7 @@ advance_transition_function(AggState *aggstate,
}
/*
- * Advance all the aggregates for one input tuple. The input tuple
+ * Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_outertuple, so that it is accessible
* to ExecEvalExpr. pergroup is the array of per-group structs to use
* (this might be in a hashtable entry).
@@ -470,7 +470,7 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
/*
* Run the transition function for a DISTINCT aggregate. This is called
* after we have completed entering all the input values into the sort
- * object. We complete the sort, read out the values in sorted order,
+ * object. We complete the sort, read out the values in sorted order,
* and run the transition function on each non-duplicate value.
*
* When called, CurrentMemoryContext should be the per-query context.
@@ -792,9 +792,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
* the appropriate attribute for each aggregate function use (Aggref
* node) appearing in the targetlist or qual of the node. The number
* of tuples to aggregate over depends on whether grouped or plain
- * aggregation is selected. In grouped aggregation, we produce a result
+ * aggregation is selected. In grouped aggregation, we produce a result
* row for each group; in plain aggregation there's a single result row
- * for the whole query. In either case, the value of each aggregate is
+ * for the whole query. In either case, the value of each aggregate is
* stored in the expression context to be used when ExecProject evaluates
* the result tuple.
*/
@@ -992,7 +992,7 @@ agg_retrieve_direct(AggState *aggstate)
/*
* Use the representative input tuple for any references to
- * non-aggregated input columns in the qual and tlist. (If we are not
+ * non-aggregated input columns in the qual and tlist. (If we are not
* grouping, and there are no input rows at all, we will come here
* with an empty firstSlot ... but if not grouping, there can't be any
* references to non-aggregated input columns, so no problem.)
@@ -1216,8 +1216,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggstate->hashtable = NULL;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
@@ -1252,7 +1252,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no sense
+ * contain other agg calls in their arguments. This would make no sense
* under SQL semantics anyway (and it's forbidden by the spec). Because
* that is true, we don't need to worry about evaluating the aggs in any
* particular order.
@@ -1299,7 +1299,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* This is not an error condition: we might be using the Agg node just
* to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of the
- * Aggrefs in the targetlist and qual. So keep going, but force local
+ * Aggrefs in the targetlist and qual. So keep going, but force local
* copy of numaggs positive so that palloc()s below don't choke.
*/
numaggs = 1;
@@ -1407,7 +1407,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
peraggstate->numArguments = numArguments;
/*
- * Get actual datatypes of the inputs. These could be different from
+ * Get actual datatypes of the inputs. These could be different from
* the agg's declared input types, when the agg accepts ANY or a
* polymorphic type.
*/
@@ -1527,7 +1527,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary-compatible), so
* that it's OK to use the first input value as the initial
- * transValue. This should have been checked at agg definition time,
+ * transValue. This should have been checked at agg definition time,
* but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index 3a70ee447f..53d0d47a2d 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -33,7 +33,7 @@
*                /
*      Append -------+------+------+--- nil
*      /   \        |      |      |
- *  nil   nil      ...    ...    ...
+ *  nil   nil      ...    ...    ...
*                   subplans
*
* Append nodes are currently used for unions, and to support
@@ -345,7 +345,7 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. However, if caller is passing us an exprCtxt
+ * first ExecProcNode. However, if caller is passing us an exprCtxt
* then forcibly rescan all the subnodes now, so that we can pass the
* exprCtxt down to the subnodes (needed for appendrel indexscan).
*/
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 306e7f6e17..4544a33e70 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -5,7 +5,7 @@
*
* NOTE: it is critical that this plan type only be used with MVCC-compliant
* snapshots (ie, regular snapshots, not SnapshotNow or one of the other
- * special snapshots). The reason is that since index and heap scans are
+ * special snapshots). The reason is that since index and heap scans are
* decoupled, there can be no assurance that the index tuple prompting a
* visit to a particular heap TID still exists when the visit is made.
* Therefore the tuple might not exist anymore either (which is OK because
@@ -361,7 +361,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c
index f38199650e..8cb02b31ed 100644
--- a/src/backend/executor/nodeFunctionscan.c
+++ b/src/backend/executor/nodeFunctionscan.c
@@ -277,7 +277,7 @@ ExecFunctionReScan(FunctionScanState *node, ExprContext *exprCtxt)
/*
* Here we have a choice whether to drop the tuplestore (and recompute the
* function outputs) or just rescan it. We must recompute if the
- * expression contains parameters, else we rescan. XXX maybe we should
+ * expression contains parameters, else we rescan. XXX maybe we should
* recompute if the function is volatile?
*/
if (node->ss.ps.chgParam != NULL)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 64ae04d4f8..6ff1368480 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -373,7 +373,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
/*
* Set up for skew optimization, if possible and there's a need for more
- * than one batch. (In a one-batch join, there's no point in it.)
+ * than one batch. (In a one-batch join, there's no point in it.)
*/
if (nbatch > 1)
ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
@@ -415,7 +415,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Estimate tupsize based on footprint of tuple in hashtable... note this
- * does not allow for any palloc overhead. The manipulations of spaceUsed
+ * does not allow for any palloc overhead. The manipulations of spaceUsed
* don't count palloc overhead either.
*/
tupsize = HJTUPLE_OVERHEAD +
@@ -506,8 +506,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Both nbuckets and nbatch must be powers of 2 to make
- * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
- * nbuckets to the next larger power of 2. We also force nbuckets to not
+ * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
+ * nbuckets to the next larger power of 2. We also force nbuckets to not
* be real small, by starting the search at 2^10. (Note: above we made
* sure that nbuckets is not more than INT_MAX / 2, so this loop cannot
* overflow, nor can the final shift to recalculate nbuckets.)
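A hedged standalone sketch of the rounding rule above; next_pow2_min_1024 is an invented name, and the final line shows why powers of two matter (the bucket number falls out of the hash value with a simple mask):

    #include <stdio.h>

    /* Inflate a target bucket count to the next power of two, starting
     * the search at 2^10 as the comment above describes (a sketch, not
     * the exact executor code). */
    static int
    next_pow2_min_1024(int target)
    {
        int i = 10;                     /* 2^10 = 1024 floor */

        while ((1 << i) < target)
            i++;
        return 1 << i;
    }

    int
    main(void)
    {
        int          nbuckets = next_pow2_min_1024(3000);
        unsigned int hash = 0x9c4f2d31u;

        printf("%d\n", nbuckets);                           /* 4096 */
        printf("%u\n", hash & (unsigned int) (nbuckets - 1)); /* 3377 */
        return 0;
    }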
@@ -810,7 +810,7 @@ ExecHashGetHashValue(HashJoinTable hashtable,
* the hash support function as strict even if the operator is not.
*
* Note: currently, all hashjoinable operators must be strict since
- * the hash index AM assumes that. However, it takes so little extra
+ * the hash index AM assumes that. However, it takes so little extra
* code here to allow non-strict that we may as well do it.
*/
if (isNull)
@@ -1104,7 +1104,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
/*
* While we have not hit a hole in the hashtable and have not hit
* the desired bucket, we have collided with some previous hash
- * value, so try the next bucket location. NB: this code must
+ * value, so try the next bucket location. NB: this code must
* match ExecHashGetSkewBucket.
*/
bucket = hashvalue & (nbuckets - 1);
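The probe rule that both insertion and lookup must share is plain open addressing over a power-of-two table; a sketch with simplified types (SkewSlot and skew_probe are invented names):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct SkewSlot { uint32_t hashvalue; } SkewSlot;

    /* Advance past collisions until we hit a hole or our hash value;
     * insertion and lookup must follow the same rule or lookups miss. */
    static int
    skew_probe(SkewSlot **slots, int nbuckets, uint32_t hashvalue)
    {
        int bucket = (int) (hashvalue & (uint32_t) (nbuckets - 1));

        while (slots[bucket] != NULL && slots[bucket]->hashvalue != hashvalue)
            bucket = (bucket + 1) & (nbuckets - 1);
        return bucket;              /* a hole, or the matching entry */
    }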
@@ -1297,7 +1297,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
* NOTE: this is not nearly as simple as it looks on the surface, because
* of the possibility of collisions in the hashtable. Suppose that hash
* values A and B collide at a particular hashtable entry, and that A was
- * entered first so B gets shifted to a different table entry. If we were
+ * entered first so B gets shifted to a different table entry. If we were
* to remove A first then ExecHashGetSkewBucket would mistakenly start
* reporting that B is not in the hashtable, because it would hit the NULL
* before finding B. However, we always remove entries in the reverse
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index bfb07472b0..c336f4c6ef 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -108,7 +108,7 @@ ExecHashJoin(HashJoinState *node)
* If the outer relation is completely empty, we can quit without
* building the hash table. However, for an inner join it is only a
* win to check this when the outer relation's startup cost is less
- * than the projected cost of building the hash table. Otherwise it's
+ * than the projected cost of building the hash table. Otherwise it's
* best to build the hash table first and see if the inner relation is
* empty. (When it's an outer join, we should always make this check,
* since we aren't going to be able to skip the join on the strength
@@ -116,7 +116,7 @@ ExecHashJoin(HashJoinState *node)
*
* If we are rescanning the join, we make use of information gained on
* the previous scan: don't bother to try the prefetch if the previous
- * scan found the outer relation nonempty. This is not 100% reliable
+ * scan found the outer relation nonempty. This is not 100% reliable
* since with new parameters the outer relation might yield different
* results, but it's a good heuristic.
*
@@ -430,7 +430,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
* tuple slot of the Hash node (which is our inner plan). we do this
* because Hash nodes don't return tuples via ExecProcNode() -- instead
* the hash join node uses ExecScanHashBucket() to get at the contents of
- * the hash table. -cim 6/9/91
+ * the hash table. -cim 6/9/91
*/
{
HashState *hashstate = (HashState *) innerPlanState(hjstate);
@@ -807,7 +807,7 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
/*
* ExecHashJoinGetSavedTuple
- * read the next tuple from a batch file. Return NULL if no more.
+ * read the next tuple from a batch file. Return NULL if no more.
*
* On success, *hashvalue is set to the tuple's hash value, and the tuple
* itself is stored in the given slot.
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 0ed6b86893..1f9113474f 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -252,7 +252,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
/*
* For each run-time key, extract the run-time expression and evaluate
- * it with respect to the current outer tuple. We then stick the
+ * it with respect to the current outer tuple. We then stick the
* result into the proper scan key.
*
* Note: the result of the eval could be a pass-by-ref value that's
@@ -385,7 +385,7 @@ ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys)
/*
* Note we advance the rightmost array key most quickly, since it will
* correspond to the lowest-order index column among the available
- * qualifications. This is hypothesized to result in better locality of
+ * qualifications. This is hypothesized to result in better locality of
* access in the index.
*/
for (j = numArrayKeys - 1; j >= 0; j--)
@@ -705,7 +705,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
/*
* If there are any RowCompareExpr quals, we need extra ScanKey entries
* for them, and possibly extra runtime-key entries. Count up what's
- * needed. (The subsidiary ScanKey arrays for the RowCompareExprs could
+ * needed. (The subsidiary ScanKey arrays for the RowCompareExprs could
* be allocated as separate chunks, but we have to count anyway to make
* runtime_keys large enough, so might as well just do one palloc.)
*/
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index d002848bd3..32bf2f8b2d 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -111,7 +111,7 @@ ExecLimit(LimitState *node)
/*
* The subplan is known to return no tuples (or not more than
- * OFFSET tuples, in general). So we return no tuples.
+ * OFFSET tuples, in general). So we return no tuples.
*/
return NULL;
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 817a7e7824..aaee9d85aa 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -185,7 +185,7 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
/*
* Tuplestore's interpretation of the flag bits is subtly different from
* the general executor meaning: it doesn't think BACKWARD necessarily
- * means "backwards all the way to start". If told to support BACKWARD we
+ * means "backwards all the way to start". If told to support BACKWARD we
* must include REWIND in the tuplestore eflags, else tuplestore_trim
* might throw away too much.
*/
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index bde0f3d874..ab97c525d2 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -41,7 +41,7 @@
*
* Therefore, rather than directly executing the merge join clauses,
* we evaluate the left and right key expressions separately and then
- * compare the columns one at a time (see MJCompare). The planner
+ * compare the columns one at a time (see MJCompare). The planner
* passes us enough information about the sort ordering of the inputs
* to allow us to determine how to make the comparison. We may use the
* appropriate btree comparison function, since Postgres' only notion
@@ -160,7 +160,7 @@ typedef enum
* sort ordering for each merge key. The mergejoinable operator is an
* equality operator in this opfamily, and the two inputs are guaranteed to be
* ordered in either increasing or decreasing (respectively) order according
- * to this opfamily, with nulls at the indicated end of the range. This
+ * to this opfamily, with nulls at the indicated end of the range. This
* allows us to obtain the needed comparison function from the opfamily.
*/
static MergeJoinClause
@@ -307,7 +307,7 @@ MJEvalOuterValues(MergeJoinState *mergestate)
/*
* MJEvalInnerValues
*
- * Same as above, but for the inner tuple. Here, we have to be prepared
+ * Same as above, but for the inner tuple. Here, we have to be prepared
* to load data from either the true current inner, or the marked inner,
* so caller must tell us which slot to load from.
*/
@@ -435,7 +435,7 @@ MJCompare(MergeJoinState *mergestate)
/*
* If we had any null comparison results or NULL-vs-NULL inputs, we do not
* want to report that the tuples are equal. Instead, if result is still
- * 0, change it to +1. This will result in advancing the inner side of
+ * 0, change it to +1. This will result in advancing the inner side of
* the join.
*
* Likewise, if there was a constant-false joinqual, do not report
@@ -749,7 +749,7 @@ ExecMergeJoin(MergeJoinState *node)
{
case MJEVAL_MATCHABLE:
/*
- * OK, we have the initial tuples. Begin by skipping
+ * OK, we have the initial tuples. Begin by skipping
* non-matching tuples.
*/
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
@@ -1137,7 +1137,7 @@ ExecMergeJoin(MergeJoinState *node)
* which means that all subsequent outer tuples will be
* larger than our marked inner tuples. So we need not
* revisit any of the marked tuples but can proceed to
- * look for a match to the current inner. If there's
+ * look for a match to the current inner. If there's
* no more inners, no more matches are possible.
* ----------------
*/
@@ -1525,7 +1525,7 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
* For certain types of inner child nodes, it is advantageous to issue
* MARK every time we advance past an inner tuple we will never return to.
* For other types, MARK on a tuple we cannot return to is a waste of
- * cycles. Detect which case applies and set mj_ExtraMarks if we want to
+ * cycles. Detect which case applies and set mj_ExtraMarks if we want to
* issue "unnecessary" MARK calls.
*
* Currently, only Material wants the extra MARKs, and it will be helpful
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 39b687f221..07841246eb 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -329,7 +329,7 @@ ExecRecursiveUnionReScan(RecursiveUnionState *node, ExprContext *exprCtxt)
/*
* if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. Because of above, we only have to do this to the
+ * first ExecProcNode. Because of above, we only have to do this to the
* non-recursive term.
*/
if (outerPlan->chgParam == NULL)
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index e5173a65b5..76aa377382 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -308,7 +308,7 @@ ExecReScanResult(ResultState *node, ExprContext *exprCtxt)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. However, if caller is passing us an exprCtxt then
+ * first ExecProcNode. However, if caller is passing us an exprCtxt then
* forcibly rescan the subnode now, so that we can pass the exprCtxt down
* to the subnode (needed for gated indexscan).
*/
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index cff155abe6..90a180e468 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -5,7 +5,7 @@
*
* The input of a SetOp node consists of tuples from two relations,
* which have been combined into one dataset, with a junk attribute added
- * that shows which relation each tuple came from. In SETOP_SORTED mode,
+ * that shows which relation each tuple came from. In SETOP_SORTED mode,
* the input has furthermore been sorted according to all the grouping
* columns (ie, all the non-junk attributes). The SetOp node scans each
* group of identical tuples to determine how many came from each input
@@ -18,7 +18,7 @@
* relation is the left-hand one for EXCEPT, and tries to make the smaller
* input relation come first for INTERSECT. We build a hash table in memory
* with one entry for each group of identical tuples, and count the number of
- * tuples in the group from each relation. After seeing all the input, we
+ * tuples in the group from each relation. After seeing all the input, we
* scan the hashtable and generate the correct output using those counts.
* We can avoid making hashtable entries for any tuples appearing only in the
* second input relation, since they cannot result in any output.
@@ -267,7 +267,7 @@ setop_retrieve_direct(SetOpState *setopstate)
/*
* Store the copied first input tuple in the tuple table slot reserved
- * for it. The tuple will be deleted when it is cleared from the
+ * for it. The tuple will be deleted when it is cleared from the
* slot.
*/
ExecStoreTuple(setopstate->grp_firstTuple,
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index b2c771f410..1aa1706d04 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -259,12 +259,12 @@ ExecScanSubPlan(SubPlanState *node,
* semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
* (ROWCOMPARE_SUBLINK doesn't allow multiple tuples from the subplan.)
* NULL results from the combining operators are handled according to the
- * usual SQL semantics for OR and AND. The result for no input tuples is
+ * usual SQL semantics for OR and AND. The result for no input tuples is
* FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* ROWCOMPARE_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. If zero tuples are produced, we return
+ * tuple, else an error is raised. If zero tuples are produced, we return
* NULL. Assuming we get a tuple, we just use its first column (there can
* be only one non-junk column in this case).
*
@@ -407,7 +407,7 @@ ExecScanSubPlan(SubPlanState *node,
else if (!found)
{
/*
- * deal with empty subplan result. result/isNull were previously
+ * deal with empty subplan result. result/isNull were previously
* initialized correctly for all sublink types except EXPR and
* ROWCOMPARE; for those, return NULL.
*/
@@ -902,7 +902,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
*
* This is called from ExecEvalParam() when the value of a PARAM_EXEC
* parameter is requested and the param's execPlan field is set (indicating
- * that the param has not yet been evaluated). This allows lazy evaluation
+ * that the param has not yet been evaluated). This allows lazy evaluation
* of initplans: we don't run the subplan until/unless we need its output.
* Note that this routine MUST clear the execPlan fields of the plan's
* output parameters after evaluating them!
@@ -1130,7 +1130,7 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent)
/*
* Select the one to be used. For this, we need an estimate of the number
* of executions of the subplan. We use the number of output rows
- * expected from the parent plan node. This is a good estimate if we are
+ * expected from the parent plan node. This is a good estimate if we are
* in the parent's targetlist, and an underestimate (but probably not by
* more than a factor of 2) if we are in the qual.
*/
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 7af42ccb4a..2dd26889f8 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -98,7 +98,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
Assert(!(eflags & EXEC_FLAG_MARK));
/*
- * SubqueryScan should not have any "normal" children. Also, if planner
+ * SubqueryScan should not have any "normal" children. Also, if planner
* left anything in subrtable, it's fishy.
*/
Assert(outerPlan(node) == NULL);
@@ -209,7 +209,7 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well, because the
+ * changed-parameter signaling myself. This is just as well, because the
* subplan has its own memory context in which its chgParam state lives.
*/
if (node->ss.ps.chgParam != NULL)
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index 8878e6ede9..9498a5d900 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -4,7 +4,7 @@
* Routines to handle unique'ing of queries where appropriate
*
* Unique is a very simple node type that just filters out duplicate
- * tuples from a stream of sorted tuples from its subplan. It's essentially
+ * tuples from a stream of sorted tuples from its subplan. It's essentially
* a dumbed-down form of Group: the duplicate-removal functionality is
* identical. However, Unique doesn't do projection nor qual checking,
* so it's marginally more efficient for cases where neither is needed.
diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c
index 0ce1f576dc..7875c1b92c 100644
--- a/src/backend/executor/nodeValuesscan.c
+++ b/src/backend/executor/nodeValuesscan.c
@@ -205,7 +205,7 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
planstate = &scanstate->ss.ps;
/*
- * Create expression contexts. We need two, one for per-sublist
+ * Create expression contexts. We need two, one for per-sublist
* processing and one for execScan.c to use for quals and projections. We
* cheat a little by using ExecAssignExprContext() to build both.
*/
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 3cd71eb9f0..4c765e15fa 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -4,7 +4,7 @@
* routines to handle WindowAgg nodes.
*
* A WindowAgg node evaluates "window functions" across suitable partitions
- * of the input tuple set. Any one WindowAgg works for just a single window
+ * of the input tuple set. Any one WindowAgg works for just a single window
* specification, though it can evaluate multiple window functions sharing
* identical window specifications. The input tuples are required to be
* delivered in sorted order, with the PARTITION BY columns (if any) as
@@ -14,7 +14,7 @@
*
* Since window functions can require access to any or all of the rows in
* the current partition, we accumulate rows of the partition into a
- * tuplestore. The window functions are called using the WindowObject API
+ * tuplestore. The window functions are called using the WindowObject API
* so that they can access those rows as needed.
*
* We also support using plain aggregate functions as window functions.
@@ -295,7 +295,7 @@ advance_windowaggregate(WindowAggState *winstate,
/*
* If pass-by-ref datatype, must copy the new value into wincontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
@@ -427,7 +427,7 @@ eval_windowaggregates(WindowAggState *winstate)
* TODO: In the future, we should implement the full SQL-standard set of
* framing rules. We could implement the other cases by recalculating the
* aggregates whenever a row exits the frame. That would be pretty slow,
- * though. For aggregates like SUM and COUNT we could implement a
+ * though. For aggregates like SUM and COUNT we could implement a
* "negative transition function" that would be called for each row as it
* exits the frame. We'd have to think about avoiding recalculation of
* volatile arguments of aggregate functions, too.
@@ -467,7 +467,7 @@ eval_windowaggregates(WindowAggState *winstate)
* Advance until we reach a row not in frame (or end of partition).
*
* Note the loop invariant: agg_row_slot is either empty or holds the row
- * at position aggregatedupto. The agg_ptr read pointer must always point
+ * at position aggregatedupto. The agg_ptr read pointer must always point
* to the next row to read into agg_row_slot.
*/
agg_row_slot = winstate->agg_row_slot;
@@ -712,7 +712,7 @@ spool_tuples(WindowAggState *winstate, int64 pos)
/*
* If the tuplestore has spilled to disk, alternate reading and writing
- * becomes quite expensive due to frequent buffer flushes. It's cheaper
+ * becomes quite expensive due to frequent buffer flushes. It's cheaper
* to force the entire partition to get spooled in one go.
*
* XXX this is a horrid kluge --- it'd be better to fix the performance
@@ -803,7 +803,7 @@ release_partition(WindowAggState *winstate)
* to our window framing rule
*
* The caller must have already determined that the row is in the partition
- * and fetched it into a slot. This function just encapsulates the framing
+ * and fetched it into a slot. This function just encapsulates the framing
* rules.
*/
static bool
@@ -1090,8 +1090,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
winstate->ss.ps.state = estate;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &winstate->ss.ps);
@@ -1621,7 +1621,7 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot)
* requested amount of space. Subsequent calls just return the same chunk.
*
* Memory obtained this way is normally used to hold state that should be
- * automatically reset for each new partition. If a window function wants
+ * automatically reset for each new partition. If a window function wants
* to hold state across the whole query, fcinfo->fn_extra can be used in the
* usual way for that.
*/
diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c
index 87a0204525..5ab5fca8fc 100644
--- a/src/backend/executor/nodeWorktablescan.c
+++ b/src/backend/executor/nodeWorktablescan.c
@@ -74,7 +74,7 @@ ExecWorkTableScan(WorkTableScanState *node)
{
/*
* On the first call, find the ancestor RecursiveUnion's state via the
- * Param slot reserved for it. (We can't do this during node init because
+ * Param slot reserved for it. (We can't do this during node init because
* there are corner cases where we'll get the init call before the
* RecursiveUnion does.)
*/
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 0f8f742caa..0ba7595e07 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -254,7 +254,7 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
}
/*
- * Pop the stack entry and reset global variables. Unlike
+ * Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that might
* be already gone.
*/
@@ -957,7 +957,7 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
/*
* SPI_cursor_open_with_args()
*
- * Parse and plan a query and open it as a portal. Like SPI_execute_with_args,
+ * Parse and plan a query and open it as a portal. Like SPI_execute_with_args,
* we can tell the planner to rely on the parameter values as constants,
* because the plan will only be used once.
*/
@@ -1167,7 +1167,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
}
/*
- * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
+ * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
* check in transformDeclareCursorStmt because the cursor options might
* not have come through there.
*/
@@ -1425,7 +1425,7 @@ SPI_plan_is_valid(SPIPlanPtr plan)
/*
* SPI_result_code_string --- convert any SPI return code to a string
*
- * This is often useful in error messages. Most callers will probably
+ * This is often useful in error messages. Most callers will probably
* only pass negative (error-case) codes, but for generality we recognize
* the success codes too.
*/
@@ -1847,7 +1847,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
/*
* The last canSetTag query sets the status values returned to the
- * caller. Be careful to free any tuptables not returned, to
+ * caller. Be careful to free any tuptables not returned, to
* avoid intratransaction memory leak.
*/
if (canSetTag)
diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c
index 10723a5014..f97ccf4dd6 100644
--- a/src/backend/executor/tstoreReceiver.c
+++ b/src/backend/executor/tstoreReceiver.c
@@ -5,7 +5,7 @@
* a Tuplestore.
*
* Optionally, we can force detoasting (but not decompression) of out-of-line
- * toasted values. This is to support cursors WITH HOLD, which must retain
+ * toasted values. This is to support cursors WITH HOLD, which must retain
* data even if the underlying table is dropped.
*
*
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index 2fc4422a31..b16725f4d9 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -99,7 +99,7 @@ appendStringInfo(StringInfo str, const char *fmt,...)
* appendStringInfoVA
*
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
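The standard usage pattern referred to above looks roughly like this; append_fmt is a hypothetical wrapper, while StringInfo, appendStringInfoVA, and enlargeStringInfo are this file's own API:

    #include <stdarg.h>

    /* Sketch of a varargs wrapper in the style the comment describes:
     * try to format in the space available; on failure, double the
     * buffer and retry. */
    static void
    append_fmt(StringInfo str, const char *fmt, ...)
    {
        for (;;)
        {
            va_list args;
            bool    success;

            va_start(args, fmt);
            success = appendStringInfoVA(str, fmt, args);
            va_end(args);
            if (success)
                break;                              /* it fit */
            enlargeStringInfo(str, str->maxlen);    /* double, then retry */
        }
    }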
@@ -234,7 +234,7 @@ enlargeStringInfo(StringInfo str, int needed)
int newlen;
/*
- * Guard against out-of-range "needed" values. Without this, we can get
+ * Guard against out-of-range "needed" values. Without this, we can get
* an overflow or infinite loop in the following.
*/
if (needed < 0) /* should not happen */
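To make the hazard concrete, a standalone sketch of the guarded growth logic; grow_capacity and the constant cap are stand-ins for the real function and MaxAllocSize, not the actual code. Without the up-front checks, a negative or huge request could make the doubling loop wrap around and never terminate:

    /* Overflow-safe doubling growth: reject bad requests first, then
     * double until large enough, clamping at the allocation cap. */
    static int
    grow_capacity(int curlen, int maxlen, int needed)
    {
        const int limit = 0x3fffffff;       /* stand-in allocation cap */

        if (needed < 0 || needed >= limit - curlen)
            return -1;                      /* caller should error out */
        needed += curlen + 1;               /* total space required */
        if (needed <= maxlen)
            return maxlen;                  /* already big enough */

        int newlen = 2 * maxlen;
        while (needed > newlen)
            newlen = 2 * newlen;
        return (newlen > limit) ? limit : newlen;
    }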
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 8c76eafef5..79e30bc478 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -52,7 +52,7 @@ static int recv_and_check_password_packet(Port *port);
/* Max size of username ident server can return */
#define IDENT_USERNAME_MAX 512
-/* Standard TCP port number for Ident service. Assigned by IANA */
+/* Standard TCP port number for Ident service. Assigned by IANA */
#define IDENT_PORT 113
static int authident(hbaPort *port);
@@ -580,7 +580,7 @@ recv_password_packet(Port *port)
(errmsg("received password packet")));
/*
- * Return the received string. Note we do not attempt to do any
+ * Return the received string. Note we do not attempt to do any
* character-set conversion on it; since we don't yet know the client's
* encoding, there wouldn't be much point.
*/
@@ -1480,7 +1480,7 @@ interpret_ident_response(const char *ident_response,
/*
* Talk to the ident server on host "remote_ip_addr" and find out who
* owns the tcp connection from his port "remote_port" to port
- * "local_port_addr" on host "local_ip_addr". Return the user name the
+ * "local_port_addr" on host "local_ip_addr". Return the user name the
* ident server gives as "*ident_user".
*
* IP addresses and port numbers are in network byte order.
@@ -1842,7 +1842,7 @@ ident_unix(int sock, char *ident_user)
/*
* Determine the username of the initiator of the connection described
- * by "port". Then look in the usermap file under the usermap
+ * by "port". Then look in the usermap file under the usermap
* port->hba->usermap and see if that user is equivalent to Postgres user
* port->user.
*
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index ee0991affa..094e74b455 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -30,13 +30,13 @@
* impersonations.
*
* Another benefit of EDH is that it allows the backend and
- * clients to use DSA keys. DSA keys can only provide digital
+ * clients to use DSA keys. DSA keys can only provide digital
* signatures, not encryption, and are often acceptable in
* jurisdictions where RSA keys are unacceptable.
*
* The downside to EDH is that it makes it impossible to
* use ssldump(1) if there's a problem establishing an SSL
- * session. In this case you'll need to temporarily disable
+ * session. In this case you'll need to temporarily disable
* EDH by commenting out the callback.
*
* ...
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 5762fad3ef..4ed85713d2 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -50,7 +50,7 @@ static List *parsed_hba_lines = NIL;
* These variables hold the pre-parsed contents of the ident
* configuration files, as well as the flat auth file.
* Each is a list of sublists, one sublist for
- * each (non-empty, non-comment) line of the file. Each sublist's
+ * each (non-empty, non-comment) line of the file. Each sublist's
* first item is an integer line number (so we can give somewhat-useful
* location info in error messages). Remaining items are palloc'd strings,
* one string per token on the line. Note there will always be at least
@@ -1745,7 +1745,7 @@ load_ident(void)
/*
* Determine what authentication method should be used when accessing database
- * "database" from frontend "raddr", user "user". Return the method and
+ * "database" from frontend "raddr", user "user". Return the method and
* an optional argument (stored in fields of *port), and STATUS_OK.
*
* Note that STATUS_ERROR indicates a problem with the hba config file.
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index 91cfe187ee..d5e6cab201 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -2,7 +2,7 @@
* md5.c
*
* Implements the MD5 Message-Digest Algorithm as specified in
- * RFC 1321. This implementation is a simple one, in that it
+ * RFC 1321. This implementation is a simple one, in that it
* needs every input byte to be buffered before doing any
* calculations. I do not expect this file to be used for
* general purpose MD5'ing of large amounts of data, only for
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index a89701ce1d..60d8d3b289 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -401,7 +401,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
/*
* Note: This might fail on some OS's, like Linux older than
* 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map
- * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
+ * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
* connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
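For reference, the option in question is set along these lines (a sketch; on platforms lacking IPV6_V6ONLY it compiles away and the mapped-address behavior remains):

    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Request a v6-only listening socket where the OS supports it, so
     * IPv4 peers are not reported as ::ffff:a.b.c.d mapped addresses. */
    static void
    set_v6only(int fd)
    {
    #ifdef IPV6_V6ONLY
        int one = 1;

        (void) setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY,
                          (void *) &one, sizeof(one));
    #else
        (void) fd;                  /* old kernels: option unavailable */
    #endif
    }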
@@ -981,7 +981,7 @@ pq_getmessage(StringInfo s, int maxlen)
if (len > 0)
{
/*
- * Allocate space for message. If we run out of room (ridiculously
+ * Allocate space for message. If we run out of room (ridiculously
* large message), we will elog(ERROR), but we want to discard the
* message body so as not to lose communication sync.
*/
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 7731d19eaf..c122b1a66e 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -120,7 +120,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen)
* pq_sendcountedtext - append a counted text string (with character set conversion)
*
* The data sent to the frontend by this routine is a 4-byte count field
- * followed by the string. The count includes itself or not, as per the
+ * followed by the string. The count includes itself or not, as per the
* countincludesself flag (pre-3.0 protocol requires it to include itself).
* The passed text string need not be null-terminated, and the data sent
* to the frontend isn't either.
diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c
index ed26fbdb51..67e0114b03 100644
--- a/src/backend/libpq/pqsignal.c
+++ b/src/backend/libpq/pqsignal.c
@@ -27,14 +27,14 @@
* Ultrix and SunOS provide BSD signal(2) semantics by default.
*
* SVID2 and POSIX signal(2) semantics differ from BSD signal(2)
- * semantics. We can use the POSIX sigaction(2) on systems that
+ * semantics. We can use the POSIX sigaction(2) on systems that
* allow us to request restartable signals (SA_RESTART).
*
* Some systems don't allow restartable signals at all unless we
* link to a special BSD library.
*
* We devoutly hope that there aren't any systems that provide
- * neither POSIX signals nor BSD signals. The alternative
+ * neither POSIX signals nor BSD signals. The alternative
* is to do signal-handler reinstallation, which doesn't work well
* at all.
* ------------------------------------------------------------------------*/
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 10ed18c281..84101459be 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -68,7 +68,7 @@ main(int argc, char *argv[])
/*
* Remember the physical location of the initially given argv[] array for
- * possible use by ps display. On some platforms, the argv[] storage must
+ * possible use by ps display. On some platforms, the argv[] storage must
* be overwritten in order to set the process title for ps. In such cases
* save_ps_display_args makes and returns a new copy of the argv[] array.
*
@@ -89,10 +89,10 @@ main(int argc, char *argv[])
MemoryContextInit();
/*
- * Set up locale information from environment. Note that LC_CTYPE and
+ * Set up locale information from environment. Note that LC_CTYPE and
* LC_COLLATE will be overridden later from pg_control if we are in an
* already-initialized database. We set them here so that they will be
- * available to fill pg_control during initdb. LC_MESSAGES will get set
+ * available to fill pg_control during initdb. LC_MESSAGES will get set
* later during GUC option processing, but we set it here to allow startup
* error messages to be localized.
*/
@@ -201,7 +201,7 @@ main(int argc, char *argv[])
/*
- * Place platform-specific startup hacks here. This is the right
+ * Place platform-specific startup hacks here. This is the right
* place to put code that must be executed early in launch of either a
* postmaster, a standalone backend, or a standalone bootstrap run.
* Note that this code will NOT be executed when a backend or
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index 55f3537d80..0384336a07 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -39,7 +39,7 @@
* where x's are unspecified bits. The two's complement negative is formed
* by inverting all the bits and adding one. Inversion gives
* yyyyyy01111
- * where each y is the inverse of the corresponding x. Incrementing gives
+ * where each y is the inverse of the corresponding x. Incrementing gives
* yyyyyy10000
* and then ANDing with the original value gives
* 00000010000
@@ -721,7 +721,7 @@ bms_join(Bitmapset *a, Bitmapset *b)
/*----------
* bms_first_member - find and remove first member of a set
*
- * Returns -1 if set is empty. NB: set is destructively modified!
+ * Returns -1 if set is empty. NB: set is destructively modified!
*
* This is intended as support for iterating through the members of a set.
* The typical pattern is
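That pattern, sketched against this file's own API (process_member is a hypothetical callback; bms_copy and bms_free protect a set the caller still needs from the destructive modification):

    /* Destructive iteration: each call removes and returns the smallest
     * remaining member, so loop over a throwaway copy. */
    Bitmapset  *tmp = bms_copy(inputset);
    int         x;

    while ((x = bms_first_member(tmp)) >= 0)
        process_member(x);          /* hypothetical per-member action */
    bms_free(tmp);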
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index fdb30f77e9..dfe2c48aa6 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -4,7 +4,7 @@
* Copy functions for Postgres tree nodes.
*
* NOTE: we currently support copying all node types found in parse and
- * plan trees. We do not support copying executor state trees; there
+ * plan trees. We do not support copying executor state trees; there
* is no need for that, and no point in maintaining all the code that
* would be needed. We also do not support copying Path trees, mainly
* because the circular linkages between RelOptInfo and Path nodes can't
@@ -30,7 +30,7 @@
/*
* Macros to simplify copying of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in a Copy routine are
* named 'newnode' and 'from'.
*/
@@ -867,7 +867,7 @@ _copyIntoClause(IntoClause *from)
/*
* We don't need a _copyExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out copying the common fields...
*/
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 445a59f731..dc62a95843 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -11,7 +11,7 @@
* be handled easily in a simple depth-first traversal.
*
* Currently, in fact, equal() doesn't know how to compare Plan trees
- * either. This might need to be fixed someday.
+ * either. This might need to be fixed someday.
*
* NOTE: it is intentional that parse location fields (in nodes that have
* one) are not compared. This is because we want, for example, a variable
@@ -34,8 +34,8 @@
/*
- * Macros to simplify comparison of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify comparison of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in an Equal routine are
* named 'a' and 'b'.
*/
@@ -125,7 +125,7 @@ _equalIntoClause(IntoClause *a, IntoClause *b)
/*
* We don't need an _equalExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out comparing the common fields...
*/
@@ -773,9 +773,9 @@ static bool
_equalPlaceHolderVar(PlaceHolderVar *a, PlaceHolderVar *b)
{
/*
- * We intentionally do not compare phexpr. Two PlaceHolderVars with the
+ * We intentionally do not compare phexpr. Two PlaceHolderVars with the
* same ID and levelsup should be considered equal even if the contained
- * expressions have managed to mutate to different states. One way in
+ * expressions have managed to mutate to different states. One way in
* which that can happen is that initplan sublinks would get replaced by
* differently-numbered Params when sublink folding is done. (The end
* result of such a situation would be some unreferenced initplans, which
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index 04e17439e8..b3dbb56768 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -793,7 +793,7 @@ list_union_oid(List *list1, List *list2)
* "intersection" if list1 is known unique beforehand.
*
* This variant works on lists of pointers, and determines list
- * membership via equal(). Note that the list1 member will be pointed
+ * membership via equal(). Note that the list1 member will be pointed
* to in the result.
*/
List *
@@ -985,7 +985,7 @@ list_append_unique_oid(List *list, Oid datum)
* via equal().
*
* This is almost the same functionality as list_union(), but list1 is
- * modified in-place rather than being copied. Note also that list2's cells
+ * modified in-place rather than being copied. Note also that list2's cells
* are not inserted in list1, so the analogy to list_concat() isn't perfect.
*/
List *
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 0d21c9fcb0..da51235c3d 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -231,7 +231,7 @@ exprType(Node *expr)
/*
* exprTypmod -
* returns the type-specific modifier of the expression's result type,
- * if it can be determined. In many cases, it can't and we return -1.
+ * if it can be determined. In many cases, it can't and we return -1.
*/
int32
exprTypmod(Node *expr)
@@ -989,8 +989,8 @@ leftmostLoc(int loc1, int loc2)
*
* The walker routine should return "false" to continue the tree walk, or
* "true" to abort the walk and immediately return "true" to the top-level
- * caller. This can be used to short-circuit the traversal if the walker
- * has found what it came for. "false" is returned to the top-level caller
+ * caller. This can be used to short-circuit the traversal if the walker
+ * has found what it came for. "false" is returned to the top-level caller
* iff no invocation of the walker returned "true".
*
* The node types handled by expression_tree_walker include all those
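A minimal walker following that contract might look like this; the context struct and the goal (find any Var) are invented for illustration, while Node, IsA, and expression_tree_walker are the node API this comment documents:

    typedef struct find_var_context { int sublevels_up; } find_var_context;

    static bool
    find_var_walker(Node *node, find_var_context *context)
    {
        if (node == NULL)
            return false;           /* nothing here, keep walking */
        if (IsA(node, Var))
            return true;            /* found one: abort the whole walk */
        /* recurse into child nodes using the standard entry point */
        return expression_tree_walker(node, find_var_walker,
                                      (void *) context);
    }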
@@ -1028,7 +1028,7 @@ leftmostLoc(int loc1, int loc2)
*
* expression_tree_walker will handle SubPlan nodes by recursing normally
* into the "testexpr" and the "args" list (which are expressions belonging to
- * the outer plan). It will not touch the completed subplan, however. Since
+ * the outer plan). It will not touch the completed subplan, however. Since
* there is no link to the original Query, it is not possible to recurse into
* subselects of an already-planned expression tree. This is OK for current
* uses, but may need to be revisited in future.
@@ -2032,7 +2032,7 @@ expression_tree_mutator(Node *node,
* This routine exists just to reduce the number of places that need to know
* where all the expression subtrees of a Query are. Note it can be used
* for starting a walk at top level of a Query regardless of whether the
- * mutator intends to descend into subqueries. It is also useful for
+ * mutator intends to descend into subqueries. It is also useful for
* descending into subqueries within a mutator.
*
* Some callers want to suppress mutating of certain items in the Query,
@@ -2042,7 +2042,7 @@ expression_tree_mutator(Node *node,
* indicated items. (More flag bits may be added as needed.)
*
* Normally the Query node itself is copied, but some callers want it to be
- * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
+ * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
* modified substructure is safely copied in any case.
*/
Query *
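
A self-contained sketch of the walker convention described in the comments above (toy binary-tree nodes, not the real Node hierarchy): the callback returns true to abort the whole traversal and false to continue, and the top level reports true iff some invocation returned true.

#include <stdbool.h>
#include <stddef.h>

typedef struct ToyNode
{
    int             value;
    struct ToyNode *left;
    struct ToyNode *right;
} ToyNode;

typedef bool (*walker_fn) (ToyNode *node, void *context);

static bool
toy_tree_walker(ToyNode *node, walker_fn walker, void *context)
{
    if (node == NULL)
        return false;
    if (walker(node, context))
        return true;            /* short-circuit: propagate abort upward */
    if (toy_tree_walker(node->left, walker, context))
        return true;
    return toy_tree_walker(node->right, walker, context);
}

/* Example walker: stop as soon as we find a negative value. */
static bool
find_negative(ToyNode *node, void *context)
{
    (void) context;
    return node->value < 0;     /* true == found what we came for */
}
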
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index c8c4409fb9..a0a1fb065b 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -13,7 +13,7 @@
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
* have an output function defined here (as well as an input function
- * in readfuncs.c). For use in debugging, we also provide output
+ * in readfuncs.c). For use in debugging, we also provide output
* functions for nodes that appear in raw parsetrees, path, and plan trees.
* These nodes however need not have input functions.
*
@@ -30,8 +30,8 @@
/*
- * Macros to simplify output of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify output of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in an Out
* routine.
*/
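
The hard-wiring convention can be illustrated with a hypothetical macro (the real outfuncs.c macros write to a StringInfo, not a FILE, and are spelled differently): the macro body references locals named str and node, so it only works inside an Out routine that declares exactly those names, and a field-name typo fails at compile time rather than producing silent garbage.

#include <stdio.h>

/* hypothetical stand-in for the WRITE_*_FIELD macro family */
#define SKETCH_WRITE_INT_FIELD(fldname) \
    fprintf(str, " :" #fldname " %d", node->fldname)

typedef struct ToyPlan
{
    int         plan_rows;
    int         plan_width;
} ToyPlan;

static void
_outToyPlan(FILE *str, const ToyPlan *node)
{
    SKETCH_WRITE_INT_FIELD(plan_rows);  /* expands using "str" and "node" */
    SKETCH_WRITE_INT_FIELD(plan_width);
}
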
diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c
index 4687d60ec1..cec5dc1896 100644
--- a/src/backend/nodes/read.c
+++ b/src/backend/nodes/read.c
@@ -85,21 +85,21 @@ stringToNode(char *str)
* Backslashes themselves must also be backslashed for consistency.
* Any other character can be, but need not be, backslashed as well.
* * If the resulting token is '<>' (with no backslash), it is returned
- * as a non-NULL pointer to the token but with length == 0. Note that
+ * as a non-NULL pointer to the token but with length == 0. Note that
* there is no other way to get a zero-length token.
*
* Returns a pointer to the start of the next token, and the length of the
- * token (including any embedded backslashes!) in *length. If there are
+ * token (including any embedded backslashes!) in *length. If there are
* no more tokens, NULL and 0 are returned.
*
* NOTE: this routine doesn't remove backslashes; the caller must do so
* if necessary (see "debackslash").
*
* NOTE: prior to release 7.0, this routine also had a special case to treat
- * a token starting with '"' as extending to the next '"'. This code was
+ * a token starting with '"' as extending to the next '"'. This code was
* broken, however, since it would fail to cope with a string containing an
* embedded '"'. I have therefore removed this special case, and instead
- * introduced rules for using backslashes to quote characters. Higher-level
+ * introduced rules for using backslashes to quote characters. Higher-level
* code should add backslashes to a string constant to ensure it is treated
* as a single token.
*/
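
A small sketch of the quoting rule above (a toy "debackslash", not the one in read.c): a backslash protects the following character, so the stored text a\ b reads back as the single token a b, and a backslashed < or > keeps '<>' from being taken as the special zero-length token.

/* Strip backslashes in place; each backslash protects the next char. */
static void
toy_debackslash(char *token)
{
    char       *src = token,
               *dst = token;

    while (*src)
    {
        if (*src == '\\' && src[1] != '\0')
            src++;              /* skip the backslash, keep the next char */
        *dst++ = *src++;
    }
    *dst = '\0';
}
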
@@ -258,7 +258,7 @@ nodeTokenType(char *token, int length)
* Slightly higher-level reader.
*
* This routine applies some semantic knowledge on top of the purely
- * lexical tokenizer pg_strtok(). It can read
+ * lexical tokenizer pg_strtok(). It can read
* * Value token nodes (integers, floats, or strings);
* * General nodes (via parseNodeString() from readfuncs.c);
* * Lists of the above;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index b27cd513a5..a7e58bf367 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -12,7 +12,7 @@
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
- * never have occasion to read them in. (There was once code here that
+ * never have occasion to read them in. (There was once code here that
* claimed to read them, but it was broken as well as unused.) We
* never read executor state trees, either.
*
@@ -34,7 +34,7 @@
/*
* Macros to simplify reading of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in a Read
* routine.
*/
@@ -127,7 +127,7 @@
/*
* NOTE: use atoi() to read values written with %d, or atoui() to read
* values written with %u in outfuncs.c. An exception is OID values,
- * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
+ * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
* but this will probably change in the future.)
*/
#define atoui(x) ((unsigned int) strtoul((x), NULL, 10))
@@ -539,7 +539,7 @@ _readOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -568,7 +568,7 @@ _readDistinctExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -597,7 +597,7 @@ _readScalarArrayOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -921,7 +921,7 @@ _readNullIfExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
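
The rule that all four of these readers apply can be restated as a tiny sketch (illustrative toy types, not the real readfuncs.c code): whatever opfuncid value was spelled in the stored text is discarded and the field is set to zero, forcing a fresh pg_operator lookup from opno at execution time, so stored rules never carry a hidden dependency on an operator's current implementation function.

typedef unsigned int ToyOid;

typedef struct ToyOpExpr
{
    ToyOid      opno;           /* the operator itself, from the stored text */
    ToyOid      opfuncid;       /* its implementation function */
} ToyOpExpr;

static void
toy_read_opfuncid(ToyOpExpr *node, const char *stored_token)
{
    (void) stored_token;        /* kept in the text only for debugging */
    node->opfuncid = 0;         /* force re-lookup from node->opno later */
}
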
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index 20d792c479..b32757af12 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -19,7 +19,7 @@
* of lossiness. In theory we could fall back to page ranges at some
* point, but for now that seems useless complexity.
*
- * We also support the notion of candidate matches, or rechecking. This
+ * We also support the notion of candidate matches, or rechecking. This
* means we know that a search need visit only some tuples on a page,
* but we are not certain that all of those tuples are real matches.
* So the eventual heap scan must recheck the quals for these tuples only,
@@ -49,7 +49,7 @@
/*
* The maximum number of tuples per page is not large (typically 256 with
* 8K pages, or 1024 with 32K pages). So there's not much point in making
- * the per-page bitmaps variable size. We just legislate that the size
+ * the per-page bitmaps variable size. We just legislate that the size
* is this:
*/
#define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage
@@ -62,10 +62,10 @@
* for that page in the page table.
*
* We actually store both exact pages and lossy chunks in the same hash
- * table, using identical data structures. (This is because dynahash.c's
+ * table, using identical data structures. (This is because dynahash.c's
* memory management doesn't allow space to be transferred easily from one
* hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the
- * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
+ * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
* also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer
* remainder operations. So, define it like this:
*/
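
(The actual definition follows in the source.)  As a hedged sketch of the arithmetic a power-of-2 chunk size enables, with an illustrative constant rather than the real one: the expensive remainder reduces to a bit-mask, and the chunk's header page is pageno - bitno, matching the assignment visible in tbm_mark_page_lossy below.

#include <assert.h>

#define TOY_PAGES_PER_CHUNK 256 /* illustrative; must be a power of 2 */

typedef unsigned int ToyBlockNumber;

static void
toy_chunk_coords(ToyBlockNumber pageno,
                 ToyBlockNumber *chunk_pageno, unsigned int *bitno)
{
    assert((TOY_PAGES_PER_CHUNK & (TOY_PAGES_PER_CHUNK - 1)) == 0);
    *bitno = pageno & (TOY_PAGES_PER_CHUNK - 1);    /* pageno % chunk size */
    *chunk_pageno = pageno - *bitno;                /* chunk header page */
}
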
@@ -143,7 +143,7 @@ struct TIDBitmap
/*
* When iterating over a bitmap in sorted order, a TBMIterator is used to
- * track our progress. There can be several iterators scanning the same
+ * track our progress. There can be several iterators scanning the same
* bitmap concurrently. Note that the bitmap becomes read-only as soon as
* any iterator is created.
*/
@@ -791,7 +791,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno)
*
* If new, the entry is marked as an exact (non-chunk) entry.
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static PagetableEntry *
@@ -871,7 +871,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno)
/*
* tbm_mark_page_lossy - mark the page number as lossily stored
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static void
@@ -892,7 +892,7 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno)
chunk_pageno = pageno - bitno;
/*
- * Remove any extant non-lossy entry for the page. If the page is its own
+ * Remove any extant non-lossy entry for the page. If the page is its own
* chunk header, however, we skip this and handle the case below.
*/
if (bitno != 0)
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index 392cfc5aa3..c74c1b2586 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -59,7 +59,7 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* them an artificially bad fitness.
*
* init_tour() is aware of this rule and so we should never reject a tour
- * during the initial filling of the pool. It seems difficult to persuade
+ * during the initial filling of the pool. It seems difficult to persuade
* the recombination logic never to break the rule, however.
*/
if (num_gene >= 2 && tour[0] > tour[1])
@@ -86,11 +86,11 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* not already contain some entries. The newly added entries will be
* recycled by the MemoryContextDelete below, so we must ensure that the
* list is restored to its former state before exiting. We can do this by
- * truncating the list to its original length. NOTE this assumes that any
+ * truncating the list to its original length. NOTE this assumes that any
* added entries are appended at the end!
*
* We also must take care not to mess up the outer join_rel_hash, if there
- * is one. We can do this by just temporarily setting the link to NULL.
+ * is one. We can do this by just temporarily setting the link to NULL.
* (If we are dealing with enough join rels, which we very likely are, a
* new hash table will get built and used locally.)
*/
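
A toy rendering of the save/restore pattern those two paragraphs describe (toy fields standing in for the PlannerInfo members; the real code truncates a List): remember the original length, let the inner step append entries, then truncate back, and stash/restore the hash-table link around the whole thing.

#include <stddef.h>

typedef struct ToyRoot
{
    size_t      join_rel_count; /* stands in for list_length(join_rel_list) */
    void       *join_rel_hash;  /* stands in for the outer hash table */
} ToyRoot;

static void
toy_eval_with_clean_state(ToyRoot *root, void (*eval_step) (ToyRoot *))
{
    size_t      savelength = root->join_rel_count;  /* original length */
    void       *savehash = root->join_rel_hash;

    root->join_rel_hash = NULL; /* force any new hash table to be private */
    eval_step(root);            /* may append entries at the end */

    /* restore: assumes new entries were appended, never inserted */
    root->join_rel_count = savelength;
    root->join_rel_hash = savehash;
}
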
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 36c5c116dc..7a65702365 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -272,7 +272,7 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
* set_append_rel_pathlist
* Build access paths for an "append relation"
*
- * The passed-in rel and RTE represent the entire append relation. The
+ * The passed-in rel and RTE represent the entire append relation. The
* relation's contents are computed by appending together the output of
* the individual member relations. Note that in the inheritance case,
* the first member relation is actually the same table as is mentioned in
@@ -342,7 +342,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to copy the parent's targetlist and quals to the child,
- * with appropriate substitution of variables. However, only the
+ * with appropriate substitution of variables. However, only the
* baserestrictinfo quals are needed before we can check for
* constraint exclusion; so do that first and then check to see if we
* can disregard this child.
@@ -485,7 +485,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Finally, build Append path and install it as the only access path for
- * the parent rel. (Note: this is correct even if we have zero or one
+ * the parent rel. (Note: this is correct even if we have zero or one
* live subpath due to constraint exclusion.)
*/
add_path(rel, (Path *) create_append_path(rel, subpaths));
@@ -889,7 +889,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
- * jointree item. These are the components to be joined together.
+ * jointree item. These are the components to be joined together.
* Note that levels_needed == list_length(initial_rels).
*
* Returns the final level of join relations, i.e., the relation that is
@@ -905,7 +905,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* needed for these paths need have been instantiated.
*
* Note to plugin authors: the functions invoked during standard_join_search()
- * modify root->join_rel_list and root->join_rel_hash. If you want to do more
+ * modify root->join_rel_list and root->join_rel_hash. If you want to do more
* than one join-order search, you'll probably need to save and restore the
* original states of those data structures. See geqo_eval() for an example.
*/
@@ -996,7 +996,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* column k is found to be unsafe to reference, we set unsafeColumns[k] to
* TRUE, but we don't reject the subquery overall since column k might
* not be referenced by some/all quals. The unsafeColumns[] array will be
- * consulted later by qual_is_pushdown_safe(). It's better to do it this
+ * consulted later by qual_is_pushdown_safe(). It's better to do it this
* way than to make the checks directly in qual_is_pushdown_safe(), because
* when the subquery involves set operations we have to check the output
* expressions in each arm of the set op.
@@ -1089,7 +1089,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* check_output_expressions - check subquery's output expressions for safety
*
* There are several cases in which it's unsafe to push down an upper-level
- * qual if it references a particular output column of a subquery. We check
+ * qual if it references a particular output column of a subquery. We check
* each output column of the subquery and set unsafeColumns[k] to TRUE if
* that column is unsafe for a pushed-down qual to reference. The conditions
* checked here are:
@@ -1107,7 +1107,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* of rows returned. (This condition is vacuous for DISTINCT, because then
* there are no non-DISTINCT output columns, so we needn't check. But note
* we are assuming that the qual can't distinguish values that the DISTINCT
- * operator sees as equal. This is a bit shaky but we have no way to test
+ * operator sees as equal. This is a bit shaky but we have no way to test
* for the case, and it's unlikely enough that we shouldn't refuse the
* optimization just because it could theoretically happen.)
*/
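
A minimal sketch of the division of labor described above (toy arrays, not the real data structures): the per-column safety flags are computed once per subquery, and each candidate qual then consults only the flags for the output columns it actually references.

#include <stdbool.h>

/* true iff the qual references no column flagged unsafe */
static bool
toy_qual_is_pushdown_safe(const bool *unsafeColumns,
                          const int *cols_referenced, int ncols)
{
    for (int i = 0; i < ncols; i++)
    {
        if (unsafeColumns[cols_referenced[i]])
            return false;       /* qual touches an unsafe column */
    }
    return true;
}
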
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index 34407af607..48674b283d 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -59,7 +59,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* See clause_selectivity() for the meaning of the additional parameters.
*
* Our basic approach is to take the product of the selectivities of the
- * subclauses. However, that's only right if the subclauses have independent
+ * subclauses. However, that's only right if the subclauses have independent
* probabilities, and in reality they are often NOT independent. So,
* we want to be smarter where we can.
@@ -76,12 +76,12 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* see that hisel is the fraction of the range below the high bound, while
* losel is the fraction above the low bound; so hisel can be interpreted
* directly as a 0..1 value but we need to convert losel to 1-losel before
- * interpreting it as a value. Then the available range is 1-losel to hisel.
+ * interpreting it as a value. Then the available range is 1-losel to hisel.
* However, this calculation double-excludes nulls, so really we need
* hisel + losel + null_frac - 1.)
*
* If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
- * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
* yields an impossible (negative) result.
*
* A free side-effect is that we can recognize redundant inequalities such
@@ -175,7 +175,7 @@ clauselist_selectivity(PlannerInfo *root,
{
/*
* If it's not a "<" or ">" operator, just merge the
- * selectivity in generically. But if it's the right oprrest,
+ * selectivity in generically. But if it's the right oprrest,
* add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
@@ -460,14 +460,14 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo,
* nestloop join's inner relation --- varRelid should then be the ID of the
* inner relation.
*
- * When varRelid is 0, all variables are treated as variables. This
+ * When varRelid is 0, all variables are treated as variables. This
* is appropriate for ordinary join clauses and restriction clauses.
*
* jointype is the join type, if the clause is a join clause. Pass JOIN_INNER
* if the clause isn't a join clause.
*
* sjinfo is NULL for a non-join clause, otherwise it provides additional
- * context information about the join being performed. There are some
+ * context information about the join being performed. There are some
* special cases:
* 1. For a special (not INNER) join, sjinfo is always a member of
* root->join_info_list.
@@ -502,7 +502,7 @@ clause_selectivity(PlannerInfo *root,
/*
* If the clause is marked pseudoconstant, then it will be used as a
* gating qual and should not affect selectivity estimates; hence
- * return 1.0. The only exception is that a constant FALSE may be
+ * return 1.0. The only exception is that a constant FALSE may be
* taken as having selectivity 0.0, since it will surely mean no rows
* out of the plan. This case is simple enough that we need not
* bother caching the result.
@@ -521,11 +521,11 @@ clause_selectivity(PlannerInfo *root,
/*
* If possible, cache the result of the selectivity calculation for
- * the clause. We can cache if varRelid is zero or the clause
+ * the clause. We can cache if varRelid is zero or the clause
* contains only vars of that relid --- otherwise varRelid will affect
* the result, so mustn't cache. Outer join quals might be examined
* with either their join's actual jointype or JOIN_INNER, so we need
- * two cache variables to remember both cases. Note: we assume the
+ * two cache variables to remember both cases. Note: we assume the
* result won't change if we are switching the input relations or
* considering a unique-ified case, so we only need one cache variable
* for all non-JOIN_INNER cases.
@@ -682,7 +682,7 @@ clause_selectivity(PlannerInfo *root,
/*
* This is not an operator, so we guess at the selectivity. THIS IS A
* HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
- * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
+ * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
*/
s1 = (Selectivity) 0.3333333;
}
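
Tying together the range-clause arithmetic from this file's header comment, a hedged sketch (the DEFAULT_* values here are illustrative stand-ins): combine the paired inequalities as hisel + losel + null_frac - 1, and fall back to a default range selectivity when either input is itself a default or the equation yields an impossible result.

#define TOY_DEFAULT_INEQ_SEL        0.3333333333333333
#define TOY_DEFAULT_RANGE_INEQ_SEL  0.005

static double
toy_range_selectivity(double hisel, double losel, double null_frac)
{
    double      s2 = hisel + losel + null_frac - 1.0;

    if (hisel == TOY_DEFAULT_INEQ_SEL || losel == TOY_DEFAULT_INEQ_SEL ||
        s2 <= 0.0)
        return TOY_DEFAULT_RANGE_INEQ_SEL;  /* don't trust the equation */
    return s2;
}

For example, with hisel = 0.7, losel = 0.4, and no nulls, the available range is 0.7 + 0.4 - 1 = 0.1 of the rows.
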
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index fbd45d9b38..4776189ed9 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -24,7 +24,7 @@
*
* Obviously, taking constants for these values is an oversimplification,
* but it's tough enough to get any useful estimates even at this level of
- * detail. Note that all of these parameters are user-settable, in case
+ * detail. Note that all of these parameters are user-settable, in case
* the default values are drastically off for a particular platform.
*
* We compute two separate costs for each path:
@@ -428,7 +428,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
* computed for us by query_planner.
*
* Caller is expected to have ensured that tuples_fetched is greater than zero
- * and rounded to integer (see clamp_row_est). The result will likewise be
+ * and rounded to integer (see clamp_row_est). The result will likewise be
* greater than zero and integral.
*/
double
@@ -617,7 +617,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* For small numbers of pages we should charge random_page_cost apiece,
* while if nearly all the table's pages are being read, it's more
- * appropriate to charge seq_page_cost apiece. The effect is nonlinear,
+ * appropriate to charge seq_page_cost apiece. The effect is nonlinear,
* too. For lack of a better idea, interpolate like this to determine the
* cost per page.
*/
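
One concrete shape such an interpolation can take (a sketch consistent with the comment above, not necessarily the exact formula in this function): charge near random_page_cost when few pages are fetched and slide nonlinearly toward seq_page_cost as the fetched fraction approaches the whole table.

#include <math.h>

static double
toy_cost_per_page(double random_page_cost, double seq_page_cost,
                  double pages_fetched, double total_pages)
{
    double      frac = pages_fetched / total_pages; /* 0..1 */

    if (pages_fetched <= 1.0)
        return random_page_cost;
    /* sqrt makes the slide nonlinear, favoring the random end early */
    return random_page_cost -
        (random_page_cost - seq_page_cost) * sqrt(frac);
}

At pages_fetched == total_pages this yields exactly seq_page_cost, and for tiny fetches it stays near random_page_cost.
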
@@ -688,7 +688,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
* Estimate the cost of a BitmapAnd node
*
* Note that this considers only the costs of index scanning and bitmap
- * creation, not the eventual heap access. In that sense the object isn't
+ * creation, not the eventual heap access. In that sense the object isn't
* truly a Path, but it has enough path-like properties (costs in particular)
* to warrant treating it as one.
*/
@@ -745,7 +745,7 @@ cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
/*
* We estimate OR selectivity on the assumption that the inputs are
* non-overlapping, since that's often the case in "x IN (list)" type
- * situations. Of course, we clamp to 1.0 at the end.
+ * situations. Of course, we clamp to 1.0 at the end.
*
* The runtime cost of the BitmapOr itself is estimated at 100x
* cpu_operator_cost for each tbm_union needed. Probably too small,
@@ -821,7 +821,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
/*
* We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
- * understands how to do it correctly. Therefore, honor enable_tidscan
+ * understands how to do it correctly. Therefore, honor enable_tidscan
* only when CURRENT OF isn't present. Also note that cost_qual_eval
* counts a CurrentOfExpr as having startup cost disable_cost, which we
* subtract off here; that's to prevent other plan types such as seqscan
@@ -955,7 +955,7 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
*
* Note: this is used for both self-reference and regular CTEs; the
* possible cost differences are below the threshold of what we could
- * estimate accurately anyway. Note that the costs of evaluating the
+ * estimate accurately anyway. Note that the costs of evaluating the
* referenced CTE query are added into the final plan as initplan costs,
* and should NOT be counted here.
*/
@@ -1039,7 +1039,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
* If the total volume exceeds work_mem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
- * merge pass. We expect about ceil(logM(r)) merge passes where r is the
+ * merge pass. We expect about ceil(logM(r)) merge passes where r is the
* number of initial runs formed and M is the merge order used by tuplesort.c.
* Since the average initial run should be about twice work_mem, we have
* disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
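
That formula, restated as a runnable sketch (toy parameterization; the real cost_sort works in tuples and bytes, not pages alone):

#include <math.h>

static double
toy_sort_disk_pages(double rel_pages, double work_mem_pages,
                    double merge_order)
{
    /* initial runs average about twice work_mem, so r = p / (2*work_mem) */
    double      nruns = rel_pages / (2.0 * work_mem_pages);

    if (nruns <= 1.0)
        return 0.0;             /* fits in memory: no merge passes */
    /* each merge pass writes and reads every page once */
    return 2.0 * rel_pages * ceil(log(nruns) / log(merge_order));
}
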
@@ -1215,7 +1215,7 @@ cost_material(Path *path,
startup_cost += cpu_tuple_cost * 0.1 * tuples;
/*
- * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
+ * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
* so that it doesn't appear worthwhile to materialize a bare seqscan.
*/
run_cost += cpu_tuple_cost * tuples;
@@ -1253,7 +1253,7 @@ cost_agg(Path *path, PlannerInfo *root,
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
* input path is already sorted appropriately, AGG_SORTED should be
* preferred (since it has no risk of memory overflow). This will happen
* as long as the computed total costs are indeed exactly equal --- but if
@@ -1578,7 +1578,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
/*
- * Get approx # tuples passing the mergequals. We use approx_tuple_count
+ * Get approx # tuples passing the mergequals. We use approx_tuple_count
* here because we need an estimate done with JOIN_INNER semantics.
*/
mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
@@ -1595,7 +1595,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* estimated approximately as size of merge join output minus size of
* inner relation. Assume that the distinct key values are 1, 2, ..., and
* denote the number of values of each key in the outer relation as m1,
- * m2, ...; in the inner relation, n1, n2, ... Then we have
+ * m2, ...; in the inner relation, n1, n2, ... Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
@@ -1606,7 +1606,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* This equation works correctly for outer tuples having no inner match
* (nk = 0), but not for inner tuples having no outer match (mk = 0); we
* are effectively subtracting those from the number of rescanned tuples,
- * when we should not. Can we do better without expensive selectivity
+ * when we should not. Can we do better without expensive selectivity
* computations?
*
* The whole issue is moot if we are working from a unique-ified outer
@@ -1801,7 +1801,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*
* Note: we could adjust for SEMI/ANTI joins skipping some qual
@@ -2026,7 +2026,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* If inner relation is too big then we will need to "batch" the join,
* which implies writing and reading most of the tuples to disk an extra
* time. Charge seq_page_cost per page, since the I/O should be nice and
- * sequential. Writing the inner rel counts as startup cost, all the rest
+ * sequential. Writing the inner rel counts as startup cost, all the rest
* as run cost.
*/
if (numbatches > 1)
@@ -2118,7 +2118,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
@@ -2171,7 +2171,7 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we will
+ * evaluation. We need to estimate how much of the output we will
* actually need to scan. NOTE: this logic should agree with the
* tuple_fraction estimates used by make_subplan() in
* plan/subselect.c.
@@ -2326,14 +2326,14 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
*
* Note that Aggref and WindowFunc nodes are (and should be) treated like
* Vars --- whatever execution cost they have is absorbed into
- * plan-node-specific costing. As far as expression evaluation is
+ * plan-node-specific costing. As far as expression evaluation is
* concerned they're just like Vars.
*
* Should we try to account for the possibility of short-circuit
* evaluation of AND/OR? Probably *not*, because that would make the
* results depend on the clause ordering, and we are not in any position
* to expect that the current ordering of the clauses is the one that's
- * going to end up being used. (Is it worth applying order_qual_clauses
+ * going to end up being used. (Is it worth applying order_qual_clauses
* much earlier in the planning process to fix this?)
*/
if (IsA(node, FuncExpr))
@@ -2434,7 +2434,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
else if (IsA(node, AlternativeSubPlan))
{
/*
- * Arbitrarily use the first alternative plan for costing. (We should
+ * Arbitrarily use the first alternative plan for costing. (We should
* certainly only include one alternative, and we don't yet have
* enough information to know which one the executor is most likely to
* use.)
@@ -2560,7 +2560,7 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
/*
* jselec can be interpreted as the fraction of outer-rel rows that have
* any matches (this is true for both SEMI and ANTI cases). And nselec is
- * the fraction of the Cartesian product that matches. So, the average
+ * the fraction of the Cartesian product that matches. So, the average
* number of matches for each outer-rel row that has at least one match is
* nselec * inner_rows / jselec.
*
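
A quick arithmetic check of that claim, under assumed toy numbers: with 100 outer rows, jselec = 0.2 (so 20 outer rows have a match), 1000 inner rows, and nselec = 0.001 (so 100 matching pairs in the 100 x 1000 Cartesian product), the matched outer rows average 100 / 20 = 5 matches apiece, and indeed nselec * inner_rows / jselec = 0.001 * 1000 / 0.2 = 5.

static double
toy_avg_matches(double nselec, double inner_rows, double jselec)
{
    /* average matches per outer row that has at least one match */
    return nselec * inner_rows / jselec;
}
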
@@ -2583,7 +2583,7 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
/*
* If requested, check whether the inner path uses all the joinquals as
- * indexquals. (If that's true, we can assume that an unmatched outer
+ * indexquals. (If that's true, we can assume that an unmatched outer
* tuple is cheap to process, whereas otherwise it's probably expensive.)
*/
if (indexed_join_quals)
@@ -2740,7 +2740,7 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
double nrows;
/*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity. Note that we are only considering
* clauses that become restriction clauses at this join level; we are not
* double-counting them because they were not considered in estimating the
* sizes of the component rels.
@@ -2798,7 +2798,7 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
*
* If we are doing an outer join, take that into account: the joinqual
* selectivity has to be clamped using the knowledge that the output must
- * be at least as large as the non-nullable input. However, any
+ * be at least as large as the non-nullable input. However, any
* pushed-down quals are applied after the outer join, so their
* selectivity applies fully.
*
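
A hedged sketch of that clamping rule (toy function shape; the real set_joinrel_size_estimates handles more join types than this): scale the Cartesian product by the joinqual selectivity, never let an outer join's output drop below its non-nullable input, and only then apply the pushed-down quals at full strength.

static double
toy_outer_join_rows(double outer_rows, double inner_rows,
                    double jselec, double pselec)
{
    double      nrows = outer_rows * inner_rows * jselec;

    if (nrows < outer_rows)
        nrows = outer_rows;     /* output >= non-nullable input */
    return nrows * pselec;      /* pushed-down quals apply after the join */
}
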
@@ -2945,7 +2945,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
* NB: this works best on plain relations because it prefers to look at
* real Vars. It will fail to make use of pg_statistic info when applied
* to a subquery relation, even if the subquery outputs are simple vars
- * that we could have gotten info for. Is it worth trying to be smarter
+ * that we could have gotten info for. Is it worth trying to be smarter
* about subqueries?
*
* The per-attribute width estimates are cached for possible re-use while
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index f40a46b0d7..88eca2a2d2 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -71,7 +71,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root,
*
* If below_outer_join is true, then the clause was found below the nullable
* side of an outer join, so its sides might validly be both NULL rather than
- * strictly equal. We can still deduce equalities in such cases, but we take
+ * strictly equal. We can still deduce equalities in such cases, but we take
* care to mark an EquivalenceClass if it came from any such clauses. Also,
* we have to check that both sides are either pseudo-constants or strict
* functions of Vars, else they might not both go to NULL above the outer
@@ -164,14 +164,14 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* Sweep through the existing EquivalenceClasses looking for matches to
* item1 and item2. These are the possible outcomes:
*
- * 1. We find both in the same EC. The equivalence is already known, so
+ * 1. We find both in the same EC. The equivalence is already known, so
* there's nothing to do.
*
* 2. We find both in different ECs. Merge the two ECs together.
*
* 3. We find just one. Add the other to its EC.
*
- * 4. We find neither. Make a new, two-entry EC.
+ * 4. We find neither. Make a new, two-entry EC.
*
* Note: since all ECs are built through this process or the similar
* search in get_eclass_for_sort_expr(), it's impossible that we'd match
@@ -500,7 +500,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
*
* When an EC contains pseudoconstants, our strategy is to generate
* "member = const1" clauses where const1 is the first constant member, for
- * every other member (including other constants). If we are able to do this
+ * every other member (including other constants). If we are able to do this
* then we don't need any "var = var" comparisons because we've successfully
* constrained all the vars at their points of creation. If we fail to
* generate any of these clauses due to lack of cross-type operators, we fall
@@ -525,7 +525,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
* "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot
* generate "a.x = a.z" as a restriction clause for A.) In this case we mark
* the EC "ec_broken" and fall back to regurgitating its original source
- * RestrictInfos at appropriate times. We do not try to retract any derived
+ * RestrictInfos at appropriate times. We do not try to retract any derived
* clauses already generated from the broken EC, so the resulting plan could
* be poor due to bad selectivity estimates caused by redundant clauses. But
* the correct solution to that is to fix the opfamilies ...
@@ -784,7 +784,7 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* we consider different join paths, we avoid generating multiple copies:
* whenever we select a particular pair of EquivalenceMembers to join,
* we check to see if the pair matches any original clause (in ec_sources)
- * or previously-built clause (in ec_derives). This saves memory and allows
+ * or previously-built clause (in ec_derives). This saves memory and allows
* re-use of information cached in RestrictInfos.
*/
List *
@@ -854,7 +854,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
* First, scan the EC to identify member values that are computable at the
* outer rel, at the inner rel, or at this relation but not in either
* input rel. The outer-rel members should already be enforced equal,
- * likewise for the inner-rel members. We'll need to create clauses to
+ * likewise for the inner-rel members. We'll need to create clauses to
* enforce that any newly computable members are all equal to each other
* as well as to at least one input member, plus enforce at least one
* outer-rel member equal to at least one inner-rel member.
@@ -877,12 +877,12 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
}
/*
- * First, select the joinclause if needed. We can equate any one outer
+ * First, select the joinclause if needed. We can equate any one outer
* member to any one inner member, but we have to find a datatype
* combination for which an opfamily member operator exists. If we have
* choices, we prefer simple Var members (possibly with RelabelType) since
* these are (a) cheapest to compute at runtime and (b) most likely to
- * have useful statistics. Also, if enable_hashjoin is on, we prefer
+ * have useful statistics. Also, if enable_hashjoin is on, we prefer
* operators that are also hashjoinable.
*/
if (outer_members && inner_members)
@@ -1078,8 +1078,8 @@ create_join_clause(PlannerInfo *root,
/*
* Search to see if we already built a RestrictInfo for this pair of
- * EquivalenceMembers. We can use either original source clauses or
- * previously-derived clauses. The check on opno is probably redundant,
+ * EquivalenceMembers. We can use either original source clauses or
+ * previously-derived clauses. The check on opno is probably redundant,
* but be safe ...
*/
foreach(lc, ec->ec_sources)
@@ -1209,7 +1209,7 @@ create_join_clause(PlannerInfo *root,
*
* Outer join clauses that are marked outerjoin_delayed are special: this
* condition means that one or both VARs might go to null due to a lower
- * outer join. We can still push a constant through the clause, but only
+ * outer join. We can still push a constant through the clause, but only
* if its operator is strict; and we *have to* throw the clause back into
* regular joinclause processing. By keeping the strict join clause,
* we ensure that any null-extended rows that are mistakenly generated due
@@ -1398,7 +1398,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
/*
* Yes it does! Try to generate a clause INNERVAR = CONSTANT for each
- * CONSTANT in the EC. Note that we must succeed with at least one
+ * CONSTANT in the EC. Note that we must succeed with at least one
* constant before we can decide to throw away the outer-join clause.
*/
match = false;
@@ -1858,7 +1858,7 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
* a joinclause between the two given relations.
*
* This is essentially a very cut-down version of
- * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
+ * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
* incorrectly. Hence we don't bother with details like whether the lack of a
* cross-type operator might prevent the clause from actually being generated.
*/
@@ -1888,7 +1888,7 @@ have_relevant_eclass_joinclause(PlannerInfo *root,
* as a possibly-overoptimistic heuristic.
*
* We don't test ec_has_const either, even though a const eclass won't
- * generate real join clauses. This is because if we had "WHERE a.x =
+ * generate real join clauses. This is because if we had "WHERE a.x =
* b.y and a.x = 42", it is worth considering a join between a and b,
* since the join result is likely to be small even though it'll end
* up being an unqualified nestloop.
@@ -1962,7 +1962,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* as a possibly-overoptimistic heuristic.
*
* We don't test ec_has_const either, even though a const eclass won't
- * generate real join clauses. This is because if we had "WHERE a.x =
+ * generate real join clauses. This is because if we had "WHERE a.x =
* b.y and a.x = 42", it is worth considering a join between a and b,
* since the join result is likely to be small even though it'll end
* up being an unqualified nestloop.
@@ -2009,7 +2009,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* against the specified relation.
*
* This is just a heuristic test and doesn't have to be exact; it's better
- * to say "yes" incorrectly than "no". Hence we don't bother with details
+ * to say "yes" incorrectly than "no". Hence we don't bother with details
* like whether the lack of a cross-type operator might prevent the clause
* from actually being generated.
*/
@@ -2030,7 +2030,7 @@ eclass_useful_for_merging(EquivalenceClass *eclass,
/*
* Note we don't test ec_broken; if we did, we'd need a separate code path
- * to look through ec_sources. Checking the members anyway is OK as a
+ * to look through ec_sources. Checking the members anyway is OK as a
* possibly-overoptimistic heuristic.
*/
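
Stepping back to the four match outcomes enumerated in this file's process_equivalence comment above, a structural sketch (the toy_* helpers are hypothetical and only declared here, not the real EC machinery):

typedef struct ToyEC ToyEC;     /* opaque equivalence class */

extern ToyEC *toy_find_ec(const void *item);
extern void toy_merge(ToyEC *a, ToyEC *b);
extern void toy_add_member(ToyEC *ec, const void *item);
extern void toy_make_two_entry_ec(const void *a, const void *b);

static void
toy_process_equivalence(const void *item1, const void *item2)
{
    ToyEC      *ec1 = toy_find_ec(item1);
    ToyEC      *ec2 = toy_find_ec(item2);

    if (ec1 && ec2)
    {
        if (ec1 == ec2)
            ;                   /* case 1: already known, nothing to do */
        else
            toy_merge(ec1, ec2);    /* case 2: merge the two ECs */
    }
    else if (ec1)
        toy_add_member(ec1, item2); /* case 3: add the other to its EC */
    else if (ec2)
        toy_add_member(ec2, item1); /* case 3, mirrored */
    else
        toy_make_two_entry_ec(item1, item2);    /* case 4: new two-entry EC */
}
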
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index b0660ad977..5a5253f4ec 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -139,7 +139,7 @@ static Const *string_to_const(const char *str, Oid datatype);
* scan this routine deems potentially interesting for the current query.
*
* We also determine the set of other relids that participate in join
- * clauses that could be used with each index. The actually best innerjoin
+ * clauses that could be used with each index. The actually best innerjoin
* path will be generated for each outer relation later on, but knowing the
* set of potential otherrels allows us to identify equivalent outer relations
* and avoid repeated computation.
@@ -315,16 +315,16 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Ignore partial indexes that do not match the query. If a partial
+ * Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK; otherwise, if we are
* at top level we know it's not OK (since predOK is exactly whether
* its predicate could be proven from the toplevel clauses).
* Otherwise, we have to test whether the added clauses are sufficient
- * to imply the predicate. If so, we could use the index in the
+ * to imply the predicate. If so, we could use the index in the
* current context.
*
* We set useful_predicate to true iff the predicate was proven using
- * the current set of clauses. This is needed to prevent matching a
+ * the current set of clauses. This is needed to prevent matching a
* predOK index to an arm of an OR, which would be a legal but
* pointlessly inefficient plan. (A better plan will be generated by
* just scanning the predOK index alone, no OR.)
@@ -600,7 +600,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
- * given path set. We want to choose a good tradeoff between selectivity
+ * given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
@@ -628,12 +628,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. Moreover, it's completely impractical if there are a large
+ * OR clauses. Moreover, it's completely impractical if there are a large
* number of paths, since the work would grow as O(2^N).
*
* As a heuristic, we first check for paths using exactly the same sets of
* WHERE clauses + index predicate conditions, and reject all but the
- * cheapest-to-scan in any such group. This primarily gets rid of indexes
+ * cheapest-to-scan in any such group. This primarily gets rid of indexes
* that include the interesting columns but also irrelevant columns. (In
* situations where the DBA has gone overboard on creating variant
* indexes, this can make for a very large reduction in the number of
@@ -653,14 +653,14 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
* costsize.c and clausesel.c aren't very smart about redundant clauses.
* They will usually double-count the redundant clauses, producing a
* too-small selectivity that makes a redundant AND step look like it
- * reduces the total cost. Perhaps someday that code will be smarter and
+ * reduces the total cost. Perhaps someday that code will be smarter and
* we can remove this limitation. (But note that this also defends
* against flat-out duplicate input paths, which can happen because
* best_inner_indexscan will find the same OR join clauses that
* create_or_index_quals has pulled OR restriction clauses out of.)
*
* For the same reason, we reject AND combinations in which an index
- * predicate clause duplicates another clause. Here we find it necessary
+ * predicate clause duplicates another clause. Here we find it necessary
* to be even stricter: we'll reject a partial index if any of its
* predicate clauses are implied by the set of WHERE clauses and predicate
* clauses used so far. This covers cases such as a condition "x = 42"
@@ -723,7 +723,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
/*
* For each surviving index, consider it as an "AND group leader", and see
* whether adding on any of the later indexes results in an AND path with
- * cheaper total cost than before. Then take the cheapest AND group.
+ * cheaper total cost than before. Then take the cheapest AND group.
*/
for (i = 0; i < npaths; i++)
{
@@ -979,7 +979,7 @@ find_indexpath_quals(Path *bitmapqual, List **quals, List **preds)
/*
* find_list_position
* Return the given node's position (counting from 0) in the given
- * list of nodes. If it's not equal() to any existing list member,
+ * list of nodes. If it's not equal() to any existing list member,
* add it at the end, and return that position.
*/
static int
@@ -1020,7 +1020,7 @@ find_list_position(Node *node, List **nodelist)
*
* We can use clauses from either the current clauses or outer_clauses lists,
* but *found_clause is set TRUE only if we used at least one clause from
- * the "current clauses" list. See find_usable_indexes() for motivation.
+ * the "current clauses" list. See find_usable_indexes() for motivation.
*
* outer_relids determines what Vars will be allowed on the other side
* of a possible index qual; see match_clause_to_indexcol().
@@ -1139,7 +1139,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* to the caller-specified outer_relids relations (which had better not
* include the relation whose index is being tested). outer_relids should
* be NULL when checking simple restriction clauses, and the outer side
- * of the join when building a join inner scan. Other than that, the
+ * of the join when building a join inner scan. Other than that, the
* only thing we don't like is volatile functions.
*
* Note: in most cases we already know that the clause as a whole uses
@@ -1157,7 +1157,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* It is also possible to match RowCompareExpr clauses to indexes (but
* currently, only btree indexes handle this). In this routine we will
* report a match if the first column of the row comparison matches the
- * target index column. This is sufficient to guarantee that some index
+ * target index column. This is sufficient to guarantee that some index
* condition can be constructed from the RowCompareExpr --- whether the
* remaining columns match the index too is considered in
* expand_indexqual_rowcompare().
@@ -1198,7 +1198,7 @@ match_clause_to_indexcol(IndexOptInfo *index,
bool plain_op;
/*
- * Never match pseudoconstants to indexes. (Normally this could not
+ * Never match pseudoconstants to indexes. (Normally this could not
* happen anyway, since a pseudoconstant clause couldn't contain a Var,
* but what if someone builds an expression index on a constant? It's not
* totally unreasonable to do so with a partial index, either.)
@@ -1437,7 +1437,7 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
- * for the specified table. Returns a set of relids.
+ * for the specified table. Returns a set of relids.
*/
static Relids
indexable_outerrelids(PlannerInfo *root, RelOptInfo *rel)
@@ -1631,7 +1631,7 @@ eclass_matches_any_index(EquivalenceClass *ec, EquivalenceMember *em,
* compatible with the EC, since no clause generated from the EC
* could be used with the index. For non-btree indexes, we can't
* easily tell whether clauses generated from the EC could be used
- * with the index, so only check for expression match. This might
+ * with the index, so only check for expression match. This might
* mean we return "true" for a useless index, but that will just
* cause some wasted planner cycles; it's better than ignoring
* useful indexes.
@@ -1731,7 +1731,7 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
/*
* Look to see if we already computed the result for this set of relevant
* outerrels. (We include the isouterjoin status in the cache lookup key
- * for safety. In practice I suspect this is not necessary because it
+ * for safety. In practice I suspect this is not necessary because it
* should always be the same for a given combination of rels.)
*
* NOTE: because we cache on outer_relids rather than outer_rel->relids,
@@ -1760,7 +1760,7 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
*
* Note: because we include restriction clauses, we will find indexscans
* that could be plain indexscans, ie, they don't require the join context
- * at all. This may seem redundant, but we need to include those scans in
+ * at all. This may seem redundant, but we need to include those scans in
* the input given to choose_bitmap_and() to be sure we find optimal AND
* combinations of join and non-join scans. Also, even if the "best inner
* indexscan" is just a plain indexscan, it will have a different cost
@@ -1898,7 +1898,7 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
/*
* Also check to see if any EquivalenceClasses can produce a relevant
- * joinclause. Since all such clauses are effectively pushed-down, this
+ * joinclause. Since all such clauses are effectively pushed-down, this
* doesn't apply to outer joins.
*/
if (!isouterjoin && rel->has_eclass_joins)
@@ -1964,7 +1964,7 @@ match_index_to_operand(Node *operand,
int indkey;
/*
- * Ignore any RelabelType node above the operand. This is needed to be
+ * Ignore any RelabelType node above the operand. This is needed to be
* able to apply indexscanning in binary-compatible-operator cases. Note:
* we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
@@ -2031,10 +2031,10 @@ match_index_to_operand(Node *operand,
* indexscan machinery. The key idea is that these operators allow us
* to derive approximate indexscan qual clauses, such that any tuples
* that pass the operator clause itself must also satisfy the simpler
- * indexscan condition(s). Then we can use the indexscan machinery
+ * indexscan condition(s). Then we can use the indexscan machinery
* to avoid scanning as much of the table as we'd otherwise have to,
* while applying the original operator as a qpqual condition to ensure
- * we deliver only the tuples we want. (In essence, we're using a regular
+ * we deliver only the tuples we want. (In essence, we're using a regular
* index as if it were a lossy index.)
*
* An example of what we're doing is
@@ -2048,7 +2048,7 @@ match_index_to_operand(Node *operand,
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
- * that support boolean equality). We can transform a plain reference
+ * that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
@@ -2458,7 +2458,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily)
/*
* LIKE and regex operators are not members of any btree index opfamily,
* but they can be members of opfamilies for more exotic index types such
- * as GIN. Therefore, we should only do expansion if the operator is
+ * as GIN. Therefore, we should only do expansion if the operator is
* actually not in the opfamily. But checking that requires a syscache
* lookup, so it's best to first see if the operator is one we are
* interested in.
@@ -2541,7 +2541,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily)
* column matches) or a simple OpExpr (if the first-column match is all
* there is). In these cases the modified clause is always "<=" or ">="
* even when the original was "<" or ">" --- this is necessary to match all
- * the rows that could match the original. (We are essentially building a
+ * the rows that could match the original. (We are essentially building a
* lossy version of the row comparison when we do this.)
*/
static RestrictInfo *
@@ -2622,7 +2622,7 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
break; /* no good, volatile comparison value */
/*
- * The Var side can match any column of the index. If the user does
+ * The Var side can match any column of the index. If the user does
* something weird like having multiple identical index columns, we
* insist the match be on the first such column, to avoid confusing
* the executor.
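
And for the boolean-index smarts mentioned a few hunks above, a toy rendering of the rewrite (hypothetical types; the real code builds operator expressions over the index opfamily): a bare boolean indexkey becomes indexkey = true, and NOT indexkey becomes indexkey = false, making both forms indexable by the ordinary equality operator.

#include <stdbool.h>

typedef struct ToyBoolQual
{
    const char *indexkey;       /* name of the indexed boolean column */
    bool        value;          /* compare against true or false */
} ToyBoolQual;

static ToyBoolQual
toy_expand_boolean_qual(const char *expr_column, bool negated)
{
    ToyBoolQual qual;

    qual.indexkey = expr_column;
    qual.value = !negated;      /* "x" => x = true; "NOT x" => x = false */
    return qual;
}
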
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index dec484d4af..4b0804cd5e 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -110,7 +110,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 3. Consider paths where the inner relation need not be explicitly
- * sorted. This includes mergejoins only (nestloops were already built in
+ * sorted. This includes mergejoins only (nestloops were already built in
* match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
@@ -223,7 +223,7 @@ sort_inner_and_outer(PlannerInfo *root,
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * partially redundant (refer to the same EquivalenceClasses). Therefore,
+ * partially redundant (refer to the same EquivalenceClasses). Therefore,
* what we do is convert the mergeclause list to a list of canonical
* pathkeys, and then consider different orderings of the pathkeys.
*
@@ -316,7 +316,7 @@ sort_inner_and_outer(PlannerInfo *root,
* cheapest-total inner-indexscan path (if any), and one on the
* cheapest-startup inner-indexscan path (if different).
*
- * We also consider mergejoins if mergejoin clauses are available. We have
+ * We also consider mergejoins if mergejoin clauses are available. We have
* two ways to generate the inner path for a mergejoin: sort the cheapest
* inner path, or use an inner path that is already suitably ordered for the
* merge. If we have several mergeclauses, it could be that there is no inner
@@ -639,7 +639,7 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
* destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
@@ -877,7 +877,7 @@ hash_inner_and_outer(PlannerInfo *root,
* best_appendrel_indexscan
* Finds the best available set of inner indexscans for a nestloop join
* with the given append relation on the inside and the given outer_rel
- * outside. Returns an AppendPath comprising the best inner scans, or
+ * outside. Returns an AppendPath comprising the best inner scans, or
* NULL if there are no possible inner indexscans.
*
* Note that we currently consider only cheapest-total-cost. It's not
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 1fbd9ee239..0f522dca32 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -191,7 +191,7 @@ join_search_one_level(PlannerInfo *root, int level, List **joinrels)
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
* The join clause will be usable at level 3, but at level 2 we have no
- * choice but to make cartesian joins. We consider only left-sided and
+ * choice but to make cartesian joins. We consider only left-sided and
* right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
@@ -221,7 +221,7 @@ join_search_one_level(PlannerInfo *root, int level, List **joinrels)
/*----------
* When special joins are involved, there may be no legal way
- * to make an N-way join for some values of N. For example consider
+ * to make an N-way join for some values of N. For example consider
*
* SELECT ... FROM t1 WHERE
* x IN (SELECT ... FROM t2,t3 WHERE ...) AND
@@ -355,7 +355,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
ListCell *l;
/*
- * Ensure output params are set on failure return. This is just to
+ * Ensure output params are set on failure return. This is just to
* suppress uninitialized-variable warnings from overly anal compilers.
*/
*sjinfo_p = NULL;
@@ -363,7 +363,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* If we have any special joins, the proposed join might be illegal; and
- * in any case we have to determine its join type. Scan the join info
+ * in any case we have to determine its join type. Scan the join info
* list for conflicts.
*/
match_sjinfo = NULL;
@@ -586,7 +586,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
/*
* If it's a plain inner join, then we won't have found anything in
- * join_info_list. Make up a SpecialJoinInfo so that selectivity
+ * join_info_list. Make up a SpecialJoinInfo so that selectivity
* estimation functions will know what's being joined.
*/
if (sjinfo == NULL)
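A hedged sketch of what "make up a SpecialJoinInfo" means for the plain-inner-join case; the field names follow the SpecialJoinInfo node type, but this is an illustration rather than the exact committed code:

    /* Sketch: a stack-allocated SpecialJoinInfo describing a plain inner
     * join of rel1 and rel2, for use by selectivity estimation only. */
    SpecialJoinInfo sjinfo_data;

    sjinfo_data.type = T_SpecialJoinInfo;
    sjinfo_data.min_lefthand = rel1->relids;
    sjinfo_data.min_righthand = rel2->relids;
    sjinfo_data.syn_lefthand = rel1->relids;
    sjinfo_data.syn_righthand = rel2->relids;
    sjinfo_data.jointype = JOIN_INNER;
    sjinfo = &sjinfo_data;      /* never entered in join_info_list */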
@@ -850,7 +850,7 @@ have_join_order_restriction(PlannerInfo *root,
*
* Essentially, this tests whether have_join_order_restriction() could
* succeed with this rel and some other one. It's OK if we sometimes
- * say "true" incorrectly. (Therefore, we don't bother with the relatively
+ * say "true" incorrectly. (Therefore, we don't bother with the relatively
* expensive has_legal_joinclause test.)
*/
static bool
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index 6a644f9a0f..739d6c0a8d 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -41,7 +41,7 @@
*
* The added quals are partially redundant with the original OR, and therefore
* will cause the size of the joinrel to be underestimated when it is finally
- * formed. (This would be true of a full transformation to CNF as well; the
+ * formed. (This would be true of a full transformation to CNF as well; the
* fault is not really in the transformation, but in clauselist_selectivity's
* inability to recognize redundant conditions.) To minimize the collateral
* damage, we want to minimize the number of quals added. Therefore we do
@@ -56,7 +56,7 @@
* it is finally formed. This is a MAJOR HACK: it depends on the fact
* that clause selectivities are cached and on the fact that the same
* RestrictInfo node will appear in every joininfo list that might be used
- * when the joinrel is formed. And it probably isn't right in cases where
+ * when the joinrel is formed. And it probably isn't right in cases where
* the size estimation is nonlinear (i.e., outer and IN joins). But it
* beats not doing anything.
*
@@ -96,10 +96,10 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* enforced at the relation scan level.
*
* We must also ignore clauses that are marked !is_pushed_down (ie they
- * are themselves outer-join clauses). It would be safe to extract an
+ * are themselves outer-join clauses). It would be safe to extract an
* index condition from such a clause if we are within the nullable rather
* than the non-nullable side of its join, but we haven't got enough
- * context here to tell which applies. OR clauses in outer-join quals
+ * context here to tell which applies. OR clauses in outer-join quals
* aren't exactly common, so we'll let that case go unoptimized for now.
*/
foreach(i, rel->joininfo)
@@ -114,7 +114,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
- * indexquals. We pass outer_rel = NULL so that sub-clauses that
+ * indexquals. We pass outer_rel = NULL so that sub-clauses that
* are actually joins will be ignored.
*/
List *orpaths;
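A sketch of the call the comment describes; the generate_bitmap_or_paths() signature is assumed from this era of the code (the clauses to consider, other usable clauses, and an outer_rel that is NULL here so that sub-clauses which are really joins get ignored):

    /* Sketch: estimate the OR clause's worth by building bitmap OR paths
     * from it together with the rel's ordinary restriction clauses. */
    orpaths = generate_bitmap_or_paths(root, rel,
                                       list_make1(rinfo),
                                       rel->baserestrictinfo,
                                       NULL);   /* outer_rel: ignore join sub-clauses */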
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 215cb7c924..510805bda6 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -253,7 +253,7 @@ make_pathkey_from_sortinfo(PlannerInfo *root,
* builds a reverse-sort opfamily, but there's not much point in that
* anymore. But EquivalenceClasses need to contain opfamily lists based
* on the family membership of equality operators, which could easily be
- * bigger. So, look up the equality operator that goes with the ordering
+ * bigger. So, look up the equality operator that goes with the ordering
* operator (this should be unique) and get its membership.
*/
@@ -410,7 +410,7 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
- * that first. (XXX is that still true?)
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
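The shape of the micro-optimization this comment describes, sketched under the assumption that compare_path_costs() is cheap while pathkeys_contained_in() is the expensive test; paths, pathkeys, and cost_criterion are taken to be the surrounding function's parameters:

    /* Sketch: scan the path list, testing cost before pathkeys. */
    Path       *matched_path = NULL;
    ListCell   *l;

    foreach(l, paths)
    {
        Path       *path = (Path *) lfirst(l);

        /* Cheap test first: a path that cannot beat the current best on
         * cost never needs the expensive pathkey comparison at all. */
        if (matched_path != NULL &&
            compare_path_costs(matched_path, path, cost_criterion) <= 0)
            continue;

        if (pathkeys_contained_in(pathkeys, path->pathkeys))
            matched_path = path;
    }
    return matched_path;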
@@ -473,7 +473,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
* its sortops arrays, and we will return NIL.)
*
* If 'scandir' is BackwardScanDirection, attempt to build pathkeys
- * representing a backwards scan of the index. Return NIL if can't do it.
+ * representing a backwards scan of the index. Return NIL if can't do it.
*
* The result is canonical, meaning that redundant pathkeys are removed;
* it may therefore have fewer entries than there are index columns.
@@ -580,7 +580,7 @@ find_indexkey_var(PlannerInfo *root, RelOptInfo *rel, AttrNumber varattno)
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
- * result, in the terms of the outer query. This is essentially a
+ * result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
@@ -663,7 +663,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Otherwise, the sub_pathkey's EquivalenceClass could contain
* multiple elements (representing knowledge that multiple items
- * are effectively equal). Each element might match none, one, or
+ * are effectively equal). Each element might match none, one, or
* more of the output columns that are visible to the outer query.
* This means we may have multiple possible representations of the
* sub_pathkey in the context of the outer query. Ideally we
@@ -1031,7 +1031,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
@@ -1063,7 +1063,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* Returns a pathkeys list that can be applied to the outer relation.
*
* Since we assume here that a sort is required, there is no particular use
- * in matching any available ordering of the outerrel. (joinpath.c has an
+ * in matching any available ordering of the outerrel. (joinpath.c has an
* entirely separate code path for considering sort-free mergejoins.) Rather,
* it's interesting to try to match the requested query_pathkeys so that a
* second output sort may be avoided; and failing that, we try to list "more
@@ -1394,7 +1394,7 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched)
@@ -1424,7 +1424,7 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey)
pathkey->pk_opfamily == query_pathkey->pk_opfamily)
{
/*
- * Found a matching query sort column. Prefer this pathkey's
+ * Found a matching query sort column. Prefer this pathkey's
* direction iff it matches. Note that we ignore pk_nulls_first,
* which means that a sort might be needed anyway ... but we still
* want to prefer only one of the two possible directions, and we
@@ -1500,13 +1500,13 @@ truncate_useless_pathkeys(PlannerInfo *root,
* useful according to truncate_useless_pathkeys().
*
* This is a cheap test that lets us skip building pathkeys at all in very
- * simple queries. It's OK to err in the direction of returning "true" when
+ * simple queries. It's OK to err in the direction of returning "true" when
* there really aren't any usable pathkeys, but erring in the other direction
* is bad --- so keep this in sync with the routines above!
*
* We could make the test more complex, for example checking to see if any of
* the joinclauses are really mergejoinable, but that likely wouldn't win
- * often enough to repay the extra cycles. Queries with neither a join nor
+ * often enough to repay the extra cycles. Queries with neither a join nor
* a sort are reasonably common, though, so this much work seems worthwhile.
*/
bool
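A sketch of the cheap conservative test described above: it may say "true" when no pathkey turns out to be useful, but it must never say "false" when one would be. Field names are assumed from this era's RelOptInfo and PlannerInfo:

    /* Sketch only.  Erring toward "true" is safe; erring toward "false"
     * would make the planner silently skip useful pathkeys. */
    static bool
    has_useful_pathkeys_sketch(PlannerInfo *root, RelOptInfo *rel)
    {
        if (rel->joininfo != NIL)
            return true;        /* might use pathkeys for merging */
        if (root->query_pathkeys != NIL)
            return true;        /* might use them for output ordering */
        return false;           /* definitely useless */
    }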
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 0796ace43a..b5f119618f 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -19,7 +19,7 @@
* representation all the way through to execution.
*
* There is currently no special support for joins involving CTID; in
- * particular nothing corresponding to best_inner_indexscan(). Since it's
+ * particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
@@ -57,7 +57,7 @@ static List *TidQualFromRestrictinfo(List *restrictinfo, int varno);
* or
* pseudoconstant = CTID
*
- * We check that the CTID Var belongs to relation "varno". That is probably
+ * We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index ab07acae8f..1247a17216 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -211,7 +211,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
/*
* For table scans, rather than using the relation targetlist (which is
* only those Vars actually needed by the query), we prefer to generate a
- * tlist containing all Vars in order. This will allow the executor to
+ * tlist containing all Vars in order. This will allow the executor to
* optimize away projection of the table tuples, if possible. (Note that
* planner.c may replace the tlist we generate here, forcing projection to
* occur.)
@@ -402,7 +402,7 @@ use_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
*
* If the plan node immediately above a scan would prefer to get only
* needed Vars and not a physical tlist, it must call this routine to
- * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
+ * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* and Material nodes want this, so they don't have to store useless columns.
*/
static void
@@ -520,7 +520,7 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
/*
* * Expensive function pullups may have pulled local predicates * into
- * this path node. Put them in the qpqual of the plan node. * JMH,
+ * this path node. Put them in the qpqual of the plan node. * JMH,
* 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
@@ -549,7 +549,7 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path)
/*
* It is possible for the subplans list to contain only one entry, or even
- * no entries. Handle these cases specially.
+ * no entries. Handle these cases specially.
*
* XXX ideally, if there's just one entry, we'd not bother to generate an
* Append node but just return the single child. At the moment this does
@@ -1078,7 +1078,7 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* When dealing with special operators, we will at this point have
- * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
+ * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
* 'em from bitmapqualorig, since there's no point in making the tests
* twice.
*/
@@ -1168,7 +1168,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -1258,7 +1258,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* We know that the index predicate must have been implied by the
* query condition as a whole, but it may or may not be implied by
- * the conditions that got pushed into the bitmapqual. Avoid
+ * the conditions that got pushed into the bitmapqual. Avoid
* generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
@@ -2321,7 +2321,7 @@ order_qual_clauses(PlannerInfo *root, List *clauses)
/*
* Sort. We don't use qsort() because it's not guaranteed stable for
- * equal keys. The expected number of entries is small enough that a
+ * equal keys. The expected number of entries is small enough that a
* simple insertion sort should be good enough.
*/
for (i = 1; i < nitems; i++)
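The stability requirement is the whole point of hand-rolling this sort: C's qsort() may reorder equal keys, whereas an insertion sort with a strict comparison leaves them alone. A minimal sketch, with QualItem standing in for an assumed local (clause, cost) item type:

    typedef struct { Node *clause; Cost cost; } QualItem;   /* assumed item type */

    /* Sketch: stable insertion sort over (cost, clause) items.  Shifting
     * only on a strictly-greater comparison keeps equal-cost items in
     * their original order; fine for the small arrays expected here. */
    for (i = 1; i < nitems; i++)
    {
        QualItem    newitem = items[i];
        int         j;

        for (j = i; j > 0; j--)
        {
            if (newitem.cost >= items[j - 1].cost)
                break;          /* >= keeps equal keys stable */
            items[j] = items[j - 1];
        }
        items[j] = newitem;
    }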
@@ -2867,7 +2867,7 @@ make_mergejoin(List *tlist,
* make_sort --- basic routine to build a Sort plan node
*
* Caller must have built the sortColIdx, sortOperators, and nullsFirst
- * arrays already. limit_tuples is as for cost_sort (in particular, pass
+ * arrays already. limit_tuples is as for cost_sort (in particular, pass
* -1 if no limit)
*/
static Sort *
@@ -2906,7 +2906,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
* once as a sort key column; if so, the extra mentions are redundant.
*
* Caller is assumed to have allocated the arrays large enough for the
- * max possible number of columns. Return value is the new column count.
+ * max possible number of columns. Return value is the new column count.
*/
static int
add_sort_column(AttrNumber colIdx, Oid sortOp, bool nulls_first,
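A sketch of the duplicate-suppression contract: scan the columns already chosen and only append (and bump the count) when this column is new. The arrays are assumed pre-allocated for the maximum possible width, as the comment requires; the real routine also considers the sort operator, which this sketch omits:

    /* Sketch only. */
    static int
    add_sort_column_sketch(AttrNumber colIdx, Oid sortOp, bool nulls_first,
                           int numCols, AttrNumber *sortColIdx,
                           Oid *sortOperators, bool *nullsFirst)
    {
        int         i;

        for (i = 0; i < numCols; i++)
        {
            if (sortColIdx[i] == colIdx)
                return numCols;         /* already sorting by this column */
        }
        sortColIdx[numCols] = colIdx;
        sortOperators[numCols] = sortOp;
        nullsFirst[numCols] = nulls_first;
        return numCols + 1;             /* new column count */
    }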
@@ -3008,7 +3008,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
/*
* Otherwise, we can sort by any non-constant expression listed in
* the pathkey's EquivalenceClass. For now, we take the first one
- * that corresponds to an available item in the tlist. If there
+ * that corresponds to an available item in the tlist. If there
* isn't any, use the first one that is an expression in the
* input's vars. (The non-const restriction only matters if the
* EC is below_outer_join; but if it isn't, it won't contain
@@ -3264,7 +3264,7 @@ make_material(Plan *lefttree)
* materialize_finished_plan: stick a Material node atop a completed plan
*
* There are a couple of places where we want to attach a Material node
- * after completion of subquery_planner(). This currently requires hackery.
+ * after completion of subquery_planner(). This currently requires hackery.
* Since subquery_planner has already run SS_finalize_plan on the subplan
* tree, we have to kluge up parameter lists for the Material node.
* Possibly this could be fixed by postponing SS_finalize_plan processing
@@ -3476,7 +3476,7 @@ make_group(PlannerInfo *root,
/*
* distinctList is a list of SortGroupClauses, identifying the targetlist items
- * that should be considered by the Unique filter. The input path must
+ * that should be considered by the Unique filter. The input path must
* already be sorted accordingly.
*/
Unique *
@@ -3494,7 +3494,7 @@ make_unique(Plan *lefttree, List *distinctList)
/*
* Charge one cpu_operator_cost per comparison per input tuple. We assume
- * all columns get compared at most of the tuples. (XXX probably this is
+ * all columns get compared at most of the tuples. (XXX probably this is
* an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
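For scale: assuming the default cpu_operator_cost of 0.0025, a Unique node over 10,000 input rows comparing 3 columns adds 0.0025 * 10000 * 3 = 75 to total_cost. As the comment notes, this is an overestimate whenever an earlier column already decides the comparison; the arithmetic here is for illustration only.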
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index be0ace6c0c..68e6d7abf1 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -76,12 +76,12 @@ static void check_hashjoinable(RestrictInfo *restrictinfo);
* appearing in the jointree.
*
* The initial invocation must pass root->parse->jointree as the value of
- * jtnode. Internally, the function recurses through the jointree.
+ * jtnode. Internally, the function recurses through the jointree.
*
* At the end of this process, there should be one baserel RelOptInfo for
* every non-join RTE that is used in the query. Therefore, this routine
* is the only place that should call build_simple_rel with reloptkind
- * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
+ * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
* "other rel" RelOptInfos for the members of any appendrels we find here.)
*/
void
@@ -221,7 +221,7 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
* deconstruct_jointree
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate restrictinfo and joininfo
- * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
+ * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
* to root->join_info_list for any outer joins appearing in the query tree.
* Return a "joinlist" data structure showing the join order decisions
* that need to be made by make_one_rel().
@@ -238,9 +238,9 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
* be evaluated at the lowest level where all the variables it mentions are
* available. However, we cannot push a qual down into the nullable side(s)
* of an outer join since the qual might eliminate matching rows and cause a
- * NULL row to be incorrectly emitted by the join. Therefore, we artificially
+ * NULL row to be incorrectly emitted by the join. Therefore, we artificially
* OR the minimum-relids of such an outer join into the required_relids of
- * clauses appearing above it. This forces those clauses to be delayed until
+ * clauses appearing above it. This forces those clauses to be delayed until
* application of the outer join (or maybe even higher in the join tree).
*/
List *
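A sketch of the "artificially OR the minimum relids" mechanism: widening a qual's required relids is what delays it, because a qual can be evaluated only at a join level that covers all of its required relids. The variable names here are illustrative, not the committed code:

    /* Sketch: a qual appearing above an outer join is forced to wait for
     * that join by widening its required relids to include the join's
     * minimum LHS+RHS (ojscope). */
    Relids      required_relids = pull_varnos(clause);

    if (ojscope != NULL)
        required_relids = bms_union(required_relids, ojscope);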
@@ -376,7 +376,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
* regard for whether this level is an outer join, which is correct.
* Then we place our own join quals, which are restricted by lower
* outer joins in any case, and are forced to this level if this is an
- * outer join and they mention the outer side. Finally, if this is an
+ * outer join and they mention the outer side. Finally, if this is an
* outer join, we create a join_info_list entry for the join. This
* will prevent quals above us in the join tree that use those rels
* from being pushed down below this level. (It's okay for upper
@@ -577,7 +577,7 @@ make_outerjoininfo(PlannerInfo *root,
* any nullable rel is FOR UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
- * parser. It's because the parser hasn't got enough info --- consider
+ * parser. It's because the parser hasn't got enough info --- consider
* FOR UPDATE applied to a view. Only after rewriting and flattening do
* we know whether the view contains an outer join.
*/
@@ -629,7 +629,7 @@ make_outerjoininfo(PlannerInfo *root,
min_lefthand = bms_intersect(clause_relids, left_rels);
/*
- * Similarly for required RHS. But here, we must also include any lower
+ * Similarly for required RHS. But here, we must also include any lower
* inner joins, to ensure we don't try to commute with any of them.
*/
min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels),
@@ -681,7 +681,7 @@ make_outerjoininfo(PlannerInfo *root,
* Here, we have to consider that "our join condition" includes any
* clauses that syntactically appeared above the lower OJ and below
* ours; those are equivalent to degenerate clauses in our OJ and must
- * be treated as such. Such clauses obviously can't reference our
+ * be treated as such. Such clauses obviously can't reference our
* LHS, and they must be non-strict for the lower OJ's RHS (else
* reduce_outer_joins would have reduced the lower OJ to a plain
* join). Hence the other ways in which we handle clauses within our
@@ -765,7 +765,7 @@ make_outerjoininfo(PlannerInfo *root,
* distribute_qual_to_rels
* Add clause information to either the baserestrictinfo or joininfo list
* (depending on whether the clause is a join) of each base relation
- * mentioned in the clause. A RestrictInfo node is created and added to
+ * mentioned in the clause. A RestrictInfo node is created and added to
* the appropriate list for each rel. Alternatively, if the clause uses a
* mergejoinable operator and is not delayed by outer-join rules, enter
* the left- and right-side expressions into the query's list of
@@ -847,10 +847,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* gating Result plan node. We put such a clause into the regular
* RestrictInfo lists for the moment, but eventually createplan.c will
* pull it out and make a gating Result node immediately above whatever
- * plan node the pseudoconstant clause is assigned to. It's usually best
+ * plan node the pseudoconstant clause is assigned to. It's usually best
* to put a gating node as high in the plan tree as possible. If we are
* not below an outer join, we can actually push the pseudoconstant qual
- * all the way to the top of the tree. If we are below an outer join, we
+ * all the way to the top of the tree. If we are below an outer join, we
* leave the qual at its original syntactic level (we could push it up to
* just below the outer join, but that seems more complex than it's
* worth).
@@ -904,7 +904,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* Note: it is not immediately obvious that a simple boolean is enough
* for this: if for some reason we were to attach a degenerate qual to
* its original join level, it would need to be treated as an outer join
- * qual there. However, this cannot happen, because all the rels the
+ * qual there. However, this cannot happen, because all the rels the
* clause mentions must be in the outer join's min_righthand, therefore
* the join it needs must be formed before the outer join; and we always
* attach quals to the lowest level where they can be evaluated. But
@@ -938,7 +938,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* We can't use such a clause to deduce equivalence (the left and
* right sides might be unequal above the join because one of them has
* gone to NULL) ... but we might be able to use it for more limited
- * deductions, if it is mergejoinable. So consider adding it to the
+ * deductions, if it is mergejoinable. So consider adding it to the
* lists of set-aside outer-join clauses.
*/
is_pushed_down = false;
@@ -968,7 +968,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
- * Normal qual clause or degenerate outer-join clause. Either way, we
+ * Normal qual clause or degenerate outer-join clause. Either way, we
* can mark it as pushed-down.
*/
is_pushed_down = true;
@@ -1143,7 +1143,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have
* all the rels it mentions, and (2) we are at or above any outer joins that
* can null any of these rels and are below the syntactic location of the
- * given qual. We must enforce (2) because pushing down such a clause below
+ * given qual. We must enforce (2) because pushing down such a clause below
* the OJ might cause the OJ to emit null-extended rows that should not have
* been formed, or that should have been rejected by the clause. (This is
* only an issue for non-strict quals, since if we can prove a qual mentioning
@@ -1169,7 +1169,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* required relids overlap the LHS too) causes that OJ's delay_upper_joins
* flag to be set TRUE. This will prevent any higher-level OJs from
* being interchanged with that OJ, which would result in not having any
- * correct place to evaluate the qual. (The case we care about here is a
+ * correct place to evaluate the qual. (The case we care about here is a
* sub-select WHERE clause within the RHS of some outer join. The WHERE
* clause must effectively be treated as a degenerate clause of that outer
* join's condition. Rather than trying to match such clauses with joins
@@ -1556,7 +1556,7 @@ check_mergejoinable(RestrictInfo *restrictinfo)
* info fields in the restrictinfo.
*
* Currently, we support hashjoin for binary opclauses where
- * the operator is a hashjoinable operator. The arguments can be
+ * the operator is a hashjoinable operator. The arguments can be
* anything --- as long as there are no volatile functions in them.
*/
static void
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 6bdbd2a913..a3d86c6959 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -64,7 +64,7 @@ static Oid fetch_agg_sort_op(Oid aggfnoid);
* generic scan-all-the-rows plan.
*
* We are passed the preprocessed tlist, and the best path
- * devised for computing the input of a standard Agg node. If we are able
+ * devised for computing the input of a standard Agg node. If we are able
* to optimize all the aggregates, and the result is estimated to be cheaper
* than the generic aggregate method, then generate and return a Plan that
* does it that way. Otherwise, return NULL.
@@ -127,7 +127,7 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
/*
* Since this optimization is not applicable all that often, we want to
* fall out before doing very much work if possible. Therefore we do the
- * work in several passes. The first pass scans the tlist and HAVING qual
+ * work in several passes. The first pass scans the tlist and HAVING qual
* to find all the aggregates and verify that each of them is a MIN/MAX
* aggregate. If that succeeds, the second pass looks at each aggregate
* to see if it is optimizable; if so we make an IndexPath describing how
@@ -480,7 +480,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info)
SortGroupClause *sortcl;
/*
- * Generate a suitably modified query. Much of the work here is probably
+ * Generate a suitably modified query. Much of the work here is probably
* unnecessary in the normal case, but we want to make it look good if
* someone tries to EXPLAIN the result.
*/
@@ -523,7 +523,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info)
FLOAT8PASSBYVAL);
/*
- * Generate the plan for the subquery. We already have a Path for the
+ * Generate the plan for the subquery. We already have a Path for the
* basic indexscan, but we have to convert it to a Plan and attach a LIMIT
* node above it.
*
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 04d86c2aaa..4ac3685b7c 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -35,7 +35,7 @@
* which may involve joins but not any fancier features.
*
* Since query_planner does not handle the toplevel processing (grouping,
- * sorting, etc) it cannot select the best path by itself. It selects
+ * sorting, etc) it cannot select the best path by itself. It selects
* two paths: the cheapest path that produces all the required tuples,
* independent of any ordering considerations, and the cheapest path that
* produces the expected fraction of the required tuples in the required
@@ -59,7 +59,7 @@
* does not use grouping
*
* Note: the PlannerInfo node also includes a query_pathkeys field, which is
- * both an input and an output of query_planner(). The input value signals
+ * both an input and an output of query_planner(). The input value signals
* query_planner that the indicated sort order is wanted in the final output
* plan. But this value has not yet been "canonicalized", since the needed
* info does not get computed until we scan the qual clauses. We canonicalize
@@ -103,7 +103,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If the query has an empty join tree, then it's something easy like
- * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
+ * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
*/
if (parse->jointree->fromlist == NIL)
{
@@ -179,7 +179,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* We should now have size estimates for every actual table involved in
- * the query, so we can compute total_table_pages. Note that appendrels
+ * the query, so we can compute total_table_pages. Note that appendrels
* are not double-counted here, even though we don't bother to distinguish
* RelOptInfos for appendrel parents, because the parents will still have
* size zero.
@@ -227,14 +227,14 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If we formed any equivalence classes, generate additional restriction
- * clauses as appropriate. (Implied join clauses are formed on-the-fly
+ * clauses as appropriate. (Implied join clauses are formed on-the-fly
* later.)
*/
generate_base_implied_equalities(root);
/*
* We have completed merging equivalence sets, so it's now possible to
- * convert the requested query_pathkeys to canonical form. Also
+ * convert the requested query_pathkeys to canonical form. Also
* canonicalize the groupClause, windowClause, distinctClause and
* sortClause pathkeys for use later.
*/
@@ -294,7 +294,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If both GROUP BY and ORDER BY are specified, we will need two
* levels of sort --- and, therefore, certainly need to read all the
- * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if we
+ * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if we
* have both DISTINCT and GROUP BY, or if we have a window
* specification not compatible with the GROUP BY.
*/
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 042c1d0676..65daa18e86 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -172,7 +172,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
/*
* We document cursor_tuple_fraction as simply being a fraction, which
- * means the edge cases 0 and 1 have to be treated specially here. We
+ * means the edge cases 0 and 1 have to be treated specially here. We
* convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
*/
if (tuple_fraction >= 1.0)
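Concretely, the special-casing amounts to a clamp at both edges, consistent with the internal convention that 0.0 means "fetch all the tuples":

    /* Sketch of the edge-case handling described above. */
    if (tuple_fraction >= 1.0)
        tuple_fraction = 0.0;       /* user's "1" means all the tuples */
    else if (tuple_fraction <= 0.0)
        tuple_fraction = 1e-10;     /* avoid 0, which internally means "all" */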
@@ -418,7 +418,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
* to execute that we're better off doing it only once per group, despite
* the loss of selectivity. This is hard to estimate short of doing the
* entire planning process twice, so we use a heuristic: clauses
- * containing subplans are left in HAVING. Otherwise, we move or copy the
+ * containing subplans are left in HAVING. Otherwise, we move or copy the
* HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
@@ -529,7 +529,7 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
* Simplify constant expressions.
*
* Note: one essential effect here is to insert the current actual values
- * of any default arguments for functions. To ensure that happens, we
+ * of any default arguments for functions. To ensure that happens, we
* *must* process all expressions here. Previous PG versions sometimes
* skipped const-simplification if it didn't seem worth the trouble, but
* we can't do that anymore.
@@ -798,7 +798,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* If there's a top-level ORDER BY, assume we have to fetch all the
- * tuples. This might be too simplistic given all the hackery below
+ * tuples. This might be too simplistic given all the hackery below
* to possibly avoid the sort; but the odds of accurate estimates here
* are pretty low anyway.
*/
@@ -826,7 +826,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* We should not need to call preprocess_targetlist, since we must be
- * in a SELECT query node. Instead, use the targetlist returned by
+ * in a SELECT query node. Instead, use the targetlist returned by
* plan_set_operations (since this tells whether it returned any
* resjunk columns!), and transfer any sort key information from the
* original tlist.
@@ -910,7 +910,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Calculate pathkeys that represent grouping/ordering requirements.
* Stash them in PlannerInfo so that query_planner can canonicalize
- * them after EquivalenceClasses have been formed. The sortClause is
+ * them after EquivalenceClasses have been formed. The sortClause is
* certainly sort-able, but GROUP BY and DISTINCT might not be, in
* which case we just leave their pathkeys empty.
*/
@@ -984,7 +984,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a
* superset of GROUP BY, it would be tempting to request sort by ORDER
* BY --- but that might just leave us failing to exploit an available
- * sort order at all. Needs more thought. The choice for DISTINCT
+ * sort order at all. Needs more thought. The choice for DISTINCT
* versus ORDER BY is much easier, since we know that the parser
* ensured that one is a superset of the other.
*/
@@ -1019,7 +1019,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Executor doesn't support hashed aggregation with DISTINCT
- * aggregates. (Doing so would imply storing *all* the input
+ * aggregates. (Doing so would imply storing *all* the input
* values in the hash table, which seems like a certain loser.)
*/
can_hash = (agg_counts.numDistinctAggs == 0 &&
@@ -1102,7 +1102,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* create_plan() returns a plan with just a "flat" tlist of
* required Vars. Usually we need to insert the sub_tlist as the
- * tlist of the top plan node. However, we can skip that if we
+ * tlist of the top plan node. However, we can skip that if we
* determined that whatever query_planner chose to return will be
* good enough.
*/
@@ -1265,7 +1265,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Furthermore, there cannot be any variables in either HAVING
* or the targetlist, so we actually do not need the FROM
* table at all! We can just throw away the plan-so-far and
- * generate a Result node. This is a sufficiently unusual
+ * generate a Result node. This is a sufficiently unusual
* corner case that it's not worth contorting the structure of
* this routine to avoid having to generate the plan in the
* first place.
@@ -1314,7 +1314,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* make_sort_from_pathkeys won't add those on its own, and anyway
* we want them evaluated only once at the bottom of the stack.
* As we climb up the stack, we add outputs for the WindowFuncs
- * computed at each level. Also, each input tlist has to present
+ * computed at each level. Also, each input tlist has to present
* all the columns needed to sort the data for the next WindowAgg
* step. That's handled internally by make_sort_from_pathkeys,
* but we need the copyObject steps here to ensure that each plan
@@ -1638,7 +1638,7 @@ is_dummy_plan(Plan *plan)
* preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
*
* We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
- * results back in *count_est and *offset_est. These variables are set to
+ * results back in *count_est and *offset_est. These variables are set to
* 0 if the corresponding clause is not present, and -1 if it's present
* but we couldn't estimate the value for it. (The "0" convention is OK
* for OFFSET but a little bit bogus for LIMIT: effectively we estimate
@@ -1647,7 +1647,7 @@ is_dummy_plan(Plan *plan)
* be passed to make_limit, which see if you change this code.
*
* The return value is the suitably adjusted tuple_fraction to use for
- * planning the query. This adjustment is not overridable, since it reflects
+ * planning the query. This adjustment is not overridable, since it reflects
* plan actions that grouping_planner() will certainly take, not assumptions
* about context.
*/
@@ -1771,7 +1771,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
else if (*offset_est != 0 && tuple_fraction > 0.0)
{
/*
- * We have an OFFSET but no LIMIT. This acts entirely differently
+ * We have an OFFSET but no LIMIT. This acts entirely differently
* from the LIMIT case: here, we need to increase rather than decrease
* the caller's tuple_fraction, because the OFFSET acts to cause more
* tuples to be fetched instead of fewer. This only matters if we got
@@ -1786,7 +1786,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
/*
* If we have absolute counts from both caller and OFFSET, add them
- * together; likewise if they are both fractional. If one is
+ * together; likewise if they are both fractional. If one is
* fractional and the other absolute, we want to take the larger, and
* we heuristically assume that's the fractional one.
*/
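In this code's convention, a tuple_fraction at or above 1.0 is an absolute row count and anything below it is a fraction. A sketch of the combination rule the comment states, with limit_fraction standing for the OFFSET-derived estimate:

    /* Sketch only. */
    if ((tuple_fraction >= 1.0) == (limit_fraction >= 1.0))
    {
        /* both absolute or both fractional: add them together */
        tuple_fraction += limit_fraction;
    }
    else if (tuple_fraction >= 1.0)
    {
        /* one of each: heuristically keep the fractional one */
        tuple_fraction = limit_fraction;
    }
    /* else tuple_fraction is already the fractional one: keep it */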
@@ -2057,7 +2057,7 @@ choose_hashed_grouping(PlannerInfo *root,
* differences that it doesn't seem worth trying to unify the two functions.
*
* But note that making the two choices independently is a bit bogus in
- * itself. If the two could be combined into a single choice operation
+ * itself. If the two could be combined into a single choice operation
* it'd probably be better, but that seems far too unwieldy to be practical,
* especially considering that the combination of GROUP BY and DISTINCT
* isn't very common in real queries. By separating them, we are giving
@@ -2119,7 +2119,7 @@ choose_hashed_distinct(PlannerInfo *root,
dNumDistinctRows, input_plan->plan_width, limit_tuples);
/*
- * Now for the GROUP case. See comments in grouping_planner about the
+ * Now for the GROUP case. See comments in grouping_planner about the
* sorting choices here --- this code should match that code.
*/
sorted_p.startup_cost = input_plan->startup_cost;
@@ -2185,7 +2185,7 @@ choose_hashed_distinct(PlannerInfo *root,
* we want to pass this targetlist to the subplan:
* a,b,c,d,a+b
* where the a+b target will be used by the Sort/Group steps, and the
- * other targets will be used for computing the final results. (In the
+ * other targets will be used for computing the final results. (In the
* above example we could theoretically suppress the a and b targets and
* pass down only c,d,a+b, but it's not really worth the trouble to
* eliminate simple var references from the subplan. We will avoid doing
@@ -2298,7 +2298,7 @@ make_subplanTargetList(PlannerInfo *root,
* Locate grouping columns in the tlist chosen by query_planner.
*
* This is only needed if we don't use the sub_tlist chosen by
- * make_subplanTargetList. We have to forget the column indexes found
+ * make_subplanTargetList. We have to forget the column indexes found
* by that routine and re-locate the grouping exprs in the real sub_tlist.
* We assume the grouping exprs are just Vars (see make_subplanTargetList).
*/
@@ -2329,11 +2329,11 @@ locate_grouping_columns(PlannerInfo *root,
/*
* The grouping column returned by create_plan might not have the same
- * typmod as the original Var. (This can happen in cases where a
+ * typmod as the original Var. (This can happen in cases where a
* set-returning function has been inlined, so that we now have more
* knowledge about what it returns than we did when the original Var
* was created.) So we can't use tlist_member() to search the tlist;
- * instead use tlist_member_match_var. For safety, still check that
+ * instead use tlist_member_match_var. For safety, still check that
* the vartype matches.
*/
if (!(groupexpr && IsA(groupexpr, Var)))
@@ -2562,7 +2562,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* This depends on the behavior of make_pathkeys_for_window()!
*
* We are given the target WindowClause and an array of the input column
- * numbers associated with the resulting pathkeys. In the easy case, there
+ * numbers associated with the resulting pathkeys. In the easy case, there
* are the same number of pathkey columns as partitioning + ordering columns
* and we just have to copy some data around. However, it's possible that
* some of the original partitioning + ordering columns were eliminated as
@@ -2574,7 +2574,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* determine which keys are significant.
*
* The method used here is a bit brute-force: add the sort columns to a list
- * one at a time and note when the resulting pathkey list gets longer. But
+ * one at a time and note when the resulting pathkey list gets longer. But
* it's a sufficiently uncommon case that a faster way doesn't seem worth
* the amount of code refactoring that'd be needed.
*----------
@@ -2676,7 +2676,7 @@ get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist,
* Currently, we disallow sublinks in standalone expressions, so there's no
* real "planning" involved here. (That might not always be true though.)
* What we must do is run eval_const_expressions to ensure that any function
- * default arguments get inserted. The fact that constant subexpressions
+ * default arguments get inserted. The fact that constant subexpressions
* get simplified is a side-effect that is useful when the expression will
* get evaluated more than once. Also, we must fix operator function IDs.
*
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index e006633b39..41fb9dde93 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -132,7 +132,7 @@ static bool extract_query_dependencies_walker(Node *node,
/*
* set_plan_references
*
- * This is the final processing pass of the planner/optimizer. The plan
+ * This is the final processing pass of the planner/optimizer. The plan
* tree is complete; we just have to adjust some representational details
* for the convenience of the executor:
*
@@ -179,7 +179,7 @@ static bool extract_query_dependencies_walker(Node *node,
* and glob->invalItems (for everything else).
*
* Notice that we modify Plan nodes in-place, but use expression_tree_mutator
- * to process targetlist and qual expressions. We can assume that the Plan
+ * to process targetlist and qual expressions. We can assume that the Plan
* nodes were just built by the planner and are not multiply referenced, but
* it's not so safe to assume that for expression tree nodes.
*/
@@ -223,7 +223,7 @@ set_plan_references(PlannerGlobal *glob, Plan *plan, List *rtable)
* We do this even though the RTE might be unreferenced in the plan
* tree; this would correspond to cases such as views that were
* expanded, child tables that were eliminated by constraint
- * exclusion, etc. Schema invalidation on such a rel must still force
+ * exclusion, etc. Schema invalidation on such a rel must still force
* rebuilding of the plan.
*
* Note we don't bother to avoid duplicate list entries. We could,
@@ -388,7 +388,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
/*
* These plan types don't actually bother to evaluate their
* targetlists, because they just return their unmodified input
- * tuples. Even though the targetlist won't be used by the
+ * tuples. Even though the targetlist won't be used by the
* executor, we fix it up for possible use by EXPLAIN (not to
* mention ease of debugging --- wrong varnos are very confusing).
*/
@@ -572,7 +572,7 @@ set_subqueryscan_references(PlannerGlobal *glob,
else
{
/*
- * Keep the SubqueryScan node. We have to do the processing that
+ * Keep the SubqueryScan node. We have to do the processing that
* set_plan_references would otherwise have done on it. Notice we do
* not do set_upper_references() here, because a SubqueryScan will
* always have been created with correct references to its subplan's
@@ -785,7 +785,7 @@ fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context)
/*
* We should not see any Vars marked INNER, but in a nestloop inner
- * scan there could be OUTER Vars. Leave them alone.
+ * scan there could be OUTER Vars. Leave them alone.
*/
Assert(var->varno != INNER);
if (var->varno > 0 && var->varno != OUTER)
@@ -908,7 +908,7 @@ set_join_references(PlannerGlobal *glob, Join *join, int rtoffset)
*
* To handle bitmap-scan plan trees, we have to be able to recurse down
* to the bottom BitmapIndexScan nodes; likewise, appendrel indexscans
- * require recursing through Append nodes. This is split out as a separate
+ * require recursing through Append nodes. This is split out as a separate
* function so that it can recurse.
*
* Note we do *not* apply any rtoffset for non-join Vars; this is because
@@ -923,7 +923,7 @@ set_inner_join_references(PlannerGlobal *glob, Plan *inner_plan,
{
/*
* An index is being used to reduce the number of tuples scanned in
- * the inner relation. If there are join clauses being used with the
+ * the inner relation. If there are join clauses being used with the
* index, we must update their outer-rel var nodes to refer to the
* outer side of the join.
*/
@@ -1214,7 +1214,7 @@ set_dummy_tlist_references(Plan *plan, int rtoffset)
*
* In most cases, subplan tlists will be "flat" tlists with only Vars,
* so we try to optimize that case by extracting information about Vars
- * in advance. Matching a parent tlist to a child is still an O(N^2)
+ * in advance. Matching a parent tlist to a child is still an O(N^2)
* operation, but at least with a much smaller constant factor than plain
* tlist_member() searches.
*
@@ -1664,7 +1664,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
* adjust any Vars that refer to other tables to reference junk tlist
* entries in the top plan's targetlist. Vars referencing the result
* table should be left alone, however (the executor will evaluate them
- * using the actual heap tuple, after firing triggers if any). In the
+ * using the actual heap tuple, after firing triggers if any). In the
* adjusted RETURNING list, result-table Vars will still have their
* original varno, but Vars for other rels will have varno OUTER.
*
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index bf0f25d781..ff9a350354 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -333,7 +333,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
Node *result;
/*
- * Copy the source Query node. This is a quick and dirty kluge to resolve
+ * Copy the source Query node. This is a quick and dirty kluge to resolve
* the fact that the parser can generate trees with multiple links to the
* same sub-Query node, but the planner wants to scribble on the Query.
* Try to clean this up when we do querytree redesign...
@@ -358,7 +358,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
* path/costsize.c.
*
* XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash
- * its output. In that case it would've been better to specify full
+ * its output. In that case it would've been better to specify full
* retrieval. At present, however, we can only check hashability after
* we've made the subplan :-(. (Determining whether it'll fit in work_mem
* is the really hard part.) Therefore, we don't want to be too
@@ -396,7 +396,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
/*
* If it's a correlated EXISTS with an unimportant targetlist, we might be
* able to transform it to the equivalent of an IN and then implement it
- * by hashing. We don't have enough information yet to tell which way is
+ * by hashing. We don't have enough information yet to tell which way is
* likely to be better (it depends on the expected number of executions of
* the EXISTS qual, and we are much too early in planning the outer query
* to be able to guess that). So we generate both plans, if possible, and
@@ -622,7 +622,7 @@ build_subplan(PlannerInfo *root, Plan *plan, List *rtable,
* Otherwise, we have the option to tack a MATERIAL node onto the top
* of the subplan, to reduce the cost of reading it repeatedly. This
* is pointless for a direct-correlated subplan, since we'd have to
- * recompute its results each time anyway. For uncorrelated/undirect
+ * recompute its results each time anyway. For uncorrelated/undirect
* correlated subplans, we add MATERIAL unless the subplan's top plan
* node would materialize its output anyway.
*/
@@ -663,10 +663,10 @@ build_subplan(PlannerInfo *root, Plan *plan, List *rtable,
/*
* A parameterless subplan (not initplan) should be prepared to handle
- * REWIND efficiently. If it has direct parameters then there's no point
+ * REWIND efficiently. If it has direct parameters then there's no point
* since it'll be reset on each scan anyway; and if it's an initplan then
* there's no point since it won't get re-run without parameter changes
- * anyway. The input of a hashed subplan doesn't need REWIND either.
+ * anyway. The input of a hashed subplan doesn't need REWIND either.
*/
if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable)
root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs,
@@ -769,7 +769,7 @@ generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno)
/*
* convert_testexpr: convert the testexpr given by the parser into
* actually executable form. This entails replacing PARAM_SUBLINK Params
- * with Params or Vars representing the results of the sub-select. The
+ * with Params or Vars representing the results of the sub-select. The
* nodes to be substituted are passed in as the List result from
* generate_subquery_params or generate_subquery_vars.
*/
@@ -871,7 +871,7 @@ testexpr_is_hashable(Node *testexpr)
*
* The combining operators must be hashable and strict. The need for
* hashability is obvious, since we want to use hashing. Without
- * strictness, behavior in the presence of nulls is too unpredictable. We
+ * strictness, behavior in the presence of nulls is too unpredictable. We
* actually must assume even more than plain strictness: they can't yield
* NULL for non-null inputs, either (see nodeSubplan.c). However, hash
* indexes and hash joins assume that too.
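A sketch of the two-part test the comment implies, phrased with catalog-lookup helpers of this era; treat the exact helper names and signatures as assumptions rather than the committed implementation:

    /* Sketch: an operator qualifies for the hashed-subplan technique only
     * if it is hashable and its underlying function is strict. */
    static bool
    hash_ok_operator_sketch(OpExpr *expr)
    {
        Oid         opid = expr->opno;

        if (!op_hashjoinable(opid))
            return false;                       /* can't hash at all */
        return func_strict(get_opcode(opid));   /* NULL in => NULL out */
    }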
@@ -962,7 +962,7 @@ SS_process_ctes(PlannerInfo *root)
}
/*
- * Copy the source Query node. Probably not necessary, but let's keep
+ * Copy the source Query node. Probably not necessary, but let's keep
* this similar to make_subplan.
*/
subquery = (Query *) copyObject(cte->ctequery);
@@ -988,7 +988,7 @@ SS_process_ctes(PlannerInfo *root)
elog(ERROR, "unexpected outer reference in CTE query");
/*
- * Make a SubPlan node for it. This is just enough unlike
+ * Make a SubPlan node for it. This is just enough unlike
* build_subplan that we can't share code.
*
* Note plan_id, plan_name, and cost fields are set further down.
@@ -1211,7 +1211,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
/*
* See if the subquery can be simplified based on the knowledge that it's
- * being used in EXISTS(). If we aren't able to get rid of its
+ * being used in EXISTS(). If we aren't able to get rid of its
* targetlist, we have to fail, because the pullup operation leaves us
* with noplace to evaluate the targetlist.
*/
@@ -1260,9 +1260,9 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
* what pull_up_subqueries has to go through.
*
* In fact, it's even easier than what convert_ANY_sublink_to_join has to
- * do. The machinations of simplify_EXISTS_query ensured that there is
+ * do. The machinations of simplify_EXISTS_query ensured that there is
* nothing interesting in the subquery except an rtable and jointree, and
- * even the jointree FromExpr no longer has quals. So we can just append
+ * even the jointree FromExpr no longer has quals. So we can just append
* the rtable to our own and use the FromExpr in our jointree. But first,
* adjust all level-zero varnos in the subquery to account for the rtable
* merger.
@@ -1392,7 +1392,7 @@ simplify_EXISTS_query(Query *query)
*
* On success, the modified subselect is returned, and we store a suitable
* upper-level test expression at *testexpr, plus a list of the subselect's
- * output Params at *paramIds. (The test expression is already Param-ified
+ * output Params at *paramIds. (The test expression is already Param-ified
* and hence need not go through convert_testexpr, which is why we have to
* deal with the Param IDs specially.)
*
@@ -1551,7 +1551,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
return NULL;
/*
- * Also reject sublinks in the stuff we intend to pull up. (It might be
+ * Also reject sublinks in the stuff we intend to pull up. (It might be
* possible to support this, but doesn't seem worth the complication.)
*/
if (contain_subplans((Node *) leftargs))
@@ -1750,7 +1750,7 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
* is needed for a bare List.)
*
* Anywhere within the top-level AND/OR clause structure, we can tell
- * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
+ * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
* propagates down in both cases. (Note that this is unlike the meaning
* of "top level qual" used in most other places in Postgres.)
*/
@@ -1855,7 +1855,7 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
* Now determine the set of params that are validly referenceable in this
* query level; to wit, those available from outer query levels plus the
* output parameters of any local initPlans. (We do not include output
- * parameters of regular subplans. Those should only appear within the
+ * parameters of regular subplans. Those should only appear within the
* testexpr of SubPlan nodes, and are taken care of locally within
* finalize_primnode.)
*/
@@ -1999,7 +1999,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params)
/*
* In a SubqueryScan, SS_finalize_plan has already been run on the
* subplan by the inner invocation of subquery_planner, so there's
- * no need to do it again. Instead, just pull out the subplan's
+ * no need to do it again. Instead, just pull out the subplan's
* extParams list, which represents the params it needs from my
* level and higher levels.
*/
@@ -2222,7 +2222,7 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
/*
* Remove any param IDs of output parameters of the subplan that were
- * referenced in the testexpr. These are not interesting for
+ * referenced in the testexpr. These are not interesting for
* parameter change signaling since we always re-evaluate the subplan.
* Note that this wouldn't work too well if there might be uses of the
* same param IDs elsewhere in the plan, but that can't happen because
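A sketch of the removal step: after walking the testexpr, the subplan's own output parameter IDs are deleted from the accumulated set, since a change in them always forces re-evaluation of the subplan anyway. Bitmapset and list helpers are as in this era; the context field name is assumed:

    /* Sketch only. */
    ListCell   *lc;

    foreach(lc, subplan->paramIds)
    {
        context->paramids = bms_del_member(context->paramids,
                                           lfirst_int(lc));
    }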
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 3e99776522..34e83bb65d 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -103,7 +103,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
*
* A clause "foo op ANY (sub-SELECT)" can be processed by pulling the
* sub-SELECT up to become a rangetable entry and treating the implied
- * comparisons as quals of a semijoin. However, this optimization *only*
+ * comparisons as quals of a semijoin. However, this optimization *only*
* works at the top level of WHERE or a JOIN/ON clause, because we cannot
* distinguish whether the ANY ought to return FALSE or NULL in cases
* involving NULL inputs. Also, in an outer join's ON clause we can only
@@ -120,7 +120,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
* transformations if any are found.
*
* This routine has to run before preprocess_expression(), so the quals
- * clauses are not yet reduced to implicit-AND format. That means we need
+ * clauses are not yet reduced to implicit-AND format. That means we need
* to recursively search through explicit AND clauses, which are
* probably only binary ANDs. We stop as soon as we hit a non-AND item.
*/
@@ -238,7 +238,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
* pull up quals for which that's okay.
*
* XXX for the moment, we refrain from pulling up IN/EXISTS clauses
- * appearing in LEFT or RIGHT join conditions. Although it is
+ * appearing in LEFT or RIGHT join conditions. Although it is
* semantically valid to do so under the above conditions, we end up
* with a query in which the semijoin or antijoin must be evaluated
* below the outer join, which could perform far worse than leaving it
@@ -284,7 +284,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/*
* Although we could include the pulled-up subqueries in the returned
* relids, there's no need since upper quals couldn't refer to their
- * outputs anyway. But we *do* need to include the join's own rtindex
+ * outputs anyway. But we *do* need to include the join's own rtindex
* because we haven't yet collapsed join alias variables, so upper
* levels would mistakenly think they couldn't use references to this
* join.
@@ -624,7 +624,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode,
* Attempt to pull up a single simple subquery.
*
* jtnode is a RangeTblRef that has been tentatively identified as a simple
- * subquery by pull_up_subqueries. We return the replacement jointree node,
+ * subquery by pull_up_subqueries. We return the replacement jointree node,
* or jtnode itself if we determine that the subquery can't be pulled up after
* all.
*
@@ -656,7 +656,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Create a PlannerInfo data structure for this subquery.
*
* NOTE: the next few steps should match the first processing in
- * subquery_planner(). Can we refactor to avoid code duplication, or
+ * subquery_planner(). Can we refactor to avoid code duplication, or
* would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
@@ -704,7 +704,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
/*
* Now we must recheck whether the subquery is still simple enough to pull
- * up. If not, abandon processing it.
+ * up. If not, abandon processing it.
*
* We don't really need to recheck all the conditions involved, but it's
* easier just to keep this "if" looking the same as the one in
@@ -721,7 +721,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Give up, return unmodified RangeTblRef.
*
* Note: The work we just did will be redone when the subquery gets
- * planned on its own. Perhaps we could avoid that by storing the
+ * planned on its own. Perhaps we could avoid that by storing the
* modified subquery back into the rangetable, but I'm not gonna risk
* it now.
*/
@@ -819,7 +819,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* You might think that we could avoid using PHVs for alias vars of joins
* below lowest_outer_join, but that doesn't work because the alias vars
* could be referenced above that join; we need the PHVs to be present in
- * such references after the alias vars get flattened. (It might be worth
+ * such references after the alias vars get flattened. (It might be worth
* trying to be smarter here, someday.)
*/
foreach(lc, parse->rtable)
@@ -910,7 +910,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Pull up a single simple UNION ALL subquery.
*
* jtnode is a RangeTblRef that has been identified as a simple UNION ALL
- * subquery by pull_up_subqueries. We pull up the leaf subqueries and
+ * subquery by pull_up_subqueries. We pull up the leaf subqueries and
* build an "append relation" for the union set. The result value is just
* jtnode, since we don't actually need to change the query jointree.
*/
@@ -1097,7 +1097,7 @@ is_simple_subquery(Query *subquery)
/*
* Don't pull up a subquery that has any set-returning functions in its
- * targetlist. Otherwise we might well wind up inserting set-returning
+ * targetlist. Otherwise we might well wind up inserting set-returning
* functions into places where they mustn't go, such as quals of higher
* queries.
*/
@@ -1106,7 +1106,7 @@ is_simple_subquery(Query *subquery)
/*
* Don't pull up a subquery that has any volatile functions in its
- * targetlist. Otherwise we might introduce multiple evaluations of these
+ * targetlist. Otherwise we might introduce multiple evaluations of these
* functions, if they get copied to multiple places in the upper query,
* leading to surprising results. (Note: the PlaceHolderVar mechanism
* doesn't quite guarantee single evaluation; else we could pull up anyway
@@ -1492,7 +1492,7 @@ pullup_replace_vars_callback(Var *var,
* SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL;
* If the join clause is strict for b.y, then only null-extended rows could
* pass the upper WHERE, and we can conclude that what the query is really
- * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
+ * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
* to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be
* removed to prevent bogus selectivity calculations, but we leave it to
* distribute_qual_to_rels to get rid of such clauses.
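
The equivalence this comment relies on can be checked outside the planner. Below is a stand-alone C sketch (illustrative only, not part of this patch or of the PostgreSQL sources) that models two tiny integer "tables" and shows that the rows of a LEFT JOIN surviving a strict b.y IS NULL filter are exactly the rows of a with no match in b, i.e. anti-join semantics:

#include <stdio.h>

/* Toy demonstration (not planner code): rows of
 *   a LEFT JOIN b ON a.x = b.y WHERE b.y IS NULL
 * are exactly the rows of a with no match in b, because the strict
 * join clause forces b.y to be null only in null-extended rows. */
int main(void)
{
    int a[] = {1, 2, 3, 4};
    int b[] = {2, 4};
    int na = 4, nb = 2;

    for (int i = 0; i < na; i++)
    {
        int matched = 0;
        for (int j = 0; j < nb; j++)
            if (a[i] == b[j])
                matched = 1;
        /* The LEFT JOIN emits a null-extended row exactly when there
         * is no match; "b.y IS NULL" keeps only those rows. */
        if (!matched)
            printf("a.x = %d survives (anti-join semantics)\n", a[i]);
    }
    return 0;
}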
@@ -1732,7 +1732,7 @@ reduce_outer_joins_pass2(Node *jtnode,
/*
* See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if
* the join's own quals are strict for any var that was forced null by
- * higher qual levels. NOTE: there are other ways that we could
+ * higher qual levels. NOTE: there are other ways that we could
* detect an anti-join, in particular if we were to check whether Vars
* coming from the RHS must be non-null because of table constraints.
* That seems complicated and expensive though (in particular, one
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index f488c581aa..6aa02d97ec 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -72,7 +72,7 @@ canonicalize_qual(Expr *qual)
return NULL;
/*
- * Push down NOTs. We do this only in the top-level boolean expression,
+ * Push down NOTs. We do this only in the top-level boolean expression,
* without examining arguments of operators/functions. The main reason for
* doing this is to expose as much top-level AND/OR structure as we can,
* so there's no point in descending further.
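
For illustration only (not part of this patch), here is a minimal C sketch of the top-level NOT push-down this comment describes, on a toy expression type; the Expr/mk/push_nots names are invented for the sketch, and allocations are deliberately leaked to keep it short:

#include <stdio.h>
#include <stdlib.h>

typedef enum { VAR, NOT, AND, OR } Kind;

typedef struct Expr
{
    Kind        kind;
    char        name;           /* used by VAR */
    struct Expr *left, *right;  /* NOT uses left only */
} Expr;

static Expr *mk(Kind k, char name, Expr *l, Expr *r)
{
    Expr *e = malloc(sizeof(Expr));
    e->kind = k; e->name = name; e->left = l; e->right = r;
    return e;
}

/* De Morgan at the top level only:
 * NOT (A AND B) => (NOT A) OR (NOT B), and dually for OR. */
static Expr *push_nots(Expr *e)
{
    if (e->kind == NOT && (e->left->kind == AND || e->left->kind == OR))
    {
        Kind dual = (e->left->kind == AND) ? OR : AND;
        return mk(dual, 0,
                  push_nots(mk(NOT, 0, e->left->left, NULL)),
                  push_nots(mk(NOT, 0, e->left->right, NULL)));
    }
    return e;
}

static void print(Expr *e)
{
    switch (e->kind)
    {
        case VAR: putchar(e->name); break;
        case NOT: printf("NOT "); print(e->left); break;
        default:
            putchar('('); print(e->left);
            printf(e->kind == AND ? " AND " : " OR ");
            print(e->right); putchar(')');
    }
}

int main(void)
{
    Expr *q = mk(NOT, 0, mk(AND, 0, mk(VAR, 'A', NULL, NULL),
                                    mk(VAR, 'B', NULL, NULL)), NULL);
    print(push_nots(q));        /* prints (NOT A OR NOT B) */
    putchar('\n');
    return 0;
}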
@@ -158,10 +158,10 @@ pull_ors(List *orlist)
* find_nots
* Traverse the qualification, looking for NOTs to take care of.
* For NOT clauses, apply push_nots() to try to push down the NOT.
- * For AND and OR clause types, simply recurse. Otherwise stop
+ * For AND and OR clause types, simply recurse. Otherwise stop
* recursing (we do not worry about structure below the top AND/OR tree).
*
- * Returns the modified qualification. AND/OR flatness is preserved.
+ * Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_nots(Expr *qual)
@@ -303,7 +303,7 @@ push_nots(Expr *qual)
*
* This may seem like a fairly useless activity, but it turns out to be
* applicable to many machine-generated queries, and there are also queries
- * in some of the TPC benchmarks that need it. This was in fact almost the
+ * in some of the TPC benchmarks that need it. This was in fact almost the
* sole useful side-effect of the old prepqual code that tried to force
* the query into canonical AND-of-ORs form: the canonical equivalent of
* ((A AND B) OR (A AND C))
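
The cited equivalence ((A AND B) OR (A AND C)) == A AND (B OR C) is easy to confirm by brute force over all boolean assignments; a tiny stand-alone C check (illustrative, not PostgreSQL code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (int a = 0; a <= 1; a++)
        for (int b = 0; b <= 1; b++)
            for (int c = 0; c <= 1; c++)
                assert(((a && b) || (a && c)) == (a && (b || c)));
    puts("inverse OR distributive law holds for all assignments");
    return 0;
}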
@@ -322,7 +322,7 @@ push_nots(Expr *qual)
* OR clauses to which the inverse OR distributive law might apply.
* Only the top-level AND/OR structure is searched.
*
- * Returns the modified qualification. AND/OR flatness is preserved.
+ * Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_duplicate_ors(Expr *qual)
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index 73a158e5d8..fd41bce07d 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -4,7 +4,7 @@
* Routines to preprocess the parse tree target list
*
* This module takes care of altering the query targetlist as needed for
- * INSERT, UPDATE, and DELETE queries. For INSERT and UPDATE queries,
+ * INSERT, UPDATE, and DELETE queries. For INSERT and UPDATE queries,
* the targetlist must contain an entry for each attribute of the target
* relation in the correct order. For both UPDATE and DELETE queries,
* we need a junk targetlist entry holding the CTID attribute --- the
@@ -80,7 +80,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
* for "update" and "delete" queries, add ctid of the result relation into
* the target list so that the ctid will propagate through execution and
* ExecutePlan() will be able to identify the right tuple to replace or
- * delete. This extra field is marked "junk" so that it is not stored
+ * delete. This extra field is marked "junk" so that it is not stored
* back into the tuple.
*/
if (command_type == CMD_UPDATE || command_type == CMD_DELETE)
@@ -108,7 +108,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
}
/*
- * Add TID targets for rels selected FOR UPDATE/SHARE. The executor uses
+ * Add TID targets for rels selected FOR UPDATE/SHARE. The executor uses
* the TID to know which rows to lock, much as for UPDATE or DELETE.
*/
if (parse->rowMarks)
@@ -184,7 +184,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* If the query has a RETURNING list, add resjunk entries for any Vars
* used in RETURNING that belong to other relations. We need to do this
- * to make these Vars available for the RETURNING calculation. Vars that
+ * to make these Vars available for the RETURNING calculation. Vars that
* belong to the result rel don't need to be added, because they will be
* made to refer to the actual heap tuple.
*/
@@ -294,9 +294,9 @@ expand_targetlist(List *tlist, int command_type,
* When generating a NULL constant for a dropped column, we label
* it INT4 (any other guaranteed-to-exist datatype would do as
* well). We can't label it with the dropped column's datatype
- * since that might not exist anymore. It does not really matter
+ * since that might not exist anymore. It does not really matter
* what we claim the type is, since NULL is NULL --- its
- * representation is datatype-independent. This could perhaps
+ * representation is datatype-independent. This could perhaps
* confuse code comparing the finished plan to the target
* relation, however.
*/
@@ -373,7 +373,7 @@ expand_targetlist(List *tlist, int command_type,
/*
* The remaining tlist entries should be resjunk; append them all to the
* end of the new tlist, making sure they have resnos higher than the last
- * real attribute. (Note: although the rewriter already did such
+ * real attribute. (Note: although the rewriter already did such
* renumbering, we have to do it again here in case we are doing an UPDATE
* in a table with dropped columns, or an inheritance child table with
* extra columns.)
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index fb72508dff..fbcfd37aa7 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -6,14 +6,14 @@
*
* There are two code paths in the planner for set-operation queries.
* If a subquery consists entirely of simple UNION ALL operations, it
- * is converted into an "append relation". Otherwise, it is handled
+ * is converted into an "append relation". Otherwise, it is handled
* by the general code in this module (plan_set_operations and its
* subroutines). There is some support code here for the append-relation
* case, but most of the heavy lifting for that is done elsewhere,
* notably in prepjointree.c and allpaths.c.
*
* There is also some code here to support planning of queries that use
- * inheritance (SELECT FROM foo*). Inheritance trees are converted into
+ * inheritance (SELECT FROM foo*). Inheritance trees are converted into
* append relations, and thenceforth share code with the UNION ALL case.
*
*
@@ -539,7 +539,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
*
* The tlist for an Append plan isn't important as far as the Append is
* concerned, but we must make it look real anyway for the benefit of the
- * next plan level up. In fact, it has to be real enough that the flag
+ * next plan level up. In fact, it has to be real enough that the flag
* column is shown as a variable not a constant, else setrefs.c will get
* confused.
*/
@@ -1094,7 +1094,7 @@ generate_setop_grouplist(SetOperationStmt *op, List *targetlist)
/*
* expand_inherited_tables
* Expand each rangetable entry that represents an inheritance set
- * into an "append relation". At the conclusion of this process,
+ * into an "append relation". At the conclusion of this process,
* the "inh" flag is set in all and only those RTEs that are append
* relation parents.
*/
@@ -1126,7 +1126,7 @@ expand_inherited_tables(PlannerInfo *root)
* Check whether a rangetable entry represents an inheritance set.
* If so, add entries for all the child tables to the query's
* rangetable, and build AppendRelInfo nodes for all the child tables
- * and add them to root->append_rel_list. If not, clear the entry's
+ * and add them to root->append_rel_list. If not, clear the entry's
* "inh" flag to prevent later code from looking for AppendRelInfos.
*
* Note that the original RTE is considered to represent the whole
@@ -1430,7 +1430,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation,
* parent rel's attribute numbering to the child's.
*
* The only surprise here is that we don't translate a parent whole-row
- * reference into a child whole-row reference. That would mean requiring
+ * reference into a child whole-row reference. That would mean requiring
* permissions on all child columns, which is overly strict, since the
* query is really only going to reference the inherited columns. Instead
* we set the per-column bits for all inherited columns.
@@ -1736,7 +1736,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid)
*
* The expressions have already been fixed, but we have to make sure that
* the target resnos match the child table (they may not, in the case of
- * a column that was added after-the-fact by ALTER TABLE). In some cases
+ * a column that was added after-the-fact by ALTER TABLE). In some cases
* this can force us to re-order the tlist to preserve resno ordering.
* (We do all this work in special cases so that preptlist.c is fast for
* the typical case.)
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index da1ce390c1..bc3f27e189 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -519,7 +519,7 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)
/*
* If the transition type is pass-by-value then it doesn't add
- * anything to the required size of the hashtable. If it is
+ * anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of the
* value itself, plus palloc overhead.
*/
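
A toy version of that sizing rule, as a hedged sketch: the field names and the 16-byte palloc overhead below are invented stand-ins (the real code consults the transition type's typbyval flag and width estimates):

#include <stdio.h>

typedef struct
{
    int transtypeByVal;     /* pass-by-value transition type? */
    int transtypeWidth;     /* estimated datum size in bytes */
} AggInfo;

static int entry_extra_space(const AggInfo *agg)
{
    if (agg->transtypeByVal)
        return 0;                        /* value fits in the entry itself */
    return agg->transtypeWidth + 16;     /* separate palloc'd copy + overhead */
}

int main(void)
{
    AggInfo byval = {1, 8}, byref = {0, 64};
    printf("by-value adds %d bytes, by-reference adds %d bytes\n",
           entry_extra_space(&byval), entry_extra_space(&byref));
    return 0;
}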
@@ -659,7 +659,7 @@ find_window_functions_walker(Node *node, WindowFuncLists *lists)
* Estimate the number of rows in a set result.
*
* We use the product of the rowcount estimates of all the functions in
- * the given tree. The result is 1 if there are no set-returning functions.
+ * the given tree. The result is 1 if there are no set-returning functions.
*
* Note: keep this in sync with expression_returns_set() in nodes/nodeFuncs.c.
*/
@@ -775,7 +775,7 @@ contain_subplans_walker(Node *node, void *context)
* Recursively search for mutable functions within a clause.
*
* Returns true if any mutable function (or operator implemented by a
- * mutable function) is found. This test is needed so that we don't
+ * mutable function) is found. This test is needed so that we don't
* mistakenly think that something like "WHERE random() < 0.5" can be treated
* as a constant qualification.
*
@@ -902,7 +902,7 @@ contain_mutable_functions_walker(Node *node, void *context)
* invalid conversions of volatile expressions into indexscan quals.
*
* We will recursively look into Query nodes (i.e., SubLink sub-selects)
- * but not into SubPlans. This is a bit odd, but intentional. If we are
+ * but not into SubPlans. This is a bit odd, but intentional. If we are
* looking at a SubLink, we are probably deciding whether a query tree
* transformation is safe, and a contained sub-select should affect that;
* for example, duplicating a sub-select containing a volatile function
@@ -1033,7 +1033,7 @@ contain_volatile_functions_walker(Node *node, void *context)
* The idea here is that the caller has verified that the expression contains
* one or more Var or Param nodes (as appropriate for the caller's need), and
* now wishes to prove that the expression result will be NULL if any of these
- * inputs is NULL. If we return false, then the proof succeeded.
+ * inputs is NULL. If we return false, then the proof succeeded.
*/
bool
contain_nonstrict_functions(Node *clause)
@@ -1150,7 +1150,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
*
* Returns the set of all Relids that are referenced in the clause in such
* a way that the clause cannot possibly return TRUE if any of these Relids
- * is an all-NULL row. (It is OK to err on the side of conservatism; hence
+ * is an all-NULL row. (It is OK to err on the side of conservatism; hence
* the analysis here is simplistic.)
*
* The semantics here are subtly different from contain_nonstrict_functions:
@@ -1256,7 +1256,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable rels, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
@@ -1458,7 +1458,7 @@ find_nonnullable_vars_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable vars, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
@@ -1722,7 +1722,7 @@ is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK)
* variables of the current query level and no uses of volatile functions.
* Such an expr is not necessarily a true constant: it can still contain
* Params and outer-level Vars, not to mention functions whose results
- * may vary from one statement to the next. However, the expr's value
+ * may vary from one statement to the next. However, the expr's value
* will be constant over any one scan of the current query, so it can be
* used as, eg, an indexscan key.
*
@@ -2022,7 +2022,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
* expression tree, for example "2 + 2" => "4". More interestingly,
* we can reduce certain boolean expressions even when they contain
* non-constant subexpressions: "x OR true" => "true" no matter what
- * the subexpression x is. (XXX We assume that no such subexpression
+ * the subexpression x is. (XXX We assume that no such subexpression
* will have important side-effects, which is not necessarily a good
* assumption in the presence of user-defined functions; do we need a
* pg_proc flag that prevents discarding the execution of a function?)
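
A minimal constant-folding sketch in the spirit of that comment (illustrative only; the three-valued encoding is invented for the sketch): an OR whose arguments include constant TRUE folds to TRUE, constant FALSE arguments drop out, and anything else stays symbolic:

#include <stdio.h>

enum arg { ARG_FALSE, ARG_TRUE, ARG_UNKNOWN };

static enum arg fold_or(const enum arg *args, int n, int *nleft)
{
    *nleft = 0;
    for (int i = 0; i < n; i++)
    {
        if (args[i] == ARG_TRUE)
            return ARG_TRUE;    /* "x OR true" => "true" */
        if (args[i] == ARG_UNKNOWN)
            (*nleft)++;         /* FALSE args simply drop out */
    }
    return *nleft ? ARG_UNKNOWN : ARG_FALSE;
}

int main(void)
{
    enum arg q[] = {ARG_UNKNOWN, ARG_FALSE, ARG_TRUE};
    int nleft;
    printf("folded to %d (1 = constant TRUE)\n", fold_or(q, 3, &nleft));
    return 0;
}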
@@ -2209,7 +2209,7 @@ eval_const_expressions_mutator(Node *node,
(void *) context);
/*
- * Need to get OID of underlying function. Okay to scribble on input
+ * Need to get OID of underlying function. Okay to scribble on input
* to this extent.
*/
set_opfuncid(expr);
@@ -2300,7 +2300,7 @@ eval_const_expressions_mutator(Node *node,
/* (NOT okay to try to inline it, though!) */
/*
- * Need to get OID of underlying function. Okay to scribble on
+ * Need to get OID of underlying function. Okay to scribble on
* input to this extent.
*/
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
@@ -2679,7 +2679,7 @@ eval_const_expressions_mutator(Node *node,
/*
* Found a TRUE condition, so none of the remaining alternatives
- * can be reached. We treat the result as the default result.
+ * can be reached. We treat the result as the default result.
*/
defresult = caseresult;
break;
@@ -3040,7 +3040,7 @@ simplify_or_arguments(List *args,
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
- * argument lists of a single OR operator. To avoid blowing out the stack
+ * argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
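
The stack-safe flattening idea reads clearly in isolation. A stand-alone C sketch (the Node type and names are invented for illustration): the parser would produce OR(a, OR(b, OR(c, d))), and an explicit to-do stack expands nested ORs without recursion, so stack depth no longer scales with the length of the OR list:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
    int         is_or;          /* 1 = OR node, 0 = leaf */
    int         leaf;           /* leaf value */
    struct Node *l, *r;
} Node;

static Node *leaf(int v)
{
    Node *n = calloc(1, sizeof(Node));
    n->leaf = v;
    return n;
}

static Node *or2(Node *l, Node *r)
{
    Node *n = calloc(1, sizeof(Node));
    n->is_or = 1; n->l = l; n->r = r;
    return n;
}

int main(void)
{
    /* Build OR(1, OR(2, OR(3, 4))) the way a binary parser would. */
    Node *tree = or2(leaf(1), or2(leaf(2), or2(leaf(3), leaf(4))));

    /* Explicit to-do stack replaces recursion. */
    Node *todo[64];
    int   top = 0;
    todo[top++] = tree;
    while (top > 0)
    {
        Node *n = todo[--top];
        if (n->is_or)
        {
            /* Push right first so the left argument is processed
             * first, preserving argument order. */
            todo[top++] = n->r;
            todo[top++] = n->l;
        }
        else
            printf("arg %d\n", n->leaf);    /* flat list: 1 2 3 4 */
    }
    return 0;
}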
@@ -3088,7 +3088,7 @@ simplify_or_arguments(List *args,
}
/*
- * OK, we have a const-simplified non-OR argument. Process it per
+ * OK, we have a const-simplified non-OR argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
@@ -3325,7 +3325,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
*
* It is possible for some of the defaulted arguments to be polymorphic;
* therefore we can't assume that the default expressions have the correct
- * data types already. We have to re-resolve polymorphics and do coercion
+ * data types already. We have to re-resolve polymorphics and do coercion
* just like the parser did.
*/
static List *
@@ -3521,7 +3521,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod, List *args,
* do not re-expand them. Also, if a parameter is used more than once
* in the SQL-function body, we require it not to contain any volatile
* functions (volatiles might deliver inconsistent answers) nor to be
- * unreasonably expensive to evaluate. The expensiveness check not only
+ * unreasonably expensive to evaluate. The expensiveness check not only
* prevents us from doing multiple evaluations of an expensive parameter
* at runtime, but is a safety value to limit growth of an expression due
* to repeated inlining.
@@ -3560,7 +3560,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. (The nargs check is just paranoia.)
+ * properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -3625,7 +3625,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
/*
* We just do parsing and parse analysis, not rewriting, because rewriting
* will not affect table-free-SELECT-only queries, which is all that we
- * care about. Also, we can punt as soon as we detect more than one
+ * care about. Also, we can punt as soon as we detect more than one
* command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
@@ -3663,7 +3663,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* a RelabelType if needed to make the tlist expression match the declared
* type of the function.
*
@@ -3708,7 +3708,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
/*
* We may be able to do it; there are still checks on parameter usage to
* make, but those are most easily done in combination with the actual
- * substitution of the inputs. So start building expression with inputs
+ * substitution of the inputs. So start building expression with inputs
* substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
@@ -3885,7 +3885,7 @@ evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod)
fix_opfuncids((Node *) expr);
/*
- * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
+ * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
* because it'd result in recursively invoking eval_const_expressions.)
*/
exprstate = ExecInitExpr(expr, NULL);
@@ -3994,7 +3994,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
* Refuse to inline if the arguments contain any volatile functions or
* sub-selects. Volatile functions are rejected because inlining may
* result in the arguments being evaluated multiple times, risking a
- * change in behavior. Sub-selects are rejected partly for implementation
+ * change in behavior. Sub-selects are rejected partly for implementation
* reasons (pushing them down another level might change their behavior)
* and partly because they're likely to be expensive and so multiple
* evaluation would be bad.
@@ -4019,7 +4019,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. In particular it mustn't be declared STRICT, since we
+ * properties. In particular it mustn't be declared STRICT, since we
* couldn't enforce that. It also mustn't be VOLATILE, because that is
* supposed to cause it to be executed with its own snapshot, rather than
* sharing the snapshot of the calling query. (The nargs check is just
@@ -4108,7 +4108,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* RelabelType(s) and/or NULL columns if needed to make the tlist
* expression(s) match the declared type of the function.
*
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index 4fd5075f84..267df732e9 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -73,7 +73,7 @@ have_relevant_joinclause(PlannerInfo *root,
* Add 'restrictinfo' to the joininfo list of each relation it requires.
*
* Note that the same copy of the restrictinfo node is linked to by all the
- * lists it is in. This allows us to exploit caching of information about
+ * lists it is in. This allows us to exploit caching of information about
* the restriction clause (but we must be careful that the information does
* not depend on context).
*
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index b0358cb112..3f506ae3b0 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -284,7 +284,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
/*
* If the two paths compare differently for startup and total cost,
* then we want to keep both, and we can skip the (much slower)
- * comparison of pathkeys. If they compare the same, proceed with the
+ * comparison of pathkeys. If they compare the same, proceed with the
* pathkeys comparison. Note: this test relies on the fact that
* compare_fuzzy_path_costs will only return 0 if both costs are
* effectively equal (and, therefore, there's no need to call it twice
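
An illustrative sketch of that test, not the actual add_path code (the 1% fuzz factor below is an assumption for the sketch): compare startup and total cost fuzzily, and if one path wins on startup while the other wins on total, keep both without comparing pathkeys:

#include <stdio.h>

static int fuzzy_cmp(double a, double b)
{
    if (a > b * 1.01) return +1;    /* a meaningfully worse */
    if (b > a * 1.01) return -1;    /* b meaningfully worse */
    return 0;                       /* effectively equal */
}

int main(void)
{
    double s1 = 10.0, t1 = 500.0;   /* fast-start path */
    double s2 = 90.0, t2 = 300.0;   /* cheaper-overall path */
    int cs = fuzzy_cmp(s1, s2);
    int ct = fuzzy_cmp(t1, t2);
    if (cs != 0 && ct != 0 && cs != ct)
        puts("costs disagree: keep both paths, skip pathkeys check");
    else
        puts("costs agree or tie: fall through to pathkeys comparison");
    return 0;
}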
@@ -1076,7 +1076,7 @@ translate_sub_tlist(List *tlist, int relid)
*
* colnos is an integer list of output column numbers (resno's). We are
* interested in whether rows consisting of just these columns are certain
- * to be distinct. "Distinctness" is defined according to whether the
+ * to be distinct. "Distinctness" is defined according to whether the
* corresponding upper-level equality operators listed in opids would think
* the values are distinct. (Note: the opids entries could be cross-type
* operators, and thus not exactly the equality operators that the subquery
@@ -1197,7 +1197,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
* distinct_col_search - subroutine for query_is_distinct_for
*
* If colno is in colnos, return the corresponding element of opids,
- * else return InvalidOid. (We expect colnos does not contain duplicates,
+ * else return InvalidOid. (We expect colnos does not contain duplicates,
* so the result is well-defined.)
*/
static Oid
@@ -1421,7 +1421,7 @@ create_mergejoin_path(PlannerInfo *root,
/*
* We expect the materialize won't spill to disk (it could only do so
* if there were a whole lot of duplicate tuples, which is a case
- * cost_mergejoin will avoid choosing anyway). Therefore
+ * cost_mergejoin will avoid choosing anyway). Therefore
* cost_material's cost estimate is bogus and we should charge just
* cpu_tuple_cost per tuple. (Keep this estimate in sync with similar
* ones in cost_mergejoin and create_mergejoin_plan.)
@@ -1483,10 +1483,10 @@ create_hashjoin_path(PlannerInfo *root,
/*
* A hashjoin never has pathkeys, since its output ordering is
- * unpredictable due to possible batching. XXX If the inner relation is
+ * unpredictable due to possible batching. XXX If the inner relation is
* small enough, we could instruct the executor that it must not batch,
* and then we could assume that the output inherits the outer relation's
- * ordering, which might save a sort step. However there is considerable
+ * ordering, which might save a sort step. However there is considerable
* downside if our estimate of the inner relation size is badly off. For
* the moment we don't risk it. (Note also that if we wanted to take this
* seriously, joinpath.c would have to consider many more paths for the
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index f275b1df24..b679075f27 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -189,7 +189,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
/*
* Allocate per-column info arrays. To save a few palloc cycles
- * we allocate all the Oid-type arrays in one request. Note that
+ * we allocate all the Oid-type arrays in one request. Note that
* the opfamily array needs an extra, terminating zero at the end.
* We pre-zero the ordering info in case the index is unordered.
*/
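
The one-request allocation trick mentioned there is a generic C pattern; a hedged stand-alone sketch (array names and sizes are invented): several same-typed arrays carved out of a single calloc, with one extra zeroed slot left at the end of the first array as a terminator:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Oid;

int main(void)
{
    int ncolumns = 3;
    /* opfamily gets ncolumns + 1 slots (terminating zero); the other
     * two arrays get ncolumns each -- all in one request. */
    Oid *block = calloc(3 * ncolumns + 1, sizeof(Oid));
    Oid *opfamily = block;
    Oid *opcintype = block + ncolumns + 1;
    Oid *sortop = opcintype + ncolumns;

    for (int i = 0; i < ncolumns; i++)
        opfamily[i] = 1000 + i;                 /* fake per-column data */
    printf("terminator = %u\n", opfamily[ncolumns]);    /* prints 0 */
    (void) sortop;
    free(block);
    return 0;
}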
@@ -353,7 +353,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* kluge things here instead.
*
* We approximate "never vacuumed" by "has relpages = 0", which
- * means this will also fire on genuinely empty relations. Not
+ * means this will also fire on genuinely empty relations. Not
* great, but fortunately that's a seldom-seen case in the real
* world, and it shouldn't degrade the quality of the plan too
* much anyway to err in this direction.
@@ -608,7 +608,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
return false;
/*
- * OK to fetch the constraint expressions. Include "col IS NOT NULL"
+ * OK to fetch the constraint expressions. Include "col IS NOT NULL"
* expressions for attnotnull columns, in case we can refute those.
*/
constraint_pred = get_relation_constraints(root, rte->relid, rel, true);
@@ -656,7 +656,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
* Exception: if there are any dropped columns, we punt and return NIL.
* Ideally we would like to handle the dropped-column case too. However this
* creates problems for ExecTypeFromTL, which may be asked to build a tupdesc
- * for a tlist that includes vars of no-longer-existent types. In theory we
+ * for a tlist that includes vars of no-longer-existent types. In theory we
* could dig out the required info from the pg_attribute entries of the
* relation, but that data is not readily available to ExecTypeFromTL.
* For now, we don't apply the physical-tlist optimization when there are
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 9e30340758..ccfd71dcff 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -135,7 +135,7 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -193,7 +193,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -227,7 +227,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
- * An "atom" is anything other than an AND or OR node. Notice that we don't
+ * An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
@@ -822,7 +822,7 @@ predicate_classify(Node *clause, PredIterInfo info)
}
/*
- * PredIterInfo routines for iterating over regular Lists. The iteration
+ * PredIterInfo routines for iterating over regular Lists. The iteration
* state variable is the next ListCell to visit.
*/
static void
@@ -1011,13 +1011,13 @@ arrayexpr_cleanup_fn(PredIterInfo info)
* implies another:
*
* A simple and general way is to see if they are equal(); this works for any
- * kind of expression. (Actually, there is an implied assumption that the
+ * kind of expression. (Actually, there is an implied assumption that the
* functions in the expression are immutable, ie dependent only on their input
* arguments --- but this was checked for the predicate by the caller.)
*
* When the predicate is of the form "foo IS NOT NULL", we can conclude that
* the predicate is implied if the clause is a strict operator or function
- * that has "foo" as an input. In this case the clause must yield NULL when
+ * that has "foo" as an input. In this case the clause must yield NULL when
* "foo" is NULL, which we can take as equivalent to FALSE because we know
* we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is
* already known immutable, so the clause will certainly always fail.)
@@ -1239,7 +1239,7 @@ list_member_strip(List *list, Expr *datum)
*
* The strategy numbers defined by btree indexes (see access/skey.h) are:
* (1) < (2) <= (3) = (4) >= (5) >
- * and in addition we use (6) to represent <>. <> is not a btree-indexable
+ * and in addition we use (6) to represent <>. <> is not a btree-indexable
* operator, but we assume here that if an equality operator of a btree
* opfamily has a negator operator, the negator behaves as <> for the opfamily.
*
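
How implications between those strategy numbers can be derived is easy to show in miniature. The sketch below is not the real implication table; it brute-forces, for one shared constant, which clauses "x OP1 c" imply which predicates "x OP2 c", using the numbering from the comment (1 <, 2 <=, 3 =, 4 >=, 5 >, 6 <>):

#include <stdio.h>

static int apply(int strat, int x, int c)
{
    switch (strat)
    {
        case 1: return x < c;
        case 2: return x <= c;
        case 3: return x == c;
        case 4: return x >= c;
        case 5: return x > c;
        default: return x != c;     /* 6: <> */
    }
}

int main(void)
{
    static const char *name[] = {"<", "<=", "=", ">=", ">", "<>"};
    int c = 0;

    for (int s1 = 1; s1 <= 6; s1++)
        for (int s2 = 1; s2 <= 6; s2++)
        {
            int implies = 1;
            for (int x = -2; x <= 2; x++)   /* small brute force */
                if (apply(s1, x, c) && !apply(s2, x, c))
                    implies = 0;
            if (implies && s1 != s2)
                printf("x %s 0 implies x %s 0\n",
                       name[s1 - 1], name[s2 - 1]);
        }
    return 0;
}

It prints, e.g., that < implies <= and <>, that = implies <= and >=, matching the intuition the comment appeals to.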
@@ -1322,7 +1322,7 @@ static const StrategyNumber BT_refute_table[6][6] = {
* if not able to prove it.
*
* What we look for here is binary boolean opclauses of the form
- * "foo op constant", where "foo" is the same in both clauses. The operators
+ * "foo op constant", where "foo" is the same in both clauses. The operators
* and constants can be different but the operators must be in the same btree
* operator family. We use the above operator implication tables to
* derive implications between nonidentical clauses. (Note: "foo" is known
@@ -1410,7 +1410,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
/*
* Check for matching subexpressions on the non-Const sides. We used to
* only allow a simple Var, but it's about as easy to allow any
- * expression. Remember we already know that the pred expression does not
+ * expression. Remember we already know that the pred expression does not
* contain any non-immutable functions, so identical expressions should
* yield identical results.
*/
@@ -1748,7 +1748,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
* Last check: test_op must be immutable.
*
* Note that we require only the test_op to be immutable, not the
- * original clause_op. (pred_op is assumed to have been checked
+ * original clause_op. (pred_op is assumed to have been checked
* immutable by the caller.) Essentially we are assuming that the
* opfamily is consistent even if it contains operators that are
* merely stable.
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 4ca3eeaaf2..cf3a8d6293 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -225,7 +225,7 @@ RelOptInfo *
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
- * Switch to using hash lookup when list grows "too long". The threshold
+ * Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
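
The lazy switch-over pattern generalizes beyond the planner. A hedged stand-alone sketch (a sorted copy plus bsearch stands in for the hash table the real code builds; names and the threshold placement are illustrative): lookups scan a plain array while it is short, and the faster structure is built once, on the first lookup past the threshold:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define THRESHOLD 32

static int cmp_int(const void *a, const void *b)
{
    return *(const int *) a - *(const int *) b;
}

static int lookup(int *items, int n, int **index, int key)
{
    if (*index == NULL && n > THRESHOLD)
    {
        /* First lookup past the threshold: build the index once. */
        *index = malloc(n * sizeof(int));
        memcpy(*index, items, n * sizeof(int));
        qsort(*index, n, sizeof(int), cmp_int);
    }
    if (*index != NULL)
        return bsearch(&key, *index, n, sizeof(int), cmp_int) != NULL;
    for (int i = 0; i < n; i++)         /* short list: linear scan */
        if (items[i] == key)
            return 1;
    return 0;
}

int main(void)
{
    int items[100], *index = NULL;
    for (int i = 0; i < 100; i++)
        items[i] = i * 3;
    printf("%d %d\n", lookup(items, 100, &index, 33),   /* 1: present */
           lookup(items, 100, &index, 34));             /* 0: absent */
    free(index);
    return 0;
}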
@@ -475,7 +475,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* the join list need only be computed once for any join RelOptInfo.
* The join list is fully determined by the set of rels making up the
* joinrel, so we should get the same results (up to ordering) from any
- * candidate pair of sub-relations. But the restriction list is whatever
+ * candidate pair of sub-relations. But the restriction list is whatever
* is not handled in the sub-relations, so it depends on which
* sub-relations are considered.
*
@@ -484,7 +484,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* we put it into the joininfo list for the joinrel. Otherwise,
* the clause is now a restrict clause for the joined relation, and we
* return it to the caller of build_joinrel_restrictlist() to be stored in
- * join paths made from this pair of sub-relations. (It will not need to
+ * join paths made from this pair of sub-relations. (It will not need to
* be considered further up the join tree.)
*
* In many case we will find the same RestrictInfos in both input
@@ -503,7 +503,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
*
* NB: Formerly, we made deep(!) copies of each input RestrictInfo to pass
* up to the join relation. I believe this is no longer necessary, because
- * RestrictInfo nodes are no longer context-dependent. Instead, just include
+ * RestrictInfo nodes are no longer context-dependent. Instead, just include
* the original nodes in the lists made for the join relation.
*/
static List *
@@ -523,7 +523,7 @@ build_joinrel_restrictlist(PlannerInfo *root,
result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result);
/*
- * Add on any clauses derived from EquivalenceClasses. These cannot be
+ * Add on any clauses derived from EquivalenceClasses. These cannot be
* redundant with the clauses in the joininfo lists, so don't bother
* checking.
*/
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 565b81e605..da9c3f8684 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -152,7 +152,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -249,7 +249,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
* We know that the index predicate must have been implied by
* the query condition as a whole, but it may or may not be
* implied by the conditions that got pushed into the
- * bitmapqual. Avoid generating redundant conditions.
+ * bitmapqual. Avoid generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), result))
result = lappend(result,
@@ -391,7 +391,7 @@ make_restrictinfo_internal(Expr *clause,
/*
* Fill in all the cacheable fields with "not yet set" markers. None of
- * these will be computed until/unless needed. Note in particular that we
+ * these will be computed until/unless needed. Note in particular that we
* don't mark a binary opclause as mergejoinable or hashjoinable here;
* that happens only if it appears in the right context (top level of a
* joinclause list).
@@ -669,7 +669,7 @@ select_nonredundant_join_clauses(PlannerInfo *root,
* OK because we're only trying to prove we can dispense with some
* join quals; failing to prove that doesn't result in an incorrect
* plan. It's quite unlikely that a join qual could be proven
- * redundant by an index predicate anyway. (Also, if we did manage to
+ * redundant by an index predicate anyway. (Also, if we did manage to
* prove it, we'd have to have a special case for update targets; see
* notes about EvalPlanQual testing in create_indexscan_plan().)
*/
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index 2f9b36e42f..232c379f84 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -28,7 +28,7 @@
/*
* tlist_member
* Finds the (first) member of the given tlist whose expression is
- * equal() to the given expression. Result is NULL if no such member.
+ * equal() to the given expression. Result is NULL if no such member.
*/
TargetEntry *
tlist_member(Node *node, List *targetlist)
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index 705bf243d9..dbd9499750 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -260,7 +260,7 @@ contain_var_clause_walker(Node *node, void *context)
*
* Returns true if any such Var found.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_of_level(Node *node, int levelsup)
@@ -320,10 +320,10 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up)
* Find the parse location of any Var of the specified query level.
*
* Returns -1 if no such Var is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*
* Note: it might seem appropriate to merge this functionality into
* contain_vars_of_level, but that would complicate that function's API.
@@ -396,7 +396,7 @@ locate_var_of_level_walker(Node *node,
* Returns -1 if no such Var is in the querytree, or if they all have
* unknown parse location.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
int
locate_var_of_relation(Node *node, int relid, int levelsup)
@@ -470,7 +470,7 @@ locate_var_of_relation_walker(Node *node,
*
* -1 is returned if the clause has no variables at all.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
int
find_minimum_var_level(Node *node)
@@ -627,7 +627,7 @@ find_minimum_var_level_walker(Node *node,
* Upper-level vars (with varlevelsup > 0) are not included.
* (These probably represent errors too, but we don't complain.)
*
- * Returns list of nodes found. Note the nodes themselves are not
+ * Returns list of nodes found. Note the nodes themselves are not
* copied, only referenced.
*
* Does not examine subqueries, therefore must only be used after reduction
@@ -682,7 +682,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
* flatten_join_alias_vars
* Replace Vars that reference JOIN outputs with references to the original
* relation variables instead. This allows quals involving such vars to be
- * pushed down. Whole-row Vars that reference JOIN relations are expanded
+ * pushed down. Whole-row Vars that reference JOIN relations are expanded
* into RowExpr constructs that name the individual output Vars. This
* is necessary since we will not scan the JOIN as a base relation, which
* is the only way that the executor can directly handle whole-row Vars.
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index e4b41c7431..370cef74b3 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -73,7 +73,7 @@ static bool check_parameter_resolution_walker(Node *node, ParseState *pstate);
* Optionally, information about $n parameter types can be supplied.
* References to $n indexes not defined by paramTypes[] are disallowed.
*
- * The result is a Query node. Optimizable statements require considerable
+ * The result is a Query node. Optimizable statements require considerable
* transformation, while utility-type statements are simply hung off
* a dummy CMD_UTILITY Query node.
*/
@@ -371,7 +371,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
/*
* If a non-nil rangetable/namespace was passed in, and we are doing
* INSERT/SELECT, arrange to pass the rangetable/namespace down to the
- * SELECT. This can only happen if we are inside a CREATE RULE, and in
+ * SELECT. This can only happen if we are inside a CREATE RULE, and in
* that case we want the rule's OLD and NEW rtable entries to appear as
* part of the SELECT's rtable, not as outer references for it. (Kluge!)
* The SELECT's joinlist is not affected however. We must do this before
@@ -588,7 +588,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* Another thing we can't currently support is NEW/OLD references in
* rules --- seems we'd need something like SQL99's LATERAL construct
* to ensure that the values would be available while evaluating the
- * VALUES RTE. This is a shame. FIXME
+ * VALUES RTE. This is a shame. FIXME
*/
if (list_length(pstate->p_rtable) != 1 &&
contain_vars_of_level((Node *) exprsLists, 0))
@@ -624,7 +624,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* INSERT INTO foo VALUES(bar.*)
*
* The sublist is just computed directly as the Query's targetlist,
- * with no VALUES RTE. So it works just like SELECT without FROM.
+ * with no VALUES RTE. So it works just like SELECT without FROM.
*----------
*/
List *valuesLists = selectStmt->valuesLists;
@@ -737,7 +737,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* Check length of expr list. It must not have more expressions than
* there are target columns. We allow fewer, but only if no explicit
* columns list was given (the remaining columns are implicitly
- * defaulted). Note we must check this *after* transformation because
+ * defaulted). Note we must check this *after* transformation because
* that could expand '*' into multiple items.
*/
if (list_length(exprlist) > list_length(icolumns))
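
A toy version of that length check, for illustration only (the function name and flag are invented): more expressions than target columns is always an error, and fewer is allowed only when the column list was implicit, since the remaining columns are then defaulted:

#include <stdio.h>

static const char *check_insert_row(int nexprs, int ncols, int explicit_cols)
{
    if (nexprs > ncols)
        return "error: too many expressions";
    if (nexprs < ncols && explicit_cols)
        return "error: fewer expressions than explicit columns";
    return "ok";
}

int main(void)
{
    printf("%s\n", check_insert_row(2, 3, 0));  /* ok: defaults fill in */
    printf("%s\n", check_insert_row(2, 3, 1));  /* error */
    printf("%s\n", check_insert_row(4, 3, 0));  /* error */
    return 0;
}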
@@ -1282,7 +1282,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* As a first step towards supporting sort clauses that are expressions
* using the output columns, generate a varnamespace entry that makes the
- * output columns visible. A Join RTE node is handy for this, since we
+ * output columns visible. A Join RTE node is handy for this, since we
* can easily control the Vars generated upon matches.
*
* Note: we don't yet do anything useful with such cases, but at least
diff --git a/src/backend/parser/kwlookup.c b/src/backend/parser/kwlookup.c
index 7321a57c15..46d60df31c 100644
--- a/src/backend/parser/kwlookup.c
+++ b/src/backend/parser/kwlookup.c
@@ -53,7 +53,7 @@ ScanKeywordLookup(const char *text)
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
* produce the wrong translation in some locales (eg, Turkish).
*/
for (i = 0; i < len; i++)
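
The locale hazard is concrete: in a Turkish locale, tolower('I') yields dotless i rather than 'i', so keyword matching would fail. A minimal stand-alone sketch of the ASCII-only downcasing the comment calls for (illustrative, not the ScanKeywordLookup code): only the bytes 'A'..'Z' are shifted, everything else passes through:

#include <stdio.h>

static void ascii_downcase(char *s)
{
    for (; *s; s++)
        if (*s >= 'A' && *s <= 'Z')
            *s += 'a' - 'A';    /* locale-independent shift */
}

int main(void)
{
    char word[] = "SELECT";
    ascii_downcase(word);
    printf("%s\n", word);       /* prints "select" in every locale */
    return 0;
}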
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 0594396921..efb996d702 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -43,7 +43,7 @@ static bool check_ungrouped_columns_walker(Node *node,
* Finish initial transformation of an aggregate call
*
* parse_func.c has recognized the function as an aggregate, and has set
- * up all the fields of the Aggref except agglevelsup. Here we must
+ * up all the fields of the Aggref except agglevelsup. Here we must
* determine which query level the aggregate actually belongs to, set
* agglevelsup accordingly, and mark p_hasAggs true in the corresponding
* pstate level.
@@ -62,7 +62,7 @@ transformAggregateCall(ParseState *pstate, Aggref *agg)
/*
* An aggregate can't directly contain another aggregate call of the same
- * level (though outer aggs are okay). We can skip this check if we
+ * level (though outer aggs are okay). We can skip this check if we
* didn't find any local vars or aggs.
*/
if (min_varlevel == 0)
@@ -276,7 +276,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* If there are join alias vars involved, we have to flatten them to the
* underlying vars, so that aliased and unaliased vars will be correctly
- * taken as equal. We can skip the expense of doing this if no rangetable
+ * taken as equal. We can skip the expense of doing this if no rangetable
* entries are RTE_JOIN kind. We use the planner's flatten_join_alias_vars
* routine to do the flattening; it wants a PlannerInfo root node, which
* fortunately can be mostly dummy.
@@ -314,7 +314,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
*
* Note: because we check resjunk tlist elements as well as regular ones,
* this will also find ungrouped variables that came from ORDER BY and
- * WINDOW clauses. For that matter, it's also going to examine the
+ * WINDOW clauses. For that matter, it's also going to examine the
* grouping expressions themselves --- but they'll all pass the test ...
*/
clause = (Node *) qry->targetList;
@@ -345,7 +345,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
* Check for window functions where they shouldn't be.
*
* We have to forbid window functions in WHERE, JOIN/ON, HAVING, GROUP BY,
- * and window specifications. (Other clauses, such as RETURNING and LIMIT,
+ * and window specifications. (Other clauses, such as RETURNING and LIMIT,
* have already been checked.) Transformation of all these clauses must
* be completed already.
*/
@@ -501,7 +501,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* If we have an ungrouped Var of the original query level, we have a
* failure. Vars below the original query level are not a problem, and
- * neither are Vars from above it. (If such Vars are ungrouped as far as
+ * neither are Vars from above it. (If such Vars are ungrouped as far as
* their own query level is concerned, that's someone else's problem...)
*/
if (IsA(node, Var))
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index bdd92a4829..e8e5ade1d2 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -147,7 +147,7 @@ transformFromClause(ParseState *pstate, List *frmList)
*
* If alsoSource is true, add the target to the query's joinlist and
* namespace. For INSERT, we don't want the target to be joined to;
- * it's a destination of tuples, not a source. For UPDATE/DELETE,
+ * it's a destination of tuples, not a source. For UPDATE/DELETE,
* we do need to scan or join the target. (NOTE: we do not bother
* to check for namespace conflict; we assume that the namespace was
* initially empty in these cases.)
@@ -213,7 +213,7 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
* Simplify InhOption (yes/no/default) into boolean yes/no.
*
* The reason we do things this way is that we don't want to examine the
- * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
+ * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
* we'd do the wrong thing with query strings that intermix SET commands
* with queries.
*/
@@ -404,7 +404,7 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
* rels outside the input subtrees of the JOIN. It could do that despite
* our hack on the namespace if it uses fully-qualified names. So, grovel
* through the transformed clause and make sure there are no bogus
- * references. (Outer references are OK, and are ignored here.)
+ * references. (Outer references are OK, and are ignored here.)
*/
clause_varnos = pull_varnos(result);
clause_varnos = bms_del_members(clause_varnos, containedRels);
@@ -483,7 +483,7 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
isLockedRefname(pstate, r->alias->aliasname));
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(query, Query) ||
@@ -541,7 +541,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* Get function name for possible use as alias. We use the same
- * transformation rules as for a SELECT output expression. For a FuncCall
+ * transformation rules as for a SELECT output expression. For a FuncCall
* node, the result will be the function name, but it is possible for the
* grammar to hand back other node types.
*/
@@ -554,7 +554,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* The function parameters cannot make use of any variables from other
- * FROM items. (Compare to transformRangeSubselect(); the coding is
+ * FROM items. (Compare to transformRangeSubselect(); the coding is
* different though because we didn't parse as a sub-select with its own
* level of namespace.)
*
@@ -572,7 +572,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
}
/*
- * Disallow aggregate functions in the expression. (No reason to postpone
+ * Disallow aggregate functions in the expression. (No reason to postpone
* this check until parseCheckAggregates.)
*/
if (pstate->p_hasAggs &&
@@ -630,7 +630,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* (We could extract this from the function return node, but it saves cycles
* to pass it back separately.)
*
- * *top_rti: receives the rangetable index of top_rte. (Ditto.)
+ * *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *relnamespace: receives a List of the RTEs exposed as relation names
* by this item.
@@ -1245,7 +1245,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
*
* 1. Bare ColumnName (no qualifier or subscripts)
* For a bare identifier, we search for a matching column name
- * in the existing target list. Multiple matches are an error
+ * in the existing target list. Multiple matches are an error
* unless they refer to identical values; for example,
* we allow SELECT a, a FROM table ORDER BY a
* but not SELECT a AS b, b FROM table ORDER BY b
@@ -1254,7 +1254,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
* For GROUP BY, it is incorrect to match the grouping item against
* targetlist entries: according to SQL92, an identifier in GROUP BY
* is a reference to a column name exposed by FROM, not to a target
- * list column. However, many implementations (including pre-7.0
+ * list column. However, many implementations (including pre-7.0
* PostgreSQL) accept this anyway. So for GROUP BY, we look first
* to see if the identifier matches any FROM column name, and only
* try for a targetlist name if it doesn't. This ensures that we
@@ -1417,7 +1417,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist)
/*
* If no matches, construct a new target entry which is appended to the
- * end of the target list. This target is given resjunk = TRUE so that it
+ * end of the target list. This target is given resjunk = TRUE so that it
* will not be projected into the final tuple.
*/
target_result = transformTargetEntry(pstate, node, expr, NULL, true);
@@ -1622,7 +1622,7 @@ transformWindowDefinitions(ParseState *pstate,
* <window clause> syntax rule 10 and general rule 1. The frame
* clause rule is especially bizarre because it makes "OVER foo"
* different from "OVER (foo)", and requires the latter to throw an
- * error if foo has a nondefault frame clause. Well, ours not to
+ * error if foo has a nondefault frame clause. Well, ours not to
* reason why, but we do go out of our way to throw a useful error
* message for such cases.
*/
@@ -1716,7 +1716,7 @@ transformDistinctClause(ParseState *pstate,
/*
* The distinctClause should consist of all ORDER BY items followed by all
- * other non-resjunk targetlist items. There must not be any resjunk
+ * other non-resjunk targetlist items. There must not be any resjunk
* ORDER BY items --- that would imply that we are sorting by a value that
* isn't necessarily unique within a DISTINCT group, so the results
* wouldn't be well-defined. This construction ensures we follow the rule
@@ -1837,7 +1837,7 @@ transformDistinctOnClause(ParseState *pstate, List *distinctlist,
/*
* Now add any remaining DISTINCT ON items, using default sort/group
- * semantics for their data types. (Note: this is pretty questionable; if
+ * semantics for their data types. (Note: this is pretty questionable; if
* the ORDER BY list doesn't include all the DISTINCT ON items and more
* besides, you certainly aren't using DISTINCT ON in the intended way,
* and you probably aren't going to get consistent results. It might be
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 6a627a7cbe..989b3b1dd4 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -55,12 +55,12 @@ static bool is_complex_array(Oid typid);
* Convert an expression to a target type and typmod.
*
* This is the general-purpose entry point for arbitrary type coercion
- * operations. Direct use of the component operations can_coerce_type,
+ * operations. Direct use of the component operations can_coerce_type,
* coerce_type, and coerce_type_typmod should be restricted to special
* cases (eg, when the conversion is expected to succeed).
*
* Returns the possibly-transformed expression tree, or NULL if the type
- * conversion is not possible. (We do this, rather than ereport'ing directly,
+ * conversion is not possible. (We do this, rather than ereport'ing directly,
* so that callers can generate custom error messages indicating context.)
*
* pstate - parse state (can be NULL, see coerce_type)
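[A minimal caller sketch of the contract above, modeled on typical parse-analysis call sites; expr, exprtype, targettype, targettypmod, colname, and location are placeholders, not quotes from any one caller:]

    Node   *newexpr;

    newexpr = coerce_to_target_type(pstate, expr, exprtype,
                                    targettype, targettypmod,
                                    COERCION_ASSIGNMENT,
                                    COERCE_IMPLICIT_CAST,
                                    location);
    if (newexpr == NULL)            /* no conversion path exists */
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("column \"%s\" is of type %s but expression is of type %s",
                        colname,
                        format_type_be(targettype),
                        format_type_be(exprtype)),
                 parser_errposition(pstate, exprLocation(expr))));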
@@ -118,7 +118,7 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
* already be properly coerced to the specified typmod.
*
* pstate is only used in the case that we are able to resolve the type of
- * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
+ * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
* caller does not want type information updated for Params.
*/
Node *
@@ -147,7 +147,7 @@ coerce_type(ParseState *pstate, Node *node,
*
* Note: by returning the unmodified node here, we are saying that
* it's OK to treat an UNKNOWN constant as a valid input for a
- * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
+ * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
* all right, since an UNKNOWN value is still a perfectly valid Datum.
* However an UNKNOWN value is definitely *not* an array, and so we
* mustn't accept it for ANYARRAY. (Instead, we will call anyarray_in
@@ -187,7 +187,7 @@ coerce_type(ParseState *pstate, Node *node,
/*
* If the target type is a domain, we want to call its base type's
- * input routine, not domain_in(). This is to avoid premature failure
+ * input routine, not domain_in(). This is to avoid premature failure
* when the domain applies a typmod: existing input routines follow
* implicit-coercion semantics for length checks, which is not always
* what we want here. The needed check will be applied properly
@@ -200,7 +200,7 @@ coerce_type(ParseState *pstate, Node *node,
* For most types we pass typmod -1 to the input routine, because
* existing input routines follow implicit-coercion semantics for
* length checks, which is not always what we want here. Any length
- * constraint will be applied later by our caller. An exception
+ * constraint will be applied later by our caller. An exception
* however is the INTERVAL type, for which we *must* pass the typmod
* or it won't be able to obey the bizarre SQL-spec input rules. (Ugly
* as sin, but so is this part of the spec...)
@@ -331,7 +331,7 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* Generate an expression tree representing run-time application
- * of the conversion function. If we are dealing with a domain
+ * of the conversion function. If we are dealing with a domain
* target type, the conversion function will yield the base type,
* and we need to extract the correct typmod to use from the
* domain's typtypmod.
@@ -367,7 +367,7 @@ coerce_type(ParseState *pstate, Node *node,
* to have the intended type when inspected by higher-level code.
*
* Also, domains may have value restrictions beyond the base type
- * that must be accounted for. If the destination is a domain
+ * that must be accounted for. If the destination is a domain
* then we won't need a RelabelType node.
*/
result = coerce_to_domain(node, InvalidOid, -1, targetTypeId,
@@ -611,7 +611,7 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId,
}
/*
- * Now build the domain coercion node. This represents run-time checking
+ * Now build the domain coercion node. This represents run-time checking
* of any constraints currently attached to the domain. This also ensures
* that the expression is properly labeled as to result type.
*/
@@ -683,7 +683,7 @@ coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod,
* Mark a coercion node as IMPLICIT so it will never be displayed by
* ruleutils.c. We use this when we generate a nest of coercion nodes
* to implement what is logically one conversion; the inner nodes are
- * forced to IMPLICIT_CAST format. This does not change their semantics,
+ * forced to IMPLICIT_CAST format. This does not change their semantics,
* only display behavior.
*
* It is caller error to call this on something that doesn't have a
@@ -1138,7 +1138,7 @@ select_common_type(ParseState *pstate, List *exprs, const char *context,
}
/*
- * Nope, so set up for the full algorithm. Note that at this point, lc
+ * Nope, so set up for the full algorithm. Note that at this point, lc
* points to the first list item with type different from pexpr's; we need
* not re-examine any items the previous loop advanced over.
*/
@@ -1423,14 +1423,14 @@ check_generic_type_consistency(Oid *actual_arg_types,
*
* When allow_poly is false, we are not expecting any of the actual_arg_types
* to be polymorphic, and we should not return a polymorphic result type
- * either. When allow_poly is true, it is okay to have polymorphic "actual"
+ * either. When allow_poly is true, it is okay to have polymorphic "actual"
* arg types, and we can return ANYARRAY or ANYELEMENT as the result. (This
* case is currently used only to check compatibility of an aggregate's
* declaration with the underlying transfn.)
*
* A special case is that we could see ANYARRAY as an actual_arg_type even
* when allow_poly is false (this is possible only because pg_statistic has
- * columns shown as anyarray in the catalogs). We allow this to match a
+ * columns shown as anyarray in the catalogs). We allow this to match a
* declared ANYARRAY argument, but only if there is no ANYELEMENT argument
* or result (since we can't determine a specific element type to match to
* ANYELEMENT). Note this means that functions taking ANYARRAY had better
@@ -1513,7 +1513,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
/*
* Fast Track: if none of the arguments are polymorphic, return the
- * unmodified rettype. We assume it can't be polymorphic either.
+ * unmodified rettype. We assume it can't be polymorphic either.
*/
if (!have_generics)
return rettype;
@@ -1773,8 +1773,8 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* Check if srctype is binary-coercible to targettype.
*
* This notion allows us to cheat and directly exchange values without
- * going through the trouble of calling a conversion function. Note that
- * in general, this should only be an implementation shortcut. Before 7.4,
+ * going through the trouble of calling a conversion function. Note that
+ * in general, this should only be an implementation shortcut. Before 7.4,
* this was also used as a heuristic for resolving overloaded functions and
* operators, but that's basically a bad idea.
*
@@ -1787,7 +1787,7 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* types.
*
* This function replaces IsBinaryCompatible(), which was an inherently
- * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
+ * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
* the order of the operands is now significant.
*/
bool
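[Because the test is now directional, a caller that cares about both directions must ask twice; a tiny sketch with placeholder OIDs srctype and targettype:]

    bool    forward = IsBinaryCoercible(srctype, targettype);
    bool    reverse = IsBinaryCoercible(targettype, srctype);

    /* forward does not imply reverse: each direction depends on its own
     * pg_cast entry (or on a trivial case such as identical types). */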
@@ -1963,7 +1963,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
* Hack: disallow coercions to oidvector and int2vector, which
* otherwise tend to capture coercions that should go to "real" array
* types. We want those types to be considered "real" arrays for many
- * purposes, but not this one. (Also, ArrayCoerceExpr isn't
+ * purposes, but not this one. (Also, ArrayCoerceExpr isn't
* guaranteed to produce an output that meets the restrictions of
* these datatypes, such as being 1-dimensional.)
*/
diff --git a/src/backend/parser/parse_cte.c b/src/backend/parser/parse_cte.c
index 10ec1fe18b..ba4e319e0f 100644
--- a/src/backend/parser/parse_cte.c
+++ b/src/backend/parser/parse_cte.c
@@ -169,7 +169,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
checkWellFormedRecursion(&cstate);
/*
- * Set up the ctenamespace for parse analysis. Per spec, all the WITH
+ * Set up the ctenamespace for parse analysis. Per spec, all the WITH
* items are visible to all others, so stuff them all in before parse
* analysis. We build the list in safe processing order so that the
* planner can process the queries in sequence.
@@ -195,7 +195,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
{
/*
* For non-recursive WITH, just analyze each CTE in sequence and then
- * add it to the ctenamespace. This corresponds to the spec's
+ * add it to the ctenamespace. This corresponds to the spec's
* definition of the scope of each WITH name. However, to allow error
* reports to be aware of the possibility of an erroneous reference,
* we maintain a list in p_future_ctes of the not-yet-visible CTEs.
@@ -233,7 +233,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
cte->ctequery = (Node *) query;
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
* (These are the same checks as in transformRangeSubselect.)
*/
@@ -257,7 +257,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
{
/*
* Verify that the previously determined output column types match
- * what the query really produced. We have to check this because the
+ * what the query really produced. We have to check this because the
* recursive term could have overridden the non-recursive term, and we
* don't have any easy way to fix that.
*/
@@ -325,7 +325,7 @@ analyzeCTETargetList(ParseState *pstate, CommonTableExpr *cte, List *tlist)
/*
* We need to determine column names and types. The alias column names
- * override anything coming from the query itself. (Note: the SQL spec
+ * override anything coming from the query itself. (Note: the SQL spec
* says that the alias list must be empty or exactly as long as the output
* column set; but we allow it to be shorter for consistency with Alias
* handling.)
@@ -356,7 +356,7 @@ analyzeCTETargetList(ParseState *pstate, CommonTableExpr *cte, List *tlist)
/*
* If the CTE is recursive, force the exposed column type of any
- * "unknown" column to "text". This corresponds to the fact that
+ * "unknown" column to "text". This corresponds to the fact that
* SELECT 'foo' UNION SELECT 'bar' will ultimately produce text. We
* might see "unknown" as a result of an untyped literal in the
* non-recursive term's select list, and if we don't convert to text
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index df4024533a..f062580f14 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -455,7 +455,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
}
/*
- * Try to find the name as a relation. Note that only
+ * Try to find the name as a relation. Note that only
* relations already entered into the rangetable will be
* recognized.
*
@@ -971,7 +971,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
* We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is only
* possible if there is a suitable array type available. If not, we fall
* back to a boolean condition tree with multiple copies of the lefthand
- * expression. Also, any IN-list items that contain Vars are handled as
+ * expression. Also, any IN-list items that contain Vars are handled as
* separate boolean conditions, because that gives the planner more scope
* for optimization on such clauses.
*
@@ -1002,7 +1002,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
Oid array_type;
/*
- * Try to select a common type for the array elements. Note that
+ * Try to select a common type for the array elements. Note that
* since the LHS' type is first in the list, it will be preferred when
* there is doubt (eg, when all the RHS items are unknown literals).
*
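[A conceptual illustration of the rewrite described above, not a quote of the actual code:]

    /*
     * Given "x IN (1, 2, y)", the Var-free items can collapse into one
     * array comparison while the Var item stays a separate OR arm:
     *
     *     x = ANY (ARRAY[1, 2])  OR  x = y
     *
     * select_common_type() picks the array element type for the ARRAY[]
     * items, preferring the LHS type when the RHS literals are unknown.
     */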
@@ -1265,7 +1265,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
qtree = parse_sub_analyze(sublink->subselect, pstate, NULL, false);
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(qtree, Query) ||
@@ -1670,7 +1670,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
newx->location = x->location;
/*
- * gram.y built the named args as a list of ResTarget. Transform each,
+ * gram.y built the named args as a list of ResTarget. Transform each,
* and break the names out as a separate list.
*/
newx->named_args = NIL;
@@ -1986,8 +1986,8 @@ transformWholeRowRef(ParseState *pstate, char *schemaname, char *relname,
default:
/*
- * RTE is a join or subselect. We represent this as a whole-row
- * Var of RECORD type. (Note that in most cases the Var will be
+ * RTE is a join or subselect. We represent this as a whole-row
+ * Var of RECORD type. (Note that in most cases the Var will be
* expanded to a RowExpr during planning, but that is not our
* concern here.)
*/
@@ -2144,7 +2144,7 @@ make_row_comparison_op(ParseState *pstate, List *opname,
/*
* Now we must determine which row comparison semantics (= <> < <= > >=)
- * apply to this set of operators. We look for btree opfamilies
+ * apply to this set of operators. We look for btree opfamilies
* containing the operators, and see which interpretations (strategy
* numbers) exist for each operator.
*/
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index 310b9310e0..286658e58d 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -84,7 +84,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Most of the rest of the parser just assumes that functions do not have
- * more than FUNC_MAX_ARGS parameters. We have to test here to protect
+ * more than FUNC_MAX_ARGS parameters. We have to test here to protect
* against array overruns, etc. Of course, this may not be a function,
* but the test doesn't hurt.
*/
@@ -101,7 +101,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* Extract arg type info in preparation for function lookup.
*
* If any arguments are Param markers of type VOID, we discard them from
- * the parameter list. This is a hack to allow the JDBC driver to not
+ * the parameter list. This is a hack to allow the JDBC driver to not
* have to distinguish "input" and "output" parameter symbols while
* parsing function-call constructs. We can't use foreach() because we
* may modify the list ...
@@ -251,7 +251,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* If there are default arguments, we have to include their types in
* actual_arg_types for the purpose of checking generic type consistency.
* However, we do NOT put them into the generated parse node, because
- * their actual values might change before the query gets run. The
+ * their actual values might change before the query gets run. The
* planner has to insert the up-to-date values at plan time.
*/
nargsplusdefs = nargs;
@@ -344,7 +344,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (fargs == NIL && !agg_star)
ereport(ERROR,
@@ -397,7 +397,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (wfunc->winagg && fargs == NIL && !agg_star)
ereport(ERROR,
@@ -558,7 +558,7 @@ func_select_candidate(int nargs,
* matches" in the exact-match heuristic; it also makes it possible to do
* something useful with the type-category heuristics. Note that this
* makes it difficult, but not impossible, to use functions declared to
- * take a domain as an input datatype. Such a function will be selected
+ * take a domain as an input datatype. Such a function will be selected
* over the base-type function only if it is an exact match at all
* argument positions, and so was already chosen by our caller.
*/
@@ -667,7 +667,7 @@ func_select_candidate(int nargs,
* essentially a special case of the general algorithm we try next.
*
* We do this by examining each unknown argument position to see if we can
- * determine a "type category" for it. If any candidate has an input
+ * determine a "type category" for it. If any candidate has an input
* datatype of STRING category, use STRING category (this bias towards
* STRING is appropriate since unknown-type literals look like strings).
* Otherwise, if all the candidates agree on the type category of this
@@ -678,7 +678,7 @@ func_select_candidate(int nargs,
* the candidates takes a preferred datatype within the category.
*
* Having completed this examination, remove candidates that accept the
- * wrong category at any unknown position. Also, if at least one
+ * wrong category at any unknown position. Also, if at least one
* candidate accepted a preferred type at a position, remove candidates
* that accept non-preferred types.
*
@@ -889,7 +889,7 @@ func_get_detail(List *funcname,
*
* NB: it's important that this code does not exceed what coerce_type
* can do, because the caller will try to apply coerce_type if we
- * return FUNCDETAIL_COERCION. If we return that result for something
+ * return FUNCDETAIL_COERCION. If we return that result for something
* coerce_type can't handle, we'll cause infinite recursion between
* this module and coerce_type!
*/
@@ -1112,7 +1112,7 @@ FuncNameAsType(List *funcname)
* ParseComplexProjection -
* handles function calls with a single argument that is of complex type.
* If the function call is actually a column projection, return a suitably
- * transformed expression tree. If not, return NULL.
+ * transformed expression tree. If not, return NULL.
*/
static Node *
ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index 093b789d23..f56facd618 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -92,8 +92,8 @@ free_parsestate(ParseState *pstate)
* is a dummy (always 0, in fact).
*
* The locations stored in raw parsetrees are byte offsets into the source
- * string. We have to convert them to 1-based character indexes for reporting
- * to clients. (We do things this way to avoid unnecessary overhead in the
+ * string. We have to convert them to 1-based character indexes for reporting
+ * to clients. (We do things this way to avoid unnecessary overhead in the
* normal non-error case: computing character indexes would be much more
* expensive than storing token offsets.)
*/
@@ -122,7 +122,7 @@ parser_errposition(ParseState *pstate, int location)
* Sometimes the parser calls functions that aren't part of the parser
* subsystem and can't reasonably be passed a ParseState; yet we would
* like any errors thrown in those functions to be tagged with a parse
- * error location. Use this function to set up an error context stack
+ * error location. Use this function to set up an error context stack
* entry that will accomplish that. Usage pattern:
*
* declare a local variable "ParseCallbackState pcbstate"
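[Filled out, the pattern reads as below; a minimal sketch in which stringTypeDatum stands in for any function that might throw a parse-time error, and value/targetType/string/atttypmod are placeholders:]

    ParseCallbackState pcbstate;
    Datum   value;

    setup_parser_errposition_callback(&pcbstate, pstate, location);
    value = stringTypeDatum(targetType, string, atttypmod);    /* may ereport */
    cancel_parser_errposition_callback(&pcbstate);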
@@ -235,7 +235,7 @@ transformArrayType(Oid arrayType)
*
* In an array assignment, we are given a destination array value plus a
* source value that is to be assigned to a single element or a slice of
- * that array. We produce an expression that represents the new array value
+ * that array. We produce an expression that represents the new array value
* with the source data inserted into the right part of the array.
*
* pstate Parse state
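[A conceptual example of what "new array value" means here, illustrative rather than a quote of the code:]

    /*
     * UPDATE tab SET arr[2] = 42
     * parses to (roughly) an ArrayRef node with
     *     refexpr      = old value of arr
     *     refassgnexpr = 42
     * i.e. an expression yielding a complete new array value with 42
     * inserted at subscript 2, which then goes into the new tuple.
     */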
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 198a2bc4e9..8e1cdb53fc 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -483,7 +483,7 @@ oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId,
*
* This is tighter than oper() because it will not return an operator that
* requires coercion of the input datatypes (but binary-compatible operators
- * are accepted). Otherwise, the semantics are the same.
+ * are accepted). Otherwise, the semantics are the same.
*/
Operator
compatible_oper(ParseState *pstate, List *op, Oid arg1, Oid arg2,
@@ -1028,7 +1028,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
* mapping is pretty expensive to compute, especially for ambiguous operators;
* this is mainly because there are a *lot* of instances of popular operator
* names such as "=", and we have to check each one to see which is the
- * best match. So once we have identified the correct mapping, we save it
+ * best match. So once we have identified the correct mapping, we save it
* in a cache that need only be flushed on pg_operator or pg_cast change.
* (pg_cast must be considered because changes in the set of implicit casts
* affect the set of applicable operators for any given input datatype.)
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 39b49875a3..14357c3f2d 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -71,7 +71,7 @@ static void warnAutoRange(ParseState *pstate, RangeVar *relation);
*
* A qualified refname (schemaname != NULL) can only match a relation RTE
* that (a) has no alias and (b) is for the same relation identified by
- * schemaname.refname. In this case we convert schemaname.refname to a
+ * schemaname.refname. In this case we convert schemaname.refname to a
* relation OID and search by relid, rather than by alias name. This is
* peculiar, but it's what SQL92 says to do.
*/
@@ -150,7 +150,7 @@ scanNameSpaceForRefname(ParseState *pstate, const char *refname, int location)
/*
* Search the query's table namespace for a relation RTE matching the
- * given relation OID. Return the RTE if a unique match, or NULL
+ * given relation OID. Return the RTE if a unique match, or NULL
* if no match. Raise error if multiple matches (which shouldn't
* happen if the namespace was checked correctly when it was created).
*
@@ -343,7 +343,7 @@ checkNameSpaceConflicts(ParseState *pstate, List *namespace1,
/*
* given an RTE, return RT index (starting with 1) of the entry,
- * and optionally get its nesting depth (0 = current). If sublevels_up
+ * and optionally get its nesting depth (0 = current). If sublevels_up
* is NULL, only consider rels at the current nesting level.
* Raises error if RTE not found.
*/
@@ -924,7 +924,7 @@ addRangeTableEntry(ParseState *pstate,
/*
* Get the rel's OID. This access also ensures that we have an up-to-date
- * relcache entry for the rel. Since this is typically the first access
+ * relcache entry for the rel. Since this is typically the first access
* to a rel in a statement, be careful to get the right access level
* depending on whether we're doing SELECT FOR UPDATE/SHARE.
*/
@@ -2453,7 +2453,7 @@ warnAutoRange(ParseState *pstate, RangeVar *relation)
/*
* Check to see if there are any potential matches in the query's
- * rangetable. This affects the message we provide.
+ * rangetable. This affects the message we provide.
*/
rte = searchRangeTable(pstate, relation);
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index b56f3a095a..bf1c537a77 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -160,7 +160,7 @@ transformTargetList(ParseState *pstate, List *targetlist)
* This is the identical transformation to transformTargetList, except that
* the input list elements are bare expressions without ResTarget decoration,
* and the output elements are likewise just expressions without TargetEntry
- * decoration. We use this for ROW() and VALUES() constructs.
+ * decoration. We use this for ROW() and VALUES() constructs.
*/
List *
transformExpressionList(ParseState *pstate, List *exprlist)
@@ -329,7 +329,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
/*
* transformAssignedExpr()
- * This is used in INSERT and UPDATE statements only. It prepares an
+ * This is used in INSERT and UPDATE statements only. It prepares an
* expression for assignment to a column of the target table.
* This includes coercing the given value to the target column's type
* (if necessary), and dealing with any subfield names or subscripts
@@ -347,7 +347,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
*
* Note: location points at the target column name (SET target or INSERT
* column name list entry), and must therefore be -1 in an INSERT that
- * omits the column name list. So we should usually prefer to use
+ * omits the column name list. So we should usually prefer to use
* exprLocation(expr) for errors that can happen in a default INSERT.
*/
Expr *
@@ -407,7 +407,7 @@ transformAssignedExpr(ParseState *pstate,
/*
* If there is indirection on the target column, prepare an array or
- * subfield assignment expression. This will generate a new column value
+ * subfield assignment expression. This will generate a new column value
* that the source value has been inserted into, which can then be placed
* in the new tuple constructed by INSERT or UPDATE.
*/
@@ -510,7 +510,7 @@ updateTargetListEntry(ParseState *pstate,
/*
* Set the resno to identify the target column --- the rewriter and
- * planner depend on this. We also set the resname to identify the target
+ * planner depend on this. We also set the resname to identify the target
* column, but this is only for debugging purposes; it should not be
* relied on. (In particular, it might be out of date in a stored rule.)
*/
@@ -1116,7 +1116,7 @@ ExpandIndirectionStar(ParseState *pstate, A_Indirection *ind,
* Get the tuple descriptor for a Var of type RECORD, if possible.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
* the tupdesc from it. We ereport if we can't determine the tupdesc.
*
@@ -1198,7 +1198,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of ParseState
+ * to. We have to build an additional level of ParseState
* to keep in step with varlevelsup in the subselect.
*/
ParseState mypstate;
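[The recursion step that follows, abridged to the fields the recursion actually needs; rte here is the sub-select's rangetable entry:]

    MemSet(&mypstate, 0, sizeof(mypstate));
    mypstate.parentParseState = pstate;            /* one extra nesting level */
    mypstate.p_rtable = rte->subquery->rtable;     /* sub-select's rangetable */
    /* ... */
    return expandRecordVariable(&mypstate, (Var *) expr, 0);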
@@ -1275,7 +1275,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 295763fc48..0dd2ef728d 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -34,7 +34,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typename,
/*
* LookupTypeName
* Given a TypeName object, lookup the pg_type syscache entry of the type.
- * Returns NULL if no such type can be found. If the type is found,
+ * Returns NULL if no such type can be found. If the type is found,
* the typmod value represented in the TypeName struct is computed and
* stored into *typmod_p.
*
@@ -47,7 +47,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typename,
*
* typmod_p can be passed as NULL if the caller does not care to know the
* typmod value, but the typmod decoration (if any) will be validated anyway,
- * except in the case where the type is not found. Note that if the type is
+ * except in the case where the type is not found. Note that if the type is
* found but is a shell, and there is typmod decoration, an error will be
* thrown --- this is intentional.
*
@@ -483,7 +483,7 @@ typeTypeRelid(Type typ)
/*
* Given a type structure and a string, returns the internal representation
- * of that string. The "string" can be NULL to perform conversion of a NULL
+ * of that string. The "string" can be NULL to perform conversion of a NULL
* (which might result in failure, if the input function rejects NULLs).
*/
Datum
@@ -507,7 +507,7 @@ stringTypeDatum(Type tp, char *string, int32 atttypmod)
* instability in the input function is that comparison of Const nodes
* relies on bytewise comparison of the datums, so if the input function
* leaves garbage then subexpressions that should be identical may not get
- * recognized as such. See pgsql-hackers discussion of 2008-04-04.
+ * recognized as such. See pgsql-hackers discussion of 2008-04-04.
*/
if (string && !typform->typbyval)
{
@@ -556,7 +556,7 @@ pts_error_callback(void *arg)
/*
* Currently we just suppress any syntax error position report, rather
- * than transforming to an "internal query" error. It's unlikely that a
+ * than transforming to an "internal query" error. It's unlikely that a
* type name is complex enough to need positioning.
*/
errposition(0);
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 71281f0e2f..1c75f70cc6 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -150,7 +150,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
* If the target relation name isn't schema-qualified, make it so. This
* prevents some corner cases in which added-on rewritten commands might
* think they should apply to other relations that have the same name and
- * are earlier in the search path. "istemp" is equivalent to a
+ * are earlier in the search path. "istemp" is equivalent to a
* specification of pg_temp, so no need for anything extra in that case.
*/
if (stmt->relation->schemaname == NULL && !stmt->relation->istemp)
@@ -754,7 +754,7 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing the
+ * commit. That will prevent someone else from deleting or ALTERing the
* parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -1353,7 +1353,7 @@ transformFKConstraints(ParseState *pstate, CreateStmtContext *cxt,
* transformIndexStmt - parse analysis for CREATE INDEX
*
* Note: this is a no-op for an index not using either index expressions or
- * a predicate expression. There are several code paths that create indexes
+ * a predicate expression. There are several code paths that create indexes
* without bothering to call this, because they know they don't have any
* such expressions to deal with.
*
@@ -1457,7 +1457,7 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
/*
* To avoid deadlock, make sure the first thing we do is grab
- * AccessExclusiveLock on the target relation. This will be needed by
+ * AccessExclusiveLock on the target relation. This will be needed by
* DefineQueryRewrite(), and we don't want to grab a lesser lock
* beforehand.
*/
diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c
index ee77c38f86..25d1b9ab7a 100644
--- a/src/backend/parser/parser.c
+++ b/src/backend/parser/parser.c
@@ -96,7 +96,7 @@ pg_parse_string_token(const char *token)
* Intermediate filter between parser and base lexer (base_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
diff --git a/src/backend/port/darwin/system.c b/src/backend/port/darwin/system.c
index 9cdcbddae5..a2d5b955cb 100644
--- a/src/backend/port/darwin/system.c
+++ b/src/backend/port/darwin/system.c
@@ -24,7 +24,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c
index 4c24682e67..11335b12ce 100644
--- a/src/backend/port/dynloader/darwin.c
+++ b/src/backend/port/dynloader/darwin.c
@@ -47,7 +47,7 @@ pg_dlerror(void)
/*
* These routines were taken from the Apache source, but were made
- * available with a PostgreSQL-compatible license. Kudos Wilfredo
+ * available with a PostgreSQL-compatible license. Kudos Wilfredo
* Sánchez <wsanchez@apple.com>.
*/
diff --git a/src/backend/port/dynloader/freebsd.c b/src/backend/port/dynloader/freebsd.c
index b4000af489..dff95e9d6a 100644
--- a/src/backend/port/dynloader/freebsd.c
+++ b/src/backend/port/dynloader/freebsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/netbsd.c b/src/backend/port/dynloader/netbsd.c
index 99054d0d6c..a6365e04f9 100644
--- a/src/backend/port/dynloader/netbsd.c
+++ b/src/backend/port/dynloader/netbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/openbsd.c b/src/backend/port/dynloader/openbsd.c
index 45cf4fd35f..0cefe62fa5 100644
--- a/src/backend/port/dynloader/openbsd.c
+++ b/src/backend/port/dynloader/openbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index 5b829149ed..4054d06b54 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -138,7 +138,7 @@ PosixSemaphoreKill(sem_t * sem)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
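[A hedged sketch of the registration half of that contract; the callback body is elided because the cleanup depends on the semaphore implementation:]

    static void
    ReleaseSemaphores(int status, Datum arg)
    {
        /* release whatever PGSemaphoreCreate acquired (implementation-specific) */
    }

    /* ... inside the init routine ... */
    on_shmem_exit(ReleaseSemaphores, 0);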
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index 814039e83a..204d1b9f4d 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -258,7 +258,7 @@ IpcSemaphoreCreate(int numSems)
/*
* Can only get here if some other process managed to create the same
- * sema key before we did. Let him have that one, loop around to try
+ * sema key before we did. Let him have that one, loop around to try
* next key.
*/
}
@@ -283,12 +283,12 @@ IpcSemaphoreCreate(int numSems)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting semaphore key). In a standalone backend,
+ * it to generate the starting semaphore key). In a standalone backend,
* zero will be passed.
*
* In the SysV implementation, we acquire semaphore sets on-demand; the
@@ -383,7 +383,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* from the operation prematurely because we were sent a signal. So we
* try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. On
+ * Each time around the loop, we check for a cancel/die interrupt. On
* some platforms, if such an interrupt comes in while we are waiting, it
* will cause the semop() call to exit with errno == EINTR, allowing us to
* service the interrupt (if not in a critical section already) during the
@@ -401,7 +401,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
* execute directly. However, there is a huge pitfall: there is another
* window of a few instructions after the semop() before we are able to
- * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
* control, which means that the lock has been acquired but our caller did
* not get a chance to record the fact. Therefore, we only set
* ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
@@ -414,9 +414,9 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* On some platforms, signals marked SA_RESTART (which is most, for us)
* will not interrupt the semop(); it will just keep waiting. Therefore
* it's necessary for cancel/die interrupts to be serviced directly by the
- * signal handler. On these platforms the behavior is really the same
+ * signal handler. On these platforms the behavior is really the same
* whether the signal arrives just before the semop() begins, or while it
- * is waiting. The loop on EINTR is thus important only for other types
+ * is waiting. The loop on EINTR is thus important only for other types
* of interrupts.
*/
do
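[For orientation, the loop that this "do" opens has roughly the shape below; a sketch reconstructed from the comment's description, assuming semId and sops were set up earlier as the semaphore id and sembuf operation:]

    int     errStatus;

    do
    {
        ImmediateInterruptOK = interruptOK;    /* only if caller said it's safe */
        CHECK_FOR_INTERRUPTS();
        errStatus = semop(semId, &sops, 1);    /* blocks; may fail with EINTR */
        ImmediateInterruptOK = false;
    } while (errStatus < 0 && errno == EINTR);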
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 98de9346ce..bc95c9ac2b 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -226,7 +226,7 @@ IpcMemoryDelete(int status, Datum shmId)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
@@ -325,14 +325,14 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2)
* the storage.
*
* Dead Postgres segments are recycled if found, but we do not fail upon
- * collision with non-Postgres shmem segments. The idea here is to detect and
+ * collision with non-Postgres shmem segments. The idea here is to detect and
* re-use keys that may have been assigned by a crashed postmaster or backend.
*
* makePrivate means to always create a new segment, rather than attach to
* or recycle any existing segment.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting shmem key). In a standalone backend,
+ * it to generate the starting shmem key). In a standalone backend,
* zero will be passed.
*/
PGShmemHeader *
@@ -442,7 +442,7 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
/*
* PGSharedMemoryReAttach
*
- * Re-attach to an already existing shared memory segment. In the non
+ * Re-attach to an already existing shared memory segment. In the non
* EXEC_BACKEND case this is not used, because postmaster children inherit
* the shared memory segment attachment via fork().
*
@@ -484,7 +484,7 @@ PGSharedMemoryReAttach(void)
*
* Detach from the shared memory segment, if still attached. This is not
* intended for use by the process that originally created the segment
- * (it will have an on_shmem_exit callback registered to do that). Rather,
+ * (it will have an on_shmem_exit callback registered to do that). Rather,
* this is for subprocesses that have inherited an attachment and want to
* get rid of it.
*/
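[In the SysV implementation this is essentially a guarded shmdt(); a sketch, where UsedShmemSegAddr is the static pointer the attach code keeps:]

    void
    PGSharedMemoryDetach(void)
    {
        if (UsedShmemSegAddr != NULL)
        {
            if (shmdt(UsedShmemSegAddr) < 0)
                elog(LOG, "shmdt(%p) failed: %m", UsedShmemSegAddr);
            UsedShmemSegAddr = NULL;
        }
    }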
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 618fa5717a..70d5bcad8b 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -78,7 +78,7 @@ GetSharedMemName(void)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index ae640843b5..dc9820920a 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -21,21 +21,21 @@
* There is an autovacuum shared memory area, where the launcher stores
* information about the database it wants vacuumed. When it wants a new
* worker to start, it sets a flag in shared memory and sends a signal to the
- * postmaster. Then postmaster knows nothing more than it must start a worker;
- * so it forks a new child, which turns into a worker. This new process
+ * postmaster. Then postmaster knows nothing more than it must start a worker;
+ * so it forks a new child, which turns into a worker. This new process
* connects to shared memory, and there it can inspect the information that the
* launcher has set up.
*
* If the fork() call fails in the postmaster, it sets a flag in the shared
* memory area, and sends a signal to the launcher. The launcher, upon
* noticing the flag, can try starting the worker again by resending the
- * signal. Note that the failure can only be transient (fork failure due to
+ * signal. Note that the failure can only be transient (fork failure due to
* high load, memory pressure, too many processes, etc); more permanent
* problems, like failure to connect to a database, are detected later in the
* worker and dealt with just by having the worker exit normally. The launcher
* will launch a new worker again later, per schedule.
*
- * When the worker is done vacuuming it sends SIGUSR1 to the launcher. The
+ * When the worker is done vacuuming it sends SIGUSR1 to the launcher. The
* launcher then wakes up and is able to launch another worker, if the schedule
* is so tight that a new worker is needed immediately. At this time the
* launcher can also balance the settings for the various remaining workers'
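[A sketch of the start-worker handshake described above; SendPostmasterSignal and PMSIGNAL_START_AUTOVAC_WORKER come from pmsignal.h, while the shared-memory field name is illustrative:]

    /* launcher: publish the chosen database in shared memory ... */
    AutoVacuumShmem->av_startingWorker = worker;    /* illustrative field */
    /* ... then ask the postmaster to fork a worker */
    SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_WORKER);

    /* the postmaster knows only "start a worker"; the forked child
     * attaches to shared memory and reads what the launcher set up */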
@@ -233,7 +233,7 @@ typedef enum
/*-------------
* The main autovacuum shmem struct. On shared memory we store this main
- * struct and the array of WorkerInfo structs. This struct keeps:
+ * struct and the array of WorkerInfo structs. This struct keeps:
*
* av_signal set by other processes to indicate various conditions
* av_launcherpid the PID of the autovacuum launcher
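[In struct form, the fields listed so far look roughly like this; an abridged sketch, since the real struct also carries WorkerInfo bookkeeping:]

    typedef struct
    {
        sig_atomic_t av_signal[AutoVacNumSignals]; /* flags set by other processes */
        pid_t        av_launcherpid;               /* PID of the launcher */
        /* ... WorkerInfo free/running lists follow ... */
    } AutoVacuumShmemStruct;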
@@ -413,7 +413,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -423,7 +423,7 @@ AutoVacLauncherMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. Since this is an auxiliary process, it has
+ * Set up signal handlers. Since this is an auxiliary process, it has
* particular signal requirements -- no deadlock checker or sinval
* catchup, for example.
*/
@@ -485,7 +485,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about, but we do have LWLocks.
*/
LWLockReleaseAll();
@@ -841,7 +841,7 @@ launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval * nap)
* this the "new" database, because when the database was already present on
* the list, we expect that this function is not called at all). The
* preexisting list, if any, will be used to preserve the order of the
- * databases in the autovacuum_naptime period. The new database is put at the
+ * databases in the autovacuum_naptime period. The new database is put at the
* end of the interval. The actual values are not saved, which should not be
* much of a problem.
*/
@@ -1054,7 +1054,7 @@ db_comparator(const void *a, const void *b)
*
* Bare-bones procedure for starting an autovacuum worker from the launcher.
* It determines what database to work on, sets up shared memory stuff and
- * signals postmaster to start the worker. It fails gracefully if invoked when
+ * signals postmaster to start the worker. It fails gracefully if invoked when
* autovacuum_workers are already active.
*
* Return value is the OID of the database that the worker is going to process,
@@ -1306,7 +1306,7 @@ launch_worker(TimestampTz now)
/*
* Called from postmaster to signal a failure to fork a process to become
- * worker. The postmaster should kill(SIGUSR1) the launcher shortly
+ * worker. The postmaster should kill(SIGUSR1) the launcher shortly
* after calling this function.
*/
void
@@ -1358,7 +1358,7 @@ avl_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -1472,7 +1472,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -1482,7 +1482,7 @@ AutoVacWorkerMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. We operate on databases much like a regular
+ * Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*
@@ -1534,7 +1534,7 @@ AutoVacWorkerMain(int argc, char *argv[])
EmitErrorReport();
/*
- * We can now go away. Note that because we called InitProcess, a
+ * We can now go away. Note that because we called InitProcess, a
* callback was registered to do ProcKill, which will clean up
* necessary state.
*/
@@ -1548,7 +1548,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* Force zero_damaged_pages OFF in the autovac process, even if it is set
- * in postgresql.conf. We don't really want such a dangerous option being
+ * in postgresql.conf. We don't really want such a dangerous option being
* applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -1657,7 +1657,7 @@ FreeWorkerInfo(int code, Datum arg)
/*
* Wake the launcher up so that he can launch a new worker immediately
* if required. We only save the launcher's PID in local memory here;
- * the actual signal will be sent when the PGPROC is recycled. Note
+ * the actual signal will be sent when the PGPROC is recycled. Note
* that we always do this, so that the launcher can rebalance the cost
* limit setting of the remaining workers.
*
@@ -2212,7 +2212,7 @@ do_autovacuum(void)
}
/*
- * Ok, good to go. Store the table in shared memory before releasing
+ * Ok, good to go. Store the table in shared memory before releasing
* the lock so that other workers don't vacuum it concurrently.
*/
MyWorkerInfo->wi_tableoid = relid;
@@ -2248,7 +2248,7 @@ do_autovacuum(void)
/*
* Save the relation name for a possible error message, to avoid a
- * catalog lookup in case of an error. If any of these return NULL,
+ * catalog lookup in case of an error. If any of these return NULL,
* then the relation has been dropped since last we checked; skip it.
* Note: they must live in a long-lived memory context because we call
* vacuum and analyze in different transactions.
@@ -2663,7 +2663,7 @@ relation_needs_vacanalyze(Oid relid,
{
/*
* Skip a table not found in stat hash, unless we have to force vacuum
- * for anti-wrap purposes. If it's not acted upon, there's no need to
+ * for anti-wrap purposes. If it's not acted upon, there's no need to
* vacuum it.
*/
*dovacuum = force_vacuum;
@@ -2860,7 +2860,7 @@ AutoVacuumShmemInit(void)
* Refresh pgstats data for an autovacuum process
*
* Cause the next pgstats read operation to obtain fresh data, but throttle
- * such refreshing in the autovacuum launcher. This is mostly to avoid
+ * such refreshing in the autovacuum launcher. This is mostly to avoid
* rereading the pgstats files too many times in quick succession when there
* are many databases.
*
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 0bc596ce13..e48d4dbcb9 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -2,15 +2,15 @@
*
* bgwriter.c
*
- * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
+ * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
* to keep regular backends from having to write out dirty shared buffers
* (which they would only do when needing to free a shared buffer to read in
* another page). In the best scenario all writes from shared buffers will
- * be issued by the background writer process. However, regular backends are
+ * be issued by the background writer process. However, regular backends are
* still empowered to issue writes if the bgwriter fails to maintain enough
* clean shared buffers.
*
- * The bgwriter is also charged with handling all checkpoints. It will
+ * The bgwriter is also charged with handling all checkpoints. It will
* automatically dispatch a checkpoint after a certain amount of time has
* elapsed since the last one, and it can be signaled to perform requested
* checkpoints as well. (The GUC parameter that mandates a checkpoint every
@@ -22,7 +22,7 @@
* finishes, or as soon as recovery begins if we are doing archive recovery.
* It remains alive until the postmaster commands it to terminate.
* Normal termination is by SIGUSR2, which instructs the bgwriter to execute
- * a shutdown checkpoint and then exit(0). (All backends must be stopped
+ * a shutdown checkpoint and then exit(0). (All backends must be stopped
* before SIGUSR2 is issued!) Emergency termination is by SIGQUIT; like any
* backend, the bgwriter will simply abort and exit on SIGQUIT.
*
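[The SIGUSR2 side of that is just a flag-setting handler; a sketch matching the usual postmaster-child pattern:]

    static volatile sig_atomic_t shutdown_requested = false;

    /* SIGUSR2: run a shutdown checkpoint, then exit(0) */
    static void
    ReqShutdownHandler(SIGNAL_ARGS)
    {
        shutdown_requested = true;     /* main loop acts on this flag */
    }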
@@ -206,7 +206,7 @@ BackgroundWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (bgwriter probably never has any
+ * can signal any child processes too. (bgwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -219,7 +219,7 @@ BackgroundWriterMain(void)
* Properly accept or ignore signals the postmaster might send us
*
* Note: we deliberately ignore SIGTERM, because during a standard Unix
- * system shutdown cycle, init will SIGTERM all processes at once. We
+ * system shutdown cycle, init will SIGTERM all processes at once. We
* want to wait for the backends to exit, whereupon the postmaster will
* tell us it's okay to shut down (via SIGUSR2).
*
@@ -293,7 +293,7 @@ BackgroundWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in bgwriter, but we do have LWLocks, buffers, and temp files.
*/
LWLockReleaseAll();
@@ -495,7 +495,7 @@ BackgroundWriterMain(void)
ckpt_performed = CreateRestartPoint(flags);
/*
- * After any checkpoint, close all smgr files. This is so we
+ * After any checkpoint, close all smgr files. This is so we
* won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -639,7 +639,7 @@ BgWriterNap(void)
}
/*
- * Returns true if an immediate checkpoint request is pending. (Note that
+ * Returns true if an immediate checkpoint request is pending. (Note that
* this does not check the *current* checkpoint's IMMEDIATE flag, but whether
* there is one pending behind it.)
*/
@@ -817,7 +817,7 @@ bg_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -943,7 +943,7 @@ RequestCheckpoint(int flags)
CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
/*
- * After any checkpoint, close all smgr files. This is so we won't
+ * After any checkpoint, close all smgr files. This is so we won't
* hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -1121,7 +1121,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
* Although a full fsync request queue is not common, it can lead to severe
* performance problems when it does happen. So far, this situation has
* only been observed to occur when the system is under heavy write load,
- * and especially during the "sync" phase of a checkpoint. Without this
+ * and especially during the "sync" phase of a checkpoint. Without this
* logic, each backend begins doing an fsync for every block written, which
* gets very expensive and can slow down the whole system.
*
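The fallback being described could look roughly like this self-contained sketch; the queue size and all names here are assumptions, not the actual fd/bgwriter interface:

    #include <stdbool.h>
    #include <unistd.h>

    #define FSYNC_QUEUE_MAX 1024            /* assumed queue size */

    static int  fsync_queue[FSYNC_QUEUE_MAX];
    static int  fsync_queue_len = 0;

    /* Try to hand an fsync off to the background process's queue. */
    static bool
    forward_fsync_sketch(int fd)
    {
        if (fsync_queue_len >= FSYNC_QUEUE_MAX)
            return false;                   /* queue full: caller must fsync */
        fsync_queue[fsync_queue_len++] = fd;
        return true;
    }

    static void
    register_dirty_segment_sketch(int fd)
    {
        if (!forward_fsync_sketch(fd))
            (void) fsync(fd);               /* fallback: slow under heavy load */
    }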
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 45f90fd058..1fedb5c105 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -552,9 +552,9 @@ pgarch_archiveXlog(char *xlog)
{
/*
* If either the shell itself, or a called command, died on a signal,
- * abort the archiver. We do this because system() ignores SIGINT and
+ * abort the archiver. We do this because system() ignores SIGINT and
* SIGQUIT while waiting; so a signal is very likely something that
- * should have interrupted us too. If we overreact it's no big deal,
+ * should have interrupted us too. If we overreact it's no big deal,
* the postmaster will just start the archiver again.
*
* Per the Single Unix Spec, shells report exit status > 128 when a
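A sketch of that status test in portable C; archiver_should_abort is an invented name, but WIFSIGNALED/WEXITSTATUS is the standard way to decode what system() returns:

    #include <stdlib.h>
    #include <sys/wait.h>

    /* Abort if the shell died on a signal, or reports exit status > 128,
     * the shell convention for "called command killed by a signal". */
    static int
    archiver_should_abort(int rc)
    {
        if (WIFSIGNALED(rc))
            return 1;                   /* shell itself died on a signal */
        if (WIFEXITED(rc) && WEXITSTATUS(rc) > 128)
            return 1;                   /* called command died on a signal */
        return 0;
    }

    int
    main(void)
    {
        return archiver_should_abort(system("true"));
    }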
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index b12a0d86ec..9c9d9b5db7 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -329,7 +329,7 @@ pgstat_init(void)
* On some platforms, pg_getaddrinfo_all() may return multiple addresses
* only one of which will actually work (eg, both IPv6 and IPv4 addresses
* when kernel will reject IPv6). Worse, the failure may occur at the
- * bind() or perhaps even connect() stage. So we must loop through the
+ * bind() or perhaps even connect() stage. So we must loop through the
* results till we find a working combination. We will generate LOG
* messages, but no error, for bogus combinations.
*/
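The loop being called for follows the usual getaddrinfo() pattern; a hedged standalone sketch:

    #include <netdb.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Try every returned address until socket(), bind(), and connect()
     * all succeed; any of the three may fail for an individual result. */
    static int
    open_first_working_socket(struct addrinfo *addrs)
    {
        struct addrinfo *a;

        for (a = addrs; a != NULL; a = a->ai_next)
        {
            int fd = socket(a->ai_family, a->ai_socktype, a->ai_protocol);

            if (fd < 0)
                continue;               /* would be a LOG message, not an error */
            if (bind(fd, a->ai_addr, a->ai_addrlen) == 0 &&
                connect(fd, a->ai_addr, a->ai_addrlen) == 0)
                return fd;              /* found a working combination */
            close(fd);                  /* failed at bind() or connect() */
        }
        return -1;                      /* no combination worked */
    }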
@@ -593,7 +593,7 @@ pgstat_start(void)
/*
* Do nothing if too soon since last collector start. This is a safety
* valve to protect against continuous respawn attempts if the collector
- * is dying immediately at launch. Note that since we will be re-called
+ * is dying immediately at launch. Note that since we will be re-called
* from the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
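A minimal sketch of such a respawn throttle; the interval constant and variable names are assumptions:

    #include <time.h>

    #define RESTART_INTERVAL 60             /* assumed, in seconds */

    static time_t last_collector_start = 0;

    /* Refuse to respawn more than once per interval; since the caller's
     * main loop re-invokes this, refusal just means another chance later. */
    static int
    collector_start_allowed(void)
    {
        time_t  curtime = time(NULL);

        if ((unsigned int) (curtime - last_collector_start) <
            (unsigned int) RESTART_INTERVAL)
            return 0;                       /* too soon since last start */
        last_collector_start = curtime;
        return 1;
    }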
@@ -1219,7 +1219,7 @@ pgstat_report_analyze(Relation rel, PgStat_Counter livetuples,
* have counted such rows as live or dead respectively. Because we will
* report our counts of such rows at transaction end, we should subtract
* off these counts from what we send to the collector now, else they'll
- * be double-counted after commit. (This approach also ensures that the
+ * be double-counted after commit. (This approach also ensures that the
* collector ends up with the right numbers if we abort instead of
* committing.)
*/
@@ -1820,7 +1820,7 @@ AtPrepare_PgStat(void)
* Clean up after successful PREPARE.
*
* All we need do here is unlink the transaction stats state from the
- * nontransactional state. The nontransactional action counts will be
+ * nontransactional state. The nontransactional action counts will be
* reported to the stats collector immediately, while the effects on live
* and dead tuple counts are preserved in the 2PC state file.
*
@@ -2425,12 +2425,12 @@ pgstat_read_current_status(void)
* pgstat_get_backend_current_activity() -
*
* Return a string representing the current activity of the backend with
- * the specified PID. This looks directly at the BackendStatusArray,
+ * the specified PID. This looks directly at the BackendStatusArray,
* and so will provide current information regardless of the age of our
* transaction's snapshot of the status array.
*
* It is the caller's responsibility to invoke this only for backends whose
- * state is expected to remain stable while the result is in use. The
+ * state is expected to remain stable while the result is in use. The
* only current use is in deadlock reporting, where we can expect that
* the target backend is blocked on a lock. (There are corner cases
* where the target's wait could get aborted while we are looking at it,
@@ -2579,7 +2579,7 @@ pgstat_send_bgwriter(void)
/* ----------
* PgstatCollectorMain() -
*
- * Start up the statistics collector process. This is the body of the
+ * Start up the statistics collector process. This is the body of the
* postmaster child process.
*
* The argc/argv parameters are valid only in EXEC_BACKEND case.
@@ -2608,7 +2608,7 @@ PgstatCollectorMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (pgstat probably never has any
+ * can signal any child processes too. (pgstat probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -2655,7 +2655,7 @@ PgstatCollectorMain(int argc, char *argv[])
pgStatDBHash = pgstat_read_statsfile(InvalidOid, true);
/*
- * Set up the descriptor set for select(2). Since only one bit in the set
+ * Set up the descriptor set for select(2). Since only one bit in the set
* ever changes, we need not repeat FD_ZERO each time.
*/
#if !defined(HAVE_POLL) && !defined(WIN32)
@@ -2668,7 +2668,7 @@ PgstatCollectorMain(int argc, char *argv[])
*
* For performance reasons, we don't want to do a PostmasterIsAlive() test
* after every message; instead, do it only when select()/poll() is
- * interrupted by timeout. In essence, we'll stay alive as long as
+ * interrupted by timeout. In essence, we'll stay alive as long as
* backends keep sending us stuff often, even if the postmaster is gone.
*/
for (;;)
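Roughly, the loop shape being described; the 2-second timeout and the getppid()-based liveness check are stand-ins for the real PostmasterIsAlive() machinery:

    #include <stdbool.h>
    #include <sys/select.h>
    #include <unistd.h>

    /* Assumed stand-in: an orphaned child is reparented to init (PID 1)
     * on most Unixes, approximating "is the parent still there?". */
    static bool
    parent_is_alive(void)
    {
        return getppid() != 1;
    }

    /* Descriptor set built once; liveness tested only on timeout. */
    static void
    collector_loop_sketch(int sock)
    {
        fd_set  base;

        FD_ZERO(&base);
        FD_SET(sock, &base);

        for (;;)
        {
            fd_set          rfds = base;    /* no FD_ZERO each time */
            struct timeval  tv = {2, 0};
            int             n = select(sock + 1, &rfds, NULL, NULL, &tv);

            if (n == 0 && !parent_is_alive())
                break;                      /* timeout and parent gone */
            if (n > 0 && FD_ISSET(sock, &rfds))
            {
                char    buf[1024];

                (void) read(sock, buf, sizeof(buf));    /* handle a message */
            }
        }
    }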
@@ -3391,14 +3391,14 @@ backend_read_statsfile(void)
/*
* We set the minimum acceptable timestamp to PGSTAT_STAT_INTERVAL msec
- * before now. This indirectly ensures that the collector needn't write
+ * before now. This indirectly ensures that the collector needn't write
* the file more often than PGSTAT_STAT_INTERVAL. In an autovacuum
* worker, however, we want a lower delay to avoid using stale data, so we
* use PGSTAT_RETRY_DELAY (since the number of workers is low, this
* shouldn't be a problem).
*
* Note that we don't recompute min_ts after sleeping; so we might end up
- * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In practice
+ * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In practice
* that shouldn't happen, though, as long as the sleep time is less than
* PGSTAT_STAT_INTERVAL; and we don't want to lie to the collector about
* what our cutoff time really is.
@@ -3462,7 +3462,7 @@ pgstat_setup_memcxt(void)
/* ----------
* pgstat_clear_snapshot() -
*
- * Discard any data collected in the current transaction. Any subsequent
+ * Discard any data collected in the current transaction. Any subsequent
* request will cause new snapshots to be read.
*
* This is also invoked during transaction commit or abort to discard
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 4c61c5d0c4..857f16921c 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2,7 +2,7 @@
*
* postmaster.c
* This program acts as a clearing house for requests to the
- * POSTGRES system. Frontend programs send a startup message
+ * POSTGRES system. Frontend programs send a startup message
* to the Postmaster and the postmaster uses the info in the
* message to setup a backend process.
*
@@ -15,7 +15,7 @@
* The postmaster process creates the shared memory and semaphore
* pools during startup, but as a rule does not touch them itself.
* In particular, it is not a member of the PGPROC array of backends
- * and so it cannot participate in lock-manager operations. Keeping
+ * and so it cannot participate in lock-manager operations. Keeping
* the postmaster away from shared memory operations makes it simpler
* and more reliable. The postmaster is almost always able to recover
* from crashes of individual backends by resetting shared memory;
@@ -58,7 +58,7 @@
* Error Reporting:
* Use write_stderr() only for reporting "interactive" errors
* (essentially, bogus arguments on the command line). Once the
- * postmaster is launched, use ereport(). In particular, don't use
+ * postmaster is launched, use ereport(). In particular, don't use
* write_stderr() for anything that occurs after pmdaemonize.
*
*-------------------------------------------------------------------------
@@ -131,10 +131,10 @@
* children we have and send them appropriate signals when necessary.
*
* "Special" children such as the startup, bgwriter and autovacuum launcher
- * tasks are not in this list. Autovacuum worker processes are in it.
+ * tasks are not in this list. Autovacuum worker processes are in it.
* Also, "dead_end" children are in it: these are children launched just
* for the purpose of sending a friendly rejection message to a would-be
- * client. We must track them because they are attached to shared memory,
+ * client. We must track them because they are attached to shared memory,
* but we know they will never become live backends. dead_end children are
* not assigned a PMChildSlot.
*/
@@ -181,10 +181,10 @@ static char ExtraOptions[MAXPGPATH];
/*
* These globals control the behavior of the postmaster in case some
- * backend dumps core. Normally, it kills all peers of the dead backend
+ * backend dumps core. Normally, it kills all peers of the dead backend
* and reinitializes shared memory. By specifying -s or -n, we can have
* the postmaster stop (rather than kill) peers and not reinitialize
- * shared data structures. (Reinit is currently dead code, though.)
+ * shared data structures. (Reinit is currently dead code, though.)
*/
static bool Reinit = true;
static int SendStop = false;
@@ -527,7 +527,7 @@ PostmasterMain(int argc, char *argv[])
opterr = 1;
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* tcop/postgres.c (the option sets should not conflict) and with the
* common help() function in main/main.c.
*/
@@ -789,7 +789,7 @@ PostmasterMain(int argc, char *argv[])
CreateDataDirLockFile(true);
/*
- * If timezone is not set, determine what the OS uses. (In theory this
+ * If timezone is not set, determine what the OS uses. (In theory this
* should be done during GUC initialization, but because it can take as
* much as several seconds, we delay it until after we've created the
* postmaster.pid file. This prevents problems with boot scripts that
@@ -817,7 +817,7 @@ PostmasterMain(int argc, char *argv[])
process_shared_preload_libraries();
/*
- * Remove old temporary files. At this point there can be no other
+ * Remove old temporary files. At this point there can be no other
* Postgres processes running in this directory, so this should be safe.
*/
RemovePgTempFiles();
@@ -1512,7 +1512,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
/*
* EOF after SSLdone probably means the client didn't like our
- * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
+ * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
* don't clutter the log with a complaint.
*/
if (!SSLdone)
@@ -1636,7 +1636,7 @@ retry1:
int32 offset = sizeof(ProtocolVersion);
/*
- * Scan packet body for name/option pairs. We can assume any string
+ * Scan packet body for name/option pairs. We can assume any string
* beginning within the packet body is null-terminated, thanks to
* zeroing extra byte above.
*/
@@ -2024,7 +2024,7 @@ reset_shared(int port)
*
* Note: in each "cycle of life" we will normally assign the same IPC keys
* (if using SysV shmem and/or semas), since the port number is used to
- * determine IPC keys. This helps ensure that we will clean up dead IPC
+ * determine IPC keys. This helps ensure that we will clean up dead IPC
* objects if the postmaster crashes and is restarted.
*/
CreateSharedMemoryAndSemaphores(false, port);
@@ -2345,7 +2345,7 @@ reaper(SIGNAL_ARGS)
/*
* OK, we saw normal exit of the bgwriter after it's been told
* to shut down. We expect that it wrote a shutdown
- * checkpoint. (If for some reason it didn't, recovery will
+ * checkpoint. (If for some reason it didn't, recovery will
* occur on next postmaster start.)
*
* At this point we should have no normal backend children
@@ -2404,7 +2404,7 @@ reaper(SIGNAL_ARGS)
/*
* Was it the autovacuum launcher? Normal exit can be ignored; we'll
* start a new one at the next iteration of the postmaster's main
- * loop, if necessary. Any other exit condition is treated as a
+ * loop, if necessary. Any other exit condition is treated as a
* crash.
*/
if (pid == AutoVacPID)
@@ -2531,7 +2531,7 @@ CleanupBackend(int pid,
if (!ReleasePostmasterChildSlot(bp->child_slot))
{
/*
- * Uh-oh, the child failed to clean itself up. Treat as a
+ * Uh-oh, the child failed to clean itself up. Treat as a
* crash after all.
*/
HandleChildCrash(pid, exitstatus, _("server process"));
@@ -2791,7 +2791,7 @@ PostmasterStateMachine(void)
* PM_WAIT_BACKENDS state ends when we have no regular backends
* (including autovac workers) and no walwriter or autovac launcher.
* If we are doing crash recovery then we expect the bgwriter to exit
- * too, otherwise not. The archiver, stats, and syslogger processes
+ * too, otherwise not. The archiver, stats, and syslogger processes
* are disregarded since they are not connected to shared memory; we
* also disregard dead_end children here.
*/
@@ -2804,7 +2804,7 @@ PostmasterStateMachine(void)
if (FatalError)
{
/*
- * Start waiting for dead_end children to die. This state
+ * Start waiting for dead_end children to die. This state
* change causes ServerLoop to stop creating new ones.
*/
pmState = PM_WAIT_DEAD_END;
@@ -2882,7 +2882,7 @@ PostmasterStateMachine(void)
/*
* If we've been told to shut down, we exit as soon as there are no
- * remaining children. If there was a crash, cleanup will occur at the
+ * remaining children. If there was a crash, cleanup will occur at the
* next startup. (Before PostgreSQL 8.3, we tried to recover from the
* crash before exiting, but that seems unwise if we are quitting because
* we got SIGTERM from init --- there may well not be time for recovery
@@ -2951,7 +2951,7 @@ PostmasterStateMachine(void)
* system().
*
* There is a race condition for recently-forked children: they might not
- * have executed setsid() yet. So we signal the child directly as well as
+ * have executed setsid() yet. So we signal the child directly as well as
* the group. We assume such a child will handle the signal before trying
* to spawn any grandchild processes. We also assume that signaling the
* child twice will not cause any problems.
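That double-signal strategy amounts to two kill() calls, sketched here with an invented name:

    #include <signal.h>
    #include <sys/types.h>

    /* Signal the child by PID (covers the window before its setsid())
     * and its process group (covers any grandchildren); signaling the
     * child twice is assumed harmless. */
    static void
    signal_child_sketch(pid_t pid, int signo)
    {
        (void) kill(pid, signo);        /* the child itself */
        (void) kill(-pid, signo);       /* its process group, if any */
    }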
@@ -3121,7 +3121,7 @@ BackendStartup(Port *port)
/*
* Try to report backend fork() failure to client before we close the
- * connection. Since we do not care to risk blocking the postmaster on
+ * connection. Since we do not care to risk blocking the postmaster on
* this connection, we set the connection to non-blocking and try only once.
*
* This is grungy special-purpose code; we cannot use backend libpq since
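A hedged sketch of such a non-blocking, single-attempt report; fcntl()/send() stand in for the real libpq-free code:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Set the socket non-blocking first, so a stalled client cannot
     * block us, then attempt a single send() and give up regardless. */
    static void
    report_fork_failure_sketch(int client_fd, const char *msg)
    {
        int flags = fcntl(client_fd, F_GETFL, 0);

        if (flags >= 0 &&
            fcntl(client_fd, F_SETFL, flags | O_NONBLOCK) == 0)
            (void) send(client_fd, msg, strlen(msg), 0);    /* one try only */
    }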
@@ -3230,7 +3230,7 @@ BackendInitialize(Port *port)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (We do this now on the off chance
+ * can signal any child processes too. (We do this now on the off chance
* that something might spawn a child process during authentication.)
*/
#ifdef HAVE_SETSID
@@ -3291,7 +3291,7 @@ BackendInitialize(Port *port)
* the PostmasterContext (which didn't exist before, in this process) to
* contain the data.
*
- * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
+ * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
Assert(PostmasterContext == NULL);
@@ -3902,7 +3902,7 @@ SubPostmasterMain(int argc, char *argv[])
read_nondefault_variables();
/*
- * Reload any libraries that were preloaded by the postmaster. Since we
+ * Reload any libraries that were preloaded by the postmaster. Since we
* exec'd this process, those libraries didn't come along with us; but we
* should load them into all child processes to be consistent with the
* non-EXEC_BACKEND behavior.
@@ -3955,7 +3955,7 @@ SubPostmasterMain(int argc, char *argv[])
*
* This prevents a randomized stack base address that causes child
* shared memory to be at a different address than the parent, making
- * it impossible to attach to shared memory. Return the value to
+ * it impossible to attach to shared memory. Return the value to
* '1' when finished.
*/
CreateSharedMemoryAndSemaphores(false, 0);
@@ -4061,7 +4061,7 @@ ExitPostmaster(int status)
/* should cleanup shared memory and kill all backends */
/*
- * Not sure of the semantics here. When the Postmaster dies, should the
+ * Not sure of the semantics here. When the Postmaster dies, should the
* backends all be killed? probably not.
*
* MUST -- vadim 05-10-1999
@@ -4093,7 +4093,7 @@ sigusr1_handler(SIGNAL_ARGS)
FatalError = false;
/*
- * Crank up the background writer. It doesn't matter if this fails,
+ * Crank up the background writer. It doesn't matter if this fails,
* we'll just try again later.
*/
Assert(BgWriterPID == 0);
@@ -4271,7 +4271,7 @@ CountChildren(void)
/*
* StartChildProcess -- start an auxiliary process for the postmaster
*
- * xlop determines what kind of child will be started. All child types
+ * xlop determines what kind of child will be started. All child types
* initially go to AuxiliaryProcessMain, which will handle common setup.
*
* Return value of StartChildProcess is subprocess' PID, or 0 if failed
@@ -4486,9 +4486,9 @@ CreateOptsFile(int argc, char *argv[], char *fullprogname)
* This reports the number of entries needed in per-child-process arrays
* (the PMChildFlags array, and if EXEC_BACKEND the ShmemBackendArray).
* These arrays include regular backends and autovac workers, but not special
- * children nor dead_end children. This allows the arrays to have a fixed
+ * children nor dead_end children. This allows the arrays to have a fixed
* maximum size, to wit the same too-many-children limit enforced by
- * canAcceptConnections(). The exact value isn't too critical as long as
+ * canAcceptConnections(). The exact value isn't too critical as long as
* it's more than MaxBackends.
*/
int
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index 05a0e072da..a731005f96 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -65,7 +65,7 @@
/*
- * GUC parameters. Logging_collector cannot be changed after postmaster
+ * GUC parameters. Logging_collector cannot be changed after postmaster
* start, but the rest can change at SIGHUP.
*/
bool Logging_collector = false;
@@ -184,7 +184,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If we restarted, our stderr is already redirected into our own input
* pipe. This is of course pretty useless, not to mention that it
- * interferes with detecting pipe EOF. Point stderr to /dev/null. This
+ * interferes with detecting pipe EOF. Point stderr to /dev/null. This
* assumes that all interesting messages generated in the syslogger will
* come through elog.c and will be sent to write_syslogger_file.
*/
@@ -194,7 +194,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* The closes might look redundant, but they are not: we want to be
- * darn sure the pipe gets closed even if the open failed. We can
+ * darn sure the pipe gets closed even if the open failed. We can
* survive running with stderr pointing nowhere, but we can't afford
* to have extra pipe input descriptors hanging around.
*/
@@ -235,7 +235,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (syslogger probably never has any
+ * can signal any child processes too. (syslogger probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -474,7 +474,7 @@ SysLoggerMain(int argc, char *argv[])
(errmsg("logger shutting down")));
/*
- * Normal exit from the syslogger is here. Note that we
+ * Normal exit from the syslogger is here. Note that we
* deliberately do not close syslogFile before exiting; this is to
* allow for the possibility of elog messages being generated
* inside proc_exit. Regular exit() will take care of flushing
@@ -1282,7 +1282,7 @@ set_next_rotation_time(void)
/*
* The requirements here are to choose the next time > now that is a
* "multiple" of the log rotation interval. "Multiple" can be interpreted
- * fairly loosely. In this version we align to log_timezone rather than
+ * fairly loosely. In this version we align to log_timezone rather than
* GMT.
*/
rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
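A simplified sketch of the next-multiple computation; note it aligns multiples to the epoch, whereas the comment says the code aligns them to log_timezone rather than GMT:

    #include <time.h>

    /* Next time strictly after 'now' that is a multiple of the interval. */
    static time_t
    next_rotation_sketch(time_t now, int rotinterval)
    {
        return now - (now % rotinterval) + rotinterval;
    }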
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index 9e08a5b710..7df618b6d4 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -92,7 +92,7 @@ WalWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walwriter probably never has any
+ * can signal any child processes too. (walwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -169,7 +169,7 @@ WalWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in walwriter, but we do have LWLocks, and perhaps buffers?
*/
LWLockReleaseAll();
@@ -298,7 +298,7 @@ wal_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c
index 846f24f7c2..7c631faf5c 100644
--- a/src/backend/regex/regc_color.c
+++ b/src/backend/regex/regc_color.c
@@ -2,7 +2,7 @@
* colorings of characters
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c
index 25bfae3e32..5187ef61bd 100644
--- a/src/backend/regex/regc_cvec.c
+++ b/src/backend/regex/regc_cvec.c
@@ -2,7 +2,7 @@
* Utility functions for handling cvecs
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c
index 782c0085c2..fd6966f4f7 100644
--- a/src/backend/regex/regc_lex.c
+++ b/src/backend/regex/regc_lex.c
@@ -2,7 +2,7 @@
* lexical analyzer
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index 28f5e7ca12..0d63f8f4c8 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -30,7 +30,7 @@
*
* THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
* IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
* NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS.
@@ -38,7 +38,7 @@
* GOVERNMENT USE: If you are acquiring this software on behalf of the
* U.S. government, the Government shall have only "Restricted Rights"
* in the software and related documentation as defined in the Federal
- * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+ * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
* are acquiring the software on behalf of the Department of Defense, the
* software shall be classified as "Commercial Computer Software" and the
* Government shall have only "Restricted Rights" as defined in Clause
@@ -780,7 +780,7 @@ allcases(struct vars * v, /* context */
/*
* cmp - chr-substring compare
*
- * Backrefs need this. It should preferably be efficient.
+ * Backrefs need this. It should preferably be efficient.
* Note that it does not need to report anything except equal/unequal.
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c
index e0e32344ca..eef0cffb01 100644
--- a/src/backend/regex/regc_nfa.c
+++ b/src/backend/regex/regc_nfa.c
@@ -2,7 +2,7 @@
* NFA utilities.
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -1304,7 +1304,7 @@ fixempties(struct nfa * nfa,
}
/*
- * And remove any states that have become useless. (This cleanup is not
+ * And remove any states that have become useless. (This cleanup is not
* very thorough, and would be even less so if we tried to combine it with
* the previous step; but cleanup() will take care of anything we miss.)
*/
@@ -1372,7 +1372,7 @@ replaceempty(struct nfa * nfa,
* non-EMPTY out-arcs), we must keep it so, so always push forward in that
* case.
*
- * The fan-out/fan-in comparison should count only non-EMPTY arcs. If
+ * The fan-out/fan-in comparison should count only non-EMPTY arcs. If
* "from" is doomed, we can skip counting "to"'s arcs, since we want to
* force taking the copyins path in that case.
*/
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index 8d9e58820f..3f0504789e 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -2,7 +2,7 @@
* re_*comp and friends - compile REs
* This file #includes several others (see the bottom).
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -556,7 +556,7 @@ makesearch(struct vars * v,
* constraints, often knowing when you were in the pre state tells you
* little; it's the next state(s) that are informative. But some of them
* may have other inarcs, i.e. it may be possible to make actual progress
- * and then return to one of them. We must de-optimize such cases,
+ * and then return to one of them. We must de-optimize such cases,
* splitting each such state into progress and no-progress states.
*/
@@ -602,7 +602,7 @@ makesearch(struct vars * v,
* parse - parse an RE
*
* This is actually just the top level, which parses a bunch of branches
- * tied together with '|'. They appear in the tree as the left children
+ * tied together with '|'. They appear in the tree as the left children
* of a chain of '|' subres.
*/
static struct subre *
@@ -1318,7 +1318,7 @@ bracket(struct vars * v,
/*
* cbracket - handle complemented bracket expression
* We do it by calling bracket() with dummy endpoints, and then complementing
- * the result. The alternative would be to invoke rainbow(), and then delete
+ * the result. The alternative would be to invoke rainbow(), and then delete
* arcs as the b.e. is seen... but that gets messy.
*/
static void
diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c
index d277ec48f9..d00595268f 100644
--- a/src/backend/regex/rege_dfa.c
+++ b/src/backend/regex/rege_dfa.c
@@ -2,7 +2,7 @@
* DFA routines
* This file is #included by regexec.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c
index dfcb462e01..3bdcfdbd86 100644
--- a/src/backend/regex/regerror.c
+++ b/src/backend/regex/regerror.c
@@ -1,7 +1,7 @@
/*
* regerror - error-code expansion
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index 06596e8e28..b554bb79e3 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -1,7 +1,7 @@
/*
* re_*exec and friends - match REs
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regfree.c b/src/backend/regex/regfree.c
index 2b7a5431b1..10cc26b0d8 100644
--- a/src/backend/regex/regfree.c
+++ b/src/backend/regex/regfree.c
@@ -1,7 +1,7 @@
/*
* regfree - free an RE
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 77a889412f..3ed7d14eaf 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -237,7 +237,7 @@ DefineQueryRewrite(char *rulename,
/*
* If we are installing an ON SELECT rule, we had better grab
* AccessExclusiveLock to ensure no SELECTs are currently running on the
- * event relation. For other types of rules, it might be sufficient to
+ * event relation. For other types of rules, it might be sufficient to
* grab ShareLock to lock out insert/update/delete actions. But for now,
* let's just grab AccessExclusiveLock all the time.
*/
@@ -388,7 +388,7 @@ DefineQueryRewrite(char *rulename,
*
* If so, check that the relation is empty because the storage for the
* relation is going to be deleted. Also insist that the rel not have
- * any triggers, indexes, or child tables. (Note: these tests are too
+ * any triggers, indexes, or child tables. (Note: these tests are too
* strict, because they will reject relations that once had such but
* don't anymore. But we don't really care, because this whole
* business of converting relations to views is just a kluge to allow
@@ -604,7 +604,7 @@ checkRuleResultList(List *targetList, TupleDesc resultDesc, bool isSelect)
* Note: for a view (ON SELECT rule), the checkAsUser field of the *OLD*
* RTE entry will be overridden when the view rule is expanded, and the
* checkAsUser field of the *NEW* entry is irrelevant because that entry's
- * requiredPerms bits will always be zero. However, for other types of rules
+ * requiredPerms bits will always be zero. However, for other types of rules
* it's important to set these fields to match the rule owner. So we just set
* them always.
*/
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 4e7b307ef2..578b382a19 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -188,7 +188,7 @@ AcquireRewriteLocks(Query *parsetree, bool forExecute)
/*
* The elements of an alias list have to refer to
* earlier RTEs of the same rtable, because that's the
- * order the planner builds things in. So we already
+ * order the planner builds things in. So we already
* processed the referenced RTE, and so it's safe to
* use get_rte_attribute_is_dropped on it. (This might
* not hold after rewriting or planning, but it's OK
@@ -345,7 +345,7 @@ rewriteRuleAction(Query *parsetree,
/*
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
- * action. Some of the entries may be unused after we finish rewriting,
+ * action. Some of the entries may be unused after we finish rewriting,
* but we leave them all in place for two reasons:
*
* We'd have a much harder job to adjust the query's varnos if we
@@ -411,7 +411,7 @@ rewriteRuleAction(Query *parsetree,
* that if the rule action refers to OLD, its jointree will add a
* reference to rt_index. If the rule action doesn't refer to OLD, but
* either the rule_qual or the user query quals do, then we need to keep
- * the original rtindex in the jointree to provide data for the quals. We
+ * the original rtindex in the jointree to provide data for the quals. We
* don't want the original rtindex to be joined twice, however, so avoid
* keeping it if the rule action mentions it.
*
@@ -433,7 +433,7 @@ rewriteRuleAction(Query *parsetree,
{
/*
* If sub_action is a setop, manipulating its jointree will do no
- * good at all, because the jointree is dummy. (Perhaps someday
+ * good at all, because the jointree is dummy. (Perhaps someday
* we could push the joining and quals down to the member
* statements of the setop?)
*/
@@ -589,7 +589,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* then junk fields (these in no particular order).
*
* We must do items 1 and 2 before firing rewrite rules, else rewritten
- * references to NEW.foo will produce wrong or incomplete results. Item 3
+ * references to NEW.foo will produce wrong or incomplete results. Item 3
* is not needed for rewriting, but will be needed by the planner, and we
* can do it essentially for free while handling items 1 and 2.
*
@@ -773,7 +773,7 @@ process_matched_tle(TargetEntry *src_tle,
}
/*----------
- * Multiple assignments to same attribute. Allow only if all are
+ * Multiple assignments to same attribute. Allow only if all are
* FieldStore or ArrayRef assignment operations. This is a bit
* tricky because what we may actually be looking at is a nest of
* such nodes; consider
@@ -791,7 +791,7 @@ process_matched_tle(TargetEntry *src_tle,
* assignments appear to occur left-to-right.
*
* For FieldStore, instead of nesting we can generate a single
- * FieldStore with multiple target fields. We must nest when
+ * FieldStore with multiple target fields. We must nest when
* ArrayRefs are involved though.
*----------
*/
@@ -1290,7 +1290,7 @@ markQueryForLocking(Query *qry, Node *jtnode, bool forUpdate, bool noWait)
* in the given tree.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * SubLink nodes in-place. It is caller's responsibility to ensure that
+ * SubLink nodes in-place. It is caller's responsibility to ensure that
* no unwanted side-effects occur!
*
* This is unlike most of the other routines that recurse into subselects,
@@ -1470,7 +1470,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs)
* not just "NOT x" which the planner is much smarter about, else we will
* do the wrong thing when the qual evaluates to NULL.)
*
- * The rule_qual may contain references to OLD or NEW. OLD references are
+ * The rule_qual may contain references to OLD or NEW. OLD references are
* replaced by references to the specified rt_index (the relation that the
* rule applies to). NEW references are only possible for INSERT and UPDATE
* queries on the relation itself, and so they should be replaced by copies
@@ -1540,7 +1540,7 @@ CopyAndAddInvertedQual(Query *parsetree,
* rows that the qualified action doesn't act on. (If there are multiple
* qualified INSTEAD rules, we AND all the negated quals onto a single
* modified original query.) We won't execute the original, unmodified
- * query if we find either qualified or unqualified INSTEAD rules. If
+ * query if we find either qualified or unqualified INSTEAD rules. If
* we find both, the modified original query is discarded too.
*/
static List *
@@ -1931,7 +1931,7 @@ QueryRewrite(Query *parsetree)
*
* If the original query is still in the list, it sets the command tag.
* Otherwise, the last INSTEAD query of the same kind as the original is
- * allowed to set the tag. (Note these rules can leave us with no query
+ * allowed to set the tag. (Note these rules can leave us with no query
* setting the tag. The tcop code has to cope with this by setting up a
* default tag based on the original un-rewritten query.)
*
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 5ff0919115..2beb9f1175 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -68,7 +68,7 @@ checkExprHasAggs(Node *node)
* specified query level.
*
* The objective of this routine is to detect whether there are aggregates
- * belonging to the given query level. Aggregates belonging to subqueries
+ * belonging to the given query level. Aggregates belonging to subqueries
* or outer queries do NOT cause a true result. We must recurse into
* subqueries to detect outer-reference aggregates that logically belong to
* the specified query level.
@@ -123,7 +123,7 @@ contain_aggs_of_level_walker(Node *node,
* Find the parse location of any aggregate of the specified query level.
*
* Returns -1 if no such agg is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -218,7 +218,7 @@ contain_windowfuncs_walker(Node *node, void *context)
* Find the parse location of any windowfunc of the current query level.
*
* Returns -1 if no such windowfunc is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -297,11 +297,11 @@ checkExprHasSubLink_walker(Node *node, void *context)
*
* Find all Var nodes in the given tree with varlevelsup == sublevels_up,
* and increment their varno fields (rangetable indexes) by 'offset'.
- * The varnoold fields are adjusted similarly. Also, adjust other nodes
+ * The varnoold fields are adjusted similarly. Also, adjust other nodes
* that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -457,11 +457,11 @@ offset_relid_set(Relids relids, int offset)
*
* Find all Var nodes in the given tree belonging to a specific relation
* (identified by sublevels_up and rt_index), and change their varno fields
- * to 'new_index'. The varnoold fields are changed too. Also, adjust other
+ * to 'new_index'. The varnoold fields are changed too. Also, adjust other
* nodes that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -641,7 +641,7 @@ adjust_relid_set(Relids relids, int oldrelid, int newrelid)
* Likewise for other nodes containing levelsup fields, such as Aggref.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * Var nodes in-place. The given expression tree should have been copied
+ * Var nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -1355,7 +1355,7 @@ ResolveNew_callback(Var *var,
* If generating an expansion for a var of a named rowtype
* (ie, this is a plain relation RTE), then we must include
* dummy items for dropped columns. If the var is RECORD (ie,
- * this is a JOIN), then omit dropped columns. Either way,
+ * this is a JOIN), then omit dropped columns. Either way,
* attach column names to the RowExpr for use of ruleutils.c.
*/
expandRTE(rcon->target_rte,
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index 2f20d96f09..43ea20a5f3 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -54,7 +54,7 @@ long int BufFileWriteCount;
*
* IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
* It must be set when an IO is initiated and cleared at
- * the end of the IO. It is there to make sure that one
+ * the end of the IO. It is there to make sure that one
* process doesn't start to use a buffer while another is
* faulting it in. see WaitIO and related routines.
*
@@ -64,7 +64,7 @@ long int BufFileWriteCount;
*
* PrivateRefCount -- Each buffer also has a private refcount that keeps
* track of the number of times the buffer is pinned in the current
- * process. This is used for two purposes: first, if we pin a
+ * process. This is used for two purposes: first, if we pin a
* a buffer more than once, we only need to change the shared refcount
* once, thus only lock the shared state once; second, when a transaction
* aborts, it should only unpin the buffers exactly the number of times it
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index f3f7553a62..fbc43fead6 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -3,7 +3,7 @@
* buf_table.c
* routines for mapping BufferTags to buffer indexes.
*
- * Note: the routines in this file do no locking of their own. The caller
+ * Note: the routines in this file do no locking of their own. The caller
* must hold a suitable lock on the appropriate BufMappingLock, as specified
* in the comments. We can't do the locking inside these functions because
* in most cases the caller needs to adjust the buffer header contents
@@ -115,7 +115,7 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
* Insert a hashtable entry for given tag and buffer ID,
* unless an entry already exists for that tag
*
- * Returns -1 on successful insertion. If a conflicting entry exists
+ * Returns -1 on successful insertion. If a conflicting entry exists
* already, returns the buffer ID in that entry.
*
* Caller must hold exclusive lock on BufMappingLock for tag's partition
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 76de560676..5564296309 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -106,7 +106,7 @@ static void AtProcExit_Buffers(int code, Datum arg);
* PrefetchBuffer -- initiate asynchronous read of a block of a relation
*
* This is named by analogy to ReadBuffer but doesn't actually allocate a
- * buffer. Instead it tries to ensure that a future ReadBuffer for the given
+ * buffer. Instead it tries to ensure that a future ReadBuffer for the given
* block will not be delayed by the I/O. Prefetching is optional.
* No-op if prefetching isn't compiled in.
*/
@@ -202,7 +202,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* for non-critical data, where the caller is prepared to repair errors.
*
* In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
- * with zeros instead of reading it from disk. Useful when the caller is
+ * with zeros instead of reading it from disk. Useful when the caller is
* going to fill the page from scratch, since this saves I/O and avoids
* unnecessary failure if the page-on-disk has corrupt page headers.
* Caution: do not use this mode to read a page that is beyond the relation's
@@ -348,7 +348,7 @@ ReadBuffer_common(SMgrRelation smgr, bool isLocalBuf, ForkNumber forkNum,
* This can happen because mdread doesn't complain about reads beyond
* EOF (when zero_damaged_pages is ON) and so a previous attempt to
* read a block beyond EOF could have left a "valid" zero-filled
- * buffer. Unfortunately, we have also seen this case occurring
+ * buffer. Unfortunately, we have also seen this case occurring
* because of buggy Linux kernels that sometimes return an
* lseek(SEEK_END) result that doesn't account for a recent write. In
* that situation, the pre-existing buffer would contain valid data
@@ -558,7 +558,7 @@ BufferAlloc(SMgrRelation smgr, ForkNumber forkNum,
/*
* Didn't find it in the buffer pool. We'll have to initialize a new
- * buffer. Remember to unlock the mapping lock while doing the work.
+ * buffer. Remember to unlock the mapping lock while doing the work.
*/
LWLockRelease(newPartitionLock);
@@ -568,7 +568,7 @@ BufferAlloc(SMgrRelation smgr, ForkNumber forkNum,
bool lock_held;
/*
- * Select a victim buffer. The buffer is returned with its header
+ * Select a victim buffer. The buffer is returned with its header
* spinlock still held! Also (in most cases) the BufFreelistLock is
* still held, since it would be bad to hold the spinlock while
* possibly waking up other processes.
@@ -617,7 +617,7 @@ BufferAlloc(SMgrRelation smgr, ForkNumber forkNum,
* If using a nondefault strategy, and writing the buffer
* would require a WAL flush, let the strategy decide whether
* to go ahead and write/reuse the buffer or to choose another
- * victim. We need lock to inspect the page LSN, so this
+ * victim. We need lock to inspect the page LSN, so this
* can't be done inside StrategyGetBuffer.
*/
if (strategy != NULL &&
@@ -738,7 +738,7 @@ BufferAlloc(SMgrRelation smgr, ForkNumber forkNum,
{
/*
* We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
+ * in the page, or (b) a previous read attempt failed. We
* have to wait for any active read attempt to finish, and
* then set up our own read attempt if the page is still not
* BM_VALID. StartBufferIO does it all.
@@ -828,7 +828,7 @@ BufferAlloc(SMgrRelation smgr, ForkNumber forkNum,
* This is used only in contexts such as dropping a relation. We assume
* that no other backend could possibly be interested in using the page,
* so the only reason the buffer might be pinned is if someone else is
- * trying to write it out. We have to let them finish before we can
+ * trying to write it out. We have to let them finish before we can
* reclaim the buffer.
*
* The buffer could get reclaimed by someone else while we are waiting
@@ -927,7 +927,7 @@ retry:
*
* Marks buffer contents as dirty (actual write happens later).
*
- * Buffer must be pinned and exclusive-locked. (If caller does not hold
+ * Buffer must be pinned and exclusive-locked. (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
@@ -971,7 +971,7 @@ MarkBufferDirty(Buffer buffer)
*
* Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
* compared to calling the two routines separately. Now it's mainly just
- * a convenience function. However, if the passed buffer is valid and
+ * a convenience function. However, if the passed buffer is valid and
* already contains the desired block, we just return it as-is; and that
* does save considerable work compared to a full release and reacquire.
*
@@ -1023,7 +1023,7 @@ ReleaseAndReadBuffer(Buffer buffer,
* when we first pin it; for other strategies we just make sure the usage_count
* isn't zero. (The idea of the latter is that we don't want synchronized
* heap scans to inflate the count, but we need it to not be zero to discourage
- * other backends from stealing buffers from our ring. As long as we cycle
+ * other backends from stealing buffers from our ring. As long as we cycle
* through the ring faster than the global clock-sweep cycles, buffers in
* our ring won't be chosen as victims for replacement by other backends.)
*
@@ -1031,7 +1031,7 @@ ReleaseAndReadBuffer(Buffer buffer,
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
@@ -1174,7 +1174,7 @@ BufferSync(int flags)
* have the flag set.
*
* Note that if we fail to write some buffer, we may leave buffers with
- * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
+ * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
* certainly need to be written for the next checkpoint attempt, too.
*/
num_to_write = 0;
@@ -1965,7 +1965,7 @@ RelationGetNumberOfBlocks(Relation relation)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* Currently, this is called only from smgr.c when the underlying file
@@ -1974,7 +1974,7 @@ RelationGetNumberOfBlocks(Relation relation)
* be deleted momentarily anyway, and there is no point in writing it.
* It is the responsibility of higher-level code to ensure that the
* deletion or truncation does not lose any data that could be needed
- * later. It is also the responsibility of higher-level code to ensure
+ * later. It is also the responsibility of higher-level code to ensure
* that no other process could be trying to load more pages of the
* relation into buffers.
*
@@ -2015,9 +2015,9 @@ DropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum, bool istemp,
*
* This function removes all the buffers in the buffer cache for a
* particular database. Dirty pages are simply dropped, without
- * bothering to write them out first. This is used when we destroy a
+ * bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
- * tree no longer exists. Implementation is pretty similar to
+ * tree no longer exists. Implementation is pretty similar to
* DropRelFileNodeBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
@@ -2316,9 +2316,9 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
/*
* This routine might get called many times on the same page, if we are
* making the first scan after commit of an xact that added/deleted many
- * tuples. So, be as quick as we can if the buffer is already dirty. We
+ * tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
- * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
+ * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
* immediately after we look, because the buffer content update is already
* done and will be reflected in the I/O.)
*/
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index e4d32d9bbe..e7fcb54ab4 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -36,7 +36,7 @@ typedef struct
*/
/*
- * Statistics. These counters should be wide enough that they can't
+ * Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
uint32 completePasses; /* Complete cycles of the clock sweep */
@@ -129,7 +129,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
/*
* We count buffer allocation requests so that the bgwriter can estimate
- * the rate of buffer consumption. Note that buffers recycled by a
+ * the rate of buffer consumption. Note that buffers recycled by a
* strategy object are intentionally not counted here.
*/
StrategyControl->numBufferAllocs++;
@@ -248,7 +248,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
*
* In addition, we return the completed-pass count (which is effectively
* the higher-order bits of nextVictimBuffer) and the count of recent buffer
- * allocs if non-NULL pointers are passed. The alloc count is reset after
+ * allocs if non-NULL pointers are passed. The alloc count is reset after
* being read.
*/
int
@@ -442,7 +442,7 @@ GetBufferFromRing(BufferAccessStrategy strategy)
/*
* If the slot hasn't been filled yet, tell the caller to allocate a new
- * buffer with the normal allocation strategy. He will then fill this
+ * buffer with the normal allocation strategy. He will then fill this
* slot by calling AddBufferToRing with the new buffer.
*/
bufnum = strategy->buffers[strategy->current];
@@ -495,7 +495,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf)
*
* When a nondefault strategy is used, the buffer manager calls this function
* when it turns out that the buffer selected by StrategyGetBuffer needs to
- * be written out and doing so would require flushing WAL too. This gives us
+ * be written out and doing so would require flushing WAL too. This gives us
* a chance to choose a different victim.
*
* Returns true if buffer manager should ask for a new victim, and false
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 3caa42a1a6..275e4d4b5d 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -94,7 +94,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
* Find or create a local buffer for the given page of the given relation.
*
* API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to do any locking since this is all local. Also, IO_IN_PROGRESS
+ * to do any locking since this is all local. Also, IO_IN_PROGRESS
* does not get set. Lastly, we support only default access strategy
* (hence, usage_count is always advanced).
*/
@@ -284,7 +284,7 @@ MarkLocalBufferDirty(Buffer buffer)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* See DropRelFileNodeBuffers in bufmgr.c for more notes.
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 21a4f04ae2..6d70136725 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -29,7 +29,7 @@
* that was current at that time.
*
* BufFile also supports temporary files that exceed the OS file size limit
- * (by opening multiple fd.c temporary files). This is an essential feature
+ * (by opening multiple fd.c temporary files). This is an essential feature
* for sorts and hashjoins on large amounts of data.
*-------------------------------------------------------------------------
*/
@@ -71,7 +71,7 @@ struct BufFile
bool dirty; /* does buffer need to be written? */
/*
- * resowner is the ResourceOwner to use for underlying temp files. (We
+ * resowner is the ResourceOwner to use for underlying temp files. (We
* don't need to remember the memory context we're using explicitly,
* because after creation we only repalloc our arrays larger.)
*/
@@ -518,7 +518,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)
{
/*
* Seek is to a point within existing buffer; we can just adjust
- * pos-within-buffer, without flushing buffer. Note this is OK
+ * pos-within-buffer, without flushing buffer. Note this is OK
* whether reading or writing, but buffer remains dirty if we were
* writing.
*/
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 8bc266641a..76c0d0ac7d 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -63,7 +63,7 @@
* and other code that tries to open files without consulting fd.c. This
* is the number left free. (While we can be pretty sure we won't get
* EMFILE, there's never any guarantee that we won't get ENFILE due to
- * other processes chewing up FDs. So it's a bad idea to try to open files
+ * other processes chewing up FDs. So it's a bad idea to try to open files
* without consulting fd.c. Nonetheless we cannot control all code.)
*
* Because this is just a fixed setting, we are effectively assuming that
@@ -153,8 +153,8 @@ typedef struct vfd
} Vfd;
/*
- * Virtual File Descriptor array pointer and size. This grows as
- * needed. 'File' values are indexes into this array.
+ * Virtual File Descriptor array pointer and size. This grows as
+ * needed. 'File' values are indexes into this array.
* Note that VfdCache[0] is not a usable VFD, just a list header.
*/
static Vfd *VfdCache;
@@ -224,7 +224,7 @@ static int nextTempTableSpace = 0;
*
* The Least Recently Used ring is a doubly linked list that begins and
* ends on element zero. Element zero is special -- it doesn't represent
- * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
+ * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
* anchor that shows us the beginning/end of the ring.
* Only VFD elements that are currently really open (have an FD assigned) are
* in the Lru ring. Elements that are "virtually" open can be recognized
@@ -359,7 +359,7 @@ InitFileAccess(void)
* We stop counting if usable_fds reaches max_to_probe. Note: a small
* value of max_to_probe might result in an underestimate of already_open;
* we must fill in any "gaps" in the set of used FDs before the calculation
- * of already_open will give the right answer. In practice, max_to_probe
+ * of already_open will give the right answer. In practice, max_to_probe
* of a couple of dozen should be enough to ensure good results.
*
* We assume stdin (FD 0) is available for dup'ing
@@ -436,7 +436,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups. We
+ * Return results. usable_fds is just the number of successful dups. We
* assume that the system limit is highestfd+1 (remember 0 is a legal FD
* number) and so already_open is highestfd+1 - usable_fds.
*/
@@ -919,7 +919,7 @@ OpenTemporaryFile(bool interXact)
/*
* If not, or if tablespace is bad, create in database's default
- * tablespace. MyDatabaseTableSpace should normally be set before we get
+ * tablespace. MyDatabaseTableSpace should normally be set before we get
* here, but just in case it isn't, fall back to pg_default tablespace.
*/
if (file <= 0)
@@ -1370,7 +1370,7 @@ FileTruncate(File file, off_t offset)
/*
* Routines that want to use stdio (ie, FILE*) should use AllocateFile
* rather than plain fopen(). This lets fd.c deal with freeing FDs if
- * necessary to open the file. When done, call FreeFile rather than fclose.
+ * necessary to open the file. When done, call FreeFile rather than fclose.
*
* Note that files that will be open for any significant length of time
* should NOT be handled this way, since they cannot share kernel file
@@ -1549,7 +1549,7 @@ TryAgain:
* Read a directory opened with AllocateDir, ereport'ing any error.
*
* This is easier to use than raw readdir() since it takes care of some
- * otherwise rather tedious and error-prone manipulation of errno. Also,
+ * otherwise rather tedious and error-prone manipulation of errno. Also,
* if you are happy with a generic error message for AllocateDir failure,
* you can just do
*
@@ -1665,7 +1665,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces)
numTempTableSpaces = numSpaces;
/*
- * Select a random starting point in the list. This is to minimize
+ * Select a random starting point in the list. This is to minimize
* conflicts between backends that are most likely sharing the same list
* of temp tablespaces. Note that if we create multiple temp files in the
* same transaction, we'll advance circularly through the list --- this
@@ -1694,7 +1694,7 @@ TempTablespacesAreSet(void)
/*
* GetNextTempTableSpace
*
- * Select the next temp tablespace to use. A result of InvalidOid means
+ * Select the next temp tablespace to use. A result of InvalidOid means
* to use the current database's default tablespace.
*/
Oid
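The fd.c hunks above describe temp-tablespace selection: pick a random starting point in the backend's list, then advance circularly, with InvalidOid meaning "use the database default". A self-contained sketch of that policy (the OIDs and names are made up for illustration):

    #include <stdlib.h>

    typedef unsigned int Oid;
    #define InvalidOid ((Oid) 0)

    static Oid tempTableSpaces[] = {16385, 16386, 16387}; /* hypothetical */
    static int numTempTableSpaces = 3;
    static int nextTempTableSpace;

    /* Random start minimizes collisions between backends that share the
     * same list; each backend then walks the list round-robin. */
    static void
    choose_starting_point(void)
    {
        nextTempTableSpace = rand() % numTempTableSpaces;
    }

    static Oid
    get_next_temp_tablespace(void)
    {
        Oid ts;

        if (numTempTableSpaces <= 0)
            return InvalidOid;          /* fall back to database default */
        ts = tempTableSpaces[nextTempTableSpace];
        nextTempTableSpace = (nextTempTableSpace + 1) % numTempTableSpaces;
        return ts;
    }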
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index f61c5143ba..85e1d20a54 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -52,7 +52,7 @@
* Range Category
* 0 - 31 0
* 32 - 63 1
- * ... ... ...
+ * ... ... ...
* 8096 - 8127 253
* 8128 - 8163 254
* 8164 - 8192 255
@@ -127,7 +127,7 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page and then try again (see
- * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
+ * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*/
BlockNumber
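The range-to-category table in the freespace.c header above maps each 32-byte step of available space to one category byte, with the top of the range clamped so category 255 always means the page is essentially empty. One way to read the table as code (a simplification; the real FSM code derives these bounds from the block size):

    static unsigned char
    space_to_category(int avail)
    {
        if (avail >= 8164)
            return 255;
        if (avail >= 8128)
            return 254;
        return (unsigned char) (avail / 32); /* 0-31 -> 0, 32-63 -> 1, ... */
    }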
diff --git a/src/backend/storage/freespace/fsmpage.c b/src/backend/storage/freespace/fsmpage.c
index e8feaf0aa2..cca190234c 100644
--- a/src/backend/storage/freespace/fsmpage.c
+++ b/src/backend/storage/freespace/fsmpage.c
@@ -185,13 +185,13 @@ restart:
/*----------
* Start the search from the target slot. At every step, move one
- * node to the right, then climb up to the parent. Stop when we reach
+ * node to the right, then climb up to the parent. Stop when we reach
* a node with enough free space (as we must, since the root has enough
* space).
*
* The idea is to gradually expand our "search triangle", that is, all
* nodes covered by the current node, and to be sure we search to the
- * right from the start point. At the first step, only the target slot
+ * right from the start point. At the first step, only the target slot
* is examined. When we move up from a left child to its parent, we are
* adding the right-hand subtree of that parent to the search triangle.
* When we move right then up from a right child, we are dropping the
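The fsmpage.c search described above walks a binary tree stored in an array: from the target leaf, step one node to the right, then climb to the parent, until a node with enough space is reached. Using the usual array-heap layout (parent of i is (i-1)/2), the walk looks roughly like this; bounds handling and the leaf/page details are omitted:

    /* Returns the index of the first node, at or above the target, whose
     * subtree has enough free space.  Terminates because the root does. */
    static int
    search_from(const unsigned char *node, int target, int needed)
    {
        int i = target;

        while (node[i] < needed)
        {
            i++;             /* move one node to the right ... */
            i = (i - 1) / 2; /* ... then climb up to the parent */
        }
        return i;
    }

Moving right from a left child and climbing adds the parent's right-hand subtree to the search triangle; moving right from a right child lands under the next parent, dropping the exhausted subtree, exactly as the comment describes.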
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 4b5ff57b97..57ed0e64b0 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
@@ -84,7 +84,7 @@ static int on_proc_exit_index,
* -cim 2/6/90
*
* Unfortunately, we can't really guarantee that add-on code
- * obeys the rule of not calling exit() directly. So, while
+ * obeys the rule of not calling exit() directly. So, while
* this is the preferred way out of the system, we also register
* an atexit callback that will make sure cleanup happens.
* ----------------------------------------------------------------
@@ -103,7 +103,7 @@ proc_exit(int code)
* fixed file name, each backend will overwrite earlier profiles. To
* fix that, we create a separate subdirectory for each backend
* (./gprof/pid) and 'cd' to that subdirectory before we exit() - that
- * forces mcleanup() to write each profile into its own directory. We
+ * forces mcleanup() to write each profile into its own directory. We
* end up with something like: $PGDATA/gprof/8829/gmon.out
* $PGDATA/gprof/8845/gmon.out ...
*
@@ -257,7 +257,7 @@ atexit_callback(int exitstatus, void *arg)
* on_proc_exit
*
* this function adds a callback function to the list of
- * functions invoked by proc_exit(). -cim 2/6/90
+ * functions invoked by proc_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
@@ -288,7 +288,7 @@ on_proc_exit(pg_on_exit_callback function, Datum arg)
* on_shmem_exit
*
* this function adds a callback function to the list of
- * functions invoked by shmem_exit(). -cim 2/6/90
+ * functions invoked by shmem_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
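The ipc.c hunks above concern proc_exit's callback machinery: callbacks registered with on_proc_exit are invoked at exit time in reverse order of registration. A minimal sketch of that LIFO scheme (illustrative names, not the real ipc.c declarations):

    #define MAX_ON_EXITS 20

    typedef void (*exit_cb) (int code, void *arg);

    static struct
    {
        exit_cb func;
        void   *arg;
    } on_exit_list[MAX_ON_EXITS];
    static int on_exit_index;

    static void
    register_proc_exit_cb(exit_cb func, void *arg)
    {
        if (on_exit_index < MAX_ON_EXITS)
        {
            on_exit_list[on_exit_index].func = func;
            on_exit_list[on_exit_index].arg = arg;
            on_exit_index++;
        }
    }

    static void
    run_proc_exit_cbs(int code)
    {
        /* Last registered, first called. */
        while (--on_exit_index >= 0)
            on_exit_list[on_exit_index].func(code,
                                             on_exit_list[on_exit_index].arg);
        /* exit(code) would follow here */
    }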
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 281215d218..ef21102446 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -46,7 +46,7 @@ static bool addin_request_allowed = true;
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -76,7 +76,7 @@ RequestAddinShmemSpace(Size size)
* This is a bit code-wasteful and could be cleaned up.)
*
* If "makePrivate" is true then we only need private memory, not shared
- * memory. This is true for a standalone backend, false for a postmaster.
+ * memory. This is true for a standalone backend, false for a postmaster.
*/
void
CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index 1b77f463b6..98703402d3 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -25,9 +25,9 @@
/*
* The postmaster is signaled by its children by sending SIGUSR1. The
- * specific reason is communicated via flags in shared memory. We keep
+ * specific reason is communicated via flags in shared memory. We keep
* a boolean flag for each possible "reason", so that different reasons
- * can be signaled by different backends at the same time. (However,
+ * can be signaled by different backends at the same time. (However,
* if the same reason is signaled more than once simultaneously, the
* postmaster will observe it only once.)
*
@@ -41,7 +41,7 @@
* have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is
* available for assignment. An ASSIGNED slot is associated with a postmaster
* child process, but either the process has not touched shared memory yet,
- * or it has successfully cleaned up after itself. A ACTIVE slot means the
+ * or it has successfully cleaned up after itself. A ACTIVE slot means the
* process is actively using shared memory. The slots are assigned to
* child processes at random, and postmaster.c is responsible for tracking
* which one goes with which PID.
@@ -246,7 +246,7 @@ PostmasterIsAlive(bool amDirectChild)
else
{
/*
- * Use kill() to see if the postmaster is still alive. This can
+ * Use kill() to see if the postmaster is still alive. This can
* sometimes give a false positive result, since the postmaster's PID
* may get recycled, but it is good enough for existing uses by
* indirect children.
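The PostmasterIsAlive hunk above relies on kill() with signal 0, which performs permission and existence checking without delivering a signal. A sketch of that probe; as the comment warns, PID recycling can yield false positives, so this is only a heuristic:

    #include <errno.h>
    #include <signal.h>
    #include <stdbool.h>
    #include <sys/types.h>

    static bool
    pid_is_alive(pid_t pid)
    {
        if (kill(pid, 0) == 0)
            return true;         /* process exists */
        return (errno == EPERM); /* exists, but owned by another user */
    }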
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index f4b8ad700a..9fbaae02b4 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -141,7 +141,7 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is a
+ * Ooops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
@@ -326,7 +326,7 @@ ProcArrayClearTransaction(PGPROC *proc)
* We can find this out cheaply too.
*
* 3. Search the SubTrans tree to find the Xid's topmost parent, and then
- * see if that is running according to PGPROC. This is the slowest, but
+ * see if that is running according to PGPROC. This is the slowest, but
* sadly it has to be done always if the other two failed, unless we see
* that the cached subxact sets are complete (none have overflowed).
*
@@ -569,9 +569,9 @@ TransactionIdIsActive(TransactionId xid)
* ignored.
*
* This is used by VACUUM to decide which deleted tuples must be preserved
- * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
+ * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
- * own database could ever see the tuples in them. Also, we can ignore
+ * own database could ever see the tuples in them. Also, we can ignore
* concurrently running lazy VACUUMs because (a) they must be working on other
* tables, and (b) they don't need to do snapshot-based lookups.
*
@@ -766,7 +766,7 @@ GetSnapshotData(Snapshot snapshot)
/*
* If the transaction has been assigned an xid < xmax we add it to the
- * snapshot, and update xmin if necessary. There's no need to store
+ * snapshot, and update xmin if necessary. There's no need to store
* XIDs >= xmax, since we'll treat them as running anyway. We don't
* bother to examine their subxids either.
*
@@ -791,7 +791,7 @@ GetSnapshotData(Snapshot snapshot)
* while holding the ProcArrayLock.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once. Should
+ * remove any. Hence it's important to fetch nxids just once. Should
* be safe to use memcpy, though. (We needn't worry about missing any
* xids added concurrently, because they must postdate xmax.)
*
@@ -990,7 +990,7 @@ BackendPidGetProc(int pid)
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*
- * Beware that not every xact has an XID assigned. However, as long as you
+ * Beware that not every xact has an XID assigned. However, as long as you
* only call this using an XID found on disk, you're safe.
*/
int
@@ -1050,7 +1050,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
- * other backend could have set its xmin *before* we look. We know however
+ * other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@@ -1319,7 +1319,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
* XidCacheRemoveRunningXids
*
* Remove a bunch of TransactionIds from the list of known-running
- * subtransactions for my backend. Both the specified xid and those in
+ * subtransactions for my backend. Both the specified xid and those in
* the xids[] array (of length nxids) are removed from the subxids cache.
* latestXid must be the latest XID among the group.
*/
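The GetSnapshotData hunks above encode the core snapshot rule: running XIDs below xmax go into the snapshot's array, while XIDs at or above xmax need not be stored because they are treated as in-progress regardless. A simplified visibility check built on that rule (plain uint32 comparison, no subtransactions, wraparound ignored):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t Xid;

    typedef struct
    {
        Xid xmin;    /* all XIDs < xmin are finished */
        Xid xmax;    /* all XIDs >= xmax are treated as running */
        Xid xip[64]; /* running XIDs in [xmin, xmax) */
        int xcnt;
    } MiniSnapshot;

    static bool
    xid_in_progress(const MiniSnapshot *s, Xid xid)
    {
        int i;

        if (xid < s->xmin)
            return false; /* definitely finished */
        if (xid >= s->xmax)
            return true;  /* never stored; treated as running anyway */
        for (i = 0; i < s->xcnt; i++)
            if (s->xip[i] == xid)
                return true;
        return false;
    }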
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 0e76409940..0bba5d00c8 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -26,7 +26,7 @@
* for a module and should never be allocated after the shared memory
* initialization phase. Hash tables have a fixed maximum size, but
* their actual size can vary dynamically. When entries are added
- * to the table, more space is allocated. Queues link data structures
+ * to the table, more space is allocated. Queues link data structures
* that have been allocated either within fixed-size structures or as hash
* buckets. Each shared data structure has a string name to identify
* it (assigned in the module that declares it).
@@ -40,7 +40,7 @@
* The shmem index has two purposes: first, it gives us
* a simple model of how the world looks when a backend process
* initializes. If something is present in the shmem index,
- * it is initialized. If it is not, it is uninitialized. Second,
+ * it is initialized. If it is not, it is uninitialized. Second,
* the shmem index allows us to allocate shared memory on demand
* instead of trying to preallocate structures and hard-wire the
* sizes and locations in header files. If you are using a lot
@@ -55,8 +55,8 @@
* pointers using the method described in (b) above.
*
* (d) memory allocation model: shared memory can never be
- * freed, once allocated. Each hash table has its own free list,
- * so hash buckets can be reused when an item is deleted. However,
+ * freed, once allocated. Each hash table has its own free list,
+ * so hash buckets can be reused when an item is deleted. However,
* if one hash table grows very large and then shrinks, its space
* cannot be redistributed to other tables. We could build a simple
* hash bucket garbage collector if need be. Right now, it seems
@@ -116,7 +116,7 @@ InitShmemAllocation(void)
Assert(shmhdr != NULL);
/*
- * Initialize the spinlock used by ShmemAlloc. We have to do the space
+ * Initialize the spinlock used by ShmemAlloc. We have to do the space
* allocation the hard way, since obviously ShmemAlloc can't be called
* yet.
*/
@@ -215,7 +215,7 @@ InitShmemIndex(void)
/*
* Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
* hashtable to exist already, we have a bit of a circularity problem in
- * initializing the ShmemIndex itself. The special "ShmemIndex" hash
+ * initializing the ShmemIndex itself. The special "ShmemIndex" hash
* table name will tell ShmemInitStruct to fake it.
*/
@@ -299,7 +299,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* memory.
*
* This is called during initialization to find or allocate
- * a data structure in shared memory. If no other process
+ * a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* table is returned.
@@ -335,7 +335,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
- * index has been initialized. This should be OK because no other
+ * index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);
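The ShmemInitStruct comments above describe a find-or-allocate idiom: the first process to ask for a named structure allocates and initializes it, and later processes simply attach. The usual calling pattern, sketched against a hypothetical shmem_init_struct() with the same shape as the real API:

    #include <stdbool.h>
    #include <stddef.h>

    extern void *shmem_init_struct(const char *name, size_t size, bool *found);

    typedef struct
    {
        int nentries;
    } MySharedState;

    static MySharedState *
    attach_my_state(void)
    {
        bool found;
        MySharedState *state;

        state = shmem_init_struct("My Shared State",
                                  sizeof(MySharedState), &found);
        if (!found)
            state->nentries = 0; /* we created it: initialize contents */
        /* if found, another process already initialized it; just use it */
        return state;
    }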
diff --git a/src/backend/storage/ipc/shmqueue.c b/src/backend/storage/ipc/shmqueue.c
index 3bdf474453..57f0b31dc8 100644
--- a/src/backend/storage/ipc/shmqueue.c
+++ b/src/backend/storage/ipc/shmqueue.c
@@ -14,7 +14,7 @@
*
* Package for managing doubly-linked lists in shared memory.
* The only tricky thing is that SHM_QUEUE will usually be a field
- * in a larger record. SHMQueueNext has to return a pointer
+ * in a larger record. SHMQueueNext has to return a pointer
* to the record itself instead of a pointer to the SHMQueue field
* of the record. It takes an extra parameter and does some extra
* pointer arithmetic to do this correctly.
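The "extra pointer arithmetic" mentioned above is the classic recover-the-container trick: given a pointer to a link field embedded somewhere inside a record, subtract the field's offset to get back to the record itself. A sketch:

    #include <stddef.h>

    typedef struct Link
    {
        struct Link *prev;
        struct Link *next;
    } Link;

    typedef struct
    {
        int  payload;
        Link links;  /* queue link embedded mid-record */
    } Item;

    /* offsetof() yields the byte offset of the link field within Item. */
    static Item *
    item_from_link(Link *lp)
    {
        return (Item *) ((char *) lp - offsetof(Item, links));
    }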
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 4b3d42348f..d2d8f6884e 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -26,7 +26,7 @@
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
* it catch up before the sinval queue overflows and forces it to go
- * through a cache reset exercise. This is done by sending SIGUSR1
+ * through a cache reset exercise. This is done by sending SIGUSR1
* to any backend that gets too far behind.
*
* State for catchup events consists of two flags: one saying whether
@@ -65,7 +65,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
- * inval messages have been processed before it exits. This is the reason
+ * inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@@ -131,7 +131,7 @@ ReceiveSharedInvalidMessages(
* We are now caught up. If we received a catchup signal, reset that
* flag, and call SICleanupQueue(). This is not so much because we need
* to flush dead messages right now, as that we want to pass on the
- * catchup signal to the next slowest backend. "Daisy chaining" the
+ * catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for what
* should be just a background maintenance activity.
*/
@@ -151,7 +151,7 @@ ReceiveSharedInvalidMessages(
*
* If we are idle (catchupInterruptEnabled is set), we can safely
* invoke ProcessCatchupEvent directly. Otherwise, just set a flag
- * to do it later. (Note that it's quite possible for normal processing
+ * to do it later. (Note that it's quite possible for normal processing
* of the current transaction to cause ReceiveSharedInvalidMessages()
* to be run later on; in that case the flag will get cleared again,
* since there's no longer any reason to do anything.)
@@ -162,7 +162,7 @@ CatchupInterruptHandler(SIGNAL_ARGS)
int save_errno = errno;
/*
- * Note: this is a SIGNAL HANDLER. You must be very wary what you do
+ * Note: this is a SIGNAL HANDLER. You must be very wary what you do
* here.
*/
@@ -231,7 +231,7 @@ CatchupInterruptHandler(SIGNAL_ARGS)
* EnableCatchupInterrupt
*
* This is called by the PostgresMain main loop just before waiting
- * for a frontend command. We process any pending catchup events,
+ * for a frontend command. We process any pending catchup events,
* and enable the signal handler to process future events directly.
*
* NOTE: the signal handler starts out disabled, and stays so until
@@ -276,7 +276,7 @@ EnableCatchupInterrupt(void)
* DisableCatchupInterrupt
*
* This is called by the PostgresMain main loop just after receiving
- * a frontend command. Signal handler execution of catchup events
+ * a frontend command. Signal handler execution of catchup events
* is disabled until the next EnableCatchupInterrupt call.
*
* The SIGUSR2 signal handler also needs to call this, so as to
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index fcd1e42a7f..80102658d0 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -44,7 +44,7 @@
* In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES
* entries. We translate MsgNum values into circular-buffer indexes by
* computing MsgNum % MAXNUMMESSAGES (this should be fast as long as
- * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
+ * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
* doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space
* in the buffer. If the buffer does overflow, we recover by setting the
* "reset" flag for each backend that has fallen too far behind. A backend
@@ -57,7 +57,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
- * needed. This avoids undue contention from multiple backends all trying
+ * needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@@ -88,7 +88,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
- * to write maxMsgNum. (The exact rule is that you need the spinlock to
+ * to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@@ -394,7 +394,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;
/*
- * N can be arbitrarily large. We divide the work into groups of no more
+ * N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
@@ -416,7 +416,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
- * fullness threshold. We have to loop and recheck the buffer state
+ * fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
@@ -470,11 +470,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
- * in shared mode. Note that this is not exactly the normal (read-only)
+ * in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
- * NB: this can also run in parallel with SIInsertDataEntries. It is not
+ * NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@@ -557,7 +557,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
- * free message slots at exit. Caller must recheck and perhaps retry.
+ * free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
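The sinvaladt.c header above notes that translating message numbers into circular-buffer slots is cheap as long as MAXNUMMESSAGES is a constant power of 2, since the modulo reduces to a mask. A sketch of that indexing (capacity chosen arbitrarily here):

    #define MAXNUMMESSAGES 4096  /* must be a power of 2 */

    /* For non-negative msgnum this equals msgnum % MAXNUMMESSAGES,
     * but compiles to a single AND instruction. */
    static int
    msg_index(int msgnum)
    {
        return msgnum & (MAXNUMMESSAGES - 1);
    }

The space invariant follows directly: as long as maxMsgNum - minMsgNum never exceeds MAXNUMMESSAGES, no live entry is overwritten.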
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index aeda7a2c76..afce9d79a5 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -776,7 +776,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
/*
* If we found the page of the truncation point we need to truncate the
- * data in it. Otherwise if we're in a hole, we need to create a page to
+ * data in it. Otherwise if we're in a hole, we need to create a page to
* mark the end of data.
*/
if (olddata != NULL && olddata->pageno == pageno)
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 3e2051dbda..9041e71046 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -51,7 +51,7 @@ typedef struct
} WAIT_ORDER;
/*
- * Information saved about each edge in a detected deadlock cycle. This
+ * Information saved about each edge in a detected deadlock cycle. This
* is used to print a diagnostic message upon failure.
*
* Note: because we want to examine this info after releasing the lock
@@ -119,7 +119,7 @@ static PGPROC *blocking_autovacuum_proc = NULL;
* InitDeadLockChecking -- initialize deadlock checker during backend startup
*
* This does per-backend initialization of the deadlock checker; primarily,
- * allocation of working memory for DeadLockCheck. We do this per-backend
+ * allocation of working memory for DeadLockCheck. We do this per-backend
* since there's no percentage in making the kernel do copy-on-write
* inheritance of workspace from the postmaster. We want to allocate the
* space at startup because (a) the deadlock checker might be invoked when
@@ -291,10 +291,10 @@ GetBlockingAutoVacuumPgproc(void)
* DeadLockCheckRecurse -- recursively search for valid orderings
*
* curConstraints[] holds the current set of constraints being considered
- * by an outer level of recursion. Add to this each possible solution
+ * by an outer level of recursion. Add to this each possible solution
* constraint for any cycle detected at this level.
*
- * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
+ * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
* state is attainable, in which case waitOrders[] shows the required
* rearrangements of lock wait queues (if any).
*/
@@ -429,7 +429,7 @@ TestConfiguration(PGPROC *startProc)
*
* Since we need to be able to check hypothetical configurations that would
* exist after wait queue rearrangement, the routine pays attention to the
- * table of hypothetical queue orders in waitOrders[]. These orders will
+ * table of hypothetical queue orders in waitOrders[]. These orders will
* be believed in preference to the actual ordering seen in the locktable.
*/
static bool
@@ -505,7 +505,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are "hard"
+ * Scan for procs that already hold conflicting locks. These are "hard"
* edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
@@ -703,7 +703,7 @@ ExpandConstraints(EDGE *constraints,
nWaitOrders = 0;
/*
- * Scan constraint list backwards. This is because the last-added
+ * Scan constraint list backwards. This is because the last-added
* constraint is the only one that could fail, and so we want to test it
* for inconsistency first.
*/
@@ -757,7 +757,7 @@ ExpandConstraints(EDGE *constraints,
* The initial queue ordering is taken directly from the lock's wait queue.
* The output is an array of PGPROC pointers, of length equal to the lock's
* wait queue length (the caller is responsible for providing this space).
- * The partial order is specified by an array of EDGE structs. Each EDGE
+ * The partial order is specified by an array of EDGE structs. Each EDGE
* is one that we need to reverse, therefore the "waiter" must appear before
* the "blocker" in the output array. The EDGE array may well contain
* edges associated with other locks; these should be ignored.
@@ -827,7 +827,7 @@ TopoSort(LOCK *lock,
afterConstraints[k] = i + 1;
}
/*--------------------
- * Now scan the topoProcs array backwards. At each step, output the
+ * Now scan the topoProcs array backwards. At each step, output the
* last proc that has no remaining before-constraints, and decrease
* the beforeConstraints count of each of the procs it was constrained
* against.
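The TopoSort comments above describe emitting procs one at a time once their before-constraints are exhausted. The real code scans backwards over per-proc constraint lists; the sketch below is a simplified forward variant of the same idea over a small dense matrix, returning 0 when the constraints are cyclic (inconsistent):

    #define NPROCS 4

    /* before[i][j] != 0 means proc j must appear before proc i. */
    static int before[NPROCS][NPROCS];

    static int
    topo_sort(int out[NPROCS])
    {
        int remaining[NPROCS];     /* predecessors not yet emitted */
        int emitted[NPROCS] = {0};
        int i, j, k;

        for (i = 0; i < NPROCS; i++)
        {
            remaining[i] = 0;
            for (j = 0; j < NPROCS; j++)
                remaining[i] += (before[i][j] != 0);
        }

        for (k = 0; k < NPROCS; k++)
        {
            int pick = -1;

            for (i = 0; i < NPROCS && pick < 0; i++)
                if (!emitted[i] && remaining[i] == 0)
                    pick = i;
            if (pick < 0)
                return 0;           /* cycle: no valid ordering exists */
            out[k] = pick;
            emitted[pick] = 1;
            for (i = 0; i < NPROCS; i++)
                if (before[i][pick])
                    remaining[i]--; /* pick is placed; release successors */
        }
        return 1;
    }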
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 5955bb38d5..01773862ef 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -65,7 +65,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
/*
* LockRelationOid
*
- * Lock a relation given only its OID. This should generally be used
+ * Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
@@ -234,7 +234,7 @@ UnlockRelation(Relation relation, LOCKMODE lockmode)
/*
* LockRelationIdForSession
*
- * This routine grabs a session-level lock on the target relation. The
+ * This routine grabs a session-level lock on the target relation. The
* session lock persists across transaction boundaries. It will be removed
* when UnlockRelationIdForSession() is called, or if an ereport(ERROR) occurs,
* or if the backend exits.
@@ -437,7 +437,7 @@ XactLockTableInsert(TransactionId xid)
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans IDs.)
+ * released implicitly at transaction end. But we do use it for subtrans IDs.)
*/
void
XactLockTableDelete(TransactionId xid)
@@ -458,7 +458,7 @@ XactLockTableDelete(TransactionId xid)
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 6a29210496..e5f11385a3 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -728,7 +728,7 @@ LockAcquire(const LOCKTAG *locktag,
/*
* If lock requested conflicts with locks requested by waiters, must join
- * wait queue. Otherwise, check for conflict with already-held locks.
+ * wait queue. Otherwise, check for conflict with already-held locks.
* (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
@@ -805,7 +805,7 @@ LockAcquire(const LOCKTAG *locktag,
/*
* NOTE: do not do any material change of state between here and
- * return. All required changes in locktable state must have been
+ * return. All required changes in locktable state must have been
* done when the lock was granted to us --- see notes in WaitOnLock.
*/
@@ -885,7 +885,7 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock. We have
+ * Rats. Something conflicts. But it could still be my own lock. We have
* to construct a conflict mask that does not reflect our own locks, but
* only lock types held by other processes.
*/
@@ -977,7 +977,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
/*
* We need only run ProcLockWakeup if the released lock conflicts with at
- * least one of the lock types requested by waiter(s). Otherwise whatever
+ * least one of the lock types requested by waiter(s). Otherwise whatever
* conflict made them wait must still exist. NOTE: before MVCC, we could
* skip wakeup if lock->granted[lockmode] was still positive. But that's
* not true anymore, because the remaining granted locks might belong to
@@ -997,7 +997,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
@@ -1356,7 +1356,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
}
/*
- * Decrease the total local count. If we're still holding the lock, we're
+ * Decrease the total local count. If we're still holding the lock, we're
* done.
*/
locallock->nLocks--;
@@ -2022,7 +2022,7 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. So, unlink
+ * the proclock would then be in the wrong hash chain. So, unlink
* and delete the old proclock; create a new one with the right
* contents; and link it into place. We do it in this order to be
* certain we won't run out of shared memory (the way dynahash.c
@@ -2144,7 +2144,7 @@ GetLockStatusData(void)
* view of the state.
*
* Since this is a read-only operation, we take shared instead of
- * exclusive lock. There's not a whole lot of point to this, because all
+ * exclusive lock. There's not a whole lot of point to this, because all
* the normal operations require exclusive lock, but it doesn't hurt
* anything either. It will at least allow two backends to do
* GetLockStatusData in parallel.
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index b92ee50c0d..7ec3d245ff 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -6,7 +6,7 @@
* Lightweight locks are intended primarily to provide mutual exclusion of
* access to shared-memory data structures. Therefore, they offer both
* exclusive and shared lock modes (to support read/write and read-only
- * access to a shared object). There are few other frammishes. User-level
+ * access to a shared object). There are few other frammishes. User-level
* locking should be done with the full lock manager --- which depends on
* LWLocks to protect its shared state.
*
@@ -51,7 +51,7 @@ typedef struct LWLock
* (LWLockIds are indexes into the array.) We force the array stride to
* be a power of 2, which saves a few cycles in indexing, but more
* importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
+ * boundaries. This reduces cache contention problems, especially on AMD
* Opterons. (Of course, we have to also ensure that the array start
* address is suitably aligned.)
*
@@ -192,7 +192,7 @@ NumLWLocks(void)
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -370,7 +370,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* in the presence of contention. The efficiency of being able to do that
* outweighs the inefficiency of sometimes wasting a process dispatch
* cycle because the lock is not free when a released waiter finally gets
- * to run. See pgsql-hackers archives for 29-Dec-01.
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
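The LWLock array comment above explains why each lock is padded to a power-of-two stride: indexing becomes a shift, and no lock straddles a cache line, provided the array start is suitably aligned. A sketch of that layout; the sizes are illustrative and the alignment attribute is gcc-style:

    #define LWLOCK_PADDED_SIZE 64  /* power of 2, >= sizeof(LWLockMini) */

    typedef struct
    {
        volatile int mutex;
        volatile int exclusive;  /* # exclusive holders (0 or 1) */
        volatile int shared;     /* # shared holders */
    } LWLockMini;

    typedef union
    {
        LWLockMini lock;
        char       pad[LWLOCK_PADDED_SIZE];  /* force the stride */
    } LWLockPadded;

    static LWLockPadded lock_array[128]
        __attribute__((aligned(LWLOCK_PADDED_SIZE)));

    static LWLockMini *
    get_lock(int id)
    {
        return &lock_array[id].lock;  /* stride-64 indexing is a shift */
    }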
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 439bea29f1..2a2531c22e 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -311,7 +311,7 @@ InitProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -369,7 +369,7 @@ InitProcessPhase2(void)
*
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
- * to the ProcArray or the sinval messaging mechanism, either. They also
+ * to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*/
@@ -449,7 +449,7 @@ InitAuxiliaryProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -534,7 +534,7 @@ LockWaitCancel(void)
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
- * semaphore is reset to zero. This prevented a leftover wakeup signal
+ * semaphore is reset to zero. This prevented a leftover wakeup signal
* from remaining in the semaphore if someone else had granted us the lock
* we wanted before we were able to remove ourselves from the wait-list.
* However, now that ProcSleep loops until waitStatus changes, a leftover
@@ -643,7 +643,7 @@ ProcKill(int code, Datum arg)
/*
* AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
- * processes (bgwriter, etc). The PGPROC and sema are not released, only
+ * processes (bgwriter, etc). The PGPROC and sema are not released, only
* marked as not-in-use.
*/
static void
@@ -767,7 +767,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
*
* Special case: if I find I should go in front of some waiter, check to
* see if I conflict with already-held locks or the requests before that
- * waiter. If not, then just grant myself the requested lock immediately.
+ * waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
* point.
@@ -786,7 +786,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean up
+ * Yes, so we have a deadlock. Easiest way to clean up
* correctly is to call RemoveFromWaitQueue(), but we
* can't do that until we are *on* the wait queue. So, set
* a flag to check below, and break out of loop. Also,
@@ -884,8 +884,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
- * implementation. While this is normally good, there are cases where a
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. While this is normally good, there are cases where a
* saved wakeup might be leftover from a previous operation (for example,
* we aborted ProcWaitForSignal just before someone did ProcSendSignal).
* So, loop to wait again if the waitStatus shows we haven't been granted
@@ -905,7 +905,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* waitStatus could change from STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
+ * asynchronously. Read it just once per loop to prevent surprising
* behavior (such as missing log messages).
*/
myWaitStatus = MyProc->waitStatus;
@@ -1299,10 +1299,10 @@ check_done:
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
- * before we actually reach the waiting state. Also as with locks,
+ * before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
- * if not. This copes with possible "leftover" wakeups.
+ * if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
@@ -1373,7 +1373,7 @@ PublishStartupProcessInformation(void)
/*
* Enable the SIGALRM interrupt to fire after the specified delay
*
- * Delay is given in milliseconds. Caller should be sure a SIGALRM
+ * Delay is given in milliseconds. Caller should be sure a SIGALRM
* signal handler is installed before this is called.
*
* This code properly handles nesting of deadlock timeout alarms within
@@ -1424,7 +1424,7 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
* because the signal handler will do only what it should do according
- * to the state variables. The deadlock checker may get run earlier
+ * to the state variables. The deadlock checker may get run earlier
* than normal, but that does no harm.
*/
timeout_start_time = GetCurrentTimestamp();
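The ProcWaitForSignal comment above states the rule for all the semaphore-based waits in this file: a wakeup can be "saved" in the semaphore from an earlier operation, so the waiter must re-check its condition and wait again on a bogus wakeup. The shape of such a loop, sketched with a POSIX semaphore standing in for the real PGSemaphore calls:

    #include <semaphore.h>
    #include <stdbool.h>

    extern sem_t my_sem;                /* shared with the waker */
    extern volatile bool condition_met; /* set by the waker before posting */

    static void
    wait_for_signal(void)
    {
        /* Re-check every time: the post we consume may be leftover. */
        while (!condition_met)
            sem_wait(&my_sem);
    }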
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index cecb6d4fb0..988d626156 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -77,7 +77,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
*
* We time out and declare error after NUM_DELAYS delays (thus, exactly
* that many tries). With the given settings, this will usually take 2 or
- * so minutes. It seems better to fix the total number of tries (and thus
+ * so minutes. It seems better to fix the total number of tries (and thus
* the probability of unintended failure) than to fix the total time
* spent.
*
@@ -140,7 +140,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
* Note: spins_per_delay is local within our current process. We want to
* average these observations across multiple backends, since it's
* relatively rare for this function to even get entered, and so a single
- * backend might not live long enough to converge on a good value. That
+ * backend might not live long enough to converge on a good value. That
* is handled by the two routines below.
*/
if (cur_delay == 0)
@@ -179,7 +179,7 @@ update_spins_per_delay(int shared_spins_per_delay)
/*
* We use an exponential moving average with a relatively slow adaption
* rate, so that noise in any one backend's result won't affect the shared
- * value too much. As long as both inputs are within the allowed range,
+ * value too much. As long as both inputs are within the allowed range,
* the result must be too, so we need not worry about clamping the result.
*
* We deliberately truncate rather than rounding; this is so that single
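The update_spins_per_delay comments above imply a heavily weighted moving average: the shared value dominates, so one backend's noisy observation moves it only slightly, and truncation makes isolated upward samples decay. A sketch consistent with that description (the exact 15/16 weighting is an assumption):

    /* Blend one backend's local observation into the shared estimate.
     * If both inputs lie in the allowed range, so does the result,
     * so no clamping is needed. */
    static int
    update_spins_estimate(int shared, int local_sample)
    {
        return (shared * 15 + local_sample) / 16;  /* truncating divide */
    }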
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index d374fe27e2..894dc9cbbf 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -5,7 +5,7 @@
*
*
* For machines that have test-and-set (TAS) instructions, s_lock.h/.c
- * define the spinlock implementation. This file contains only a stub
+ * define the spinlock implementation. This file contains only a stub
* implementation for spinlocks using PGSemaphores. Unless semaphores
* are implemented in a way that doesn't involve a kernel call, this
* is too slow to be very useful :-(
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 70784cf35a..a6d44c4679 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -54,7 +54,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* PageHeaderIsValid
* Check that the header fields of a page appear valid.
*
- * This is called when a page has just been read in from disk. The idea is
+ * This is called when a page has just been read in from disk. The idea is
* to cheaply detect trashed pages before we go nuts following bogus item
* pointers, testing invalid transaction identifiers, etc.
*
@@ -99,7 +99,7 @@ PageHeaderIsValid(PageHeader page)
/*
* PageAddItem
*
- * Add an item to a page. Return value is offset at which it was
+ * Add an item to a page. Return value is offset at which it was
* inserted, or InvalidOffsetNumber if there's not room to insert.
*
* If overwrite is true, we just store the item at the specified
@@ -699,7 +699,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
* PageIndexMultiDelete
*
* This routine handles the case of deleting multiple tuples from an
- * index page at once. It is considerably faster than a loop around
+ * index page at once. It is considerably faster than a loop around
* PageIndexTupleDelete ... however, the caller *must* supply the array
* of item numbers to be deleted in item number order!
*/
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 5a7dcda151..5c437d0195 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -76,7 +76,7 @@
* not needed because of an mdtruncate() operation. The reason for leaving
* them present at size zero, rather than unlinking them, is that other
* backends and/or the bgwriter might be holding open file references to
- * such segments. If the relation expands again after mdtruncate(), such
+ * such segments. If the relation expands again after mdtruncate(), such
* that a deactivated segment becomes active again, it is important that
* such file references still be valid --- else data might get written
* out to an unlinked old copy of a segment file that will eventually
@@ -113,7 +113,7 @@ static MemoryContext MdCxt; /* context for all md.c allocations */
* we keep track of pending fsync operations: we need to remember all relation
* segments that have been written since the last checkpoint, so that we can
* fsync them down to disk before completing the next checkpoint. This hash
- * table remembers the pending operations. We use a hash table mostly as
+ * table remembers the pending operations. We use a hash table mostly as
* a convenient way of eliminating duplicate requests.
*
* We use a similar mechanism to remember no-longer-needed files that can
@@ -269,7 +269,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* During bootstrap, there are cases where a system relation will be
* accessed (by internal backend processes) before the bootstrap
* script nominally creates it. Therefore, allow the file to exist
- * already, even if isRedo is not set. (See also mdopen)
+ * already, even if isRedo is not set. (See also mdopen)
*/
if (isRedo || IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
@@ -311,7 +311,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* if the contents of the file were repopulated by subsequent WAL entries.
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as for instance CLUSTER and CREATE INDEX do),
- * the contents of the file would be lost forever. By leaving the empty file
+ * the contents of the file would be lost forever. By leaving the empty file
* until after the next checkpoint, we prevent reassignment of the relfilenode
* number until it's safe, because relfilenode assignment skips over any
* existing file.
@@ -451,7 +451,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
/*
* Note: because caller usually obtained blocknum by calling mdnblocks,
* which did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
+ * optimized away by fd.c. It's not redundant, however, if there is a
* partial page at the end of the file. In that case we want to try to
* overwrite the partial page with a full page. It's also not redundant
* if bufmgr.c had to dump another buffer of the same file to make room
@@ -748,9 +748,9 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
* exactly RELSEG_SIZE long, and it's useless to recheck that each time.
*
* NOTE: this assumption could only be wrong if another backend has
- * truncated the relation. We rely on higher code levels to handle that
+ * truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
- * relcache flush. (Since the bgwriter doesn't participate in relcache
+ * relcache flush. (Since the bgwriter doesn't participate in relcache
* flush, it could have segment chain entries for inactive segments;
* that's OK because the bgwriter never needs to compute relation size.)
*/
@@ -938,7 +938,7 @@ mdsync(void)
/*
* If we are in the bgwriter, the sync had better include all fsync
- * requests that were queued by backends up to this point. The tightest
+ * requests that were queued by backends up to this point. The tightest
* race condition that could occur is that a buffer that must be written
* and fsync'd for the checkpoint could have been dumped by a backend just
* before it was visited by BufferSync(). We know the backend will have
@@ -1030,7 +1030,7 @@ mdsync(void)
* have been deleted (unlinked) by the time we get to them. Rather
* than just hoping an ENOENT (or EACCES on Windows) error can be
* ignored, what we do on error is absorb pending requests and
- * then retry. Since mdunlink() queues a "revoke" message before
+ * then retry. Since mdunlink() queues a "revoke" message before
* actually unlinking, the fsync request is guaranteed to be
* marked canceled after the absorb if it really was this case.
* DROP DATABASE likewise has to tell us to forget fsync requests
@@ -1387,7 +1387,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* NB: it's intentional that we don't change cycle_ctr if the entry
- * already exists. The fsync request must be treated as old, even
+ * already exists. The fsync request must be treated as old, even
* though the new request will be satisfied too by any subsequent
* fsync.
*
@@ -1395,7 +1395,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
* act just as though it wasn't there. The only case where this could
* happen would be if a file had been deleted, we received but did not
* yet act on the cancel request, and the same relfilenode was then
- * assigned to a new file. We mustn't lose the new request, but it
+ * assigned to a new file. We mustn't lose the new request, but it
* should be considered new not old.
*/
}
@@ -1544,7 +1544,7 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
{
/*
* Normally we will create new segments only if authorized by the
- * caller (i.e., we are doing mdextend()). But when doing WAL
+ * caller (i.e., we are doing mdextend()). But when doing WAL
* recovery, create segments anyway; this allows cases such as
* replaying WAL data that has a write into a high-numbered
* segment of a relation that was later deleted. We want to go
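The md.c hunks above take the segmented-file layout for granted: block N of a relation fork lives in segment N / RELSEG_SIZE, at block N % RELSEG_SIZE within that segment's file, where segment 0 is the bare filename and later segments append ".1", ".2", and so on. A sketch of that mapping:

    #include <stdio.h>

    #define RELSEG_SIZE 131072  /* blocks per segment: 1 GB at 8 kB blocks */

    /* Build the path of the segment file holding block blkno. */
    static void
    segment_path(char *path, size_t pathlen, const char *base,
                 unsigned blkno)
    {
        unsigned segno = blkno / RELSEG_SIZE;

        if (segno == 0)
            snprintf(path, pathlen, "%s", base);
        else
            snprintf(path, pathlen, "%s.%u", base, segno);
        /* byte offset within that file: (blkno % RELSEG_SIZE) * BLCKSZ */
    }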
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index 96519f638d..0a8c2fa732 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -43,8 +43,8 @@
* each fastpath call as a separate transaction command, and so the
* cached data could never actually have been reused. If it had worked
* as intended, it would have had problems anyway with dangling references
- * in the FmgrInfo struct. So, forget about caching and just repeat the
- * syscache fetches on each usage. They're not *that* expensive.
+ * in the FmgrInfo struct. So, forget about caching and just repeat the
+ * syscache fetches on each usage. They're not *that* expensive.
*/
struct fp_info
{
@@ -204,7 +204,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
/*
* Since the validity of this structure is determined by whether the
- * funcid is OK, we clear the funcid here. It must not be set to the
+ * funcid is OK, we clear the funcid here. It must not be set to the
* correct value until we are about to return with a good struct fp_info,
* since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any
* time. [No longer really an issue since we don't save the struct
@@ -258,7 +258,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
* RETURNS:
* 0 if successful completion, EOF if frontend connection lost.
*
- * Note: All ordinary errors result in ereport(ERROR,...). However,
+ * Note: All ordinary errors result in ereport(ERROR,...). However,
* if we lose the frontend connection there is no one to ereport to,
* and no use in proceeding...
*
@@ -518,7 +518,7 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
/*
* Since stringinfo.c keeps a trailing null in place even for
- * binary data, the contents of abuf are a valid C string. We
+ * binary data, the contents of abuf are a valid C string. We
* have to do encoding conversion before calling the typinput
* routine, though.
*/
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 4ffd582b7a..c50aa2f335 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -410,7 +410,7 @@ SocketBackend(StringInfo inBuf)
default:
/*
- * Otherwise we got garbage from the frontend. We treat this as
+ * Otherwise we got garbage from the frontend. We treat this as
* fatal because we have probably lost message boundary sync, and
* there's no good way to recover.
*/
@@ -806,7 +806,7 @@ exec_simple_query(const char *query_string)
ResetUsage();
/*
- * Start up a transaction command. All queries generated by the
+ * Start up a transaction command. All queries generated by the
* query_string will be in this same command block, *unless* we find a
* BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
* one of those, else bad things will happen in xact.c. (Note that this
@@ -815,7 +815,7 @@ exec_simple_query(const char *query_string)
start_xact_command();
/*
- * Zap any pre-existing unnamed statement. (While not strictly necessary,
+ * Zap any pre-existing unnamed statement. (While not strictly necessary,
* it seems best to define simple-Query mode as if it used the unnamed
* statement and portal; this ensures we recover any storage used by prior
* unnamed operations.)
@@ -874,7 +874,7 @@ exec_simple_query(const char *query_string)
/*
* Get the command name for use in status display (it also becomes the
- * default completion tag, down inside PortalRun). Set ps_status and
+ * default completion tag, down inside PortalRun). Set ps_status and
* do any special start-of-SQL-command processing needed by the
* destination.
*/
@@ -961,7 +961,7 @@ exec_simple_query(const char *query_string)
/*
* Select the appropriate output format: text unless we are doing a
- * FETCH from a binary cursor. (Pretty grotty to have to do this here
+ * FETCH from a binary cursor. (Pretty grotty to have to do this here
* --- but it avoids grottiness in other places. Ah, the joys of
* backward compatibility...)
*/
@@ -1274,7 +1274,7 @@ exec_parse_message(const char *query_string, /* string to execute */
}
else
{
- /* Empty input string. This is legal. */
+ /* Empty input string. This is legal. */
raw_parse_tree = NULL;
commandTag = NULL;
stmt_list = NIL;
@@ -1333,7 +1333,7 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* We do NOT close the open transaction command here; that only happens
- * when the client sends Sync. Instead, do CommandCounterIncrement just
+ * when the client sends Sync. Instead, do CommandCounterIncrement just
* in case something happened during parse/plan.
*/
CommandCounterIncrement();
@@ -1476,7 +1476,7 @@ exec_bind_message(StringInfo input_message)
* If we are in aborted transaction state, the only portals we can
* actually run are those containing COMMIT or ROLLBACK commands. We
* disallow binding anything else to avoid problems with infrastructure
- * that expects to run inside a valid transaction. We also disallow
+ * that expects to run inside a valid transaction. We also disallow
* binding any parameters, since we can't risk calling user-defined I/O
* functions.
*/
@@ -1557,7 +1557,7 @@ exec_bind_message(StringInfo input_message)
/*
* Rather than copying data around, we just set up a phony
* StringInfo pointing to the correct portion of the message
- * buffer. We assume we can scribble on the message buffer so
+ * buffer. We assume we can scribble on the message buffer so
* as to maintain the convention that StringInfos have a
* trailing null. This is grotty but is a big win when
* dealing with very large parameter strings.
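[A minimal sketch of the "phony StringInfo" trick described in this hunk. The field names follow the usual StringInfoData layout (data/len/maxlen/cursor); the helper name, the input_fn callback, and the save-and-restore of the terminator byte are assumptions of this sketch, not quotes of the backend code.]

    #include <string.h>

    typedef struct
    {
        char   *data;
        int     len;
        int     maxlen;
        int     cursor;
    } StringInfoData;

    static void
    with_param_as_cstring(StringInfoData *msg, int plength,
                          void (*input_fn) (const char *))
    {
        StringInfoData phony;
        char        csave;

        /* Point a phony StringInfo at the parameter bytes in place. */
        phony.data = msg->data + msg->cursor;
        phony.len = plength;
        phony.maxlen = plength + 1;
        phony.cursor = 0;

        /* Scribble a trailing null so the slice is a valid C string,
         * saving the overwritten byte to restore afterwards. */
        csave = phony.data[plength];
        phony.data[plength] = '\0';

        input_fn(phony.data);

        phony.data[plength] = csave;
        msg->cursor += plength;
    }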
@@ -1947,7 +1947,7 @@ exec_execute_message(const char *portal_name, long max_rows)
if (is_xact_command)
{
/*
- * If this was a transaction control statement, commit it. We
+ * If this was a transaction control statement, commit it. We
* will start a new xact command for the next command (if any).
*/
finish_xact_command();
@@ -2309,7 +2309,7 @@ exec_describe_portal_message(const char *portal_name)
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
- * Hence, refuse to Describe portals that return data. (We shouldn't just
+ * Hence, refuse to Describe portals that return data. (We shouldn't just
* refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.)
@@ -2520,7 +2520,7 @@ quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -3052,7 +3052,7 @@ PostgresMain(int argc, char *argv[], const char *username)
gucsource = PGC_S_ARGV; /* initial switches came from command line */
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* postmaster/postmaster.c (the option sets should not conflict) and with
* the common help() function in main/main.c.
*/
@@ -3301,7 +3301,7 @@ PostgresMain(int argc, char *argv[], const char *username)
* we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in the
- * postmaster. If such a signal arrives before we are able to change the
+ * postmaster. If such a signal arrives before we are able to change the
* handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
* handler in the postmaster to reserve the signal. (Of course, this isn't
* an issue for signals that are locally generated, such as SIGALRM and
@@ -3544,7 +3544,7 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
- * AbortTransaction() instead. The only stuff done directly here
+ * AbortTransaction() instead. The only stuff done directly here
* should be stuff that is guaranteed to apply *only* for outer-level
* error recovery, such as adjusting the FE/BE protocol status.
*/
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 9933f7de07..7129e7f4f2 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -519,7 +519,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
false);
/*
- * We do *not* call AfterTriggerBeginQuery() here. We assume
+ * We do *not* call AfterTriggerBeginQuery() here. We assume
* that a SELECT cannot queue any triggers. It would be messy
* to support triggers since the execution of the portal may
* be interleaved with other queries.
@@ -564,7 +564,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
/*
* We don't start the executor until we are told to run the
- * portal. We do need to set up the result tupdesc.
+ * portal. We do need to set up the result tupdesc.
*/
{
PlannedStmt *pstmt;
@@ -917,7 +917,7 @@ PortalRunSelect(Portal portal,
Assert(queryDesc || portal->holdStore);
/*
- * Force the queryDesc destination to the right thing. This supports
+ * Force the queryDesc destination to the right thing. This supports
* MOVE, for example, which will pass in dest = DestNone. This is okay to
* change as long as we do it on every fetch. (The Executor must not
* assume that dest never changes.)
@@ -1163,7 +1163,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
elog(DEBUG3, "ProcessUtility");
/*
- * Set snapshot if utility stmt needs one. Most reliable way to do this
+ * Set snapshot if utility stmt needs one. Most reliable way to do this
* seems to be to enumerate those that do not need one; this is a short
* list. Transaction control, LOCK, and SET must *not* set a snapshot
* since they need to be executable at the start of a serializable
@@ -1203,7 +1203,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
/*
* Some utility commands may pop the ActiveSnapshot stack from under us,
- * so we only pop the stack if we actually see a snapshot set. Note that
+ * so we only pop the stack if we actually see a snapshot set. Note that
* the set of utility commands that do this must be the same set
* disallowed to run inside a transaction; otherwise, we could be popping
* a snapshot that belongs to some other operation.
@@ -1480,7 +1480,7 @@ DoPortalRunFetch(Portal portal,
* Definition: Rewind to start, advance count-1 rows, return
* next row (if any). In practice, if the goal is less than
* halfway back to the start, it's better to scan from where
- * we are. In any case, we arrange to fetch the target row
+ * we are. In any case, we arrange to fetch the target row
* going forwards.
*/
if (portal->posOverflow || portal->portalPos == LONG_MAX ||
@@ -1587,7 +1587,7 @@ DoPortalRunFetch(Portal portal,
* If we are sitting on a row, back up one so we can re-fetch it.
* If we are not sitting on a row, we still have to start up and
* shut down the executor so that the destination is initialized
- * and shut down correctly; so keep going. To PortalRunSelect,
+ * and shut down correctly; so keep going. To PortalRunSelect,
* count == 0 means we will retrieve no row.
*/
if (on_row)
diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c
index cc1500dedf..0ef71bdca2 100644
--- a/src/backend/tsearch/ts_locale.c
+++ b/src/backend/tsearch/ts_locale.c
@@ -81,7 +81,7 @@ t_isprint(const char *ptr)
/*
- * Set up to read a file using tsearch_readline(). This facility is
+ * Set up to read a file using tsearch_readline(). This facility is
* better than just reading the file directly because it provides error
* context pointing to the specific line where a problem is detected.
*
@@ -159,7 +159,7 @@ tsearch_readline_callback(void *arg)
/*
* We can't include the text of the config line for errors that occur
- * during t_readline() itself. This is only partly a consequence of our
+ * during t_readline() itself. This is only partly a consequence of our
* arms-length use of that routine: the major cause of such errors is
* encoding violations, and we daren't try to print error messages
* containing badly-encoded data.
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index 94127155a4..7515841c16 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -233,7 +233,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* We loop through the lexemes in the tsvector and add them to our
- * tracking hashtable. Note: the hashtable entries will point into
+ * tracking hashtable. Note: the hashtable entries will point into
* the (detoasted) tsvector value, therefore we cannot free that
* storage until we're done.
*/
@@ -364,7 +364,7 @@ compute_tsvector_stats(VacAttrStats *stats,
* they get sorted on frequencies. The rationale is that we
* usually search through most common elements looking for a
* specific value, so we can grab its frequency. When values are
- * presorted we can employ binary search for that. See
+ * presorted we can employ binary search for that. See
* ts_selfuncs.c for a real usage scenario.
*/
qsort(sort_table, num_mcelem, sizeof(TrackItem *),
diff --git a/src/backend/tsearch/ts_utils.c b/src/backend/tsearch/ts_utils.c
index 64bdf12d91..66d2edfdbd 100644
--- a/src/backend/tsearch/ts_utils.c
+++ b/src/backend/tsearch/ts_utils.c
@@ -25,8 +25,8 @@
/*
* Given the base name and extension of a tsearch config file, return
- * its full path name. The base name is assumed to be user-supplied,
- * and is checked to prevent pathname attacks. The extension is assumed
+ * its full path name. The base name is assumed to be user-supplied,
+ * and is checked to prevent pathname attacks. The extension is assumed
* to be safe.
*
* The result is a palloc'd string.
@@ -39,7 +39,7 @@ get_tsearch_config_filename(const char *basename,
char *result;
/*
- * We limit the basename to contain a-z, 0-9, and underscores. This may
+ * We limit the basename to contain a-z, 0-9, and underscores. This may
* be overly restrictive, but we don't want to allow access to anything
* outside the tsearch_data directory, so for instance '/' *must* be
* rejected, and on some platforms '\' and ':' are risky as well. Allowing
@@ -69,7 +69,7 @@ comparestr(const void *a, const void *b)
/*
* Reads a stop-word file. Each word is run through 'wordop'
- * function, if given. wordop may either modify the input in-place,
+ * function, if given. wordop may either modify the input in-place,
* or palloc a new version.
*/
void
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 5b61c51055..61decd62ec 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -113,7 +113,7 @@ static void RoleMembershipCacheCallback(Datum arg, int cacheid, ItemPointer tupl
/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
- * 's', ignoring any leading white space. If it finds a double quote
+ * 's', ignoring any leading white space. If it finds a double quote
* it returns the word inside the quotes.
*
* RETURNS:
@@ -219,7 +219,7 @@ putid(char *p, const char *s)
*
* RETURNS:
* the string position in 's' immediately following the ACL
- * specification. Also:
+ * specification. Also:
* - loads the structure pointed to by 'aip' with the appropriate
* UID/GID, id type identifier and mode type values.
*/
@@ -804,7 +804,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can only
+ * Remove abandoned privileges (cascading revoke). Currently we can only
* handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
@@ -870,7 +870,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* If the old ACL contained any references to the new owner, then we may
- * now have generated an ACL containing duplicate entries. Find them and
+ * now have generated an ACL containing duplicate entries. Find them and
* merge them so that there are not duplicates. (This is relatively
* expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
* be the normal case.)
@@ -881,7 +881,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
* remove privilege-free entries, should there be any in the input.) dst
* is the next output slot, targ is the currently considered input slot
* (always >= dst), and src scans entries to the right of targ looking for
- * duplicates. Once an entry has been emitted to dst it is known
+ * duplicates. Once an entry has been emitted to dst it is known
* duplicate-free and need not be considered anymore.
*/
if (newpresent)
@@ -1928,7 +1928,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
* existence of the pg_class row before risking calling pg_class_aclcheck.
* Note: it might seem there's a race condition against concurrent DROP,
* but really it's safe because there will be no syscache flush between
- * here and there. So if we see the row in the syscache, so will
+ * here and there. So if we see the row in the syscache, so will
* pg_class_aclcheck.
*/
if (!SearchSysCacheExists(RELOID,
@@ -4340,14 +4340,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index 62ff6f49f3..32aab08fc6 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -505,7 +505,7 @@ array_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
@@ -520,7 +520,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
int lbs[1];
/*
- * Test for null before Asserting we are in right context. This is to
+ * Test for null before Asserting we are in right context. This is to
* avoid possible Assert failure in 8.4beta installations, where it is
* possible for users to create NULL constants of type internal.
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 87e611573a..a3878447f1 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -637,7 +637,7 @@ ReadArrayStr(char *arrayStr,
/*
* We have to remove " and \ characters to create a clean item value to
- * pass to the datatype input routine. We overwrite each item value
+ * pass to the datatype input routine. We overwrite each item value
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
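[A rough illustration of the srcptr/dstptr scheme as a hypothetical standalone helper, not the ReadArrayStr code itself: quotes are dropped, a backslash causes the following character to be copied literally, and dstptr never outruns srcptr, so the rewrite is safe in place.]

    #include <stddef.h>

    static void
    dequote_in_place(char *s)
    {
        char   *srcptr = s;         /* current scan point */
        char   *dstptr = s;         /* where we are copying to */

        while (*srcptr)
        {
            if (*srcptr == '"')
                srcptr++;                   /* drop the quote itself */
            else if (*srcptr == '\\' && srcptr[1] != '\0')
            {
                srcptr++;                   /* drop the backslash ... */
                *dstptr++ = *srcptr++;      /* ... keep the escaped char */
            }
            else
                *dstptr++ = *srcptr++;
        }
        *dstptr = '\0';
    }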
@@ -837,7 +837,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*/
static void
@@ -1919,7 +1919,7 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
* copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
@@ -2551,7 +2551,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -2566,9 +2566,9 @@ array_set_slice(ArrayType *array,
* first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
* or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
+ * * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -3379,7 +3379,7 @@ array_cmp(FunctionCallInfo fcinfo)
/*
* If arrays contain same data (up to end of shorter one), apply
- * additional rules to sort by dimensionality. The relative significance
+ * additional rules to sort by dimensionality. The relative significance
* of the different bits of information is historical; mainly we just care
* that we don't say "equal" for arrays of different dimensionality.
*/
@@ -3524,7 +3524,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, bool matchall,
/*
* We assume that the comparison operator is strict, so a NULL can't
- * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
* array_eq, should we act like that?
*/
if (isnull1)
@@ -3830,7 +3830,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -4027,7 +4027,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -4695,7 +4695,7 @@ array_unnest(PG_FUNCTION_ARGS)
* Get the array value and detoast if needed. We can't do this
* earlier because if we have to detoast, we want the detoasted copy
* to be in multi_call_memory_ctx, so it will go away when we're done
- * and not before. (If no detoast happens, we assume the originally
+ * and not before. (If no detoast happens, we assume the originally
* passed array will stick around till then.)
*/
arr = PG_GETARG_ARRAYTYPE_P(0);
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index 2597b0f2a8..35fc3c3abc 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 97ba0e0313..0accde5639 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index e2e690d25c..07b992aa19 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1245,7 +1245,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
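[For contrast, a sketch of the integer path's round-half-away-from-zero behavior; a hypothetical helper, since the real code scales by per-typmod factors. round_away(1500, 1000) yields 2000 and round_away(2500, 1000) yields 3000, whereas rint(1.5) and rint(2.5) both yield 2 under round-to-nearest-even.]

    #include <stdint.h>

    static int64_t
    round_away(int64_t usec, int64_t unit)
    {
        /* Integer rounding: halfway cases always move away from zero. */
        if (usec >= 0)
            return ((usec + unit / 2) / unit) * unit;
        else
            return -(((-usec + unit / 2) / unit) * unit);
    }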
@@ -1594,7 +1594,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
- * Therefore, we can just ignore the months field. It is not real clear
+ * Therefore, we can just ignore the months field. It is not real clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -2584,7 +2584,7 @@ timetz_zone(PG_FUNCTION_ARGS)
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 22878627cc..abe78e62a8 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -352,7 +352,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
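[A quick worked example of the locution, assuming j2day() is a plain modulo-7 reduction of the Julian date: if J falls on a Wednesday, j2day(J) is 3 under 0..6 = Sun..Sat, and j2day(J - 1) evaluates the preceding Tuesday, giving 2, which is precisely Wednesday's code under 0..6 = Mon..Sun.]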
@@ -2469,7 +2469,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
/*
* Nothing so far; make a decision about what we think the input
- * is. There used to be lots of heuristics here, but the
+ * is. There used to be lots of heuristics here, but the
* consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
@@ -2502,9 +2502,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
{
/*
* We are at the first numeric field of a date that included a
- * textual month name. We want to support the variants
+ * textual month name. We want to support the variants
* MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
* either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
@@ -3312,7 +3312,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -4091,7 +4091,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, const datetkn *base, int nel)
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 9c2ab2e943..d93be9b271 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
/*
* just compare the two datums. NOTE: just comparing "len" bytes will
* not do the work, because we do not know how these bytes are aligned
- * inside the "Datum". We assume instead that any given datatype is
+ * inside the "Datum". We assume instead that any given datatype is
* consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 0aaa9b8be1..95718845d2 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -12,11 +12,11 @@
* The overhead required for constraint checking can be high, since examining
* the catalogs to discover the constraints for a given domain is not cheap.
* We have three mechanisms for minimizing this cost:
- * 1. In a nest of domains, we flatten the checking of all the levels
+ * 1. In a nest of domains, we flatten the checking of all the levels
* into just one operation.
- * 2. We cache the list of constraint items in the FmgrInfo struct
+ * 2. We cache the list of constraint items in the FmgrInfo struct
* passed by the caller.
- * 3. If there are CHECK constraints, we cache a standalone ExprContext
+ * 3. If there are CHECK constraints, we cache a standalone ExprContext
* to evaluate them in.
*
*
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 41e4236c1e..ab9f63828d 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -286,7 +286,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -507,7 +507,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -2053,7 +2053,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index a0ea23307a..95f0657395 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -46,14 +46,14 @@ __attribute__((format(printf, 2, 3)));
* double quoted if it contains funny characters or matches a keyword.
*
* If typemod is NULL then we are formatting a type name in a context where
- * no typemod is available, eg a function argument or result type. This
+ * no typemod is available, eg a function argument or result type. This
* yields a slightly different result from specifying typemod = -1 in some
* cases. Given typemod = -1 we feel compelled to produce an output that
* the parser will interpret as having typemod -1, so that pg_dump will
- * produce CREATE TABLE commands that recreate the original state. But
+ * produce CREATE TABLE commands that recreate the original state. But
* given NULL typemod, we assume that the parser's interpretation of
* typemod doesn't matter, and so we are willing to output a slightly
- * "prettier" representation of the same type. For example, type = bpchar
+ * "prettier" representation of the same type. For example, type = bpchar
* and typemod = NULL gets you "character", whereas typemod = -1 gets you
* "bpchar" --- the former will be interpreted as character(1) by the
* parser, which does not yield typemod -1.
@@ -137,7 +137,7 @@ format_type_internal(Oid type_oid, int32 typemod,
/*
* Check if it's an array (and not a domain --- we don't want to show the
- * substructure of a domain type). Fixed-length array types such as
+ * substructure of a domain type). Fixed-length array types such as
* "name" shouldn't get deconstructed either. As of Postgres 8.1, rather
* than checking typlen we check the toast property, and don't deconstruct
* "plain storage" array types --- this is because we don't want to show
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 3b823fa78f..6c35dab4c3 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -3378,7 +3378,7 @@ do_to_timestamp(text *date_txt, text *fmt,
{
/*
* The month and day field have not been set, so we use the
- * day-of-year field to populate them. Depending on the date mode,
+ * day-of-year field to populate them. Depending on the date mode,
* this field may be interpreted as a Gregorian day-of-year, or an ISO
* week date day-of-year.
*/
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index d926076ccf..4387a13456 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
@@ -34,7 +34,7 @@
* In general, GiST needs to search multiple subtrees in order to guarantee
* that all occurrences of the same key have been found. Because of this,
* the estimated cost for scanning the index ought to be higher than the
- * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
+ * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
* ought to be adjusted accordingly --- but until we can generate somewhat
* realistic numbers here, it hardly matters...
*/
diff --git a/src/backend/utils/adt/inet_net_ntop.c b/src/backend/utils/adt/inet_net_ntop.c
index 95b9cc9a24..b50df46dae 100644
--- a/src/backend/utils/adt/inet_net_ntop.c
+++ b/src/backend/utils/adt/inet_net_ntop.c
@@ -203,7 +203,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
else
{
- /* Copy src to private buffer. Zero host part. */
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 1a82321ae1..dae9c21fe8 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -632,7 +632,7 @@ int4pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
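[A standalone sketch of this sign test, assuming the wraparound semantics the backend relies on (e.g. compiling with -fwrapv) and a SAMESIGN macro like the one in int.c:]

    #include <stdbool.h>
    #include <stdint.h>

    #define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))

    static bool
    int32_add_overflows(int32_t arg1, int32_t arg2)
    {
        int32_t result = arg1 + arg2;   /* may wrap around */

        /* Different signs can never overflow; same signs must keep
         * that sign in the result. */
        return SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1);
    }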
@@ -653,8 +653,8 @@ int4mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -674,7 +674,7 @@ int4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
* overflow and thus incorrectly match).
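[The same idea for multiplication, sketched under the same wraparound assumption: divide the possibly-wrapped product back out and see whether the original operand reappears, special-casing the two values for which the division itself misbehaves.]

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    int32_mul_overflows(int32_t arg1, int32_t arg2)
    {
        int32_t result = arg1 * arg2;   /* may wrap around */

        if (arg2 == 0)
            return false;               /* zero times anything fits */
        if (arg1 == INT32_MIN && arg2 == -1)
            return true;                /* result / arg2 would itself overflow */
        return result / arg2 != arg1;
    }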
@@ -784,7 +784,7 @@ int2pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -805,8 +805,8 @@ int2mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -887,7 +887,7 @@ int24pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -908,8 +908,8 @@ int24mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -929,7 +929,7 @@ int24mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -975,7 +975,7 @@ int42pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -996,8 +996,8 @@ int42mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1017,7 +1017,7 @@ int42mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index c0b602cb5a..b2895ba751 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
#ifndef INT64_IS_BUSTED
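[Why the special case is unavoidable: the usual strategy of accumulating a positive magnitude and negating at the end cannot represent +9223372036854775808 in an int64. A sketch of the kind of early test involved; this is a hypothetical helper, the actual code compares the digit string before its conversion loop.]

    #include <stdbool.h>
    #include <string.h>

    static bool
    is_int64_min_literal(const char *digits)    /* digits after a '-' sign */
    {
        return strcmp(digits, "9223372036854775808") == 0;
    }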
@@ -525,7 +525,7 @@ int8pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -546,8 +546,8 @@ int8mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -567,7 +567,7 @@ int8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
* will overflow and thus incorrectly match).
@@ -730,7 +730,7 @@ int8inc(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc but are used for aggregates that
- * count only non-null values. Since the functions are declared strict,
+ * count only non-null values. Since the functions are declared strict,
* the null checks happen before we ever get here, and all we need do is
* increment the state value. We could actually make these pg_proc entries
* point right at int8inc, but then the opr_sanity regression test would
@@ -784,7 +784,7 @@ int84pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -805,8 +805,8 @@ int84mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -826,7 +826,7 @@ int84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -893,7 +893,7 @@ int48pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -914,8 +914,8 @@ int48mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -935,7 +935,7 @@ int48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -981,7 +981,7 @@ int82pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1002,8 +1002,8 @@ int82mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1023,7 +1023,7 @@ int82mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -1090,7 +1090,7 @@ int28pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1111,8 +1111,8 @@ int28mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1132,7 +1132,7 @@ int28mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 3e0dbedc83..1b8f637a4e 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -70,12 +70,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*/
#define NextByte(p, plen) ((p)++, (plen)--)
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index f78fc7363d..961cd8b16c 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -275,7 +275,7 @@ pg_sleep(PG_FUNCTION_ARGS)
* pg_usleep's upper bound on allowed delays.
*
* By computing the intended stop time initially, we avoid accumulation of
- * extra delay across multiple sleeps. This also ensures we won't delay
+ * extra delay across multiple sleeps. This also ensures we won't delay
* less than the specified time if pg_usleep is interrupted by other
* signals such as SIGHUP.
*/
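[A sketch of the fixed-deadline pattern described above, outside PostgreSQL: the stop time is computed once, so short per-iteration sleeps that are cut off by signals simply loop around and re-measure, neither drifting long nor returning early. CLOCK_MONOTONIC and the one-second cap are assumptions of this sketch.]

    #include <time.h>

    static void
    sleep_for(double seconds)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        double deadline = now.tv_sec + now.tv_nsec / 1e9 + seconds;

        for (;;)
        {
            clock_gettime(CLOCK_MONOTONIC, &now);
            double remain = deadline - (now.tv_sec + now.tv_nsec / 1e9);

            if (remain <= 0.0)
                break;

            /* Sleep at most one second per iteration; an interrupting
             * signal just sends us back around to re-check the deadline. */
            struct timespec req = { 0, 0 };
            if (remain >= 1.0)
                req.tv_sec = 1;
            else
                req.tv_nsec = (long) (remain * 1e9);
            nanosleep(&req, NULL);
        }
    }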
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index d1c41e138f..5d23060765 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -198,7 +198,7 @@ tm2abstime(struct pg_tm * tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
@@ -1151,7 +1151,7 @@ tintervalsame(PG_FUNCTION_ARGS)
* tinterval comparison routines
*
* Note: comparison is based on the lengths of the tintervals, not on
- * endpoint value. This is pretty bogus, but since it's only a legacy
+ * endpoint value. This is pretty bogus, but since it's only a legacy
* datatype I'm not going to propose changing it.
*/
static int
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 81d05aa8ae..9d8431a99e 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -29,7 +29,7 @@ static int ip_addrsize(inet *inetptr);
static inet *internal_inetpl(inet *ip, int64 addend);
/*
- * Access macros. We use VARDATA_ANY so that we can process short-header
+ * Access macros. We use VARDATA_ANY so that we can process short-header
* varlena values without detoasting them. This requires a trick:
* VARDATA_ANY assumes the varlena header is already filled in, which is
* not the case when constructing a new value (until SET_INET_VARSIZE is
@@ -88,7 +88,7 @@ network_in(char *src, bool is_cidr)
dst = (inet *) palloc0(sizeof(inet));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
* will have a : somewhere in them (several, in fact) so if there is one
* present, assume it's V6, otherwise assume it's V4.
*/
@@ -193,7 +193,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -1392,7 +1392,7 @@ inetmi(PG_FUNCTION_ARGS)
/*
* We form the difference using the traditional complement, increment,
* and add rule, with the increment part being handled by starting the
- * carry off at 1. If you don't think integer arithmetic is done in
+ * carry off at 1. If you don't think integer arithmetic is done in
* two's complement, too bad.
*/
int nb = ip_addrsize(ip);
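[Concretely, the rule computes a - b as a + ~b + 1 one byte at a time; a minimal sketch over big-endian address bytes (hypothetical helper, not the backend function):]

    #include <stdint.h>
    #include <stddef.h>

    static void
    bytewise_sub(const uint8_t *a, const uint8_t *b, uint8_t *diff, size_t nb)
    {
        int carry = 1;              /* the "increment" part of the rule */

        while (nb-- > 0)
        {
            int sum = a[nb] + (uint8_t) ~b[nb] + carry;

            diff[nb] = (uint8_t) sum;
            carry = sum >> 8;
        }
    }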
@@ -1414,7 +1414,7 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes to
+ * Input wider than int64: check for overflow. All bytes to
* the left of what will fit should be 0 or 0xFF, depending on
* sign of the now-complete result.
*/
@@ -1445,9 +1445,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index ba3721b12b..ce4059784f 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -49,7 +49,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -90,19 +90,19 @@ typedef int16 NumericDigit;
/* ----------
- * NumericVar is the format we use for arithmetic. The digit-array part
+ * NumericVar is the format we use for arithmetic. The digit-array part
* is the same as the NumericData storage format, but the header is more
* complex.
*
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
- * NumericVar. digits points at the first digit in actual use (the one
- * with the specified weight). We normally leave an unused digit or two
+ * NumericVar. digits points at the first digit in actual use (the one
+ * with the specified weight). We normally leave an unused digit or two
* (preset to zeroes) between buf and digits, so that there is room to store
* a carry out of the top digit without reallocating space. We just need to
* decrement digits (and increment weight) to make room for the carry digit.
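[A worked example of the layout, assuming NBASE = 10000: the value 12345.678 is stored as digits[] = {1, 2345, 6780} with ndigits = 3 and weight = 1, i.e. 1*10000^1 + 2345*10000^0 + 6780*10000^-1, so weight+1 = 2 base-NBASE digits precede the decimal point.]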
@@ -702,7 +702,7 @@ numeric_uminus(PG_FUNCTION_ARGS)
/*
* The packed format is known to be totally zero digit trimmed always. So
- * we can identify a ZERO by the fact that there are no digits at all. Do
+ * we can identify a ZERO by the fact that there are no digits at all. Do
* nothing to a zero.
*/
if (VARSIZE(num) != NUMERIC_HDRSZ)
@@ -1708,7 +1708,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -1761,7 +1761,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2367,9 +2367,9 @@ numeric_avg_accum(PG_FUNCTION_ARGS)
/*
* Integer data types all use Numeric accumulators to share code and
- * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
+ * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
* is overkill for the N and sum(X) values, but definitely not overkill
- * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
+ * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
* for stddev/variance --- there are faster special-purpose accumulator
* routines for SUM and AVG of these datatypes.
*/
@@ -2632,7 +2632,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
* the initial condition of the transition data value needs to be NULL. This
* means we can't rely on ExecAgg to automatically insert the first non-null
* data value into the transition data: it doesn't know how to do the type
- * conversion. The upshot is that these routines have to be marked non-strict
+ * conversion. The upshot is that these routines have to be marked non-strict
* and handle substitution of the first non-null input themselves.
*/
@@ -3045,7 +3045,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest)
/*
* We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * correct decimal weight. Then convert to NBASE representation.
*/
switch (*cp)
{
@@ -3514,7 +3514,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*
* CAUTION: var's contents may be modified by rounding!
*/
@@ -3978,7 +3978,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -4022,7 +4022,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
* would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
* or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
@@ -4265,7 +4265,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*
* We need the first divisor digit to be >= NBASE/2. If it isn't,
* make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
+ * factor "d". (The reason for allocating dividend[0] above is to
* leave room for possible carry here.)
*/
if (divisor[1] < HALF_NBASE)
@@ -4309,7 +4309,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
+ * no need to adjust the working dividend. It's worth testing
* here to fall out ASAP when processing trailing zeroes in a
* dividend.
*/
@@ -4327,7 +4327,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Adjust quotient digit if it's too large. Knuth proves that
* after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
+ * just one too large. (Note: it's OK to use dividend[j+2] here
* because we know the divisor length is at least 2.)
*/
while (divisor2 * qhat >
@@ -4502,7 +4502,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result,
* dividend's digits (plus appended zeroes to reach the desired precision
* including guard digits). Each step of the main loop computes an
* (approximate) quotient digit and stores it into div[], removing one
- * position of dividend space. A final pass of carry propagation takes
+ * position of dividend space. A final pass of carry propagation takes
* care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
@@ -5359,7 +5359,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
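[The bit-pattern loop is ordinary square-and-multiply; a minimal integer sketch of it (the real code operates on NumericVars with extra precision, and handles neg by inverting at the end):]

    static long long
    ipow(long long base, unsigned int exp)
    {
        long long result = 1;

        while (exp > 0)
        {
            if (exp & 1)
                result *= base;     /* this bit of exp is set */
            exp >>= 1;
            if (exp > 0)
                base *= base;       /* square for the next bit */
        }
        return result;
    }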
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 03b6fad3ef..148c23de1e 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -20,12 +20,12 @@
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also
* settable at run-time. However, we don't actually set those locale
- * categories permanently. This would have bizarre effects like no
+ * categories permanently. This would have bizarre effects like no
* longer accepting standard floating-point literals in some locales.
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent.
*
* !!! NOW HEAR THIS !!!
@@ -39,7 +39,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*----------
*/
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 3978e9581c..476884cf35 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -578,7 +578,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
+ * anything compressible at all, fail. This lets us fall out
* reasonably quickly when looking at incompressible input (such as
* pre-compressed data).
*/
@@ -602,7 +602,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
found_match = true;
}
@@ -616,7 +616,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
}
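/*
 * Editorial sketch, not part of the patch: why the dp++ above must not be
 * folded into the macro call.  A function-like macro expands its argument
 * textually, so an argument with a side effect is evaluated once per
 * occurrence.  Hypothetical macro (not the real pglz output macros):
 */
static unsigned char b0, b1, b2, b3;

#define EMIT4(p) \
	do { b0 = *(p); b1 = *(p); b2 = *(p); b3 = *(p); } while (0)

/*
 * EMIT4(dp++) would expand to four *(dp++) evaluations, incrementing dp
 * four times instead of once; hence the code above calls the macro with
 * plain dp and does the ++ in a separate statement.
 */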
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index e8cc146970..815c4acacc 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -6,7 +6,7 @@
* A pseudo-type isn't really a type and never has any operations, but
* we do need to supply input and output functions to satisfy the links
* in the pseudo-type's entry in pg_type. In most cases the functions
- * just throw an error if invoked. (XXX the error messages here cover
+ * just throw an error if invoked. (XXX the error messages here cover
* the most common case, but might be confusing in some contexts. Can
* we do better?)
*
@@ -138,7 +138,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
@@ -192,7 +192,7 @@ anyenum_out(PG_FUNCTION_ARGS)
* void_in - input routine for pseudo-type VOID.
*
* We allow this so that PL functions can return VOID without any special
- * hack in the PL handler. Whatever value the PL thinks it's returning
+ * hack in the PL handler. Whatever value the PL thinks it's returning
* will just be ignored.
*/
Datum
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index a5d7c83d84..9c51236900 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -143,7 +143,7 @@ RE_compile_and_cache(text *text_re, int cflags)
char errMsg[100];
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front, our
* search strategy can just be to scan from the front.
*/
@@ -191,7 +191,7 @@ RE_compile_and_cache(text *text_re, int cflags)
/*
* Here and in other places in this file, do CHECK_FOR_INTERRUPTS
- * before reporting a regex error. This is so that if the regex
+ * before reporting a regex error. This is so that if the regex
* library aborts and returns REG_CANCEL, we don't print an error
* message that implies the regex was invalid.
*/
@@ -296,7 +296,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
@@ -698,7 +698,7 @@ similar_escape(PG_FUNCTION_ARGS)
* which is bizarre enough to require some explanation. "***:" is a
* director prefix to force the regex to be treated as an ARE regardless
* of the current regex_flavor setting. We need "^" and "$" to force
- * the pattern to match the entire input string as per SQL99 spec. The
+ * the pattern to match the entire input string as per SQL99 spec. The
* "(?:" and ")" are a non-capturing set of parens; we have to have
* parens in case the string contains "|", else the "^" and "$" will
* be bound into the first and last alternatives which is not what we
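/*
 * Editorial sketch, not part of the patch: the decoration described above.
 * Without the non-capturing parens, a pattern such as "a|b" would become
 * "^a|b$", which matches strings starting with "a" or ending with "b"
 * rather than strings that are exactly "a" or "b".  Illustrative helper
 * (the real similar_escape() builds the string incrementally):
 */
static char *
wrap_similar_pattern(const char *pat)
{
	/* "***:" forces ARE flavor; "^(?:" ... ")$" anchors the whole input */
	char	   *buf = palloc(strlen(pat) + 11);

	sprintf(buf, "***:^(?:%s)$", pat);
	return buf;
}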
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index fd90a29022..e6cca1d960 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -81,7 +81,7 @@ regprocin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_proc for a unique match. This is needed for
+ * just search pg_proc for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -268,7 +268,7 @@ regprocedurein(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -431,7 +431,7 @@ regoperin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_operator for a unique match. This is needed for
+ * just search pg_operator for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -622,7 +622,7 @@ regoperatorin(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -863,7 +863,7 @@ regclassout(PG_FUNCTION_ARGS)
/*
* In bootstrap mode, skip the fancy namespace stuff and just return
- * the class name. (This path is only needed for debugging output
+ * the class name. (This path is only needed for debugging output
* anyway.)
*/
if (IsBootstrapProcessingMode())
@@ -1359,7 +1359,7 @@ stringToQualifiedNameList(const char *string)
/*
* Given a C string, parse it into a qualified function or operator name
- * followed by a parenthesized list of type names. Reduce the
+ * followed by a parenthesized list of type names. Reduce the
* type names to an array of OIDs (returned into *nargs and *argtypes;
* the argtypes array should be of size FUNC_MAX_ARGS). The function or
* operator name is returned to *names as a List of Strings.
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index ec0eeddc26..fada1f239e 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -2004,11 +2004,11 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
/*
* "MATCH <unspecified>" only changes columns corresponding to the
- * referenced columns that have changed in pk_rel. This means the
+ * referenced columns that have changed in pk_rel. This means the
* "SET attrn=NULL [, attrn=NULL]" string will be change as well.
* In this case, we need to build a temporary plan rather than use
* our cached plan, unless the update happens to change all
- * columns in the key. Fortunately, for the most common case of a
+ * columns in the key. Fortunately, for the most common case of a
* single-column foreign key, this will be true.
*
* In case you're wondering, the inequality check works because we
@@ -2732,7 +2732,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
* Temporarily increase work_mem so that the check query can be executed
* more efficiently. It seems okay to do this because the query is simple
* enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
+ * have many large foreign-key validations happening concurrently. So
* this seems to meet the criteria for being considered a "maintenance"
* operation, and accordingly we use maintenance_work_mem.
*
@@ -3434,7 +3434,7 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't passed
+ * Determine which relation to complain about. If tupdesc wasn't passed
* by caller, assume the violator tuple came from there.
*/
onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK);
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index eafa9fd671..ea00be4017 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -271,7 +271,7 @@ record_in(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -629,7 +629,7 @@ record_recv(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -883,7 +883,7 @@ record_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1109,7 +1109,7 @@ record_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index ea2bb65d3d..fa2ca63697 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -736,7 +736,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, bool showTblSpc,
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should never be
+ * Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -1138,7 +1138,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
prettyFlags, 0);
/*
- * Now emit the constraint definition. There are cases where
+ * Now emit the constraint definition. There are cases where
* the constraint expression will be fully parenthesized and
* we don't need the outer parens ... but there are other
* cases where we do need 'em. Be conservative for now.
@@ -1914,7 +1914,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
- * varlevelsup 0). This is sufficient for many uses of deparse_expression.
+ * varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
@@ -1953,7 +1953,7 @@ deparse_context_for(const char *aliasname, Oid relid)
* right child plans. A special case is that a nestloop inner indexscan
* might have OUTER Vars, but the outer side of the join is not a child
* plan node. To handle such cases the outer plan node must be passed
- * separately. (Pass NULL for outer_plan otherwise.)
+ * separately. (Pass NULL for outer_plan otherwise.)
*
* Note: plan and outer_plan really ought to be declared as "Plan *", but
* we use "Node *" to avoid having to include plannodes.h in builtins.h.
@@ -2272,8 +2272,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the passed
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
@@ -2687,7 +2687,7 @@ get_target_list(List *targetList, deparse_context *context,
}
/*
- * Figure out what the result column should be called. In the context
+ * Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
@@ -2827,7 +2827,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno,
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
- * dump it without any decoration. Otherwise, just dump the expression
+ * dump it without any decoration. Otherwise, just dump the expression
* normally.
*/
if (force_colno)
@@ -3323,7 +3323,7 @@ get_utility_query_def(Query *query, deparse_context *context)
* push_plan: set up deparse_namespace to recurse into the tlist of a subplan
*
* When expanding an OUTER or INNER reference, we must push new outer/inner
- * subplans in case the referenced expression itself uses OUTER/INNER. We
+ * subplans in case the referenced expression itself uses OUTER/INNER. We
* modify the top stack entry in-place to avoid affecting levelsup issues
* (although in a Plan tree there really shouldn't be any).
*
@@ -3575,9 +3575,9 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
*
* This is fairly straightforward except for the case of a Var of type RECORD.
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
- * the field name from it. We ereport if we can't determine the name.
+ * the field name from it. We ereport if we can't determine the name.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
*/
@@ -3924,7 +3924,7 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
@@ -4518,7 +4518,7 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* We cannot see an already-planned subplan in rule deparsing,
- * only while EXPLAINing a query plan. We don't try to
+ * only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
@@ -5465,7 +5465,7 @@ get_coercion_expr(Node *arg, deparse_context *context,
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
- * right above it. Avoid generating redundant output. However, beware of
+ * right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*/
@@ -5540,7 +5540,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype)
/*
* These types are printed without quotes unless they contain
* values that aren't accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends might accept NaN,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
* In reality we only need to defend against infinity and NaN,
@@ -6164,7 +6164,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
- /* Okay, we need the opclass name. Do we need to qualify it? */
+ /* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
@@ -6458,7 +6458,7 @@ generate_relation_name(Oid relid, List *namespaces)
* given that it is being called with the specified actual arg types.
* (Arg types matter because of ambiguous-function resolution rules.)
*
- * The result includes all necessary quoting and schema-prefixing. We can
+ * The result includes all necessary quoting and schema-prefixing. We can
* also pass back an indication of whether the function is variadic.
*/
static char *
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 5feb46e12c..1040a2c80d 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -72,7 +72,7 @@
* float8 oprjoin (internal, oid, internal, int2, internal);
*
* (Before Postgres 8.4, join estimators had only the first four of these
- * parameters. That signature is still allowed, but deprecated.) The
+ * parameters. That signature is still allowed, but deprecated.) The
* relationship between jointype and sjinfo is explained in the comments for
* clause_selectivity() --- the short version is that jointype is usually
* best ignored in favor of examining sjinfo.
@@ -179,7 +179,7 @@ static Const *string_to_bytea_const(const char *str, size_t str_len);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ.
*/
Datum
@@ -264,7 +264,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
- * that seeing whether it returns TRUE is an appropriate test. If you
+ * that seeing whether it returns TRUE is an appropriate test. If you
* don't like this, maybe you shouldn't be using eqsel for your
* operator...)
*/
@@ -395,7 +395,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* result averaged over all possible values whether common or
* uncommon. (Essentially, we are assuming that the not-yet-known
* comparison value is equally likely to be any of the possible
- * values, regardless of their frequency in the table. Is that a good
+ * values, regardless of their frequency in the table. Is that a good
* idea?)
*/
selec = 1.0 - stats->stanullfrac;
@@ -614,7 +614,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
* essentially using the histogram just as a representative sample. However,
* small histograms are unlikely to be all that representative, so the caller
* should be prepared to fall back on some other estimation approach when the
- * histogram is missing or very small. It may also be prudent to combine this
+ * histogram is missing or very small. It may also be prudent to combine this
* approach with another one when the histogram is small.
*
* If the actual histogram size is not at least min_hist_size, we won't bother
@@ -632,7 +632,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
@@ -803,7 +803,7 @@ ineq_histogram_selectivity(VariableStatData *vardata,
/*
* Watch out for the possibility that we got a NaN or
- * Infinity from the division. This can happen
+ * Infinity from the division. This can happen
* despite the previous checks, if for example "low"
* is -Infinity.
*/
@@ -818,7 +818,7 @@ ineq_histogram_selectivity(VariableStatData *vardata,
* Ideally we'd produce an error here, on the grounds that
* the given operator shouldn't have scalarXXsel
* registered as its selectivity func unless we can deal
- * with its operand types. But currently, all manner of
+ * with its operand types. But currently, all manner of
* stuff is invoking scalarXXsel, so give a default
* estimate until that can be fixed.
*/
@@ -844,7 +844,7 @@ ineq_histogram_selectivity(VariableStatData *vardata,
/*
* The histogram boundaries are only approximate to begin with,
- * and may well be out of date anyway. Therefore, don't believe
+ * and may well be out of date anyway. Therefore, don't believe
* extremely small or large selectivity estimates.
*/
if (hist_selec < 0.0001)
@@ -1035,7 +1035,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the correct
+ * positive-match operator and work with that. Set result to the correct
* default estimate, too.
*/
if (negate)
@@ -1234,7 +1234,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -2032,9 +2032,9 @@ eqjoinsel_inner(Oid operator,
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2066,7 +2066,7 @@ eqjoinsel_inner(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2273,9 +2273,9 @@ eqjoinsel_semi(Oid operator,
if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2306,7 +2306,7 @@ eqjoinsel_semi(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2342,7 +2342,7 @@ eqjoinsel_semi(Oid operator,
/*
* Now we need to estimate the fraction of relation 1 that has at
- * least one join partner. We know for certain that the matched MCVs
+ * least one join partner. We know for certain that the matched MCVs
* do, so that gives us a lower bound, but we're really in the dark
* about everything else. Our crude approach is: if nd1 <= nd2 then
* assume all non-null rel1 rows have join partners, else assume for
@@ -2939,11 +2939,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* case (all possible cross-product terms actually appear as groups) since
* very often the grouped-by Vars are highly correlated. Our current approach
* is as follows:
- * 1. Expressions yielding boolean are assumed to contribute two groups,
+ * 1. Expressions yielding boolean are assumed to contribute two groups,
* independently of their content, and are ignored in the subsequent
- * steps. This is mainly because tests like "col IS NULL" break the
+ * steps. This is mainly because tests like "col IS NULL" break the
* heuristic used in step 2 especially badly.
- * 2. Reduce the given expressions to a list of unique Vars used. For
+ * 2. Reduce the given expressions to a list of unique Vars used. For
* example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
* It is clearly correct not to count the same Var more than once.
* It is also reasonable to treat f(x) the same as x: f() cannot
@@ -2953,14 +2953,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* As a special case, if a GROUP BY expression can be matched to an
* expressional index for which we have statistics, then we treat the
* whole expression as though it were just a Var.
- * 3. If the list contains Vars of different relations that are known equal
+ * 3. If the list contains Vars of different relations that are known equal
* due to equivalence classes, then drop all but one of the Vars from each
* known-equal set, keeping the one with smallest estimated # of values
* (since the extra values of the others can't appear in joined rows).
* Note the reason we only consider Vars of different relations is that
* if we considered ones of the same rel, we'd be double-counting the
* restriction selectivity of the equality in the next step.
- * 4. For Vars within a single source rel, we multiply together the numbers
+ * 4. For Vars within a single source rel, we multiply together the numbers
* of values, clamp to the number of rows in the rel (divided by 10 if
* more than one Var), and then multiply by the selectivity of the
* restriction clauses for that rel. When there's more than one Var,
@@ -2971,7 +2971,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* by the restriction selectivity is effectively assuming that the
* restriction clauses are independent of the grouping, which is a crummy
* assumption, but it's hard to do better.
- * 5. If there are Vars from multiple rels, we repeat step 4 for each such
+ * 5. If there are Vars from multiple rels, we repeat step 4 for each such
* rel, and multiply the results together.
* Note that rels not containing grouped Vars are ignored completely, as are
* join clauses. Such rels cannot increase the number of groups, and we
@@ -3002,7 +3002,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
return 1.0;
/*
- * Count groups derived from boolean grouping expressions. For other
+ * Count groups derived from boolean grouping expressions. For other
* expressions, find the unique Vars used, treating an expression as a Var
* if we can find stats for it. For each one, record the statistical
* estimate of number of distinct values (total in its table, without
@@ -3089,7 +3089,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove these
+ * varinfos, plus all other Vars in the same relation. We remove these
* Vars from the newvarinfos list for the next iteration. This is the
* easiest way to group Vars of same rel together.
*/
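/*
 * Editorial sketch, not part of the patch: step 4 of the heuristic listed
 * above, reduced to one source rel with made-up inputs.  ndistinct[] holds
 * the per-Var distinct-value estimates, rel_rows the rel's row count, and
 * rel_selectivity the selectivity of its restriction clauses:
 */
static double
groups_for_rel(const double *ndistinct, int nvars,
			   double rel_rows, double rel_selectivity)
{
	double		prod = 1.0;
	double		clamp = rel_rows;
	int			i;

	for (i = 0; i < nvars; i++)
		prod *= ndistinct[i];		/* cross-product of per-Var counts */
	if (nvars > 1)
		clamp /= 10.0;				/* extra damping with multiple Vars */
	if (prod > clamp)
		prod = clamp;				/* can't exceed the rel's row count */
	return prod * rel_selectivity;	/* scale by restriction selectivity */
}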
@@ -3190,11 +3190,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* distribution, so this will have to do for now.
*
* We are passed the number of buckets the executor will use for the given
- * input relation. If the data were perfectly distributed, with the same
+ * input relation. If the data were perfectly distributed, with the same
* number of tuples going into each available bucket, then the bucketsize
* fraction would be 1/nbuckets. But this happy state of affairs will occur
* only if (a) there are at least nbuckets distinct data values, and (b)
- * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
* be nonuniformly occupied. If the other relation in the join has a key
* distribution similar to this one's, then the most-loaded buckets are
* exactly those that will be probed most often. Therefore, the "average"
@@ -3368,7 +3368,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -3953,7 +3953,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
* relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
@@ -4122,7 +4122,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
* XXX This means the Var comes from a JOIN or sub-SELECT. Later
* add code to dig down into the join etc and see if we can trace
* the variable to something with stats. (But beware of
- * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
+ * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
* cases where this would really be useful, because we'd have
* flattened the subselect if it is??)
*/
@@ -4133,7 +4133,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of that
+ * membership. Note that when varRelid isn't zero, only vars of that
* relation are considered "real" vars.
*/
varnos = pull_varnos(basenode);
@@ -4182,13 +4182,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to match
+ * We have an expression in vars of a single relation. Try to match
* it to expressional index columns, in hopes of finding some
* statistics.
*
* XXX it's conceivable that there are multiple matches with different
* index opfamilies; if so, we need to pick one that matches the
- * operator we are estimating for. FIXME later.
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -4284,7 +4284,7 @@ get_variable_numdistinct(VariableStatData *vardata)
double ntuples;
/*
- * Determine the stadistinct value to use. There are cases where we can
+ * Determine the stadistinct value to use. There are cases where we can
* get an estimate even without a pg_statistic entry, or can get a better
* value than is in pg_statistic.
*/
@@ -4755,7 +4755,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype,
* together with info about MCVs and NULLs.
*
* We use the >= and < operators from the specified btree opfamily to do the
- * estimation. The given variable and Const must be of the associated
+ * estimation. The given variable and Const must be of the associated
* datatype.
*
* XXX Note: we make use of the upper bound to estimate operator selectivity
@@ -4814,7 +4814,7 @@ prefix_selectivity(VariableStatData *vardata,
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
@@ -5038,7 +5038,7 @@ regex_selectivity(const char *patt, int pattlen, bool case_insensitive,
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C locale.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -5383,9 +5383,9 @@ genericcostestimate(PlannerInfo *root,
* The above calculations are all per-index-scan. However, if we are in a
* nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
* the potential for cache effects to reduce the number of disk page
- * fetches needed. We want to estimate the average per-scan I/O cost in
+ * fetches needed. We want to estimate the average per-scan I/O cost in
* the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
@@ -5457,7 +5457,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases.
*
@@ -5528,7 +5528,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexQuals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
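/*
 * Editorial sketch, not part of the patch: the MCV-vs-MCV matching that
 * the eqjoinsel_inner()/eqjoinsel_semi() comments above describe, reduced
 * to plain int arrays.  val/freq are each side's most-common values and
 * their frequencies; equality stands in for the join operator, and each
 * MCV is assumed to match at most one member of the other list:
 */
static double
mcv_join_selectivity(const int *val1, const double *freq1, int n1,
					 const int *val2, const double *freq2, int n2)
{
	double		matchfreq = 0.0;
	int			i,
				j;

	for (i = 0; i < n1; i++)
	{
		for (j = 0; j < n2; j++)
		{
			if (val1[i] == val2[j])
			{
				matchfreq += freq1[i] * freq2[j];
				break;			/* at most one match per MCV */
			}
		}
	}
	return matchfreq;	/* join selectivity of the MCV x MCV portion */
}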
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index fdd0cf4393..9b7e161ad1 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -369,7 +369,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
@@ -968,7 +968,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
* that fields to the right of the last one specified are zeroed out,
* but those to the left of it remain valid. Thus for example there
* is no operational difference between INTERVAL YEAR TO MONTH and
- * INTERVAL MONTH. In some cases we could meaningfully enforce that
+ * INTERVAL MONTH. In some cases we could meaningfully enforce that
* higher-order fields are zero; for example INTERVAL DAY could reject
* nonzero "month" field. However that seems a bit pointless when we
* can't do it consistently. (We cannot enforce a range limit on the
@@ -977,9 +977,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
*
* Note: before PG 8.4 we interpreted a limited set of fields as
* actually causing a "modulo" operation on a given value, potentially
- * losing high-order as well as low-order information. But there is
+ * losing high-order as well as low-order information. But there is
* no support for such behavior in the standard, and it seems fairly
- * undesirable on data consistency grounds anyway. Now we only
+ * undesirable on data consistency grounds anyway. Now we only
* perform truncation or rounding of low-order fields.
*/
if (range == INTERVAL_FULL_RANGE)
@@ -1099,7 +1099,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
+ * values. On most platforms, rint() will implement
* round-to-nearest-even, but the integer code always rounds up
* (away from zero). Is it worth trying to be consistent?
*/
@@ -1309,7 +1309,7 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
@@ -1479,7 +1479,7 @@ recalc_t:
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
* coding avoids hardwiring any assumptions about the width of pg_time_t,
* so it should behave sanely on machines without int64.
*/
@@ -4396,7 +4396,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
@@ -4570,7 +4570,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
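/*
 * Editorial sketch, not part of the patch: the rounding mismatch the
 * AdjustTimestampForTypmod()/AdjustIntervalForTypmod() comments above
 * mention.  For values exactly halfway between representable units,
 * rint() typically rounds to the nearest even value while the integer
 * path's add-half-then-divide idiom rounds away from zero (positive
 * inputs shown; the numbers are illustrative, not the real scales):
 */
#include <math.h>

static void
halfway_rounding_demo(void)
{
	double		d = 2.5;
	int64		t = 25;				/* e.g. 2.5 in units of 1/10 */
	int64		unit = 10;

	double		a = rint(d);				/* 2.0 under round-to-even */
	int64		b = (t + unit / 2) / unit;	/* (25 + 5) / 10 = 3 */

	(void) a;			/* the two paths disagree on the .5 case */
	(void) b;
}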
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index aac1039f55..fc60f6bac5 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -256,7 +256,7 @@ bpcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
@@ -550,7 +550,7 @@ varcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 59c0c0282b..dea340c699 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -475,7 +475,7 @@ textlen(PG_FUNCTION_ARGS)
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
@@ -633,7 +633,7 @@ text_substr_no_len(PG_FUNCTION_ARGS)
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*
@@ -922,7 +922,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* searched (t1) and the "needle" is the pattern being sought (t2).
*
* If the needle is empty or bigger than the haystack then there is no
- * point in wasting cycles initializing the table. We also choose not to
+ * point in wasting cycles initializing the table. We also choose not to
* use B-M-H for needles of length 1, since the skip table can't possibly
* save anything in that case.
*/
@@ -938,7 +938,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* declaration of TextPositionState allows up to 256 elements, but for
* short search problems we don't really want to have to initialize so
* many elements --- it would take too long in comparison to the
- * actual search time. So we choose a useful skip table size based on
+ * actual search time. So we choose a useful skip table size based on
* the haystack length minus the needle length. The closer the needle
* length is to the haystack length the less useful skipping becomes.
*
@@ -970,7 +970,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
state->skiptable[i] = len2;
/*
- * Now examine the needle. For each character except the last one,
+ * Now examine the needle. For each character except the last one,
* set the corresponding table element to the appropriate skip
* distance. Note that when two characters share the same skip table
* entry, the one later in the needle must determine the skip
@@ -1058,11 +1058,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[(unsigned char) *hptr & skiptablemask];
@@ -1114,11 +1114,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[*hptr & skiptablemask];
@@ -1153,7 +1153,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
/*
* Unfortunately, there is no strncoll(), so in the non-C locale case we
- * have to do some memory copying. This turns out to be significantly
+ * have to do some memory copying. This turns out to be significantly
* slower, so we optimize the case where LC_COLLATE is C. We also try to
* optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
@@ -2004,7 +2004,7 @@ textToQualifiedNameList(text *textval)
* SplitIdentifierString --- parse a string containing identifiers
*
* This is the guts of textToQualifiedNameList, and is exported for use in
- * other situations such as parsing GUC variables. In the GUC case, it's
+ * other situations such as parsing GUC variables. In the GUC case, it's
* important to avoid memory leaks, so the API is designed to minimize the
* amount of stuff that needs to be allocated and freed.
*
@@ -2012,7 +2012,7 @@ textToQualifiedNameList(text *textval)
* rawstring: the input string; must be overwritable! On return, it's
* been modified to contain the separated identifiers.
* separator: the separator punctuation expected between identifiers
- * (typically '.' or ','). Whitespace may also appear around
+ * (typically '.' or ','). Whitespace may also appear around
* identifiers.
* Outputs:
* namelist: filled with a palloc'd list of pointers to identifiers within
@@ -2081,7 +2081,7 @@ SplitIdentifierString(char *rawstring, char separator,
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the string
- * length. This is not a problem given the current implementation
+ * length. This is not a problem given the current implementation
* of downcase_truncate_identifier, but we'll probably have to do
* something about this someday.
*/
@@ -2416,7 +2416,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2499,7 +2499,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Note so and eo
+ * Copy the text that is back reference of regexp. Note so and eo
* are counted in characters not bytes.
*/
char *chunk_start;
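/*
 * Editorial sketch, not part of the patch: the Boyer-Moore-Horspool search
 * that the text_position_setup()/text_position_next() comments above
 * describe, on plain byte strings and with a full 256-entry skip table
 * instead of the size-tuned, masked table varlena.c uses:
 */
static const char *
bmh_search(const char *hay, int haylen, const char *needle, int nlen)
{
	int			skip[256];
	int			i;

	if (nlen <= 0 || nlen > haylen)
		return NULL;			/* no point initializing the table */

	/* a byte that never occurs in the needle skips its full length */
	for (i = 0; i < 256; i++)
		skip[i] = nlen;
	/* bytes in the needle (except the last) skip to realign their last use */
	for (i = 0; i < nlen - 1; i++)
		skip[(unsigned char) needle[i]] = nlen - 1 - i;

	/* i tracks the haystack byte aligned with the needle's last byte */
	for (i = nlen - 1; i < haylen; i += skip[(unsigned char) hay[i]])
	{
		int			j = nlen - 1;
		int			k = i;

		while (j >= 0 && hay[k] == needle[j])	/* compare backward */
		{
			k--;
			j--;
		}
		if (j < 0)
			return hay + k + 1;	/* matched: start is just past k */
	}
	return NULL;
}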
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 81aac773aa..08e4f4f539 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -254,7 +254,7 @@ xml_out(PG_FUNCTION_ARGS)
xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is because we
+ * xml_out removes the encoding property in all cases. This is because we
* cannot control from here whether the datum will be converted to a
* different client encoding, so we'd do more harm than good by including
* it.
@@ -425,7 +425,7 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
@@ -559,7 +559,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
/*
* We first evaluate all the arguments, then start up libxml and create
- * the result. This avoids issues if one of the arguments involves a call
+ * the result. This avoids issues if one of the arguments involves a call
* to some other function or subsystem that wants to use libxml on its own
* terms.
*/
@@ -909,7 +909,7 @@ pg_xml_init(void)
resetStringInfo(xml_err_buf);
/*
- * We re-establish the error callback function every time. This makes
+ * We re-establish the error callback function every time. This makes
* it safe for other subsystems (PL/Perl, say) to also use libxml with
* their own callbacks ... so long as they likewise set up the
* callbacks on every use. It's cheap enough to not be worth worrying
@@ -1116,7 +1116,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1637,8 +1637,8 @@ map_xml_name_to_sql_identifier(char *name)
*
* When xml_escape_strings is true, then certain characters in string
* values are replaced by entity references (&lt; etc.), as specified
- * in SQL/XML:2003 section 9.16 GR 8) ii). This is normally what is
- * wanted. The false case is mainly useful when the resulting value
+ * in SQL/XML:2003 section 9.16 GR 8) ii). This is normally what is
+ * wanted. The false case is mainly useful when the resulting value
* is used with xmlTextWriterWriteAttribute() to write out an
* attribute, because that function does the escaping itself. The SQL
* standard of 2003 is somewhat buggy in this regard, so we do our
@@ -1886,7 +1886,7 @@ _SPI_strdup(const char *s)
* SQL to XML mapping functions
*
* What follows below is intentionally organized so that you can read
- * along in the SQL/XML:2003 standard. The functions are mostly split
+ * along in the SQL/XML:2003 standard. The functions are mostly split
* up and ordered they way the clauses lay out in the standards
* document, and the identifiers are also aligned with the standard
* text. (SQL/XML:2006 appears to be ordered differently,
@@ -1896,13 +1896,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -1911,7 +1911,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -2075,12 +2075,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -3022,7 +3022,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* sections 9.11 and 9.15.
*
* (The distinction between 9.11 and 9.15 is basically that 9.15 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.11 doesn't appear to be required anywhere.)
*/
static const char *
@@ -3200,7 +3200,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2003 section 9.12.
+ * SPI cursor. See also SQL/XML:2003 section 9.12.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
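/*
 * Editorial sketch, not part of the patch: the entity-reference escaping
 * discussed above (SQL/XML:2003 section 9.16 GR 8) ii) in its minimal
 * form, using the StringInfo primitives.  The escape set shown (&, <, >,
 * and carriage return) is the usual one for XML content:
 */
static void
append_xml_escaped(StringInfo buf, const char *s)
{
	for (; *s; s++)
	{
		switch (*s)
		{
			case '&':
				appendStringInfoString(buf, "&amp;");
				break;
			case '<':
				appendStringInfoString(buf, "&lt;");
				break;
			case '>':
				appendStringInfoString(buf, "&gt;");
				break;
			case '\r':
				appendStringInfoString(buf, "&#x0d;");
				break;
			default:
				appendStringInfoChar(buf, *s);
				break;
		}
	}
}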
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 44ac8d9363..f71d552a0f 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -427,11 +427,11 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
* item pointer. Positive entries are deleted if they match the item
* pointer. Negative entries must be deleted if they match the hash
* value (since we do not have the exact key of the tuple that's being
- * inserted). But this should only rarely result in loss of a cache
+ * inserted). But this should only rarely result in loss of a cache
* entry that could have been kept.
*
* Note that it's not very relevant whether the tuple identified by
- * the item pointer is being inserted or deleted. We don't expect to
+ * the item pointer is being inserted or deleted. We don't expect to
* find matching positive entries in the one case, and we don't expect
* to find matching negative entries in the other; but we will do the
* right things in any case.
@@ -895,7 +895,7 @@ InitCatCache(int id,
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*/
#ifdef CACHEDEBUG
@@ -1097,7 +1097,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
* if necessary (on the first access to a particular cache).
*
* The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
* ReleaseCatCache() when done with it.
*
* The search key values should be expressed as Datums of the key columns'
@@ -1224,8 +1224,8 @@ SearchCatCache(CatCache *cache,
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
@@ -1264,7 +1264,7 @@ SearchCatCache(CatCache *cache,
*
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
@@ -1551,7 +1551,7 @@ SearchCatCacheList(CatCache *cache,
/*
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1641,7 +1641,7 @@ ReleaseCatCacheList(CatCList *list)
/*
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
@@ -1776,7 +1776,7 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
* the specified relation, find all catcaches it could be in, compute the
* correct hash value for each such catcache, and call the specified function
* to record the cache id, hash value, and tuple ItemPointer in inval.c's
- * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
+ * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
* using the recorded information.
*
* Note that it is irrelevant whether the given tuple is actually loaded
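
[Editor's aside] The comment above states the invalidation rule precisely: positive entries die only on an exact item-pointer match, while negative entries must die on a bare hash match, since the key of the incoming tuple is unknown. A minimal standalone sketch of that rule, with hypothetical Entry/invalidate names rather than catcache.c's CatCTup machinery:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical, much-simplified cache entry; the real CatCTup carries the
 * tuple itself plus reference counting. */
typedef struct Entry
{
    struct Entry *next;
    uint32_t      hash;       /* hash of this entry's lookup key */
    uintptr_t     item_ptr;   /* stand-in for the tuple's ItemPointer */
    bool          negative;   /* "no such tuple" entry? */
} Entry;

/* Drop entries made stale by an insert/delete of the tuple at item_ptr
 * whose key hashes to 'hash'.  Positive entries need an exact item-pointer
 * match; negative entries die on a mere hash match. */
static void invalidate(Entry **head, uint32_t hash, uintptr_t item_ptr)
{
    Entry **prev = head;
    Entry  *e;

    while ((e = *prev) != NULL)
    {
        bool kill = e->negative ? (e->hash == hash)
                                : (e->item_ptr == item_ptr);
        if (kill)
        {
            *prev = e->next;   /* unlink, then release the entry */
            free(e);
        }
        else
            prev = &e->next;
    }
}
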
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 29b9c19f0e..a137d18857 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -29,23 +29,23 @@
*
* If we successfully complete the transaction, we have to broadcast all
* these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
* to record the transaction commit before sending SI messages, otherwise
* the other backends won't see our updated tuples as good.
*
* When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
* to the pending lists of the parent transaction.
*
* In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
* two events, delete + insert, for simplicity. (There are cases where
* it'd be possible to record just one event, but we don't currently try.)
*
* We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. We do, however,
+ * on tuples in relations that have associated catcaches. We do, however,
* have to register every operation on every tuple that *could* be in a
- * catcache, whether or not it currently is in our cache. Also, if the
+ * catcache, whether or not it currently is in our cache. Also, if the
* tuple is in a relation that has multiple catcaches, we need to register
* an invalidation message for each such catcache. catcache.c's
* PrepareToInvalidateCacheTuple() routine provides the knowledge of which
@@ -53,10 +53,10 @@
*
* Also, whenever we see an operation on a pg_class or pg_attribute tuple,
* we register a relcache flush operation for the relation described by that
- * tuple. pg_class updates trigger an smgr flush operation as well.
+ * tuple. pg_class updates trigger an smgr flush operation as well.
*
* We keep the relcache and smgr flush requests in lists separate from the
- * catcache tuple flush requests. This allows us to issue all the pending
+ * catcache tuple flush requests. This allows us to issue all the pending
* catcache flushes before we issue relcache flushes, which saves us from
* loading a catcache tuple during relcache load only to flush it again
* right away. Also, we avoid queuing multiple relcache flush requests for
@@ -101,7 +101,7 @@
/*
* To minimize palloc traffic, we keep pending requests in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). All request types can be stored as SharedInvalidationMessage
+ * array). All request types can be stored as SharedInvalidationMessage
* records. The ordering of requests within a list is never significant.
*/
typedef struct InvalidationChunk
@@ -624,7 +624,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
* Note: during a pg_class row update that assigns a new relfilenode
* or reltablespace value, we will be called on both the old and new
* tuples, and thus will broadcast invalidation messages showing both
- * the old and new RelFileNode values. This ensures that other
+ * the old and new RelFileNode values. This ensures that other
* backends will close smgr references to the old file.
*
* XXX possible future cleanup: it might be better to trigger smgr
@@ -650,7 +650,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
* relations can't have schema changes after bootstrap, so we should
* never come here for a shared rel anyway.)
*/
@@ -662,7 +662,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
/*
* When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
* of the index, but in practice it doesn't matter since indexes of
* shared catalogs can't have such updates.
*/
@@ -708,7 +708,7 @@ AcceptInvalidationMessages(void)
*
* If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
* slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression tests that way. It's useful to try
+ * trying to run the entire regression tests that way. It's useful to try
* a few simple tests, to make sure that cache reload isn't subject to
* internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
@@ -859,12 +859,12 @@ inval_twophase_postcommit(TransactionId xid, uint16 info,
* If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
* to the shared invalidation message queue. Note that these will be read
* not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
* we can skip immediate local processing of anything that's still in
* CurrentCmdInvalidMsgs, and just send that list out too.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
@@ -923,11 +923,11 @@ AtEOXact_Inval(bool isCommit)
* parent's PriorCmdInvalidMsgs list.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
* We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
* touched the caches yet.
*
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
* here, since CurTransactionContext is about to be emptied anyway
* (if aborting). Beware of the possibility of aborting the same nesting
* level twice, though.
@@ -983,7 +983,7 @@ AtEOSubXact_Inval(bool isCommit)
* in a transaction.
*
* Here, we send no messages to the shared queue, since we don't know yet if
- * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
+ * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
* list, so as to flush our caches of any entries we have outdated in the
* current command. We then move the current-cmd list over to become part
* of the prior-cmds list.
@@ -1015,7 +1015,7 @@ CommandEndInvalidationMessages(void)
* Prepare for invalidation messages for nontransactional updates.
*
* A nontransactional invalidation is one that must be sent whether or not
- * the current transaction eventually commits. We arrange for all invals
+ * the current transaction eventually commits. We arrange for all invals
* queued between this call and EndNonTransactionalInvalidation() to be sent
* immediately when the latter is called.
*
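
[Editor's aside] The comments above describe the two pending lists and when each is processed: at command end the current-command list is executed locally and spliced onto the prior-commands list; at transaction end everything is broadcast on commit, while on abort only the prior-commands list is processed locally. A minimal sketch of that lifecycle under hypothetical names (Msg, command_end, xact_end), not inval.c's actual SharedInvalidationMessage chunks:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical message record; the real lists hold SharedInvalidationMessage
 * records in palloc'd chunks. */
typedef struct Msg { struct Msg *next; int cache_id; unsigned hash_value; } Msg;

static Msg *current_cmd_msgs;   /* queued by the current command */
static Msg *prior_cmds_msgs;    /* queued by earlier commands this xact */

static void local_execute(Msg *m)
{
    (void) m;                   /* flush matching cache entries here */
}

/* Command end: flush our own caches for this command's changes, then move
 * the current-command list over to the prior-commands list. */
static void command_end(void)
{
    for (Msg *m = current_cmd_msgs; m; m = m->next)
        local_execute(m);
    Msg **tail = &prior_cmds_msgs;
    while (*tail)
        tail = &(*tail)->next;
    *tail = current_cmd_msgs;   /* splice current onto prior */
    current_cmd_msgs = NULL;
}

/* Transaction end: on commit, broadcast both lists (our own backend re-reads
 * them at next transaction start, so no immediate local processing of the
 * current list is needed); on abort, locally process only the prior list --
 * no other backend ever saw our tuples. */
static void xact_end(bool is_commit, void (*broadcast)(Msg *))
{
    if (is_commit)
    {
        broadcast(prior_cmds_msgs);
        broadcast(current_cmd_msgs);
    }
    else
    {
        for (Msg *m = prior_cmds_msgs; m; m = m->next)
            local_execute(m);
    }
    prior_cmds_msgs = current_cmd_msgs = NULL;
}
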
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 2250e854b9..4ca207e315 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -150,13 +150,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
* (This indicates that the operator is not a valid ordering operator.)
*
* Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
* or NULLS LAST, as well as inefficient planning due to failure to match up
* pathkeys that should be the same. So we want a determinate result here.
* Because of the way the syscache search works, we'll use the interpretation
* associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
* to build reverse-sort opfamilies, it doesn't seem worth expending any
* additional effort on ensuring consistency.
*/
@@ -358,7 +358,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
*
* The planner currently uses simple equal() tests to compare the lists
* returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
* searches are handled, in normal operation the result will be sorted by OID
* so everything works fine. If running with system index usage disabled,
* the result ordering is unspecified and hence the planner might fail to
@@ -1751,7 +1751,7 @@ get_typbyval(Oid typid)
* A two-fer: given the type OID, return both typlen and typbyval.
*
* Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
* instead of two. Also, this routine raises an error instead of
* returning a bogus value when given a bad type OID.
*/
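
[Editor's aside] The "two-fer" above returns typlen and typbyval from a single lookup and raises an error on a bad OID instead of returning garbage. A toy standalone version, using a static table in place of the syscache (all names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cached type row; the real code reads pg_type via syscache. */
typedef struct { uint32_t oid; int16_t typlen; bool typbyval; } TypeRow;

static const TypeRow type_table[] = {
    { 23, 4, true },    /* e.g. int4: fixed 4 bytes, pass-by-value */
    { 25, -1, false },  /* e.g. text: varlena, pass-by-reference */
};

/* Fetch typlen and typbyval in one lookup; error out on a bad OID rather
 * than returning a bogus value. */
static void get_typlenbyval_sketch(uint32_t typid, int16_t *typlen, bool *typbyval)
{
    for (size_t i = 0; i < sizeof(type_table) / sizeof(type_table[0]); i++)
        if (type_table[i].oid == typid)
        {
            *typlen = type_table[i].typlen;
            *typbyval = type_table[i].typbyval;
            return;
        }
    fprintf(stderr, "cache lookup failed for type %u\n", typid);
    exit(1);
}
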
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index f43ed25e5f..18ae6b7569 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -12,16 +12,16 @@
*
* The plan cache manager itself is principally responsible for tracking
* whether cached plans should be invalidated because of schema changes in
- * the objects they depend on. When (and if) the next demand for a cached
+ * the objects they depend on. When (and if) the next demand for a cached
* plan occurs, the query will be replanned. Note that this could result
* in an error, for example if a column referenced by the query is no
- * longer present. The creator of a cached plan can specify whether it
+ * longer present. The creator of a cached plan can specify whether it
* is allowable for the query to change output tupdesc on replan (this
* could happen with "SELECT *" for example) --- if so, it's up to the
* caller to notice changes and cope with them.
*
* Currently, we track exactly the dependencies of plans on relations and
- * user-defined functions. On relcache invalidation events or pg_proc
+ * user-defined functions. On relcache invalidation events or pg_proc
* syscache invalidation events, we invalidate just those plans that depend
* on the particular object being modified. (Note: this scheme assumes
* that any table modification that requires replanning will generate a
@@ -205,7 +205,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* avoids extra copy steps during plan construction. If the query ever does
* need replanning, we'll generate a separate new CachedPlan at that time, but
* the CachedPlanSource and the initial CachedPlan share the caller-provided
- * context and go away together when neither is needed any longer. (Because
+ * context and go away together when neither is needed any longer. (Because
* the parser and planner generate extra cruft in addition to their real
* output, this approach means that the context probably contains a bunch of
* useless junk as well as the useful trees. Hence, this method is a
@@ -368,7 +368,7 @@ StoreCachedPlan(CachedPlanSource *plansource,
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: the referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
@@ -538,7 +538,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
}
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*/
resultDesc = PlanCacheComputeResultDesc(slist);
@@ -603,7 +603,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
@@ -860,7 +860,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of either fully-planned statements
- * or Queries, determine the result tupledesc it will produce. Returns NULL
+ * or Queries, determine the result tupledesc it will produce. Returns NULL
* if the execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
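
[Editor's aside] The comment above separates releasing a plan from destroying it: DropCachedPlan only marks the CachedPlan dead, and the storage goes away when the refcount reaches zero. A miniature sketch of that protocol, with hypothetical Plan/release_plan/drop_plan names:

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical miniature of a refcounted cached plan. */
typedef struct
{
    int  refcount;   /* live references held by callers */
    bool dead;       /* dropped, pending destruction */
    /* plan trees would live here */
} Plan;

/* Drop the caller's reference; destroy only when this was the last
 * reference to an already-dropped plan. */
static void release_plan(Plan *p)
{
    if (--p->refcount == 0 && p->dead)
        free(p);
}

/* Drop the plan: free immediately if unreferenced, else defer to the
 * final release_plan() call. */
static void drop_plan(Plan *p)
{
    p->dead = true;
    if (p->refcount == 0)
        free(p);
}
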
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 7a7febf385..bc5cdfd790 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -108,7 +108,7 @@ bool criticalRelcachesBuilt = false;
/*
* This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
* to detect whether data about to be written by write_relcache_init_file()
* might already be obsolete.
*/
@@ -426,7 +426,7 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file).
*/
@@ -489,7 +489,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
+ * ("unknown"). Verify this if assert checking is on. They will be
* computed when and if needed during tuple access.
*/
#ifdef USE_ASSERT_CHECKING
@@ -503,7 +503,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special cases
+ * attribute: it must be zero. This eliminates the need for special cases
* for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
@@ -559,7 +559,7 @@ RelationBuildTupleDesc(Relation relation)
* each relcache entry that has associated rules. The context is used
* just for rule info, not for any other subsidiary data of the relcache
* entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
* to be easy to free explicitly, anyway.
*/
static void
@@ -668,9 +668,9 @@ RelationBuildRuleLock(Relation relation)
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
* The reason for doing this when the rule is loaded, rather than when
@@ -966,7 +966,7 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we need
+ * Make the private context to hold index access info. The reason we need
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*
@@ -1124,7 +1124,7 @@ IndexSupportInitialize(oidvector *indclass,
* Note there is no provision for flushing the cache. This is OK at the
* moment because there is no way to ALTER any interesting properties of an
* existing opclass --- all you can do is drop it, which will result in
- * a useless but harmless dead entry in the cache. To support altering
+ * a useless but harmless dead entry in the cache. To support altering
* opclass membership (not the same as opfamily membership!), we'd need to
* be able to flush this cache as well as the contents of relcache entries
* for indexes.
@@ -1281,7 +1281,7 @@ LookupOpclassInfo(Oid operatorClassOid,
}
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
* the default ones (those with lefttype = righttype = opcintype).
*/
if (numSupport > 0)
@@ -1393,7 +1393,7 @@ formrdesc(const char *relationName, Oid relationReltype,
/*
* It's important to distinguish between shared and non-shared relations,
- * even at bootstrap time, to make sure we know where they are stored. At
+ * even at bootstrap time, to make sure we know where they are stored. At
* present, all relations that formrdesc is used for are not shared.
*/
relation->rd_rel->relisshared = false;
@@ -1805,7 +1805,7 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted, the
- * next smgr access should reopen the files automatically. This ensures
+ * next smgr access should reopen the files automatically. This ensures
* that the low-level file access state is updated after, say, a vacuum
* truncation.
*/
@@ -1818,7 +1818,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* VACUUM. Likewise reset the fsm and vm size info.
*
* If it's a nailed index, then we need to re-read the pg_class row to see
- * if its relfilenode changed. We can't necessarily do that here, because
+ * if its relfilenode changed. We can't necessarily do that here, because
* we might be in a failed transaction. We assume it's okay to do it if
* there are open references to the relcache entry (cf notes for
* AtEOXact_RelationCache). Otherwise just mark the entry as possibly
@@ -2073,7 +2073,7 @@ RelationCacheInvalidateEntry(Oid relationId)
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
* relation cache.
*
* This is currently used only to recover from SI message buffer overflow,
@@ -2086,7 +2086,7 @@ RelationCacheInvalidateEntry(Oid relationId)
* We do this in two phases: the first pass deletes deletable items, and
* the second one rebuilds the rebuildable items. This is essential for
* safety, because hash_seq_search only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
* occur while we are walking the table, resulting in recursive entry to
* this routine, we could crash because the inner invocation blows away
* the entry next to be visited by the outer scan. But this way is OK,
@@ -2202,7 +2202,7 @@ AtEOXact_RelationCache(bool isCommit)
* unless there is actually something for this routine to do. Other than
* the debug-only Assert checks, most transactions don't create any work
* for us to do here, so we keep a static flag that gets set if there is
- * anything to do. (Currently, this means either a relation is created in
+ * anything to do. (Currently, this means either a relation is created in
* the current xact, or one is given a new relfilenode, or an index list
* is forced.) For simplicity, the flag remains set till end of top-level
* transaction, even though we could clear it at subtransaction end in
@@ -2354,7 +2354,7 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
* RelationCacheMarkNewRelfilenode
*
* Mark the rel as having been given a new relfilenode in the current
- * (sub) transaction. This is a hint that can be used to optimize
+ * (sub) transaction. This is a hint that can be used to optimize
* later operations on the rel in the same transaction.
*/
void
@@ -2494,7 +2494,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. Note that the physical ID (relfilenode) is initially the same
+ * places. Note that the physical ID (relfilenode) is initially the same
* as the logical ID (OID).
*/
rel->rd_rel->relisshared = shared_relation;
@@ -2582,7 +2582,7 @@ RelationCacheInitialize(void)
* the system catalogs. We first try to read pre-computed relcache
* entries from the pg_internal.init file. If that's missing or
* broken, make phony entries for the minimum set of nailed-in-cache
- * relations. Then (unless bootstrapping) make sure we have entries
+ * relations. Then (unless bootstrapping) make sure we have entries
* for the critical system indexes. Once we've done all this, we
* have enough infrastructure to open any system catalog or use any
* catcache. The last step is to rewrite pg_internal.init if needed.
@@ -2630,9 +2630,9 @@ RelationCacheInitializePhase2(void)
/*
* If we didn't get the critical system indexes loaded into relcache, do
- * so now. These are critical because the catcache and/or opclass cache
+ * so now. These are critical because the catcache and/or opclass cache
* depend on them for fetches done during relcache load. Thus, we have an
- * infinite-recursion problem. We can break the recursion by doing
+ * infinite-recursion problem. We can break the recursion by doing
* heapscans instead of indexscans at certain key spots. To avoid hobbling
* performance, we only want to do that until we have the critical indexes
* loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
@@ -2649,7 +2649,7 @@ RelationCacheInitializePhase2(void)
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
@@ -3071,7 +3071,7 @@ RelationGetIndexList(Relation relation)
/*
* We build the list we intend to return (in the caller's context) while
- * doing the scan. After successfully completing the scan, we copy that
+ * doing the scan. After successfully completing the scan, we copy that
* list into the relcache entry. This avoids cache-context memory leakage
* if we get some sort of error partway through.
*/
@@ -3825,7 +3825,7 @@ load_relcache_init_file(void)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying to
+ * init file is broken, so do it the hard way. We don't bother trying to
* free the clutter we just allocated; it's not in the relcache so it
* won't hurt.
*/
@@ -3880,7 +3880,7 @@ write_relcache_init_file(void)
}
/*
- * Write a magic number to serve as a file version identifier. We can
+ * Write a magic number to serve as a file version identifier. We can
* change the magic number whenever the relcache layout changes.
*/
magic = RELCACHE_INIT_FILEMAGIC;
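
[Editor's aside] The magic number written above acts as a file-format version stamp: bump it whenever the relcache layout changes, and stale init files reject themselves on the next read. A small sketch of the write/check pair, with a made-up INIT_FILEMAGIC value standing in for RELCACHE_INIT_FILEMAGIC:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical magic value; the real constant changes whenever the
 * relcache layout changes. */
#define INIT_FILEMAGIC 0x573266

static int write_init_file(FILE *fp)
{
    uint32_t magic = INIT_FILEMAGIC;

    /* write the version identifier first; cache entries would follow */
    if (fwrite(&magic, sizeof(magic), 1, fp) != 1)
        return -1;
    return 0;
}

/* Returns 1 if the file is usable; on any mismatch the caller falls back
 * to rebuilding entries the hard way from the catalogs. */
static int check_init_file(FILE *fp)
{
    uint32_t magic;

    if (fread(&magic, sizeof(magic), 1, fp) != 1)
        return 0;
    return magic == INIT_FILEMAGIC;
}
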
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 922c4a626f..4fd8fe5fb6 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -764,7 +764,7 @@ static bool CacheInitialized = false;
* InitCatalogCache - initialize the caches
*
* Note that no database access is done here; we only allocate memory
- * and initialize the cache structure. Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
* to complete initialization of a cache happens upon first use
* of that cache.
*/
@@ -1003,7 +1003,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname)
* extract a specific attribute.
*
* This is equivalent to using heap_getattr() on a tuple fetched
- * from a non-cached relation. Usually, this is only used for attributes
+ * from a non-cached relation. Usually, this is only used for attributes
* that could be NULL or variable length; the fixed-size attributes in
* a system table are accessed just by mapping the tuple onto the C struct
* declarations from include/catalog/.
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 5e428e0b21..fc846f2650 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -66,7 +66,7 @@ static HTAB *TypeCacheHash = NULL;
*
* Stored record types are remembered in a linear array of TupleDescs,
* which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs. The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
* uses just the first N columns' type OIDs, and so we may have multiple
* entries with the same hash key.
*/
@@ -294,7 +294,7 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor). We don't use IncrTupleDescRefCount() for
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for
* this, because the reference mustn't be entered in the current
* resource owner; it can outlive the current query.
*/
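
[Editor's aside] The hash key described above uses only the first N columns' type OIDs, so distinct record types can share a hash bucket and must be told apart by a full TupleDesc comparison afterwards. A sketch of such a key (REC_HASH_KEYS and the mixing function are assumptions, not typcache.c's actual code):

#include <stdint.h>

#define REC_HASH_KEYS 16   /* assumed cap on columns that feed the key */

/* Hash a record type by its first N column type OIDs.  Descriptors that
 * agree on those columns collide and need a full comparison to resolve. */
static uint32_t record_type_hash(const uint32_t *attr_type_oids, int natts)
{
    uint32_t h = 0;
    int      n = natts < REC_HASH_KEYS ? natts : REC_HASH_KEYS;

    for (int i = 0; i < n; i++)
        h = h * 31 + attr_type_oids[i];
    return h;
}
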
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 49c9c5c8ef..918a6324ce 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -9,23 +9,23 @@
* if we run out of memory, it's important to be able to report that fact.
* There are a number of considerations that go into this.
*
- * First, distinguish between re-entrant use and actual recursion. It
+ * First, distinguish between re-entrant use and actual recursion. It
* is possible for an error or warning message to be emitted while the
- * parameters for an error message are being computed. In this case
+ * parameters for an error message are being computed. In this case
* errstart has been called for the outer message, and some field values
- * may have already been saved, but we are not actually recursing. We handle
- * this by providing a (small) stack of ErrorData records. The inner message
+ * may have already been saved, but we are not actually recursing. We handle
+ * this by providing a (small) stack of ErrorData records. The inner message
* can be computed and sent without disturbing the state of the outer message.
* (If the inner message is actually an error, this isn't very interesting
* because control won't come back to the outer message generator ... but
* if the inner message is only debug or log data, this is critical.)
*
* Second, actual recursion will occur if an error is reported by one of
- * the elog.c routines or something they call. By far the most probable
+ * the elog.c routines or something they call. By far the most probable
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
@@ -97,7 +97,7 @@ int Log_destination = LOG_DESTINATION_STDERR;
/*
* Max string length to send to syslog(). Note that this doesn't count the
* sequence-number prefix we add, and of course it doesn't count the prefix
- * added by syslog itself. On many implementations it seems that the hard
+ * added by syslog itself. On many implementations it seems that the hard
* limit is approximately 2K bytes including both those prefixes.
*/
#ifndef PG_SYSLOG_LIMIT
@@ -220,7 +220,7 @@ errstart(int elevel, const char *filename, int lineno,
{
/*
* If we are inside a critical section, all errors become PANIC
- * errors. See miscadmin.h.
+ * errors. See miscadmin.h.
*/
if (CritSectionCount > 0)
elevel = PANIC;
@@ -233,7 +233,7 @@ errstart(int elevel, const char *filename, int lineno,
*
* 2. ExitOnAnyError mode switch is set (initdb uses this).
*
- * 3. the error occurred after proc_exit has begun to run. (It's
+ * 3. the error occurred after proc_exit has begun to run. (It's
* proc_exit's responsibility to see that this doesn't turn into
* infinite recursion!)
*/
@@ -330,7 +330,7 @@ errstart(int elevel, const char *filename, int lineno,
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -432,7 +432,7 @@ errfinish(int dummy,...)
*
* Reset InterruptHoldoffCount in case we ereport'd from inside an
* interrupt holdoff section. (We assume here that no handler will
- * itself be inside a holdoff section. If necessary, such a handler
+ * itself be inside a holdoff section. If necessary, such a handler
* could save and restore InterruptHoldoffCount for itself, but this
* should make life easier for most.)
*
@@ -458,7 +458,7 @@ errfinish(int dummy,...)
* progress, so that we can report the message before dying. (Without
* this, pq_putmessage will refuse to send the message at all, which is
* what we want for NOTICE messages, but not for fatal exits.) This hack
- * is necessary because of poor design of old-style copy protocol. Note
+ * is necessary because of poor design of old-style copy protocol. Note
* we must do this even if client is fool enough to have set
* client_min_messages above FATAL, so don't look at output_to_client.
*/
@@ -570,7 +570,7 @@ errcode(int sqlerrcode)
/*
* errcode_for_file_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of disk file access.
*
* NOTE: the primary error message string should generally include %m
@@ -641,7 +641,7 @@ errcode_for_file_access(void)
/*
* errcode_for_socket_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of socket access.
*
* NOTE: the primary error message string should generally include %m
@@ -679,7 +679,7 @@ errcode_for_socket_access(void)
* This macro handles expansion of a format string and associated parameters;
* it's common code for errmsg(), errdetail(), etc. Must be called inside
* a routine that is declared like "const char *fmt, ..." and has an edata
- * pointer set up. The message is assigned to edata->targetfield, or
+ * pointer set up. The message is assigned to edata->targetfield, or
* appended to it if appendval is true. The message is subject to translation
* if translateit is true.
*
@@ -1131,7 +1131,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery. Note that the message is intentionally not localized,
* else failure to convert it to client encoding could cause further
@@ -1226,7 +1226,7 @@ EmitErrorReport(void)
/*
* CopyErrorData --- obtain a copy of the topmost error stack entry
*
- * This is only for use in error handler code. The data is copied into the
+ * This is only for use in error handler code. The data is copied into the
* current memory context, so callers should always switch away from
* ErrorContext first; otherwise it will be lost when FlushErrorState is done.
*/
@@ -1317,7 +1317,7 @@ FlushErrorState(void)
*
* A handler can do CopyErrorData/FlushErrorState to get out of the error
* subsystem, then do some processing, and finally ReThrowError to re-throw
- * the original error. This is slower than just PG_RE_THROW() but should
+ * the original error. This is slower than just PG_RE_THROW() but should
* be used if the "some processing" is likely to incur another error.
*/
void
@@ -1334,7 +1334,7 @@ ReThrowError(ErrorData *edata)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1487,7 +1487,7 @@ set_syslog_parameters(const char *ident, int facility)
{
/*
* guc.c is likely to call us repeatedly with same parameters, so don't
- * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
@@ -2597,7 +2597,7 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno. This
+ * Some strerror()s return an empty string for out-of-range errno. This
* is ANSI C spec compliant, but not exactly useful. Also, we may get
* back strings of question marks if libc cannot transcode the message to
* the codeset specified by LC_CTYPE. If we get nothing useful, first try
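
[Editor's aside] The re-entrancy scheme described at the top of elog.c rests on a small fixed stack of ErrorData records, with stack overflow treated as PANIC because it implies errors recursing during error recovery. A standalone miniature of that stack (the names and the tiny ERRORDATA_STACK_SIZE are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define ERRORDATA_STACK_SIZE 5   /* small and fixed, as in the real code */

typedef struct { const char *message; int elevel; } ErrData;

static ErrData errordata[ERRORDATA_STACK_SIZE];
static int     errordata_stack_depth = -1;   /* -1 = stack empty */

/* Push a new error slot for re-entrant use; if messages nest deeper than
 * the stack allows, assume runaway recursion and bail out hard. */
static ErrData *error_push(int elevel, const char *msg)
{
    if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
    {
        errordata_stack_depth = -1;          /* reset for sanity */
        fprintf(stderr, "PANIC: ERRORDATA_STACK_SIZE exceeded\n");
        abort();
    }
    errordata[errordata_stack_depth].elevel = elevel;
    errordata[errordata_stack_depth].message = msg;
    return &errordata[errordata_stack_depth];
}

/* Pop when the inner message has been fully emitted, restoring the
 * partially-built outer message untouched. */
static void error_pop(void)
{
    errordata_stack_depth--;
}
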
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index abe5a870c7..739e710f4e 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -131,7 +131,7 @@ load_external_function(char *filename, char *funcname,
/*
* This function loads a shlib file without looking up any particular
- * function in it. If the same shlib has previously been loaded,
+ * function in it. If the same shlib has previously been loaded,
* unload and reload it.
*
* When 'restricted' is true, only libraries in the presumed-secure
@@ -171,7 +171,7 @@ lookup_external_function(void *filehandle, char *funcname)
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded. Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
@@ -473,7 +473,7 @@ file_exists(const char *name)
* If name contains a slash, check if the file exists, if so return
* the name. Else (no slash) try to expand using search path (see
* find_in_dynamic_libpath below); if that works, return the fully
- * expanded file name. If the previous failed, append DLSUFFIX and
+ * expanded file name. If the previous failed, append DLSUFFIX and
* try again. If all fails, just return the original name.
*
* The result will always be freshly palloc'd.
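
[Editor's aside] The expansion rule above -- try the name as given, then with DLSUFFIX appended, and fall back to the original so the loader produces the error -- can be sketched as follows; expand_libname is hypothetical, and the search-path walk for slash-less names is elided:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>   /* access() */

#define DLSUFFIX ".so"   /* platform-dependent in the real build */

/* Expand a shared-library name: bare name first, then with DLSUFFIX
 * appended; if neither exists, return the original and let the loader
 * report the failure.  Result is always freshly allocated. */
static char *expand_libname(const char *name)
{
    if (access(name, F_OK) == 0)
        return strdup(name);

    size_t len = strlen(name) + strlen(DLSUFFIX) + 1;
    char  *full = malloc(len);

    snprintf(full, len, "%s%s", name, DLSUFFIX);
    if (access(full, F_OK) == 0)
        return full;

    free(full);
    return strdup(name);
}
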
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index bcfbf5ac37..87c3de0a73 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -91,7 +91,7 @@ static Datum fmgr_security_definer(PG_FUNCTION_ARGS);
/*
- * Lookup routines for builtin-function table. We can search by either Oid
+ * Lookup routines for builtin-function table. We can search by either Oid
* or name, but search by Oid is much faster.
*/
@@ -578,7 +578,7 @@ clear_external_function_hash(void *filehandle)
* Copy an FmgrInfo struct
*
* This is inherently somewhat bogus since we can't reliably duplicate
- * language-dependent subsidiary info. We cheat by zeroing fn_extra,
+ * language-dependent subsidiary info. We cheat by zeroing fn_extra,
* instead, meaning that subsidiary info will have to be recomputed.
*/
void
@@ -858,7 +858,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
/*
- * Support for security-definer and proconfig-using functions. We support
+ * Support for security-definer and proconfig-using functions. We support
* both of these features using the same call handler, because they are
* often used together and it would be inefficient (as well as notationally
* messy) to have two levels of call handler involved.
@@ -873,11 +873,11 @@ struct fmgr_security_definer_cache
/*
* Function handler for security-definer/proconfig functions. We extract the
* OID of the actual function and do a fmgr lookup again. Then we fetch the
- * pg_proc row and copy the owner ID and proconfig fields. (All this info
+ * pg_proc row and copy the owner ID and proconfig fields. (All this info
* is cached for the duration of the current query.) To execute a call,
* we temporarily replace the flinfo with the cached/looked-up one, while
* keeping the outer fcinfo (which contains all the actual arguments, etc.)
- * intact. This is not re-entrant, but then the fcinfo itself can't be used
+ * intact. This is not re-entrant, but then the fcinfo itself can't be used
* re-entrantly anyway.
*/
static Datum
@@ -954,7 +954,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* We don't need to restore GUC or userid settings on error, because the
- * ensuing xact or subxact abort will do that. The PG_TRY block is only
+ * ensuing xact or subxact abort will do that. The PG_TRY block is only
* needed to clean up the flinfo link.
*/
save_flinfo = fcinfo->flinfo;
@@ -1003,7 +1003,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* These are for invocation of a specifically named function with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. Also, the function cannot be one that needs to
+ * are allowed to be NULL. Also, the function cannot be one that needs to
* look at FmgrInfo, since there won't be any.
*/
Datum
@@ -1548,8 +1548,8 @@ FunctionCall9(FmgrInfo *flinfo, Datum arg1, Datum arg2,
/*
* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially fmgr_info() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially fmgr_info() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the fmgr_info() once and then use FunctionCallN().
*/
Datum
@@ -1858,7 +1858,7 @@ OidFunctionCall9(Oid functionId, Datum arg1, Datum arg2,
*
* One important difference from the bare function call is that we will
* push any active SPI context, allowing SPI-using I/O functions to be
- * called from other SPI functions without extra notation. This is a hack,
+ * called from other SPI functions without extra notation. This is a hack,
* but the alternative of expecting all SPI functions to do SPI_push/SPI_pop
* around I/O calls seems worse.
*/
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 7ecd781220..01d0c667b3 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -135,7 +135,7 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
* FuncCallContext is pointing to it), but in most usage patterns the
* tuples stored in it will be in the function's per-tuple context. So at
* the beginning of each call, the Slot will hold a dangling pointer to an
- * already-recycled tuple. We clear it out here.
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
* will always be NULL. This is just here for backwards compatibility in
@@ -191,13 +191,13 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
@@ -280,7 +280,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -442,7 +442,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
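
[Editor's aside] The step above -- deducing polymorphic types from the actual argument types, relying on the parser having already validated their consistency -- might be sketched like this; resolve_poly_args is hypothetical, and the OID constants are used purely as markers:

#include <stdbool.h>
#include <stdint.h>

#define ANYELEMENTOID 2283   /* marker values mirroring pg_type's */
#define ANYARRAYOID   2277

/* Bind each declared polymorphic argument slot to the corresponding actual
 * input type; fail if a polymorphic slot has no actual type to bind
 * (i.e., no call context available). */
static bool resolve_poly_args(const uint32_t *declared, const uint32_t *actual,
                              int nargs, uint32_t *resolved)
{
    for (int i = 0; i < nargs; i++)
    {
        if (declared[i] == ANYELEMENTOID || declared[i] == ANYARRAYOID)
        {
            if (actual[i] == 0)
                return false;          /* can't deduce the rowtype */
            resolved[i] = actual[i];
        }
        else
            resolved[i] = declared[i];
    }
    return true;
}
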
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index ea748f653f..69f1266e92 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -5,19 +5,19 @@
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking. The simplest convention is
- * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create. This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition. We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
@@ -76,7 +76,7 @@
* Constants
*
* A hash table has a top-level "directory", each of whose entries points
- * to a "segment" of ssize bucket headers. The maximum number of hash
+ * to a "segment" of ssize bucket headers. The maximum number of hash
* buckets is thus dsize * ssize (but dsize may be expansible). Of course,
* the number of records in the table can be larger, but we don't want a
* whole lot of records per bucket or performance goes down.
@@ -84,7 +84,7 @@
* In a hash table allocated in shared memory, the directory cannot be
* expanded because it must stay at a fixed address. The directory size
* should be selected using hash_select_dirsize (and you'd better have
- * a good idea of the maximum number of entries!). For non-shared hash
+ * a good idea of the maximum number of entries!). For non-shared hash
* tables, the initial directory size can be left at the default.
*/
#define DEF_SEGSIZE 256
@@ -329,7 +329,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
* well.
*/
hashp->hctl = info->hctl;
@@ -776,7 +776,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
@@ -1242,7 +1242,7 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the hash
+ * Relocate records to the new bucket. NOTE: because of the way the hash
* masking is done in calc_bucket, only one old bucket can need to be
* split at this point. With a different way of reducing the hash value,
* that might not be true!
@@ -1388,7 +1388,7 @@ hash_corrupted(HTAB *hashp)
{
/*
* If the corruption is in a shared hashtable, we'd better force a
- * systemwide restart. Otherwise, just shut down this one backend.
+ * systemwide restart. Otherwise, just shut down this one backend.
*/
if (hashp->isshared)
elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
@@ -1433,7 +1433,7 @@ next_pow2_int(long num)
/************************* SEQ SCAN TRACKING ************************/
/*
- * We track active hash_seq_search scans here. The need for this mechanism
+ * We track active hash_seq_search scans here. The need for this mechanism
* comes from the fact that a scan will get confused if a bucket split occurs
* while it's in progress: it might visit entries twice, or even miss some
* entirely (if it's partway through the same bucket that splits). Hence
@@ -1453,7 +1453,7 @@ next_pow2_int(long num)
*
* This arrangement is reasonably robust if a transient hashtable is deleted
* without notifying us. The absolute worst case is we might inhibit splits
- * in another table created later at exactly the same address. We will give
+ * in another table created later at exactly the same address. We will give
* a warning at transaction end for reference leaks, so any bugs leading to
* lack of notification should be easy to catch.
*/
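
[Editor's aside] Partitioned mode, as described at the top of dynahash.c, hinges on the low-order bits of the hash value selecting the partition, so each bucket chain is guarded by exactly one lock and (with splits disabled) no entry can move across partitions. A minimal sketch (NUM_PARTITIONS and the Lock type are placeholders for the real LWLock array):

#include <stdint.h>

#define NUM_PARTITIONS 16   /* must be a power of 2 */

/* One lock per partition; callers take the right lock before touching
 * any bucket that maps into that partition. */
typedef struct { int placeholder; /* an LWLock in the real thing */ } Lock;
static Lock partition_locks[NUM_PARTITIONS];

/* Low-order bits of the hash pick the partition.  This lines up with a
 * calc_bucket()-style mapping, so every bucket belongs to exactly one
 * partition for the table's whole lifetime. */
static Lock *partition_lock(uint32_t hashvalue)
{
    return &partition_locks[hashvalue & (NUM_PARTITIONS - 1)];
}
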
diff --git a/src/backend/utils/init/flatfiles.c b/src/backend/utils/init/flatfiles.c
index ad9c46394c..1cab7e3fab 100644
--- a/src/backend/utils/init/flatfiles.c
+++ b/src/backend/utils/init/flatfiles.c
@@ -173,7 +173,7 @@ name_okay(const char *str)
* so we can set or update the XID wrap limit.
*
* Also, if "startup" is true, we tell relcache.c to clear out the relcache
- * init file in each database. That's a bit nonmodular, but scanning
+ * init file in each database. That's a bit nonmodular, but scanning
* pg_database twice during system startup seems too high a price for keeping
* things better separated.
*/
@@ -439,9 +439,9 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
/*
* We can't use heap_getattr() here because during startup we will not
- * have any tupdesc for pg_authid. Fortunately it's not too hard to
+ * have any tupdesc for pg_authid. Fortunately it's not too hard to
* work around this. rolpassword is the first possibly-null field so
- * we can compute its offset directly. Note that this only works
+ * we can compute its offset directly. Note that this only works
* reliably because the preceding field (rolconnlimit) is int4, and
* therefore rolpassword is always 4-byte-aligned, and will be at the
* same offset no matter whether it uses 1-byte or 4-byte header.
@@ -547,7 +547,7 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
heap_endscan(scan);
/*
- * Search for memberships. We can skip all this if pg_auth_members is
+ * Search for memberships. We can skip all this if pg_auth_members is
* empty.
*/
if (total_mem > 0)
@@ -926,7 +926,7 @@ AtEOSubXact_UpdateFlatFiles(bool isCommit,
* or pg_auth_members via general-purpose INSERT/UPDATE/DELETE commands.
*
* It is sufficient for this to be a STATEMENT trigger since we don't
- * care which individual rows changed. It doesn't much matter whether
+ * care which individual rows changed. It doesn't much matter whether
* it's a BEFORE or AFTER trigger.
*/
Datum
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 35ef0de709..e6b18e82c0 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -56,7 +56,7 @@ static char socketLockFile[MAXPGPATH];
*
* NOTE: "ignoring system indexes" means we do not use the system indexes
* for lookups (either in hardwired catalog accesses or in planner-generated
- * plans). We do, however, still update the indexes when a catalog
+ * plans). We do, however, still update the indexes when a catalog
* modification is made.
* ----------------------------------------------------------------
*/
@@ -383,7 +383,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* ever throw any kind of error. This is because they are used by
* StartTransaction and AbortTransaction to save/restore the settings,
* and during the first transaction within a backend, the value to be saved
- * and perhaps restored is indeed invalid. We have to be able to get
+ * and perhaps restored is indeed invalid. We have to be able to get
* through AbortTransaction without asserting in case InitPostgres fails.
*/
void
@@ -514,7 +514,7 @@ InitializeSessionUserId(const char *rolename)
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -628,7 +628,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -752,7 +752,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
pid_t my_pid = getpid();
/*
- * We need a loop here because of race conditions. But don't loop forever
+ * We need a loop here because of race conditions. But don't loop forever
* (for example, a non-writable $PGDATA directory might cause a failure
* that won't go away). 100 tries seems like plenty.
*/
@@ -761,7 +761,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
+ * Think not to make the file protection weaker than 0600. See
* comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
@@ -815,10 +815,10 @@ CreateLockFile(const char *filename, bool amPostmaster,
* the file must be stale (probably left over from a previous system
* boot cycle). We need this test because of the likelihood that a
* reboot will assign exactly the same PID as we had in the previous
- * reboot. Also, if there is just one more process launch in this
+ * reboot. Also, if there is just one more process launch in this
* reboot than in the previous one, the lockfile might mention our
* parent's PID. We can reject that since we'd never be launched
- * directly by a competing postmaster. We can't detect grandparent
+ * directly by a competing postmaster. We can't detect grandparent
* processes unfortunately, but if the init script is written
* carefully then all but the immediate parent shell will be
* root-owned processes and so the kill test will fail with EPERM.
@@ -827,7 +827,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
* implies that the existing process has a different userid than we
* do, which means it cannot be a competing postmaster. A postmaster
* cannot successfully attach to a data directory owned by a userid
- * other than its own. (This is now checked directly in
+ * other than its own. (This is now checked directly in
* checkDataDir(), but has been true for a long time because of the
* restriction that the data directory isn't group- or
* world-accessible.) Also, since we create the lockfiles mode 600,
@@ -874,9 +874,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be that
+ * No, the creating process did not exist. However, it could be that
* the postmaster crashed (or more likely was kill -9'd by a clueless
- * admin) but has left orphan backends behind. Check for this by
+ * admin) but has left orphan backends behind. Check for this by
* looking to see if there is an associated shmem segment that is
* still in use.
*
@@ -913,7 +913,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Looks like nobody's home. Unlink the file and try again to create
- * it. Need a loop because of possible race condition against other
+ * it. Need a loop because of possible race condition against other
* would-be creators.
*/
if (unlink(filename) < 0)
@@ -1045,7 +1045,7 @@ TouchSocketLockFile(void)
* lock file.
*
* This may be called multiple times in the life of a postmaster, if we
- * delete and recreate shmem due to backend crash. Therefore, be prepared
+ * delete and recreate shmem due to backend crash. Therefore, be prepared
* to overwrite existing information. (As of 7.1, a postmaster only creates
* one shm seg at a time; but for the purposes here, if we did have more than
* one then any one of them would do anyway.)
@@ -1093,7 +1093,7 @@ RecordSharedMemoryInLockFile(unsigned long id1, unsigned long id2)
ptr++;
/*
- * Append key information. Format to try to keep it the same length
+ * Append key information. Format to try to keep it the same length
* always (trailing junk won't hurt, but might confuse humans).
*/
sprintf(ptr, "%9lu %9lu\n", id1, id2);
@@ -1250,7 +1250,7 @@ load_libraries(const char *libraries, const char *gucname, bool restricted)
/*
* Choose notice level: avoid repeat messages when re-loading a library
- * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND
+ * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND
* configurations)
*/
#ifdef EXEC_BACKEND
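
The CreateLockFile() comments above spell out the whole protocol: O_EXCL for atomic creation, mode 0600, probing the recorded PID, and unlinking stale files inside a bounded retry loop. A minimal sketch of that pattern, with hypothetical names and the shared-memory staleness check omitted:

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Minimal sketch of O_EXCL-based lock file creation with stale-PID probing. */
static int
acquire_lock_file(const char *path)
{
	for (int ntries = 0; ntries < 100; ntries++)
	{
		/* O_EXCL makes creation atomic; keep the mode 0600. */
		int fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);

		if (fd >= 0)
		{
			char buf[32];

			snprintf(buf, sizeof(buf), "%d\n", (int) getpid());
			if (write(fd, buf, strlen(buf)) < 0)
			{
				close(fd);
				return -1;
			}
			return fd;
		}
		if (errno != EEXIST)
			return -1;			/* e.g. unwritable directory: give up */

		/* The file exists: read the PID it records and probe it. */
		FILE *f = fopen(path, "r");
		long pid = 0;

		if (f != NULL)
		{
			if (fscanf(f, "%ld", &pid) != 1)
				pid = 0;
			fclose(f);
		}
		/* kill(pid, 0) tests existence; EPERM still means somebody's home. */
		if (pid > 0 && (kill((pid_t) pid, 0) == 0 || errno == EPERM))
			return -1;			/* live owner: the lock is genuinely held */

		/* Stale file: remove it and loop to retry the atomic create. */
		if (unlink(path) < 0 && errno != ENOENT)
			return -1;
	}
	return -1;
}

The real function additionally rejects parent PIDs and cross-checks the recorded shared-memory key before declaring the file stale, per the comments above.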
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index a3d029f59c..2ea43b950e 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -114,7 +114,7 @@ FindMyDatabase(const char *name, Oid *db_id, Oid *db_tablespace)
* FindMyDatabaseByOid
*
* As above, but the actual database Id is known. Return its name and the
- * tablespace OID. Return TRUE if found, FALSE if not. The same restrictions
+ * tablespace OID. Return TRUE if found, FALSE if not. The same restrictions
* as FindMyDatabase apply.
*/
static bool
@@ -201,7 +201,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
name)));
/*
- * Check privilege to connect to the database. (The am_superuser test
+ * Check privilege to connect to the database. (The am_superuser test
* is redundant, but since we have the flag, might as well check it
* and save a few cycles.)
*/
@@ -217,7 +217,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -355,7 +355,7 @@ BaseInit(void)
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
- * OID, using the dboid parameter. In the latter case, the computed database
+ * OID, using the dboid parameter. In the latter case, the computed database
* name is passed out to the caller as a palloc'ed string in out_dbname.
*
* In bootstrap mode no parameters are used.
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index a167d4e4c8..224abd6411 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -231,7 +231,7 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
* 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
- * 0x9d. [region_low, region_high] We
+ * 0x9d. [region_low, region_high] We
* should remember big5 has two different regions (above).
* There is a bias for the distance between these regions.
* 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 6f79815458..8f37f85aba 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -30,7 +30,7 @@
/*
* We maintain a simple linked list caching the fmgr lookup info for the
* currently selected conversion functions, as well as any that have been
- * selected previously in the current session. (We remember previous
+ * selected previously in the current session. (We remember previous
* settings because we must be able to restore a previous setting during
* transaction rollback, without doing any fresh catalog accesses.)
*
@@ -192,8 +192,8 @@ SetClientEncoding(int encoding, bool doit)
{
/*
* If we're not in a live transaction, the only thing we can do is
- * restore a previous setting using the cache. This covers all
- * transaction-rollback cases. The only case it might not work for is
+ * restore a previous setting using the cache. This covers all
+ * transaction-rollback cases. The only case it might not work for is
* trying to change client_encoding on the fly by editing
* postgresql.conf and SIGHUP'ing. Which would probably be a stupid
* thing to do anyway.
@@ -275,7 +275,7 @@ pg_get_client_encoding_name(void)
*
* CAUTION: although the presence of a length argument means that callers
* can pass non-null-terminated strings, care is required because the same
- * string will be passed back if no conversion occurs. Such callers *must*
+ * string will be passed back if no conversion occurs. Such callers *must*
* check whether result == src and handle that case differently.
*
* Note: we try to avoid raising error, since that could get us into
@@ -512,7 +512,7 @@ pg_client_to_server(const char *s, int len)
* the selected client_encoding. If the client encoding is ASCII-safe
* then we just do a straight validation under that encoding. For an
* ASCII-unsafe encoding we have a problem: we dare not pass such data
- * to the parser but we have no way to convert it. We compromise by
+ * to the parser but we have no way to convert it. We compromise by
* rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(ClientEncoding->encoding))
@@ -663,7 +663,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen)
* This has almost the API of mbstowcs(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
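
The result == src caution above is a calling convention, not an implementation detail: a caller of any convert-or-return-input routine may free the result only when conversion actually allocated. A sketch with a stand-in converter (convert_maybe is hypothetical, not the real conversion API):

#include <stdlib.h>
#include <string.h>

/*
 * Stand-in for a convert-or-return-input routine: returns its argument
 * unchanged when no conversion is needed, else a freshly malloc'd copy.
 */
static char *
convert_maybe(char *src, int need_conversion)
{
	if (!need_conversion)
		return src;				/* same pointer handed back */
	char *dst = malloc(strlen(src) + 1);

	if (dst != NULL)
		strcpy(dst, src);		/* real code would transcode here */
	return dst;
}

static void
caller(char *src, int need_conversion)
{
	char *result = convert_maybe(src, need_conversion);

	if (result == NULL)
		return;
	/* ... use result ... */
	if (result != src)			/* the *must* check from the comment above */
		free(result);
}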
diff --git a/src/backend/utils/mb/wstrcmp.c b/src/backend/utils/mb/wstrcmp.c
index 22cd7e93c0..a89bdfd753 100644
--- a/src/backend/utils/mb/wstrcmp.c
+++ b/src/backend/utils/mb/wstrcmp.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/mb/wstrncmp.c b/src/backend/utils/mb/wstrncmp.c
index 60400e903b..797981ac17 100644
--- a/src/backend/utils/mb/wstrncmp.c
+++ b/src/backend/utils/mb/wstrncmp.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 4377ff555f..cb3b092cf5 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2904,7 +2904,7 @@ get_guc_variables(void)
/*
- * Build the sorted array. This is split out so that it could be
+ * Build the sorted array. This is split out so that it could be
* re-executed after startup (eg, we could allow loadable modules to
* add vars, and then we'd need to re-sort).
*/
@@ -3062,7 +3062,7 @@ add_placeholder_variable(const char *name, int elevel)
/*
* The char* is allocated at the end of the struct since we have no
- * 'static' place to point to. Note that the current value, as well as
+ * 'static' place to point to. Note that the current value, as well as
* the boot and reset values, start out NULL.
*/
var->variable = (char **) (var + 1);
@@ -3140,7 +3140,7 @@ find_option(const char *name, bool create_placeholders, int elevel)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that the
+ * See if the name is an obsolete name for a variable. We assume that the
* set of supported old names is short enough that a brute-force search is
* the best way.
*/
@@ -3259,7 +3259,7 @@ InitializeGUCOptions(void)
/*
* For historical reasons, some GUC parameters can receive defaults from
- * environment variables. Process those settings. NB: if you add or
+ * environment variables. Process those settings. NB: if you add or
* remove anything here, see also ProcessConfigFile().
*/
@@ -3775,7 +3775,7 @@ AtStart_GUC(void)
/*
* Enter a new nesting level for GUC values. This is called at subtransaction
- * start and when entering a function that has proconfig settings. NOTE that
+ * start and when entering a function that has proconfig settings. NOTE that
* we must not risk error here, else subtransaction start will be unhappy.
*/
int
@@ -3820,7 +3820,7 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
GucStack *stack;
/*
- * Process and pop each stack entry within the nest level. To
+ * Process and pop each stack entry within the nest level. To
* simplify fmgr_security_definer(), we allow failure exit from a
* function-with-SET-options to be recovered at the surrounding
* transaction or subtransaction abort; so there could be more than
@@ -4577,7 +4577,7 @@ set_config_option(const char *name, const char *value,
/*
* We are reading a PGC_POSTMASTER var from postgresql.conf.
* We can't change the setting, so give a warning if the DBA
- * tries to change it. (Throwing an error would be more
+ * tries to change it. (Throwing an error would be more
* consistent, but seems overly rigid.)
*/
if (changeVal && !is_newvalue_equal(record, value))
@@ -4622,7 +4622,7 @@ set_config_option(const char *name, const char *value,
* If a PGC_BACKEND parameter is changed in the config file,
* we want to accept the new value in the postmaster (whence
* it will propagate to subsequently-started backends), but
- * ignore it in existing backends. This is a tad klugy, but
+ * ignore it in existing backends. This is a tad klugy, but
* necessary because we don't re-read the config file during
* backend start.
*
@@ -5181,7 +5181,7 @@ set_config_sourcefile(const char *name, char *sourcefile, int sourceline)
/*
* Set a config option to the given value. See also set_config_option,
- * this is just the wrapper to be called from outside GUC. NB: this
+ * this is just the wrapper to be called from outside GUC. NB: this
* is used only for non-transactional operations.
*
* Note: there is no support here for setting source file/line, as it
@@ -5318,7 +5318,7 @@ IsSuperuserConfigOption(const char *name)
* report (in addition to the generic "invalid value for option FOO" that
* guc.c will provide). Note that the result might be ERROR or a lower
* level, so the caller must be prepared for control to return from ereport,
- * or not. If control does return, return false/NULL from the hook function.
+ * or not. If control does return, return false/NULL from the hook function.
*
* At some point it'd be nice to replace this with a mechanism that allows
* the custom message to become the DETAIL line of guc.c's generic message.
@@ -5463,7 +5463,7 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier. For quote mode,
+ * Plain string literal or identifier. For quote mode,
* quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
@@ -6555,7 +6555,7 @@ _ShowOption(struct config_generic * record, bool use_units)
{
/*
* Use int64 arithmetic to avoid overflows in units
- * conversion. If INT64_IS_BUSTED we might overflow
+ * conversion. If INT64_IS_BUSTED we might overflow
* anyway and print bogus answers, but there are few
* enough such machines that it doesn't seem worth trying
* harder.
@@ -7029,7 +7029,7 @@ ParseLongOption(const char *string, char **name, char **value)
/*
* Handle options fetched from pg_database.datconfig, pg_authid.rolconfig,
- * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
+ * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
*
* The array parameter must be an array of TEXT (it must not be NULL).
*/
@@ -7626,7 +7626,7 @@ assign_timezone_abbreviations(const char *newval, bool doit, GucSource source)
/*
* If reading config file, only the postmaster should bleat loudly
- * about problems. Otherwise, it's just this one process doing it,
+ * about problems. Otherwise, it's just this one process doing it,
* and we use WARNING message level.
*/
if (source == PGC_S_FILE)
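
The nesting-level comments above (NewGUCNestLevel / AtEOXact_GUC) describe a stack of saved values that is processed and popped when a level exits. A toy model of that save/restore discipline, with a single string-valued variable and hypothetical names:

#include <stdlib.h>
#include <string.h>

/*
 * Toy model of the GUC save/restore stack: each SET inside a nest level
 * pushes the prior value; leaving the level pops and (on abort) restores.
 */
typedef struct SavedVal
{
	int			nest_level;
	char	   *old_value;
	struct SavedVal *next;		/* stack: most recent entry first */
} SavedVal;

static char *current_value;
static SavedVal *stack;
static int	nest_level;

static int
new_nest_level(void)
{
	return ++nest_level;
}

static void
set_value(const char *v)
{
	SavedVal   *sv = malloc(sizeof(SavedVal));

	sv->nest_level = nest_level;
	sv->old_value = current_value;
	sv->next = stack;
	stack = sv;
	current_value = strdup(v);
}

static void
end_nest_level(int level, int is_commit)
{
	/* Process and pop each stack entry within the exiting nest level. */
	while (stack != NULL && stack->nest_level >= level)
	{
		SavedVal   *sv = stack;

		stack = sv->next;
		if (!is_commit)
		{
			free(current_value);
			current_value = sv->old_value;	/* roll back to saved value */
		}
		else
			free(sv->old_value);	/* keep the new value */
		free(sv);
	}
	nest_level = level - 1;
}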
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index abd7b09cff..8508925fb9 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -109,7 +109,7 @@ static char **save_argv;
* from being clobbered by subsequent ps_display actions.
*
* (The original argv[] will not be overwritten by this routine, but may be
- * overwritten during init_ps_display. Also, the physical location of the
+ * overwritten during init_ps_display. Also, the physical location of the
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
@@ -210,7 +210,7 @@ save_ps_display_args(int argc, char **argv)
/*
* Call this once during subprocess startup to set the identification
- * values. At this point, the original argv[] array may be overwritten.
+ * values. At this point, the original argv[] array may be overwritten.
*/
void
init_ps_display(const char *username, const char *dbname,
@@ -360,7 +360,7 @@ set_ps_display(const char *activity, bool force)
/*
* Returns what's currently in the ps display, in case someone needs
- * it. Note that only the activity part is returned. On some platforms
+ * it. Note that only the activity part is returned. On some platforms
* the string will not be null-terminated, so return the effective
* length into *displen.
*/
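
The save_ps_display_args()/init_ps_display() comments above concern the classic trick of reusing the argv[] string area as a scribble buffer for the ps display. A rough sketch, assuming the argv strings are contiguous; the real code must first copy argv and the environment out of the way, which is exactly why clobbering is a concern:

#include <string.h>

/*
 * Sketch of the ps-display trick: argv[] strings are laid out contiguously,
 * so that memory can be reused as a status buffer visible to ps.
 */
static char *ps_buffer;
static size_t ps_buffer_size;

static void
init_ps_display_area(int argc, char **argv)
{
	/* End of the last argv string bounds the writable area. */
	char	   *end = argv[argc - 1] + strlen(argv[argc - 1]);

	ps_buffer = argv[0];
	ps_buffer_size = (size_t) (end - argv[0]);
}

static void
set_ps_display_sketch(const char *activity)
{
	size_t		len = strlen(activity);

	if (len >= ps_buffer_size)
		len = ps_buffer_size - 1;
	memcpy(ps_buffer, activity, len);
	/* Zero the remainder so old text doesn't show through in ps. */
	memset(ps_buffer + len, 0, ps_buffer_size - len);
}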
diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c
index cc688dcaba..1b204cbc8b 100644
--- a/src/backend/utils/misc/tzparser.c
+++ b/src/backend/utils/misc/tzparser.c
@@ -193,7 +193,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/*
* Search the array for a duplicate; as a useful side effect, the array is
- * maintained in sorted order. We use strcmp() to ensure we match the
+ * maintained in sorted order. We use strcmp() to ensure we match the
* sort order datetime.c expects.
*/
arrayptr = *base;
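
The addToArray() comment above notes that the duplicate search and the sorted order come from the same scan. A sketch of that insert-with-duplicate-check, using strcmp() ordering as the comment prescribes (insert_sorted is a hypothetical helper):

#include <string.h>

/*
 * Sketch: insert a name into a sorted array, detecting duplicates along
 * the way; strcmp() ordering keeps the array consistent with its consumer.
 */
static int
insert_sorted(char **arr, int n, int maxn, char *name)
{
	int			i;

	for (i = 0; i < n; i++)
	{
		int			cmp = strcmp(name, arr[i]);

		if (cmp == 0)
			return n;			/* duplicate: keep the existing entry */
		if (cmp < 0)
			break;				/* found the insertion point */
	}
	if (n >= maxn)
		return -1;				/* caller must enlarge the array */
	memmove(&arr[i + 1], &arr[i], (size_t) (n - i) * sizeof(char *));
	arr[i] = name;
	return n + 1;
}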
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index f11e15d57c..1cced7adac 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -38,7 +38,7 @@
* request, even if it was much larger than necessary. This led to more
* and more wasted space in allocated chunks over time. To fix, get rid
* of the midrange behavior: we now handle only "small" power-of-2-size
- * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
* the number of freelists to change the small/large boundary.
*
*
@@ -54,7 +54,7 @@
* Thus, if someone makes the common error of writing past what they've
* requested, the problem is likely to go unnoticed ... until the day when
* there *isn't* any wasted space, perhaps because of different memory
- * alignment on a new platform, or some other effect. To catch this sort
+ * alignment on a new platform, or some other effect. To catch this sort
* of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
@@ -150,7 +150,7 @@ typedef AllocSetContext *AllocSet;
/*
* AllocBlock
* An AllocBlock is the unit of memory that is obtained by aset.c
- * from malloc(). It contains one or more AllocChunks, which are
+ * from malloc(). It contains one or more AllocChunks, which are
* the units requested by palloc() and freed by pfree(). AllocChunks
* cannot be returned to malloc() individually, instead they are put
* on freelists by pfree() and re-used by the next palloc() that has
@@ -358,7 +358,7 @@ AllocSetContextCreate(MemoryContext parent,
* Compute the allocation chunk size limit for this context. It can't be
* more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
* If maxBlockSize is small then requests exceeding the maxBlockSize
- * should be treated as large chunks, too. We have to have
+ * should be treated as large chunks, too. We have to have
* allocChunkLimit a power of two, because the requested and
* actually-allocated sizes of any chunk must be on the same side of the
* limit, else we get confused about whether the chunk is "big".
@@ -428,7 +428,7 @@ AllocSetInit(MemoryContext context)
* Actually, this routine has some discretion about what to do.
* It should mark all allocated chunks freed, but it need not necessarily
* give back all the resources the set owns. Our actual implementation is
- * that we hang onto any "keeper" block specified for the set. In this way,
+ * that we hang onto any "keeper" block specified for the set. In this way,
* we don't thrash malloc() when a context is repeatedly reset after small
* allocations, which is typical behavior for per-tuple contexts.
*/
@@ -672,7 +672,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* In most cases, we'll get back the index of the next larger
- * freelist than the one we need to put this chunk on. The
+ * freelist than the one we need to put this chunk on. The
* exception is when availchunk is exactly a power of 2.
*/
if (availchunk != (1 << (a_fidx + ALLOC_MINBITS)))
@@ -820,7 +820,7 @@ AllocSetFree(MemoryContext context, void *pointer)
{
/*
* Big chunks are certain to have been allocated as single-chunk
- * blocks. Find the containing block and return it to malloc().
+ * blocks. Find the containing block and return it to malloc().
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -919,7 +919,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > set->allocChunkLimit)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block. Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
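
The header comments above describe the aset.c policy: "small" requests are rounded up to a power of 2 and served from per-size freelists, while anything over the chunk limit goes straight to malloc(). A sketch of the size-to-freelist mapping that policy implies (constants chosen for illustration, not copied from the source):

#include <stddef.h>

#define ALLOC_MINBITS 3			/* smallest chunk: 8 bytes */
#define NUM_FREELISTS 11
/* Requests above this limit are handed straight to malloc(), one block
 * per chunk, and never recycled through the freelists. */
#define CHUNK_LIMIT (1 << (NUM_FREELISTS - 1 + ALLOC_MINBITS))

/* Sketch of mapping a request size to its power-of-2 freelist index. */
static int
freelist_index(size_t size)
{
	int			idx = 0;

	if (size > (1 << ALLOC_MINBITS))
	{
		size = (size - 1) >> ALLOC_MINBITS;
		while (size != 0)
		{
			idx++;
			size >>= 1;
		}
	}
	return idx;					/* chunk size is 1 << (idx + ALLOC_MINBITS) */
}

For example, a 9-byte request maps to index 1 (a 16-byte chunk), so the requested and allocated sizes always land on the same side of the small/large boundary, as the comments above require.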
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 3430fd29cd..7e52e67062 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -158,7 +158,7 @@ MemoryContextResetChildren(MemoryContext context)
*
* The type-specific delete routine removes all subsidiary storage
* for the context, but we have to delete the context node itself,
- * as well as recurse to get the children. We must also delink the
+ * as well as recurse to get the children. We must also delink the
* node from its parent, if it has one.
*/
void
@@ -413,22 +413,22 @@ MemoryContextContains(MemoryContext context, void *pointer)
* we want to be sure that we don't leave the context tree invalid
* in case of failure (such as insufficient memory to allocate the
* context node itself). The procedure goes like this:
- * 1. Context-type-specific routine first calls MemoryContextCreate(),
+ * 1. Context-type-specific routine first calls MemoryContextCreate(),
* passing the appropriate tag/size/methods values (the methods
* pointer will ordinarily point to statically allocated data).
* The parent and name parameters usually come from the caller.
- * 2. MemoryContextCreate() attempts to allocate the context node,
+ * 2. MemoryContextCreate() attempts to allocate the context node,
* plus space for the name. If this fails we can ereport() with no
* damage done.
- * 3. We fill in all of the type-independent MemoryContext fields.
- * 4. We call the type-specific init routine (using the methods pointer).
+ * 3. We fill in all of the type-independent MemoryContext fields.
+ * 4. We call the type-specific init routine (using the methods pointer).
* The init routine is required to make the node minimally valid
* with zero chance of failure --- it can't allocate more memory,
* for example.
- * 5. Now we have a minimally valid node that can behave correctly
+ * 5. Now we have a minimally valid node that can behave correctly
* when told to reset or delete itself. We link the node to its
* parent (if any), making the node part of the context tree.
- * 6. We return to the context-type-specific routine, which finishes
+ * 6. We return to the context-type-specific routine, which finishes
* up type-specific initialization. This routine can now do things
* that might fail (like allocate more memory), so long as it's
* sure the node is left in a state that delete will handle.
@@ -440,7 +440,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
*
* Normally, the context node and the name are allocated from
* TopMemoryContext (NOT from the parent context, since the node must
- * survive resets of its parent context!). However, this routine is itself
+ * survive resets of its parent context!). However, this routine is itself
* used to create TopMemoryContext! If we see that TopMemoryContext is NULL,
* we assume we are creating TopMemoryContext and use malloc() to allocate
* the node.
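
The numbered protocol above is concrete enough to skeletonize. A sketch with toy types, assuming size is the full size of the type-specific context struct whose first field is the node:

#include <stdlib.h>
#include <string.h>

/* Skeleton of the creation protocol described above, with toy types. */
typedef struct MemoryContextData
{
	const struct ContextMethods *methods;
	struct MemoryContextData *parent;
	struct MemoryContextData *firstchild;
	struct MemoryContextData *nextchild;
	char	   *name;
} MemoryContextData;

typedef struct ContextMethods
{
	void		(*init) (MemoryContextData *context);	/* must not fail */
} ContextMethods;

static MemoryContextData *
memory_context_create(const ContextMethods *methods,
					  MemoryContextData *parent,
					  const char *name, size_t size)
{
	/* Step 1 is the type-specific routine calling this function. */

	/* Step 2: allocate the node plus name; failing here does no damage. */
	MemoryContextData *node = malloc(size + strlen(name) + 1);

	if (node == NULL)
		return NULL;

	/* Step 3: fill in the type-independent fields. */
	memset(node, 0, size);
	node->methods = methods;
	node->name = (char *) node + size;
	strcpy(node->name, name);

	/* Step 4: type-specific init; required to be failure-free. */
	methods->init(node);

	/* Step 5: node is minimally valid, so link it into the tree. */
	node->parent = parent;
	if (parent != NULL)
	{
		node->nextchild = parent->firstchild;
		parent->firstchild = node;
	}
	/* Step 6: caller finishes type-specific setup, which may now fail. */
	return node;
}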
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 0445705ade..62bfc5f257 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -143,14 +143,14 @@ GetPortalByName(const char *name)
* Get the "primary" stmt within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such stmt. If multiple PlannedStmt structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one. Neither of these
* cases should occur in present usages of this function.
*
* Copes if given a list of Querys --- can't happen in a portal, but this
* code also supports plancache.c, which needs both cases.
*
* Note: the reason this is just handed a List is so that plancache.c
- * can share the code. For use with a portal, use PortalGetPrimaryStmt
+ * can share the code. For use with a portal, use PortalGetPrimaryStmt
* rather than calling this directly.
*/
Node *
@@ -276,7 +276,7 @@ CreateNewPortal(void)
* you can pass a constant string, perhaps "(query not available)".)
*
* commandTag shall be NULL if and only if the original query string
- * (before rewriting) was an empty string. Also, the passed commandTag must
+ * (before rewriting) was an empty string. Also, the passed commandTag must
* be a pointer to a constant string, since it is not copied.
*
* If cplan is provided, then it is a cached plan containing the stmts,
@@ -437,12 +437,12 @@ PortalDrop(Portal portal, bool isTopCommit)
PortalReleaseCachedPlan(portal);
/*
- * Release any resources still attached to the portal. There are several
+ * Release any resources still attached to the portal. There are several
* cases being covered here:
*
* Top transaction commit (indicated by isTopCommit): normally we should
* do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we have a
+ * releasing mechanism handle these resources too. However, if we have a
* FAILED portal (eg, a cursor that got an error), we'd better clean up
* its resources to avoid resource-leakage warning messages.
*
@@ -454,7 +454,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* cleaned up in transaction abort.
*
* Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become the
+ * is not FAILED then we do not release its locks. The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 9332678f01..6e73604e1c 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -150,7 +150,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
* but don't delete the owner objects themselves.
*
* Note that this executes just one phase of release, and so typically
- * must be called three times. We do it this way because (a) we want to
+ * must be called three times. We do it this way because (a) we want to
* do all the recursion separately for each phase, thereby preserving
* the needed order of operations; and (b) xact.c may have other operations
* to do between the phases.
@@ -224,7 +224,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
*
* During a commit, there shouldn't be any remaining pins --- that
* would indicate failure to clean up the executor correctly --- so
- * issue warnings. In the abort case, just clean up quietly.
+ * issue warnings. In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -373,7 +373,7 @@ ResourceOwnerDelete(ResourceOwner owner)
/*
* We delink the owner from its parent before deleting it, so that if
* there's an error we won't have deleted/busted owners still attached to
- * the owner tree. Better a leak than a crash.
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -565,7 +565,7 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course, but
+ * recently pinned buffer. This isn't always the case of course, but
* it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
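
The back-to-front rationale above translates directly into the scan order of the forget operation. A sketch over a plain int array; the real arrays hold buffer IDs, catcache references, and so on:

/*
 * Sketch of the back-to-front forget loop: the resource being released is
 * usually the one remembered most recently, so scanning from the end makes
 * the common case cheap and avoids O(N^2) release patterns overall.
 */
static int
forget_resource(int *resources, int *nresources, int value)
{
	for (int i = *nresources - 1; i >= 0; i--)
	{
		if (resources[i] == value)
		{
			/* Shift the remainder down to close the gap. */
			while (i < *nresources - 1)
			{
				resources[i] = resources[i + 1];
				i++;
			}
			(*nresources)--;
			return 1;
		}
	}
	return 0;					/* not found: caller treats as an error */
}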
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index d96915fe33..71fac3aac3 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -7,14 +7,14 @@
* tuplesort.c). Merging is an ideal algorithm for tape devices, but if
* we implement it on disk by creating a separate file for each "tape",
* there is an annoying problem: the peak space usage is at least twice
- * the volume of actual data to be sorted. (This must be so because each
+ * the volume of actual data to be sorted. (This must be so because each
* datum will appear in both the input and output tapes of the final
- * merge pass. For seven-tape polyphase merge, which is otherwise a
+ * merge pass. For seven-tape polyphase merge, which is otherwise a
* pretty good algorithm, peak usage is more like 4x actual data volume.)
*
* We can work around this problem by recognizing that any one tape
* dataset (with the possible exception of the final output) is written
- * and read exactly once in a perfectly sequential manner. Therefore,
+ * and read exactly once in a perfectly sequential manner. Therefore,
* a datum once read will not be required again, and we can recycle its
* space for use by the new tape dataset(s) being generated. In this way,
* the total space usage is essentially just the actual data volume, plus
@@ -55,7 +55,7 @@
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted. This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
@@ -117,7 +117,7 @@ typedef struct LogicalTape
/*
* The total data volume in the logical tape is numFullBlocks * BLCKSZ +
- * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
* only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
@@ -157,7 +157,7 @@ struct LogicalTapeSet
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block. We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
@@ -218,7 +218,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
/*
* Read a block-sized buffer from the specified block of the underlying file.
*
- * No need for an error return convention; we ereport() on any error. This
+ * No need for an error return convention; we ereport() on any error. This
* module should never attempt to read a block it doesn't know is there.
*/
static void
@@ -353,7 +353,7 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
/*
* Reset a logical tape's indirect-block hierarchy after a write pass
- * to prepare for reading. We dump out partly-filled blocks except
+ * to prepare for reading. We dump out partly-filled blocks except
* at the top of the hierarchy, and we rewind each level to the start.
* This call returns the first data block number, or -1L if the tape
* is empty.
@@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes)
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to. This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
@@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes. Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
@@ -732,7 +732,7 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * Completion of a read phase. Rewind and prepare for write.
+ * Completion of a read phase. Rewind and prepare for write.
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
@@ -826,7 +826,7 @@ LogicalTapeRead(LogicalTapeSet *lts, int tapenum,
*
* This *must* be called just at the end of a write pass, before the
* tape is rewound (after rewind is too late!). It performs a rewind
- * and switch to read mode "for free". An immediately following rewind-
+ * and switch to read mode "for free". An immediately following rewind-
* for-read call is OK but not necessary.
*/
void
@@ -862,7 +862,7 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
}
/*
- * Backspace the tape a given number of bytes. (We also support a more
+ * Backspace the tape a given number of bytes. (We also support a more
* general seek interface, see below.)
*
* *Only* a frozen-for-read tape can be backed up; we don't support
@@ -966,7 +966,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation would
+ * OK, advance or back up to the target block. This implementation would
* be pretty inefficient for long seeks, but we really aren't expecting
* that (a seek over one tuple is typical).
*/
@@ -999,7 +999,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
* Obtain current position in a form suitable for a later LogicalTapeSeek.
*
* NOTE: it'd be OK to do this during write phase with intention of using
- * the position for a seek after freezing. Not clear if anyone needs that.
+ * the position for a seek after freezing. Not clear if anyone needs that.
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
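
The free-block policy above — keep freeBlocks sorted in decreasing order, lazily, so the lowest-numbered free block is the cheap-to-remove last entry — sketches out as follows. Capacity management and the forgetFreeSpace flag are omitted:

#include <stdlib.h>

typedef struct
{
	long	   *freeBlocks;		/* free block numbers, descending when sorted */
	int			nFreeBlocks;
	int			blocksSorted;	/* is freeBlocks[] currently sorted? */
	long		nBlocksAllocated;
} TapeSpace;

static int
cmp_desc(const void *a, const void *b)
{
	long		la = *(const long *) a;
	long		lb = *(const long *) b;

	return (la < lb) ? 1 : (la > lb) ? -1 : 0;
}

static long
get_free_block(TapeSpace *ts)
{
	if (ts->nFreeBlocks > 0)
	{
		/* Re-sort only when a block is demanded and the list is dirty. */
		if (!ts->blocksSorted)
		{
			qsort(ts->freeBlocks, ts->nFreeBlocks, sizeof(long), cmp_desc);
			ts->blocksSorted = 1;
		}
		return ts->freeBlocks[--ts->nFreeBlocks];	/* lowest free block */
	}
	return ts->nBlocksAllocated++;	/* no free block: extend the file */
}

static void
release_block(TapeSpace *ts, long blocknum)
{
	/* Appending invalidates the sort; it is redone lazily on next demand. */
	ts->freeBlocks[ts->nFreeBlocks++] = blocknum;
	ts->blocksSorted = 0;
}

Sorting only on demand matches the expected usage cycle the comments describe: many releases in a row, then many reuses, so one qsort() amortizes over a whole batch.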
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index c507ebfbcc..d800f153ed 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -40,7 +40,7 @@
* into sorted runs in temporary tapes, emitting just enough tuples at each
* step to get back within the workMem limit. Whenever the run number at
* the top of the heap changes, we begin a new run with a new output tape
- * (selected per Algorithm D). After the end of the input is reached,
+ * (selected per Algorithm D). After the end of the input is reached,
* we dump out remaining tuples in memory into a final run (or two),
* then merge the runs using Algorithm D.
*
@@ -57,17 +57,17 @@
* access at all, defeating the read-ahead methods used by most Unix kernels.
* Worse, the output tape gets written into a very random sequence of blocks
* of the temp file, ensuring that things will be even worse when it comes
- * time to read that tape. A straightforward merge pass thus ends up doing a
+ * time to read that tape. A straightforward merge pass thus ends up doing a
* lot of waiting for disk seeks. We can improve matters by prereading from
* each source tape sequentially, loading about workMem/M bytes from each tape
* in turn. Then we run the merge algorithm, writing but not reading until
- * one of the preloaded tuple series runs out. Then we switch back to preread
+ * one of the preloaded tuple series runs out. Then we switch back to preread
* mode, fill memory again, and repeat. This approach helps to localize both
* read and write accesses.
*
* When the caller requests random access to the sort result, we form
* the final sorted run on a logical tape which is then "frozen", so
- * that we can access it randomly. When the caller does not need random
+ * that we can access it randomly. When the caller does not need random
* access, we return from tuplesort_performsort() as soon as we are down
* to one run per logical tape. The final merge is then performed
* on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
@@ -77,7 +77,7 @@
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives. In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
@@ -133,28 +133,28 @@ bool optimize_bounded_sort = true;
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons. We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries. tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
@@ -235,7 +235,7 @@ struct Tuplesortstate
void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() the out-of-line data (not the SortTuple struct!), and increase
@@ -261,7 +261,7 @@ struct Tuplesortstate
void (*reversedirection) (Tuplesortstate *state);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
@@ -395,7 +395,7 @@ struct Tuplesortstate
* If state->randomAccess is true, then the stored representation of the
* tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* randomAccess is not true, the write/read routines may omit the extra
* length word.
*
@@ -405,7 +405,7 @@ struct Tuplesortstate
* the back length word (if present).
*
* The write/read routines can make use of the tuple description data
- * stored in the Tuplesortstate record, if needed. They are also expected
+ * stored in the Tuplesortstate record, if needed. They are also expected
* to adjust state->availMem by the amount of memory space (not tape space!)
* released or consumed. There is no error return from either writetup
* or readtup; they should ereport() on failure.
@@ -479,7 +479,7 @@ static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
*
* After calling tuplesort_begin, the caller should call tuplesort_putXXX
* zero or more times, then call tuplesort_performsort when all the tuples
- * have been supplied. After performsort, retrieve the tuples in sorted
+ * have been supplied. After performsort, retrieve the tuples in sorted
* order by calling tuplesort_getXXX until it returns false/NULL. (If random
* access was requested, rescan, markpos, and restorepos can also be called.)
* Call tuplesort_end to terminate the operation and release memory/disk space.
@@ -767,7 +767,7 @@ tuplesort_begin_datum(Oid datumType,
*
* Advise tuplesort that at most the first N result tuples are required.
*
- * Must be called before inserting any tuples. (Actually, we could allow it
+ * Must be called before inserting any tuples. (Actually, we could allow it
* as long as the sort hasn't spilled to disk, but there seems no need for
* delayed calls at the moment.)
*
@@ -875,7 +875,7 @@ grow_memtuples(Tuplesortstate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. We assume here that the
* memory chunk overhead associated with the memtuples array is constant
- * and so there will be no unexpected addition to what we ask for. (The
+ * and so there will be no unexpected addition to what we ask for. (The
* minimum array size established in tuplesort_begin_common is large
* enough to force palloc to treat it as a separate chunk, so this
* assumption should be good. But let's check it.)
@@ -990,7 +990,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the array
+ * Save the tuple into the unsorted array. First, grow the array
* as needed. Note that we try to grow the array when there is
* still one free slot remaining --- if we fail, there'll still be
* room to store the incoming tuple, and then we'll switch to
@@ -1011,7 +1011,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
* enough tuples to meet the bound.
*
* Note that once we enter TSS_BOUNDED state we will always try to
- * complete the sort that way. In the worst case, if later input
+ * complete the sort that way. In the worst case, if later input
* tuples are larger than earlier ones, this might cause us to
* exceed workMem significantly.
*/
@@ -1142,7 +1142,7 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
- * in memory, using a heap to eliminate excess tuples. Now we
+ * in memory, using a heap to eliminate excess tuples. Now we
* have to transform the heap to a properly-sorted array.
*/
sort_bounded_heap(state);
@@ -1156,7 +1156,7 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining in
+ * Finish tape-based sort. First, flush all tuples remaining in
* memory out to tape; then merge until we have a single remaining
* run (or, if !randomAccess, one run per tape). Note that
* mergeruns sets the correct state->status.
@@ -1217,7 +1217,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1423,7 +1423,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
/*
* Fetch the next index tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
IndexTuple
@@ -1494,7 +1494,7 @@ tuplesort_merge_order(long allowedMem)
/*
* We need one tape for each merge input, plus another one for the output,
- * and each of these tapes needs buffer space. In addition we want
+ * and each of these tapes needs buffer space. In addition we want
* MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
* count).
*
@@ -1548,7 +1548,7 @@ inittapes(Tuplesortstate *state)
* don't decrease it to the point that we have no room for tuples. (That
* case is only likely to occur if sorting pass-by-value Datums; in all
* other scenarios the memtuples[] array is unlikely to occupy more than
- * half of allowedMem. In the pass-by-value case it's not important to
+ * half of allowedMem. In the pass-by-value case it's not important to
* account for tuple space, so we don't care if LACKMEM becomes
* inaccurate.)
*/
@@ -1672,7 +1672,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge. (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
@@ -1778,7 +1778,7 @@ mergeruns(Tuplesortstate *state)
* the loop without performing the last iteration of step D6, we have not
* rearranged the tape unit assignment, and therefore the result is on
* TAPE[T]. We need to do it this way so that we can freeze the final
- * output tape while rewinding it. The last iteration of step D6 would be
+ * output tape while rewinding it. The last iteration of step D6 would be
* a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[state->tapeRange];
@@ -1862,7 +1862,7 @@ mergeonerun(Tuplesortstate *state)
* beginmerge - initialize for a merge pass
*
* We decrease the counts of real and dummy runs for each tape, and mark
- * which tapes contain active input runs in mergeactive[]. Then, load
+ * which tapes contain active input runs in mergeactive[]. Then, load
* as many tuples as we can from each active input tape, and finally
* fill the merge heap with the first tuple from each active tape.
*/
@@ -1955,7 +1955,7 @@ beginmerge(Tuplesortstate *state)
* This routine exists to improve sequentiality of reads during a merge pass,
* as explained in the header comments of this file. Load tuples from each
* active source tape until the tape's run is exhausted or it has used up
- * its fair share of available memory. In any case, we guarantee that there
+ * its fair share of available memory. In any case, we guarantee that there
* is at least one preread tuple available from each unexhausted input tape.
*
* We invoke this routine at the start of a merge pass for initial load,
@@ -2219,7 +2219,7 @@ tuplesort_explain(Tuplesortstate *state)
* accurately once we have begun to return tuples to the caller (since we
* don't account for pfree's the caller is expected to do), so we cannot
* rely on availMem in a disk sort. This does not seem worth the overhead
- * to fix. Is it worth creating an API for the memory context code to
+ * to fix. Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
@@ -2261,7 +2261,7 @@ tuplesort_explain(Tuplesortstate *state)
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples. If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
@@ -2365,7 +2365,7 @@ sort_bounded_heap(Tuplesortstate *state)
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant. Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
@@ -2470,7 +2470,7 @@ markrunend(Tuplesortstate *state, int tapenum)
/*
- * Set up for an external caller of ApplySortFunction. This function
+ * Set up for an external caller of ApplySortFunction. This function
* basically just exists to localize knowledge of the encoding of sk_flags
* used in this module.
*/
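
The SortTuple comment above explains why the first key column is cached in datum1: most comparisons are decided without touching the tuple proper. A sketch of that fast path with stand-in types (compare_full_tuple is a placeholder for the remaining-columns comparison):

/*
 * Sketch of the SortTuple fast path: compare the cached first key column
 * before falling back to a full tuple comparison.
 */
typedef struct
{
	long		datum1;			/* cached first key column */
	int			isnull1;
	void	   *tuple;			/* the tuple proper */
	int			tupindex;		/* run number / source tape, per above */
} SortTuple;

static int
compare_full_tuple(const SortTuple *a, const SortTuple *b)
{
	(void) a;
	(void) b;
	return 0;					/* placeholder: remaining key columns */
}

static int
compare_sort_tuple(const SortTuple *a, const SortTuple *b)
{
	/* NULLs sort last in this sketch. */
	if (a->isnull1 || b->isnull1)
		return a->isnull1 - b->isnull1;
	if (a->datum1 != b->datum1)
		return (a->datum1 < b->datum1) ? -1 : 1;
	/* First keys equal: only now pay for the full comparison. */
	return compare_full_tuple(a, b);
}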
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 5d34a670be..2607ac0f42 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
@@ -17,7 +17,7 @@
* space limit specified by the caller.
*
* The (approximate) amount of memory allowed to the tuplestore is specified
- * in kilobytes by the caller. We absorb tuples and simply store them in an
+ * in kilobytes by the caller. We absorb tuples and simply store them in an
* in-memory array as long as we haven't exceeded maxKBytes. If we do exceed
* maxKBytes, we dump all the tuples into a temp file and then read from that
* when needed.
@@ -29,7 +29,7 @@
* When the caller requests backward-scan capability, we write the temp file
* in a format that allows either forward or backward scan. Otherwise, only
* forward scan is allowed. A request for backward scan must be made before
- * putting any tuples into the tuplestore. Rewind is normally allowed but
+ * putting any tuples into the tuplestore. Rewind is normally allowed but
* can be turned off via tuplestore_set_eflags; turning off rewind for all
* read pointers enables truncation of the tuplestore at the oldest read point
* for minimal memory usage. (The caller must explicitly call tuplestore_trim
@@ -63,7 +63,7 @@
/*
- * Possible states of a Tuplestore object. These denote the states that
+ * Possible states of a Tuplestore object. These denote the states that
* persist between calls of Tuplestore routines.
*/
typedef enum
@@ -82,7 +82,7 @@ typedef enum
*
* Special case: if eof_reached is true, then the pointer's read position is
* implicitly equal to the write position, and current/file/offset aren't
- * maintained. This way we need not update all the read pointers each time
+ * maintained. This way we need not update all the read pointers each time
* we write.
*/
typedef struct
@@ -126,7 +126,7 @@ struct Tuplestorestate
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
@@ -194,7 +194,7 @@ struct Tuplestorestate
* If state->backward is true, then the stored representation of
* the tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* state->backward is not set, the write/read routines may omit the extra
* length word.
*
@@ -290,7 +290,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
* tuple store are allowed.
*
* interXact: if true, the files used for on-disk storage persist beyond the
- * end of the current transaction. NOTE: It's the caller's responsibility to
+ * end of the current transaction. NOTE: It's the caller's responsibility to
* create such a tuplestore in a memory context and resource owner that will
* also survive transaction boundaries, and to ensure the tuplestore is closed
* when it's no longer wanted.
@@ -329,7 +329,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
* any data into the tuplestore.
*
* eflags is a bitmask following the meanings used for executor node
- * startup flags (see executor.h). tuplestore pays attention to these bits:
+ * startup flags (see executor.h). tuplestore pays attention to these bits:
* EXEC_FLAG_REWIND need rewind to start
* EXEC_FLAG_BACKWARD need backward fetch
* If tuplestore_set_eflags is not called, REWIND is allowed, and BACKWARD
@@ -739,7 +739,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple)
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If should_free is set, the
+ * Returns NULL if no more tuples. If should_free is set, the
* caller must pfree the returned tuple when done with it.
*
* Backward scan is only allowed if randomAccess was set true or
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 2ca33d7987..bfed10b72d 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -15,7 +15,7 @@
* this module.
*
* To allow reusing existing combo cids, we also keep a hash table that
- * maps cmin,cmax pairs to combo cids. This keeps the data structure size
+ * maps cmin,cmax pairs to combo cids. This keeps the data structure size
* reasonable in most cases, since the number of unique pairs used by any
* one transaction is likely to be small.
*
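
The comment above pairs an id -> (cmin, cmax) array with a reverse hash so existing combo cids are reused. A sketch of the mapping, with the hash replaced by a linear scan for brevity and bounds checking omitted:

#include <stdint.h>

typedef uint32_t CommandId;

typedef struct
{
	CommandId	cmin;
	CommandId	cmax;
} ComboCidKey;

/* The array maps combo id -> (cmin, cmax); real code adds a hash over the
 * pairs so lookup is O(1), which keeps per-transaction state small. */
static ComboCidKey combocids[1024];
static int	usedComboCids;

static int
get_combo_cid(CommandId cmin, CommandId cmax)
{
	/* Reuse an existing pair if we've seen it before. */
	for (int i = 0; i < usedComboCids; i++)
		if (combocids[i].cmin == cmin && combocids[i].cmax == cmax)
			return i;
	combocids[usedComboCids].cmin = cmin;
	combocids[usedComboCids].cmax = cmax;
	return usedComboCids++;
}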
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 9c06124ba9..6b99653fbc 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -8,9 +8,9 @@
* (tracked by separate refcounts on each snapshot), its memory can be freed.
*
* These arrangements let us reset MyProc->xmin when there are no snapshots
- * referenced by this transaction. (One possible improvement would be to be
+ * referenced by this transaction. (One possible improvement would be to be
* able to advance Xmin when the snapshot with the earliest Xmin is no longer
- * referenced. That's a bit harder though, it requires more locking, and
+ * referenced. That's a bit harder though, it requires more locking, and
* anyway it should be rather uncommon to keep snapshots referenced for too
* long.)
*
@@ -59,7 +59,7 @@ static Snapshot SecondarySnapshot = NULL;
* mode, we don't want it to say that BootstrapTransactionId is in progress.
*
* RecentGlobalXmin is initialized to InvalidTransactionId, to ensure that no
- * one tries to use a stale value. Readers should ensure that it has been set
+ * one tries to use a stale value. Readers should ensure that it has been set
* to something else before using it.
*/
TransactionId TransactionXmin = FirstNormalTransactionId;
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index c0b8970ff8..67a2665904 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -20,7 +20,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
@@ -87,12 +87,12 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
* buffer, so we can't use the LSN to interlock this; we have to just refrain
* from setting the hint bit until some future re-examination of the tuple.
*
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted. (Some
* code in heapam.c relies on that!)
*
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
* we can always set the hint bits, since VACUUM FULL always uses synchronous
- * commits and doesn't move tuples that weren't previously hinted. (This is
+ * commits and doesn't move tuples that weren't previously hinted. (This is
* not known by this subroutine, but is applied by its callers.)
*
* Normal commits may be asynchronous, so for those we need to get the LSN
@@ -483,7 +483,7 @@ HeapTupleSatisfiesAny(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
* This is a simplified version that only checks for VACUUM moving conditions.
* It's appropriate for TOAST usage because TOAST really doesn't want to do
* its own time qual checks; if you can see the main table row that contains
- * a TOAST reference, you should be able to see the TOASTed value. However,
+ * a TOAST reference, you should be able to see the TOASTed value. However,
* vacuuming a TOAST table is independent of the main table, and in case such
* a vacuum fails partway through, we'd better do this much checking.
*
@@ -1045,7 +1045,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
* we mainly want to know is if a tuple is potentially visible to *any*
* running transaction. If so, it can't be removed yet by VACUUM.
*
- * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
+ * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
* deleted by XIDs >= OldestXmin are deemed "recently dead"; they might
* still be visible to some open transaction, so we can't remove them,
* even if we see that the deleting transaction has committed.
@@ -1129,7 +1129,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now what
+ * Okay, the inserter committed, so it was good at some point. Now what
* about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1228,7 +1228,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays. Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
@@ -1244,7 +1244,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* If the snapshot contains full subxact data, the fastest way to check
* things is just to compare the given XID against both subxact XIDs and
- * top-level XIDs. If the snapshot overflowed, we have to use pg_subtrans
+ * top-level XIDs. If the snapshot overflowed, we have to use pg_subtrans
* to convert a subxact XID to its parent XID, but then we need only look
* at top-level XIDs not subxacts.
*/
@@ -1268,7 +1268,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* If xid was indeed a subxact, we might now have an xid < xmin, so
- * recheck to avoid an array scan. No point in rechecking xmax.
+ * recheck to avoid an array scan. No point in rechecking xmax.
*/
if (TransactionIdPrecedes(xid, snapshot->xmin))
return false;
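
A runnable sketch of the fast-path range check the hunks above describe; ToySnapshot and xid_in_snapshot are illustrative names, and real XIDs wrap around, so the backend compares with TransactionIdPrecedes rather than plain "<":

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;

    typedef struct
    {
        TransactionId xmin;     /* all XIDs < xmin are finished */
        TransactionId xmax;     /* all XIDs >= xmax had not started */
        TransactionId *xip;     /* in-progress top-level XIDs */
        int     xcnt;
    } ToySnapshot;

    static bool
    xid_in_snapshot(TransactionId xid, const ToySnapshot *snap)
    {
        int     i;

        if (xid < snap->xmin)
            return false;       /* surely finished before the snapshot */
        if (xid >= snap->xmax)
            return true;        /* started after the snapshot was taken */
        for (i = 0; i < snap->xcnt; i++)
            if (snap->xip[i] == xid)
                return true;
        return false;
    }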
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 1084267c77..7565b9b2de 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -18,7 +18,7 @@
* to produce a new database.
*
* For largely-historical reasons, the template1 database is the one built
- * by the basic bootstrap process. After it is complete, template0 and
+ * by the basic bootstrap process. After it is complete, template0 and
* the default database, postgres, are made just by copying template1.
*
* To create template1, we run the postgres (backend) program in bootstrap
@@ -779,7 +779,7 @@ find_matching_ts_config(const char *lc_type)
/*
* Convert lc_ctype to a language name by stripping everything after an
- * underscore. Just for paranoia, we also stop at '.' or '@'.
+ * underscore. Just for paranoia, we also stop at '.' or '@'.
*/
if (lc_type == NULL)
langname = xstrdup("");
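
The stripping rule is simple enough to show directly; lc_to_langname here is a hypothetical helper, not initdb's find_matching_ts_config:

    #include <stdio.h>
    #include <string.h>

    /* Sketch: "en_US.UTF-8" -> "en", stopping at '_', '.', or '@'. */
    static void
    lc_to_langname(const char *lc_ctype, char *out, size_t outlen)
    {
        size_t  n = strcspn(lc_ctype, "_.@");

        if (n >= outlen)
            n = outlen - 1;
        memcpy(out, lc_ctype, n);
        out[n] = '\0';
    }

    int
    main(void)
    {
        char    lang[32];

        lc_to_langname("en_US.UTF-8", lang, sizeof(lang));
        puts(lang);             /* en */
        return 0;
    }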
@@ -1787,7 +1787,7 @@ setup_dictionary(void)
/*
* Set up privileges
*
- * We mark most system catalogs as world-readable. We don't currently have
+ * We mark most system catalogs as world-readable. We don't currently have
* to touch functions, languages, or databases, because their default
* permissions are OK.
*
@@ -2090,7 +2090,7 @@ check_ok(void)
*
* Note: this is used to process both postgresql.conf entries and SQL
* string literals. Since postgresql.conf strings are defined to treat
- * backslashes as escapes, we have to double backslashes here. Hence,
+ * backslashes as escapes, we have to double backslashes here. Hence,
* when using this for a SQL string literal, use E'' syntax.
*
* We do not need to worry about encoding considerations because all
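
In miniature, the doubling rule looks like this; escape_quotes_toy is an assumption for the example, not initdb's escape_quotes():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Double backslashes and single quotes, so the result survives both
     * postgresql.conf string rules and, with E'' syntax, SQL literals. */
    static char *
    escape_quotes_toy(const char *src)
    {
        char   *result = malloc(2 * strlen(src) + 1);  /* worst case: all doubled */
        char   *dst = result;

        if (result == NULL)
            return NULL;
        while (*src)
        {
            if (*src == '\\' || *src == '\'')
                *dst++ = *src;  /* emit the character twice */
            *dst++ = *src++;
        }
        *dst = '\0';
        return result;
    }

    int
    main(void)
    {
        char   *s = escape_quotes_toy("C:\\data, don't");

        puts(s);                /* C:\\data, don''t */
        free(s);
        return 0;
    }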
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 9c8c8c12e8..93db5935b5 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -958,7 +958,7 @@ postmaster_is_alive(pid_t pid)
* postmaster we are after.
*
* Don't believe that our own PID or parent shell's PID is the postmaster,
- * either. (Windows hasn't got getppid(), though.)
+ * either. (Windows hasn't got getppid(), though.)
*/
if (pid == getpid())
return false;
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index e159bc4478..5ffad38ddc 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -40,7 +40,7 @@ static int numCatalogIds = 0;
/*
* These variables are static to avoid the notational cruft of having to pass
- * them into findTableByOid() and friends. For each of these arrays, we
+ * them into findTableByOid() and friends. For each of these arrays, we
* build a sorted-by-OID index array immediately after it's built, and then
* we use binary search in findTableByOid() and friends. (qsort'ing the base
* arrays themselves would be simpler, but it doesn't work because pg_dump.c
@@ -467,7 +467,7 @@ findObjectByDumpId(DumpId dumpId)
*
* We use binary search in a sorted list that is built on first call.
* If AssignDumpId() and findObjectByCatalogId() calls were intermixed,
- * the code would work, but possibly be very slow. In the current usage
+ * the code would work, but possibly be very slow. In the current usage
* pattern that does not happen, indeed we only need to build the list once.
*/
DumpableObject *
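
A sketch of the sorted-index lookup that findTableByOid and friends perform, under the assumption that tblindex holds pointers to the base array, sorted by OID once right after the array is built; find_table_by_oid is an illustrative stand-in:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint32_t Oid;
    typedef struct { Oid oid; /* ... other catalog fields ... */ } TableInfo;

    static TableInfo *
    find_table_by_oid(TableInfo **tblindex, int ntables, Oid oid)
    {
        int     lo = 0, hi = ntables - 1;

        while (lo <= hi)
        {
            int     mid = lo + (hi - lo) / 2;

            if (tblindex[mid]->oid == oid)
                return tblindex[mid];
            if (tblindex[mid]->oid < oid)
                lo = mid + 1;
            else
                hi = mid - 1;
        }
        return NULL;            /* not found */
    }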
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index 345d6cf3c8..1fe7036ede 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -168,7 +168,7 @@ fmtId(const char *rawid)
* standard_conforming_strings settings.
*
* This is essentially equivalent to libpq's PQescapeStringInternal,
- * except for the output buffer structure. We need it in situations
+ * except for the output buffer structure. We need it in situations
* where we do not have a PGconn available. Where we do,
* appendStringLiteralConn is a better choice.
*/
@@ -429,7 +429,7 @@ parse_version(const char *versionString)
* into individual items.
*
* On success, returns true and sets *itemarray and *nitems to describe
- * an array of individual strings. On parse failure, returns false;
+ * an array of individual strings. On parse failure, returns false;
* *itemarray may exist or be NULL.
*
* NOTE: free'ing itemarray is sufficient to deallocate the working storage.
@@ -564,7 +564,7 @@ buildACLCommands(const char *name, const char *subname,
/*
* At the end, these two will be pasted together to form the result. But
* the owner privileges need to go before the other ones to keep the
- * dependencies valid. In recent versions this is normally the case, but
+ * dependencies valid. In recent versions this is normally the case, but
* in old versions they come after the PUBLIC privileges and that results
* in problems if we need to run REVOKE on the owner privileges.
*/
@@ -718,7 +718,7 @@ buildACLCommands(const char *name, const char *subname,
*
* The returned grantee string will be the dequoted username or groupname
* (preceded with "group " in the latter case). The returned grantor is
- * the dequoted grantor name or empty. Privilege characters are decoded
+ * the dequoted grantor name or empty. Privilege characters are decoded
* and split between privileges with grant option (privswgo) and without
* (privs).
*
@@ -935,7 +935,7 @@ AddAcl(PQExpBuffer aclbuf, const char *keyword, const char *subname)
* namevar: name of query variable to match against an object-name pattern.
* altnamevar: NULL, or name of an alternative variable to match against name.
* visibilityrule: clause to use if we want to restrict to visible objects
- * (for example, "pg_catalog.pg_table_is_visible(p.oid)"). Can be NULL.
+ * (for example, "pg_catalog.pg_table_is_visible(p.oid)"). Can be NULL.
*
* Formatting note: the text already present in buf should end with a newline.
* The appended text, if any, will end with one too.
@@ -980,7 +980,7 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
* last alternatives which is not what we want.
*
* Note: the result of this pass is the actual regexp pattern(s) we want
- * to execute. Quoting/escaping into SQL literal format will be done
+ * to execute. Quoting/escaping into SQL literal format will be done
* below using appendStringLiteralConn().
*/
appendPQExpBufferStr(&namebuf, "^(");
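
For the first pass only, a toy version of the pattern-to-regexp conversion; pattern_to_regex is hypothetical, covers just "*" and "?", assumes buf is large enough, and leaves the SQL-literal escaping to the later appendStringLiteralConn() step, as the comment says:

    #include <stdio.h>

    static void
    pattern_to_regex(const char *pattern, char *buf, size_t buflen)
    {
        size_t  n = 0;

        n += snprintf(buf + n, buflen - n, "^(");
        for (; *pattern; pattern++)
        {
            if (*pattern == '*')
                n += snprintf(buf + n, buflen - n, ".*");
            else if (*pattern == '?')
                n += snprintf(buf + n, buflen - n, ".");
            else
                n += snprintf(buf + n, buflen - n, "%c", *pattern);
        }
        snprintf(buf + n, buflen - n, ")$");
    }

    int
    main(void)
    {
        char    buf[256];

        pattern_to_regex("pg_t*", buf, sizeof(buf));
        puts(buf);              /* ^(pg_t.*)$ */
        return 0;
    }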
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 4914e2e9cf..fe930739af 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -563,8 +563,8 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
/*
* In parallel restore, if we created the table earlier in
* the run then we wrap the COPY in a transaction and
- * precede it with a TRUNCATE. If archiving is not on
- * this prevents WAL-logging the COPY. This obtains a
+ * precede it with a TRUNCATE. If archiving is not on
+ * this prevents WAL-logging the COPY. This obtains a
* speedup similar to that from using single_txn mode in
* non-parallel restores.
*/
@@ -2413,7 +2413,7 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user)
appendPQExpBuffer(cmd, "SET SESSION AUTHORIZATION ");
/*
- * SQL requires a string literal here. Might as well be correct.
+ * SQL requires a string literal here. Might as well be correct.
*/
if (user && *user)
appendStringLiteralAHX(cmd, user, AH);
@@ -2544,7 +2544,7 @@ _becomeUser(ArchiveHandle *AH, const char *user)
}
/*
- * Become the owner of the the given TOC entry object. If
+ * Become the owner of the given TOC entry object. If
* changes in ownership are not allowed, this doesn't do anything.
*/
static void
@@ -3233,7 +3233,7 @@ restore_toc_entries_parallel(ArchiveHandle *AH)
}
/*
- * Now close parent connection in prep for parallel steps. We do this
+ * Now close parent connection in prep for parallel steps. We do this
* mainly to ensure that we don't exceed the specified number of parallel
* connections.
*/
@@ -3733,7 +3733,7 @@ fix_dependencies(ArchiveHandle *AH)
/*
* For some of the steps here, it is convenient to have an array that
* indexes the TOC entries by dump ID, rather than searching the TOC list
- * repeatedly. Entries for dump IDs not present in the TOC will be NULL.
+ * repeatedly. Entries for dump IDs not present in the TOC will be NULL.
*
* NOTE: because maxDumpId is just the highest dump ID defined in the
* archive, there might be dependencies for IDs > maxDumpId. All uses
@@ -3759,7 +3759,7 @@ fix_dependencies(ArchiveHandle *AH)
* Note: currently, a TABLE DATA should always have exactly one
* dependency, on its TABLE item. So we don't bother to search, but look
* just at the first dependency. We take the trouble to make sure that it's a
- * TABLE, if possible. However, if the dependency isn't in the archive
+ * TABLE, if possible. However, if the dependency isn't in the archive
* then just assume it was a TABLE; this is to cover cases where the table
* was suppressed but we have the data and some dependent post-data items.
*/
@@ -3809,7 +3809,7 @@ fix_dependencies(ArchiveHandle *AH)
/*
* It is possible that the dependencies list items that are not in the
- * archive at all. Subtract such items from the depCounts.
+ * archive at all. Subtract such items from the depCounts.
*/
for (te = AH->toc->next; te != AH->toc; te = te->next)
{
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 3bfcaf8db0..236f7e2e96 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -71,7 +71,7 @@ _check_database_version(ArchiveHandle *AH)
/*
* Reconnect to the server. If dbname is not NULL, use that database,
* else the one associated with the archive handle. If username is
- * not NULL, use that user name, else the one from the handle. If
+ * not NULL, use that user name, else the one from the handle. If
* both the database and the user match the existing connection already,
* nothing will be done.
*
@@ -112,7 +112,7 @@ ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username)
*
* Note: it's not really all that sensible to use a single-entry password
* cache if the username keeps changing. In current usage, however, the
- * username never does change, so one savedPassword is sufficient. We do
+ * username never does change, so one savedPassword is sufficient. We do
* update the cache on the off chance that the password has changed since the
* start of the run.
*/
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index b1c4f8d12e..77ba04b435 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1007,7 +1007,7 @@ selectDumpableTable(TableInfo *tbinfo)
* Mark a type as to be dumped or not
*
* If it's a table's rowtype or an autogenerated array type, we also apply a
- * special type code to facilitate sorting into the desired order. (We don't
+ * special type code to facilitate sorting into the desired order. (We don't
* want to consider those to be ordinary types because that would bring tables
* up into the datatype part of the dump order.) We still set the object's
* dump flag; that's not going to cause the dummy type to be dumped, but we
@@ -1315,7 +1315,7 @@ dumpTableData_insert(Archive *fout, void *dcontext)
/*
* These types are printed without quotes unless
* they contain values that aren't accepted by the
- * scanner unquoted (e.g., 'NaN'). Note that
+ * scanner unquoted (e.g., 'NaN'). Note that
* strtod() and friends might accept NaN, so we
* can't use that to test.
*
@@ -1510,7 +1510,7 @@ getTableDataFKConstraints(void)
/*
* guessConstraintInheritance:
* In pre-8.4 databases, we can't tell for certain which constraints
- * are inherited. We assume a CHECK constraint is inherited if its name
+ * are inherited. We assume a CHECK constraint is inherited if its name
* matches the name of any constraint in the parent. Originally this code
* tried to compare the expression texts, but that can fail for various
* reasons --- for example, if the parent and child tables are in different
@@ -2244,7 +2244,7 @@ getNamespaces(int *numNamespaces)
* getNamespaces
*
* NB: for pre-7.3 source database, we use object OID to guess whether it's
- * a system object or not. In 7.3 and later there is no guessing, and we
+ * a system object or not. In 7.3 and later there is no guessing, and we
* don't use objoid at all.
*/
static NamespaceInfo *
@@ -2322,7 +2322,7 @@ getTypes(int *numTypes)
* auto-generated array type by checking the element type's typarray.
* (Before that the test is capable of generating false positives.) We
* still check for name beginning with '_', though, so as to avoid the
- * cost of the subselect probe for all standard types. This would have to
+ * cost of the subselect probe for all standard types. This would have to
* be revisited if the backend ever allows renaming of array types.
*/
@@ -2449,7 +2449,7 @@ getTypes(int *numTypes)
/*
* If it's a base type, make a DumpableObject representing a shell
- * definition of the type. We will need to dump that ahead of the I/O
+ * definition of the type. We will need to dump that ahead of the I/O
* functions for the type.
*
* Note: the shell type doesn't have a catId. You might think it
@@ -3892,7 +3892,7 @@ getIndexes(TableInfo tblinfo[], int numTables)
/*
* In pre-7.4 releases, indkeys may contain more entries than
* indnkeys says (since indnkeys will be 1 for a functional
- * index). We don't actually care about this case since we don't
+ * index). We don't actually care about this case since we don't
* examine indkeys except for indexes associated with PRIMARY and
* UNIQUE constraints, which are never functional indexes. But we
* have to allocate enough space to keep parseOidArray from
@@ -4983,7 +4983,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
/*
* Defaults on a VIEW must always be dumped as separate ALTER
- * TABLE commands. Defaults on regular tables are dumped as
+ * TABLE commands. Defaults on regular tables are dumped as
* part of the CREATE TABLE if possible, which it won't be
* if the column is not going to be emitted explicitly.
*/
@@ -5150,7 +5150,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
/*
* If the constraint is inherited, this will be detected later
- * (in pre-8.4 databases). We also detect later if the
+ * (in pre-8.4 databases). We also detect later if the
* constraint must be split out from the table definition.
*/
}
@@ -5858,7 +5858,7 @@ findComments(Archive *fout, Oid classoid, Oid objoid,
/*
* Pre-7.2, pg_description does not contain classoid, so collectComments
- * just stores a zero. If there's a collision on object OID, well, you
+ * just stores a zero. If there's a collision on object OID, well, you
* get duplicate comments.
*/
if (fout->remoteVersion < 70200)
@@ -6870,7 +6870,7 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
/*
* Note the lack of a DROP command for the shell type; any required DROP
- * is driven off the base type entry, instead. This interacts with
+ * is driven off the base type entry, instead. This interacts with
* _printTocEntry()'s use of the presence of a DROP command to decide
* whether an entry needs an ALTER OWNER command. We don't want to alter
* the shell type's owner immediately on creation; that should happen only
@@ -6903,7 +6903,7 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
*
* For some backwards compatibility with the older behavior, we forcibly
* dump a PL if its handler function (and validator if any) are in a
- * dumpable namespace. That case is not checked here.
+ * dumpable namespace. That case is not checked here.
*/
static bool
shouldDumpProcLangs(void)
@@ -7515,7 +7515,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
/*
* COST and ROWS are emitted only if present and not default, so as not to
- * break backwards-compatibility of the dump without need. Keep this code
+ * break backwards-compatibility of the dump without need. Keep this code
* in sync with the defaults in functioncmds.c.
*/
if (strcmp(procost, "0") != 0)
@@ -8294,7 +8294,7 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
* XXX RECHECK is gone as of 8.4, but we'll still print it if dumping
* an older server's opclass in which it is used. This is to avoid
* hard-to-detect breakage if a newer pg_dump is used to dump from an
- * older server and then reload into that old version. This can go
+ * older server and then reload into that old version. This can go
* away once 8.3 is so old as to not be of interest to anyone.
*/
appendPQExpBuffer(query, "SELECT amopstrategy, false AS amopreqcheck, "
@@ -8507,7 +8507,7 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
* XXX RECHECK is gone as of 8.4, but we'll still print it if dumping
* an older server's opclass in which it is used. This is to avoid
* hard-to-detect breakage if a newer pg_dump is used to dump from an
- * older server and then reload into that old version. This can go
+ * older server and then reload into that old version. This can go
* away once 8.3 is so old as to not be of interest to anyone.
*/
appendPQExpBuffer(query, "SELECT amopstrategy, false AS amopreqcheck, "
@@ -9706,7 +9706,7 @@ dumpUserMappings(Archive *fout,
* 'objCatId' is the catalog ID of the underlying object.
* 'objDumpId' is the dump ID of the underlying object.
* 'type' must be TABLE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, or TABLESPACE.
- * 'name' is the formatted name of the object. Must be quoted etc. already.
+ * 'name' is the formatted name of the object. Must be quoted etc. already.
* 'subname' is the formatted name of the sub-object, if any. Must be quoted.
* 'tag' is the tag for the archive entry (typ. unquoted name of object).
* 'nspname' is the namespace the object is in (NULL if none).
@@ -9774,7 +9774,7 @@ dumpTable(Archive *fout, TableInfo *tbinfo)
tbinfo->relacl);
/*
- * Handle column ACLs, if any. Note: we pull these with a separate
+ * Handle column ACLs, if any. Note: we pull these with a separate
* query rather than trying to fetch them during getTableAttrs, so
* that we won't miss ACLs on system columns.
*/
@@ -10364,7 +10364,7 @@ dumpIndex(Archive *fout, IndxInfo *indxinfo)
/*
* If there's an associated constraint, don't dump the index per se, but
- * do dump any comment for it. (This is safe because dependency ordering
+ * do dump any comment for it. (This is safe because dependency ordering
* will have ensured the constraint is emitted first.) Note that the
* emitted comment has to be shown as depending on the constraint, not
* the index, in such cases.
@@ -10697,7 +10697,7 @@ findLastBuiltinOid_V71(const char *dbname)
* find the last built in oid
*
* For 7.0, we do this by assuming that the last thing that initdb does is to
- * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
+ * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
* initdb won't be changing anymore, it'll do.
*/
static Oid
@@ -10854,7 +10854,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
/*
* Versions before 8.4 did not remember the true start value. If
* is_called is false then the sequence has never been incremented
- * so we can use last_val. Otherwise punt and let it default.
+ * so we can use last_val. Otherwise punt and let it default.
*/
if (!called)
appendPQExpBuffer(query, " START WITH %s\n", last);
@@ -11386,7 +11386,7 @@ getDependencies(void)
/*
* Ordinarily, table rowtypes have implicit dependencies on their
- * tables. However, for a composite type the implicit dependency goes
+ * tables. However, for a composite type the implicit dependency goes
* the other way in pg_depend; which is the right thing for DROP but
* it doesn't produce the dependency ordering we need. So in that one
* case, we reverse the direction of the dependency.
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 10ce6729a2..642997e6bd 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -76,10 +76,10 @@ typedef struct SimpleStringList
*
* NOTE: the structures described here live for the entire pg_dump run;
* and in most cases we make a struct for every object we can find in the
- * catalogs, not only those we are actually going to dump. Hence, it's
+ * catalogs, not only those we are actually going to dump. Hence, it's
* best to store a minimal amount of per-object info in these structs,
* and retrieve additional per-object info when and if we dump a specific
- * object. In particular, try to avoid retrieving expensive-to-compute
+ * object. In particular, try to avoid retrieving expensive-to-compute
* information until it's known to be needed. We do, however, have to
* store enough info to determine whether an object should be dumped and
* what order to dump in.
@@ -323,7 +323,7 @@ typedef struct _triggerInfo
} TriggerInfo;
/*
- * struct ConstraintInfo is used for all constraint types. However we
+ * struct ConstraintInfo is used for all constraint types. However we
* use a different objType for foreign key constraints, to make it easier
* to sort them the way we want.
*/
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 541fed7f78..0008943236 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -21,7 +21,7 @@ static const char *modulename = gettext_noop("sorter");
/*
* Sort priority for object types when dumping a pre-7.3 database.
* Objects are sorted by priority levels, and within an equal priority level
- * by OID. (This is a relatively crude hack to provide semi-reasonable
+ * by OID. (This is a relatively crude hack to provide semi-reasonable
* behavior for old databases without full dependency info.) Note: text
* search and foreign-data objects can't really happen here, so the rather
* bogus priorities for them don't matter.
@@ -226,11 +226,11 @@ sortDumpableObjects(DumpableObject **objs, int numObjs)
* TopoSort -- topological sort of a dump list
*
* Generate a re-ordering of the dump list that satisfies all the dependency
- * constraints shown in the dump list. (Each such constraint is a fact of a
+ * constraints shown in the dump list. (Each such constraint is a fact of a
* partial ordering.) Minimize rearrangement of the list not needed to
* achieve the partial ordering.
*
- * The input is the list of numObjs objects in objs[]. This list is not
+ * The input is the list of numObjs objects in objs[]. This list is not
* modified.
*
* Returns TRUE if able to build an ordering that satisfies all the
@@ -273,7 +273,7 @@ TopoSort(DumpableObject **objs,
* linked list of items-ready-to-output as Knuth does, we maintain a heap
* of their item numbers, which we can use as a priority queue. This
* turns the algorithm from O(N) to O(N log N) because each insertion or
- * removal of a heap item takes O(log N) time. However, that's still
+ * removal of a heap item takes O(log N) time. However, that's still
* plenty fast enough for this application.
*/
@@ -337,9 +337,9 @@ TopoSort(DumpableObject **objs,
}
/*--------------------
- * Now emit objects, working backwards in the output list. At each step,
+ * Now emit objects, working backwards in the output list. At each step,
* we use the priority heap to select the last item that has no remaining
- * before-constraints. We remove that item from the heap, output it to
+ * before-constraints. We remove that item from the heap, output it to
* ordering[], and decrease the beforeConstraints count of each of the
* items it was constrained against. Whenever an item's beforeConstraints
* count is thereby decreased to zero, we insert it into the priority heap
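
A self-contained sketch of the priority-heap trick: a max-heap of ready item numbers makes "emit the last ready item" O(log N) per step. heap_push and heap_pop_max are toy names, not pg_dump_sort.c's code (which also maintains the beforeConstraints counts):

    #include <stdio.h>

    static void
    heap_sift_down(int *heap, int len, int i)
    {
        for (;;)
        {
            int     largest = i, l = 2 * i + 1, r = 2 * i + 2;

            if (l < len && heap[l] > heap[largest]) largest = l;
            if (r < len && heap[r] > heap[largest]) largest = r;
            if (largest == i)
                break;
            int     tmp = heap[i]; heap[i] = heap[largest]; heap[largest] = tmp;
            i = largest;
        }
    }

    static void
    heap_push(int *heap, int *len, int val)
    {
        int     i = (*len)++;

        heap[i] = val;
        while (i > 0 && heap[(i - 1) / 2] < heap[i])
        {
            int     p = (i - 1) / 2;
            int     tmp = heap[i]; heap[i] = heap[p]; heap[p] = tmp;
            i = p;
        }
    }

    static int
    heap_pop_max(int *heap, int *len)
    {
        int     top = heap[0];

        heap[0] = heap[--(*len)];
        heap_sift_down(heap, *len, 0);
        return top;
    }

    int
    main(void)
    {
        int     heap[8], len = 0;

        heap_push(heap, &len, 3);
        heap_push(heap, &len, 7);
        heap_push(heap, &len, 5);
        printf("%d\n", heap_pop_max(heap, &len));   /* 7 */
        return 0;
    }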
@@ -467,7 +467,7 @@ removeHeapElement(int *heap, int heapLength)
* before trying TopoSort again. We can safely repair loops that are
* disjoint (have no members in common); if we find overlapping loops
* then we repair only the first one found, because the action taken to
- * repair the first might have repaired the other as well. (If not,
+ * repair the first might have repaired the other as well. (If not,
* we'll fix it on the next go-round.)
*
* objs[] lists the objects TopoSort couldn't sort
@@ -954,7 +954,7 @@ repairDependencyLoop(DumpableObject **loop,
/*
* If all the objects are TABLE_DATA items, what we must have is a
* circular set of foreign key constraints (or a single self-referential
- * table). Print an appropriate complaint and break the loop arbitrarily.
+ * table). Print an appropriate complaint and break the loop arbitrarily.
*/
for (i = 0; i < nLoop; i++)
{
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 5e5472fb5f..3895c64f6b 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -459,9 +459,9 @@ main(int argc, char *argv[])
if (!data_only)
{
/*
- * If asked to --clean, do that first. We can avoid detailed
+ * If asked to --clean, do that first. We can avoid detailed
* dependency analysis because databases never depend on each other,
- * and tablespaces never depend on each other. Roles could have
+ * and tablespaces never depend on each other. Roles could have
* grants to each other, but DROP ROLE will clean those up silently.
*/
if (output_clean)
@@ -1094,7 +1094,7 @@ dumpCreateDB(PGconn *conn)
* commands for just those databases with values different from defaults.
*
* We consider template0's encoding and locale (or, pre-7.1, template1's)
- * to define the installation default. Pre-8.4 installations do not have
+ * to define the installation default. Pre-8.4 installations do not have
* per-database locale settings; for them, every database must necessarily
* be using the installation default, so there's no need to do anything
* (which is good, since in very old versions there is no good way to find
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index 5f3de38d07..4560a2c7e7 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -31,7 +31,7 @@
/*
* We have to use postgres.h not postgres_fe.h here, because there's so much
* backend-only stuff in the XLOG include files we need. But we need a
- * frontend-ish environment otherwise. Hence this ugly hack.
+ * frontend-ish environment otherwise. Hence this ugly hack.
*/
#define FRONTEND 1
@@ -681,7 +681,7 @@ FindEndOfXLOG(void)
/*
* Initialize the max() computation using the last checkpoint address from
- * old pg_control. Note that for the moment we are working with segment
+ * old pg_control. Note that for the moment we are working with segment
* numbering according to the old xlog seg size.
*/
newXlogId = ControlFile.checkPointCopy.redo.xlogid;
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 1b51bbe6bf..f7f99ef2fc 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -326,7 +326,7 @@ ConnectionUp(void)
* see if it can be restored.
*
* Returns true if either the connection was still there, or it could be
- * restored successfully; false otherwise. If, however, there was no
+ * restored successfully; false otherwise. If, however, there was no
* connection and the session is non-interactive, this will exit the program
* with a code of EXIT_BADCONN.
*/
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index 045ad92727..d4828bafa4 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -407,7 +407,7 @@ error:
/*
- * Handle one of the "string" options of COPY. If the user gave a quoted
+ * Handle one of the "string" options of COPY. If the user gave a quoted
* string, pass it to the backend as-is; if it wasn't quoted then quote
* and escape it.
*/
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 1f8acb53cb..9f416548eb 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -3206,7 +3206,7 @@ listUserMappings(const char *pattern, bool verbose)
* printACLColumn
*
* Helper function for consistently formatting ACL (privilege) columns.
- * The proper targetlist entry is appended to buf. Note lack of any
+ * The proper targetlist entry is appended to buf. Note lack of any
* whitespace or comma decoration.
*/
static void
diff --git a/src/bin/psql/input.c b/src/bin/psql/input.c
index 829f229682..644f0d5be1 100644
--- a/src/bin/psql/input.c
+++ b/src/bin/psql/input.c
@@ -29,7 +29,7 @@ char *psql_history;
* Preserve newlines in saved queries by mapping '\n' to NL_IN_HISTORY
*
* It is assumed NL_IN_HISTORY will never be entered by the user
- * nor appear inside a multi-byte string. 0x00 is not properly
+ * nor appear inside a multi-byte string. 0x00 is not properly
* handled by the readline routines so it cannot be used
* for this purpose.
*/
@@ -151,7 +151,7 @@ pg_send_history(PQExpBuffer history_buf)
*
* Caller *must* have set up sigint_interrupt_jmp before calling.
*
- * Note: we re-use a static PQExpBuffer for each call. This is to avoid
+ * Note: we re-use a static PQExpBuffer for each call. This is to avoid
* leaking memory if interrupted by SIGINT.
*/
char *
diff --git a/src/bin/psql/large_obj.c b/src/bin/psql/large_obj.c
index e4da1d3e96..5819204fa7 100644
--- a/src/bin/psql/large_obj.c
+++ b/src/bin/psql/large_obj.c
@@ -47,7 +47,7 @@ print_lo_result(const char *fmt,...)
/*
- * Prepare to do a large-object operation. We *must* be inside a transaction
+ * Prepare to do a large-object operation. We *must* be inside a transaction
* block for all these operations, so start one if needed.
*
* Returns TRUE if okay, FALSE if failed. *own_transaction is set to indicate
diff --git a/src/bin/psql/mainloop.c b/src/bin/psql/mainloop.c
index e47f464d14..46171d7818 100644
--- a/src/bin/psql/mainloop.c
+++ b/src/bin/psql/mainloop.c
@@ -271,7 +271,7 @@ MainLoop(FILE *source)
* If we added a newline to query_buf, and nothing else has
* been inserted in query_buf by the lexer, then strip off the
* newline again. This avoids any change to query_buf when a
- * line contains only a backslash command. Also, in this
+ * line contains only a backslash command. Also, in this
* situation we force out any previous lines as a separate
* history entry; we don't want SQL and backslash commands
* intermixed in history if at all possible.
diff --git a/src/bin/psql/mbprint.c b/src/bin/psql/mbprint.c
index 72a860f450..489556791f 100644
--- a/src/bin/psql/mbprint.c
+++ b/src/bin/psql/mbprint.c
@@ -21,7 +21,7 @@
* To avoid version-skew problems, this file must not use declarations
* from pg_wchar.h: the encoding IDs we are dealing with are determined
* by the libpq.so we are linked with, and that might not match the
- * numbers we see at compile time. (If this file were inside libpq,
+ * numbers we see at compile time. (If this file were inside libpq,
* the problem would go away...)
*
* Hence, we have our own definition of pg_wchar, and we get the values
diff --git a/src/bin/psql/print.c b/src/bin/psql/print.c
index 8c4ff0cdb0..8a9bc34dbf 100644
--- a/src/bin/psql/print.c
+++ b/src/bin/psql/print.c
@@ -579,7 +579,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
* Optional optimized word wrap. Shrink columns with a high max/avg
* ratio. Slightly bias against wider columns. (Increases chance a
* narrow column will fit in its cell.) If available columns is
- * positive... and greater than the width of the unshrinkable column
+ * positive... and greater than the width of the unshrinkable column
* headers
*/
if (output_columns > 0 && output_columns >= total_header_width)
@@ -2011,7 +2011,7 @@ printTableAddCell(printTableContent *const content, const char *cell,
* strdup'd, so there is no need to keep the original footer string around.
*
* Footers are never translated by the function. If you want the footer
- * translated you must do so yourself, before calling printTableAddFooter. The
+ * translated you must do so yourself, before calling printTableAddFooter. The
* reason this works differently to headers and cells is that footers tend to
* be made up of individually translated components, rather than being
* translated as a whole.
@@ -2334,7 +2334,7 @@ setDecimalLocale(void)
/*
* Compute the byte distance to the end of the string or *target_width
- * display character positions, whichever comes first. Update *target_width
+ * display character positions, whichever comes first. Update *target_width
* to be the number of display character positions actually filled.
*/
static int
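
A toy version of that contract, assuming one byte per display column (the real code consults PQmblen/PQdsplen for multibyte characters); strlen_max_width_toy is illustrative only:

    #include <string.h>

    static int
    strlen_max_width_toy(const char *str, int *target_width)
    {
        int     len = (int) strlen(str);

        if (len < *target_width)
            *target_width = len;    /* string ended first: report columns filled */
        return *target_width;       /* byte distance == columns in this toy */
    }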
diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h
index 31b76e112d..465a1f52ba 100644
--- a/src/bin/psql/settings.h
+++ b/src/bin/psql/settings.h
@@ -94,7 +94,7 @@ typedef struct _psqlSettings
/*
* The remaining fields are set by assign hooks associated with entries in
- * "vars". They should not be set directly except by those hook
+ * "vars". They should not be set directly except by those hook
* functions.
*/
bool autocommit;
diff --git a/src/bin/psql/stringutils.c b/src/bin/psql/stringutils.c
index de2e11c544..b4cc00fea2 100644
--- a/src/bin/psql/stringutils.c
+++ b/src/bin/psql/stringutils.c
@@ -77,7 +77,7 @@ strtokx(const char *s,
/*
* We may need extra space to insert delimiter nulls for adjacent
- * tokens. 2X the space is a gross overestimate, but it's unlikely
+ * tokens. 2X the space is a gross overestimate, but it's unlikely
* that this code will be used on huge strings anyway.
*/
storage = pg_malloc(2 * strlen(s) + 1);
@@ -107,7 +107,7 @@ strtokx(const char *s,
{
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. We can just overwrite the next character if it
+ * returned token. We can just overwrite the next character if it
* happens to be in the whitespace set ... otherwise move over the
* rest of the string to make room. (This is why we allocated extra
* space above).
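
The insert-a-null trick reads like this in miniature; terminate_token is a hypothetical helper, and the memmove branch is exactly why the caller must over-allocate:

    #include <string.h>

    /* p points just past the token. If the next byte is whitespace it is
     * disposable, so clobber it; otherwise shift the tail right one byte
     * to make room for the terminator (needs one spare byte of storage). */
    static void
    terminate_token(char *p, const char *whitespace)
    {
        if (*p != '\0')
        {
            if (strchr(whitespace, *p))
                *p = '\0';
            else
            {
                memmove(p + 1, p, strlen(p) + 1);
                *p = '\0';
            }
        }
    }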
@@ -161,7 +161,7 @@ strtokx(const char *s,
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. See notes above.
+ * returned token. See notes above.
*/
if (*p != '\0')
{
@@ -184,7 +184,7 @@ strtokx(const char *s,
}
/*
- * Otherwise no quoting character. Scan till next whitespace, delimiter
+ * Otherwise no quoting character. Scan till next whitespace, delimiter
* or quote. NB: at this point, *start is known not to be '\0',
* whitespace, delim, or quote, so we will consume at least one character.
*/
@@ -210,7 +210,7 @@ strtokx(const char *s,
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. See notes above.
+ * returned token. See notes above.
*/
if (*p != '\0')
{
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 6af3f2f1ea..ecc49fae62 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -84,7 +84,7 @@ typedef struct SchemaQuery
/*
* Selection condition --- only rows meeting this condition are candidates
- * to display. If catname mentions multiple tables, include the necessary
+ * to display. If catname mentions multiple tables, include the necessary
* join condition here. For example, "c.relkind = 'r'". Write NULL (not
* an empty string) if not needed.
*/
@@ -352,7 +352,7 @@ static const SchemaQuery Query_for_list_of_views = {
* restricted to names matching a partially entered name. In these queries,
* the first %s will be replaced by the text entered so far (suitably escaped
* to become a SQL literal string). %d will be replaced by the length of the
- * string (in unescaped form). A second and third %s, if present, will be
+ * string (in unescaped form). A second and third %s, if present, will be
* replaced by a suitably-escaped version of the string provided in
* completion_info_charp. A fourth and fifth %s are similarly replaced by
* completion_info_charp2.
@@ -2499,7 +2499,7 @@ _complete_from_query(int is_schema_query, const char *text, int state)
/*
* When fetching relation names, suppress system catalogs unless
- * the input-so-far begins with "pg_". This is a compromise
+ * the input-so-far begins with "pg_". This is a compromise
* between not offering system catalogs for completion at all, and
* having them swamp the result when the input is just "p".
*/
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index 763594455c..00c59e1f9a 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -226,7 +226,7 @@ executeMaintenanceCommand(PGconn *conn, const char *query, bool echo)
}
/*
- * "Safe" wrapper around strdup(). Pulled from psql/common.c
+ * "Safe" wrapper around strdup(). Pulled from psql/common.c
*/
char *
pg_strdup(const char *string)
@@ -248,7 +248,7 @@ pg_strdup(const char *string)
}
/*
- * Check yes/no answer in a localized way. 1=yes, 0=no, -1=neither.
+ * Check yes/no answer in a localized way. 1=yes, 0=no, -1=neither.
*/
/* translator: abbreviation for "yes" */
diff --git a/src/include/access/attnum.h b/src/include/access/attnum.h
index 04825b6bb8..4d57b8ca9c 100644
--- a/src/include/access/attnum.h
+++ b/src/include/access/attnum.h
@@ -16,7 +16,7 @@
/*
- * user defined attribute numbers start at 1. -ay 2/95
+ * user defined attribute numbers start at 1. -ay 2/95
*/
typedef int16 AttrNumber;
diff --git a/src/include/access/gin.h b/src/include/access/gin.h
index c3b55027e9..35e16757fd 100644
--- a/src/include/access/gin.h
+++ b/src/include/access/gin.h
@@ -29,7 +29,7 @@
/*
* Max depth allowed in search tree during bulk inserts. This is to keep from
* degenerating to O(N^2) behavior when the tree is unbalanced due to sorted
- * or nearly-sorted input. (Perhaps it would be better to use a balanced-tree
+ * or nearly-sorted input. (Perhaps it would be better to use a balanced-tree
* algorithm, but in common cases that would only add useless overhead.)
*/
#define GIN_MAX_TREE_DEPTH 100
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 7d02f28add..9a5fa56711 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -184,7 +184,7 @@ typedef HashMetaPageData *HashMetaPage;
#define ALL_SET ((uint32) ~0)
/*
- * Bitmap pages do not contain tuples. They do contain the standard
+ * Bitmap pages do not contain tuples. They do contain the standard
* page headers and trailers; however, everything in between is a
* giant bit array. The number of bits that fit on a page obviously
* depends on the page size and the header/trailer overhead. We require
diff --git a/src/include/access/htup.h b/src/include/access/htup.h
index a10550c32c..0346deac9d 100644
--- a/src/include/access/htup.h
+++ b/src/include/access/htup.h
@@ -69,7 +69,7 @@
*
* We store five "virtual" fields Xmin, Cmin, Xmax, Cmax, and Xvac in three
* physical fields. Xmin and Xmax are always really stored, but Cmin, Cmax
- * and Xvac share a field. This works because we know that Cmin and Cmax
+ * and Xvac share a field. This works because we know that Cmin and Cmax
* are only interesting for the lifetime of the inserting and deleting
* transaction respectively. If a tuple is inserted and deleted in the same
* transaction, we store a "combo" command id that can be mapped to the real
@@ -81,7 +81,7 @@
* in-progress or delete-in-progress tuple.)
*
* A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid
- * is initialized with its own TID (location). If the tuple is ever updated,
+ * is initialized with its own TID (location). If the tuple is ever updated,
* its t_ctid is changed to point to the replacement version of the tuple.
* Thus, a tuple is the latest version of its row iff XMAX is invalid or
* t_ctid points to itself (in which case, if XMAX is valid, the tuple is
@@ -96,10 +96,10 @@
* check fails, one may assume that there is no live descendant version.
*
* Following the fixed header fields, the nulls bitmap is stored (beginning
- * at t_bits). The bitmap is *not* stored if t_infomask shows that there
+ * at t_bits). The bitmap is *not* stored if t_infomask shows that there
* are no nulls in the tuple. If an OID field is present (as indicated by
* t_infomask), then it is stored just before the user data, which begins at
- * the offset shown by t_hoff. Note that t_hoff must be a multiple of
+ * the offset shown by t_hoff. Note that t_hoff must be a multiple of
* MAXALIGN.
*/
@@ -196,7 +196,7 @@ typedef HeapTupleHeaderData *HeapTupleHeader;
/*
* HeapTupleHeader accessor macros
*
- * Note: beware of multiple evaluations of "tup" argument. But the Set
+ * Note: beware of multiple evaluations of "tup" argument. But the Set
* macros evaluate their other argument only once.
*/
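
A two-minute demonstration of why that warning matters; TOY_GET_FLAG and ToyTuple are invented for the example:

    #include <stdio.h>

    #define TOY_GET_FLAG(tup)  (((tup)->infomask & 0x1) ? (tup)->xmin : 0)

    typedef struct { unsigned xmin; unsigned infomask; } ToyTuple;

    int
    main(void)
    {
        ToyTuple    tuples[2] = {{7, 1}, {9, 1}};
        ToyTuple   *p = tuples;

        /* After expansion, p++ runs twice: the flag test reads tuples[0]
         * but the value comes from tuples[1], and p ends up advanced by
         * two. Never pass an argument with side effects to such macros. */
        printf("%u\n", TOY_GET_FLAG(p++));  /* prints 9, not 7 */
        return 0;
    }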
@@ -401,7 +401,7 @@ do { \
* MinimalTuple is an alternative representation that is used for transient
* tuples inside the executor, in places where transaction status information
* is not required, the tuple rowtype is known, and shaving off a few bytes
- * is worthwhile because we need to store many tuples. The representation
+ * is worthwhile because we need to store many tuples. The representation
* is chosen so that tuple access routines can work with either full or
* minimal tuples via a HeapTupleData pointer structure. The access routines
* see no difference, except that they must not access the transaction status
@@ -425,7 +425,7 @@ do { \
* the MINIMAL_TUPLE_OFFSET distance. t_len does not include that, however.
*
* MINIMAL_TUPLE_DATA_OFFSET is the offset to the first useful (non-pad) data
- * other than the length word. tuplesort.c and tuplestore.c use this to avoid
+ * other than the length word. tuplesort.c and tuplestore.c use this to avoid
* writing the padding to disk.
*/
#define MINIMAL_TUPLE_OFFSET \
@@ -477,12 +477,12 @@ typedef MinimalTupleData *MinimalTuple;
* This is the output format of heap_form_tuple and related routines.
*
* * Separately allocated tuple: t_data points to a palloc'd chunk that
- * is not adjacent to the HeapTupleData. (This case is deprecated since
+ * is not adjacent to the HeapTupleData. (This case is deprecated since
* it's difficult to tell apart from case #1. It should be used only in
* limited contexts where the code knows that case #1 will never apply.)
*
* * Separately allocated minimal tuple: t_data points MINIMAL_TUPLE_OFFSET
- * bytes before the start of a MinimalTuple. As with the previous case,
+ * bytes before the start of a MinimalTuple. As with the previous case,
* this can't be told apart from case #1 by inspection; code setting up
* or destroying this representation has to know what it's doing.
*
@@ -575,7 +575,7 @@ typedef HeapTupleData *HeapTuple;
*/
#define XLOG_HEAP_INIT_PAGE 0x80
/*
- * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
+ * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
* are associated with RM_HEAP2_ID, but are not logically different from
* the ones above associated with RM_HEAP_ID. We apply XLOG_HEAP_OPMASK,
* although currently XLOG_HEAP_INIT_PAGE is not used for any of these.
@@ -664,7 +664,7 @@ typedef struct xl_heap_update
* should be interpreted as physically moving the "to" item pointer to the
* "from" slot, rather than placing a redirection item in the "from" slot.
* The moved pointers should be replaced by LP_UNUSED items (there will not
- * be explicit entries in the "now-unused" list for this). Also, the
+ * be explicit entries in the "now-unused" list for this). Also, the
* HEAP_ONLY bit in the moved tuples must be turned off.
*/
typedef struct xl_heap_clean
@@ -791,7 +791,7 @@ extern Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
* and set *isnull == true. Otherwise, we set *isnull == false.
*
* <tup> is the pointer to the heap tuple. <attnum> is the attribute
- * number of the column (field) caller wants. <tupleDesc> is a
+ * number of the column (field) caller wants. <tupleDesc> is a
* pointer to the structure describing the row and all its fields.
* ----------------
*/
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index 8527de65c9..23603a1594 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -22,7 +22,7 @@
/*
* Index tuple header structure
*
- * All index tuples start with IndexTupleData. If the HasNulls bit is set,
+ * All index tuples start with IndexTupleData. If the HasNulls bit is set,
* this is followed by an IndexAttributeBitMapData. The index attribute
* values follow, beginning at a MAXALIGN boundary.
*
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 7656c488e1..ddaa255707 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -36,9 +36,9 @@ typedef uint16 BTCycleId;
* and status. If the page is deleted, we replace the level with the
* next-transaction-ID value indicating when it is safe to reclaim the page.
*
- * We also store a "vacuum cycle ID". When a page is split while VACUUM is
+ * We also store a "vacuum cycle ID". When a page is split while VACUUM is
* processing the index, a nonzero value associated with the VACUUM run is
- * stored into both halves of the split page. (If VACUUM is not running,
+ * stored into both halves of the split page. (If VACUUM is not running,
* both pages receive zero cycleids.) This allows VACUUM to detect whether
* a page was split since it started, with a small probability of false match
* if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
@@ -75,7 +75,7 @@ typedef BTPageOpaqueData *BTPageOpaque;
#define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DEAD tuples */
/*
- * The max allowed value of a cycle ID is a bit less than 64K. This is
+ * The max allowed value of a cycle ID is a bit less than 64K. This is
* for convenience of pg_filedump and similar utilities: we want to use
* the last 2 bytes of special space as an index type indicator, and
* restricting cycle ID lets btree use that space for vacuum cycle IDs
@@ -268,9 +268,9 @@ typedef struct xl_btree_insert
* Note: the four XLOG_BTREE_SPLIT xl_info codes all use this data record.
* The _L and _R variants indicate whether the inserted tuple went into the
* left or right split page (and thus, whether newitemoff and the new item
- * are stored or not). The _ROOT variants indicate that we are splitting
+ * are stored or not). The _ROOT variants indicate that we are splitting
* the root page, and thus that a newroot record rather than an insert or
- * split record should follow. Note that a split record never carries a
+ * split record should follow. Note that a split record never carries a
* metapage update --- we'll do that in the parent-level update.
*/
typedef struct xl_btree_split
@@ -283,13 +283,13 @@ typedef struct xl_btree_split
OffsetNumber firstright; /* first item moved to right page */
/*
- * If level > 0, BlockIdData downlink follows. (We use BlockIdData rather
+ * If level > 0, BlockIdData downlink follows. (We use BlockIdData rather
* than BlockNumber for alignment reasons: SizeOfBtreeSplit is only 16-bit
* aligned.)
*
* If level > 0, an IndexTuple representing the HIKEY of the left page
* follows. We don't need this on leaf pages, because it's the same as
- * the leftmost key in the new right page. Also, it's suppressed if
+ * the leftmost key in the new right page. Also, it's suppressed if
* XLogInsert chooses to store the left page's whole page image.
*
* In the _L variants, next are OffsetNumber newitemoff and the new item.
@@ -320,7 +320,7 @@ typedef struct xl_btree_delete
/*
* This is what we need to know about deletion of a btree page. The target
* identifies the tuple removed from the parent page (note that we remove
- * this tuple's downlink and the *following* tuple's key). Note we do not
+ * this tuple's downlink and the *following* tuple's key). Note we do not
* store any content for the deleted page --- it is just rewritten as empty
* during recovery.
*/
@@ -404,7 +404,7 @@ typedef BTStackData *BTStack;
* BTScanOpaqueData is the btree-private state needed for an indexscan.
* This consists of preprocessed scan keys (see _bt_preprocess_keys() for
* details of the preprocessing), information about the current location
- * of the scan, and information about the marked location, if any. (We use
+ * of the scan, and information about the marked location, if any. (We use
* BTScanPosData to represent the data needed for each of current and marked
* locations.) In addition we can remember some known-killed index entries
* that must be marked before we can move off the current page.
@@ -412,9 +412,9 @@ typedef BTStackData *BTStack;
* Index scans work a page at a time: we pin and read-lock the page, identify
* all the matching items on the page and save them in BTScanPosData, then
* release the read-lock while returning the items to the caller for
- * processing. This approach minimizes lock/unlock traffic. Note that we
+ * processing. This approach minimizes lock/unlock traffic. Note that we
* keep the pin on the index page until the caller is done with all the items
- * (this is needed for VACUUM synchronization, see nbtree/README). When we
+ * (this is needed for VACUUM synchronization, see nbtree/README). When we
* are ready to step to the next page, if the caller has told us any of the
* items were killed, we re-lock the page to mark them killed, then unlock.
* Finally we drop the pin and step to the next page in the appropriate
@@ -489,7 +489,7 @@ typedef BTScanOpaqueData *BTScanOpaque;
/*
* We use some private sk_flags bits in preprocessed scan keys. We're allowed
- * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
+ * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
* index's indoption[] array entry for the index attribute.
*/
#define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h
index fd4aaf049b..753bdf8e1d 100644
--- a/src/include/access/reloptions.h
+++ b/src/include/access/reloptions.h
@@ -191,7 +191,7 @@ typedef struct
* "base" is a pointer to the reloptions structure, and "offset" is an integer
* variable that must be initialized to sizeof(reloptions structure). This
* struct must have been allocated with enough space to hold any string option
- * present, including terminating \0 for every option. SET_VARSIZE() must be
+ * present, including terminating \0 for every option. SET_VARSIZE() must be
* called on the struct with this offset as the second argument, after all the
* string options have been processed.
*/
diff --git a/src/include/access/skey.h b/src/include/access/skey.h
index d295d3d12d..7e74aea9b9 100644
--- a/src/include/access/skey.h
+++ b/src/include/access/skey.h
@@ -42,7 +42,7 @@ typedef uint16 StrategyNumber;
/*
* A ScanKey represents the application of a comparison operator between
- * a table or index column and a constant. When it's part of an array of
+ * a table or index column and a constant. When it's part of an array of
* ScanKeys, the comparison conditions are implicitly ANDed. The index
* column is the left argument of the operator, if it's a binary operator.
* (The data structure can support unary indexable operators too; in that
@@ -95,7 +95,7 @@ typedef ScanKeyData *ScanKey;
* must be sorted according to the leading column number.
*
* The subsidiary ScanKey array appears in logical column order of the row
- * comparison, which may be different from index column order. The array
+ * comparison, which may be different from index column order. The array
* elements are like a normal ScanKey array except that:
* sk_flags must include SK_ROW_MEMBER, plus SK_ROW_END in the last
* element (needed since row header does not include a count)
diff --git a/src/include/access/transam.h b/src/include/access/transam.h
index b23a663c53..5b1a81f1f5 100644
--- a/src/include/access/transam.h
+++ b/src/include/access/transam.h
@@ -69,7 +69,7 @@
* using the OID generator. (We start the generator at 10000.)
*
* OIDs beginning at 16384 are assigned from the OID generator
- * during normal multiuser operation. (We force the generator up to
+ * during normal multiuser operation. (We force the generator up to
* 16384 as soon as we are in normal operation.)
*
* The choices of 10000 and 16384 are completely arbitrary, and can be moved
diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h
index ef48ae9119..df8abe5bb9 100644
--- a/src/include/access/tupdesc.h
+++ b/src/include/access/tupdesc.h
@@ -53,7 +53,7 @@ typedef struct tupleConstr
* TupleDesc; with the exception that tdhasoid indicates if OID is present.
*
* If the tupdesc is known to correspond to a named rowtype (such as a table's
- * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
+ * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
* tdtypeid is RECORDOID, and tdtypmod can be either -1 for a fully anonymous
* row type, or a value >= 0 to allow the rowtype to be looked up in the
* typcache.c type cache.
diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h
index 824adc9b7b..52c6ad5cb5 100644
--- a/src/include/access/tupmacs.h
+++ b/src/include/access/tupmacs.h
@@ -92,7 +92,7 @@
/*
* att_align_datum aligns the given offset as needed for a datum of alignment
- * requirement attalign and typlen attlen. attdatum is the Datum variable
+ * requirement attalign and typlen attlen. attdatum is the Datum variable
* we intend to pack into a tuple (it's only accessed if we are dealing with
* a varlena type). Note that this assumes the Datum will be stored as-is;
* callers that are intending to convert non-short varlena datums to short
@@ -110,7 +110,7 @@
* pointer; when accessing a varlena field we have to "peek" to see if we
* are looking at a pad byte or the first byte of a 1-byte-header datum.
* (A zero byte must be either a pad byte, or the first byte of a correctly
- * aligned 4-byte length word; in either case we can align safely. A non-zero
+ * aligned 4-byte length word; in either case we can align safely. A non-zero
* byte must be either a 1-byte length word, or the first byte of a correctly
* aligned 4-byte length word; in either case we need not align.)
*
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index 5d70bca59b..1f5ec73e08 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -62,7 +62,7 @@
/*
* When we store an oversize datum externally, we divide it into chunks
- * containing at most TOAST_MAX_CHUNK_SIZE data bytes. This number *must*
+ * containing at most TOAST_MAX_CHUNK_SIZE data bytes. This number *must*
* be small enough that the completed toast-table tuple (including the
* ID and sequence fields and all overhead) will fit on a page.
* The coding here sets the size on the theory that we want to fit
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 052a314d74..c260e22cbd 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -32,11 +32,11 @@
* where there can be zero to three backup blocks (as signaled by xl_info flag
* bits). XLogRecord structs always start on MAXALIGN boundaries in the WAL
* files, and we round up SizeOfXLogRecord so that the rmgr data is also
- * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
+ * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
* to align BkpBlock structs or backup block data.
*
* NOTE: xl_len counts only the rmgr data, not the XLogRecord header,
- * and also not any backup blocks. xl_tot_len counts everything. Neither
+ * and also not any backup blocks. xl_tot_len counts everything. Neither
* length field is rounded up to an alignment boundary.
*/
typedef struct XLogRecord
@@ -112,7 +112,7 @@ extern int sync_method;
* value (ignoring InvalidBuffer) appearing in the rdata chain.
*
* When buffer is valid, caller must set buffer_std to indicate whether the
- * page uses standard pd_lower/pd_upper header fields. If this is true, then
+ * page uses standard pd_lower/pd_upper header fields. If this is true, then
* XLOG is allowed to omit the free space between pd_lower and pd_upper from
* the backed-up page image. Note that even when buffer_std is false, the
* page MUST have an LSN field as its first eight bytes!
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index 508c2eeb8d..e7c365938b 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -50,7 +50,7 @@ typedef struct BkpBlock
/*
* When there is not enough space on current page for whole record, we
- * continue on the next page with continuation record. (However, the
+ * continue on the next page with continuation record. (However, the
* XLogRecord header will never be split across pages; if there's less than
* SizeOfXLogRecord space left at the end of a page, we just waste it.)
*
@@ -155,7 +155,7 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
* Compute ID and segment from an XLogRecPtr.
*
* For XLByteToSeg, do the computation at face value. For XLByteToPrevSeg,
- * a boundary byte is taken to be in the previous segment. This is suitable
+ * a boundary byte is taken to be in the previous segment. This is suitable
* for deciding which segment to write given a pointer to a record end,
* for example. (We can assume xrecoff is not zero, since no valid recptr
* can have that.)
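
A minimal sketch of the two computations, assuming 16 MB segments (the real macros also carry the log file id, and xrecoff is assumed nonzero for the "prev" form, per the note above):

    #include <stdint.h>

    #define XLOG_SEG_SIZE (16 * 1024 * 1024)    /* assumption */

    /* XLByteToSeg analogue: face-value segment number. */
    static uint32_t
    byte_to_seg(uint32_t xrecoff)
    {
        return xrecoff / XLOG_SEG_SIZE;
    }

    /* XLByteToPrevSeg analogue: a boundary byte counts in the
     * previous segment, e.g. for a pointer to a record end. */
    static uint32_t
    byte_to_prev_seg(uint32_t xrecoff)
    {
        return (xrecoff - 1) / XLOG_SEG_SIZE;
    }
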
diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h
index 2761deb476..6005b23de9 100644
--- a/src/include/access/xlogdefs.h
+++ b/src/include/access/xlogdefs.h
@@ -25,8 +25,8 @@
* NOTE: the "log file number" is somewhat misnamed, since the actual files
* making up the XLOG are much smaller than 4Gb. Each actual file is an
* XLogSegSize-byte "segment" of a logical log file having the indicated
- * xlogid. The log file number and segment number together identify a
- * physical XLOG file. Segment number and offset within the physical file
+ * xlogid. The log file number and segment number together identify a
+ * physical XLOG file. Segment number and offset within the physical file
* are computed from xrecoff div and mod XLogSegSize.
*/
typedef struct XLogRecPtr
@@ -70,7 +70,7 @@ typedef uint32 TimeLineID;
/*
* Because O_DIRECT bypasses the kernel buffers, and because we never
* read those buffers except during crash recovery, it is a win to use
- * it in all cases where we sync on each write(). We could allow O_DIRECT
+ * it in all cases where we sync on each write(). We could allow O_DIRECT
* with fsync(), but because skipping the kernel buffer forces writes out
* quickly, it seems best just to use it for O_SYNC. It is hard to imagine
* how fsync() could be a win for O_DIRECT compared to O_SYNC and O_DIRECT.
@@ -86,7 +86,7 @@ typedef uint32 TimeLineID;
/*
* This chunk of hackery attempts to determine which file sync methods
* are available on the current platform, and to choose an appropriate
- * default method. We assume that fsync() is always available, and that
+ * default method. We assume that fsync() is always available, and that
* configure determined whether fdatasync() is.
*/
#if defined(O_SYNC)
diff --git a/src/include/c.h b/src/include/c.h
index 36401259de..5d48c54100 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -36,7 +36,7 @@
* 8) system-specific hacks
*
* NOTE: since this file is included by both frontend and backend modules, it's
- * almost certainly wrong to put an "extern" declaration here. typedefs and
+ * almost certainly wrong to put an "extern" declaration here. typedefs and
* macros are the kind of thing that might go here.
*
*----------------------------------------------------------------
@@ -103,7 +103,7 @@
/*
* Use this to mark string constants as needing translation at some later
- * time, rather than immediately. This is useful for cases where you need
+ * time, rather than immediately. This is useful for cases where you need
* access to the original string and translated string, and for cases where
* immediate translation is not possible, like when initializing global
* variables.
@@ -389,7 +389,7 @@ typedef struct
* Variable-length datatypes all share the 'struct varlena' header.
*
* NOTE: for TOASTable types, this is an oversimplification, since the value
- * may be compressed or moved out-of-line. However datatype-specific routines
+ * may be compressed or moved out-of-line. However datatype-specific routines
* are mostly content to deal with de-TOASTed values only, and of course
* client-side routines should never see a TOASTed value. But even in a
* de-TOASTed value, beware of touching vl_len_ directly, as its representation
@@ -419,7 +419,7 @@ typedef struct varlena VarChar; /* var-length char, ie SQL varchar(n) */
/*
* Specialized array types. These are physically laid out just the same
* as regular arrays (so that the regular array subscripting code works
- * with them). They exist as distinct types mostly for historical reasons:
+ * with them). They exist as distinct types mostly for historical reasons:
* they have nonstandard I/O behavior which we don't want to change for fear
* of breaking applications that look at the system catalogs. There is also
* an implementation issue for oidvector: it's part of the primary key for
@@ -462,7 +462,7 @@ typedef NameData *Name;
/*
* Support macros for escaping strings. escape_backslash should be TRUE
- * if generating a non-standard-conforming string. Prefixing a string
+ * if generating a non-standard-conforming string. Prefixing a string
* with ESCAPE_STRING_SYNTAX guarantees it is non-standard-conforming.
* Beware of multiple evaluation of the "ch" argument!
*/
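
A sketch of the intended call pattern (the SQL_STR_DOUBLE test is reproduced below on the assumption it matches c.h; note that ch is evaluated twice, as the comment warns):

    #include <stdbool.h>
    #include <stdio.h>

    #define SQL_STR_DOUBLE(ch, escape_backslash) \
        ((ch) == '\'' || ((ch) == '\\' && (escape_backslash)))

    /* Emit src as a non-standard-conforming literal: E'...' with
     * quotes (and, if requested, backslashes) doubled. */
    static void
    emit_literal(const char *src, bool escape_backslash)
    {
        putchar('E');                 /* ESCAPE_STRING_SYNTAX prefix */
        putchar('\'');
        for (; *src; src++)
        {
            if (SQL_STR_DOUBLE(*src, escape_backslash))
                putchar(*src);        /* double the character */
            putchar(*src);
        }
        putchar('\'');
    }
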
@@ -591,7 +591,7 @@ typedef NameData *Name;
* datum) and add a null, do not do it with StrNCpy(..., len+1). That
* might seem to work, but it fetches one byte more than there is in the
* text object. One fine day you'll have a SIGSEGV because there isn't
- * another byte before the end of memory. Don't laugh, we've had real
+ * another byte before the end of memory. Don't laugh, we've had real
* live bug reports from real live users over exactly this mistake.
* Do it honestly with "memcpy(dst,src,len); dst[len] = '\0';", instead.
*/
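
The honest pattern, spelled out as a tiny helper (hypothetical name; palloc would replace malloc in backend code, and error handling is omitted):

    #include <stdlib.h>
    #include <string.h>

    /* Copy exactly len bytes from a non-null-terminated source,
     * then add the terminator ourselves. */
    static char *
    copy_text_to_cstring(const char *src, size_t len)
    {
        char *dst = malloc(len + 1);

        memcpy(dst, src, len);
        dst[len] = '\0';
        return dst;
    }
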
@@ -617,7 +617,7 @@ typedef NameData *Name;
* Exactly the same as standard library function memset(), but considerably
* faster for zeroing small word-aligned structures (such as parsetree nodes).
* This has to be a macro because the main point is to avoid function-call
- * overhead. However, we have also found that the loop is faster than
+ * overhead. However, we have also found that the loop is faster than
* native libc memset() on some platforms, even those with assembler
* memset() functions. More research needs to be done, perhaps with
* MEMSET_LOOP_LIMIT tests in configure.
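
Typical usage, for reference: a small, word-aligned struct and a constant zero fill, the case MemSet is tuned for (the struct is hypothetical; plain memset stands in so the sketch compiles outside the backend):

    #include <string.h>

    typedef struct NodeStub { int type; void *left; void *right; } NodeStub;

    static void
    init_stub(NodeStub *node)
    {
        /* Backend code would write MemSet(node, 0, sizeof(NodeStub)); */
        memset(node, 0, sizeof(NodeStub));
    }
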
@@ -747,7 +747,7 @@ typedef NameData *Name;
* Section 8: system-specific hacks
*
* This should be limited to things that absolutely have to be
- * included in every source file. The port-specific header file
+ * included in every source file. The port-specific header file
* is usually a better place for this sort of thing.
* ----------------------------------------------------------------
*/
@@ -756,7 +756,7 @@ typedef NameData *Name;
* NOTE: this is also used for opening text files.
* WIN32 treats Control-Z as EOF in files opened in text mode.
* Therefore, we open files in binary mode on Win32 so we can read
- * literal control-Z. The other affect is that we see CRLF, but
+ * literal control-Z. The other affect is that we see CRLF, but
+ * literal control-Z. The other effect is that we see CRLF, but
* that is OK because we can already handle those cleanly.
*/
#if defined(WIN32) || defined(__CYGWIN__)
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index dcd149d051..97d61cf167 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -4,7 +4,7 @@
* "Catalog version number" for PostgreSQL.
*
* The catalog version number is used to flag incompatible changes in
- * the PostgreSQL system catalogs. Whenever anyone changes the format of
+ * the PostgreSQL system catalogs. Whenever anyone changes the format of
* a system catalog relation, or adds, deletes, or modifies standard
* catalog entries in such a way that an updated backend wouldn't work
* with an old database (or vice versa), the catalog version number
diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h
index fe04aab964..1aad375f22 100644
--- a/src/include/catalog/dependency.h
+++ b/src/include/catalog/dependency.h
@@ -52,7 +52,7 @@
* DEPENDENCY_PIN ('p'): there is no dependent object; this type of entry
* is a signal that the system itself depends on the referenced object,
* and so that object must never be deleted. Entries of this type are
- * created only during initdb. The fields for the dependent object
+ * created only during initdb. The fields for the dependent object
* contain zeroes.
*
* Other dependency flavors may be needed in future.
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index ed9218c03a..775a44278e 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -19,7 +19,7 @@
/*
* This structure holds a list of possible functions or operators
- * found by namespace lookup. Each function/operator is identified
+ * found by namespace lookup. Each function/operator is identified
* by OID and by argument types; the list must be pruned by type
* resolution rules that are embodied in the parser, not here.
* See FuncnameGetCandidates's comments for more info.
diff --git a/src/include/catalog/pg_attrdef.h b/src/include/catalog/pg_attrdef.h
index d844223301..290bfda507 100644
--- a/src/include/catalog/pg_attrdef.h
+++ b/src/include/catalog/pg_attrdef.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_attrdef definition. cpp turns this into
+ * pg_attrdef definition. cpp turns this into
* typedef struct FormData_pg_attrdef
* ----------------
*/
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index b852a28cd5..efc3b725fa 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -15,7 +15,7 @@
* information from the DATA() statements.
*
* utils/cache/relcache.c requires hard-coded tuple descriptors
- * for some of the system catalogs. So if the schema for any of
+ * for some of the system catalogs. So if the schema for any of
* these changes, be sure and change the appropriate Schema_xxx
* macros! -cim 2/5/91
*
@@ -157,7 +157,7 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS
/*
* ATTRIBUTE_FIXED_PART_SIZE is the size of the fixed-layout,
- * guaranteed-not-null part of a pg_attribute row. This is in fact as much
+ * guaranteed-not-null part of a pg_attribute row. This is in fact as much
* of the row as gets copied into tuple descriptors, so don't expect you
* can access fields beyond attinhcount except in a real tuple!
*/
diff --git a/src/include/catalog/pg_authid.h b/src/include/catalog/pg_authid.h
index 660c3c7c14..2778491a2c 100644
--- a/src/include/catalog/pg_authid.h
+++ b/src/include/catalog/pg_authid.h
@@ -26,7 +26,7 @@
/*
* The CATALOG definition has to refer to the type of rolvaliduntil as
* "timestamptz" (lower case) so that bootstrap mode recognizes it. But
- * the C header files define this type as TimestampTz. Since the field is
+ * the C header files define this type as TimestampTz. Since the field is
* potentially-null and therefore can't be accessed directly from C code,
* there is no particular need for the C struct definition to show the
* field type as TimestampTz --- instead we just make it int.
diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h
index 69979cef21..efa909a4f7 100644
--- a/src/include/catalog/pg_constraint.h
+++ b/src/include/catalog/pg_constraint.h
@@ -37,7 +37,7 @@ CATALOG(pg_constraint,2606)
* relations. This is partly for backwards compatibility with past
* Postgres practice, and partly because we don't want to have to obtain a
* global lock to generate a globally unique name for a nameless
- * constraint. We associate a namespace with constraint names only for
+ * constraint. We associate a namespace with constraint names only for
* SQL92 compatibility.
*/
NameData conname; /* name of this constraint */
@@ -55,7 +55,7 @@ CATALOG(pg_constraint,2606)
/*
* contypid links to the pg_type row for a domain if this is a domain
- * constraint. Otherwise it's 0.
+ * constraint. Otherwise it's 0.
*
* For SQL-style global ASSERTIONs, both conrelid and contypid would be
* zero. This is not presently supported, however.
@@ -64,7 +64,7 @@ CATALOG(pg_constraint,2606)
/*
* These fields, plus confkey, are only meaningful for a foreign-key
- * constraint. Otherwise confrelid is 0 and the char fields are spaces.
+ * constraint. Otherwise confrelid is 0 and the char fields are spaces.
*/
Oid confrelid; /* relation referenced by foreign key */
char confupdtype; /* foreign key's ON UPDATE action */
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 0312509e65..3224e4550c 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -76,9 +76,9 @@ typedef struct ControlFileData
uint64 system_identifier;
/*
- * Version identifier information. Keep these fields at the same offset,
+ * Version identifier information. Keep these fields at the same offset,
* especially pg_control_version; they won't be real useful if they move
- * around. (For historical reasons they must be 8 bytes into the file
+ * around. (For historical reasons they must be 8 bytes into the file
* rather than immediately at the front.)
*
* pg_control_version identifies the format of pg_control itself.
diff --git a/src/include/catalog/pg_description.h b/src/include/catalog/pg_description.h
index 088c82b31f..30a7bff93b 100644
--- a/src/include/catalog/pg_description.h
+++ b/src/include/catalog/pg_description.h
@@ -6,12 +6,12 @@
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a function is identified by the OID of its pg_proc row
- * plus the pg_class OID of table pg_proc. This allows unique identification
+ * plus the pg_class OID of table pg_proc. This allows unique identification
* of objects without assuming that OIDs are unique across tables.
*
* Since attributes don't have OIDs of their own, we identify an attribute
* comment by the objoid+classoid of its parent table, plus an "objsubid"
- * giving the attribute column number. "objsubid" must be zero in a comment
+ * giving the attribute column number. "objsubid" must be zero in a comment
* for a table itself, so that it is distinct from any column comment.
* Currently, objsubid is unused and zero for all other kinds of objects,
* but perhaps it might be useful someday to associate comments with
@@ -39,7 +39,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_description definition. cpp turns this into
+ * pg_description definition. cpp turns this into
* typedef struct FormData_pg_description
* ----------------
*/
diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h
index e907cc6419..da9d360ee8 100644
--- a/src/include/catalog/pg_largeobject.h
+++ b/src/include/catalog/pg_largeobject.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_largeobject definition. cpp turns this into
+ * pg_largeobject definition. cpp turns this into
* typedef struct FormData_pg_largeobject
* ----------------
*/
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index 48d22609a7..7f4380e06c 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -17,10 +17,10 @@
* don't support partial indexes on system catalogs.)
*
* Normally opckeytype = InvalidOid (zero), indicating that the data stored
- * in the index is the same as the data in the indexed column. If opckeytype
+ * in the index is the same as the data in the indexed column. If opckeytype
* is nonzero then it indicates that a conversion step is needed to produce
* the stored index data, which will be of type opckeytype (which might be
- * the same or different from the input datatype). Performing such a
+ * the same or different from the input datatype). Performing such a
* conversion is the responsibility of the index access method --- not all
* AMs support this.
*
@@ -42,7 +42,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_opclass definition. cpp turns this into
+ * pg_opclass definition. cpp turns this into
* typedef struct FormData_pg_opclass
* ----------------
*/
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index d71623e667..211baed3b7 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -1718,7 +1718,7 @@ DESCR("exponential");
/*
* This form of obj_description is now deprecated, since it will fail if
- * OIDs are not unique across system catalogs. Use the other forms instead.
+ * OIDs are not unique across system catalogs. Use the other forms instead.
*/
DATA(insert OID = 1348 ( obj_description PGNSP PGUID 14 100 0 0 f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and objsubid = 0" _null_ _null_ _null_ ));
DESCR("get description for object id (deprecated)");
@@ -4716,7 +4716,7 @@ DESCR("fetch the Nth row value");
#define PROVOLATILE_VOLATILE 'v' /* can change even within a scan */
/*
- * Symbolic values for proargmodes column. Note that these must agree with
+ * Symbolic values for proargmodes column. Note that these must agree with
* the FunctionParameterMode enum in parsenodes.h; we declare them here to
* be accessible from either header.
*/
diff --git a/src/include/catalog/pg_rewrite.h b/src/include/catalog/pg_rewrite.h
index 4c43898873..d2150528d2 100644
--- a/src/include/catalog/pg_rewrite.h
+++ b/src/include/catalog/pg_rewrite.h
@@ -25,7 +25,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_rewrite definition. cpp turns this into
+ * pg_rewrite definition. cpp turns this into
* typedef struct FormData_pg_rewrite
* ----------------
*/
diff --git a/src/include/catalog/pg_shdepend.h b/src/include/catalog/pg_shdepend.h
index a13ce8dbaf..c2628c8e7a 100644
--- a/src/include/catalog/pg_shdepend.h
+++ b/src/include/catalog/pg_shdepend.h
@@ -33,7 +33,7 @@ CATALOG(pg_shdepend,1214) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
/*
* Identification of the dependent (referencing) object.
*
- * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can
+ * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can
* be zero to denote a shared object.
*/
Oid dbid; /* OID of database containing object */
diff --git a/src/include/catalog/pg_shdescription.h b/src/include/catalog/pg_shdescription.h
index 386f63051e..26959af3c8 100644
--- a/src/include/catalog/pg_shdescription.h
+++ b/src/include/catalog/pg_shdescription.h
@@ -7,7 +7,7 @@
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a database is identified by the OID of its pg_database row
- * plus the pg_class OID of table pg_database. This allows unique
+ * plus the pg_class OID of table pg_database. This allows unique
* identification of objects without assuming that OIDs are unique
* across tables.
*
@@ -32,7 +32,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_shdescription definition. cpp turns this into
+ * pg_shdescription definition. cpp turns this into
* typedef struct FormData_pg_shdescription
* ----------------
*/
diff --git a/src/include/catalog/pg_statistic.h b/src/include/catalog/pg_statistic.h
index edccf254b1..559e517644 100644
--- a/src/include/catalog/pg_statistic.h
+++ b/src/include/catalog/pg_statistic.h
@@ -48,7 +48,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
float4 stanullfrac;
/*
- * stawidth is the average width in bytes of non-null entries. For
+ * stawidth is the average width in bytes of non-null entries. For
* fixed-width datatypes this is of course the same as the typlen, but for
* var-width types it is more useful. Note that this is the average width
* of the data as actually stored, post-TOASTing (eg, for a
@@ -68,7 +68,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
* The special negative case allows us to cope with columns that are
* unique (stadistinct = -1) or nearly so (for example, a column in
* which values appear about twice on the average could be represented
- * by stadistinct = -0.5). Because the number-of-rows statistic in
+ * by stadistinct = -0.5). Because the number-of-rows statistic in
* pg_class may be updated more frequently than pg_statistic is, it's
* important to be able to describe such situations as a multiple of
* the number of rows, rather than a fixed number of distinct values.
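
The sign convention, written out (sketch; names hypothetical):

    /* Expected number of distinct values, given stadistinct and the
     * current row count from pg_class. */
    static double
    expected_distinct(double stadistinct, double reltuples)
    {
        if (stadistinct > 0)
            return stadistinct;               /* absolute count */
        if (stadistinct < 0)
            return -stadistinct * reltuples;  /* multiple of rows */
        return 0;                             /* 0 means unknown */
    }

    /* e.g. stadistinct = -0.5 over 10000 rows -> about 5000 distinct */
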
@@ -80,8 +80,8 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
/* ----------------
* To allow keeping statistics on different kinds of datatypes,
* we do not hard-wire any particular meaning for the remaining
- * statistical fields. Instead, we provide several "slots" in which
- * statistical data can be placed. Each slot includes:
+ * statistical fields. Instead, we provide several "slots" in which
+ * statistical data can be placed. Each slot includes:
* kind integer code identifying kind of data
* op OID of associated operator, if needed
* numbers float4 array (for statistical values)
@@ -167,15 +167,15 @@ typedef FormData_pg_statistic *Form_pg_statistic;
/*
* Currently, three statistical slot "kinds" are defined: most common values,
- * histogram, and correlation. Additional "kinds" will probably appear in
+ * histogram, and correlation. Additional "kinds" will probably appear in
* future to help cope with non-scalar datatypes. Also, custom data types
* can define their own "kind" codes by mutual agreement between a custom
* typanalyze routine and the selectivity estimation functions of the type's
* operators.
*
* Code reading the pg_statistic relation should not assume that a particular
- * data "kind" will appear in any particular slot. Instead, search the
- * stakind fields to see if the desired data is available. (The standard
+ * data "kind" will appear in any particular slot. Instead, search the
+ * stakind fields to see if the desired data is available. (The standard
* function get_attstatsslot() may be used for this.)
*/
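
A self-contained sketch of that search (get_attstatsslot() is the real interface; here the stakind fields are passed as an array, and STATISTIC_NUM_SLOTS is assumed to be 4 as in pg_statistic.h):

    #include <stdint.h>

    #define STATISTIC_NUM_SLOTS 4   /* assumption: matches pg_statistic.h */

    /* Return the slot index holding reqkind, or -1 if not available. */
    static int
    find_stat_slot(const int16_t stakind[STATISTIC_NUM_SLOTS],
                   int16_t reqkind)
    {
        for (int i = 0; i < STATISTIC_NUM_SLOTS; i++)
        {
            if (stakind[i] == reqkind)
                return i;
        }
        return -1;
    }
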
@@ -202,7 +202,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* the K most common non-null values appearing in the column, and stanumbers
* contains their frequencies (fractions of total row count). The values
* shall be ordered in decreasing frequency. Note that since the arrays are
- * variable-size, K may be chosen by the statistics collector. Values should
+ * variable-size, K may be chosen by the statistics collector. Values should
* not appear in MCV unless they have been observed to occur more than once;
* a unique column will have no MCV slot.
*/
@@ -214,13 +214,13 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* more than one histogram could appear, if a datatype has more than one
* useful sort operator.) stavalues contains M (>=2) non-null values that
* divide the non-null column data values into M-1 bins of approximately equal
- * population. The first stavalues item is the MIN and the last is the MAX.
+ * population. The first stavalues item is the MIN and the last is the MAX.
* stanumbers is not used and should be NULL. IMPORTANT POINT: if an MCV
* slot is also provided, then the histogram describes the data distribution
* *after removing the values listed in MCV* (thus, it's a "compressed
* histogram" in the technical parlance). This allows a more accurate
* representation of the distribution of a column with some very-common
- * values. In a column with only a few distinct values, it's possible that
+ * values. In a column with only a few distinct values, it's possible that
* the MCV list describes the entire data population; in this case the
* histogram reduces to empty and should be omitted.
*/
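
Back-of-envelope arithmetic for the shape described above (sketch; names hypothetical): M boundary values delimit M-1 bins of roughly equal population, over the rows left once NULLs and MCV-listed values are removed.

    /* Approximate rows per histogram bin. */
    static double
    rows_per_bin(double reltuples, double nullfrac, double mcvfrac, int m)
    {
        return reltuples * (1.0 - nullfrac - mcvfrac) / (m - 1);
    }

    /* e.g. 10000 rows, 10% NULL, 20% in the MCV list, M = 11
     * boundaries -> about 700 rows per bin */
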
@@ -231,7 +231,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* of table tuples and the ordering of data values of this column, as seen
* by the "<" operator identified by staop. (As with the histogram, more
* than one entry could theoretically appear.) stavalues is not used and
- * should be NULL. stanumbers contains a single entry, the correlation
+ * should be NULL. stanumbers contains a single entry, the correlation
* coefficient between the sequence of data values and the sequence of
* their actual tuple positions. The coefficient ranges from +1 to -1.
*/
@@ -240,7 +240,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
/*
* A "most common elements" slot is similar to a "most common values" slot,
* except that it stores the most common non-null *elements* of the column
- * values. This is useful when the column datatype is an array or some other
+ * values. This is useful when the column datatype is an array or some other
* type with identifiable elements (for instance, tsvector). staop contains
* the equality operator appropriate to the element type. stavalues contains
* the most common element values, and stanumbers their frequencies. Unlike
diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h
index 2f4274107f..55f8bbb20a 100644
--- a/src/include/catalog/pg_trigger.h
+++ b/src/include/catalog/pg_trigger.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_trigger definition. cpp turns this into
+ * pg_trigger definition. cpp turns this into
* typedef struct FormData_pg_trigger
*
* Note: when tgconstraint is nonzero, tgisconstraint must be true, and
diff --git a/src/include/catalog/pg_ts_dict.h b/src/include/catalog/pg_ts_dict.h
index 3920e37df0..3914e398ae 100644
--- a/src/include/catalog/pg_ts_dict.h
+++ b/src/include/catalog/pg_ts_dict.h
@@ -24,7 +24,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_ts_dict definition. cpp turns this into
+ * pg_ts_dict definition. cpp turns this into
* typedef struct FormData_pg_ts_dict
* ----------------
*/
diff --git a/src/include/catalog/pg_ts_template.h b/src/include/catalog/pg_ts_template.h
index 67c9573890..89488bce38 100644
--- a/src/include/catalog/pg_ts_template.h
+++ b/src/include/catalog/pg_ts_template.h
@@ -24,7 +24,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_ts_template definition. cpp turns this into
+ * pg_ts_template definition. cpp turns this into
* typedef struct FormData_pg_ts_template
* ----------------
*/
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index e6bbd68bd2..c0f7d906ab 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -41,7 +41,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
/*
* For a fixed-size type, typlen is the number of bytes we use to
- * represent a value of this type, e.g. 4 for an int4. But for a
+ * represent a value of this type, e.g. 4 for an int4. But for a
* variable-length type, typlen is negative. We use -1 to indicate a
* "varlena" type (one that has a length word), -2 to indicate a
* null-terminated C string.
@@ -50,7 +50,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
/*
* typbyval determines whether internal Postgres routines pass a value of
- * this type by value or by reference. typbyval had better be FALSE if
+ * this type by value or by reference. typbyval had better be FALSE if
* the length is not 1, 2, or 4 (or 8 on 8-byte-Datum machines).
* Variable-length types are always passed by reference. Note that
* typbyval can be false even if the length would allow pass-by-value;
@@ -70,7 +70,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
/*
* typcategory and typispreferred help the parser distinguish preferred
* and non-preferred coercions. The category can be any single ASCII
- * character (but not \0). The categories used for built-in types are
+ * character (but not \0). The categories used for built-in types are
* identified by the TYPCATEGORY macros below.
*/
char typcategory; /* arbitrary type classification */
@@ -79,7 +79,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
/*
* If typisdefined is false, the entry is only a placeholder (forward
- * reference). We know the type name, but not yet anything else about it.
+ * reference). We know the type name, but not yet anything else about it.
*/
bool typisdefined;
@@ -140,7 +140,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
* 'd' = DOUBLE alignment (8 bytes on many machines, but by no means all).
*
* See include/access/tupmacs.h for the macros that compute these
- * alignment requirements. Note also that we allow the nominal alignment
+ * alignment requirements. Note also that we allow the nominal alignment
* to be violated when storing "packed" varlenas; the TOAST mechanism
* takes care of hiding that from most code.
*
@@ -175,7 +175,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
/*
* Domains use typbasetype to show the base (or domain) type that the
- * domain is based on. Zero if the type is not a domain.
+ * domain is based on. Zero if the type is not a domain.
*/
Oid typbasetype;
diff --git a/src/include/commands/comment.h b/src/include/commands/comment.h
index f302bd6755..a2354f62fc 100644
--- a/src/include/commands/comment.h
+++ b/src/include/commands/comment.h
@@ -24,7 +24,7 @@
* related routines. CommentObject() implements the SQL "COMMENT ON"
* command. DeleteComments() deletes all comments for an object.
* CreateComments creates (or deletes, if comment is NULL) a comment
- * for a specific key. There are versions of these two methods for
+ * for a specific key. There are versions of these two methods for
* both normal and shared objects.
*------------------------------------------------------------------
*/
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 9012b4e2e3..1f83f40bf9 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -25,12 +25,12 @@
/*----------
* ANALYZE builds one of these structs for each attribute (column) that is
- * to be analyzed. The struct and subsidiary data are in anl_context,
+ * to be analyzed. The struct and subsidiary data are in anl_context,
* so they live until the end of the ANALYZE operation.
*
* The type-specific typanalyze function is passed a pointer to this struct
* and must return TRUE to continue analysis, FALSE to skip analysis of this
- * column. In the TRUE case it must set the compute_stats and minrows fields,
+ * column. In the TRUE case it must set the compute_stats and minrows fields,
* and can optionally set extra_data to pass additional info to compute_stats.
* minrows is its request for the minimum number of sample rows to be gathered
* (but note this request might not be honored, eg if there are fewer rows
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 652cdaf7de..6097f7804f 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -36,7 +36,7 @@
* REWIND indicates that the plan node should try to efficiently support
* rescans without parameter changes. (Nodes must support ExecReScan calls
* in any case, but if this flag was not given, they are at liberty to do it
- * through complete recalculation. Note that a parameter change forces a
+ * through complete recalculation. Note that a parameter change forces a
* full recalculation in any case.)
*
* BACKWARD indicates that the plan node must respect the es_direction flag.
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 3c73ca84a6..77f5e9ecd5 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -41,7 +41,7 @@
* If nbatch > 1 then tuples that don't belong in first batch get saved
* into inner-batch temp files. The same statements apply for the
* first scan of the outer relation, except we write tuples to outer-batch
- * temp files. After finishing the first scan, we do the following for
+ * temp files. After finishing the first scan, we do the following for
* each remaining batch:
* 1. Read tuples from inner batch file, load into hash buckets.
* 2. Read tuples from outer batch file, match to hash buckets and output.
@@ -130,7 +130,7 @@ typedef struct HashJoinTableData
/*
* These arrays are allocated for the life of the hash join, but only if
- * nbatch > 1. A file is opened only when we first write a tuple into it
+ * nbatch > 1. A file is opened only when we first write a tuple into it
* (otherwise its pointer remains NULL). Note that the zero'th array
* elements never get used, since we will process rather than dump out any
* tuples of batch zero.
diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h
index ef50a9013e..b8a541da8a 100644
--- a/src/include/executor/spi_priv.h
+++ b/src/include/executor/spi_priv.h
@@ -43,7 +43,7 @@ typedef struct
* For a saved plan, the _SPI_plan struct and the argument type array are in
* the plancxt (which can be really small). All the other subsidiary state
* is in plancache entries identified by plancache_list (note: the list cells
- * themselves are in plancxt). We rely on plancache.c to keep the cache
+ * themselves are in plancxt). We rely on plancache.c to keep the cache
* entries up-to-date as needed. The plancxt is a child of CacheMemoryContext
* since it should persist until explicitly destroyed.
*
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index 879a310c68..9ae967b530 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -34,7 +34,7 @@
*
* A "minimal" tuple is handled similarly to a palloc'd regular tuple.
* At present, minimal tuples never are stored in buffers, so there is no
- * parallel to case 1. Note that a minimal tuple has no "system columns".
+ * parallel to case 1. Note that a minimal tuple has no "system columns".
* (Actually, it could have an OID, but we have no need to access the OID.)
*
* A "virtual" tuple is an optimization used to minimize physical data
@@ -44,7 +44,7 @@
* a lower plan node's output TupleTableSlot, or to a function result
* constructed in a plan node's per-tuple econtext. It is the responsibility
* of the generating plan node to be sure these resources are not released
- * for as long as the virtual tuple needs to be valid. We only use virtual
+ * for as long as the virtual tuple needs to be valid. We only use virtual
* tuples in the result slots of plan nodes --- tuples to be copied anywhere
* else need to be "materialized" into physical tuples. Note also that a
* virtual tuple does not have any "system columns".
@@ -58,11 +58,11 @@
* payloads when this is the case.
*
* The Datum/isnull arrays of a TupleTableSlot serve double duty. When the
- * slot contains a virtual tuple, they are the authoritative data. When the
+ * slot contains a virtual tuple, they are the authoritative data. When the
* slot contains a physical tuple, the arrays contain data extracted from
* the tuple. (In this state, any pass-by-reference Datums point into
* the physical tuple.) The extracted information is built "lazily",
- * ie, only as needed. This serves to avoid repeated extraction of data
+ * ie, only as needed. This serves to avoid repeated extraction of data
* from the physical tuple.
*
* A TupleTableSlot can also be "empty", holding no valid data. This is
@@ -89,7 +89,7 @@
* buffer page.)
*
* tts_nvalid indicates the number of valid columns in the tts_values/isnull
- * arrays. When the slot is holding a "virtual" tuple this must be equal
+ * arrays. When the slot is holding a "virtual" tuple this must be equal
* to the descriptor's natts. When the slot is holding a physical tuple
* this is equal to the number of columns we have extracted (we always
* extract columns from left to right, so there are no holes).
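
A minimal backend-side sketch of that lazy extraction (assumes a backend build for the headers): slot_getattr() fills tts_values/tts_isnull up through the requested column, so earlier columns come for free afterward.

    #include "postgres.h"
    #include "executor/tuptable.h"

    /* Fetch column 3 (1-based); columns 1..3 are now cached in the
     * slot's Datum/isnull arrays. */
    static Datum
    third_column(TupleTableSlot *slot, bool *isnull)
    {
        return slot_getattr(slot, 3, isnull);
    }
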
@@ -103,7 +103,7 @@
* has only a minimal and not also a regular physical tuple, then tts_tuple
* points at tts_minhdr and the fields of that struct are set correctly
* for access to the minimal tuple; in particular, tts_minhdr.t_data points
- * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column
+ * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column
* extraction to treat the case identically to regular physical tuples.
*
* tts_slow/tts_off are saved state for slot_deform_tuple, and should not
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index bbcde85007..831b8142a0 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -91,7 +91,7 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
/*
* This macro initializes all the fields of a FunctionCallInfoData except
- * for the arg[] and argnull[] arrays. Performance testing has shown that
+ * for the arg[] and argnull[] arrays. Performance testing has shown that
* the fastest way to set up argnull[] for small numbers of arguments is to
* explicitly set each required element to false, so we don't try to zero
* out the argnull[] array in the macro.
@@ -107,7 +107,7 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
/*
* This macro invokes a function given a filled-in FunctionCallInfoData
- * struct. The macro result is the returned Datum --- but note that
+ * struct. The macro result is the returned Datum --- but note that
* caller must still check fcinfo->isnull! Also, if function is strict,
* it is caller's responsibility to verify that no null arguments are present
* before calling.
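
The two macros together, as a hedged sketch (assumes the five-argument, pre-collation form of InitFunctionCallInfoData used in branches of this vintage, and a backend build):

    #include "postgres.h"
    #include "fmgr.h"

    /* Call a one-argument function through a prepared FmgrInfo,
     * surfacing the isnull flag the comment insists callers check. */
    static Datum
    call_unary(FmgrInfo *flinfo, Datum arg, bool *isnull)
    {
        FunctionCallInfoData fcinfo;
        Datum       result;

        InitFunctionCallInfoData(fcinfo, flinfo, 1, NULL, NULL);
        fcinfo.arg[0] = arg;
        fcinfo.argnull[0] = false;   /* set each element explicitly */
        result = FunctionCallInvoke(&fcinfo);
        *isnull = fcinfo.isnull;     /* caller must still check this! */
        return result;
    }
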
@@ -151,11 +151,11 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
* which are varlena types). pg_detoast_datum() gives you either the input
* datum (if not toasted) or a detoasted copy allocated with palloc().
* pg_detoast_datum_copy() always gives you a palloc'd copy --- use it
- * if you need a modifiable copy of the input. Caller is expected to have
+ * if you need a modifiable copy of the input. Caller is expected to have
* checked for null inputs first, if necessary.
*
* pg_detoast_datum_packed() will return packed (1-byte header) datums
- * unmodified. It will still expand an externally toasted or compressed datum.
+ * unmodified. It will still expand an externally toasted or compressed datum.
* The resulting datum can be accessed using VARSIZE_ANY() and VARDATA_ANY()
* (beware of multiple evaluations in those macros!)
*
@@ -186,7 +186,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
pg_detoast_datum_packed((struct varlena *) DatumGetPointer(datum))
/*
- * Support for cleaning up detoasted copies of inputs. This must only
+ * Support for cleaning up detoasted copies of inputs. This must only
* be used for pass-by-ref datatypes, and normally would only be used
* for toastable types. If the given pointer is different from the
* original argument, assume it's a palloc'd detoasted copy, and pfree it.
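
A hypothetical version-1 function showing the cleanup pattern (backend build assumed):

    #include "postgres.h"
    #include "fmgr.h"

    PG_FUNCTION_INFO_V1(text_nbytes);

    /* Return the payload size of a text argument, freeing the
     * detoasted copy iff detoasting actually made one. */
    Datum
    text_nbytes(PG_FUNCTION_ARGS)
    {
        text   *t = PG_GETARG_TEXT_P(0);   /* may be a palloc'd copy */
        int32   n = VARSIZE(t) - VARHDRSZ;

        PG_FREE_IF_COPY(t, 0);             /* pfree only if copied */
        PG_RETURN_INT32(n);
    }
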
@@ -303,7 +303,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
* Dynamically loaded functions may use either the version-1 ("new style")
* or version-0 ("old style") calling convention. Version 1 is the call
* convention defined in this header file; version 0 is the old "plain C"
- * convention. A version-1 function must be accompanied by the macro call
+ * convention. A version-1 function must be accompanied by the macro call
*
* PG_FUNCTION_INFO_V1(function_name);
*
@@ -466,8 +466,8 @@ extern Datum FunctionCall9(FmgrInfo *flinfo, Datum arg1, Datum arg2,
/* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially FunctionLookup() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially FunctionLookup() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the FunctionLookup() once and then use FunctionCallN().
*/
extern Datum OidFunctionCall1(Oid functionId, Datum arg1);
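
One-shot usage, per the note above (sketch; the int4pl OID is assumed to have been looked up by the caller, and neither arguments nor result may be NULL):

    #include "postgres.h"
    #include "fmgr.h"

    /* Add 2 + 3 through fmgr.  For repeated calls, do fmgr_info()
     * once and then use FunctionCall2() instead. */
    static int32
    add_via_fmgr(Oid int4pl_oid)
    {
        Datum d = OidFunctionCall2(int4pl_oid,
                                   Int32GetDatum(2),
                                   Int32GetDatum(3));
        return DatumGetInt32(d);
    }
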
diff --git a/src/include/funcapi.h b/src/include/funcapi.h
index 17406b7d2c..9a4e981223 100644
--- a/src/include/funcapi.h
+++ b/src/include/funcapi.h
@@ -129,7 +129,7 @@ typedef struct FuncCallContext
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result or the rowtype could not be
* determined. NB: the tupledesc should be copied if it is to be
diff --git a/src/include/lib/stringinfo.h b/src/include/lib/stringinfo.h
index 39a31dca8c..71a5b7ad1c 100644
--- a/src/include/lib/stringinfo.h
+++ b/src/include/lib/stringinfo.h
@@ -60,7 +60,7 @@ typedef StringInfoData *StringInfo;
*
* NOTE: some routines build up a string using StringInfo, and then
* release the StringInfoData but return the data string itself to their
- * caller. At that point the data string looks like a plain palloc'd
+ * caller. At that point the data string looks like a plain palloc'd
* string.
*-------------------------
*/
@@ -100,7 +100,7 @@ __attribute__((format(printf, 2, 3)));
/*------------------------
* appendStringInfoVA
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
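
The standard pattern, sketched after appendStringInfo itself (backend build assumed):

    #include "postgres.h"
    #include "lib/stringinfo.h"
    #include <stdarg.h>

    /* Format into str, enlarging and retrying until it fits. */
    static void
    append_fmt(StringInfo str, const char *fmt, ...)
    {
        for (;;)
        {
            va_list args;
            bool    success;

            va_start(args, fmt);
            success = appendStringInfoVA(str, fmt, args);
            va_end(args);
            if (success)
                break;
            enlargeStringInfo(str, str->maxlen);  /* ~double, retry */
        }
    }
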
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index 000e658a64..351d622cd3 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -93,10 +93,10 @@ typedef struct
#endif
/*
- * This is used by the postmaster in its communication with frontends. It
+ * This is used by the postmaster in its communication with frontends. It
* contains all state information needed during this communication before the
- * backend is run. The Port structure is kept in malloc'd memory and is
- * still available when a backend is running (see MyProcPort). The data
+ * backend is run. The Port structure is kept in malloc'd memory and is
+ * still available when a backend is running (see MyProcPort). The data
* it points to must also be malloc'd, or else palloc'd in TopMemoryContext,
* so that it survives into PostgresMain execution!
*/
@@ -113,7 +113,7 @@ typedef struct Port
/*
* Information that needs to be saved from the startup packet and passed
- * into backend execution. "char *" fields are NULL if not set.
+ * into backend execution. "char *" fields are NULL if not set.
* guc_options points to a List of alternating option names and values.
*/
char *database_name;
diff --git a/src/include/libpq/pqcomm.h b/src/include/libpq/pqcomm.h
index b5c5da4e3b..d6fbce652d 100644
--- a/src/include/libpq/pqcomm.h
+++ b/src/include/libpq/pqcomm.h
@@ -153,7 +153,7 @@ extern bool Db_user_namespace;
/*
* In protocol 3.0 and later, the startup packet length is not fixed, but
- * we set an arbitrary limit on it anyway. This is just to prevent simple
+ * we set an arbitrary limit on it anyway. This is just to prevent simple
* denial-of-service attacks via sending enough data to run the server
* out of memory.
*/
diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h
index 3af1cfaab3..5ba4333bea 100644
--- a/src/include/mb/pg_wchar.h
+++ b/src/include/mb/pg_wchar.h
@@ -10,7 +10,7 @@
*
* NOTES
* This is used both by the backend and by libpq, but should not be
- * included by libpq client programs. In particular, a libpq client
+ * included by libpq client programs. In particular, a libpq client
* should not assume that the encoding IDs used by the version of libpq
* it's linked to match up with the IDs declared here.
*
@@ -40,7 +40,7 @@ typedef unsigned int pg_wchar;
/*
* Leading byte types or leading prefix byte for MULE internal code.
- * See http://www.xemacs.org for more details. (there is a doc titled
+ * See http://www.xemacs.org for more details. (there is a doc titled
* "XEmacs Internals Manual", "MULE Character Sets and Encodings"
* section.)
*/
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index e195b96308..beac7df75f 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -37,7 +37,7 @@
* In both cases, we need to be able to clean up the current transaction
* gracefully, so we can't respond to the interrupt instantaneously ---
* there's no guarantee that internal data structures would be self-consistent
- * if the code is interrupted at an arbitrary instant. Instead, the signal
+ * if the code is interrupted at an arbitrary instant. Instead, the signal
* handlers set flags that are checked periodically during execution.
*
* The CHECK_FOR_INTERRUPTS() macro is called at strategically located spots
@@ -46,19 +46,19 @@
* might sometimes be called in contexts that do *not* want to allow a cancel
* or die interrupt. The HOLD_INTERRUPTS() and RESUME_INTERRUPTS() macros
* allow code to ensure that no cancel or die interrupt will be accepted,
- * even if CHECK_FOR_INTERRUPTS() gets called in a subroutine. The interrupt
+ * even if CHECK_FOR_INTERRUPTS() gets called in a subroutine. The interrupt
* will be held off until CHECK_FOR_INTERRUPTS() is done outside any
* HOLD_INTERRUPTS() ... RESUME_INTERRUPTS() section.
*
* Special mechanisms are used to let an interrupt be accepted when we are
* waiting for a lock or when we are waiting for command input (but, of
- * course, only if the interrupt holdoff counter is zero). See the
+ * course, only if the interrupt holdoff counter is zero). See the
* related code for details.
*
* A related, but conceptually distinct, mechanism is the "critical section"
* mechanism. A critical section not only holds off cancel/die interrupts,
* but causes any ereport(ERROR) or ereport(FATAL) to become ereport(PANIC)
- * --- that is, a system-wide reset is forced. Needless to say, only really
+ * --- that is, a system-wide reset is forced. Needless to say, only really
* *critical* code should be marked as a critical section! Currently, this
* mechanism is only used for XLOG-related code.
*
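
The holdoff pattern in miniature (backend build assumed; a signal arriving inside the section merely sets a flag, which is acted on at the later check):

    #include "postgres.h"
    #include "miscadmin.h"

    static void
    update_shared_state(void)
    {
        HOLD_INTERRUPTS();
        /* ... manipulate state that must not be left half-done ... */
        RESUME_INTERRUPTS();

        CHECK_FOR_INTERRUPTS();   /* a pending cancel/die fires here */
    }
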
@@ -252,7 +252,7 @@ extern void check_stack_depth(void);
/*****************************************************************************
* pdir.h -- *
- * POSTGRES directory path definitions. *
+ * POSTGRES directory path definitions. *
*****************************************************************************/
/* flags to be OR'd to form sec_context */
@@ -291,7 +291,7 @@ extern bool superuser_arg(Oid roleid); /* given user is superuser */
/*****************************************************************************
* pmod.h -- *
- * POSTGRES processing mode definitions. *
+ * POSTGRES processing mode definitions. *
*****************************************************************************/
/*
@@ -306,7 +306,7 @@ extern bool superuser_arg(Oid roleid); /* given user is superuser */
* is used during the initial generation of template databases.
*
* Initialization mode: used while starting a backend, until all normal
- * initialization is complete. Some code behaves differently when executed
+ * initialization is complete. Some code behaves differently when executed
* in this mode to enable system bootstrapping.
*
* If a POSTGRES binary is in normal mode, then all code may be executed
@@ -339,7 +339,7 @@ extern ProcessingMode Mode;
/*****************************************************************************
* pinit.h -- *
- * POSTGRES initialization and cleanup definitions. *
+ * POSTGRES initialization and cleanup definitions. *
*****************************************************************************/
/* in utils/init/postinit.c */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index c026979fc3..027158998e 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -84,14 +84,14 @@ typedef struct ExprContext_CB
*
* This class holds the "current context" information
* needed to evaluate expressions for doing tuple qualifications
- * and tuple projections. For example, if an expression refers
+ * and tuple projections. For example, if an expression refers
* to an attribute in the current inner tuple then we need to know
* what the current inner tuple is and so we look at the expression
* context.
*
* There are two memory contexts associated with an ExprContext:
* * ecxt_per_query_memory is a query-lifespan context, typically the same
- * context the ExprContext node itself is allocated in. This context
+ * context the ExprContext node itself is allocated in. This context
* can be used for purposes such as storing function call cache info.
* * ecxt_per_tuple_memory is a short-term context for expression results.
* As the name suggests, it will typically be reset once per tuple,
@@ -194,9 +194,9 @@ typedef struct ReturnSetInfo
* Nodes which need to do projections create one of these.
*
* ExecProject() evaluates the tlist, forms a tuple, and stores it
- * in the given slot. Note that the result will be a "virtual" tuple
+ * in the given slot. Note that the result will be a "virtual" tuple
* unless ExecMaterializeSlot() is then called to force it to be
- * converted to a physical tuple. The slot must have a tupledesc
+ * converted to a physical tuple. The slot must have a tupledesc
* that matches the output of the tlist!
*
* The planner very often produces tlists that consist entirely of
@@ -251,7 +251,7 @@ typedef struct ProjectionInfo
* in emitted tuples. For example, when we do an UPDATE query,
* the planner adds a "junk" entry to the targetlist so that the tuples
* returned to ExecutePlan() contain an extra attribute: the ctid of
- * the tuple to be updated. This is needed to do the update, but we
+ * the tuple to be updated. This is needed to do the update, but we
* don't want the ctid to be part of the stored new tuple! So, we
* apply a "junk filter" to remove the junk attributes and form the
* real output tuple. The junkfilter code also provides routines to
@@ -383,8 +383,8 @@ typedef struct EState
/*
- * es_rowMarks is a list of these structs. See RowMarkClause for details
- * about rti and prti. toidAttno is not used in a "plain" rowmark.
+ * es_rowMarks is a list of these structs. See RowMarkClause for details
+ * about rti and prti. toidAttno is not used in a "plain" rowmark.
*/
typedef struct ExecRowMark
{
@@ -598,7 +598,7 @@ typedef struct FuncExprState
/*
* In some cases we need to compute a tuple descriptor for the function's
- * output. If so, it's stored here.
+ * output. If so, it's stored here.
*/
TupleDesc funcResultDesc;
bool funcReturnsTuple; /* valid when funcResultDesc isn't
@@ -624,7 +624,7 @@ typedef struct FuncExprState
/*
* Flag to remember whether we have registered a shutdown callback for
- * this FuncExprState. We do so only if funcResultStore or setArgsValid
+ * this FuncExprState. We do so only if funcResultStore or setArgsValid
* has been set at least once (since all the callback is for is to release
* the tuplestore or clear setArgsValid).
*/
@@ -1306,7 +1306,7 @@ typedef struct CteScanState
* WorkTableScanState information
*
* WorkTableScan nodes are used to scan the work table created by
- * a RecursiveUnion node. We locate the RecursiveUnion node
+ * a RecursiveUnion node. We locate the RecursiveUnion node
* during executor startup.
* ----------------
*/
@@ -1587,7 +1587,7 @@ typedef struct WindowAggState
* UniqueState information
*
* Unique nodes are used "on top of" sort nodes to discard
- * duplicate tuples returned from the sort phase. Basically
+ * duplicate tuples returned from the sort phase. Basically
* all it does is compare the current tuple from the subplan
* with the previously fetched tuple (stored in its result slot).
* If the two are identical in all interesting fields, then
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 56fc64fa65..931200e8bd 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -533,7 +533,7 @@ typedef enum JoinType
/*
* Semijoins and anti-semijoins (as defined in relational theory) do not
* appear in the SQL JOIN syntax, but there are standard idioms for
- * representing them (e.g., using EXISTS). The planner recognizes these
+ * representing them (e.g., using EXISTS). The planner recognizes these
* cases and converts them to joins. So the planner and executor must
* support these codes. NOTE: in JOIN_SEMI output, it is unspecified
* which matching RHS row is joined to. In JOIN_ANTI output, the row is
@@ -557,7 +557,7 @@ typedef enum JoinType
/*
* OUTER joins are those for which pushed-down quals must behave differently
* from the join's own quals. This is in fact everything except INNER and
- * SEMI joins. However, this macro must also exclude the JOIN_UNIQUE symbols
+ * SEMI joins. However, this macro must also exclude the JOIN_UNIQUE symbols
* since those are temporary proxies for what will eventually be an INNER
* join.
*
diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h
index adc5871055..d596af8673 100644
--- a/src/include/nodes/params.h
+++ b/src/include/nodes/params.h
@@ -19,7 +19,7 @@
* ParamListInfo
*
* ParamListInfo arrays are used to pass parameters into the executor
- * for parameterized plans. Each entry in the array defines the value
+ * for parameterized plans. Each entry in the array defines the value
* to be substituted for a PARAM_EXTERN parameter. The "paramid"
* of a PARAM_EXTERN Param can range from 1 to numParams.
*
@@ -64,7 +64,7 @@ typedef ParamListInfoData *ParamListInfo;
* es_param_exec_vals or ecxt_param_exec_vals.
*
* If execPlan is not NULL, it points to a SubPlanState node that needs
- * to be executed to produce the value. (This is done so that we can have
+ * to be executed to produce the value. (This is done so that we can have
* lazy evaluation of InitPlans: they aren't executed until/unless a
* result value is needed.) Otherwise the value is assumed to be valid
* when needed.
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 9d31bdbb27..8dc78d5839 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -152,7 +152,7 @@ typedef struct Query
* Supporting data structures for Parse Trees
*
* Most of these node types appear in raw parsetrees output by the grammar,
- * and get transformed to something else by the analyzer. A few of them
+ * and get transformed to something else by the analyzer. A few of them
* are used as-is in transformed querytrees.
****************************************************************************/
@@ -166,7 +166,7 @@ typedef struct Query
* be prespecified in typemod, otherwise typemod is unused.
*
* If pct_type is TRUE, then names is actually a field name and we look up
- * the type of that field. Otherwise (the normal case), names is a type
+ * the type of that field. Otherwise (the normal case), names is a type
* name possibly qualified with schema and database name.
*/
typedef struct TypeName
@@ -185,7 +185,7 @@ typedef struct TypeName
/*
* ColumnRef - specifies a reference to a column, or possibly a whole tuple
*
- * The "fields" list must be nonempty. It can contain string Value nodes
+ * The "fields" list must be nonempty. It can contain string Value nodes
* (representing names) and A_Star nodes (representing occurrence of a '*').
* Currently, A_Star must appear only as the last list element --- the grammar
* is responsible for enforcing this!
@@ -505,7 +505,7 @@ typedef struct IndexElem
/*
* DefElem - a generic "name = value" option definition
*
- * In some contexts the name can be qualified. Also, certain SQL commands
+ * In some contexts the name can be qualified. Also, certain SQL commands
* allow a SET/ADD/DROP action to be attached to option settings, so it's
* convenient to carry a field for that too. (Note: currently, it is our
* practice that the grammar allows namespace and action only in statements
@@ -532,7 +532,7 @@ typedef struct DefElem
/*
* LockingClause - raw representation of FOR UPDATE/SHARE options
*
- * Note: lockedRels == NIL means "all relations in query". Otherwise it
+ * Note: lockedRels == NIL means "all relations in query". Otherwise it
* is a list of RangeVar nodes. (We use RangeVar mainly because it carries
* a location field --- currently, parse analysis insists on unqualified
* names in LockingClause.)
@@ -584,8 +584,8 @@ typedef struct XmlSerialize
*
* In RELATION RTEs, the colnames in both alias and eref are indexed by
* physical attribute number; this means there must be colname entries for
- * dropped columns. When building an RTE we insert empty strings ("") for
- * dropped columns. Note however that a stored rule may have nonempty
+ * dropped columns. When building an RTE we insert empty strings ("") for
+ * dropped columns. Note however that a stored rule may have nonempty
* colnames for columns dropped since the rule was created (and for that
* matter the colnames might be out of date due to column renamings).
* The same comments apply to FUNCTION RTEs when the function's return type
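
A quick standalone illustration of the "" placeholders for dropped columns mentioned above; the table layout is invented. Keeping an empty-string entry is what preserves the physical attribute numbering:

#include <stdio.h>

int main(void)
{
    /* Physical attnos 1..4; attno 3 was dropped, so its colname is "". */
    const char *colnames[] = {"id", "name", "", "price"};

    for (int attno = 1; attno <= 4; attno++)
    {
        const char *name = colnames[attno - 1];

        if (name[0] == '\0')
            printf("attno %d: (dropped)\n", attno);
        else
            printf("attno %d: %s\n", attno, name);
    }
    return 0;
}
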
@@ -593,9 +593,9 @@ typedef struct XmlSerialize
*
* In JOIN RTEs, the colnames in both alias and eref are one-to-one with
* joinaliasvars entries. A JOIN RTE will omit columns of its inputs when
- * those columns are known to be dropped at parse time. Again, however,
+ * those columns are known to be dropped at parse time. Again, however,
* a stored rule might contain entries for columns dropped since the rule
- * was created. (This is only possible for columns not actually referenced
+ * was created. (This is only possible for columns not actually referenced
* in the rule.) When loading a stored rule, we replace the joinaliasvars
* items for any such columns with null pointers. (We can't simply delete
* them from the joinaliasvars list, because that would affect the attnums
@@ -614,7 +614,7 @@ typedef struct XmlSerialize
* decompiled queries.
*
* requiredPerms and checkAsUser specify run-time access permissions
- * checks to be performed at query startup. The user must have *all*
+ * checks to be performed at query startup. The user must have *all*
* of the permissions that are OR'd together in requiredPerms (zero
* indicates no permissions checking). If checkAsUser is not zero,
* then do the permissions checks using the access rights of that user,
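
The "must have *all* of the OR'd permissions" rule above is an ordinary bitmask check. A minimal sketch with invented DEMO_ACL_* bits (the real ACL_* bits live elsewhere in the tree):

#include <stdio.h>
#include <stdbool.h>

#define DEMO_ACL_SELECT (1 << 0)
#define DEMO_ACL_UPDATE (1 << 1)

/* Every bit OR'd into "required" must be held; zero means no check. */
static bool demo_perms_ok(unsigned required, unsigned held)
{
    return (held & required) == required;
}

int main(void)
{
    unsigned required = DEMO_ACL_SELECT | DEMO_ACL_UPDATE;

    printf("%d\n", demo_perms_ok(required, DEMO_ACL_SELECT));   /* 0: missing UPDATE */
    printf("%d\n", demo_perms_ok(required, required));          /* 1: all bits held */
    printf("%d\n", demo_perms_ok(0, 0));                        /* 1: nothing required */
    return 0;
}
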
@@ -668,7 +668,7 @@ typedef struct RangeTblEntry
* Fields valid for a join RTE (else NULL/zero):
*
* joinaliasvars is a list of (usually) Vars corresponding to the columns
- * of the join result. An alias Var referencing column K of the join
+ * of the join result. An alias Var referencing column K of the join
* result can be replaced by the K'th element of joinaliasvars --- but to
* simplify the task of reverse-listing aliases correctly, we do not do
* that until planning time. In detail: an element of joinaliasvars can
@@ -730,7 +730,7 @@ typedef struct RangeTblEntry
* You might think that ORDER BY is only interested in defining ordering,
* and GROUP/DISTINCT are only interested in defining equality. However,
* one way to implement grouping is to sort and then apply a "uniq"-like
- * filter. So it's also interesting to keep track of possible sort operators
+ * filter. So it's also interesting to keep track of possible sort operators
* for GROUP/DISTINCT, and in particular to try to sort for the grouping
* in a way that will also yield a requested ORDER BY ordering. So we need
* to be able to compare ORDER BY and GROUP/DISTINCT lists, which motivates
@@ -748,10 +748,10 @@ typedef struct RangeTblEntry
* here, but it's cheap to get it along with the sortop, and requiring it
* to be valid eases comparisons to grouping items.)
*
- * In a grouping item, eqop must be valid. If the eqop is a btree equality
+ * In a grouping item, eqop must be valid. If the eqop is a btree equality
* operator, then sortop should be set to a compatible ordering operator.
* We prefer to set eqop/sortop/nulls_first to match any ORDER BY item that
- * the query presents for the same tlist item. If there is none, we just
+ * the query presents for the same tlist item. If there is none, we just
* use the default ordering op for the datatype.
*
* If the tlist item's type has a hash opclass but no btree opclass, then
@@ -988,7 +988,7 @@ typedef struct SelectStmt
* range table. Its setOperations field shows the tree of set operations,
* with leaf SelectStmt nodes replaced by RangeTblRef nodes, and internal
* nodes replaced by SetOperationStmt nodes. Information about the output
- * column types is added, too. (Note that the child nodes do not necessarily
+ * column types is added, too. (Note that the child nodes do not necessarily
* produce these types directly, but we've checked that their output types
* can be coerced to the output column type.) Also, if it's not UNION ALL,
* information about the types' sort/group semantics is provided in the form
@@ -1249,7 +1249,7 @@ typedef struct AccessPriv
*
* Note: because of the parsing ambiguity with the GRANT <privileges>
* statement, granted_roles is a list of AccessPriv; the execution code
- * should complain if any column lists appear. grantee_roles is a list
+ * should complain if any column lists appear. grantee_roles is a list
* of role names, as Value strings.
* ----------------------
*/
@@ -1268,7 +1268,7 @@ typedef struct GrantRoleStmt
* Copy Statement
*
* We support "COPY relation FROM file", "COPY relation TO file", and
- * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
+ * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
* and "query" must be non-NULL.
* ----------------------
*/
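
The "exactly one of relation and query is non-NULL" invariant above boils down to an inequality of null-tests. A standalone sketch (demo names invented):

#include <stdio.h>
#include <stdbool.h>

/* True only when exactly one of the two pointers is set. */
static bool demo_copy_stmt_valid(const void *relation, const void *query)
{
    return (relation != NULL) != (query != NULL);
}

int main(void)
{
    int rel, qry;

    printf("%d\n", demo_copy_stmt_valid(&rel, NULL));   /* 1: COPY relation ... */
    printf("%d\n", demo_copy_stmt_valid(NULL, &qry));   /* 1: COPY (query) TO ... */
    printf("%d\n", demo_copy_stmt_valid(NULL, NULL));   /* 0: invalid */
    printf("%d\n", demo_copy_stmt_valid(&rel, &qry));   /* 0: invalid */
    return 0;
}
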
@@ -1403,7 +1403,7 @@ typedef struct Constraint
*
* If skip_validation is true then we skip checking that the existing rows
* in the table satisfy the constraint, and just install the catalog entries
- * for the constraint. This is currently used only during CREATE TABLE
+ * for the constraint. This is currently used only during CREATE TABLE
* (when we know the table must be empty).
* ----------
*/
@@ -1799,7 +1799,7 @@ typedef struct CommentStmt
* Declare Cursor Statement
*
* Note: the "query" field of DeclareCursorStmt is only used in the raw grammar
- * output. After parse analysis it's set to null, and the Query points to the
+ * output. After parse analysis it's set to null, and the Query points to the
* DeclareCursorStmt, not vice versa.
* ----------------------
*/
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 44f14140f4..582c7ff099 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -169,7 +169,7 @@ typedef struct Result
* Generate the concatenation of the results of sub-plans.
*
* Append nodes are sometimes used to switch between several result relations
- * (when the target of an UPDATE or DELETE is an inheritance set). Such a
+ * (when the target of an UPDATE or DELETE is an inheritance set). Such a
* node will have isTarget true. The Append executor is then responsible
* for updating the executor state to point at the correct target relation
* whenever it switches subplans.
@@ -206,7 +206,7 @@ typedef struct RecursiveUnion
* BitmapAnd node -
* Generate the intersection of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
@@ -220,7 +220,7 @@ typedef struct BitmapAnd
* BitmapOr node -
* Generate the union of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
@@ -254,15 +254,15 @@ typedef Scan SeqScan;
* in the same form it appeared in the query WHERE condition. Each should
* be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
* The indexkey is a Var or expression referencing column(s) of the index's
- * base table. The comparisonval might be any expression, but it won't use
+ * base table. The comparisonval might be any expression, but it won't use
* any columns of the base table.
*
* indexqual has the same form, but the expressions have been commuted if
* necessary to put the indexkeys on the left, and the indexkeys are replaced
* by Var nodes identifying the index columns (varattno is the index column
* position, not the base table's column, even though varno is for the base
- * table). This is a bit hokey ... would be cleaner to use a special-purpose
- * node type that could not be mistaken for a regular Var. But it will do
+ * table). This is a bit hokey ... would be cleaner to use a special-purpose
+ * node type that could not be mistaken for a regular Var. But it will do
* for now.
* ----------------
*/
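
Commuting a clause so the indexkey ends up on the left, as described above, just swaps the operands and replaces the operator with its commutator. A standalone sketch with an invented operator enum (the real lookup goes through the catalogs):

#include <stdio.h>

typedef enum { OP_LT, OP_LE, OP_EQ, OP_GE, OP_GT } DemoOp;

/* Commutator of each comparison: "10 <= key" becomes "key >= 10". */
static DemoOp demo_commute(DemoOp op)
{
    switch (op)
    {
        case OP_LT: return OP_GT;
        case OP_LE: return OP_GE;
        case OP_GE: return OP_LE;
        case OP_GT: return OP_LT;
        default:    return OP_EQ;   /* equality is its own commutator */
    }
}

int main(void)
{
    printf("%d\n", demo_commute(OP_LE) == OP_GE);   /* 1 */
    printf("%d\n", demo_commute(OP_EQ) == OP_EQ);   /* 1 */
    return 0;
}
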
@@ -279,7 +279,7 @@ typedef struct IndexScan
* bitmap index scan node
*
* BitmapIndexScan delivers a bitmap of potential tuple locations;
- * it does not access the heap itself. The bitmap is used by an
+ * it does not access the heap itself. The bitmap is used by an
* ancestor BitmapHeapScan node, possibly after passing through
* intermediate BitmapAnd and/or BitmapOr nodes to combine it with
* the results of other BitmapIndexScans.
@@ -339,7 +339,7 @@ typedef struct TidScan
* purposes.
*
* Note: we store the sub-plan in the type-specific subplan field, not in
- * the generic lefttree field as you might expect. This is because we do
+ * the generic lefttree field as you might expect. This is because we do
* not want plan-tree-traversal routines to recurse into the subplan without
* knowing that they are changing Query contexts.
*
@@ -443,7 +443,7 @@ typedef struct NestLoop
*
* The expected ordering of each mergeable column is described by a btree
* opfamily OID, a direction (BTLessStrategyNumber or BTGreaterStrategyNumber)
- * and a nulls-first flag. Note that the two sides of each mergeclause may
+ * and a nulls-first flag. Note that the two sides of each mergeclause may
* be of different datatypes, but they are ordered the same way according to
* the common opfamily. The operator in each mergeclause must be an equality
* operator of the indicated opfamily.
@@ -636,7 +636,7 @@ typedef struct Limit
*
* We track the objects on which a PlannedStmt depends in two ways:
* relations are recorded as a simple list of OIDs, and everything else
- * is represented as a list of PlanInvalItems. A PlanInvalItem is designed
+ * is represented as a list of PlanInvalItems. A PlanInvalItem is designed
* to be used with the syscache invalidation mechanism, so it identifies a
* system catalog entry by cache ID and tuple TID.
*/
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index a41b0e2f7d..30b008402c 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -33,7 +33,7 @@
*
* Note: colnames is a list of Value nodes (always strings). In Alias structs
* associated with RTEs, there may be entries corresponding to dropped
- * columns; these are normally empty strings (""). See parsenodes.h for info.
+ * columns; these are normally empty strings (""). See parsenodes.h for info.
*/
typedef struct Alias
{
@@ -248,7 +248,7 @@ typedef struct WindowFunc
* entire new modified array value.
*
* If reflowerindexpr = NIL, then we are fetching or storing a single array
- * element at the subscripts given by refupperindexpr. Otherwise we are
+ * element at the subscripts given by refupperindexpr. Otherwise we are
* fetching or storing an array slice, that is a rectangular subarray
* with lower and upper bounds given by the index expressions.
* reflowerindexpr must be the same length as refupperindexpr when it
@@ -370,7 +370,7 @@ typedef struct ScalarArrayOpExpr
*
* Notice the arguments are given as a List. For NOT, of course the list
* must always have exactly one element. For AND and OR, the executor can
- * handle any number of arguments. The parser generally treats AND and OR
+ * handle any number of arguments. The parser generally treats AND and OR
* as binary and so it typically only produces two-element lists, but the
* optimizer will flatten trees of AND and OR nodes to produce longer lists
* when possible. There are also a few special cases where more arguments
@@ -393,7 +393,7 @@ typedef struct BoolExpr
* SubLink
*
* A SubLink represents a subselect appearing in an expression, and in some
- * cases also the combining operator(s) just above it. The subLinkType
+ * cases also the combining operator(s) just above it. The subLinkType
* indicates the form of the expression represented:
* EXISTS_SUBLINK EXISTS(SELECT ...)
* ALL_SUBLINK (lefthand) op ALL (SELECT ...)
@@ -420,7 +420,7 @@ typedef struct BoolExpr
*
* NOTE: in the raw output of gram.y, testexpr contains just the raw form
* of the lefthand expression (if any), and operName is the String name of
- * the combining operator. Also, subselect is a raw parsetree. During parse
+ * the combining operator. Also, subselect is a raw parsetree. During parse
* analysis, the parser transforms testexpr into a complete boolean expression
* that compares the lefthand value(s) to PARAM_SUBLINK nodes representing the
* output columns of the subselect. And subselect is transformed to a Query.
@@ -478,7 +478,7 @@ typedef struct SubLink
* list). In this case testexpr is NULL to avoid duplication.
*
* The planner also derives lists of the values that need to be passed into
- * and out of the subplan. Input values are represented as a list "args" of
+ * and out of the subplan. Input values are represented as a list "args" of
* expressions to be evaluated in the outer-query context (currently these
* args are always just Vars, but in principle they could be any expression).
* The values are assigned to the global PARAM_EXEC params indexed by parParam
@@ -566,7 +566,7 @@ typedef struct FieldSelect
* portion of a column.
*
* A single FieldStore can actually represent updates of several different
- * fields. The parser only generates FieldStores with single-element lists,
+ * fields. The parser only generates FieldStores with single-element lists,
* but the planner will collapse multiple updates of the same base column
* into one FieldStore.
* ----------------
@@ -680,7 +680,7 @@ typedef struct ConvertRowtypeExpr
* and the testexpr in the second case.
*
* In the raw grammar output for the second form, the condition expressions
- * of the WHEN clauses are just the comparison values. Parse analysis
+ * of the WHEN clauses are just the comparison values. Parse analysis
* converts these to valid boolean expressions of the form
* CaseTestExpr '=' compexpr
* where the CaseTestExpr node is a placeholder that emits the correct
@@ -751,10 +751,10 @@ typedef struct ArrayExpr
*
* Note: the list of fields must have a one-for-one correspondence with
* physical fields of the associated rowtype, although it is okay for it
- * to be shorter than the rowtype. That is, the N'th list element must
+ * to be shorter than the rowtype. That is, the N'th list element must
* match up with the N'th physical field. When the N'th physical field
* is a dropped column (attisdropped) then the N'th list element can just
- * be a NULL constant. (This case can only occur for named composite types,
+ * be a NULL constant. (This case can only occur for named composite types,
* not RECORD types, since those are built from the RowExpr itself rather
* than vice versa.) It is important not to assume that length(args) is
* the same as the number of columns logically present in the rowtype.
@@ -775,7 +775,7 @@ typedef struct RowExpr
* Note: we deliberately do NOT store a typmod. Although a typmod will be
* associated with specific RECORD types at runtime, it will differ for
* different backends, and so cannot safely be stored in stored
- * parsetrees. We must assume typmod -1 for a RowExpr node.
+ * parsetrees. We must assume typmod -1 for a RowExpr node.
*/
CoercionForm row_format; /* how to display this node */
List *colnames; /* list of String, or NIL */
@@ -944,8 +944,8 @@ typedef struct BooleanTest
*
* CoerceToDomain represents the operation of coercing a value to a domain
* type. At runtime (and not before) the precise set of constraints to be
- * checked will be determined. If the value passes, it is returned as the
- * result; if not, an error is raised. Note that this is equivalent to
+ * checked will be determined. If the value passes, it is returned as the
+ * result; if not, an error is raised. Note that this is equivalent to
* RelabelType in the scenario where no constraints are applied.
*/
typedef struct CoerceToDomain
@@ -960,7 +960,7 @@ typedef struct CoerceToDomain
/*
* Placeholder node for the value to be processed by a domain's check
- * constraint. This is effectively like a Param, but can be implemented more
+ * constraint. This is effectively like a Param, but can be implemented more
* simply since we need only one replacement value at a time.
*
* Note: the typeId/typeMod will be set from the domain's base type, not
@@ -979,7 +979,7 @@ typedef struct CoerceToDomainValue
* Placeholder node for a DEFAULT marker in an INSERT or UPDATE command.
*
* This is not an executable expression: it must be replaced by the actual
- * column default expression during rewriting. But it is convenient to
+ * column default expression during rewriting. But it is convenient to
* treat it as an expression node during parsing and rewriting.
*/
typedef struct SetToDefault
@@ -1020,14 +1020,14 @@ typedef struct CurrentOfExpr
* single expression tree.
*
* In a SELECT's targetlist, resno should always be equal to the item's
- * ordinal position (counting from 1). However, in an INSERT or UPDATE
+ * ordinal position (counting from 1). However, in an INSERT or UPDATE
* targetlist, resno represents the attribute number of the destination
* column for the item; so there may be missing or out-of-order resnos.
* It is even legal to have duplicated resnos; consider
* UPDATE table SET arraycol[1] = ..., arraycol[2] = ..., ...
* The two meanings come together in the executor, because the planner
* transforms INSERT/UPDATE tlists into a normalized form with exactly
- * one entry for each column of the destination table. Before that's
+ * one entry for each column of the destination table. Before that's
* happened, however, it is risky to assume that resno == position.
* Generally get_tle_by_resno() should be used rather than list_nth()
* to fetch tlist entries by resno, and only in SELECT should you assume
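
Because INSERT/UPDATE resnos may be missing or out of order, as noted above, fetching by list position is unsafe. A standalone sketch of the search-by-resno idea (invented types; the real helper is get_tle_by_resno):

#include <stdio.h>

typedef struct
{
    int         resno;   /* destination column number */
    const char *expr;    /* stand-in for the expression tree */
} DemoTle;

/* Search by resno rather than trusting the list position. */
static const DemoTle *demo_get_tle_by_resno(const DemoTle *tlist, int n, int resno)
{
    for (int i = 0; i < n; i++)
        if (tlist[i].resno == resno)
            return &tlist[i];
    return NULL;
}

int main(void)
{
    /* UPDATE tlist targeting columns 3 and 1, in that order. */
    DemoTle     tlist[] = {{3, "expr_c"}, {1, "expr_a"}};
    const DemoTle *tle = demo_get_tle_by_resno(tlist, 2, 1);

    printf("%s\n", tle ? tle->expr : "(none)");   /* expr_a, not expr_c */
    return 0;
}
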
@@ -1036,25 +1036,25 @@ typedef struct CurrentOfExpr
* resname is required to represent the correct column name in non-resjunk
* entries of top-level SELECT targetlists, since it will be used as the
* column title sent to the frontend. In most other contexts it is only
- * a debugging aid, and may be wrong or even NULL. (In particular, it may
+ * a debugging aid, and may be wrong or even NULL. (In particular, it may
* be wrong in a tlist from a stored rule, if the referenced column has been
- * renamed by ALTER TABLE since the rule was made. Also, the planner tends
+ * renamed by ALTER TABLE since the rule was made. Also, the planner tends
* to store NULL rather than look up a valid name for tlist entries in
* non-toplevel plan nodes.) In resjunk entries, resname should be either
* a specific system-generated name (such as "ctid") or NULL; anything else
* risks confusing ExecGetJunkAttribute!
*
* ressortgroupref is used in the representation of ORDER BY, GROUP BY, and
- * DISTINCT items. Targetlist entries with ressortgroupref=0 are not
+ * DISTINCT items. Targetlist entries with ressortgroupref=0 are not
* sort/group items. If ressortgroupref>0, then this item is an ORDER BY,
- * GROUP BY, and/or DISTINCT target value. No two entries in a targetlist
+ * GROUP BY, and/or DISTINCT target value. No two entries in a targetlist
* may have the same nonzero ressortgroupref --- but there is no particular
* meaning to the nonzero values, except as tags. (For example, one must
* not assume that lower ressortgroupref means a more significant sort key.)
* The order of the associated SortGroupClause lists determine the semantics.
*
* resorigtbl/resorigcol identify the source of the column, if it is a
- * simple reference to a column of a base table (or view). If it is not
+ * simple reference to a column of a base table (or view). If it is not
* a simple reference, these fields are zeroes.
*
* If resjunk is true then the column is a working column (such as a sort key)
@@ -1094,7 +1094,7 @@ typedef struct TargetEntry
*
* NOTE: the qualification expressions present in JoinExpr nodes are
* *in addition to* the query's main WHERE clause, which appears as the
- * qual of the top-level FromExpr. The reason for associating quals with
+ * qual of the top-level FromExpr. The reason for associating quals with
* specific nodes in the jointree is that the position of a qual is critical
* when outer joins are present. (If we enforce a qual too soon or too late,
* that may cause the outer join to produce the wrong set of NULL-extended
@@ -1125,12 +1125,12 @@ typedef struct RangeTblRef
/*----------
* JoinExpr - for SQL JOIN expressions
*
- * isNatural, using, and quals are interdependent. The user can write only
+ * isNatural, using, and quals are interdependent. The user can write only
* one of NATURAL, USING(), or ON() (this is enforced by the grammar).
* If he writes NATURAL then parse analysis generates the equivalent USING()
* list, and from that fills in "quals" with the right equality comparisons.
* If he writes USING() then "quals" is filled with equality comparisons.
- * If he writes ON() then only "quals" is set. Note that NATURAL/USING
+ * If he writes ON() then only "quals" is set. Note that NATURAL/USING
* are not equivalent to ON() since they also affect the output column list.
*
* alias is an Alias node representing the AS alias-clause attached to the
@@ -1139,7 +1139,7 @@ typedef struct RangeTblRef
* restricts visibility of the tables/columns inside it.
*
* During parse analysis, an RTE is created for the Join, and its index
- * is filled into rtindex. This RTE is present mainly so that Vars can
+ * is filled into rtindex. This RTE is present mainly so that Vars can
* be created that refer to the outputs of the join. The planner sometimes
* generates JoinExprs internally; these can have rtindex = 0 if there are
* no join alias variables referencing such joins.
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index 278cfa3c39..e05f3bb6c9 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -95,7 +95,7 @@ typedef struct PlannerGlobal
*
* This struct is conventionally called "root" in all the planner routines.
* It holds links to all of the planner's working state, in addition to the
- * original Query. Note that at present the planner extensively modifies
+ * original Query. Note that at present the planner extensively modifies
* the passed-in Query data structure; someday that should stop.
*----------
*/
@@ -113,7 +113,7 @@ typedef struct PlannerInfo
/*
* simple_rel_array holds pointers to "base rels" and "other rels" (see
- * comments for RelOptInfo for more info). It is indexed by rangetable
+ * comments for RelOptInfo for more info). It is indexed by rangetable
* index (so entry 0 is always wasted). Entries can be NULL when an RTE
* does not correspond to a base relation, such as a join RTE or an
* unreferenced view RTE; or if the RelOptInfo hasn't been made yet.
@@ -134,7 +134,7 @@ typedef struct PlannerInfo
* considered in this planning run. For small problems we just scan the
* list to do lookups, but when there are many join relations we build a
* hash table for faster lookups. The hash table is present and valid
- * when join_rel_hash is not NULL. Note that we still maintain the list
+ * when join_rel_hash is not NULL. Note that we still maintain the list
* even when using the hash table for lookups; this simplifies life for
* GEQO.
*/
@@ -239,7 +239,7 @@ typedef struct PlannerInfo
* Currently the only kind of otherrels are those made for member relations
* of an "append relation", that is an inheritance set or UNION ALL subquery.
* An append relation has a parent RTE that is a base rel, which represents
- * the entire append relation. The member RTEs are otherrels. The parent
+ * the entire append relation. The member RTEs are otherrels. The parent
* is present in the query join tree but the members are not. The member
* RTEs and otherrels are used to plan the scans of the individual tables or
* subqueries of the append set; then the parent baserel is given an Append
@@ -251,7 +251,7 @@ typedef struct PlannerInfo
* alias Vars are expanded to non-aliased form during preprocess_expression.
*
* Parts of this data structure are specific to various scan and join
- * mechanisms. It didn't seem worth creating new node types for them.
+ * mechanisms. It didn't seem worth creating new node types for them.
*
* relids - Set of base-relation identifiers; it is a base relation
* if there is just one, a join relation if more than one
@@ -413,7 +413,7 @@ typedef struct RelOptInfo
* Zeroes in the indexkeys[] array indicate index columns that are
* expressions; there is one element in indexprs for each such column.
*
- * For an unordered index, the sortop arrays contains zeroes. Note that
+ * For an unordered index, the sortop arrays contains zeroes. Note that
* fwdsortop[] and nulls_first[] describe the sort ordering of a forward
* indexscan; we can also consider a backward indexscan, which will
* generate sort order described by revsortop/!nulls_first.
@@ -476,7 +476,7 @@ typedef struct IndexOptInfo
* us represent knowledge about different sort orderings being equivalent.
* Since every PathKey must reference an EquivalenceClass, we will end up
* with single-member EquivalenceClasses whenever a sort key expression has
- * not been equivalenced to anything else. It is also possible that such an
+ * not been equivalenced to anything else. It is also possible that such an
* EquivalenceClass will contain a volatile expression ("ORDER BY random()"),
* which is a case that can't arise otherwise since clauses containing
* volatile functions are never considered mergejoinable. We mark such
@@ -489,7 +489,7 @@ typedef struct IndexOptInfo
* We allow equality clauses appearing below the nullable side of an outer join
* to form EquivalenceClasses, but these have a slightly different meaning:
* the included values might be all NULL rather than all the same non-null
- * values. See src/backend/optimizer/README for more on that point.
+ * values. See src/backend/optimizer/README for more on that point.
*
* NB: if ec_merged isn't NULL, this class has been merged into another, and
* should be ignored in favor of using the pointed-to class.
@@ -530,7 +530,7 @@ typedef struct EquivalenceClass
*
* em_datatype is usually the same as exprType(em_expr), but can be
* different when dealing with a binary-compatible opfamily; in particular
- * anyarray_ops would never work without this. Use em_datatype when
+ * anyarray_ops would never work without this. Use em_datatype when
* looking up a specific btree operator to work with this expression.
*/
typedef struct EquivalenceMember
@@ -554,11 +554,11 @@ typedef struct EquivalenceMember
* etc. The value being sorted is represented by linking to an
* EquivalenceClass containing that value and including pk_opfamily among its
* ec_opfamilies. This is a convenient method because it makes it trivial
- * to detect equivalent and closely-related orderings. (See optimizer/README
+ * to detect equivalent and closely-related orderings. (See optimizer/README
* for more information.)
*
* Note: pk_strategy is either BTLessStrategyNumber (for ASC) or
- * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
+ * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
* index types will use btree-compatible strategy numbers.
*/
@@ -662,11 +662,11 @@ typedef struct IndexPath
*
* The individual indexscans are represented by IndexPath nodes, and any
* logic on top of them is represented by a tree of BitmapAndPath and
- * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
+ * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
* to represent a regular IndexScan plan, and as the child of a BitmapHeapPath
* that represents scanning the same index using a BitmapIndexScan. The
* startup_cost and total_cost figures of an IndexPath always represent the
- * costs to use it as a regular IndexScan. The costs of a BitmapIndexScan
+ * costs to use it as a regular IndexScan. The costs of a BitmapIndexScan
* can be computed using the IndexPath's indextotalcost and indexselectivity.
*
* BitmapHeapPaths can be nestloop inner indexscans. The isjoininner and
@@ -768,7 +768,7 @@ typedef struct MaterialPath
*
* This is unlike the other Path nodes in that it can actually generate
* different plans: either hash-based or sort-based implementation, or a
- * no-op if the input path can be proven distinct already. The decision
+ * no-op if the input path can be proven distinct already. The decision
* is sufficiently localized that it's not worth having separate Path node
* types. (Note: in the no-op case, we could eliminate the UniquePath node
* entirely and just return the subpath; but it's convenient to have a
@@ -882,7 +882,7 @@ typedef struct HashPath
* When we construct a join rel that includes all the base rels referenced
* in a multi-relation restriction clause, we place that clause into the
* joinrestrictinfo lists of paths for the join rel, if neither left nor
- * right sub-path includes all base rels referenced in the clause. The clause
+ * right sub-path includes all base rels referenced in the clause. The clause
* will be applied at that join level, and will not propagate any further up
* the join tree. (Note: the "predicate migration" code was once intended to
* push restriction clauses up and down the plan tree based on evaluation
@@ -922,13 +922,13 @@ typedef struct HashPath
* that appeared elsewhere in the tree and were pushed down to the join rel
* because they used no other rels. That's what the is_pushed_down flag is
* for; it tells us that a qual is not an OUTER JOIN qual for the set of base
- * rels listed in required_relids. A clause that originally came from WHERE
+ * rels listed in required_relids. A clause that originally came from WHERE
* or an INNER JOIN condition will *always* have its is_pushed_down flag set.
* It's possible for an OUTER JOIN clause to be marked is_pushed_down too,
* if we decide that it can be pushed down into the nullable side of the join.
* In that case it acts as a plain filter qual for wherever it gets evaluated.
* (In short, is_pushed_down is only false for non-degenerate outer join
- * conditions. Possibly we should rename it to reflect that meaning?)
+ * conditions. Possibly we should rename it to reflect that meaning?)
*
* RestrictInfo nodes also contain an outerjoin_delayed flag, which is true
* if the clause's applicability must be delayed due to any outer joins
@@ -938,10 +938,10 @@ typedef struct HashPath
* forced null by some outer join below the clause. outerjoin_delayed = true
* is subtly different from nullable_relids != NULL: a clause might reference
* some nullable rels and yet not be outerjoin_delayed because it also
- * references all the other rels of the outer join(s). A clause that is not
+ * references all the other rels of the outer join(s). A clause that is not
* outerjoin_delayed can be enforced anywhere it is computable.
*
- * In general, the referenced clause might be arbitrarily complex. The
+ * In general, the referenced clause might be arbitrarily complex. The
* kinds of clauses we can handle as indexscan quals, mergejoin clauses,
* or hashjoin clauses are limited (e.g., no volatile functions). The code
* for each kind of path is responsible for identifying the restrict clauses
@@ -966,7 +966,7 @@ typedef struct HashPath
*
* The pseudoconstant flag is set true if the clause contains no Vars of
* the current query level and no volatile functions. Such a clause can be
- * pulled out and used as a one-time qual in a gating Result node. We keep
+ * pulled out and used as a one-time qual in a gating Result node. We keep
* pseudoconstant clauses in the same lists as other RestrictInfos so that
* the regular clause-pushing machinery can assign them to the correct join
* level, but they need to be treated specially for cost and selectivity
@@ -976,7 +976,7 @@ typedef struct HashPath
*
* When join clauses are generated from EquivalenceClasses, there may be
* several equally valid ways to enforce join equivalence, of which we need
- * apply only one. We mark clauses of this kind by setting parent_ec to
+ * apply only one. We mark clauses of this kind by setting parent_ec to
* point to the generating EquivalenceClass. Multiple clauses with the same
* parent_ec in the same join are redundant.
*/
@@ -1101,8 +1101,8 @@ typedef struct InnerIndexscanInfo
/*
* Placeholder node for an expression to be evaluated below the top level
- * of a plan tree. This is used during planning to represent the contained
- * expression. At the end of the planning process it is replaced by either
+ * of a plan tree. This is used during planning to represent the contained
+ * expression. At the end of the planning process it is replaced by either
* the contained expression or a Var referring to a lower-level evaluation of
* the contained expression. Typically the evaluation occurs below an outer
* join, and Var references above the outer join might thereby yield NULL
@@ -1126,9 +1126,9 @@ typedef struct PlaceHolderVar
* "Special join" info.
*
* One-sided outer joins constrain the order of joining partially but not
- * completely. We flatten such joins into the planner's top-level list of
+ * completely. We flatten such joins into the planner's top-level list of
* relations to join, but record information about each outer join in a
- * SpecialJoinInfo struct. These structs are kept in the PlannerInfo node's
+ * SpecialJoinInfo struct. These structs are kept in the PlannerInfo node's
* join_info_list.
*
* Similarly, semijoins and antijoins created by flattening IN (subselect)
@@ -1156,7 +1156,7 @@ typedef struct PlaceHolderVar
* to be evaluated after this join is formed (because it references the RHS).
* Any outer joins that have such a clause and this join in their RHS cannot
* commute with this join, because that would leave noplace to check the
- * pushed-down clause. (We don't track this for FULL JOINs, either.)
+ * pushed-down clause. (We don't track this for FULL JOINs, either.)
*
* join_quals is an implicit-AND list of the quals syntactically associated
* with the join (they may or may not end up being applied at the join level).
@@ -1241,7 +1241,7 @@ typedef struct AppendRelInfo
/*
* For an inheritance appendrel, the parent and child are both regular
* relations, and we store their rowtype OIDs here for use in translating
- * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
+ * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
* both subqueries with no named rowtype, and we store InvalidOid here.
*/
Oid parent_reltype; /* OID of parent's composite type */
@@ -1253,14 +1253,14 @@ typedef struct AppendRelInfo
* used to translate Vars referencing the parent rel into references to
* the child. A list element is NULL if it corresponds to a dropped
* column of the parent (this is only possible for inheritance cases, not
- * UNION ALL). The list elements are always simple Vars for inheritance
+ * UNION ALL). The list elements are always simple Vars for inheritance
* cases, but can be arbitrary expressions in UNION ALL cases.
*
* Notice we only store entries for user columns (attno > 0). Whole-row
* Vars are special-cased, and system columns (attno < 0) need no special
* translation since their attnos are the same for all tables.
*
- * Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
+ * Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
* when copying into a subquery.
*/
List *translated_vars; /* Expressions in the child's Vars */
@@ -1277,7 +1277,7 @@ typedef struct AppendRelInfo
* For each distinct placeholder expression generated during planning, we
* store a PlaceHolderInfo node in the PlannerInfo node's placeholder_list.
* This stores info that is needed centrally rather than in each copy of the
- * PlaceHolderVar. The phid fields identify which PlaceHolderInfo goes with
+ * PlaceHolderVar. The phid fields identify which PlaceHolderInfo goes with
* each PlaceHolderVar. Note that phid is unique throughout a planner run,
* not just within a query level --- this is so that we need not reassign ID's
* when pulling a subquery into its parent.
@@ -1335,7 +1335,7 @@ typedef struct PlaceHolderInfo
* value in the Var will always be zero.
*
* A PlaceHolderVar: this works much like the Var case, except that the
- * entry is a PlaceHolderVar node with a contained expression. The PHV
+ * entry is a PlaceHolderVar node with a contained expression. The PHV
* will have phlevelsup = 0, and the contained expression is adjusted
* to match in level.
*
diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h
index 38c0f65824..fa5fa87be2 100644
--- a/src/include/nodes/tidbitmap.h
+++ b/src/include/nodes/tidbitmap.h
@@ -26,7 +26,7 @@
/*
- * Actual bitmap representation is private to tidbitmap.c. Callers can
+ * Actual bitmap representation is private to tidbitmap.c. Callers can
* do IsA(x, TIDBitmap) on it, but nothing else.
*/
typedef struct TIDBitmap TIDBitmap;
diff --git a/src/include/nodes/value.h b/src/include/nodes/value.h
index 27bbe33666..68d29e4cfc 100644
--- a/src/include/nodes/value.h
+++ b/src/include/nodes/value.h
@@ -29,7 +29,7 @@
*
* (Before Postgres 7.0, we used a double to represent T_Float,
* but that creates loss-of-precision problems when the value is
- * ultimately destined to be converted to NUMERIC. Since Value nodes
+ * ultimately destined to be converted to NUMERIC. Since Value nodes
* are only used in the parsing process, not for runtime data, it's
* better to use the more general representation.)
*
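
The precision loss that motivates keeping T_Float as a string can be seen directly: a double holds only about 17 significant decimal digits, so a long literal destined for NUMERIC would be silently truncated. A standalone sketch:

#include <stdio.h>

int main(void)
{
    const char *literal = "1234567890.12345678901234567890";
    double      d = 1234567890.12345678901234567890;

    printf("as string:  %s\n", literal);
    printf("via double: %.20f\n", d);   /* later digits no longer match */
    return 0;
}
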
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 25c8d7f0cd..c8674ae7c8 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -24,7 +24,7 @@
* links to current parse state of outer query.
*
* p_sourcetext: source string that generated the raw parsetree being
- * analyzed, or NULL if not available. (The string is used only to
+ * analyzed, or NULL if not available. (The string is used only to
* generate cursor positions in error messages: we need it to convert
* byte-wise locations in parse structures to character-wise cursor
* positions.)
@@ -59,7 +59,7 @@
* to make an RTE before you can access a CTE.
*
* p_future_ctes: list of CommonTableExprs (WITH items) that are not yet
- * visible due to scope rules. This is used to help improve error messages.
+ * visible due to scope rules. This is used to help improve error messages.
*
* p_parent_cte: CommonTableExpr that immediately contains the current query,
* if any.
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index 3a93328d86..0aa69e4206 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -3,7 +3,7 @@
*
* This file contains various configuration symbols and limits. In
* all cases, changing them is only useful in very rare situations or
- * for developers. If you edit any of these, be sure to do a *full*
+ * for developers. If you edit any of these, be sure to do a *full*
* rebuild (and an initdb if noted).
*
* $PostgreSQL: pgsql/src/include/pg_config_manual.h,v 1.39 2009/06/11 14:49:08 momjian Exp $
@@ -60,8 +60,8 @@
/*
* Define this if you want to allow the lo_import and lo_export SQL
- * functions to be executed by ordinary users. By default these
- * functions are only available to the Postgres superuser. CAUTION:
+ * functions to be executed by ordinary users. By default these
+ * functions are only available to the Postgres superuser. CAUTION:
* These functions are SECURITY HOLES since they can read and write
* any file that the PostgreSQL server has permission to access. If
* you turn this on, don't say we didn't warn you.
@@ -140,7 +140,7 @@
/*
* This is the default directory in which AF_UNIX socket files are
- * placed. Caution: changing this risks breaking your existing client
+ * placed. Caution: changing this risks breaking your existing client
* applications, which are likely to continue to look in the old
* directory. But if you just hate the idea of sockets in /tmp,
* here's where to twiddle it. You can also override this at runtime
@@ -153,7 +153,7 @@
* MAX_RANDOM_VALUE. Currently, all known implementations yield
* 0..2^31-1, so we just hardwire this constant. We could do a
* configure test if it proves to be necessary. CAUTION: Think not to
- * replace this with RAND_MAX. RAND_MAX defines the maximum value of
+ * replace this with RAND_MAX. RAND_MAX defines the maximum value of
* the older rand() function, which is often different from --- and
* considerably inferior to --- random().
*/
@@ -178,7 +178,7 @@
/*
* Define this to check memory allocation errors (scribbling on more
- * bytes than were allocated). Right now, this gets defined
+ * bytes than were allocated). Right now, this gets defined
* automatically if --enable-cassert.
*/
#ifdef USE_ASSERT_CHECKING
@@ -188,7 +188,7 @@
/*
* Define this to cause palloc()'d memory to be filled with random data, to
* facilitate catching code that depends on the contents of uninitialized
- * memory. Caution: this is horrendously expensive.
+ * memory. Caution: this is horrendously expensive.
*/
/* #define RANDOMIZE_ALLOCATED_MEMORY */
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index bac5262ed7..b69cfbe617 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -104,7 +104,7 @@ typedef struct PgStat_TableCounts
*
* Most of the event counters are nontransactional, ie, we count events
* in committed and aborted transactions alike. For these, we just count
- * directly in the PgStat_TableStatus. However, new_live_tuples and
+ * directly in the PgStat_TableStatus. However, new_live_tuples and
* new_dead_tuples must be derived from tuple insertion and deletion counts
* with awareness of whether the transaction or subtransaction committed or
* aborted. Hence, we also keep a stack of per-(sub)transaction status
diff --git a/src/include/port.h b/src/include/port.h
index ece64db7f7..76231961f1 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -141,7 +141,7 @@ extern unsigned char pg_tolower(unsigned char ch);
/*
* Versions of libintl >= 0.13 try to replace printf() and friends with
- * macros to their own versions that understand the %$ format. We do the
+ * macros to their own versions that understand the %$ format. We do the
* same, so disable their macros, if they exist.
*/
#ifdef vsnprintf
diff --git a/src/include/port/linux.h b/src/include/port/linux.h
index e2a4c4a114..50d2bd51f0 100644
--- a/src/include/port/linux.h
+++ b/src/include/port/linux.h
@@ -4,7 +4,7 @@
* As of July 2007, all known versions of the Linux kernel will sometimes
* return EIDRM for a shmctl() operation when EINVAL is correct (it happens
* when the low-order 15 bits of the supplied shm ID match the slot number
- * assigned to a newer shmem segment). We deal with this by assuming that
+ * assigned to a newer shmem segment). We deal with this by assuming that
* EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
* since in fact Linux has no excuse for ever returning EIDRM; it doesn't
* track removed segments in a way that would allow distinguishing them from
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
index 8586bf106b..9270fa6ca4 100644
--- a/src/include/port/win32.h
+++ b/src/include/port/win32.h
@@ -94,7 +94,7 @@
* Signal stuff
*
* For WIN32, there is no wait() call so there are no wait() macros
- * to interpret the return value of system(). Instead, system()
+ * to interpret the return value of system(). Instead, system()
* return values < 0x100 are used for exit() termination, and higher
* values are used to indicated non-exit() termination, which is
* similar to a unix-style signal exit (think SIGSEGV ==
@@ -132,7 +132,7 @@
* NTSTATUS.H from the Windows NT DDK.
*
* Some day we might want to print descriptions for the most common
- * exceptions, rather than printing an include file name. We could use
+ * exceptions, rather than printing an include file name. We could use
* RtlNtStatusToDosError() and pass to FormatMessage(), which can print
* the text of error values, but MinGW does not support
* RtlNtStatusToDosError().
diff --git a/src/include/portability/instr_time.h b/src/include/portability/instr_time.h
index 2d48d866fc..121d42614d 100644
--- a/src/include/portability/instr_time.h
+++ b/src/include/portability/instr_time.h
@@ -10,8 +10,8 @@
* high-precision-timing APIs on yet other platforms.
*
* The basic data type is instr_time, which all callers should treat as an
- * opaque typedef. instr_time can store either an absolute time (of
- * unspecified reference time) or an interval. The operations provided
+ * opaque typedef. instr_time can store either an absolute time (of
+ * unspecified reference time) or an interval. The operations provided
* for it are:
*
* INSTR_TIME_IS_ZERO(t) is t equal to zero?
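
The instr_time operations follow the usual start/stop/subtract pattern. A standalone mirror of the Unix (struct timeval) flavor, with DEMO_* names invented so it compiles outside the backend:

#include <stdio.h>
#include <sys/time.h>

typedef struct timeval demo_instr_time;

#define DEMO_SET_CURRENT(t) gettimeofday(&(t), NULL)
#define DEMO_SUBTRACT(x, y) \
    do { \
        (x).tv_sec -= (y).tv_sec; \
        (x).tv_usec -= (y).tv_usec; \
        if ((x).tv_usec < 0) \
        { \
            (x).tv_usec += 1000000; \
            (x).tv_sec--; \
        } \
    } while (0)
#define DEMO_GET_MILLISEC(t) ((t).tv_sec * 1000.0 + (t).tv_usec / 1000.0)

int main(void)
{
    demo_instr_time start, duration;
    volatile long   sink = 0;

    DEMO_SET_CURRENT(start);
    for (long i = 0; i < 1000000; i++)   /* some work to time */
        sink += i;
    DEMO_SET_CURRENT(duration);
    DEMO_SUBTRACT(duration, start);
    printf("elapsed: %.3f ms\n", DEMO_GET_MILLISEC(duration));
    return 0;
}
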
diff --git a/src/include/postgres.h b/src/include/postgres.h
index c1e4f77386..46240169d3 100644
--- a/src/include/postgres.h
+++ b/src/include/postgres.h
@@ -33,7 +33,7 @@
* in the backend environment, but are of no interest outside the backend.
*
* Simple type definitions live in c.h, where they are shared with
- * postgres_fe.h. We do that since those type definitions are needed by
+ * postgres_fe.h. We do that since those type definitions are needed by
* frontend modules that want to deal with binary data transmission to or
* from the backend. Type definitions in this file should be for
* representations that never escape the backend, such as Datum or
@@ -55,7 +55,7 @@
/*
* struct varatt_external is a "TOAST pointer", that is, the information
- * needed to fetch a stored-out-of-line Datum. The data is compressed
+ * needed to fetch a stored-out-of-line Datum. The data is compressed
* if and only if va_extsize < va_rawsize - VARHDRSZ. This struct must not
* contain any padding, because we sometimes compare pointers using memcmp.
*
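
The "compressed if and only if va_extsize < va_rawsize - VARHDRSZ" rule above needs no flag bit; it falls out of the two sizes. A standalone sketch with invented demo types:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_VARHDRSZ 4   /* stand-in for VARHDRSZ */

/* Simplified TOAST pointer: just the two sizes that matter here. */
typedef struct
{
    int32_t va_rawsize;   /* original size, including header */
    int32_t va_extsize;   /* size of the external saved data */
} DemoToastPointer;

static bool demo_is_compressed(const DemoToastPointer *p)
{
    return p->va_extsize < p->va_rawsize - DEMO_VARHDRSZ;
}

int main(void)
{
    DemoToastPointer plain = {1004, 1000};   /* stored uncompressed */
    DemoToastPointer packed = {1004, 400};   /* stored compressed */

    printf("%d %d\n", demo_is_compressed(&plain), demo_is_compressed(&packed));   /* 0 1 */
    return 0;
}
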
@@ -127,7 +127,7 @@ typedef struct
* The "xxx" bits are the length field (which includes itself in all cases).
* In the big-endian case we mask to extract the length, in the little-endian
* case we shift. Note that in both cases the flag bits are in the physically
- * first byte. Also, it is not possible for a 1-byte length word to be zero;
+ * first byte. Also, it is not possible for a 1-byte length word to be zero;
* this lets us disambiguate alignment padding bytes from the start of an
* unaligned datum. (We now *require* pad bytes to be filled with zero!)
*/
diff --git a/src/include/postgres_ext.h b/src/include/postgres_ext.h
index 51a18b7dcc..1f0e4aa18b 100644
--- a/src/include/postgres_ext.h
+++ b/src/include/postgres_ext.h
@@ -7,7 +7,7 @@
* For example, the Oid type is part of the API of libpq and other libraries.
*
* Declarations which are specific to a particular interface should
- * go in the header file for that interface (such as libpq-fe.h). This
+ * go in the header file for that interface (such as libpq-fe.h). This
* file is only for fundamental Postgres declarations.
*
* User-written C functions don't count as "external to Postgres."
diff --git a/src/include/postmaster/syslogger.h b/src/include/postmaster/syslogger.h
index a19a58c7d5..4abe5e0a9c 100644
--- a/src/include/postmaster/syslogger.h
+++ b/src/include/postmaster/syslogger.h
@@ -20,7 +20,7 @@
* here is to divide long messages into chunks that are not more than
* PIPE_BUF bytes long, which according to POSIX spec must be written into
* the pipe atomically. The pipe reader then uses the protocol headers to
- * reassemble the parts of a message into a single string. The reader can
+ * reassemble the parts of a message into a single string. The reader can
* also cope with non-protocol data coming down the pipe, though we cannot
* guarantee long strings won't get split apart.
*
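
Splitting a long message into atomically writable chunks, as the protocol above requires, is a simple loop. A standalone sketch; DEMO_CHUNK_MAX is an invented stand-in for the PIPE_BUF-derived payload limit:

#include <stdio.h>
#include <string.h>

#define DEMO_CHUNK_MAX 8

/* Emit the message in pieces no longer than DEMO_CHUNK_MAX bytes. */
static void demo_send_chunked(const char *msg)
{
    size_t len = strlen(msg);

    for (size_t off = 0; off < len; off += DEMO_CHUNK_MAX)
    {
        size_t n = len - off < DEMO_CHUNK_MAX ? len - off : DEMO_CHUNK_MAX;

        printf("chunk: %.*s\n", (int) n, msg + off);
    }
}

int main(void)
{
    demo_send_chunked("a fairly long log line to split");
    return 0;
}
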
diff --git a/src/include/regex/regcustom.h b/src/include/regex/regcustom.h
index 269f926be8..061f642af6 100644
--- a/src/include/regex/regcustom.h
+++ b/src/include/regex/regcustom.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/include/regex/regex.h b/src/include/regex/regex.h
index 4b35b0ccd7..e1b432f523 100644
--- a/src/include/regex/regex.h
+++ b/src/include/regex/regex.h
@@ -3,7 +3,7 @@
/*
* regular expressions
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/include/regex/regguts.h b/src/include/regex/regguts.h
index 1a0a8bd7cd..ed07907aae 100644
--- a/src/include/regex/regguts.h
+++ b/src/include/regex/regguts.h
@@ -1,7 +1,7 @@
/*
* Internal interface definitions, etc., for the reg package
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -126,8 +126,8 @@
/*
- * We dissect a chr into byts for colormap table indexing. Here we define
- * a byt, which will be the same as a byte on most machines... The exact
+ * We dissect a chr into byts for colormap table indexing. Here we define
+ * a byt, which will be the same as a byte on most machines... The exact
* size of a byt is not critical, but about 8 bits is good, and extraction
* of 8-bit chunks is sometimes especially fast.
*/
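
Dissecting a chr into byts, as described above, is plain shifting and masking. A standalone sketch assuming a 32-bit chr and 8-bit byts (four levels, matching the usual NBYTS):

#include <stdio.h>

int main(void)
{
    unsigned int chr = 0x3042;   /* e.g., U+3042 */

    /* One byt per tree level, most significant first. */
    for (int level = 3; level >= 0; level--)
        printf("byt %d: 0x%02x\n", level, (chr >> (level * 8)) & 0xFF);
    return 0;
}
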
@@ -156,9 +156,9 @@ typedef int pcolor; /* what color promotes to */
/*
* A colormap is a tree -- more precisely, a DAG -- indexed at each level
- * by a byt of the chr, to map the chr to a color efficiently. Because
+ * by a byt of the chr, to map the chr to a color efficiently. Because
* lower sections of the tree can be shared, it can exploit the usual
- * sparseness of such a mapping table. The tree is always NBYTS levels
+ * sparseness of such a mapping table. The tree is always NBYTS levels
* deep (in the past it was shallower during construction but was "filled"
* to full depth at the end of that); areas that are unaltered as yet point
* to "fill blocks" which are entirely WHITE in color.
diff --git a/src/include/snowball/header.h b/src/include/snowball/header.h
index 21fa2ea29c..917b04ee6a 100644
--- a/src/include/snowball/header.h
+++ b/src/include/snowball/header.h
@@ -4,7 +4,7 @@
* Replacement header file for Snowball stemmer modules
*
* The Snowball stemmer modules do #include "header.h", and think they
- * are including snowball/libstemmer/header.h. We adjust the CPPFLAGS
+ * are including snowball/libstemmer/header.h. We adjust the CPPFLAGS
* so that this file is found instead, and thereby we can modify the
* headers they see. The main point here is to ensure that pg_config.h
* is included before any system headers such as <stdio.h>; without that,
diff --git a/src/include/storage/block.h b/src/include/storage/block.h
index 74e909f145..1f51528e0f 100644
--- a/src/include/storage/block.h
+++ b/src/include/storage/block.h
@@ -37,7 +37,7 @@ typedef uint32 BlockNumber;
/*
* BlockId:
*
- * this is a storage type for BlockNumber. in other words, this type
+ * this is a storage type for BlockNumber. in other words, this type
* is used for on-disk structures (e.g., in HeapTupleData) whereas
* BlockNumber is the type on which calculations are performed (e.g.,
* in access method code).
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 7084343037..5aa0e92198 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -108,9 +108,9 @@ typedef struct buftag
*
* Note: buf_hdr_lock must be held to examine or change the tag, flags,
* usage_count, refcount, or wait_backend_pid fields. buf_id field never
- * changes after initialization, so does not need locking. freeNext is
+ * changes after initialization, so does not need locking. freeNext is
* protected by the BufFreelistLock not buf_hdr_lock. The LWLocks can take
- * care of themselves. The buf_hdr_lock is *not* used to control access to
+ * care of themselves. The buf_hdr_lock is *not* used to control access to
* the data in the buffer!
*
* An exception is that if we have the buffer pinned, its tag can't change
@@ -121,7 +121,7 @@ typedef struct buftag
*
* We can't physically remove items from a disk page if another backend has
* the buffer pinned. Hence, a backend may need to wait for all other pins
- * to go away. This is signaled by storing its own PID into
+ * to go away. This is signaled by storing its own PID into
* wait_backend_pid and setting flag bit BM_PIN_COUNT_WAITER. At present,
* there can be only one such waiter per buffer.
*
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index 565917cbd0..ef24898578 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -26,7 +26,7 @@
* disk page is always a slotted page of the form:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp1 linp2 linp3 ... |
+ * | PageHeaderData | linp1 linp2 linp3 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
@@ -34,7 +34,7 @@
* | |
* | v pd_upper |
* +-------------+------------------------------------+
- * | | tupleN ... |
+ * | | tupleN ... |
* +-------------+------------------+-----------------+
* | ... tuple3 tuple2 tuple1 | "special space" |
* +--------------------------------+-----------------+
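
In the slotted-page layout drawn above, line pointers grow up from pd_lower and tuples grow down from pd_upper, so the free space is simply the gap between the two. A standalone sketch with an invented, heavily simplified header:

#include <stdio.h>
#include <stdint.h>

/* Just the two offsets that bound the hole in the middle of the page. */
typedef struct
{
    uint16_t pd_lower;   /* end of the line pointer array */
    uint16_t pd_upper;   /* start of the tuple area */
} DemoPageHeader;

static int demo_free_space(const DemoPageHeader *p)
{
    return (int) p->pd_upper - (int) p->pd_lower;
}

int main(void)
{
    DemoPageHeader page = {64, 7800};   /* linp area ends at 64, tuples start at 7800 */

    printf("free space: %d bytes\n", demo_free_space(&page));   /* 7736 */
    return 0;
}
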
@@ -65,7 +65,7 @@
*
* AM-specific per-page data (if any) is kept in the area marked "special
* space"; each AM has an "opaque" structure defined somewhere that is
- * stored as the page trailer. an access method should always
+ * stored as the page trailer. an access method should always
* initialize its pages with PageInit and then set its own opaque
* fields.
*/
@@ -104,7 +104,7 @@ typedef uint16 LocationIndex;
* like a good idea).
*
* pd_prune_xid is a hint field that helps determine whether pruning will be
- * useful. It is currently unused in index pages.
+ * useful. It is currently unused in index pages.
*
* The page version number and page size are packed together into a single
* uint16 field. This is for historical reasons: before PostgreSQL 7.3,
diff --git a/src/include/storage/ipc.h b/src/include/storage/ipc.h
index b76c033d48..4132e63b40 100644
--- a/src/include/storage/ipc.h
+++ b/src/include/storage/ipc.h
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h
index 496cd1d47f..48de9663e6 100644
--- a/src/include/storage/itemid.h
+++ b/src/include/storage/itemid.h
@@ -31,7 +31,7 @@ typedef struct ItemIdData
typedef ItemIdData *ItemId;
/*
- * lp_flags has these possible states. An UNUSED line pointer is available
+ * lp_flags has these possible states. An UNUSED line pointer is available
* for immediate re-use, the other states are not.
*/
#define LP_UNUSED 0 /* unused (should always have lp_len=0) */
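
A small sketch of how callers test for that state ("page" and "offnum"
are assumed):

    ItemId      itemId = PageGetItemId(page, offnum);

    if (!ItemIdIsUsed(itemId))          /* lp_flags == LP_UNUSED */
    {
        /* slot is available for immediate re-use */
    }
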
diff --git a/src/include/storage/itemptr.h b/src/include/storage/itemptr.h
index 3dc8902921..3a72f8d686 100644
--- a/src/include/storage/itemptr.h
+++ b/src/include/storage/itemptr.h
@@ -29,7 +29,7 @@
* tuple header on disk, it's very important not to waste space with
* structure padding bytes. The struct is designed to be six bytes long
* (it contains three int16 fields) but a few compilers will pad it to
- * eight bytes unless coerced. We apply appropriate persuasion where
+ * eight bytes unless coerced. We apply appropriate persuasion where
* possible, and to cope with unpersuadable compilers, we try to use
* "SizeOfIptrData" rather than "sizeof(ItemPointerData)" when computing
* on-disk sizes.
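
Callers normally stay clear of the layout issue by going through the
provided macros; a minimal sketch ("blkno" and "offnum" assumed):

    ItemPointerData tid;

    ItemPointerSet(&tid, blkno, offnum);
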
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index e2b27ccb98..a7a90981c8 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -43,7 +43,7 @@ extern bool Debug_deadlocks;
/*
* Top-level transactions are identified by VirtualTransactionIDs comprising
* the BackendId of the backend running the xact, plus a locally-assigned
- * LocalTransactionId. These are guaranteed unique over the short term,
+ * LocalTransactionId. These are guaranteed unique over the short term,
* but will be reused after a database restart; hence they should never
* be stored on disk.
*
@@ -159,7 +159,7 @@ typedef uint16 LOCKMETHODID;
/*
* LOCKTAG is the key information needed to look up a LOCK item in the
- * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
+ * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
*
* The LockTagType enum defines the different kinds of objects we can lock.
* We can handle up to 256 different LockTagTypes.
@@ -212,7 +212,7 @@ typedef struct LOCKTAG
/*
* These macros define how we map logical IDs of lockable objects into
- * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
+ * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_LOCKTAG_RELATION(locktag,dboid,reloid) \
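
A hedged usage sketch; note the target is a plain variable, respecting the
multiple-evaluation caveat ("reloid" is an assumed relation OID):

    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);
    (void) LockAcquire(&tag, AccessShareLock, false, false);
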
@@ -324,14 +324,14 @@ typedef struct LOCK
* a PROCLOCK struct.
*
* PROCLOCKTAG is the key information needed to look up a PROCLOCK item in the
- * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
+ * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
* of a lockable object and a holder/waiter for that object. (We can use
* pointers here because the PROCLOCKTAG need only be unique for the lifespan
* of the PROCLOCK, and it will never outlive the lock or the proc.)
*
* Internally to a backend, it is possible for the same lock to be held
* for different purposes: the backend tracks transaction locks separately
- * from session locks. However, this is not reflected in the shared-memory
+ * from session locks. However, this is not reflected in the shared-memory
* state: we only track which backend(s) hold the lock. This is OK since a
* backend can never block itself.
*
@@ -342,7 +342,7 @@ typedef struct LOCK
* as soon as convenient.
*
* releaseMask is workspace for LockReleaseAll(): it shows the locks due
- * to be released during the current call. This must only be examined or
+ * to be released during the current call. This must only be examined or
* set by the backend owning the PROCLOCK.
*
* Each PROCLOCK object is linked into lists for both the associated LOCK
@@ -375,7 +375,7 @@ typedef struct PROCLOCK
/*
* Each backend also maintains a local hash table with information about each
- * lock it is currently interested in. In particular the local table counts
+ * lock it is currently interested in. In particular the local table counts
* the number of times that lock has been acquired. This allows multiple
* requests for the same lock to be executed without additional accesses to
* shared memory. We also track the number of lock acquisitions per
@@ -420,7 +420,7 @@ typedef struct LOCALLOCK
/*
* This struct holds information passed from lmgr internals to the lock
- * listing user-level functions (in lockfuncs.c). For each PROCLOCK in
+ * listing user-level functions (in lockfuncs.c). For each PROCLOCK in
* the system, copies of the PROCLOCK object and associated PGPROC and
* LOCK objects are stored. Note there will often be multiple copies
* of the same PGPROC or LOCK --- to detect whether two are the same,
diff --git a/src/include/storage/pg_sema.h b/src/include/storage/pg_sema.h
index a0a2875aad..c63b4c07f9 100644
--- a/src/include/storage/pg_sema.h
+++ b/src/include/storage/pg_sema.h
@@ -6,7 +6,7 @@
* PostgreSQL requires counting semaphores (the kind that keep track of
* multiple unlock operations, and will allow an equal number of subsequent
* lock operations before blocking). The underlying implementation is
- * not the same on every platform. This file defines the API that must
+ * not the same on every platform. This file defines the API that must
* be provided by each port.
*
*
diff --git a/src/include/storage/pg_shmem.h b/src/include/storage/pg_shmem.h
index fb6f24f011..d26de6c884 100644
--- a/src/include/storage/pg_shmem.h
+++ b/src/include/storage/pg_shmem.h
@@ -10,7 +10,7 @@
*
* To simplify life for the SysV implementation, the ID is assumed to
* consist of two unsigned long values (these are key and ID in SysV
- * terms). Other platforms may ignore the second value if they need
+ * terms). Other platforms may ignore the second value if they need
* only one ID number.
*
*
diff --git a/src/include/storage/pos.h b/src/include/storage/pos.h
index 35b4cfe79c..502113511a 100644
--- a/src/include/storage/pos.h
+++ b/src/include/storage/pos.h
@@ -20,7 +20,7 @@
* been changed to just <offset> as the notion of having multiple pages
* within a block has been removed.
*
- * the 'offset' abstraction is somewhat confusing. it is NOT a byte
+ * the 'offset' abstraction is somewhat confusing. it is NOT a byte
* offset within the page; instead, it is an offset into the line
* pointer array contained on every page that store (heap or index)
* tuples.
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index e586572ef8..f76b9a0cdc 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -20,7 +20,7 @@
/*
* Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds
- * for non-aborted subtransactions of its current top transaction. These
+ * for non-aborted subtransactions of its current top transaction. These
* have to be treated as running XIDs by other backends.
*
* We also keep track of whether the cache overflowed (ie, the transaction has
@@ -51,7 +51,7 @@ struct XidCache
* Each backend has a PGPROC struct in shared memory. There is also a list of
* currently-unused PGPROC structs that will be reallocated to new backends.
*
- * links: list link for any list the PGPROC is in. When waiting for a lock,
+ * links: list link for any list the PGPROC is in. When waiting for a lock,
* the PGPROC is linked into that lock's waitProcs queue. A recycled PGPROC
* is linked into ProcGlobal's freeProcs list.
*
diff --git a/src/include/storage/relfilenode.h b/src/include/storage/relfilenode.h
index 92cfb3ef1f..1146f8abae 100644
--- a/src/include/storage/relfilenode.h
+++ b/src/include/storage/relfilenode.h
@@ -44,7 +44,7 @@ typedef enum ForkNumber
* spcNode identifies the tablespace of the relation. It corresponds to
* pg_tablespace.oid.
*
- * dbNode identifies the database of the relation. It is zero for
+ * dbNode identifies the database of the relation. It is zero for
* "shared" relations (those common to all databases of a cluster).
* Nonzero dbNode values correspond to pg_database.oid.
*
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 97ccdbebf6..96316b7de6 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -36,7 +36,7 @@
*
* int TAS(slock_t *lock)
* Atomic test-and-set instruction. Attempt to acquire the lock,
- * but do *not* wait. Returns 0 if successful, nonzero if unable
+ * but do *not* wait. Returns 0 if successful, nonzero if unable
* to acquire the lock.
*
* TAS() is NOT part of the API, and should never be called directly.
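
The supported interface is one level up, in storage/spin.h; a minimal
sketch:

    static slock_t mylock;

    SpinLockInit(&mylock);
    SpinLockAcquire(&mylock);
    /* keep the critical section very short: no elog, no palloc */
    SpinLockRelease(&mylock);
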
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index f332ec285e..fbadae6c09 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -23,9 +23,9 @@
* invalidates an entry in a catcache, one that invalidates a relcache entry,
* and one that invalidates an smgr cache entry. More types could be added
* if needed. The message type is identified by the first "int16" field of
- * the message struct. Zero or positive means a catcache inval message (and
+ * the message struct. Zero or positive means a catcache inval message (and
* also serves as the catcache ID field). -1 means a relcache inval message.
- * -2 means an smgr inval message. Other negative values are available to
+ * -2 means an smgr inval message. Other negative values are available to
* identify other inval message types.
*
* Catcache inval events are initially driven by detecting tuple inserts,
diff --git a/src/include/storage/sinvaladt.h b/src/include/storage/sinvaladt.h
index 87a8c6d3a1..39b8b3d506 100644
--- a/src/include/storage/sinvaladt.h
+++ b/src/include/storage/sinvaladt.h
@@ -4,7 +4,7 @@
* POSTGRES shared cache invalidation data manager.
*
* The shared cache invalidation manager is responsible for transmitting
- * invalidation messages between backends. Any message sent by any backend
+ * invalidation messages between backends. Any message sent by any backend
* must be delivered to all already-running backends before it can be
* forgotten. (If we run out of space, we instead deliver a "RESET"
* message to backends that have fallen too far behind.)
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index 957f2108d5..ef5e685486 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -30,7 +30,7 @@
*
* An SMgrRelation may have an "owner", which is just a pointer to it from
* somewhere else; smgr.c will clear this pointer if the SMgrRelation is
- * closed. We use this to avoid dangling pointers from relcache to smgr
+ * closed. We use this to avoid dangling pointers from relcache to smgr
* without having to make the smgr explicitly aware of relcache. There
* can't be more than one "owner" pointer per SMgrRelation, but that's
* all we need.
@@ -47,7 +47,7 @@ typedef struct SMgrRelationData
/*
* Fields below here are intended to be private to smgr.c and its
- * submodules. Do not touch them from elsewhere.
+ * submodules. Do not touch them from elsewhere.
*/
int smgr_which; /* storage manager selector */
diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h
index 202f0df459..db1f53b7a3 100644
--- a/src/include/tcop/dest.h
+++ b/src/include/tcop/dest.h
@@ -29,14 +29,14 @@
*
* CreateDestReceiver returns a receiver object appropriate to the specified
* destination. The executor, as well as utility statements that can return
- * tuples, are passed the resulting DestReceiver* pointer. Each executor run
+ * tuples, are passed the resulting DestReceiver* pointer. Each executor run
* or utility execution calls the receiver's rStartup method, then the
* receiveSlot method (zero or more times), then the rShutdown method.
* The same receiver object may be re-used multiple times; eventually it is
* destroyed by calling its rDestroy method.
*
* In some cases, receiver objects require additional parameters that must
- * be passed to them after calling CreateDestReceiver. Since the set of
+ * be passed to them after calling CreateDestReceiver. Since the set of
* parameters varies for different receiver types, this is not handled by
* this module, but by direct calls from the calling code to receiver type
* specific functions.
@@ -45,10 +45,10 @@
* allocated object (for destination types that require no local state),
* in which case rDestroy is a no-op. Alternatively it can be a palloc'd
* object that has DestReceiver as its first field and contains additional
- * fields (see printtup.c for an example). These additional fields are then
+ * fields (see printtup.c for an example). These additional fields are then
* accessible to the DestReceiver functions by casting the DestReceiver*
- * pointer passed to them. The palloc'd object is pfree'd by the rDestroy
- * method. Note that the caller of CreateDestReceiver should take care to
+ * pointer passed to them. The palloc'd object is pfree'd by the rDestroy
+ * method. Note that the caller of CreateDestReceiver should take care to
* do so in a memory context that is long-lived enough for the receiver
* object not to disappear while still needed.
*
@@ -79,7 +79,7 @@
* destination. Someday this will probably need to be improved.
*
* Note: only the values DestNone, DestDebug, DestRemote are legal for the
- * global variable whereToSendOutput. The other values may be used
+ * global variable whereToSendOutput. The other values may be used
* as the destination for individual commands.
* ----------------
*/
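
A hedged sketch of that lifecycle, with signatures as in the branches this
patch targets ("tupdesc" and "slot" are assumed; later releases change
receiveSlot's return type):

    DestReceiver *dest = CreateDestReceiver(DestNone);

    (*dest->rStartup) (dest, (int) CMD_SELECT, tupdesc);
    (*dest->receiveSlot) (slot, dest);  /* called zero or more times */
    (*dest->rShutdown) (dest);
    (*dest->rDestroy) (dest);
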
diff --git a/src/include/tcop/tcopdebug.h b/src/include/tcop/tcopdebug.h
index e16fd527ef..7773ee21c0 100644
--- a/src/include/tcop/tcopdebug.h
+++ b/src/include/tcop/tcopdebug.h
@@ -24,7 +24,7 @@
/* ----------------
* TCOP_SHOWSTATS controls whether or not buffer and
- * access method statistics are shown for each query. -cim 2/9/89
+ * access method statistics are shown for each query. -cim 2/9/89
* ----------------
*/
#undef TCOP_SHOWSTATS
diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h
index 5c38822d5d..35fadb68ca 100644
--- a/src/include/utils/acl.h
+++ b/src/include/utils/acl.h
@@ -80,11 +80,11 @@ typedef struct AclItem
/*
* Definitions for convenient access to Acl (array of AclItem).
* These are standard PostgreSQL arrays, but are restricted to have one
- * dimension and no nulls. We also ignore the lower bound when reading,
+ * dimension and no nulls. We also ignore the lower bound when reading,
* and set it to one when writing.
*
* CAUTION: as of PostgreSQL 7.1, these arrays are toastable (just like all
- * other array types). Therefore, be careful to detoast them with the
+ * other array types). Therefore, be careful to detoast them with the
* macros provided, unless you know for certain that a particular array
* can't have been toasted.
*/
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index dec27424d0..7019bdb621 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -76,13 +76,13 @@ typedef struct catctup
/*
* Each tuple in a cache is a member of a Dllist that stores the elements
- * of its hash bucket. We keep each Dllist in LRU order to speed repeated
+ * of its hash bucket. We keep each Dllist in LRU order to speed repeated
* lookups.
*/
Dlelem cache_elem; /* list member of per-bucket list */
/*
- * The tuple may also be a member of at most one CatCList. (If a single
+ * The tuple may also be a member of at most one CatCList. (If a single
* catcache is list-searched with varying numbers of keys, we may have to
* make multiple entries for the same tuple because of this restriction.
* Currently, that's not expected to be common, so we accept the potential
@@ -99,7 +99,7 @@ typedef struct catctup
*
* A negative cache entry is an assertion that there is no tuple matching
* a particular key. This is just as useful as a normal entry so far as
- * avoiding catalog searches is concerned. Management of positive and
+ * avoiding catalog searches is concerned. Management of positive and
* negative entries is identical.
*/
int refcount; /* number of active references */
@@ -118,7 +118,7 @@ typedef struct catclist
/*
* A CatCList describes the result of a partial search, ie, a search using
- * only the first K key columns of an N-key cache. We form the keys used
+ * only the first K key columns of an N-key cache. We form the keys used
* into a tuple (with other attributes NULL) to represent the stored key
* set. The CatCList object contains links to cache entries for all the
* table rows satisfying the partial key. (Note: none of these will be
diff --git a/src/include/utils/datetime.h b/src/include/utils/datetime.h
index 4260719c84..42b087b30c 100644
--- a/src/include/utils/datetime.h
+++ b/src/include/utils/datetime.h
@@ -274,7 +274,7 @@ extern const int day_tab[2][13];
/*
* Datetime input parsing routines (ParseDateTime, DecodeDateTime, etc)
- * return zero or a positive value on success. On failure, they return
+ * return zero or a positive value on success. On failure, they return
* one of these negative code values. DateTimeParseError may be used to
* produce a correct ereport.
*/
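
A minimal sketch of the convention, with the surrounding variables assumed
to be set up as in the usual ParseDateTime/DecodeDateTime callers:

    dterr = DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, &tz);
    if (dterr != 0)
        DateTimeParseError(dterr, str, "timestamp");    /* ereport()s */
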
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index b6fc5e6771..d141bbbaa5 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -89,13 +89,13 @@
* ... other errxxx() fields as needed ...));
*
* The error level is required, and so is a primary error message (errmsg
- * or errmsg_internal). All else is optional. errcode() defaults to
+ * or errmsg_internal). All else is optional. errcode() defaults to
* ERRCODE_INTERNAL_ERROR if elevel is ERROR or more, ERRCODE_WARNING
* if elevel is WARNING, or ERRCODE_SUCCESSFUL_COMPLETION if elevel is
* NOTICE or below.
*
* ereport_domain() allows a message domain to be specified, for modules that
- * wish to use a different message catalog from the backend's. To avoid having
+ * wish to use a different message catalog from the backend's. To avoid having
* one copy of the default text domain per .o file, we define it as NULL here
* and have errstart insert the default text domain. Modules can either use
* ereport_domain() directly, or preferably they can override the TEXTDOMAIN
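
A canonical instance of the pattern this comment documents:

    ereport(ERROR,
            (errcode(ERRCODE_DIVISION_BY_ZERO),
             errmsg("division by zero")));
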
diff --git a/src/include/utils/errcodes.h b/src/include/utils/errcodes.h
index 6524b8c5fe..563229581b 100644
--- a/src/include/utils/errcodes.h
+++ b/src/include/utils/errcodes.h
@@ -5,7 +5,7 @@
*
* The error code list is kept in its own source file for possible use by
* automatic tools. Each error code is identified by a five-character string
- * following the SQLSTATE conventions. The exact representation of the
+ * following the SQLSTATE conventions. The exact representation of the
* string is determined by the MAKE_SQLSTATE() macro, which is not defined
* in this file; it can be defined by the caller for special purposes.
*
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index e12591593a..c61dcbc2e1 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -40,7 +40,7 @@
* configuration file, or by client request in the connection startup
* packet (e.g., from libpq's PGOPTIONS variable). Furthermore, an
* already-started backend will ignore changes to such an option in the
- * configuration file. The idea is that these options are fixed for a
+ * configuration file. The idea is that these options are fixed for a
* given backend once it's started, but they can vary across backends.
*
* SUSET options can be set at postmaster startup, with the SIGHUP
@@ -74,7 +74,7 @@ typedef enum
*
* PGC_S_TEST is used when testing values to be stored as per-database or
* per-user defaults ("doit" will always be false, so this never gets stored
- * as the actual source of any value). This is an interactive case, but
+ * as the actual source of any value). This is an interactive case, but
* it needs its own source value because some assign hooks need to make
* different validity checks in this case.
*/
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index d81d72d817..c0bfcdeaf3 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -30,7 +30,7 @@ typedef int (*HashCompareFunc) (const void *key1, const void *key2,
Size keysize);
/*
- * Key copying functions must have this signature. The return value is not
+ * Key copying functions must have this signature. The return value is not
* used. (The definition is set up to allow memcpy() and strncpy() to be
* used directly.)
*/
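
A hedged sketch of creating such a table; keys are copied with memcpy by
default, matching the signature above (MyEntry is a hypothetical entry
struct whose first field is the Oid key):

    HASHCTL     ctl;
    HTAB       *htab;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(MyEntry);
    ctl.hash = tag_hash;
    htab = hash_create("example hash", 128, &ctl,
                       HASH_ELEM | HASH_FUNCTION);
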
diff --git a/src/include/utils/inet.h b/src/include/utils/inet.h
index f67932b15d..3f7904943d 100644
--- a/src/include/utils/inet.h
+++ b/src/include/utils/inet.h
@@ -40,7 +40,7 @@ typedef struct
/*
* Both INET and CIDR addresses are represented within Postgres as varlena
* objects, ie, there is a varlena header in front of the struct type
- * depicted above. This struct depicts what we actually have in memory
+ * depicted above. This struct depicts what we actually have in memory
* in "uncompressed" cases. Note that since the maximum data size is only
* 18 bytes, INET/CIDR will invariably be stored into tuples using the
* 1-byte-header varlena format. However, we have to be prepared to cope
diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h
index 5b420d00ec..3d7f2bea82 100644
--- a/src/include/utils/memutils.h
+++ b/src/include/utils/memutils.h
@@ -30,7 +30,7 @@
* be summarily denied.
*
* XXX This is deliberately chosen to correspond to the limiting size
- * of varlena objects under TOAST. See VARATT_MASK_SIZE in postgres.h.
+ * of varlena objects under TOAST. See VARATT_MASK_SIZE in postgres.h.
*
* XXX Also, various places in aset.c assume they can compute twice an
* allocation's size without overflow, so beware of raising this.
@@ -43,8 +43,8 @@
* All chunks allocated by any memory context manager are required to be
* preceded by a StandardChunkHeader at a spacing of STANDARDCHUNKHEADERSIZE.
* A currently-allocated chunk must contain a backpointer to its owning
- * context as well as the allocated size of the chunk. The backpointer is
- * used by pfree() and repalloc() to find the context to call. The allocated
+ * context as well as the allocated size of the chunk. The backpointer is
+ * used by pfree() and repalloc() to find the context to call. The allocated
* size is not absolutely essential, but it's expected to be needed by any
* reasonable implementation.
*/
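
A hedged sketch of the backpointer arithmetic described ("ptr" is assumed
to be a chunk returned by palloc):

    StandardChunkHeader *header;

    header = (StandardChunkHeader *)
        ((char *) ptr - STANDARDCHUNKHEADERSIZE);
    /* header->context: owning context; header->size: allocated size */
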
diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h
index e76ed7f542..135b0e5013 100644
--- a/src/include/utils/palloc.h
+++ b/src/include/utils/palloc.h
@@ -6,9 +6,9 @@
* This file contains the basic memory allocation interface that is
* needed by almost every backend module. It is included directly by
* postgres.h, so the definitions here are automatically available
- * everywhere. Keep it lean!
+ * everywhere. Keep it lean!
*
- * Memory allocation occurs within "contexts". Every chunk obtained from
+ * Memory allocation occurs within "contexts". Every chunk obtained from
* palloc()/MemoryContextAlloc() is allocated within a specific context.
* The entire contents of a context can be freed easily and quickly by
* resetting or deleting the context --- this is both faster and less
@@ -29,7 +29,7 @@
#define PALLOC_H
/*
- * Type MemoryContextData is declared in nodes/memnodes.h. Most users
+ * Type MemoryContextData is declared in nodes/memnodes.h. Most users
* of memory allocation should just treat it as an abstract type, so we
* do not provide the struct contents here.
*/
@@ -37,7 +37,7 @@ typedef struct MemoryContextData *MemoryContext;
/*
* CurrentMemoryContext is the default allocation context for palloc().
- * We declare it here so that palloc() can be a macro. Avoid accessing it
+ * We declare it here so that palloc() can be a macro. Avoid accessing it
* directly! Instead, use MemoryContextSwitchTo() to change the setting.
*/
extern PGDLLIMPORT MemoryContext CurrentMemoryContext;
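
A minimal sketch of the prescribed usage:

    MemoryContext oldcxt;
    void       *ptr;

    oldcxt = MemoryContextSwitchTo(TopMemoryContext);
    ptr = palloc(64);           /* allocated in TopMemoryContext */
    MemoryContextSwitchTo(oldcxt);
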
diff --git a/src/include/utils/pg_crc.h b/src/include/utils/pg_crc.h
index 73def08c12..cdc0f5d649 100644
--- a/src/include/utils/pg_crc.h
+++ b/src/include/utils/pg_crc.h
@@ -73,7 +73,7 @@ extern CRCDLLIMPORT const uint32 pg_crc32_table[];
/*
* crc0 represents the LSBs of the 64-bit value, crc1 the MSBs. Note that
* with crc0 placed first, the output of 32-bit and 64-bit implementations
- * will be bit-compatible only on little-endian architectures. If it were
+ * will be bit-compatible only on little-endian architectures. If it were
* important to make the two possible implementations bit-compatible on
* all machines, we could do a configure test to decide how to order the
* two fields, but it seems not worth the trouble.
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index ea919bd456..758528c597 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -39,7 +39,7 @@
* losing any flexibility if a replan turns out to be necessary.
*
* Note: the string referenced by commandTag is not subsidiary storage;
- * it is assumed to be a compile-time-constant string. As with portals,
+ * it is assumed to be a compile-time-constant string. As with portals,
* commandTag shall be NULL if and only if the original query string (before
* rewriting) was an empty string.
*/
diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h
index ca8546c1e5..362c35a88a 100644
--- a/src/include/utils/portal.h
+++ b/src/include/utils/portal.h
@@ -57,8 +57,8 @@
* single result from the user's viewpoint. However, the rule rewriter
* may expand the single source query to zero or many actual queries.)
*
- * PORTAL_ONE_SELECT: the portal contains one single SELECT query. We run
- * the Executor incrementally as results are demanded. This strategy also
+ * PORTAL_ONE_SELECT: the portal contains one single SELECT query. We run
+ * the Executor incrementally as results are demanded. This strategy also
* supports holdable cursors (the Executor results can be dumped into a
* tuplestore for access after transaction completion).
*
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index ca9913bda3..3bdc4e7c73 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -180,7 +180,7 @@ typedef struct RelationData
* Note: rd_amcache is available for index AMs to cache private data about
* an index. This must be just a cache since it may get reset at any time
* (in particular, it will get reset by a relcache inval message for the
- * index). If used, it must point to a single memory chunk palloc'd in
+ * index). If used, it must point to a single memory chunk palloc'd in
* rd_indexcxt. A relcache reset will include freeing that chunk and
* setting rd_amcache = NULL.
*/
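
A hedged sketch of that protocol ("rel" is an assumed open index Relation;
MyAmData is a hypothetical AM-private struct):

    if (rel->rd_amcache == NULL)
        rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
                                             sizeof(MyAmData));
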
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index 42e7fa3d90..bdd261e75d 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -24,7 +24,7 @@ typedef struct RelationData *Relation;
/* ----------------
* RelationPtr is used in the executor to support index scans
* where we have to keep track of several index relations in an
- * array. -cim 9/10/89
+ * array. -cim 9/10/89
* ----------------
*/
typedef Relation *RelationPtr;
diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h
index b8e4510bf0..c37d509d8c 100644
--- a/src/include/utils/resowner.h
+++ b/src/include/utils/resowner.h
@@ -42,7 +42,7 @@ extern PGDLLIMPORT ResourceOwner TopTransactionResourceOwner;
/*
* Resource releasing is done in three phases: pre-locks, locks, and
- * post-locks. The pre-lock phase must release any resources that are
+ * post-locks. The pre-lock phase must release any resources that are
* visible to other backends (such as pinned buffers); this ensures that
* when we release a lock that another backend may be waiting on, it will
* see us as being fully out of our transaction. The post-lock phase
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index 38371ff89f..d17e22107b 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -23,7 +23,7 @@
/*
* Note: the default selectivity estimates are not chosen entirely at random.
* We want them to be small enough to ensure that indexscans will be used if
- * available, for typical table densities of ~100 tuples/page. Thus, for
+ * available, for typical table densities of ~100 tuples/page. Thus, for
* example, 0.01 is not quite small enough, since that makes it appear that
* nearly all pages will be hit anyway. Also, since we sometimes estimate
* eqsel as 1/num_distinct, we probably want DEFAULT_NUM_DISTINCT to equal
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index d515c70c50..19b6eeba5f 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -76,7 +76,7 @@ typedef struct
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
diff --git a/src/include/utils/tqual.h b/src/include/utils/tqual.h
index 7d6a43650e..1923cbccaa 100644
--- a/src/include/utils/tqual.h
+++ b/src/include/utils/tqual.h
@@ -3,7 +3,7 @@
* tqual.h
* POSTGRES "time qualification" definitions, ie, tuple visibility rules.
*
- * Should be moved/renamed... - vadim 07/28/98
+ * Should be moved/renamed... - vadim 07/28/98
*
* Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index e351536d47..d45204fe74 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
diff --git a/src/include/utils/tuplestore.h b/src/include/utils/tuplestore.h
index b1a5e5ce26..90354d56f6 100644
--- a/src/include/utils/tuplestore.h
+++ b/src/include/utils/tuplestore.h
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
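
A hedged sketch of the write-then-read-early property ("slot" is an
assumed TupleTableSlot):

    Tuplestorestate *ts;

    ts = tuplestore_begin_heap(true, false, work_mem);
    tuplestore_puttupleslot(ts, slot);  /* writer side */
    /* a reader may call tuplestore_gettupleslot() before writing ends */
    tuplestore_end(ts);
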
diff --git a/src/interfaces/ecpg/include/sqlca.h b/src/interfaces/ecpg/include/sqlca.h
index 52fcbf830f..41e5b550af 100644
--- a/src/interfaces/ecpg/include/sqlca.h
+++ b/src/interfaces/ecpg/include/sqlca.h
@@ -40,7 +40,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h
index 0b5b67e02c..8fec8f5050 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt.h
+++ b/src/interfaces/ecpg/pgtypeslib/dt.h
@@ -255,7 +255,7 @@ do { \
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
diff --git a/src/interfaces/ecpg/pgtypeslib/interval.c b/src/interfaces/ecpg/pgtypeslib/interval.c
index af2a077157..285c6e1b6c 100644
--- a/src/interfaces/ecpg/pgtypeslib/interval.c
+++ b/src/interfaces/ecpg/pgtypeslib/interval.c
@@ -160,7 +160,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c
index 790765ae64..8e29b073a9 100644
--- a/src/interfaces/ecpg/pgtypeslib/numeric.c
+++ b/src/interfaces/ecpg/pgtypeslib/numeric.c
@@ -946,7 +946,7 @@ PGTYPESnumeric_sub(numeric *var1, numeric *var2, numeric *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Accuracy of result is determined by global_rscale.
+ * in result. Accuracy of result is determined by global_rscale.
* ----------
*/
int
diff --git a/src/interfaces/ecpg/preproc/parser.c b/src/interfaces/ecpg/preproc/parser.c
index a4644cd305..d58ef53870 100644
--- a/src/interfaces/ecpg/preproc/parser.c
+++ b/src/interfaces/ecpg/preproc/parser.c
@@ -35,7 +35,7 @@ static YYLTYPE lookahead_yylloc; /* yylloc for lookahead token */
* Intermediate filter between parser and base lexer (base_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
diff --git a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
index 2f8ee74971..113411d555 100644
--- a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
+++ b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
@@ -57,7 +57,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/preproc-init.c b/src/interfaces/ecpg/test/expected/preproc-init.c
index 1307915fad..5b30385af3 100644
--- a/src/interfaces/ecpg/test/expected/preproc-init.c
+++ b/src/interfaces/ecpg/test/expected/preproc-init.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-array.c b/src/interfaces/ecpg/test/expected/sql-array.c
index cdd2bea078..55d92f2a00 100644
--- a/src/interfaces/ecpg/test/expected/sql-array.c
+++ b/src/interfaces/ecpg/test/expected/sql-array.c
@@ -59,7 +59,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-code100.c b/src/interfaces/ecpg/test/expected/sql-code100.c
index e250690e9c..c5a6e4b0a1 100644
--- a/src/interfaces/ecpg/test/expected/sql-code100.c
+++ b/src/interfaces/ecpg/test/expected/sql-code100.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-copystdout.c b/src/interfaces/ecpg/test/expected/sql-copystdout.c
index 563732b05d..33ea2133d6 100644
--- a/src/interfaces/ecpg/test/expected/sql-copystdout.c
+++ b/src/interfaces/ecpg/test/expected/sql-copystdout.c
@@ -53,7 +53,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-define.c b/src/interfaces/ecpg/test/expected/sql-define.c
index b9571ec53d..4a1d7ee6f0 100644
--- a/src/interfaces/ecpg/test/expected/sql-define.c
+++ b/src/interfaces/ecpg/test/expected/sql-define.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dynalloc.c b/src/interfaces/ecpg/test/expected/sql-dynalloc.c
index d95accc99f..df9f4d6347 100644
--- a/src/interfaces/ecpg/test/expected/sql-dynalloc.c
+++ b/src/interfaces/ecpg/test/expected/sql-dynalloc.c
@@ -52,7 +52,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dynalloc2.c b/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
index 18634deb78..90bfb5c1f1 100644
--- a/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
+++ b/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
@@ -52,7 +52,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dyntest.c b/src/interfaces/ecpg/test/expected/sql-dyntest.c
index dfa39696ea..31d5454c07 100644
--- a/src/interfaces/ecpg/test/expected/sql-dyntest.c
+++ b/src/interfaces/ecpg/test/expected/sql-dyntest.c
@@ -105,7 +105,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-indicators.c b/src/interfaces/ecpg/test/expected/sql-indicators.c
index e805e4ee00..5e167b1944 100644
--- a/src/interfaces/ecpg/test/expected/sql-indicators.c
+++ b/src/interfaces/ecpg/test/expected/sql-indicators.c
@@ -53,7 +53,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-alloc.c b/src/interfaces/ecpg/test/expected/thread-alloc.c
index b99b868001..424ad1ee1a 100644
--- a/src/interfaces/ecpg/test/expected/thread-alloc.c
+++ b/src/interfaces/ecpg/test/expected/thread-alloc.c
@@ -74,7 +74,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-descriptor.c b/src/interfaces/ecpg/test/expected/thread-descriptor.c
index 2584626f4f..e2be89dec0 100644
--- a/src/interfaces/ecpg/test/expected/thread-descriptor.c
+++ b/src/interfaces/ecpg/test/expected/thread-descriptor.c
@@ -65,7 +65,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-prep.c b/src/interfaces/ecpg/test/expected/thread-prep.c
index 9af4ba522c..8ea445ddda 100644
--- a/src/interfaces/ecpg/test/expected/thread-prep.c
+++ b/src/interfaces/ecpg/test/expected/thread-prep.c
@@ -74,7 +74,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index a8dc622f07..e5c0ba2088 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -1014,7 +1014,7 @@ pg_fe_getauthname(PQExpBuffer errorMessage)
*
* This is intended to be used by client applications that wish to send
* commands like ALTER USER joe PASSWORD 'pwd'. The password need not
- * be sent in cleartext if it is encrypted on the client side. This is
+ * be sent in cleartext if it is encrypted on the client side. This is
* good because it ensures the cleartext password won't end up in logs,
* pg_stat displays, etc. We export the function so that clients won't
* be dependent on low-level details like whether the enceyption is MD5
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 63306caf23..62e380b3a1 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -116,7 +116,7 @@ static int ldapServiceLookup(const char *purl, PQconninfoOption *options,
*
* PQconninfoOptions[] is a constant static array that we use to initialize
* a dynamically allocated working copy. All the "val" fields in
- * PQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
+ * PQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
* fields point to malloc'd strings that should be freed when the working
* array is freed (see PQconninfoFree).
* ----------
@@ -324,7 +324,7 @@ PQconnectdb(const char *conninfo)
* See comment for PQconnectdb for the definition of the string format.
*
* Returns a PGconn*. If NULL is returned, a malloc error has occurred, and
- * you should not attempt to proceed with this connection. If the status
+ * you should not attempt to proceed with this connection. If the status
* field of the connection returned is CONNECTION_BAD, an error has
* occurred. In this case you should call PQfinish on the result, (perhaps
* inspecting the error message first). Other fields of the structure may not
@@ -566,14 +566,14 @@ connectOptions2(PGconn *conn)
*
* Parse an empty string like PQconnectdb() would do and return the
* resulting connection options array, ie, all the default values that are
- * available from the environment etc. On error (eg out of memory),
+ * available from the environment etc. On error (eg out of memory),
* NULL is returned.
*
* Using this function, an application may determine all possible options
* and their current default values.
*
* NOTE: as of PostgreSQL 7.0, the returned array is dynamically allocated
- * and should be freed when no longer needed via PQconninfoFree(). (In prior
+ * and should be freed when no longer needed via PQconninfoFree(). (In prior
* versions, the returned array was static, but that's not thread-safe.)
* Pre-7.0 applications that use this function will see a small memory leak
* until they are updated to call PQconninfoFree.
@@ -952,7 +952,7 @@ connectDBComplete(PGconn *conn)
for (;;)
{
/*
- * Wait, if necessary. Note that the initial state (just after
+ * Wait, if necessary. Note that the initial state (just after
* PQconnectStart) is to wait for the socket to select for writing.
*/
switch (flag)
@@ -1014,7 +1014,7 @@ connectDBComplete(PGconn *conn)
* will not block.
* o If you do not supply an IP address for the remote host (i.e. you
* supply a host name instead) then PQconnectStart will block on
- * gethostbyname. You will be fine if using Unix sockets (i.e. by
+ * gethostbyname. You will be fine if using Unix sockets (i.e. by
* supplying neither a host name nor a host address).
* o If your backend wants to use Kerberos authentication then you must
* supply both a host name and a host address, otherwise this function
@@ -1199,7 +1199,7 @@ keep_going: /* We will come back to here until there is
/*
* This connection failed --- set up error report, then
* close socket (do it this way in case close() affects
- * the value of errno...). We will ignore the connect()
+ * the value of errno...). We will ignore the connect()
* failure and keep going if there are more addresses.
*/
connectFailureMessage(conn, SOCK_ERRNO);
@@ -2094,7 +2094,7 @@ freePGconn(PGconn *conn)
* - properly close a connection to the backend
*
* This should reset or release all transient state, but NOT the connection
- * parameters. On exit, the PGconn should be in condition to start a fresh
+ * parameters. On exit, the PGconn should be in condition to start a fresh
* connection with the same parameters (see PQreset()).
*/
static void
@@ -2230,7 +2230,7 @@ PQreset(PGconn *conn)
if (connectDBStart(conn) && connectDBComplete(conn))
{
/*
- * Notify event procs of successful reset. We treat an event proc
+ * Notify event procs of successful reset. We treat an event proc
* failure as disabling the connection ... good idea?
*/
int i;
@@ -2290,7 +2290,7 @@ PQresetPoll(PGconn *conn)
if (status == PGRES_POLLING_OK)
{
/*
- * Notify event procs of successful reset. We treat an event proc
+ * Notify event procs of successful reset. We treat an event proc
* failure as disabling the connection ... good idea?
*/
int i;
@@ -2479,7 +2479,7 @@ cancel_errReturn:
* Returns TRUE if able to send the cancel request, FALSE if not.
*
* On failure, an error message is stored in *errbuf, which must be of size
- * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
+ * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
* success return.
*/
int
@@ -3206,7 +3206,7 @@ parseServiceInfo(PQconninfoOption *options, PQExpBuffer errorMessage)
* PQconninfoParse
*
* Parse a string like PQconnectdb() would do and return the
- * resulting connection options array. NULL is returned on failure.
+ * resulting connection options array. NULL is returned on failure.
* The result contains only options specified directly in the string,
* not any possible default values.
*
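
A minimal client-side sketch of that contract:

    char       *errmsg = NULL;
    PQconninfoOption *opts;

    opts = PQconninfoParse("host=localhost dbname=test", &errmsg);
    if (opts == NULL)
        fprintf(stderr, "%s\n", errmsg ? errmsg : "out of memory");
    PQconninfoFree(opts);       /* safe on NULL */
    PQfreemem(errmsg);
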
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index 53be95406c..03a4543c3c 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -91,7 +91,7 @@ static int check_field_number(const PGresult *res, int field_num);
* doesn't tell us up front how many tuples will be returned.)
* All other subsidiary storage for a PGresult is kept in PGresult_data blocks
* of size PGRESULT_DATA_BLOCKSIZE. The overhead at the start of each block
- * is just a link to the next one, if any. Free-space management info is
+ * is just a link to the next one, if any. Free-space management info is
* kept in the owning PGresult.
* A query returning a small amount of data will thus require three malloc
* calls: one for the PGresult, one for the tuples pointer array, and one
@@ -110,7 +110,7 @@ static int check_field_number(const PGresult *res, int field_num);
* blocks, instead of being crammed into a regular allocation block.
* Requirements for correct function are:
* PGRESULT_ALIGN_BOUNDARY must be a multiple of the alignment requirements
- * of all machine data types. (Currently this is set from configure
+ * of all machine data types. (Currently this is set from configure
* tests, so it should be OK automatically.)
* PGRESULT_SEP_ALLOC_THRESHOLD + PGRESULT_BLOCK_OVERHEAD <=
* PGRESULT_DATA_BLOCKSIZE
@@ -263,10 +263,10 @@ PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs)
* Returns a deep copy of the provided 'src' PGresult, which cannot be NULL.
* The 'flags' argument controls which portions of the result will or will
* NOT be copied. The created result is always put into the
- * PGRES_TUPLES_OK status. The source result error message is not copied,
+ * PGRES_TUPLES_OK status. The source result error message is not copied,
* although cmdStatus is.
*
- * To set custom attributes, use PQsetResultAttrs. That function requires
+ * To set custom attributes, use PQsetResultAttrs. That function requires
* that there are no attrs contained in the result, so to use that
* function you cannot use the PG_COPYRES_ATTRS or PG_COPYRES_TUPLES
* options with this function.
@@ -294,7 +294,7 @@ PQcopyResult(const PGresult *src, int flags)
if (!dest)
return NULL;
- /* Always copy these over. Is cmdStatus really useful here? */
+ /* Always copy these over. Is cmdStatus really useful here? */
dest->client_encoding = src->client_encoding;
strcpy(dest->cmdStatus, src->cmdStatus);
@@ -749,7 +749,7 @@ pqPrepareAsyncResult(PGconn *conn)
PGresult *res;
/*
- * conn->result is the PGresult to return. If it is NULL (which probably
+ * conn->result is the PGresult to return. If it is NULL (which probably
* shouldn't happen) we assume there is an appropriate error message in
* conn->errorMessage.
*/
@@ -1421,7 +1421,7 @@ pqHandleSendFailure(PGconn *conn)
/* loop until no more data readable */ ;
/*
- * Parse any available input messages. Since we are in PGASYNC_IDLE
+ * Parse any available input messages. Since we are in PGASYNC_IDLE
* state, only NOTICE and NOTIFY messages will be eaten.
*/
parseInput(conn);
@@ -1613,7 +1613,7 @@ getCopyResult(PGconn *conn, ExecStatusType copytype)
* If the server connection has been lost, don't pretend everything is
* hunky-dory; instead return a PGRES_FATAL_ERROR result, and reset the
* asyncStatus to idle (corresponding to what we'd do if we'd detected I/O
- * error in the earlier steps in PQgetResult). The text returned in the
+ * error in the earlier steps in PQgetResult). The text returned in the
* result is whatever is in conn->errorMessage; we hope that was filled
* with something relevant when the lost connection was detected.
*/
@@ -1847,7 +1847,7 @@ PQexecFinish(PGconn *conn)
* If the query was not even sent, return NULL; conn->errorMessage is set to
* a relevant message.
* If the query was sent, a new PGresult is returned (which could indicate
- * either success or failure). On success, the PGresult contains status
+ * either success or failure). On success, the PGresult contains status
* PGRES_COMMAND_OK, and its parameter and column-heading fields describe
* the statement's inputs and outputs respectively.
* The user is responsible for freeing the PGresult via PQclear()
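
A hedged client sketch of that contract, assuming "conn" is an open PGconn
and "mystmt" was prepared earlier:

    PGresult   *res = PQdescribePrepared(conn, "mystmt");

    if (res && PQresultStatus(res) == PGRES_COMMAND_OK)
        printf("%d params, %d columns\n",
               PQnparams(res), PQnfields(res));
    PQclear(res);
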
@@ -2183,7 +2183,7 @@ PQgetCopyData(PGconn *conn, char **buffer, int async)
* PQgetline - gets a newline-terminated string from the backend.
*
* Chiefly here so that applications can use "COPY <rel> to stdout"
- * and read the output string. Returns a null-terminated string in s.
+ * and read the output string. Returns a null-terminated string in s.
*
* XXX this routine is now deprecated, because it can't handle binary data.
* If called during a COPY BINARY we return EOF.
@@ -2297,7 +2297,7 @@ PQputnbytes(PGconn *conn, const char *buffer, int nbytes)
* the application must call this routine to finish the command protocol.
*
* When using protocol 3.0 this is deprecated; it's cleaner to use PQgetResult
- * to get the transfer status. Note however that when using 2.0 protocol,
+ * to get the transfer status. Note however that when using 2.0 protocol,
* recovering from a copy failure often requires a PQreset. PQendcopy will
* take care of that, PQgetResult won't.
*
@@ -2525,7 +2525,7 @@ PQfname(const PGresult *res, int field_num)
* downcasing in the frontend might follow different locale rules than
* downcasing in the backend...
*
- * Returns -1 if no match. In the present backend it is also possible
+ * Returns -1 if no match. In the present backend it is also possible
* to have multiple matches, in which case the first one is found.
*/
int
@@ -2935,7 +2935,7 @@ PQfreemem(void *ptr)
*
* This function is here only for binary backward compatibility.
* New code should use PQfreemem(). A macro will automatically map
- * calls to PQfreemem. It should be removed in the future. bjm 2003-03-24
+ * calls to PQfreemem. It should be removed in the future. bjm 2003-03-24
*/
#undef PQfreeNotify
diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c
index 0245d2b075..fa5364805f 100644
--- a/src/interfaces/libpq/fe-lobj.c
+++ b/src/interfaces/libpq/fe-lobj.c
@@ -714,7 +714,7 @@ lo_initialize(PGconn *conn)
MemSet((char *) lobjfuncs, 0, sizeof(PGlobjfuncs));
/*
- * Execute the query to get all the functions at once. In 7.3 and later
+ * Execute the query to get all the functions at once. In 7.3 and later
* we need to be schema-safe. lo_create only exists in 8.1 and up.
* lo_truncate only exists in 8.3 and up.
*/
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index 0e724144b9..8ab51b07f3 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -648,13 +648,13 @@ retry3:
/*
* Hack to deal with the fact that some kernels will only give us back
* 1 packet per recv() call, even if we asked for more and there is
- * more available. If it looks like we are reading a long message,
+ * more available. If it looks like we are reading a long message,
* loop back to recv() again immediately, until we run out of data or
* buffer space. Without this, the block-and-restart behavior of
* libpq's higher levels leads to O(N^2) performance on long messages.
*
* Since we left-justified the data above, conn->inEnd gives the
- * amount of data already read in the current message. We consider
+ * amount of data already read in the current message. We consider
* the message "long" once we have acquired 32k ...
*/
if (conn->inEnd > 32768 &&
diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c
index b99268c606..fe9ba15fde 100644
--- a/src/interfaces/libpq/fe-protocol2.c
+++ b/src/interfaces/libpq/fe-protocol2.c
@@ -171,7 +171,7 @@ pqSetenvPoll(PGconn *conn)
case SETENV_STATE_QUERY1_SEND:
{
/*
- * Issue query to get information we need. Here we must
+ * Issue query to get information we need. Here we must
* use begin/commit in case autocommit is off by default
* in a 7.3 server.
*
@@ -800,7 +800,7 @@ pqGetErrorNotice2(PGconn *conn, bool isError)
/*
* Since the message might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
* for stuff that is expected to be short.
*/
initPQExpBuffer(&workBuf);
@@ -894,10 +894,10 @@ failure:
/*
* checkXactStatus - attempt to track transaction-block status of server
*
- * This is called each time we receive a command-complete message. By
+ * This is called each time we receive a command-complete message. By
* watching for messages from BEGIN/COMMIT/ROLLBACK commands, we can do
* a passable job of tracking the server's xact status. BUT: this does
- * not work at all on 7.3 servers with AUTOCOMMIT OFF. (Man, was that
+ * not work at all on 7.3 servers with AUTOCOMMIT OFF. (Man, was that
* feature ever a mistake.) Caveat user.
*
* The tags known here are all those used as far back as 7.0; is it worth
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index 7131b87ea4..9f6d4f909c 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -166,7 +166,7 @@ pqParseInput3(PGconn *conn)
* ERROR messages are displayed using the notice processor;
* ParameterStatus is handled normally; anything else is just
* dropped on the floor after displaying a suitable warning
- * notice. (An ERROR is very possibly the backend telling us why
+ * notice. (An ERROR is very possibly the backend telling us why
* it is about to close the connection, so we don't want to just
* discard it...)
*/
@@ -361,7 +361,7 @@ pqParseInput3(PGconn *conn)
case 'd': /* Copy Data */
/*
- * If we see Copy Data, just silently drop it. This would
+ * If we see Copy Data, just silently drop it. This would
* only occur if application exits COPY OUT mode too
* early.
*/
@@ -370,7 +370,7 @@ pqParseInput3(PGconn *conn)
case 'c': /* Copy Done */
/*
- * If we see Copy Done, just silently drop it. This is
+ * If we see Copy Done, just silently drop it. This is
* the normal case during PQendcopy. We will keep
* swallowing data, expecting to see command-complete for
* the COPY command.
@@ -726,14 +726,14 @@ pqGetErrorNotice3(PGconn *conn, bool isError)
/*
* Since the fields might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
- * for stuff that is expected to be short. We shouldn't use
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * for stuff that is expected to be short. We shouldn't use
* conn->errorMessage either, since this might be only a notice.
*/
initPQExpBuffer(&workBuf);
/*
- * Make a PGresult to hold the accumulated fields. We temporarily lie
+ * Make a PGresult to hold the accumulated fields. We temporarily lie
* about the result status, so that PQmakeEmptyPGresult doesn't uselessly
* copy conn->errorMessage.
*/
@@ -918,7 +918,7 @@ reportErrorPosition(PQExpBuffer msg, const char *query, int loc, int encoding)
/*
* Each character might occupy multiple physical bytes in the string, and
* in some Far Eastern character sets it might take more than one screen
- * column as well. We compute the starting byte offset and starting
+ * column as well. We compute the starting byte offset and starting
* screen column of each logical character, and store these in qidx[] and
* scridx[] respectively.
*/
@@ -946,8 +946,8 @@ reportErrorPosition(PQExpBuffer msg, const char *query, int loc, int encoding)
/*
* Within the scanning loop, cno is the current character's logical
* number, qoffset is its offset in wquery, and scroffset is its starting
- * logical screen column (all indexed from 0). "loc" is the logical
- * character number of the error location. We scan to determine loc_line
+ * logical screen column (all indexed from 0). "loc" is the logical
+ * character number of the error location. We scan to determine loc_line
* (the 1-based line number containing loc) and ibeg/iend (first character
* number and last+1 character number of the line containing loc). Note
* that qidx[] and scridx[] are filled only as far as iend.
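
A sketch of the qidx[]/scridx[] construction described above, using
libpq's exported PQmblen() and PQdsplen(). Allocation and error handling
are elided; both arrays are assumed to hold strlen(query) + 1 entries:

    #include <libpq-fe.h>

    static void
    build_indexes(const char *query, int encoding, int *qidx, int *scridx)
    {
        int         qoffset = 0;
        int         scroffset = 0;
        int         cno = 0;

        while (query[qoffset] != '\0')
        {
            int         w = PQdsplen(query + qoffset, encoding);

            qidx[cno] = qoffset;
            scridx[cno] = scroffset;
            qoffset += PQmblen(query + qoffset, encoding);
            scroffset += (w > 0) ? w : 1;   /* count control chars as one column */
            cno++;
        }
        qidx[cno] = qoffset;        /* sentinel entries for end of string */
        scridx[cno] = scroffset;
    }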
@@ -1378,7 +1378,7 @@ pqGetCopyData3(PGconn *conn, char **buffer, int async)
for (;;)
{
/*
- * Collect the next input message. To make life simpler for async
+ * Collect the next input message. To make life simpler for async
* callers, we keep returning 0 until the next message is fully
* available, even if it is not Copy Data.
*/
@@ -1387,7 +1387,7 @@ pqGetCopyData3(PGconn *conn, char **buffer, int async)
{
/*
* On end-of-copy, exit COPY_OUT mode and let caller read status
- * with PQgetResult(). The normal case is that it's Copy Done,
+ * with PQgetResult(). The normal case is that it's Copy Done,
* but we let parseInput read that. If error, we expect the state
* was already changed.
*/
@@ -1591,7 +1591,7 @@ pqEndcopy3(PGconn *conn)
/*
* Non-blocking connections may have to abort at this point. If everyone
* played the game there should be no problem, but in error scenarios the
- * expected messages may not have arrived yet. (We are assuming that the
+ * expected messages may not have arrived yet. (We are assuming that the
* backend's packetizing will ensure that CommandComplete arrives along
* with the CopyDone; are there corner cases where that doesn't happen?)
*/
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index a07c44ad14..522fac6d77 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -1370,7 +1370,7 @@ open_client_SSL(PGconn *conn)
* these will be detected by client_cert_cb() which is
* called from SSL_connect(). We want to return that
* error message and not the rather unhelpful error that
- * OpenSSL itself returns. So check to see if an error
+ * OpenSSL itself returns. So check to see if an error
* message was already stored.
*/
if (conn->errorMessage.len == 0)
@@ -1545,7 +1545,7 @@ PQgetssl(PGconn *conn)
#if defined(ENABLE_THREAD_SAFETY) && !defined(WIN32)
/*
- * Block SIGPIPE for this thread. This prevents send()/write() from exiting
+ * Block SIGPIPE for this thread. This prevents send()/write() from exiting
* the application.
*/
int
@@ -1584,7 +1584,7 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending)
* Discard any pending SIGPIPE and reset the signal mask.
*
* Note: we are effectively assuming here that the C library doesn't queue
- * up multiple SIGPIPE events. If it did, then we'd accidentally leave
+ * up multiple SIGPIPE events. If it did, then we'd accidentally leave
* ours in the queue when an event was already pending and we got another.
* As long as it doesn't queue multiple events, we're OK because the caller
* can't tell the difference.
@@ -1595,7 +1595,7 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending)
* gotten one, pass got_epipe = TRUE.
*
* We do not want this to change errno, since if it did that could lose
- * the error code from a preceding send(). We essentially assume that if
+ * the error code from a preceding send(). We essentially assume that if
* we were able to do pq_block_sigpipe(), this can't fail.
*/
void
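
A minimal sketch of the blocking half of this dance, assuming POSIX
threads (the real pq_block_sigpipe() differs in details):

    #include <signal.h>
    #include <pthread.h>

    static int
    block_sigpipe(sigset_t *osigset, int *sigpipe_pending)
    {
        sigset_t    sigpipe_sigset;
        sigset_t    pending;

        sigemptyset(&sigpipe_sigset);
        sigaddset(&sigpipe_sigset, SIGPIPE);

        /* block SIGPIPE for this thread, remembering the old mask */
        if (pthread_sigmask(SIG_BLOCK, &sigpipe_sigset, osigset) != 0)
            return -1;

        /* note whether a SIGPIPE was already pending before we blocked */
        if (sigpending(&pending) != 0)
            return -1;
        *sigpipe_pending = sigismember(&pending, SIGPIPE);
        return 0;
    }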
diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h
index 6119fd539b..3e8e7603bc 100644
--- a/src/interfaces/libpq/libpq-fe.h
+++ b/src/interfaces/libpq/libpq-fe.h
@@ -54,9 +54,9 @@ typedef enum
* be used for user feedback or similar purposes.
*/
CONNECTION_STARTED, /* Waiting for connection to be made. */
- CONNECTION_MADE, /* Connection OK; waiting to send. */
+ CONNECTION_MADE, /* Connection OK; waiting to send. */
CONNECTION_AWAITING_RESPONSE, /* Waiting for a response from the
- * postmaster. */
+ * postmaster. */
CONNECTION_AUTH_OK, /* Received authentication; waiting for
* backend startup. */
CONNECTION_SETENV, /* Negotiating environment. */
diff --git a/src/interfaces/libpq/pqexpbuffer.c b/src/interfaces/libpq/pqexpbuffer.c
index 2b297dc1e8..5679bae725 100644
--- a/src/interfaces/libpq/pqexpbuffer.c
+++ b/src/interfaces/libpq/pqexpbuffer.c
@@ -161,7 +161,7 @@ resetPQExpBuffer(PQExpBuffer str)
* Make sure there is enough space for 'needed' more bytes in the buffer
* ('needed' does not include the terminating null).
*
- * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
+ * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
* the buffer is left in "broken" state.)
*/
int
@@ -175,7 +175,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* Guard against ridiculous "needed" values, which can occur if we're fed
- * bogus data. Without this, we can get an overflow or infinite loop in
+ * bogus data. Without this, we can get an overflow or infinite loop in
* the following.
*/
if (needed >= ((size_t) INT_MAX - str->len))
@@ -202,7 +202,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* Clamp to INT_MAX in case we went past it. Note we are assuming here
- * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
+ * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
* will still have newlen >= needed.
*/
if (newlen > (size_t) INT_MAX)
@@ -223,7 +223,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* printfPQExpBuffer
* Format text data under the control of fmt (an sprintf-like format string)
- * and insert it into str. More space is allocated to str if necessary.
+ * and insert it into str. More space is allocated to str if necessary.
* This is a convenience routine that does the same thing as
* resetPQExpBuffer() followed by appendPQExpBuffer().
*/
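
The enlargement rule is worth seeing in isolation. A sketch under the
stated assumption that INT_MAX <= UINT_MAX/2; the surrounding checks for
a broken buffer and for realloc() failure are omitted:

    #include <limits.h>
    #include <stddef.h>

    static size_t
    choose_newlen(size_t maxlen, size_t len, size_t needed)
    {
        size_t      newlen = (maxlen > 0) ? maxlen * 2 : 64;

        /* double until we can hold len + needed plus the trailing NUL */
        while (needed + len + 1 > newlen)
            newlen = 2 * newlen;

        /* clamp; cannot have overflowed given INT_MAX <= UINT_MAX/2 */
        if (newlen > (size_t) INT_MAX)
            newlen = (size_t) INT_MAX;
        return newlen;
    }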
diff --git a/src/interfaces/libpq/pqexpbuffer.h b/src/interfaces/libpq/pqexpbuffer.h
index c4e9da9ba8..da169bb9a4 100644
--- a/src/interfaces/libpq/pqexpbuffer.h
+++ b/src/interfaces/libpq/pqexpbuffer.h
@@ -37,7 +37,7 @@
* more space. We must always have maxlen > len.
*
* An exception occurs if we failed to allocate enough memory for the string
- * buffer. In that case data points to a statically allocated empty string,
+ * buffer. In that case data points to a statically allocated empty string,
* and len = maxlen = 0.
*-------------------------
*/
@@ -107,7 +107,7 @@ extern void initPQExpBuffer(PQExpBuffer str);
*
* NOTE: some routines build up a string using PQExpBuffer, and then
* release the PQExpBufferData but return the data string itself to their
- * caller. At that point the data string looks like a plain malloc'd
+ * caller. At that point the data string looks like a plain malloc'd
* string.
*/
extern void destroyPQExpBuffer(PQExpBuffer str);
@@ -126,7 +126,7 @@ extern void resetPQExpBuffer(PQExpBuffer str);
* Make sure there is enough space for 'needed' more bytes in the buffer
* ('needed' does not include the terminating null).
*
- * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
+ * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
* the buffer is left in "broken" state.)
*/
extern int enlargePQExpBuffer(PQExpBuffer str, size_t needed);
@@ -134,7 +134,7 @@ extern int enlargePQExpBuffer(PQExpBuffer str, size_t needed);
/*------------------------
* printfPQExpBuffer
* Format text data under the control of fmt (an sprintf-like format string)
- * and insert it into str. More space is allocated to str if necessary.
+ * and insert it into str. More space is allocated to str if necessary.
* This is a convenience routine that does the same thing as
* resetPQExpBuffer() followed by appendPQExpBuffer().
*/
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index b65ea31a86..d7b7bc382b 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -2086,7 +2086,7 @@ plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
/*
* Note: plperl_return_next is called both in Postgres and Perl contexts.
- * We report any errors in Postgres fashion (via ereport). If called in
+ * We report any errors in Postgres fashion (via ereport). If called in
* Perl context, it is SPI.xs's responsibility to catch the error and
* convert to a Perl error. We assume (perhaps without adequate justification)
* that we need not abort the current transaction if the Perl code traps the
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index 659e797f2c..754ff76fff 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -284,7 +284,7 @@ do_compile(FunctionCallInfo fcinfo,
MemoryContext func_cxt;
/*
- * Setup the scanner input and error info. We assume that this function
+ * Setup the scanner input and error info. We assume that this function
* cannot be invoked recursively, so there's no need to save and restore
* the static variables used here.
*/
@@ -372,7 +372,7 @@ do_compile(FunctionCallInfo fcinfo,
* needed permanently, so make them in tmp cxt.
*
* We also need to resolve any polymorphic input or output
- * argument types. In validation mode we won't be able to, so we
+ * argument types. In validation mode we won't be able to, so we
* arbitrarily assume we are dealing with integers.
*/
MemoryContextSwitchTo(compile_tmp_cxt);
@@ -457,7 +457,7 @@ do_compile(FunctionCallInfo fcinfo,
/*
* If there's just one OUT parameter, out_param_varno points
- * directly to it. If there's more than one, build a row that
+ * directly to it. If there's more than one, build a row that
* holds all of them.
*/
if (num_out_args == 1)
@@ -1414,7 +1414,7 @@ plpgsql_parse_dblwordrowtype(char *word)
*
* The returned struct may be a PLpgSQL_var, PLpgSQL_row, or
* PLpgSQL_rec depending on the given datatype, and is allocated via
- * palloc. The struct is automatically added to the current datum
+ * palloc. The struct is automatically added to the current datum
* array, and optionally to the current namespace.
*/
PLpgSQL_variable *
@@ -1872,7 +1872,7 @@ plpgsql_adddatum(PLpgSQL_datum *new)
* last call.
*
* This is used around a DECLARE section to create a list of the VARs
- * that have to be initialized at block entry. Note that VARs can also
+ * that have to be initialized at block entry. Note that VARs can also
* be created elsewhere than DECLARE, eg by a FOR-loop, but it is then
* the responsibility of special-purpose code to initialize them.
* ----------
@@ -2023,7 +2023,7 @@ plpgsql_resolve_polymorphic_argtypes(int numargs,
* delete_function - clean up as much as possible of a stale function cache
*
* We can't release the PLpgSQL_function struct itself, because of the
- * possibility that there are fn_extra pointers to it. We can release
+ * possibility that there are fn_extra pointers to it. We can release
* the subsidiary storage, but only if there are no active evaluations
* in progress. Otherwise we'll just leak that storage. Since the
* case would only occur if a pg_proc update is detected during a nested
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 418f3c80ba..51034074ca 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -53,7 +53,7 @@ typedef struct
* creates its own "eval_econtext" ExprContext within this estate for
* per-evaluation workspace. eval_econtext is freed at normal function exit,
* and the EState is freed at transaction end (in case of error, we assume
- * that the abort mechanisms clean it all up). Furthermore, any exception
+ * that the abort mechanisms clean it all up). Furthermore, any exception
* block within a function has to have its own eval_econtext separate from
* the containing function's, so that we can clean up ExprContext callbacks
* properly at subtransaction exit. We maintain a stack that tracks the
@@ -61,7 +61,7 @@ typedef struct
*
* This arrangement is a bit tedious to maintain, but it's worth the trouble
* so that we don't have to re-prepare simple expressions on each trip through
- * a function. (We assume the case to optimize is many repetitions of a
+ * a function. (We assume the case to optimize is many repetitions of a
* function within a transaction.)
*/
typedef struct SimpleEcontextStackEntry
@@ -2833,7 +2833,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
/*
* Check for error, and set FOUND if appropriate (for historical reasons
- * we set FOUND only for certain query types). Also Assert that we
+ * we set FOUND only for certain query types). Also Assert that we
* identified the statement type the same as SPI did.
*/
switch (rc)
@@ -3540,7 +3540,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
var->datatype->typlen);
/*
- * Now free the old value. (We can't do this any earlier
+ * Now free the old value. (We can't do this any earlier
* because of the possibility that we are assigning the var's
* old value to it, eg "foo := foo". We could optimize out
* the assignment altogether in such cases, but it's too
@@ -3907,7 +3907,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
* At present this doesn't handle PLpgSQL_expr or PLpgSQL_arrayelem datums.
*
* NOTE: caller must not modify the returned value, since it points right
- * at the stored value in the case of pass-by-reference datatypes. In some
+ * at the stored value in the case of pass-by-reference datatypes. In some
* cases we have to palloc a return value, and in such cases we put it into
* the estate's short-term memory context.
*/
@@ -4271,7 +4271,7 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt,
PinPortal(portal);
/*
- * Fetch the initial tuple(s). If prefetching is allowed then we grab a
+ * Fetch the initial tuple(s). If prefetching is allowed then we grab a
* few more rows to avoid multiple trips through executor startup
* overhead.
*/
@@ -5289,7 +5289,7 @@ plpgsql_create_econtext(PLpgSQL_execstate *estate)
/*
* Create an EState for evaluation of simple expressions, if there's not
- * one already in the current transaction. The EState is made a child of
+ * one already in the current transaction. The EState is made a child of
* TopTransactionContext so it will have the right lifespan.
*/
if (simple_eval_estate == NULL)
diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c
index 671ef47777..d40b7891de 100644
--- a/src/pl/plpgsql/src/pl_funcs.c
+++ b/src/pl/plpgsql/src/pl_funcs.c
@@ -228,7 +228,7 @@ plpgsql_ns_additem(int itemtype, int itemno, const char *name)
*
* Note that this only searches for variables, not labels.
*
- * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name
+ * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name
* with fewer than three components.
*
* If names_used isn't NULL, *names_used receives the number of names
@@ -369,9 +369,9 @@ plpgsql_ns_rename(char *oldname, char *newname)
* truncate to NAMEDATALEN.
*
* There may be several identifiers separated by dots and optional
- * whitespace. Each one is converted to a separate palloc'd string.
+ * whitespace. Each one is converted to a separate palloc'd string.
* The caller passes the expected number of identifiers, as well as
- * a char* array to hold them. It is an error if we find the wrong
+ * a char* array to hold them. It is an error if we find the wrong
* number of identifiers (cf grammar processing of fori_varname).
*
* NOTE: the input string has already been accepted by the flex lexer,
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 11ffc8d952..2e7c59ccaa 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -633,7 +633,7 @@ typedef struct PLpgSQL_func_hashkey
/*
* For a trigger function, the OID of the relation triggered on is part of
* the hashkey --- we want to compile the trigger separately for each
- * relation it is used with, in case the rowtype is different. Zero if
+ * relation it is used with, in case the rowtype is different. Zero if
* not called as a trigger.
*/
Oid trigrelOid;
@@ -758,7 +758,7 @@ typedef struct
*
* Also, immediately before any call to func_setup, PL/pgSQL fills in the
* error_callback and assign_expr fields with pointers to its own
- * plpgsql_exec_error_callback and exec_assign_expr functions. This is
+ * plpgsql_exec_error_callback and exec_assign_expr functions. This is
* a somewhat ad-hoc expedient to simplify life for debugger plugins.
*/
diff --git a/src/pl/plpython/plpython.c b/src/pl/plpython/plpython.c
index 791d45fe58..b9b070d8f4 100644
--- a/src/pl/plpython/plpython.c
+++ b/src/pl/plpython/plpython.c
@@ -1113,7 +1113,7 @@ PLy_function_delete_args(PLyProcedure *proc)
*/
/* PLy_procedure_get: returns a cached PLyProcedure, or creates, stores and
- * returns a new PLyProcedure. fcinfo is the call info, tgreloid is the
+ * returns a new PLyProcedure. fcinfo is the call info, tgreloid is the
* relation OID when calling a trigger, or InvalidOid (zero) for ordinary
* function calls.
*/
@@ -1166,7 +1166,7 @@ PLy_procedure_get(FunctionCallInfo fcinfo, Oid tgreloid)
if (OidIsValid(tgreloid))
{
/*
- * Input/output conversion for trigger tuples. Use the result
+ * Input/output conversion for trigger tuples. Use the result
* TypeInfo variable to store the tuple conversion info. We do this
* over again on each call to cover the possibility that the
* relation's tupdesc changed since the trigger was last called.
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index d8561eafb7..eebb421ed8 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -1527,7 +1527,7 @@ pltcl_elog(ClientData cdata, Tcl_Interp *interp,
if (level == ERROR)
{
/*
- * We just pass the error back to Tcl. If it's not caught, it'll
+ * We just pass the error back to Tcl. If it's not caught, it'll
* eventually get converted to a PG error when we reach the call
* handler.
*/
diff --git a/src/port/chklocale.c b/src/port/chklocale.c
index 58ed9e2813..65276142fd 100644
--- a/src/port/chklocale.c
+++ b/src/port/chklocale.c
@@ -314,7 +314,7 @@ pg_get_encoding_from_locale(const char *ctype)
/*
* We print a warning if we got a CODESET string but couldn't recognize
- * it. This means we need another entry in the table.
+ * it. This means we need another entry in the table.
*/
#ifdef FRONTEND
fprintf(stderr, _("could not determine encoding for locale \"%s\": codeset is \"%s\""),
diff --git a/src/port/crypt.c b/src/port/crypt.c
index 920ab932a5..9fe174f3ec 100644
--- a/src/port/crypt.c
+++ b/src/port/crypt.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -118,7 +118,7 @@ typedef int int32_t;
* representation is to store one bit per byte in an array of bytes. Bit N of
* the NBS spec is stored as the LSB of the Nth byte (index N-1) in the array.
* Another representation stores the 64 bits in 8 bytes, with bits 1..8 in the
- * first byte, 9..16 in the second, and so on. The DES spec apparently has
+ * first byte, 9..16 in the second, and so on. The DES spec apparently has
* bit 1 in the MSB of the first byte, but that is particularly noxious so we
* bit-reverse each byte so that bit 1 is the LSB of the first byte, bit 8 is
* the MSB of the first byte. Specifically, the 64-bit input data and key are
@@ -126,21 +126,21 @@ typedef int int32_t;
* MSB format.
*
* DES operates internally on groups of 32 bits which are expanded to 48 bits
- * by permutation E and shrunk back to 32 bits by the S boxes. To speed up
+ * by permutation E and shrunk back to 32 bits by the S boxes. To speed up
* the computation, the expansion is applied only once, the expanded
* representation is maintained during the encryption, and a compression
- * permutation is applied only at the end. To speed up the S-box lookups,
+ * permutation is applied only at the end. To speed up the S-box lookups,
* the 48 bits are maintained as eight 6 bit groups, one per byte, which
* directly feed the eight S-boxes. Within each byte, the 6 bits are the
- * most significant ones. The low two bits of each byte are zero. (Thus,
+ * most significant ones. The low two bits of each byte are zero. (Thus,
* bit 1 of the 48 bit E expansion is stored as the "4"-valued bit of the
* first byte in the eight byte representation, bit 2 of the 48 bit value is
* the "8"-valued bit, and so on.) In fact, a combined "SPE"-box lookup is
* used, in which the output is the 64 bit result of an S-box lookup which
* has been permuted by P and expanded by E, and is ready for use in the next
* iteration. Two 32-bit wide tables, SPE[0] and SPE[1], are used for this
- * lookup. Since each byte in the 48 bit path is a multiple of four, indexed
- * lookup of SPE[0] and SPE[1] is simple and fast. The key schedule and
+ * lookup. Since each byte in the 48 bit path is a multiple of four, indexed
+ * lookup of SPE[0] and SPE[1] is simple and fast. The key schedule and
* "salt" are also converted to this 8*(6+2) format. The SPE table size is
* 8*64*8 = 4K bytes.
*
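
A straightforward (not speed-tuned) illustration of the per-byte bit
reversal mentioned above, mapping NBS bit 1 to the LSB of the first byte:

    static unsigned char
    bit_reverse(unsigned char b)
    {
        unsigned char r = 0;
        int         i;

        for (i = 0; i < 8; i++)
        {
            r = (unsigned char) ((r << 1) | (b & 1));
            b >>= 1;
        }
        return r;
    }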
@@ -154,7 +154,7 @@ typedef int int32_t;
* The byte-order problem is unfortunate, since on the one hand it is good
* to have a machine-independent C_block representation (bits 1..8 in the
* first byte, etc.), and on the other hand it is good for the LSB of the
- * first byte to be the LSB of i0. We cannot have both these things, so we
+ * first byte to be the LSB of i0. We cannot have both these things, so we
* currently use the "little-endian" representation and avoid any multi-byte
* operations that depend on byte order. This largely precludes use of the
* 64-bit datatype since the relative order of i0 and i1 is unknown. It
@@ -181,13 +181,13 @@ typedef int int32_t;
* IE3264: MSB->LSB conversion, initial permutation, and expansion.
* This is done by collecting the 32 even-numbered bits and applying
* a 32->64 bit transformation, and then collecting the 32 odd-numbered
- * bits and applying the same transformation. Since there are only
+ * bits and applying the same transformation. Since there are only
* 32 input bits, the IE3264 transformation table is half the size of
* the usual table.
* CF6464: Compression, final permutation, and LSB->MSB conversion.
* This is done by two trivial 48->32 bit compressions to obtain
* a 64-bit block (the bit numbering is given in the "CIFP" table)
- * followed by a 64->64 bit "cleanup" transformation. (It would
+ * followed by a 64->64 bit "cleanup" transformation. (It would
* be possible to group the bits in the 64-bit block so that 2
* identical 32->32 bit transformations could be used instead,
* saving a factor of 4 in space and possibly 2 in time, but
@@ -206,7 +206,7 @@ typedef int int32_t;
* transforms 56 bits into 48 bits, dropping 8 bits, so PC2 is not
* invertible. We get around that problem by using a modified PC2
* which retains the 8 otherwise-lost bits in the unused low-order
- * bits of each byte. The low-order bits are cleared when the
+ * bits of each byte. The low-order bits are cleared when the
* codes are stored into the key schedule.
* PC2ROT[1]: Same as PC2ROT[0], but with two rotations.
* This is faster than applying PC2ROT[0] twice,
@@ -215,7 +215,7 @@ typedef int int32_t;
*
* The salting is a simple permutation applied to the 48-bit result of E.
* Specifically, if bit i (1 <= i <= 24) of the salt is set then bits i and
- * i+24 of the result are swapped. The salt is thus a 24 bit number, with
+ * i+24 of the result are swapped. The salt is thus a 24 bit number, with
* 16777216 possible values. (The original salt was 12 bits and could not
* swap bits 13..24 with 36..48.)
*
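
The salt swap is compact enough to sketch. This illustrative version
operates on a 48-bit value held in a 64-bit integer, rather than the
8*(6+2) representation the file actually uses:

    #include <stdint.h>

    static uint64_t
    apply_salt(uint64_t e48, uint32_t salt24)
    {
        /*
         * XOR-swap trick: where a salt bit is set and the two halves
         * differ, flip both bit i and bit i+24 of the expansion result.
         */
        uint64_t    diff = (e48 ^ (e48 >> 24)) & (uint64_t) (salt24 & 0xffffff);

        return e48 ^ diff ^ (diff << 24);
    }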
@@ -467,7 +467,7 @@ static C_block PC2ROT[2][64 / CHUNKBITS][1 << CHUNKBITS];
/* Initial permutation/expansion table */
static C_block IE3264[32 / CHUNKBITS][1 << CHUNKBITS];
-/* Table that combines the S, P, and E operations. */
+/* Table that combines the S, P, and E operations. */
static int32_t SPE[2][8][64];
/* compressed/interleaved => final permutation table */
diff --git a/src/port/exec.c b/src/port/exec.c
index e92fdee020..3af1374815 100644
--- a/src/port/exec.c
+++ b/src/port/exec.c
@@ -180,7 +180,7 @@ validate_exec(const char *path)
* that are part of our installation relative to the executable.
*
* This function is not thread-safe because it calls validate_exec(),
- * which calls getgrgid(). This function should be used only in
+ * which calls getgrgid(). This function should be used only in
* non-threaded binaries, not in library routines.
*/
int
diff --git a/src/port/getaddrinfo.c b/src/port/getaddrinfo.c
index 62b4efb655..250f12d747 100644
--- a/src/port/getaddrinfo.c
+++ b/src/port/getaddrinfo.c
@@ -4,7 +4,7 @@
* Support getaddrinfo() on platforms that don't have it.
*
* We also supply getnameinfo() here, assuming that the platform will have
- * it if and only if it has getaddrinfo(). If this proves false on some
+ * it if and only if it has getaddrinfo(). If this proves false on some
* platform, we'll need to split this file and provide a separate configure
* test for getnameinfo().
*
diff --git a/src/port/getopt.c b/src/port/getopt.c
index b759d53f0a..dea7b400bb 100644
--- a/src/port/getopt.c
+++ b/src/port/getopt.c
@@ -21,7 +21,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/getopt_long.c b/src/port/getopt_long.c
index 1127803024..aca03946b6 100644
--- a/src/port/getopt_long.c
+++ b/src/port/getopt_long.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/inet_aton.c b/src/port/inet_aton.c
index 31e19ea78f..a28fc92cb8 100644
--- a/src/port/inet_aton.c
+++ b/src/port/inet_aton.c
@@ -6,7 +6,7 @@
*
* The function was extracted whole from the file inet_aton.c in
* Release 5.3.12 of the Linux C library, which is derived from the
- * GNU C library, by Bryan Henderson in October 1996. The copyright
+ * GNU C library, by Bryan Henderson in October 1996. The copyright
* notice from that file is below.
*/
@@ -29,7 +29,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/memcmp.c b/src/port/memcmp.c
index 9b514d4dd5..5bbba9618f 100644
--- a/src/port/memcmp.c
+++ b/src/port/memcmp.c
@@ -36,7 +36,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/path.c b/src/port/path.c
index 5c22d8cae6..5c114da0d9 100644
--- a/src/port/path.c
+++ b/src/port/path.c
@@ -55,7 +55,7 @@ static void trim_trailing_separator(char *path);
/*
* skip_drive
*
- * On Windows, a path may begin with "C:" or "//network/". Advance over
+ * On Windows, a path may begin with "C:" or "//network/". Advance over
* this and point to the effective start of the path.
*/
#ifdef WIN32
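
A sketch of skip_drive()'s job, close to (though not necessarily
identical with) the real function:

    #include <ctype.h>

    #define IS_DIR_SEP(ch)  ((ch) == '/' || (ch) == '\\')

    static char *
    skip_drive_sketch(const char *path)
    {
        if (IS_DIR_SEP(path[0]) && IS_DIR_SEP(path[1]))
        {
            /* network path: skip "//network" */
            path += 2;
            while (*path && !IS_DIR_SEP(*path))
                path++;
        }
        else if (isalpha((unsigned char) path[0]) && path[1] == ':')
            path += 2;          /* drive letter: skip "C:" */
        return (char *) path;
    }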
@@ -268,7 +268,7 @@ canonicalize_path(char *path)
* Remove any trailing uses of "." and process ".." ourselves
*
* Note that "/../.." should reduce to just "/", while "../.." has to be
- * kept as-is. In the latter case we put back mistakenly trimmed ".."
+ * kept as-is. In the latter case we put back mistakenly trimmed ".."
* components below. Also note that we want a Windows drive spec to be
* visible to trim_directory(), but it's not part of the logic that's
* looking at the name components; hence distinction between path and
diff --git a/src/port/qsort.c b/src/port/qsort.c
index f7dae50c28..7c431e4f3d 100644
--- a/src/port/qsort.c
+++ b/src/port/qsort.c
@@ -33,7 +33,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/qsort_arg.c b/src/port/qsort_arg.c
index cf08ddb682..3c71e5213b 100644
--- a/src/port/qsort_arg.c
+++ b/src/port/qsort_arg.c
@@ -33,7 +33,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/snprintf.c b/src/port/snprintf.c
index 3ccac709e1..e4a2e1ae8c 100644
--- a/src/port/snprintf.c
+++ b/src/port/snprintf.c
@@ -18,7 +18,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -66,7 +66,7 @@
* platforms. This implementation is compatible with the Single Unix Spec:
*
* 1. -1 is returned only if processing is abandoned due to an invalid
- * parameter, such as incorrect format string. (Although not required by
+ * parameter, such as incorrect format string. (Although not required by
* the spec, this happens only when no characters have yet been transmitted
* to the destination.)
*
@@ -87,7 +87,7 @@
* Original:
* Patrick Powell Tue Apr 11 09:48:21 PDT 1995
* A bombproof version of doprnt (dopr) included.
- * Sigh. This sort of thing is always nasty do deal with. Note that
+ * Sigh. This sort of thing is always nasty to deal with. Note that
* the version here does not include floating point. (now it does ... tgl)
**************************************************************/
diff --git a/src/port/strlcat.c b/src/port/strlcat.c
index cab2e9adf7..9ad7add242 100644
--- a/src/port/strlcat.c
+++ b/src/port/strlcat.c
@@ -25,7 +25,7 @@
/*
* Appends src to string dst of size siz (unlike strncat, siz is the
* full size of dst, not space left). At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
+ * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
* Returns strlen(src) + MIN(siz, strlen(initial dst)).
* If retval >= siz, truncation occurred.
*/
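
A usage sketch for that contract, assuming strlcat() is available
(supplying it where it is not is the point of this file):

    #include <stdio.h>
    #include <string.h>

    static void
    example(void)
    {
        char        buf[16] = "Hello";

        if (strlcat(buf, ", world! This is too long", sizeof(buf)) >= sizeof(buf))
            fprintf(stderr, "warning: result truncated\n");
        /* buf is still NUL-terminated: "Hello, world! T" */
    }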
diff --git a/src/port/strlcpy.c b/src/port/strlcpy.c
index 5186634d2e..3e7619328a 100644
--- a/src/port/strlcpy.c
+++ b/src/port/strlcpy.c
@@ -36,8 +36,8 @@
/*
- * Copy src to string dst of size siz. At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz == 0).
+ * Copy src to string dst of size siz. At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz == 0).
* Returns strlen(src); if retval >= siz, truncation occurred.
* Function creation history: http://www.gratisoft.us/todd/papers/strlcpy.html
*/
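
A reference implementation matching that contract, essentially the
OpenBSD original this file derives from (renamed here to avoid clashing
with any system-provided strlcpy):

    #include <stddef.h>

    static size_t
    strlcpy_sketch(char *dst, const char *src, size_t siz)
    {
        const char *s = src;
        size_t      n = siz;

        if (n != 0)
        {
            while (--n != 0)
            {
                if ((*dst++ = *s++) == '\0')
                    return (size_t) (s - src - 1);  /* whole string fit */
            }
        }
        /* out of room: terminate dst (if siz != 0), then finish counting src */
        if (siz != 0)
            *dst = '\0';
        while (*s++)
            ;
        return (size_t) (s - src - 1);  /* strlen(src) */
    }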
diff --git a/src/port/strtol.c b/src/port/strtol.c
index d48c896992..8f4511fad0 100644
--- a/src/port/strtol.c
+++ b/src/port/strtol.c
@@ -21,7 +21,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -104,7 +104,7 @@ int base;
* that is greater than this value, if followed by a legal input
* character, is too big. One that is equal to this value may be valid or
* not; the limit between valid and invalid numbers is then based on the
- * last digit. For instance, if the range for longs is
+ * last digit. For instance, if the range for longs is
* [-2147483648..2147483647] and the input base is 10, cutoff will be set
* to 214748364 and cutlim to either 7 (neg==0) or 8 (neg==1), meaning
* that if we have accumulated a value > 214748364, or equal but the next
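
The cutoff/cutlim computation can be shown in isolation. A sketch for
32-bit long, matching the numbers in the comment; the extra unit of
headroom in the negative case comes from |LONG_MIN| being LONG_MAX + 1:

    #include <limits.h>

    static void
    compute_cutoffs(int base, int neg, unsigned long *cutoff, int *cutlim)
    {
        *cutoff = neg ? -(unsigned long) LONG_MIN : (unsigned long) LONG_MAX;
        *cutlim = (int) (*cutoff % (unsigned long) base);
        *cutoff /= (unsigned long) base;
        /* base 10: cutoff = 214748364, cutlim = 7 (neg==0) or 8 (neg==1) */
    }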
diff --git a/src/port/strtoul.c b/src/port/strtoul.c
index 18f04ac179..0e33a88463 100644
--- a/src/port/strtoul.c
+++ b/src/port/strtoul.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/thread.c b/src/port/thread.c
index ddcd885919..41642cd18f 100644
--- a/src/port/thread.c
+++ b/src/port/thread.c
@@ -28,12 +28,12 @@
* Additional confusion exists because many operating systems that
* use pthread_setspecific/pthread_getspecific() also have *_r versions
* of standard library functions for compatibility with operating systems
- * that require them. However, internally, these *_r functions merely
+ * that require them. However, internally, these *_r functions merely
* call the thread-safe standard library functions.
*
* For example, BSD/OS 4.3 uses Bind 8.2.3 for getpwuid(). Internally,
* getpwuid() calls pthread_setspecific/pthread_getspecific() to return
- * static data to the caller in a thread-safe manner. However, BSD/OS
+ * static data to the caller in a thread-safe manner. However, BSD/OS
* also has getpwuid_r(), which merely calls getpwuid() and shifts
* around the arguments to match the getpwuid_r() function declaration.
* Therefore, while BSD/OS has getpwuid_r(), it isn't required. It also
diff --git a/src/port/unsetenv.c b/src/port/unsetenv.c
index 9e9b23591d..7c50d537b5 100644
--- a/src/port/unsetenv.c
+++ b/src/port/unsetenv.c
@@ -30,7 +30,7 @@ unsetenv(const char *name)
* entry. When we clobber the entry in the second step we are ensuring
* that we zap the actual environ member. However, there are some libc
* implementations (notably recent BSDs) that do not obey SUS but copy the
- * presented string. This method fails on such platforms. Hopefully all
+ * presented string. This method fails on such platforms. Hopefully all
* such platforms have unsetenv() and thus won't be using this hack.
*
* Note that repeatedly setting and unsetting a var using this code will
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 8c30813b00..416b544158 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -532,7 +532,7 @@ convert_sourcefiles(void)
* namely, it is a standard regular expression with an implicit ^ at the start.
* (We currently support only a very limited subset of regular expressions,
* see string_matches_pattern() above.) What hostplatformpattern will be
- * matched against is the config.guess output. (In the shell-script version,
+ * matched against is the config.guess output. (In the shell-script version,
* we also provided an indication of whether gcc or another compiler was in
* use, but that facility isn't used anymore.)
*/
@@ -778,7 +778,7 @@ initialize_environment(void)
/*
* GNU make stores some flags in the MAKEFLAGS environment variable to
- * pass arguments to its own children. If we are invoked by make,
+ * pass arguments to its own children. If we are invoked by make,
* that causes the make invoked by us to think it's part of the make
* task invoking us, and so it tries to communicate with the toplevel
* make. Which fails.
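
A sketch of the resulting fix, assuming unsetenv() is available (src/port
supplies a fallback); MAKELEVEL is included on the assumption that it is
part of the same parent/child handshake:

    #include <stdlib.h>

    static void
    scrub_make_environment(void)
    {
        unsetenv("MAKEFLAGS");
        unsetenv("MAKELEVEL");
    }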
@@ -811,7 +811,7 @@ initialize_environment(void)
* Set up shared library paths to include the temp install.
*
* LD_LIBRARY_PATH covers many platforms. DYLD_LIBRARY_PATH works on
- * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
+ * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
* Windows needs shared libraries in PATH (only those linked into
* executables, not dlopen'ed ones). Feel free to account for others
* as well.
@@ -931,7 +931,7 @@ spawn_process(const char *cmdline)
pid_t pid;
/*
- * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
+ * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
* ... does anyone still care about systems where that doesn't work?
*/
fflush(stdout);
@@ -952,7 +952,7 @@ spawn_process(const char *cmdline)
* In child
*
* Instead of using system(), exec the shell directly, and tell it to
- * "exec" the command too. This saves two useless processes per
+ * "exec" the command too. This saves two useless processes per
* parallel test case.
*/
char *cmdline2 = malloc(strlen(cmdline) + 6);
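
A plausible completion of this child-side step (the real code may differ
in details), which also shows why the buffer needs strlen(cmdline) + 6
bytes: five for "exec " plus the terminating NUL:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static void
    exec_via_shell(const char *cmdline)
    {
        char       *cmdline2 = malloc(strlen(cmdline) + 6);

        if (cmdline2 == NULL)
            _exit(1);
        /* "exec" makes the shell replace itself instead of forking again */
        sprintf(cmdline2, "exec %s", cmdline);
        execl("/bin/sh", "sh", "-c", cmdline2, (char *) NULL);
        _exit(1);               /* reached only if execl() failed */
    }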
diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c
index 11ce78a8c3..5c8405206f 100644
--- a/src/timezone/localtime.c
+++ b/src/timezone/localtime.c
@@ -24,15 +24,15 @@
#ifndef WILDABBR
/*----------
* Someone might make incorrect use of a time zone abbreviation:
- * 1. They might reference tzname[0] before calling tzset (explicitly
+ * 1. They might reference tzname[0] before calling tzset (explicitly
* or implicitly).
- * 2. They might reference tzname[1] before calling tzset (explicitly
+ * 2. They might reference tzname[1] before calling tzset (explicitly
* or implicitly).
- * 3. They might reference tzname[1] after setting to a time zone
+ * 3. They might reference tzname[1] after setting to a time zone
* in which Daylight Saving Time is never observed.
- * 4. They might reference tzname[0] after setting to a time zone
+ * 4. They might reference tzname[0] after setting to a time zone
* in which Standard Time is never observed.
- * 5. They might reference tm.TM_ZONE after calling offtime.
+ * 5. They might reference tm.TM_ZONE after calling offtime.
* What's best to do in the above cases is open to debate;
* for now, we just set things up so that in any of the five cases
* WILDABBR is used. Another possibility: initialize tzname[0] to the
@@ -1451,7 +1451,7 @@ pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff)
{
/*
* The zone could have more than one ttinfo, if it's historically used
- * more than one abbreviation. We return TRUE as long as they all have
+ * more than one abbreviation. We return TRUE as long as they all have
* the same gmtoff.
*/
const struct state *sp;
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index d584f97166..3b93b040ce 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -130,7 +130,7 @@ pg_open_tzfile(const char *name, char *canonname)
/*
* Scan specified directory for a case-insensitive match to fname
- * (of length fnamelen --- fname may not be null terminated!). If found,
+ * (of length fnamelen --- fname may not be null terminated!). If found,
* copy the actual filename into canonname and return true.
*/
static bool
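
A sketch of the length-limited, case-insensitive comparison implied
above, assuming pg_strncasecmp() from src/port; because fname may lack a
terminator, compare exactly fnamelen bytes and require the directory
entry to end there:

    #include <string.h>

    extern int  pg_strncasecmp(const char *s1, const char *s2, size_t n);

    static int
    name_matches_ci(const char *entry, const char *fname, int fnamelen)
    {
        return strlen(entry) == (size_t) fnamelen &&
            pg_strncasecmp(entry, fname, (size_t) fnamelen) == 0;
    }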
@@ -153,7 +153,7 @@ scan_directory_ci(const char *dirname, const char *fname, int fnamelen,
while ((direntry = ReadDir(dirdesc, dirname)) != NULL)
{
/*
- * Ignore . and .., plus any other "hidden" files. This is a security
+ * Ignore . and .., plus any other "hidden" files. This is a security
* measure to prevent access to files outside the timezone directory.
*/
if (direntry->d_name[0] == '.')
@@ -524,7 +524,7 @@ identify_system_timezone(void)
return resultbuf;
/*
- * Did not find the timezone. Fallback to use a GMT zone. Note that the
+ * Did not find the timezone. Fallback to use a GMT zone. Note that the
* zic timezone database names the GMT-offset zones in POSIX style: plus
* is west of Greenwich. It's unfortunate that this is opposite of SQL
* conventions. Should we therefore change the names? Probably not...
@@ -543,7 +543,7 @@ identify_system_timezone(void)
* Recursively scan the timezone database looking for the best match to
* the system timezone behavior.
*
- * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
+ * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
* pathname of a directory containing TZ files. We internally modify it
* to hold pathnames of sub-directories and files, but must restore it
* to its original contents before exit.
@@ -1380,7 +1380,7 @@ tz_acceptable(pg_tz *tz)
/*
- * Get a pg_tz struct for the given timezone name. Returns NULL if name
+ * Get a pg_tz struct for the given timezone name. Returns NULL if name
* is invalid or not an "acceptable" zone.
*/
static pg_tz *
@@ -1438,7 +1438,7 @@ select_default_timezone(void)
*
* This is called before GUC variable initialization begins. Its purpose
* is to ensure that elog.c has a pgtz variable available to format timestamps
- * with, in case log_line_prefix is set to a value requiring that. We cannot
+ * with, in case log_line_prefix is set to a value requiring that. We cannot
* set log_timezone yet.
*/
void
diff --git a/src/tutorial/complex.c b/src/tutorial/complex.c
index bc37220fe3..bf1fd25194 100644
--- a/src/tutorial/complex.c
+++ b/src/tutorial/complex.c
@@ -140,7 +140,7 @@ complex_add(PG_FUNCTION_ARGS)
* It's essential that the comparison operators and support function for a
* B-tree index opclass always agree on the relative ordering of any two
* data values. Experience has shown that it's depressingly easy to write
- * unintentionally inconsistent functions. One way to reduce the odds of
+ * unintentionally inconsistent functions. One way to reduce the odds of
* making a mistake is to make all the functions simple wrappers around
* an internal three-way-comparison function, as we do here.
*****************************************************************************/