Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/common/heaptuple.c | 10
-rw-r--r--  src/backend/access/common/indextuple.c | 6
-rw-r--r--  src/backend/access/common/printtup.c | 2
-rw-r--r--  src/backend/access/common/reloptions.c | 6
-rw-r--r--  src/backend/access/common/tupconvert.c | 2
-rw-r--r--  src/backend/access/common/tupdesc.c | 2
-rw-r--r--  src/backend/access/gin/ginarrayproc.c | 5
-rw-r--r--  src/backend/access/gin/ginbtree.c | 27
-rw-r--r--  src/backend/access/gin/ginbulk.c | 2
-rw-r--r--  src/backend/access/gin/gindatapage.c | 96
-rw-r--r--  src/backend/access/gin/ginentrypage.c | 7
-rw-r--r--  src/backend/access/gin/ginfast.c | 8
-rw-r--r--  src/backend/access/gin/ginget.c | 83
-rw-r--r--  src/backend/access/gin/gininsert.c | 2
-rw-r--r--  src/backend/access/gin/ginlogic.c | 39
-rw-r--r--  src/backend/access/gin/ginpostinglist.c | 16
-rw-r--r--  src/backend/access/gin/ginscan.c | 2
-rw-r--r--  src/backend/access/gin/ginutil.c | 7
-rw-r--r--  src/backend/access/gin/ginvacuum.c | 21
-rw-r--r--  src/backend/access/gin/ginxlog.c | 20
-rw-r--r--  src/backend/access/gist/gist.c | 2
-rw-r--r--  src/backend/access/gist/gistget.c | 6
-rw-r--r--  src/backend/access/gist/gistscan.c | 2
-rw-r--r--  src/backend/access/gist/gistsplit.c | 10
-rw-r--r--  src/backend/access/gist/gistutil.c | 8
-rw-r--r--  src/backend/access/gist/gistvacuum.c | 2
-rw-r--r--  src/backend/access/gist/gistxlog.c | 5
-rw-r--r--  src/backend/access/hash/hash.c | 8
-rw-r--r--  src/backend/access/hash/hashfunc.c | 10
-rw-r--r--  src/backend/access/hash/hashinsert.c | 2
-rw-r--r--  src/backend/access/hash/hashovfl.c | 16
-rw-r--r--  src/backend/access/hash/hashpage.c | 16
-rw-r--r--  src/backend/access/hash/hashsearch.c | 4
-rw-r--r--  src/backend/access/hash/hashsort.c | 4
-rw-r--r--  src/backend/access/hash/hashutil.c | 4
-rw-r--r--  src/backend/access/heap/heapam.c | 286
-rw-r--r--  src/backend/access/heap/hio.c | 20
-rw-r--r--  src/backend/access/heap/pruneheap.c | 17
-rw-r--r--  src/backend/access/heap/rewriteheap.c | 90
-rw-r--r--  src/backend/access/heap/syncscan.c | 6
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 48
-rw-r--r--  src/backend/access/heap/visibilitymap.c | 20
-rw-r--r--  src/backend/access/index/genam.c | 12
-rw-r--r--  src/backend/access/index/indexam.c | 14
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 47
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 102
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 8
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 28
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 24
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 52
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 43
-rw-r--r--  src/backend/access/rmgrdesc/gindesc.c | 24
-rw-r--r--  src/backend/access/rmgrdesc/nbtdesc.c | 2
-rw-r--r--  src/backend/access/spgist/spgdoinsert.c | 14
-rw-r--r--  src/backend/access/spgist/spginsert.c | 4
-rw-r--r--  src/backend/access/spgist/spgscan.c | 4
-rw-r--r--  src/backend/access/spgist/spgtextproc.c | 8
-rw-r--r--  src/backend/access/spgist/spgutils.c | 6
-rw-r--r--  src/backend/access/spgist/spgvacuum.c | 4
-rw-r--r--  src/backend/access/spgist/spgxlog.c | 6
-rw-r--r--  src/backend/access/transam/clog.c | 10
-rw-r--r--  src/backend/access/transam/multixact.c | 108
-rw-r--r--  src/backend/access/transam/slru.c | 20
-rw-r--r--  src/backend/access/transam/subtrans.c | 4
-rw-r--r--  src/backend/access/transam/timeline.c | 8
-rw-r--r--  src/backend/access/transam/transam.c | 4
-rw-r--r--  src/backend/access/transam/twophase.c | 14
-rw-r--r--  src/backend/access/transam/varsup.c | 12
-rw-r--r--  src/backend/access/transam/xact.c | 70
-rw-r--r--  src/backend/access/transam/xlog.c | 323
-rw-r--r--  src/backend/access/transam/xlogarchive.c | 4
-rw-r--r--  src/backend/access/transam/xlogfuncs.c | 2
-rw-r--r--  src/backend/access/transam/xlogreader.c | 8
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 16
-rw-r--r--  src/backend/catalog/aclchk.c | 18
-rw-r--r--  src/backend/catalog/catalog.c | 8
-rw-r--r--  src/backend/catalog/dependency.c | 36
-rw-r--r--  src/backend/catalog/heap.c | 34
-rw-r--r--  src/backend/catalog/index.c | 110
-rw-r--r--  src/backend/catalog/indexing.c | 2
-rw-r--r--  src/backend/catalog/namespace.c | 54
-rw-r--r--  src/backend/catalog/objectaddress.c | 4
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 6
-rw-r--r--  src/backend/catalog/pg_collation.c | 2
-rw-r--r--  src/backend/catalog/pg_constraint.c | 10
-rw-r--r--  src/backend/catalog/pg_db_role_setting.c | 2
-rw-r--r--  src/backend/catalog/pg_depend.c | 8
-rw-r--r--  src/backend/catalog/pg_enum.c | 2
-rw-r--r--  src/backend/catalog/pg_largeobject.c | 2
-rw-r--r--  src/backend/catalog/pg_operator.c | 4
-rw-r--r--  src/backend/catalog/pg_proc.c | 8
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 18
-rw-r--r--  src/backend/catalog/pg_type.c | 2
-rw-r--r--  src/backend/catalog/storage.c | 6
-rw-r--r--  src/backend/catalog/toasting.c | 12
-rw-r--r--  src/backend/commands/aggregatecmds.c | 4
-rw-r--r--  src/backend/commands/alter.c | 2
-rw-r--r--  src/backend/commands/analyze.c | 46
-rw-r--r--  src/backend/commands/async.c | 32
-rw-r--r--  src/backend/commands/cluster.c | 63
-rw-r--r--  src/backend/commands/constraint.c | 4
-rw-r--r--  src/backend/commands/copy.c | 83
-rw-r--r--  src/backend/commands/createas.c | 12
-rw-r--r--  src/backend/commands/dbcommands.c | 15
-rw-r--r--  src/backend/commands/define.c | 2
-rw-r--r--  src/backend/commands/event_trigger.c | 14
-rw-r--r--  src/backend/commands/explain.c | 21
-rw-r--r--  src/backend/commands/extension.c | 30
-rw-r--r--  src/backend/commands/foreigncmds.c | 4
-rw-r--r--  src/backend/commands/functioncmds.c | 12
-rw-r--r--  src/backend/commands/indexcmds.c | 46
-rw-r--r--  src/backend/commands/matview.c | 16
-rw-r--r--  src/backend/commands/opclasscmds.c | 8
-rw-r--r--  src/backend/commands/operatorcmds.c | 2
-rw-r--r--  src/backend/commands/portalcmds.c | 8
-rw-r--r--  src/backend/commands/prepare.c | 10
-rw-r--r--  src/backend/commands/proclang.c | 2
-rw-r--r--  src/backend/commands/schemacmds.c | 4
-rw-r--r--  src/backend/commands/sequence.c | 32
-rw-r--r--  src/backend/commands/tablecmds.c | 246
-rw-r--r--  src/backend/commands/tablespace.c | 26
-rw-r--r--  src/backend/commands/trigger.c | 59
-rw-r--r--  src/backend/commands/typecmds.c | 30
-rw-r--r--  src/backend/commands/user.c | 8
-rw-r--r--  src/backend/commands/vacuum.c | 42
-rw-r--r--  src/backend/commands/vacuumlazy.c | 26
-rw-r--r--  src/backend/commands/variable.c | 12
-rw-r--r--  src/backend/commands/view.c | 14
-rw-r--r--  src/backend/executor/execAmi.c | 4
-rw-r--r--  src/backend/executor/execCurrent.c | 2
-rw-r--r--  src/backend/executor/execJunk.c | 2
-rw-r--r--  src/backend/executor/execMain.c | 35
-rw-r--r--  src/backend/executor/execProcnode.c | 4
-rw-r--r--  src/backend/executor/execQual.c | 48
-rw-r--r--  src/backend/executor/execScan.c | 4
-rw-r--r--  src/backend/executor/execTuples.c | 12
-rw-r--r--  src/backend/executor/execUtils.c | 22
-rw-r--r--  src/backend/executor/functions.c | 34
-rw-r--r--  src/backend/executor/nodeAgg.c | 58
-rw-r--r--  src/backend/executor/nodeAppend.c | 2
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c | 4
-rw-r--r--  src/backend/executor/nodeForeignscan.c | 2
-rw-r--r--  src/backend/executor/nodeFunctionscan.c | 6
-rw-r--r--  src/backend/executor/nodeHash.c | 16
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 6
-rw-r--r--  src/backend/executor/nodeIndexonlyscan.c | 6
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 4
-rw-r--r--  src/backend/executor/nodeLimit.c | 2
-rw-r--r--  src/backend/executor/nodeLockRows.c | 8
-rw-r--r--  src/backend/executor/nodeMaterial.c | 2
-rw-r--r--  src/backend/executor/nodeMergeAppend.c | 2
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 12
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 26
-rw-r--r--  src/backend/executor/nodeRecursiveunion.c | 2
-rw-r--r--  src/backend/executor/nodeSetOp.c | 6
-rw-r--r--  src/backend/executor/nodeSubplan.c | 10
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 2
-rw-r--r--  src/backend/executor/nodeUnique.c | 2
-rw-r--r--  src/backend/executor/nodeValuesscan.c | 2
-rw-r--r--  src/backend/executor/nodeWindowAgg.c | 38
-rw-r--r--  src/backend/executor/nodeWorktablescan.c | 2
-rw-r--r--  src/backend/executor/spi.c | 12
-rw-r--r--  src/backend/executor/tstoreReceiver.c | 2
-rw-r--r--  src/backend/lib/stringinfo.c | 4
-rw-r--r--  src/backend/libpq/auth.c | 18
-rw-r--r--  src/backend/libpq/be-fsstubs.c | 6
-rw-r--r--  src/backend/libpq/be-secure.c | 6
-rw-r--r--  src/backend/libpq/hba.c | 4
-rw-r--r--  src/backend/libpq/md5.c | 2
-rw-r--r--  src/backend/libpq/pqcomm.c | 5
-rw-r--r--  src/backend/libpq/pqformat.c | 2
-rw-r--r--  src/backend/main/main.c | 12
-rw-r--r--  src/backend/nodes/bitmapset.c | 4
-rw-r--r--  src/backend/nodes/copyfuncs.c | 8
-rw-r--r--  src/backend/nodes/equalfuncs.c | 14
-rw-r--r--  src/backend/nodes/list.c | 4
-rw-r--r--  src/backend/nodes/makefuncs.c | 2
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 16
-rw-r--r--  src/backend/nodes/outfuncs.c | 6
-rw-r--r--  src/backend/nodes/params.c | 2
-rw-r--r--  src/backend/nodes/read.c | 10
-rw-r--r--  src/backend/nodes/readfuncs.c | 14
-rw-r--r--  src/backend/nodes/tidbitmap.c | 18
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 10
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 30
-rw-r--r--  src/backend/optimizer/path/clausesel.c | 20
-rw-r--r--  src/backend/optimizer/path/costsize.c | 76
-rw-r--r--  src/backend/optimizer/path/equivclass.c | 62
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 76
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 26
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 12
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 34
-rw-r--r--  src/backend/optimizer/path/tidpath.c | 4
-rw-r--r--  src/backend/optimizer/plan/analyzejoins.c | 10
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 46
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 58
-rw-r--r--  src/backend/optimizer/plan/planagg.c | 8
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 12
-rw-r--r--  src/backend/optimizer/plan/planner.c | 73
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 20
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 40
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 42
-rw-r--r--  src/backend/optimizer/prep/prepqual.c | 12
-rw-r--r--  src/backend/optimizer/prep/prepsecurity.c | 52
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 12
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 17
-rw-r--r--  src/backend/optimizer/util/clauses.c | 68
-rw-r--r--  src/backend/optimizer/util/joininfo.c | 2
-rw-r--r--  src/backend/optimizer/util/orclauses.c | 14
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 26
-rw-r--r--  src/backend/optimizer/util/placeholder.c | 4
-rw-r--r--  src/backend/optimizer/util/plancat.c | 10
-rw-r--r--  src/backend/optimizer/util/predtest.c | 22
-rw-r--r--  src/backend/optimizer/util/relnode.c | 18
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c | 2
-rw-r--r--  src/backend/optimizer/util/tlist.c | 2
-rw-r--r--  src/backend/optimizer/util/var.c | 14
-rw-r--r--  src/backend/parser/analyze.c | 116
-rw-r--r--  src/backend/parser/kwlookup.c | 2
-rw-r--r--  src/backend/parser/parse_agg.c | 10
-rw-r--r--  src/backend/parser/parse_clause.c | 47
-rw-r--r--  src/backend/parser/parse_coerce.c | 46
-rw-r--r--  src/backend/parser/parse_collate.c | 16
-rw-r--r--  src/backend/parser/parse_cte.c | 8
-rw-r--r--  src/backend/parser/parse_expr.c | 24
-rw-r--r--  src/backend/parser/parse_func.c | 26
-rw-r--r--  src/backend/parser/parse_node.c | 10
-rw-r--r--  src/backend/parser/parse_oper.c | 4
-rw-r--r--  src/backend/parser/parse_param.c | 2
-rw-r--r--  src/backend/parser/parse_relation.c | 26
-rw-r--r--  src/backend/parser/parse_target.c | 24
-rw-r--r--  src/backend/parser/parse_type.c | 20
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 20
-rw-r--r--  src/backend/parser/parser.c | 2
-rw-r--r--  src/backend/parser/scansup.c | 6
-rw-r--r--  src/backend/port/darwin/system.c | 2
-rw-r--r--  src/backend/port/dynloader/darwin.c | 2
-rw-r--r--  src/backend/port/dynloader/freebsd.c | 2
-rw-r--r--  src/backend/port/dynloader/netbsd.c | 2
-rw-r--r--  src/backend/port/dynloader/openbsd.c | 2
-rw-r--r--  src/backend/port/posix_sema.c | 2
-rw-r--r--  src/backend/port/sysv_sema.c | 14
-rw-r--r--  src/backend/port/sysv_shmem.c | 20
-rw-r--r--  src/backend/port/unix_latch.c | 6
-rw-r--r--  src/backend/port/win32/socket.c | 4
-rw-r--r--  src/backend/port/win32_latch.c | 2
-rw-r--r--  src/backend/port/win32_shmem.c | 2
-rw-r--r--  src/backend/postmaster/autovacuum.c | 46
-rw-r--r--  src/backend/postmaster/bgworker.c | 132
-rw-r--r--  src/backend/postmaster/bgwriter.c | 35
-rw-r--r--  src/backend/postmaster/checkpointer.c | 24
-rw-r--r--  src/backend/postmaster/fork_process.c | 2
-rw-r--r--  src/backend/postmaster/pgarch.c | 14
-rw-r--r--  src/backend/postmaster/pgstat.c | 50
-rw-r--r--  src/backend/postmaster/postmaster.c | 132
-rw-r--r--  src/backend/postmaster/startup.c | 2
-rw-r--r--  src/backend/postmaster/syslogger.c | 19
-rw-r--r--  src/backend/postmaster/walwriter.c | 8
-rw-r--r--  src/backend/regex/regc_color.c | 2
-rw-r--r--  src/backend/regex/regc_cvec.c | 2
-rw-r--r--  src/backend/regex/regc_lex.c | 2
-rw-r--r--  src/backend/regex/regc_locale.c | 6
-rw-r--r--  src/backend/regex/regc_nfa.c | 6
-rw-r--r--  src/backend/regex/regc_pg_locale.c | 12
-rw-r--r--  src/backend/regex/regcomp.c | 8
-rw-r--r--  src/backend/regex/rege_dfa.c | 2
-rw-r--r--  src/backend/regex/regerror.c | 2
-rw-r--r--  src/backend/regex/regexec.c | 18
-rw-r--r--  src/backend/regex/regfree.c | 2
-rw-r--r--  src/backend/regex/regprefix.c | 8
-rw-r--r--  src/backend/replication/basebackup.c | 20
-rw-r--r--  src/backend/replication/libpqwalreceiver/libpqwalreceiver.c | 2
-rw-r--r--  src/backend/replication/logical/decode.c | 77
-rw-r--r--  src/backend/replication/logical/logical.c | 135
-rw-r--r--  src/backend/replication/logical/logicalfuncs.c | 25
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c | 82
-rw-r--r--  src/backend/replication/logical/snapbuild.c | 138
-rw-r--r--  src/backend/replication/slot.c | 91
-rw-r--r--  src/backend/replication/slotfuncs.c | 9
-rw-r--r--  src/backend/replication/syncrep.c | 6
-rw-r--r--  src/backend/replication/walreceiver.c | 10
-rw-r--r--  src/backend/replication/walreceiverfuncs.c | 2
-rw-r--r--  src/backend/replication/walsender.c | 164
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 8
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 85
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 18
-rw-r--r--  src/backend/rewrite/rewriteSupport.c | 2
-rw-r--r--  src/backend/storage/buffer/buf_init.c | 4
-rw-r--r--  src/backend/storage/buffer/buf_table.c | 4
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 38
-rw-r--r--  src/backend/storage/buffer/freelist.c | 12
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 6
-rw-r--r--  src/backend/storage/file/buffile.c | 6
-rw-r--r--  src/backend/storage/file/fd.c | 26
-rw-r--r--  src/backend/storage/freespace/freespace.c | 4
-rw-r--r--  src/backend/storage/freespace/fsmpage.c | 4
-rw-r--r--  src/backend/storage/ipc/dsm.c | 156
-rw-r--r--  src/backend/storage/ipc/dsm_impl.c | 323
-rw-r--r--  src/backend/storage/ipc/ipc.c | 28
-rw-r--r--  src/backend/storage/ipc/ipci.c | 4
-rw-r--r--  src/backend/storage/ipc/pmsignal.c | 6
-rw-r--r--  src/backend/storage/ipc/procarray.c | 32
-rw-r--r--  src/backend/storage/ipc/procsignal.c | 2
-rw-r--r--  src/backend/storage/ipc/shm_mq.c | 131
-rw-r--r--  src/backend/storage/ipc/shm_toc.c | 26
-rw-r--r--  src/backend/storage/ipc/shmem.c | 16
-rw-r--r--  src/backend/storage/ipc/shmqueue.c | 2
-rw-r--r--  src/backend/storage/ipc/sinval.c | 12
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 22
-rw-r--r--  src/backend/storage/ipc/standby.c | 16
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 10
-rw-r--r--  src/backend/storage/lmgr/deadlock.c | 18
-rw-r--r--  src/backend/storage/lmgr/lmgr.c | 14
-rw-r--r--  src/backend/storage/lmgr/lock.c | 40
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 46
-rw-r--r--  src/backend/storage/lmgr/predicate.c | 56
-rw-r--r--  src/backend/storage/lmgr/proc.c | 32
-rw-r--r--  src/backend/storage/lmgr/s_lock.c | 6
-rw-r--r--  src/backend/storage/lmgr/spin.c | 6
-rw-r--r--  src/backend/storage/page/bufpage.c | 10
-rw-r--r--  src/backend/storage/smgr/md.c | 32
-rw-r--r--  src/backend/storage/smgr/smgr.c | 4
-rw-r--r--  src/backend/tcop/fastpath.c | 10
-rw-r--r--  src/backend/tcop/postgres.c | 32
-rw-r--r--  src/backend/tcop/pquery.c | 14
-rw-r--r--  src/backend/tcop/utility.c | 4
-rw-r--r--  src/backend/tsearch/ts_locale.c | 4
-rw-r--r--  src/backend/tsearch/ts_selfuncs.c | 2
-rw-r--r--  src/backend/tsearch/ts_typanalyze.c | 14
-rw-r--r--  src/backend/tsearch/ts_utils.c | 8
-rw-r--r--  src/backend/tsearch/wparser_def.c | 2
-rw-r--r--  src/backend/utils/adt/acl.c | 21
-rw-r--r--  src/backend/utils/adt/array_selfuncs.c | 20
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c | 14
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 27
-rw-r--r--  src/backend/utils/adt/arrayutils.c | 2
-rw-r--r--  src/backend/utils/adt/cash.c | 92
-rw-r--r--  src/backend/utils/adt/char.c | 2
-rw-r--r--  src/backend/utils/adt/date.c | 6
-rw-r--r--  src/backend/utils/adt/datetime.c | 34
-rw-r--r--  src/backend/utils/adt/datum.c | 2
-rw-r--r--  src/backend/utils/adt/dbsize.c | 3
-rw-r--r--  src/backend/utils/adt/domains.c | 8
-rw-r--r--  src/backend/utils/adt/float.c | 6
-rw-r--r--  src/backend/utils/adt/format_type.c | 6
-rw-r--r--  src/backend/utils/adt/formatting.c | 4
-rw-r--r--  src/backend/utils/adt/geo_ops.c | 11
-rw-r--r--  src/backend/utils/adt/geo_selfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/inet_cidr_ntop.c | 2
-rw-r--r--  src/backend/utils/adt/int.c | 30
-rw-r--r--  src/backend/utils/adt/int8.c | 44
-rw-r--r--  src/backend/utils/adt/json.c | 12
-rw-r--r--  src/backend/utils/adt/jsonb.c | 26
-rw-r--r--  src/backend/utils/adt/jsonb_gin.c | 80
-rw-r--r--  src/backend/utils/adt/jsonb_op.c | 13
-rw-r--r--  src/backend/utils/adt/jsonb_util.c | 257
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c | 35
-rw-r--r--  src/backend/utils/adt/like.c | 4
-rw-r--r--  src/backend/utils/adt/misc.c | 14
-rw-r--r--  src/backend/utils/adt/nabstime.c | 36
-rw-r--r--  src/backend/utils/adt/network.c | 12
-rw-r--r--  src/backend/utils/adt/network_gist.c | 10
-rw-r--r--  src/backend/utils/adt/numeric.c | 63
-rw-r--r--  src/backend/utils/adt/oid.c | 2
-rw-r--r--  src/backend/utils/adt/orderedsetaggs.c | 8
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 29
-rw-r--r--  src/backend/utils/adt/pg_lsn.c | 43
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 12
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 2
-rw-r--r--  src/backend/utils/adt/pseudotypes.c | 6
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 8
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c | 8
-rw-r--r--  src/backend/utils/adt/regexp.c | 6
-rw-r--r--  src/backend/utils/adt/regproc.c | 26
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 14
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 31
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 98
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 143
-rw-r--r--  src/backend/utils/adt/timestamp.c | 36
-rw-r--r--  src/backend/utils/adt/tsginidx.c | 13
-rw-r--r--  src/backend/utils/adt/varchar.c | 4
-rw-r--r--  src/backend/utils/adt/varlena.c | 42
-rw-r--r--  src/backend/utils/adt/xml.c | 64
-rw-r--r--  src/backend/utils/cache/attoptcache.c | 2
-rw-r--r--  src/backend/utils/cache/catcache.c | 21
-rw-r--r--  src/backend/utils/cache/inval.c | 30
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 10
-rw-r--r--  src/backend/utils/cache/plancache.c | 42
-rw-r--r--  src/backend/utils/cache/relcache.c | 122
-rw-r--r--  src/backend/utils/cache/relfilenodemap.c | 13
-rw-r--r--  src/backend/utils/cache/relmapper.c | 20
-rw-r--r--  src/backend/utils/cache/spccache.c | 6
-rw-r--r--  src/backend/utils/cache/syscache.c | 21
-rw-r--r--  src/backend/utils/cache/typcache.c | 8
-rw-r--r--  src/backend/utils/error/elog.c | 99
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 6
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 18
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 10
-rw-r--r--  src/backend/utils/hash/dynahash.c | 28
-rw-r--r--  src/backend/utils/init/miscinit.c | 30
-rw-r--r--  src/backend/utils/init/postinit.c | 18
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 2
-rw-r--r--  src/backend/utils/mb/mbutils.c | 18
-rw-r--r--  src/backend/utils/mb/wchar.c | 8
-rw-r--r--  src/backend/utils/mb/wstrcmp.c | 2
-rw-r--r--  src/backend/utils/mb/wstrncmp.c | 2
-rw-r--r--  src/backend/utils/misc/guc.c | 67
-rw-r--r--  src/backend/utils/misc/ps_status.c | 6
-rw-r--r--  src/backend/utils/misc/rbtree.c | 12
-rw-r--r--  src/backend/utils/misc/timeout.c | 22
-rw-r--r--  src/backend/utils/misc/tzparser.c | 4
-rw-r--r--  src/backend/utils/mmgr/aset.c | 16
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 16
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 18
-rw-r--r--  src/backend/utils/resowner/resowner.c | 8
-rw-r--r--  src/backend/utils/sort/logtape.c | 30
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 77
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 28
-rw-r--r--  src/backend/utils/time/combocid.c | 6
-rw-r--r--  src/backend/utils/time/snapmgr.c | 38
-rw-r--r--  src/backend/utils/time/tqual.c | 57
-rw-r--r--  src/bin/initdb/findtimezone.c | 6
-rw-r--r--  src/bin/initdb/initdb.c | 25
-rw-r--r--  src/bin/initdb/t/001_initdb.pl | 29
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 70
-rw-r--r--  src/bin/pg_basebackup/pg_receivexlog.c | 6
-rw-r--r--  src/bin/pg_basebackup/pg_recvlogical.c | 50
-rw-r--r--  src/bin/pg_basebackup/receivelog.c | 10
-rw-r--r--  src/bin/pg_basebackup/streamutil.c | 6
-rw-r--r--  src/bin/pg_basebackup/streamutil.h | 4
-rw-r--r--  src/bin/pg_basebackup/t/010_pg_basebackup.pl | 94
-rw-r--r--  src/bin/pg_config/t/001_pg_config.pl | 12
-rw-r--r--  src/bin/pg_controldata/pg_controldata.c | 2
-rw-r--r--  src/bin/pg_controldata/t/001_pg_controldata.pl | 6
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c | 48
-rw-r--r--  src/bin/pg_ctl/t/001_start_stop.pl | 20
-rw-r--r--  src/bin/pg_ctl/t/002_status.pl | 9
-rw-r--r--  src/bin/pg_dump/common.c | 4
-rw-r--r--  src/bin/pg_dump/compress_io.c | 8
-rw-r--r--  src/bin/pg_dump/dumputils.c | 16
-rw-r--r--  src/bin/pg_dump/parallel.c | 12
-rw-r--r--  src/bin/pg_dump/pg_backup.h | 2
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 55
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.h | 2
-rw-r--r--  src/bin/pg_dump/pg_backup_custom.c | 2
-rw-r--r--  src/bin/pg_dump/pg_backup_db.c | 4
-rw-r--r--  src/bin/pg_dump/pg_backup_directory.c | 8
-rw-r--r--  src/bin/pg_dump/pg_backup_tar.c | 4
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 162
-rw-r--r--  src/bin/pg_dump/pg_dump.h | 8
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c | 24
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 8
-rw-r--r--  src/bin/pg_resetxlog/pg_resetxlog.c | 4
-rw-r--r--  src/bin/psql/command.c | 26
-rw-r--r--  src/bin/psql/common.c | 2
-rw-r--r--  src/bin/psql/copy.c | 7
-rw-r--r--  src/bin/psql/describe.c | 57
-rw-r--r--  src/bin/psql/input.c | 6
-rw-r--r--  src/bin/psql/large_obj.c | 2
-rw-r--r--  src/bin/psql/mainloop.c | 4
-rw-r--r--  src/bin/psql/mbprint.c | 2
-rw-r--r--  src/bin/psql/print.c | 60
-rw-r--r--  src/bin/psql/settings.h | 2
-rw-r--r--  src/bin/psql/stringutils.c | 12
-rw-r--r--  src/bin/psql/tab-complete.c | 22
-rw-r--r--  src/bin/psql/variables.c | 2
-rw-r--r--  src/bin/scripts/common.c | 2
-rw-r--r--  src/bin/scripts/createuser.c | 1
-rw-r--r--  src/bin/scripts/pg_isready.c | 2
-rw-r--r--  src/bin/scripts/t/010_clusterdb.pl | 16
-rw-r--r--  src/bin/scripts/t/011_clusterdb_all.pl | 5
-rw-r--r--  src/bin/scripts/t/020_createdb.pl | 12
-rw-r--r--  src/bin/scripts/t/030_createlang.pl | 12
-rw-r--r--  src/bin/scripts/t/040_createuser.pl | 30
-rw-r--r--  src/bin/scripts/t/050_dropdb.pl | 7
-rw-r--r--  src/bin/scripts/t/060_droplang.pl | 9
-rw-r--r--  src/bin/scripts/t/070_dropuser.pl | 7
-rw-r--r--  src/bin/scripts/t/090_reindexdb.pl | 23
-rw-r--r--  src/bin/scripts/t/091_reindexdb_all.pl | 5
-rw-r--r--  src/bin/scripts/t/100_vacuumdb.pl | 25
-rw-r--r--  src/bin/scripts/t/101_vacuumdb_all.pl | 5
-rw-r--r--  src/bin/scripts/t/102_vacuumdb_stages.pl | 7
-rw-r--r--  src/bin/scripts/vacuumdb.c | 10
-rw-r--r--  src/common/psprintf.c | 9
-rw-r--r--  src/common/relpath.c | 2
-rw-r--r--  src/include/access/attnum.h | 2
-rw-r--r--  src/include/access/genam.h | 4
-rw-r--r--  src/include/access/gin.h | 8
-rw-r--r--  src/include/access/gin_private.h | 65
-rw-r--r--  src/include/access/gist.h | 4
-rw-r--r--  src/include/access/hash.h | 2
-rw-r--r--  src/include/access/heapam.h | 4
-rw-r--r--  src/include/access/heapam_xlog.h | 38
-rw-r--r--  src/include/access/htup.h | 4
-rw-r--r--  src/include/access/htup_details.h | 16
-rw-r--r--  src/include/access/itup.h | 2
-rw-r--r--  src/include/access/nbtree.h | 50
-rw-r--r--  src/include/access/reloptions.h | 2
-rw-r--r--  src/include/access/rewriteheap.h | 10
-rw-r--r--  src/include/access/rmgr.h | 2
-rw-r--r--  src/include/access/rmgrlist.h | 2
-rw-r--r--  src/include/access/skey.h | 4
-rw-r--r--  src/include/access/slru.h | 6
-rw-r--r--  src/include/access/spgist_private.h | 2
-rw-r--r--  src/include/access/transam.h | 2
-rw-r--r--  src/include/access/tupdesc.h | 2
-rw-r--r--  src/include/access/tupmacs.h | 4
-rw-r--r--  src/include/access/tuptoaster.h | 6
-rw-r--r--  src/include/access/xlog.h | 6
-rw-r--r--  src/include/access/xlog_internal.h | 4
-rw-r--r--  src/include/access/xlogdefs.h | 4
-rw-r--r--  src/include/c.h | 24
-rw-r--r--  src/include/catalog/catversion.h | 2
-rw-r--r--  src/include/catalog/dependency.h | 2
-rwxr-xr-x  src/include/catalog/duplicate_oids | 18
-rw-r--r--  src/include/catalog/genbki.h | 2
-rw-r--r--  src/include/catalog/namespace.h | 2
-rw-r--r--  src/include/catalog/objectaccess.h | 2
-rw-r--r--  src/include/catalog/pg_aggregate.h | 2
-rw-r--r--  src/include/catalog/pg_attrdef.h | 2
-rw-r--r--  src/include/catalog/pg_attribute.h | 2
-rw-r--r--  src/include/catalog/pg_authid.h | 2
-rw-r--r--  src/include/catalog/pg_constraint.h | 6
-rw-r--r--  src/include/catalog/pg_control.h | 4
-rw-r--r--  src/include/catalog/pg_db_role_setting.h | 2
-rw-r--r--  src/include/catalog/pg_default_acl.h | 4
-rw-r--r--  src/include/catalog/pg_description.h | 6
-rw-r--r--  src/include/catalog/pg_event_trigger.h | 2
-rw-r--r--  src/include/catalog/pg_index.h | 2
-rw-r--r--  src/include/catalog/pg_largeobject.h | 2
-rw-r--r--  src/include/catalog/pg_opclass.h | 6
-rw-r--r--  src/include/catalog/pg_proc.h | 22
-rw-r--r--  src/include/catalog/pg_rewrite.h | 2
-rw-r--r--  src/include/catalog/pg_shdepend.h | 2
-rw-r--r--  src/include/catalog/pg_shdescription.h | 4
-rw-r--r--  src/include/catalog/pg_statistic.h | 28
-rw-r--r--  src/include/catalog/pg_trigger.h | 2
-rw-r--r--  src/include/catalog/pg_ts_dict.h | 2
-rw-r--r--  src/include/catalog/pg_ts_template.h | 2
-rw-r--r--  src/include/catalog/pg_type.h | 12
-rw-r--r--  src/include/catalog/toasting.h | 4
-rw-r--r--  src/include/commands/comment.h | 2
-rw-r--r--  src/include/commands/tablecmds.h | 2
-rw-r--r--  src/include/commands/vacuum.h | 6
-rw-r--r--  src/include/common/fe_memutils.h | 6
-rw-r--r--  src/include/common/relpath.h | 2
-rw-r--r--  src/include/datatype/timestamp.h | 4
-rw-r--r--  src/include/executor/executor.h | 4
-rw-r--r--  src/include/executor/hashjoin.h | 4
-rw-r--r--  src/include/executor/spi_priv.h | 2
-rw-r--r--  src/include/executor/tuptable.h | 12
-rw-r--r--  src/include/fmgr.h | 18
-rw-r--r--  src/include/funcapi.h | 2
-rw-r--r--  src/include/lib/ilist.h | 16
-rw-r--r--  src/include/lib/stringinfo.h | 4
-rw-r--r--  src/include/libpq/libpq-be.h | 8
-rw-r--r--  src/include/libpq/pqcomm.h | 4
-rw-r--r--  src/include/mb/pg_wchar.h | 10
-rw-r--r--  src/include/miscadmin.h | 18
-rw-r--r--  src/include/nodes/execnodes.h | 20
-rw-r--r--  src/include/nodes/nodes.h | 4
-rw-r--r--  src/include/nodes/params.h | 8
-rw-r--r--  src/include/nodes/parsenodes.h | 60
-rw-r--r--  src/include/nodes/plannodes.h | 20
-rw-r--r--  src/include/nodes/primnodes.h | 60
-rw-r--r--  src/include/nodes/relation.h | 66
-rw-r--r--  src/include/nodes/replnodes.h | 11
-rw-r--r--  src/include/nodes/tidbitmap.h | 2
-rw-r--r--  src/include/nodes/value.h | 2
-rw-r--r--  src/include/parser/gramparse.h | 2
-rw-r--r--  src/include/parser/parse_node.h | 6
-rw-r--r--  src/include/parser/scanner.h | 4
-rw-r--r--  src/include/pg_config_manual.h | 18
-rw-r--r--  src/include/pgstat.h | 24
-rw-r--r--  src/include/port.h | 4
-rw-r--r--  src/include/port/linux.h | 2
-rw-r--r--  src/include/port/win32.h | 4
-rw-r--r--  src/include/portability/instr_time.h | 4
-rw-r--r--  src/include/postgres.h | 8
-rw-r--r--  src/include/postgres_ext.h | 2
-rw-r--r--  src/include/postmaster/bgworker.h | 11
-rw-r--r--  src/include/postmaster/bgworker_internals.h | 4
-rw-r--r--  src/include/postmaster/syslogger.h | 2
-rw-r--r--  src/include/regex/regcustom.h | 2
-rw-r--r--  src/include/regex/regex.h | 2
-rw-r--r--  src/include/regex/regexport.h | 2
-rw-r--r--  src/include/regex/regguts.h | 14
-rw-r--r--  src/include/replication/basebackup.h | 4
-rw-r--r--  src/include/replication/decode.h | 2
-rw-r--r--  src/include/replication/logical.h | 26
-rw-r--r--  src/include/replication/output_plugin.h | 10
-rw-r--r--  src/include/replication/reorderbuffer.h | 18
-rw-r--r--  src/include/replication/slot.h | 14
-rw-r--r--  src/include/replication/snapbuild.h | 16
-rw-r--r--  src/include/replication/walreceiver.h | 8
-rw-r--r--  src/include/rewrite/rewriteHandler.h | 8
-rw-r--r--  src/include/snowball/header.h | 2
-rw-r--r--  src/include/storage/barrier.h | 2
-rw-r--r--  src/include/storage/block.h | 2
-rw-r--r--  src/include/storage/buf_internals.h | 8
-rw-r--r--  src/include/storage/bufpage.h | 8
-rw-r--r--  src/include/storage/dsm.h | 4
-rw-r--r--  src/include/storage/dsm_impl.h | 2
-rw-r--r--  src/include/storage/ipc.h | 2
-rw-r--r--  src/include/storage/itemid.h | 2
-rw-r--r--  src/include/storage/itemptr.h | 2
-rw-r--r--  src/include/storage/large_object.h | 2
-rw-r--r--  src/include/storage/lock.h | 14
-rw-r--r--  src/include/storage/lwlock.h | 8
-rw-r--r--  src/include/storage/pg_sema.h | 2
-rw-r--r--  src/include/storage/pg_shmem.h | 6
-rw-r--r--  src/include/storage/pos.h | 2
-rw-r--r--  src/include/storage/predicate_internals.h | 10
-rw-r--r--  src/include/storage/proc.h | 13
-rw-r--r--  src/include/storage/procarray.h | 2
-rw-r--r--  src/include/storage/relfilenode.h | 6
-rw-r--r--  src/include/storage/shm_mq.h | 6
-rw-r--r--  src/include/storage/shm_toc.h | 4
-rw-r--r--  src/include/storage/sinval.h | 4
-rw-r--r--  src/include/storage/sinvaladt.h | 2
-rw-r--r--  src/include/storage/smgr.h | 6
-rw-r--r--  src/include/storage/spin.h | 6
-rw-r--r--  src/include/tcop/dest.h | 12
-rw-r--r--  src/include/tcop/tcopdebug.h | 2
-rw-r--r--  src/include/utils/acl.h | 4
-rw-r--r--  src/include/utils/builtins.h | 2
-rw-r--r--  src/include/utils/catcache.h | 8
-rw-r--r--  src/include/utils/datetime.h | 4
-rw-r--r--  src/include/utils/elog.h | 4
-rw-r--r--  src/include/utils/guc.h | 4
-rw-r--r--  src/include/utils/hsearch.h | 2
-rw-r--r--  src/include/utils/inet.h | 4
-rw-r--r--  src/include/utils/jsonapi.h | 4
-rw-r--r--  src/include/utils/jsonb.h | 82
-rw-r--r--  src/include/utils/memutils.h | 4
-rw-r--r--  src/include/utils/palloc.h | 12
-rw-r--r--  src/include/utils/pg_crc.h | 2
-rw-r--r--  src/include/utils/plancache.h | 12
-rw-r--r--  src/include/utils/portal.h | 6
-rw-r--r--  src/include/utils/rel.h | 21
-rw-r--r--  src/include/utils/relcache.h | 4
-rw-r--r--  src/include/utils/relfilenodemap.h | 2
-rw-r--r--  src/include/utils/resowner.h | 2
-rw-r--r--  src/include/utils/resowner_private.h | 4
-rw-r--r--  src/include/utils/selfuncs.h | 2
-rw-r--r--  src/include/utils/snapshot.h | 10
-rw-r--r--  src/include/utils/sortsupport.h | 2
-rw-r--r--  src/include/utils/tqual.h | 12
-rw-r--r--  src/include/utils/tuplesort.h | 8
-rw-r--r--  src/include/utils/tuplestore.h | 2
-rw-r--r--  src/include/utils/typcache.h | 6
-rw-r--r--  src/interfaces/ecpg/ecpglib/data.c | 2
-rw-r--r--  src/interfaces/ecpg/ecpglib/execute.c | 24
-rw-r--r--  src/interfaces/ecpg/ecpglib/extern.h | 14
-rw-r--r--  src/interfaces/ecpg/include/sqlca.h | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt.h | 4
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt_common.c | 8
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/interval.c | 6
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/numeric.c | 2
-rw-r--r--  src/interfaces/ecpg/preproc/c_keywords.c | 2
-rw-r--r--  src/interfaces/ecpg/preproc/extern.h | 4
-rw-r--r--  src/interfaces/ecpg/preproc/output.c | 2
-rw-r--r--  src/interfaces/ecpg/preproc/parse.pl | 12
-rw-r--r--  src/interfaces/ecpg/preproc/parser.c | 2
-rw-r--r--  src/interfaces/ecpg/preproc/type.c | 89
-rw-r--r--  src/interfaces/ecpg/preproc/variable.c | 5
-rw-r--r--  src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/preproc-init.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-array.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-code100.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-copystdout.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-define.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dynalloc.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dynalloc2.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dyntest.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-indicators.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-alloc.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-descriptor.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-prep.c | 2
-rw-r--r--  src/interfaces/ecpg/test/pg_regress_ecpg.c | 6
-rw-r--r--  src/interfaces/libpq/fe-auth.c | 8
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 59
-rw-r--r--  src/interfaces/libpq/fe-exec.c | 30
-rw-r--r--  src/interfaces/libpq/fe-lobj.c | 2
-rw-r--r--  src/interfaces/libpq/fe-misc.c | 4
-rw-r--r--  src/interfaces/libpq/fe-protocol2.c | 12
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 26
-rw-r--r--  src/interfaces/libpq/fe-secure.c | 31
-rw-r--r--  src/interfaces/libpq/libpq-fe.h | 4
-rw-r--r--  src/interfaces/libpq/libpq-int.h | 3
-rw-r--r--  src/interfaces/libpq/pqexpbuffer.c | 10
-rw-r--r--  src/interfaces/libpq/pqexpbuffer.h | 8
-rw-r--r--  src/interfaces/libpq/test/uri-regress.c | 2
-rw-r--r--  src/pl/plperl/plperl.c | 17
-rw-r--r--  src/pl/plpgsql/src/pl_comp.c | 24
-rw-r--r--  src/pl/plpgsql/src/pl_exec.c | 66
-rw-r--r--  src/pl/plpgsql/src/pl_funcs.c | 4
-rw-r--r--  src/pl/plpgsql/src/pl_handler.c | 6
-rw-r--r--  src/pl/plpgsql/src/pl_scanner.c | 8
-rw-r--r--  src/pl/plpgsql/src/plpgsql.h | 12
-rw-r--r--  src/pl/plpython/plpy_elog.c | 2
-rw-r--r--  src/pl/plpython/plpy_exec.c | 2
-rw-r--r--  src/pl/plpython/plpy_typeio.c | 8
-rw-r--r--  src/pl/plpython/plpy_util.c | 6
-rw-r--r--  src/pl/tcl/pltcl.c | 8
-rw-r--r--  src/port/chklocale.c | 2
-rw-r--r--  src/port/crypt.c | 26
-rw-r--r--  src/port/dirent.c | 2
-rw-r--r--  src/port/erand48.c | 2
-rw-r--r--  src/port/fls.c | 4
-rw-r--r--  src/port/getaddrinfo.c | 5
-rw-r--r--  src/port/getopt.c | 2
-rw-r--r--  src/port/getopt_long.c | 2
-rw-r--r--  src/port/inet_aton.c | 4
-rw-r--r--  src/port/kill.c | 2
-rw-r--r--  src/port/path.c | 6
-rw-r--r--  src/port/pgmkdirp.c | 2
-rw-r--r--  src/port/pqsignal.c | 4
-rw-r--r--  src/port/qsort.c | 2
-rw-r--r--  src/port/qsort_arg.c | 2
-rw-r--r--  src/port/snprintf.c | 6
-rw-r--r--  src/port/sprompt.c | 6
-rw-r--r--  src/port/strlcat.c | 2
-rw-r--r--  src/port/strlcpy.c | 4
-rw-r--r--  src/port/thread.c | 4
-rw-r--r--  src/port/unsetenv.c | 2
-rw-r--r--  src/test/isolation/isolation_main.c | 12
-rw-r--r--  src/test/isolation/isolationtester.c | 36
-rw-r--r--  src/test/isolation/isolationtester.h | 4
-rw-r--r--  src/test/perl/TestLib.pm | 119
-rw-r--r--  src/test/regress/pg_regress.c | 30
-rw-r--r--  src/test/regress/pg_regress.h | 4
-rw-r--r--  src/test/regress/pg_regress_main.c | 6
-rw-r--r--  src/test/regress/regress.c | 9
-rw-r--r--  src/timezone/localtime.c | 12
-rw-r--r--  src/timezone/pgtz.c | 6
-rw-r--r--  src/timezone/zic.c | 3
-rw-r--r--  src/tools/entab/entab.c | 10
-rwxr-xr-x  src/tools/git_changelog | 2
-rw-r--r--  src/tools/msvc/MSBuildProject.pm | 8
-rw-r--r--  src/tools/msvc/Mkvcbuild.pm | 2
-rw-r--r--  src/tools/msvc/Solution.pm | 32
-rw-r--r--  src/tools/msvc/gendef.pl | 250
-rwxr-xr-x  src/tools/pgindent/pgindent | 20
-rw-r--r--  src/tutorial/complex.c | 2
747 files changed, 7231 insertions, 6801 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index c64ede9dac..009ebe7a1c 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -21,7 +21,7 @@
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -387,7 +387,7 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
@@ -454,7 +454,7 @@ nocachegetattr(HeapTuple tuple,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
@@ -549,7 +549,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
/*
* cmin and cmax are now both aliases for the same field, which
- * can in fact also be a combo command id. XXX perhaps we should
+ * can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
@@ -709,7 +709,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
len += data_len;
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 7da10e9a74..5fd400990b 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
- * on outside storage. This should be improved someday.
+ * on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
@@ -280,7 +280,7 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
@@ -347,7 +347,7 @@ nocache_index_getattr(IndexTuple tup,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index af59aa1a40..c7fa727485 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -182,7 +182,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 530a1aee7b..522b671993 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -540,7 +540,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
@@ -868,7 +868,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *
@@ -1205,7 +1205,7 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
{"check_option", RELOPT_TYPE_STRING,
offsetof(StdRdOptions, check_option_offset)},
{"user_catalog_table", RELOPT_TYPE_BOOL,
- offsetof(StdRdOptions, user_catalog_table)}
+ offsetof(StdRdOptions, user_catalog_table)}
};
options = parseRelOptions(reloptions, validate, kind, &numoptions);
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index 1b6c6d957c..2e48b32ba3 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -5,7 +5,7 @@
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 74cfb6499a..f3b36893f7 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -581,7 +581,7 @@ TupleDescInitEntryCollation(TupleDesc desc,
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 32dbed68c7..66cea28113 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = true;
@@ -279,9 +279,10 @@ ginarraytriconsistent(PG_FUNCTION_ARGS)
res = GIN_MAYBE;
break;
case GinEqualStrategy:
+
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = GIN_MAYBE;
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 9b0f82fc90..27f88e0eb2 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -251,6 +251,7 @@ ginFindParents(GinBtree btree, GinBtreeStack *stack)
Assert(blkno != btree->rootBlkno);
ptr->blkno = blkno;
ptr->buffer = buffer;
+
/*
* parent may be wrong, but if so, the ginFinishSplit call will
* recurse to call ginFindParents again to fix it.
@@ -328,7 +329,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
GinPlaceToPageRC rc;
uint16 xlflags = 0;
Page childpage = NULL;
- Page newlpage = NULL, newrpage = NULL;
+ Page newlpage = NULL,
+ newrpage = NULL;
if (GinPageIsData(page))
xlflags |= GIN_INSERT_ISDATA;
@@ -346,8 +348,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
}
/*
- * Try to put the incoming tuple on the page. placeToPage will decide
- * if the page needs to be split.
+ * Try to put the incoming tuple on the page. placeToPage will decide if
+ * the page needs to be split.
*/
rc = btree->placeToPage(btree, stack->buffer, stack,
insertdata, updateblkno,
@@ -371,7 +373,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
XLogRecPtr recptr;
XLogRecData rdata[3];
ginxlogInsert xlrec;
- BlockIdData childblknos[2];
+ BlockIdData childblknos[2];
xlrec.node = btree->index->rd_node;
xlrec.blkno = BufferGetBlockNumber(stack->buffer);
@@ -449,7 +451,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
data.flags = xlflags;
if (childbuf != InvalidBuffer)
{
- Page childpage = BufferGetPage(childbuf);
+ Page childpage = BufferGetPage(childbuf);
+
GinPageGetOpaque(childpage)->flags &= ~GIN_INCOMPLETE_SPLIT;
data.leftChildBlkno = BufferGetBlockNumber(childbuf);
@@ -505,8 +508,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
/*
* Construct a new root page containing downlinks to the new left
- * and right pages. (do this in a temporary copy first rather
- * than overwriting the original page directly, so that we can still
+ * and right pages. (do this in a temporary copy first rather than
+ * overwriting the original page directly, so that we can still
* abort gracefully if this fails.)
*/
newrootpg = PageGetTempPage(newrpage);
@@ -604,7 +607,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
else
{
elog(ERROR, "unknown return code from GIN placeToPage method: %d", rc);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
}
@@ -627,8 +630,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
bool first = true;
/*
- * freestack == false when we encounter an incompletely split page during a
- * scan, while freestack == true is used in the normal scenario that a
+ * freestack == false when we encounter an incompletely split page during
+ * a scan, while freestack == true is used in the normal scenario that a
* split is finished right after the initial insert.
*/
if (!freestack)
@@ -650,8 +653,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
* then continue with the current one.
*
* Note: we have to finish *all* incomplete splits we encounter, even
- * if we have to move right. Otherwise we might choose as the target
- * a page that has no downlink in the parent, and splitting it further
+ * if we have to move right. Otherwise we might choose as the target a
+ * page that has no downlink in the parent, and splitting it further
* would fail.
*/
if (GinPageIsIncompleteSplit(BufferGetPage(parent->buffer)))
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index 9f3009b589..3af027187a 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow. To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index c11ed85883..272a9ca7c0 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -49,8 +49,8 @@ typedef struct
dlist_head segments; /* a list of leafSegmentInfos */
/*
- * The following fields represent how the segments are split across
- * pages, if a page split is required. Filled in by leafRepackItems.
+ * The following fields represent how the segments are split across pages,
+ * if a page split is required. Filled in by leafRepackItems.
*/
dlist_node *lastleft; /* last segment on left page */
int lsize; /* total size on left page */
@@ -61,7 +61,7 @@ typedef struct
typedef struct
{
- dlist_node node; /* linked list pointers */
+ dlist_node node; /* linked list pointers */
/*-------------
* 'action' indicates the status of this in-memory segment, compared to
@@ -83,9 +83,9 @@ typedef struct
int nmodifieditems;
/*
- * The following fields represent the items in this segment. If 'items'
- * is not NULL, it contains a palloc'd array of the itemsin this segment.
- * If 'seg' is not NULL, it contains the items in an already-compressed
+ * The following fields represent the items in this segment. If 'items' is
+ * not NULL, it contains a palloc'd array of the itemsin this segment. If
+ * 'seg' is not NULL, it contains the items in an already-compressed
* format. It can point to an on-disk page (!modified), or a palloc'd
* segment in memory. If both are set, they must represent the same items.
*/
@@ -386,7 +386,7 @@ GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset)
if (offset != maxoff + 1)
memmove(ptr + sizeof(PostingItem),
ptr,
- (maxoff - offset + 1) * sizeof(PostingItem));
+ (maxoff - offset + 1) *sizeof(PostingItem));
}
memcpy(ptr, data, sizeof(PostingItem));
@@ -436,8 +436,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
int maxitems = items->nitem - items->curitem;
Page page = BufferGetPage(buf);
int i;
- ItemPointerData rbound;
- ItemPointerData lbound;
+ ItemPointerData rbound;
+ ItemPointerData lbound;
bool needsplit;
bool append;
int segsize;
@@ -451,7 +451,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
Assert(GinPageIsData(page));
- rbound = *GinDataPageGetRightBound(page);
+ rbound = *GinDataPageGetRightBound(page);
/*
* Count how many of the new items belong to this page.
@@ -464,8 +464,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
{
/*
* This needs to go to some other location in the tree. (The
- * caller should've chosen the insert location so that at least
- * the first item goes here.)
+ * caller should've chosen the insert location so that at
+ * least the first item goes here.)
*/
Assert(i > 0);
break;
@@ -553,7 +553,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
/* Add the new items to the segments */
if (!addItemsToLeaf(leaf, newItems, maxitems))
{
- /* all items were duplicates, we have nothing to do */
+ /* all items were duplicates, we have nothing to do */
items->curitem += maxitems;
MemoryContextSwitchTo(oldCxt);
@@ -680,7 +680,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
Assert(GinPageRightMost(page) ||
ginCompareItemPointers(GinDataPageGetRightBound(*newlpage),
- GinDataPageGetRightBound(*newrpage)) < 0);
+ GinDataPageGetRightBound(*newrpage)) < 0);
if (append)
elog(DEBUG2, "appended %d items to block %u; split %d/%d (%d to go)",
@@ -769,16 +769,16 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs)
* We don't try to re-encode the segments here, even though some of them
* might be really small now that we've removed some items from them. It
* seems like a waste of effort, as there isn't really any benefit from
- * larger segments per se; larger segments only help to pack more items
- * in the same space. We might as well delay doing that until the next
+ * larger segments per se; larger segments only help to pack more items in
+ * the same space. We might as well delay doing that until the next
* insertion, which will need to re-encode at least part of the page
* anyway.
*
- * Also note if the page was in uncompressed, pre-9.4 format before, it
- * is now represented as one huge segment that contains all the items.
- * It might make sense to split that, to speed up random access, but we
- * don't bother. You'll have to REINDEX anyway if you want the full gain
- * of the new tighter index format.
+ * Also note if the page was in uncompressed, pre-9.4 format before, it is
+ * now represented as one huge segment that contains all the items. It
+ * might make sense to split that, to speed up random access, but we don't
+ * bother. You'll have to REINDEX anyway if you want the full gain of the
+ * new tighter index format.
*/
if (removedsomething)
{
@@ -795,6 +795,7 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs)
{
leafSegmentInfo *seginfo = dlist_container(leafSegmentInfo, node,
iter.cur);
+
if (seginfo->action != GIN_SEGMENT_UNMODIFIED)
modified = true;
if (modified && seginfo->action != GIN_SEGMENT_DELETE)
@@ -862,10 +863,11 @@ constructLeafRecompressWALData(Buffer buf, disassembledLeaf *leaf)
}
walbufbegin = palloc(
- sizeof(ginxlogRecompressDataLeaf) +
- BLCKSZ + /* max size needed to hold the segment data */
- nmodified * 2 + /* (segno + action) per action */
- sizeof(XLogRecData));
+ sizeof(ginxlogRecompressDataLeaf) +
+ BLCKSZ + /* max size needed to hold the segment
+ * data */
+ nmodified * 2 + /* (segno + action) per action */
+ sizeof(XLogRecData));
walbufend = walbufbegin;
recompress_xlog = (ginxlogRecompressDataLeaf *) walbufend;
@@ -965,9 +967,9 @@ dataPlaceToPageLeafRecompress(Buffer buf, disassembledLeaf *leaf)
int segsize;
/*
- * If the page was in pre-9.4 format before, convert the header, and
- * force all segments to be copied to the page whether they were modified
- * or not.
+ * If the page was in pre-9.4 format before, convert the header, and force
+ * all segments to be copied to the page whether they were modified or
+ * not.
*/
if (!GinPageIsCompressed(page))
{
@@ -1022,6 +1024,7 @@ dataPlaceToPageLeafSplit(Buffer buf, disassembledLeaf *leaf,
dlist_node *node;
dlist_node *firstright;
leafSegmentInfo *seginfo;
+
/* these must be static so they can be returned to caller */
static ginxlogSplitDataLeaf split_xlog;
static XLogRecData rdata[3];
@@ -1121,6 +1124,7 @@ dataPlaceToPageInternal(GinBtree btree, Buffer buf, GinBtreeStack *stack,
Page page = BufferGetPage(buf);
OffsetNumber off = stack->off;
PostingItem *pitem;
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata;
static ginxlogInsertDataInternal data;
@@ -1198,7 +1202,7 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf,
int nrightitems;
Size pageSize = PageGetPageSize(oldpage);
ItemPointerData oldbound = *GinDataPageGetRightBound(oldpage);
- ItemPointer bound;
+ ItemPointer bound;
Page lpage;
Page rpage;
OffsetNumber separator;
@@ -1216,8 +1220,8 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf,
*prdata = rdata;
/*
- * First construct a new list of PostingItems, which includes all the
- * old items, and the new item.
+ * First construct a new list of PostingItems, which includes all the old
+ * items, and the new item.
*/
memcpy(allitems, GinDataPageGetPostingItem(oldpage, FirstOffsetNumber),
(off - 1) * sizeof(PostingItem));
@@ -1402,8 +1406,8 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
leafSegmentInfo *newseg;
/*
- * If the page is completely empty, just construct one new segment to
- * hold all the new items.
+ * If the page is completely empty, just construct one new segment to hold
+ * all the new items.
*/
if (dlist_is_empty(&leaf->segments))
{
@@ -1418,9 +1422,9 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
dlist_foreach(iter, &leaf->segments)
{
- leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur);
+ leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur);
int nthis;
- ItemPointer tmpitems;
+ ItemPointer tmpitems;
int ntmpitems;
/*
@@ -1434,7 +1438,7 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
ItemPointerData next_first;
next = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node,
- dlist_next_node(&leaf->segments, iter.cur));
+ dlist_next_node(&leaf->segments, iter.cur));
if (next->items)
next_first = next->items[0];
else
@@ -1556,27 +1560,27 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
if (seginfo->seg == NULL)
{
if (seginfo->nitems > GinPostingListSegmentMaxSize)
- npacked = 0; /* no chance that it would fit. */
+ npacked = 0; /* no chance that it would fit. */
else
{
seginfo->seg = ginCompressPostingList(seginfo->items,
seginfo->nitems,
- GinPostingListSegmentMaxSize,
+ GinPostingListSegmentMaxSize,
&npacked);
}
if (npacked != seginfo->nitems)
{
/*
- * Too large. Compress again to the target size, and create
- * a new segment to represent the remaining items. The new
- * segment is inserted after this one, so it will be
- * processed in the next iteration of this loop.
+ * Too large. Compress again to the target size, and
+ * create a new segment to represent the remaining items.
+ * The new segment is inserted after this one, so it will
+ * be processed in the next iteration of this loop.
*/
if (seginfo->seg)
pfree(seginfo->seg);
seginfo->seg = ginCompressPostingList(seginfo->items,
seginfo->nitems,
- GinPostingListSegmentTargetSize,
+ GinPostingListSegmentTargetSize,
&npacked);
if (seginfo->action != GIN_SEGMENT_INSERT)
seginfo->action = GIN_SEGMENT_REPLACE;
@@ -1596,7 +1600,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
*/
if (SizeOfGinPostingList(seginfo->seg) < GinPostingListSegmentMinSize && next_node)
{
- int nmerged;
+ int nmerged;
nextseg = dlist_container(leafSegmentInfo, node, next_node);
@@ -1741,8 +1745,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
GinPageGetOpaque(tmppage)->rightlink = InvalidBlockNumber;
/*
- * Write as many of the items to the root page as fit. In segments
- * of max GinPostingListSegmentMaxSize bytes each.
+ * Write as many of the items to the root page as fit. In segments of max
+ * GinPostingListSegmentMaxSize bytes each.
*/
nrootitems = 0;
rootsize = 0;
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 4291bab63b..412f90da4d 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -135,7 +135,8 @@ GinFormTuple(GinState *ginstate,
*/
if (data)
{
- char *ptr = GinGetPosting(itup);
+ char *ptr = GinGetPosting(itup);
+
memcpy(ptr, data, dataSize);
}
@@ -162,7 +163,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup,
{
Pointer ptr = GinGetPosting(itup);
int nipd = GinGetNPosting(itup);
- ItemPointer ipd;
+ ItemPointer ipd;
int ndecoded;
if (GinItupIsCompressed(itup))
@@ -192,7 +193,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup,
* Form a non-leaf entry tuple by copying the key data from the given tuple,
* which can be either a leaf or non-leaf entry tuple.
*
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied. The specified child
* block number is inserted into t_tid.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index a16c2140c2..09c3e39bf3 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -440,7 +440,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
* Create temporary index tuples for a single indexable item (one index column
* for the heap tuple specified by ht_ctid), and append them to the array
* in *collector. They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
* temp tuples for a given heap tuple must be written in one call to
* ginHeapTupleFastInsert.
*/
@@ -707,7 +707,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka,
*
* This can be called concurrently by multiple backends, so it must cope.
* At first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -761,7 +761,7 @@ ginInsertCleanup(GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -855,7 +855,7 @@ ginInsertCleanup(GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. The insertion algorithm
* guarantees that inserted row(s) will not continue on the next page.
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index fda19cf4e6..271f09901b 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -85,7 +85,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
page = BufferGetPage(buffer);
if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
{
- int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap);
+ int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap);
+
scanEntry->predictNumberResult += n;
}
@@ -100,7 +101,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
/*
* Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry. This supports three different match modes:
*
* 1. Partial-match support: scan from current point until the
* comparePartialFn says we're done.
@@ -196,7 +197,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
/*
* In ALL mode, we are not interested in null items, so we can
* stop if we get to a null-item placeholder (which will be the
- * last entry for a given attnum). We do want to include NULL_KEY
+ * last entry for a given attnum). We do want to include NULL_KEY
* and EMPTY_ITEM entries, though.
*/
if (icategory == GIN_CAT_NULL_ITEM)
@@ -407,7 +408,7 @@ restartScanEntry:
else if (GinGetNPosting(itup) > 0)
{
entry->list = ginReadTuple(ginstate, entry->attnum, itup,
- &entry->nlist);
+ &entry->nlist);
entry->predictNumberResult = entry->nlist;
entry->isFinished = FALSE;
@@ -463,11 +464,11 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
* considerably, if the frequent term can be put in the additional set.
*
* There can be many legal ways to divide the entries into these two
- * sets. A conservative division is to just put everything in the
- * required set, but the more you can put in the additional set, the more
- * you can skip during the scan. To maximize skipping, we try to put as
- * many frequent items as possible into additional, and less frequent
- * ones into required. To do that, sort the entries by frequency
+ * sets. A conservative division is to just put everything in the required
+ * set, but the more you can put in the additional set, the more you can
+ * skip during the scan. To maximize skipping, we try to put as many
+ * frequent items as possible into additional, and less frequent ones into
+ * required. To do that, sort the entries by frequency
* (predictNumberResult), and put entries into the required set in that
* order, until the consistent function says that none of the remaining
* entries can form a match, without any items from the required set. The
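(The sentence continues in the next hunk.) As a sketch of the division loop
this comment describes, assume entryIndexes[] holds the entry numbers sorted
by ascending predictNumberResult and that i and j are scratch indexes; this
is an illustration, not the verbatim function body:

    for (i = 0; i < key->nentries - 1; i++)
    {
        /* pass the i+1 least frequent entries as FALSE, the rest as MAYBE */
        for (j = 0; j <= i; j++)
            key->entryRes[entryIndexes[j]] = GIN_FALSE;
        for (j = i + 1; j < key->nentries; j++)
            key->entryRes[entryIndexes[j]] = GIN_MAYBE;

        if (key->triConsistentFn(key) == GIN_FALSE)
            break;      /* the remaining entries alone can't form a match */
    }
    /* entries 0..i go to the required set, the rest to the additional set */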
@@ -635,8 +636,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan
if (stepright)
{
/*
- * We've processed all the entries on this page. If it was the last
- * page in the tree, we're done.
+ * We've processed all the entries on this page. If it was the
+ * last page in the tree, we're done.
*/
if (GinPageRightMost(page))
{
@@ -647,8 +648,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan
}
/*
- * Step to next page, following the right link. then find the first
- * ItemPointer greater than advancePast.
+ * Step to next page, following the right link. Then find the
+ * first ItemPointer greater than advancePast.
*/
entry->buffer = ginStepRight(entry->buffer,
ginstate->index,
@@ -658,7 +659,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan
stepright = true;
if (GinPageGetOpaque(page)->flags & GIN_DELETED)
- continue; /* page was deleted by concurrent vacuum */
+ continue; /* page was deleted by concurrent vacuum */
/*
* The first item > advancePast might not be on this page, but
@@ -781,6 +782,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
gotitem = true;
break;
}
+
/*
* Not a lossy page. Skip over any offsets <= advancePast, and
* return that.
@@ -788,8 +790,9 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
if (entry->matchResult->blockno == advancePastBlk)
{
/*
- * First, do a quick check against the last offset on the page.
- * If that's > advancePast, so are all the other offsets.
+ * First, do a quick check against the last offset on the
+ * page. If that's > advancePast, so are all the other
+ * offsets.
*/
if (entry->matchResult->offsets[entry->matchResult->ntuples - 1] <= advancePastOff)
{
@@ -890,8 +893,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
/*
* We might have already tested this item; if so, no need to repeat work.
- * (Note: the ">" case can happen, if advancePast is exact but we previously
- * had to set curItem to a lossy-page pointer.)
+ * (Note: the ">" case can happen, if advancePast is exact but we
+ * previously had to set curItem to a lossy-page pointer.)
*/
if (ginCompareItemPointers(&key->curItem, &advancePast) > 0)
return;
@@ -942,8 +945,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
/*
* Ok, we now know that there are no matches < minItem.
*
- * If minItem is lossy, it means that there were no exact items on
- * the page among requiredEntries, because lossy pointers sort after exact
+ * If minItem is lossy, it means that there were no exact items on the
+ * page among requiredEntries, because lossy pointers sort after exact
* items. However, there might be exact items for the same page among
* additionalEntries, so we mustn't advance past them.
*/
@@ -1085,6 +1088,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
if (entry->isFinished)
key->entryRes[i] = GIN_FALSE;
#if 0
+
/*
* This case can't currently happen, because we loaded all the entries
* for this item earlier.
@@ -1119,6 +1123,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
break;
default:
+
/*
* the 'default' case shouldn't happen, but if the consistent
* function returns something bogus, this is the safe result
@@ -1129,11 +1134,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
}
/*
- * We have a tuple, and we know if it matches or not. If it's a
- * non-match, we could continue to find the next matching tuple, but
- * let's break out and give scanGetItem a chance to advance the other
- * keys. They might be able to skip past to a much higher TID, allowing
- * us to save work.
+ * We have a tuple, and we know if it matches or not. If it's a non-match,
+ * we could continue to find the next matching tuple, but let's break out
+ * and give scanGetItem a chance to advance the other keys. They might be
+ * able to skip past to a much higher TID, allowing us to save work.
*/
/* clean up after consistentFn calls */
@@ -1165,14 +1169,14 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
* matching item.
*
* This logic works only if a keyGetItem stream can never contain both
- * exact and lossy pointers for the same page. Else we could have a
+ * exact and lossy pointers for the same page. Else we could have a
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
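A lossy pointer such as 42/0xffff above denotes "all of block 42". The
macros involved are plausibly of this shape (GIN-style guesses for
illustration, not quoted from the source):

    #define ItemPointerSetLossyPage(p, b) \
        ItemPointerSet((p), (b), (OffsetNumber) 0xffff)
    #define ItemPointerIsLossyPage(p) \
        (GinItemPointerGetOffsetNumber(p) == (OffsetNumber) 0xffff)

Because the pseudo-offset 0xffff is the maximum possible, ginCompareItemPointers
orders a lossy pointer after every exact pointer for the same block, which is
what the argument above relies on.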
@@ -1205,12 +1209,11 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
}
/*
- * It's a match. We can conclude that nothing < matches, so
- * the other key streams can skip to this item.
+ * It's a match. We can conclude that nothing < matches, so the
+ * other key streams can skip to this item.
*
- * Beware of lossy pointers, though; from a lossy pointer, we
- * can only conclude that nothing smaller than this *block*
- * matches.
+ * Beware of lossy pointers, though; from a lossy pointer, we can
+ * only conclude that nothing smaller than this *block* matches.
*/
if (ItemPointerIsLossyPage(&key->curItem))
{
@@ -1229,8 +1232,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
}
/*
- * If this is the first key, remember this location as a
- * potential match, and proceed to check the rest of the keys.
+ * If this is the first key, remember this location as a potential
+ * match, and proceed to check the rest of the keys.
*
* Otherwise, check if this is the same item that we checked the
* previous keys for (or a lossy pointer for the same page). If
@@ -1247,7 +1250,7 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
if (ItemPointerIsLossyPage(&key->curItem) ||
ItemPointerIsLossyPage(item))
{
- Assert (GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item));
+ Assert(GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item));
match = (GinItemPointerGetBlockNumber(&key->curItem) ==
GinItemPointerGetBlockNumber(item));
}
@@ -1264,8 +1267,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
/*
* Now *item contains the first ItemPointer after previous result that
- * satisfied all the keys for that exact TID, or a lossy reference
- * to the same page.
+ * satisfied all the keys for that exact TID, or a lossy reference to the
+ * same page.
*
* We must return recheck = true if any of the keys are marked recheck.
*/
@@ -1776,10 +1779,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 3bafb6471b..b27cae3aab 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -40,7 +40,7 @@ typedef struct
* Adds an array of item pointers to the tuple's posting list, or
* creates a posting tree and a tuple pointing to the tree when there
* is not enough space. Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple(). Returns a new, modified index tuple.
* items[] must be in sorted order with no duplicates.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginlogic.c b/src/backend/access/gin/ginlogic.c
index 167d25ea5c..052abd2bd8 100644
--- a/src/backend/access/gin/ginlogic.c
+++ b/src/backend/access/gin/ginlogic.c
@@ -47,7 +47,7 @@
* Maximum number of MAYBE inputs that shimTriConsistentFn will try to
* resolve by calling all combinations.
*/
-#define MAX_MAYBE_ENTRIES 4
+#define MAX_MAYBE_ENTRIES 4
/*
* Dummy consistent functions for an EVERYTHING key. Just claim it matches.
@@ -95,14 +95,14 @@ static GinTernaryValue
directTriConsistentFn(GinScanKey key)
{
return DatumGetGinTernaryValue(FunctionCall7Coll(
- key->triConsistentFmgrInfo,
- key->collation,
- PointerGetDatum(key->entryRes),
- UInt16GetDatum(key->strategy),
- key->query,
- UInt32GetDatum(key->nuserentries),
- PointerGetDatum(key->extra_data),
- PointerGetDatum(key->queryValues),
+ key->triConsistentFmgrInfo,
+ key->collation,
+ PointerGetDatum(key->entryRes),
+ UInt16GetDatum(key->strategy),
+ key->query,
+ UInt32GetDatum(key->nuserentries),
+ PointerGetDatum(key->extra_data),
+ PointerGetDatum(key->queryValues),
PointerGetDatum(key->queryCategories)));
}
@@ -115,15 +115,16 @@ static bool
shimBoolConsistentFn(GinScanKey key)
{
GinTernaryValue result;
+
result = DatumGetGinTernaryValue(FunctionCall7Coll(
- key->triConsistentFmgrInfo,
- key->collation,
- PointerGetDatum(key->entryRes),
- UInt16GetDatum(key->strategy),
- key->query,
- UInt32GetDatum(key->nuserentries),
- PointerGetDatum(key->extra_data),
- PointerGetDatum(key->queryValues),
+ key->triConsistentFmgrInfo,
+ key->collation,
+ PointerGetDatum(key->entryRes),
+ UInt16GetDatum(key->strategy),
+ key->query,
+ UInt32GetDatum(key->nuserentries),
+ PointerGetDatum(key->extra_data),
+ PointerGetDatum(key->queryValues),
PointerGetDatum(key->queryCategories)));
if (result == GIN_MAYBE)
{
@@ -240,8 +241,8 @@ ginInitConsistentFunction(GinState *ginstate, GinScanKey key)
key->boolConsistentFn = shimBoolConsistentFn;
if (OidIsValid(ginstate->triConsistentFn[key->attnum - 1].fn_oid))
- key->triConsistentFn = directTriConsistentFn;
+ key->triConsistentFn = directTriConsistentFn;
else
- key->triConsistentFn = shimTriConsistentFn;
+ key->triConsistentFn = shimTriConsistentFn;
}
}
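The opposite shim, shimTriConsistentFn, resolves GIN_MAYBE inputs by brute
force when the opclass provides only a boolean consistent function. A
condensed sketch under stated assumptions (directBoolConsistentFn is taken
to be this file's direct boolean wrapper; the real function is more careful,
e.g. about restoring entryRes):

    static GinTernaryValue
    shimTriConsistentFn(GinScanKey key)
    {
        int         maybeEntries[MAX_MAYBE_ENTRIES];
        int         nmaybe = 0;
        int         i;
        uint32      mask;
        bool        firstcall = true;
        bool        curResult = false;

        /* collect the MAYBE inputs; give up if there are too many */
        for (i = 0; i < key->nentries; i++)
        {
            if (key->entryRes[i] == GIN_MAYBE)
            {
                if (nmaybe >= MAX_MAYBE_ENTRIES)
                    return GIN_MAYBE;
                maybeEntries[nmaybe++] = i;
            }
        }

        /* try every TRUE/FALSE assignment of the MAYBE inputs */
        for (mask = 0; mask < ((uint32) 1 << nmaybe); mask++)
        {
            bool        res;

            for (i = 0; i < nmaybe; i++)
                key->entryRes[maybeEntries[i]] =
                    (mask & ((uint32) 1 << i)) ? GIN_TRUE : GIN_FALSE;
            res = directBoolConsistentFn(key);
            if (firstcall)
            {
                curResult = res;
                firstcall = false;
            }
            else if (res != curResult)
                return GIN_MAYBE;   /* answer depends on the MAYBE inputs */
        }
        return curResult ? GIN_TRUE : GIN_FALSE;
    }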
diff --git a/src/backend/access/gin/ginpostinglist.c b/src/backend/access/gin/ginpostinglist.c
index 81bbb09c24..606a824f12 100644
--- a/src/backend/access/gin/ginpostinglist.c
+++ b/src/backend/access/gin/ginpostinglist.c
@@ -126,9 +126,9 @@ encode_varbyte(uint64 val, unsigned char **ptr)
static uint64
decode_varbyte(unsigned char **ptr)
{
- uint64 val;
+ uint64 val;
unsigned char *p = *ptr;
- uint64 c;
+ uint64 c;
c = *(p++);
val = c & 0x7F;
@@ -210,7 +210,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize,
uint64 val = itemptr_to_uint64(&ipd[totalpacked]);
uint64 delta = val - prev;
- Assert (val > prev);
+ Assert(val > prev);
if (endptr - ptr >= 6)
encode_varbyte(delta, &ptr);
@@ -225,7 +225,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize,
encode_varbyte(delta, &p);
if (p - buf > (endptr - ptr))
- break; /* output is full */
+ break; /* output is full */
memcpy(ptr, buf, p - buf);
ptr += (p - buf);
@@ -286,7 +286,7 @@ ginPostingListDecode(GinPostingList *plist, int *ndecoded)
ItemPointer
ginPostingListDecodeAllSegments(GinPostingList *segment, int len, int *ndecoded_out)
{
- ItemPointer result;
+ ItemPointer result;
int nallocated;
uint64 val;
char *endseg = ((char *) segment) + len;
@@ -349,7 +349,7 @@ ginPostingListDecodeAllSegmentsToTbm(GinPostingList *ptr, int len,
TIDBitmap *tbm)
{
int ndecoded;
- ItemPointer items;
+ ItemPointer items;
items = ginPostingListDecodeAllSegments(ptr, len, &ndecoded);
tbm_add_tuples(tbm, items, ndecoded, false);
@@ -374,8 +374,8 @@ ginMergeItemPointers(ItemPointerData *a, uint32 na,
dst = (ItemPointer) palloc((na + nb) * sizeof(ItemPointerData));
/*
- * If the argument arrays don't overlap, we can just append them to
- * each other.
+ * If the argument arrays don't overlap, we can just append them to each
+ * other.
*/
if (na == 0 || nb == 0 || ginCompareItemPointers(&a[na - 1], &b[0]) < 0)
{
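For reference, the varbyte coding that decode_varbyte above undoes stores 7
data bits per byte, with the high bit flagging a continuation byte. A
self-contained sketch of the matching encoder (consistent with the decoder
fragment above; not a verbatim copy of the file's encode_varbyte):

    static void
    encode_varbyte_sketch(uint64 val, unsigned char **ptr)
    {
        unsigned char *p = *ptr;

        while (val > 0x7F)
        {
            *(p++) = 0x80 | (val & 0x7F);   /* low 7 bits, more to come */
            val >>= 7;
        }
        *(p++) = (unsigned char) val;       /* final byte, high bit clear */

        *ptr = p;
    }

Because the lists store deltas between consecutive TIDs (delta = val - prev,
per the Assert above), the common case of small gaps costs only one or two
bytes per item.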
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index b19386e19a..66c62b2e32 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -389,7 +389,7 @@ ginNewScanKey(IndexScanDesc scan)
/*
* If the index is version 0, it may be missing null and placeholder
* entries, which would render searches for nulls and full-index scans
- * unreliable. Throw an error if so.
+ * unreliable. Throw an error if so.
*/
if (hasNullQuery && !so->isVoidRes)
{
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 4dadb50dca..3ca0b68434 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -67,6 +67,7 @@ initGinState(GinState *state, Relation index)
fmgr_info_copy(&(state->extractQueryFn[i]),
index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC),
CurrentMemoryContext);
+
/*
* Check opclass capability to do tri-state or binary logic consistent
* check.
@@ -74,14 +75,14 @@ initGinState(GinState *state, Relation index)
if (index_getprocid(index, i + 1, GIN_TRICONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->triConsistentFn[i]),
- index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC),
+ index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC),
CurrentMemoryContext);
}
if (index_getprocid(index, i + 1, GIN_CONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->consistentFn[i]),
- index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
+ index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
CurrentMemoryContext);
}
@@ -458,7 +459,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
- * pretty bad too. For small numbers of keys it'd likely be better to use
+ * pretty bad too. For small numbers of keys it'd likely be better to use
* a simple insertion sort.
*/
if (*nentries > 1)
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 72f734caf8..af4d2714b5 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -47,7 +47,7 @@ ginVacuumItemPointers(GinVacuumState *gvs, ItemPointerData *items,
{
int i,
remaining = 0;
- ItemPointer tmpitems = NULL;
+ ItemPointer tmpitems = NULL;
/*
* Iterate over TIDs array
@@ -208,8 +208,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
}
/*
- * if we have root and there are empty pages in tree, then we don't release
- * lock to go further processing and guarantee that tree is unused
+ * If we have the root and there are empty pages in the tree, we don't
+ * release the lock; we hold it while processing further, to guarantee that the tree is unused
*/
if (!(isRoot && hasVoidPage))
{
@@ -236,7 +236,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
Buffer pBuffer;
Page page,
parentPage;
- BlockNumber rightlink;
+ BlockNumber rightlink;
/*
* Lock the pages in the same order as an insertion would, to avoid
@@ -302,11 +302,11 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
data.rightLink = GinPageGetOpaque(page)->rightlink;
/*
- * We can't pass buffer_std = TRUE, because we didn't set pd_lower
- * on pre-9.4 versions. The page might've been binary-upgraded from
- * an older version, and hence not have pd_lower set correctly.
- * Ditto for the left page, but removing the item from the parent
- * updated its pd_lower, so we know that's OK at this point.
+ * We can't pass buffer_std = TRUE, because we didn't set pd_lower on
+ * pre-9.4 versions. The page might've been binary-upgraded from an
+ * older version, and hence not have pd_lower set correctly. Ditto for
+ * the left page, but removing the item from the parent updated its
+ * pd_lower, so we know that's OK at this point.
*/
rdata[0].buffer = dBuffer;
rdata[0].buffer_std = FALSE;
@@ -538,7 +538,8 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3
}
/*
- * if we already created a temporary page, make changes in place
+ * if we already created a temporary page, make changes in
+ * place
*/
if (tmppage == origpage)
{
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index d19389330c..a8a917a9d0 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -133,7 +133,7 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber)
{
RelFileNode node;
- ForkNumber forknum;
+ ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buffer, &node, &forknum, &blknum);
@@ -341,8 +341,8 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
payload = XLogRecGetData(record) + sizeof(ginxlogInsert);
/*
- * First clear incomplete-split flag on child page if this finishes
- * a split.
+ * First clear incomplete-split flag on child page if this finishes a
+ * split.
*/
if (!isLeaf)
{
@@ -472,8 +472,8 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record)
payload = XLogRecGetData(record) + sizeof(ginxlogSplit);
/*
- * First clear incomplete-split flag on child page if this finishes
- * a split
+ * First clear incomplete-split flag on child page if this finishes a
+ * split
*/
if (!isLeaf)
{
@@ -522,7 +522,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record)
if (isRoot)
{
- BlockNumber rootBlkno = data->rrlink;
+ BlockNumber rootBlkno = data->rrlink;
Buffer rootBuf = XLogReadBuffer(data->node, rootBlkno, true);
Page rootPage = BufferGetPage(rootBuf);
@@ -711,9 +711,9 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
Buffer buffer;
/*
- * Restore the metapage. This is essentially the same as a full-page image,
- * so restore the metapage unconditionally without looking at the LSN, to
- * avoid torn page hazards.
+ * Restore the metapage. This is essentially the same as a full-page
+ * image, so restore the metapage unconditionally without looking at the
+ * LSN, to avoid torn page hazards.
*/
metabuffer = XLogReadBuffer(data->node, GIN_METAPAGE_BLKNO, false);
if (!BufferIsValid(metabuffer))
@@ -877,7 +877,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
/*
* In normal operation, shiftList() takes exclusive lock on all the
- * pages-to-be-deleted simultaneously. During replay, however, it should
+ * pages-to-be-deleted simultaneously. During replay, however, it should
* be all right to lock them one at a time. This is dependent on the fact
* that we are deleting pages from the head of the list, and that readers
* share-lock the next page before releasing the one they are on. So we
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 64125d5195..e6f06c29e5 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1382,7 +1382,7 @@ initGISTstate(Relation index)
/*
* If the index column has a specified collation, we should honor that
* while doing comparisons. However, we may have a collatable storage
- * type for a noncollatable indexed data type. If there's no index
+ * type for a noncollatable indexed data type. If there's no index
* collation then specify default collation in case the support
* functions need collation. This is harmless if the support
* functions don't care about collation, so we just do it
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 92a9dce8e6..7a8692b508 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -31,7 +31,7 @@
*
* On success return for a heap tuple, *recheck_p is set to indicate
* whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
* since we must visit the lower index page if there's any doubt.
*
* If we are doing an ordered scan, so->distances[] is filled with distance
@@ -62,7 +62,7 @@ gistindex_keytest(IndexScanDesc scan,
/*
* If it's a leftover invalid tuple from pre-9.1, treat it as a match with
- * minimum possible distances. This means we'll always follow it to the
+ * minimum possible distances. This means we'll always follow it to the
* referenced page.
*/
if (GistTupleIsInvalid(tuple))
@@ -224,7 +224,7 @@ gistindex_keytest(IndexScanDesc scan,
* ntids: if not NULL, gistgetbitmap's output tuple counter
*
* If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
* we're doing a plain or ordered indexscan. For a plain indexscan, heap
* tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
* heap tuple TIDs are pushed into individual search queue items.
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 5194fe08ab..8360b16ae5 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -56,7 +56,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
/*
* If new item is heap tuple, it goes to front of chain; otherwise insert
* it before the first index-page item, so that index pages are visited in
- * LIFO order, ensuring depth-first search of index pages. See comments
+ * LIFO order, ensuring depth-first search of index pages. See comments
* in gist_private.h.
*/
if (GISTSearchItemIsHeap(*newitem))
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index 2dd26de098..e1994bf04b 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns. In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len. This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
@@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need consider only columns starting
+ * the previously-computed union keys. We need only consider columns starting
* at attno.
*/
static void
@@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
/*
* There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side. We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
@@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
sv->spl_rdatum = v->spl_rattr[attno];
/*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing. Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2Coll(&giststate->picksplitFn[attno],
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index fbccdb800b..f32e35ad15 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
@@ -456,7 +456,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty. Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
@@ -475,7 +475,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far. The next iteration of this loop
* will compare the next column.
*/
}
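The coin flip this comment describes can be sketched like so (assuming PG's
random() and MAX_RANDOM_VALUE; simplified from the surrounding loop):

    if (keep_current_best == -1)
    {
        /* haven't made the random choice for this best tuple yet */
        keep_current_best = (random() <= (MAX_RANDOM_VALUE / 2)) ? 1 : 0;
    }
    if (keep_current_best == 0)
    {
        /* switch to the new equally-good tuple, and forget the old choice */
        result = i;
        keep_current_best = -1;
    }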
@@ -681,7 +681,7 @@ gistcheckpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 215806be12..278d386a7c 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -49,7 +49,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
stats->estimated_count = info->estimated_count;
/*
- * XXX the above is wrong if index is partial. Would it be OK to just
+ * XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index e12b9c66dc..7d36b2ab6a 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
@@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
@@ -387,6 +387,7 @@ gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf,
for (ptr = dist; ptr; ptr = ptr->next)
npage++;
+
/*
* the caller should've checked this already, but doesn't hurt to check
* again.
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 502fc31dd1..7abb7a47fc 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -78,7 +78,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
- * tuples by (expected) bucket number. However, such a sort is useless
+ * tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
@@ -248,7 +248,7 @@ hashgettuple(PG_FUNCTION_ARGS)
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
- * for the TID we previously returned. (Because we hold share lock on
+ * for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
@@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
@@ -655,7 +655,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 6d351da5b0..c61fec6b84 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -11,7 +11,7 @@
* src/backend/access/hash/hashfunc.c
*
* NOTES
- * These functions are stored in pg_amproc. For each operator class
+ * These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
@@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
@@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
- * direction from the goal of parallelism. I did what I could. Rotates
+ * direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
@@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but it does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
- * bits (affecting 32 bits is enough). The original hash function had
+ * bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
- * Returns a uint32 value. Every bit of the key affects every bit of
+ * Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
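For context, hash_any() here derives from Bob Jenkins' public-domain
lookup3.c, where the mixing step is a rotate-based macro; the constants
below are quoted from that source for illustration:

    #define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))

    #define mix(a,b,c) \
    { \
      a -= c;  a ^= rot(c, 4);  c += b; \
      b -= a;  b ^= rot(a, 6);  a += c; \
      c -= b;  c ^= rot(b, 8);  b += a; \
      a -= c;  a ^= rot(c,16);  c += b; \
      b -= a;  b ^= rot(a,19);  a += c; \
      c -= b;  c ^= rot(b, 4);  b += a; \
    }

Each line reuses bits just changed by the previous one (the read-after-writes
mentioned above), and rotates, unlike shifts, keep the top and bottom bits in
play.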
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 49211eef9a..05e9808b8a 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 2389c3843f..628c05698b 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -80,7 +80,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@@ -89,12 +89,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@@ -157,7 +157,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@@ -253,7 +253,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
- * is the one we return to the caller. Both of these are correctly
+ * is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
@@ -284,7 +284,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -313,7 +313,7 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -494,7 +494,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
- * Initialize a new bitmap page. The metapage has a write-lock upon
+ * Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 1552b73f28..9e4a2e0434 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -49,7 +49,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@@ -136,7 +136,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@@ -344,7 +344,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
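The rest of the computation (just past this hunk) plausibly divides the
page's target usage by the fixed per-tuple footprint. A sketch, assuming the
usual macros and locally declared item_width and ffactor:

    /* per-tuple cost: index tuple header + uint32 hash key + line pointer */
    item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
        sizeof(ItemIdData);
    ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) /
        item_width;
    if (ffactor < 10)
        ffactor = 10;       /* clamp to a sane minimum */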
@@ -377,7 +377,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
- * calls to occur. This ensures that the smgr level has the right idea of
+ * calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
@@ -545,7 +545,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
@@ -603,7 +603,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
@@ -641,7 +641,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
+ * date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
@@ -876,7 +876,7 @@ _hash_splitbucket(Relation rel,
/*
* We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index ad405646c5..5aabe06606 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
@@ -269,7 +269,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
- * false. Else, return true and set the hashso_curpos for the
+ * false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index e13670c4f4..c0d6fec256 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
- * bucket splits may occur during the index build. In that case we'd
+ * bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@@ -52,7 +52,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
hspool->index = index;
/*
- * Determine the bitmask for hash code values. Since there are currently
+ * Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
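The computation the comment announces rounds num_buckets up to the next
power of two and subtracts one, so the mask picks out the hash code's
low-order bucket bits. A one-line sketch, assuming hash's _hash_log2
(ceiling log2) helper and a hash_mask field in the spool:

    hspool->hash_mask = (((uint32) 1) << _hash_log2(num_buckets)) - 1;

With num_buckets = 10, for instance, the mask is 0xF and tuples sort by
hashkey & 0xF; if splits add buckets during the build, one masked value can
map to two or more final buckets, the mild loss of locality the header
comment accepts.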
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 20bd279258..43652921ac 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
- * existing hash keys in the page. This is the appropriate place to start
+ * existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 336fbb06da..405117a526 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -88,11 +88,11 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
HeapTuple newtup, HeapTuple old_key_tup,
bool all_visible_cleared, bool new_all_visible_cleared);
static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
- Bitmapset *hot_attrs,
- Bitmapset *key_attrs, Bitmapset *id_attrs,
- bool *satisfies_hot, bool *satisfies_key,
- bool *satisfies_id,
- HeapTuple oldtup, HeapTuple newtup);
+ Bitmapset *hot_attrs,
+ Bitmapset *key_attrs, Bitmapset *id_attrs,
+ bool *satisfies_hot, bool *satisfies_key,
+ bool *satisfies_id,
+ HeapTuple oldtup, HeapTuple newtup);
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
uint16 old_infomask2, TransactionId add_to_xmax,
LockTupleMode mode, bool is_update,
@@ -113,7 +113,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status
XLTW_Oper oper, int *remaining);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
- bool *copy);
+ bool *copy);
/*
@@ -213,7 +213,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
- * might go into pages we already scanned. To guarantee consistent
+ * might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
@@ -221,7 +221,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
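(The sentence continues in the source.) The gate itself is a small test
against shared_buffers; a sketch under stated assumptions (field and flag
names per the 9.4-era HeapScanDesc):

    if (!RelationUsesLocalBuffers(scan->rs_rd) &&
        scan->rs_nblocks > NBuffers / 4)
    {
        allow_strat = scan->rs_allow_strat;
        allow_sync = scan->rs_allow_sync;
    }
    else
        allow_strat = allow_sync = false;

Temp tables use local buffers and never qualify; no other backend could join
a synchronized scan of them anyway.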
@@ -325,7 +325,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
}
/*
- * Be sure to check for interrupts at least once per page. Checks at
+ * Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters many
* pages' worth of consecutive dead tuples.
*/
@@ -349,7 +349,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1126,7 +1126,7 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
*
* Same as relation_openrv, but with an additional missing_ok argument
* allowing a NULL return rather than an error if the relation is not
- * found. (Note that some other causes, such as permissions problems,
+ * found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* ----------------
*/
@@ -1740,7 +1740,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
/*
* When first_call is true (and thus, skip is initially false) we'll
- * return the first tuple we find. But on later passes, heapTuple
+ * return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever), so
* we skip it and return the next match we find.
@@ -1834,7 +1834,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
- * show the latest version of the row. Note that it will not be changed
+ * show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@@ -1955,7 +1955,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously.
*
@@ -2042,7 +2042,7 @@ FreeBulkInsertState(BulkInsertState bistate)
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -2071,7 +2071,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2123,8 +2123,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
bool need_tuple_data;
/*
- * For logical decoding, we need the tuple even if we're doing a
- * full page write, so make sure to log it separately. (XXX We could
+ * For logical decoding, we need the tuple even if we're doing a full
+ * page write, so make sure to log it separately. (XXX We could
* alternatively store a pointer into the FPW).
*
* Also, if this is a catalog, we need to transmit combocids to
@@ -2165,9 +2165,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
rdata[2].next = NULL;
/*
- * Make a separate rdata entry for the tuple's buffer if we're
- * doing logical decoding, so that an eventual FPW doesn't
- * remove the tuple's data.
+ * Make a separate rdata entry for the tuple's buffer if we're doing
+ * logical decoding, so that an eventual FPW doesn't remove the
+ * tuple's data.
*/
if (need_tuple_data)
{
@@ -2248,7 +2248,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
/*
* If the object id of this tuple has already been assigned, trust the
- * caller. There are a couple of ways this can happen. At initial db
+ * caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
@@ -2342,7 +2342,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2356,7 +2356,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
int nthispage;
/*
- * Find buffer where at least the next tuple will fit. If the page is
+ * Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
@@ -2487,9 +2487,9 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
rdata[1].next = NULL;
/*
- * Make a separate rdata entry for the tuple's buffer if
- * we're doing logical decoding, so that an eventual FPW
- * doesn't remove the tuple's data.
+ * Make a separate rdata entry for the tuple's buffer if we're
+ * doing logical decoding, so that an eventual FPW doesn't remove
+ * the tuple's data.
*/
if (need_tuple_data)
{
@@ -2597,8 +2597,8 @@ compute_infobits(uint16 infomask, uint16 infomask2)
static inline bool
xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
{
- const uint16 interesting =
- HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
+ const uint16 interesting =
+ HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
if ((new_infomask & interesting) != (old_infomask & interesting))
return true;
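[Editor's note: illustrative sketch, not part of this patch. A typical caller
uses this helper after sleeping on a tuple lock, exactly as the heap_update
and heap_lock_tuple hunks further down show; tuple, infomask and xwait are the
caller's locals, and l3 stands in for the caller's restart label:]

	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
		!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
							 xwait))
		goto l3;			/* the tuple changed while we slept: restart */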
@@ -2650,7 +2650,7 @@ heap_delete(Relation relation, ItemPointer tid,
bool have_tuple_lock = false;
bool iscombo;
bool all_visible_cleared = false;
- HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
+ HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
bool old_key_copied = false;
Assert(ItemPointerIsValid(tid));
@@ -2751,10 +2751,10 @@ l1:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to delete
+ * other subxacts of this backend. It is legal for us to delete
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
- * exclusive). We don't bother changing the on-disk hint bits
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -2836,7 +2836,7 @@ l1:
* If this is the first possibly-multixact-able operation in the current
* transaction, set my per-backend OldestMemberMXactId setting. We can be
* certain that the transaction will never become a member of any older
- * MultiXactIds than that. (We have to do this even if we end up just
+ * MultiXactIds than that. (We have to do this even if we end up just
* using our own TransactionId below, since some other backend could
* incorporate our XID into a MultiXact immediately afterwards.)
*/
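[Editor's note: illustration only, not part of this patch. The call that this
recurring comment guards is the one-liner below; MultiXactIdSetOldestMember()
records in shared memory the oldest multixact this backend could become a
member of:]

	MultiXactIdSetOldestMember();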
@@ -2852,7 +2852,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@@ -2919,7 +2919,7 @@ l1:
xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
rdata[1].next = &(rdata[2]);
- rdata[2].data = (char*)&xlhdr;
+ rdata[2].data = (char *) &xlhdr;
rdata[2].len = SizeOfHeapHeader;
rdata[2].buffer = InvalidBuffer;
rdata[2].next = NULL;
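[Editor's note: a minimal sketch of the XLogRecData convention these hunks
rely on, not code from the patch. An entry with buffer == InvalidBuffer is
always written; an entry tied to a buffer may be dropped when a full-page
image of that buffer is taken, which is exactly why logical decoding needs
its own entry. tuple_data and tuple_len are placeholder names:]

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapDelete;
	rdata[0].buffer = InvalidBuffer;	/* never elided */
	rdata[0].next = &(rdata[1]);

	rdata[1].data = (char *) tuple_data;
	rdata[1].len = tuple_len;
	rdata[1].buffer = buffer;			/* elided if an FPW covers it */
	rdata[1].buffer_std = true;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);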
@@ -2994,7 +2994,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -3110,7 +3110,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on a
- * different page. But we must compute the list before obtaining buffer
+ * different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
@@ -3122,7 +3122,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL);
key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
id_attrs = RelationGetIndexAttrBitmap(relation,
- INDEX_ATTR_BITMAP_IDENTITY_KEY);
+ INDEX_ATTR_BITMAP_IDENTITY_KEY);
block = ItemPointerGetBlockNumber(otid);
buffer = ReadBuffer(relation, block);
@@ -3193,7 +3193,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
* If this is the first possibly-multixact-able operation in the
* current transaction, set my per-backend OldestMemberMXactId
* setting. We can be certain that the transaction will never become a
- * member of any older MultiXactIds than that. (We have to do this
+ * member of any older MultiXactIds than that. (We have to do this
* even if we end up just using our own TransactionId below, since
* some other backend could incorporate our XID into a MultiXact
* immediately afterwards.)
@@ -3238,7 +3238,7 @@ l2:
/*
* XXX note that we don't consider the "no wait" case here. This
* isn't a problem currently because no caller uses that case, but it
- * should be fixed if such a caller is introduced. It wasn't a
+ * should be fixed if such a caller is introduced. It wasn't a
* problem previously because this code would always wait, but now
* that some tuple locks do not conflict with one of the lock modes we
* use, it is possible that this case is interesting to handle
@@ -3276,7 +3276,7 @@ l2:
* it as locker, unless it is gone completely.
*
* If it's not a multi, we need to check for sleeping conditions
- * before actually going to sleep. If the update doesn't conflict
+ * before actually going to sleep. If the update doesn't conflict
* with the locks, we just continue without sleeping (but making sure
* it is preserved).
*/
@@ -3302,10 +3302,10 @@ l2:
goto l2;
/*
- * Note that the multixact may not be done by now. It could have
+ * Note that the multixact may not be done by now. It could have
* surviving members: our own xact or other subxacts of this
* backend, and also any other concurrent transaction that locked
- * the tuple with KeyShare if we only got TupleLockUpdate. If
+ * the tuple with KeyShare if we only got TupleLockUpdate. If
* this is the case, we have to be careful to mark the updated
* tuple with the surviving members in Xmax.
*
@@ -3512,7 +3512,7 @@ l2:
* If the toaster needs to be activated, OR if the new tuple will not fit
* on the same page as the old, then we need to release the content lock
* (but not the pin!) on the old tuple's buffer while we are off doing
- * TOAST and/or table-file-extension work. We must mark the old tuple to
+ * TOAST and/or table-file-extension work. We must mark the old tuple to
* show that it's already being updated, else other processes may try to
* update it themselves.
*
@@ -3578,7 +3578,7 @@ l2:
* there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
- * buffer locks on both old and new pages. To avoid deadlock against
+ * buffer locks on both old and new pages. To avoid deadlock against
* some other backend trying to get the same two locks in the other
* order, we must be consistent about the order we get the locks in.
* We use the rule "lock the lower-numbered page of the relation
@@ -3638,7 +3638,7 @@ l2:
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
- * has enough space for the new tuple. If they are the same buffer, only
+ * has enough space for the new tuple. If they are the same buffer, only
* one pin is held.
*/
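[Editor's note: sketch of the lock-ordering rule quoted in the preceding
hunk, not part of this patch; otherBuffer/otherBlock and targetBlock are
placeholder names for the two pages involved:]

	if (otherBlock < targetBlock)
	{
		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	}
	else
	{
		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
	}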
@@ -3646,7 +3646,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
- * to do a HOT update. Check if any of the index columns have been
+ * to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (satisfies_hot)
@@ -3672,13 +3672,13 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
- * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(page, xid);
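[Editor's note: for orientation only. The hint set here is the page header's
pd_prune_xid field; the macro is roughly the following (cf. bufpage.h), and
it only moves the hint backward, never forward:]

	#define PageSetPrunable(page, xid) \
	do { \
		Assert(TransactionIdIsNormal(xid)); \
		if (!TransactionIdIsValid(((PageHeader) (page))->pd_prune_xid) || \
			TransactionIdPrecedes(xid, ((PageHeader) (page))->pd_prune_xid)) \
			((PageHeader) (page))->pd_prune_xid = (xid); \
	} while (0)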
@@ -3775,7 +3775,7 @@ l2:
* Mark old tuple for invalidation from system caches at next command
* boundary, and mark the new tuple for invalidation in case we abort. We
* have to do this before releasing the buffer because oldtup is in the
- * buffer. (heaptup is all in local memory, but it's necessary to process
+ * buffer. (heaptup is all in local memory, but it's necessary to process
* both tuple versions in one call to inval.c so we can avoid redundant
* sinval messages.)
*/
@@ -3853,7 +3853,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
+ * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
* a single heap_deform_tuple call on each tuple, instead? But that
* doesn't work for system columns ...
*/
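[Editor's note: illustrative shape of the extraction and comparison, ignoring
the system-column special cases; att stands for the pg_attribute entry of
attrnum. Not code from the patch:]

	value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
	value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
	if (isnull1 != isnull2)
		return false;			/* one NULL, one not: unequal */
	if (isnull1)
		return true;			/* both NULL: counted as equal */
	return datumIsEqual(value1, value2, att->attbyval, att->attlen);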
@@ -3876,7 +3876,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
- * same logical value. But we should be OK as long as there are no false
+ * same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@@ -3951,8 +3951,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs,
/*
* Since the HOT attributes are a superset of the key attributes and
* the key attributes are a superset of the id attributes, this logic
- * is guaranteed to identify the next column that needs to be
- * checked.
+ * is guaranteed to identify the next column that needs to be checked.
*/
if (hot_result && next_hot_attnum > FirstLowInvalidHeapAttributeNumber)
check_now = next_hot_attnum;
@@ -3981,12 +3980,11 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs,
}
/*
- * Advance the next attribute numbers for the sets that contain
- * the attribute we just checked. As we work our way through the
- * columns, the next_attnum values will rise; but when each set
- * becomes empty, bms_first_member() will return -1 and the attribute
- * number will end up with a value less than
- * FirstLowInvalidHeapAttributeNumber.
+ * Advance the next attribute numbers for the sets that contain the
+ * attribute we just checked. As we work our way through the columns,
+ * the next_attnum values will rise; but when each set becomes empty,
+ * bms_first_member() will return -1 and the attribute number will end
+ * up with a value less than FirstLowInvalidHeapAttributeNumber.
*/
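/*
 * [Editor's illustration, not part of the patch: the "advance" step below
 * uses the usual bitmapset idiom. Attribute numbers are stored in these
 * sets offset by FirstLowInvalidHeapAttributeNumber, so each refill looks
 * roughly like
 *
 *		next_hot_attnum = bms_first_member(hot_attrs) +
 *			FirstLowInvalidHeapAttributeNumber;
 *
 * and an empty set yields -1 plus that offset, i.e. a value below
 * FirstLowInvalidHeapAttributeNumber, which stops the checks above.]
 */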
if (hot_result && check_now == next_hot_attnum)
{
@@ -4015,7 +4013,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs,
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -4057,7 +4055,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
static MultiXactStatus
get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
{
- int retval;
+ int retval;
if (is_update)
retval = tupleLockExtraInfo[mode].updstatus;
@@ -4239,15 +4237,15 @@ l3:
* However, if there are updates, we need to walk the update chain
* to mark future versions of the row as locked, too. That way,
* if somebody deletes that future version, we're protected
- * against the key going away. This locking of future versions
+ * against the key going away. This locking of future versions
* could block momentarily, if a concurrent transaction is
* deleting a key; or it could return a value to the effect that
- * the transaction deleting the key has already committed. So we
+ * the transaction deleting the key has already committed. So we
* do this before re-locking the buffer; otherwise this would be
* prone to deadlocks.
*
* Note that the TID we're locking was grabbed before we unlocked
- * the buffer. For it to change while we're not looking, the
+ * the buffer. For it to change while we're not looking, the
* other properties we're testing for below after re-locking the
* buffer would also change, in which case we would restart this
* loop above.
@@ -4472,7 +4470,7 @@ l3:
* Of course, the multixact might not be done here: if we're
* requesting a light lock mode, other transactions with light
* locks could still be alive, as well as locks owned by our
- * own xact or other subxacts of this backend. We need to
+ * own xact or other subxacts of this backend. We need to
* preserve the surviving MultiXact members. Note that it
* isn't absolutely necessary in the latter case, but doing so
* is simpler.
@@ -4516,7 +4514,7 @@ l3:
/*
* xwait is done, but if xwait had just locked the tuple then
* some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * this point. Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(
@@ -4525,7 +4523,7 @@ l3:
goto l3;
/*
- * Otherwise check if it committed or aborted. Note we cannot
+ * Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that should have been handled above. So
* that transaction must necessarily be gone by now.
@@ -4605,7 +4603,7 @@ failed:
* If this is the first possibly-multixact-able operation in the current
* transaction, set my per-backend OldestMemberMXactId setting. We can be
* certain that the transaction will never become a member of any older
- * MultiXactIds than that. (We have to do this even if we end up just
+ * MultiXactIds than that. (We have to do this even if we end up just
* using our own TransactionId below, since some other backend could
* incorporate our XID into a MultiXact immediately afterwards.)
*/
@@ -4641,7 +4639,7 @@ failed:
HeapTupleHeaderSetXmax(tuple->t_data, xid);
/*
- * Make sure there is no forward chain link in t_ctid. Note that in the
+ * Make sure there is no forward chain link in t_ctid. Note that in the
* cases where the tuple has been updated, we must not overwrite t_ctid,
* because it was set by the updater. Moreover, if the tuple has been
* updated, we need to follow the update chain to lock the new versions of
@@ -4653,8 +4651,8 @@ failed:
MarkBufferDirty(*buffer);
/*
- * XLOG stuff. You might think that we don't need an XLOG record because
- * there is no state change worth restoring after a crash. You would be
+ * XLOG stuff. You might think that we don't need an XLOG record because
+ * there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@@ -4818,7 +4816,7 @@ l5:
* If the XMAX is already a MultiXactId, then we need to expand it to
* include add_to_xmax; but if all the members were lockers and are
* all gone, we can do away with the IS_MULTI bit and just set
- * add_to_xmax as the only locker/updater. If all lockers are gone
+ * add_to_xmax as the only locker/updater. If all lockers are gone
* and we have an updater that aborted, we can also do without a
* multi.
*
@@ -4881,7 +4879,7 @@ l5:
*/
MultiXactStatus new_status;
MultiXactStatus old_status;
- LockTupleMode old_mode;
+ LockTupleMode old_mode;
if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
{
@@ -4900,8 +4898,8 @@ l5:
{
/*
* LOCK_ONLY can be present alone only when a page has been
- * upgraded by pg_upgrade. But in that case,
- * TransactionIdIsInProgress() should have returned false. We
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
* assume it's no longer locked in this case.
*/
elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
@@ -4929,12 +4927,13 @@ l5:
if (xmax == add_to_xmax)
{
/*
- * Note that it's not possible for the original tuple to be updated:
- * we wouldn't be here because the tuple would have been invisible and
- * we wouldn't try to update it. As a subtlety, this code can also
- * run when traversing an update chain to lock future versions of a
- * tuple. But we wouldn't be here either, because the add_to_xmax
- * would be different from the original updater.
+ * Note that it's not possible for the original tuple to be
+ * updated: we wouldn't be here because the tuple would have been
+ * invisible and we wouldn't try to update it. As a subtlety,
+ * this code can also run when traversing an update chain to lock
+ * future versions of a tuple. But we wouldn't be here either,
+ * because the add_to_xmax would be different from the original
+ * updater.
*/
Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
@@ -5013,7 +5012,7 @@ static HTSU_Result
test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
LockTupleMode mode, bool *needwait)
{
- MultiXactStatus wantedstatus;
+ MultiXactStatus wantedstatus;
*needwait = false;
wantedstatus = get_mxact_status_for_lock(mode, false);
@@ -5026,18 +5025,18 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
if (TransactionIdIsCurrentTransactionId(xid))
{
/*
- * Updated by our own transaction? Just return failure. This shouldn't
- * normally happen.
+ * Updated by our own transaction? Just return failure. This
+ * shouldn't normally happen.
*/
return HeapTupleSelfUpdated;
}
else if (TransactionIdIsInProgress(xid))
{
/*
- * If the locking transaction is running, what we do depends on whether
- * the lock modes conflict: if they do, then we must wait for it to
- * finish; otherwise we can fall through to lock this tuple version
- * without waiting.
+ * If the locking transaction is running, what we do depends on
+ * whether the lock modes conflict: if they do, then we must wait for
+ * it to finish; otherwise we can fall through to lock this tuple
+ * version without waiting.
*/
if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
LOCKMODE_from_mxstatus(wantedstatus)))
@@ -5046,8 +5045,8 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
}
/*
- * If we set needwait above, then this value doesn't matter; otherwise,
- * this value signals to caller that it's okay to proceed.
+ * If we set needwait above, then this value doesn't matter;
+ * otherwise, this value signals to caller that it's okay to proceed.
*/
return HeapTupleMayBeUpdated;
}
@@ -5059,7 +5058,7 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
* The other transaction committed. If it was only a locker, then the
* lock is completely gone now and we can return success; but if it
* was an update, then what we do depends on whether the two lock
- * modes conflict. If they conflict, then we must report error to
+ * modes conflict. If they conflict, then we must report error to
* caller. But if they don't, we can fall through to allow the current
* transaction to lock the tuple.
*
@@ -5133,8 +5132,8 @@ l4:
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/*
- * Check the tuple XMIN against prior XMAX, if any. If we reached
- * the end of the chain, we're done, so return success.
+ * Check the tuple XMIN against prior XMAX, if any. If we reached the
+ * end of the chain, we're done, so return success.
*/
if (TransactionIdIsValid(priorXmax) &&
!TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
@@ -5162,14 +5161,14 @@ l4:
rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
if (old_infomask & HEAP_XMAX_IS_MULTI)
{
- int nmembers;
- int i;
+ int nmembers;
+ int i;
MultiXactMember *members;
nmembers = GetMultiXactIdMembers(rawxmax, &members, false);
for (i = 0; i < nmembers; i++)
{
- HTSU_Result res;
+ HTSU_Result res;
res = test_lockmode_for_conflict(members[i].status,
members[i].xid,
@@ -5196,7 +5195,7 @@ l4:
}
else
{
- HTSU_Result res;
+ HTSU_Result res;
MultiXactStatus status;
/*
@@ -5219,9 +5218,9 @@ l4:
else
{
/*
- * LOCK_ONLY present alone (a pg_upgraded tuple
- * marked as share-locked in the old cluster) shouldn't
- * be seen in the middle of an update chain.
+ * LOCK_ONLY present alone (a pg_upgraded tuple marked
+ * as share-locked in the old cluster) shouldn't be
+ * seen in the middle of an update chain.
*/
elog(ERROR, "invalid lock status in tuple");
}
@@ -5323,7 +5322,7 @@ l4:
* The initial tuple is assumed to be already locked.
*
* This function doesn't check visibility; it just unconditionally marks the
- * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
* concurrently (or updated with the key being modified), sleep until the
* transaction doing it is finished.
*
@@ -5347,7 +5346,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
* If this is the first possibly-multixact-able operation in the
* current transaction, set my per-backend OldestMemberMXactId
* setting. We can be certain that the transaction will never become a
- * member of any older MultiXactIds than that. (We have to do this
+ * member of any older MultiXactIds than that. (We have to do this
* even if we end up just using our own TransactionId below, since
* some other backend could incorporate our XID into a MultiXact
* immediately afterwards.)
@@ -5366,7 +5365,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
- * of this function in Postgres are extremely limited. Nonetheless we
+ * of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@@ -5608,7 +5607,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
*/
if (ISUPDATE_from_mxstatus(members[i].status))
{
- TransactionId xid = members[i].xid;
+ TransactionId xid = members[i].xid;
/*
* It's an update; should we keep it? If the transaction is known
@@ -5728,7 +5727,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
* heap_prepare_freeze_tuple
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID and cutoff MultiXactId. If so,
+ * are older than the specified cutoff XID and cutoff MultiXactId. If so,
* set up enough state (in the *frz output argument) to later execute and
* WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
* is to be changed.
@@ -5801,11 +5800,11 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
else if (flags & FRM_RETURN_IS_XID)
{
/*
- * NB -- some of these transformations are only valid because
- * we know the return Xid is a tuple updater (i.e. not merely a
+ * NB -- some of these transformations are only valid because we
+ * know the return Xid is a tuple updater (i.e. not merely a
* locker.) Also note that the only reason we don't explicitly
- * worry about HEAP_KEYS_UPDATED is because it lives in t_infomask2
- * rather than t_infomask.
+ * worry about HEAP_KEYS_UPDATED is because it lives in
+ * t_infomask2 rather than t_infomask.
*/
frz->t_infomask &= ~HEAP_XMAX_BITS;
frz->xmax = newxmax;
@@ -5815,8 +5814,8 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
}
else if (flags & FRM_RETURN_IS_MULTI)
{
- uint16 newbits;
- uint16 newbits2;
+ uint16 newbits;
+ uint16 newbits2;
/*
* We can't use GetMultiXactIdHintBits directly on the new multi
@@ -5851,7 +5850,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
- * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
* Also get rid of the HEAP_KEYS_UPDATED bit.
*/
frz->t_infomask &= ~HEAP_XMAX_BITS;
@@ -6111,7 +6110,7 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
* used to optimize multixact access in case it's a lock-only multi); 'nowait'
* indicates whether to use conditional lock acquisition, to allow callers to
* fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
- * context information for error messages. 'remaining', if not NULL, receives
+ * context information for error messages. 'remaining', if not NULL, receives
* the number of members that are still running, including any (non-aborted)
* subtransactions of our own transaction.
*
@@ -6173,7 +6172,7 @@ Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
* return failure, if asked to avoid waiting.)
*
* Note that we don't set up an error context callback ourselves,
- * but instead we pass the info down to XactLockTableWait. This
+ * but instead we pass the info down to XactLockTableWait. This
* might seem a bit wasteful because the context is set up and
* torn down for each member of the multixact, but in reality it
* should be barely noticeable, and it avoids duplicate code.
@@ -6242,7 +6241,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.
@@ -6366,7 +6365,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
- * If we reached end of scan, rs_inited will now be false. We must
+ * If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@@ -6548,7 +6547,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-freeze operation. Caller must have already
+ * Perform XLogInsert for a heap-freeze operation. Caller must have already
* modified the buffer and marked it dirty.
*/
XLogRecPtr
@@ -6593,7 +6592,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
/*
* Perform XLogInsert for a heap-visible operation. 'block' is the block
* being marked all-visible, and vm_buffer is the buffer containing the
- * corresponding visibility map block. Both should have already been modified
+ * corresponding visibility map block. Both should have already been modified
* and dirtied.
*
* If checksums are enabled, we also add the heap_buffer to the chain to
@@ -6642,7 +6641,7 @@ log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
}
/*
- * Perform XLogInsert for a heap-update operation. Caller must already
+ * Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@@ -6674,10 +6673,10 @@ log_heap_update(Relation reln, Buffer oldbuf,
info = XLOG_HEAP_UPDATE;
/*
- * If the old and new tuple are on the same page, we only need to log
- * the parts of the new tuple that were changed. That saves on the amount
- * of WAL we need to write. Currently, we just count any unchanged bytes
- * in the beginning and end of the tuple. That's quick to check, and
+ * If the old and new tuple are on the same page, we only need to log the
+ * parts of the new tuple that were changed. That saves on the amount of
+ * WAL we need to write. Currently, we just count any unchanged bytes in
+ * the beginning and end of the tuple. That's quick to check, and
* perfectly covers the common case that only one field is updated.
*
* We could do this even if the old and new tuple are on different pages,
@@ -6688,10 +6687,10 @@ log_heap_update(Relation reln, Buffer oldbuf,
* updates tend to create the new tuple version on the same page, there
* isn't much to be gained by doing this across pages anyway.
*
- * Skip this if we're taking a full-page image of the new page, as we don't
- * include the new tuple in the WAL record in that case. Also disable if
- * wal_level='logical', as logical decoding needs to be able to read the
- * new tuple in whole from the WAL record alone.
+ * Skip this if we're taking a full-page image of the new page, as we
+ * don't include the new tuple in the WAL record in that case. Also
+ * disable if wal_level='logical', as logical decoding needs to be able to
+ * read the new tuple in whole from the WAL record alone.
*/
if (oldbuf == newbuf && !need_tuple_data &&
!XLogCheckBufferNeedsBackup(newbuf))
@@ -6707,6 +6706,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
if (newp[prefixlen] != oldp[prefixlen])
break;
}
+
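	/*
	 * [Editor's illustration, not part of the patch: the matching suffix is
	 * measured the same way, scanning backward from the tuple ends, roughly
	 *
	 *		for (suffixlen = 0;
	 *			 suffixlen < Min(oldlen, newlen) - prefixlen;
	 *			 suffixlen++)
	 *		{
	 *			if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
	 *				break;
	 *		}
	 *
	 * so the prefix and suffix never overlap.]
	 */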
/*
* Storing the length of the prefix takes 2 bytes, so we need to save
* at least 3 bytes or there's no point.
@@ -6793,8 +6793,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
xlhdr.header.t_infomask2 = newtup->t_data->t_infomask2;
xlhdr.header.t_infomask = newtup->t_data->t_infomask;
xlhdr.header.t_hoff = newtup->t_data->t_hoff;
- Assert(offsetof(HeapTupleHeaderData, t_bits) + prefixlen + suffixlen <= newtup->t_len);
- xlhdr.t_len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - prefixlen - suffixlen;
+ Assert(offsetof(HeapTupleHeaderData, t_bits) +prefixlen + suffixlen <= newtup->t_len);
+ xlhdr.t_len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -prefixlen - suffixlen;
/*
* As with insert records, we need not store this rdata segment if we
@@ -6816,7 +6816,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
if (prefixlen == 0)
{
rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits);
- rdata[nr].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - suffixlen;
+ rdata[nr].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -suffixlen;
rdata[nr].buffer = need_tuple_data ? InvalidBuffer : newbufref;
rdata[nr].buffer_std = true;
rdata[nr].next = NULL;
@@ -6829,7 +6829,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
* two separate rdata entries.
*/
/* bitmap [+ padding] [+ oid] */
- if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) > 0)
+ if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) >0)
{
rdata[nr - 1].next = &(rdata[nr]);
rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits);
@@ -6853,13 +6853,13 @@ log_heap_update(Relation reln, Buffer oldbuf,
/*
* Separate storage for the FPW buffer reference of the new page in the
* wal_level >= logical case.
- */
+ */
if (need_tuple_data)
{
rdata[nr - 1].next = &(rdata[nr]);
rdata[nr].data = NULL;
- rdata[nr].len = 0;
+ rdata[nr].len = 0;
rdata[nr].buffer = newbufref;
rdata[nr].buffer_std = true;
rdata[nr].next = NULL;
@@ -6992,8 +6992,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
/*
- * The page may be uninitialized. If so, we can't set the LSN because
- * that would corrupt the page.
+ * The page may be uninitialized. If so, we can't set the LSN because that
+ * would corrupt the page.
*/
if (!PageIsNew(page))
{
@@ -7173,14 +7173,14 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
*/
for (natt = 0; natt < idx_desc->natts; natt++)
{
- int attno = idx_rel->rd_index->indkey.values[natt];
+ int attno = idx_rel->rd_index->indkey.values[natt];
if (attno < 0)
{
/*
* The OID column can appear in an index definition, but that's
- * OK, becuse we always copy the OID if present (see below).
- * Other system columns may not.
+ * OK, because we always copy the OID if present (see below). Other
+ * system columns may not.
*/
if (attno == ObjectIdAttributeNumber)
continue;
@@ -7210,7 +7210,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
*/
if (HeapTupleHasExternal(key_tuple))
{
- HeapTuple oldtup = key_tuple;
+ HeapTuple oldtup = key_tuple;
+
key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
heap_freetuple(oldtup);
}
@@ -7963,7 +7964,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
/*
* In normal operation, it is important to lock the two pages in
* page-number order, to avoid possible deadlocks against other update
- * operations going the other way. However, during WAL replay there can
+ * operations going the other way. However, during WAL replay there can
* be no other update happening, so we don't need to worry about that. But
* we *do* need to worry that we don't expose an inconsistent state to Hot
* Standby queries --- so the original page can't be unlocked before we've
@@ -8169,7 +8170,7 @@ newsame:;
if (suffixlen > 0)
memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
- newlen = offsetof(HeapTupleHeaderData, t_bits) + xlhdr.t_len + prefixlen + suffixlen;
+ newlen = offsetof(HeapTupleHeaderData, t_bits) +xlhdr.t_len + prefixlen + suffixlen;
htup->t_infomask2 = xlhdr.header.t_infomask2;
htup->t_infomask = xlhdr.header.t_infomask;
htup->t_hoff = xlhdr.header.t_hoff;
@@ -8444,6 +8445,7 @@ heap2_redo(XLogRecPtr lsn, XLogRecord *record)
heap_xlog_lock_updated(lsn, record);
break;
case XLOG_HEAP2_NEW_CID:
+
/*
* Nothing to do on a real replay, only used during logical
* decoding.
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index b306398aec..631af759d7 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -146,7 +146,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
/*
* If there are two buffers involved and we pinned just one of them,
* it's possible that the second one became all-visible while we were
- * busy pinning the first one. If it looks like that's a possible
+ * busy pinning the first one. If it looks like that's a possible
* scenario, we'll need to make a second pass through this loop.
*/
if (buffer2 == InvalidBuffer || buffer1 == buffer2
@@ -177,7 +177,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
- * enough there). In that case, the page will be pinned and locked only once.
+ * enough there). In that case, the page will be pinned and locked only once.
*
* For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by
* locking them only after locking the corresponding heap page, and taking
@@ -198,7 +198,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
- * insertions into the same relation. This keeps a pin on the current
+ * insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@@ -251,7 +251,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
- * on, as cached in the BulkInsertState or relcache entry. If that
+ * on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
* loop around and retry multiple times. (To ensure this isn't an infinite
@@ -283,7 +283,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
@@ -305,7 +305,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
* If the page-level all-visible flag is set, caller will need to
* clear both that and the corresponding visibility map bit. However,
* by the time we return, we'll have x-locked the buffer, and we don't
- * want to do any I/O while in that state. So we check the bit here
+ * want to do any I/O while in that state. So we check the bit here
* before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock.
@@ -347,7 +347,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We now have the target page (and the other buffer, if any) pinned
- * and locked. However, since our initial PageIsAllVisible checks
+ * and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now be
* out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to
@@ -390,7 +390,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
- * any) and prepare to look elsewhere. We don't care which order we
+ * any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
@@ -432,7 +432,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* XXX This does an lseek - rather expensive - but at the moment it is the
- * only way to accurately determine how many blocks are in a relation. Is
+ * only way to accurately determine how many blocks are in a relation. Is
* it worth keeping an accurate file length in shared memory someplace,
* rather than relying on the kernel to do it for us?
*/
@@ -452,7 +452,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Release the file-extension lock; it's now OK for someone else to extend
- * the relation some more. Note that we cannot release this lock before
+ * the relation some more. Note that we cannot release this lock before
* we have buffer lock on the new page, or we risk a race condition
* against vacuumlazy.c --- see comments therein.
*/
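[Editor's note: sketch of the ordering being described, reusing the helper
names from this file; an approximation, not the patch itself:]

	LockRelationForExtension(relation, ExclusiveLock);
	buffer = ReadBufferBI(relation, P_NEW, bistate);	/* extend the file */
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);		/* lock new page first */
	UnlockRelationForExtension(relation, ExclusiveLock);	/* then release */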
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 3c69e1bada..06b5488923 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -117,7 +117,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer. It's
* unlikely to be *seriously* wrong, though, since reading either pd_lower
- * or pd_upper is probably atomic. Avoiding taking a lock seems more
+ * or pd_upper is probably atomic. Avoiding taking a lock seems more
* important than sometimes getting a wrong answer in what is after all
* just a heuristic estimate.
*/
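[Editor's note: the heuristic test sketched with the names this file uses;
latestRemovedXid is a placeholder local. An approximation, not the patch:]

	minfree = RelationGetTargetPageFreeSpace(relation,
											 HEAP_DEFAULT_FILLFACTOR);
	if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
	{
		/* worth pruning; try for a cleanup lock, but don't wait for one */
		if (ConditionalLockBufferForCleanup(buffer))
			(void) heap_page_prune(relation, buffer, OldestXmin,
								   true, &latestRemovedXid);
	}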
@@ -332,8 +332,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made. Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
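[Editor's note: for orientation, a sketch of how the later "execute" step
applies those arrays to the page line pointers; the offsets shown are
placeholders, not code from the patch:]

	/* redirected[] holds pairs: from-offset, to-offset */
	ItemIdSetRedirect(PageGetItemId(page, fromoff), tooff);
	/* nowdead[] entries keep their line pointer but lose their storage */
	ItemIdSetDead(PageGetItemId(page, deadoff));
	/* nowunused[] entries can be recycled entirely */
	ItemIdSetUnused(PageGetItemId(page, unusedoff));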
@@ -384,7 +384,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
- * any pruning occurs. So we have to be able to reap them
+ * any pruning occurs. So we have to be able to reap them
* separately from chain-pruning. (Note that
* HeapTupleHeaderIsHotUpdated will never return true for an
* XMIN_INVALID tuple, so this code will work even when there were
@@ -496,9 +496,10 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
+
/*
- * This tuple may soon become DEAD. Update the hint field
- * so that the page is reconsidered for pruning in future.
+ * This tuple may soon become DEAD. Update the hint field so
+ * that the page is reconsidered for pruning in future.
*/
heap_prune_record_prunable(prstate,
HeapTupleHeaderGetUpdateXid(htup));
@@ -574,7 +575,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/*
* If the root entry had been a normal tuple, we are deleting it, so
- * count it in the result. But changing a redirect (even to DEAD
+ * count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
@@ -663,7 +664,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index ef8c12194c..7b57911477 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
- * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
- * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
- * is encountered. That helps to keep the memory usage down. At the end,
+ * is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
- * for deadness using OldestXmin is not exact. In such a case we might
+ * for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
@@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
- * in the whole table. Note that if we do fail halfway through a CLUSTER,
+ * in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@@ -143,13 +143,13 @@ typedef struct RewriteStateData
BlockNumber rs_blockno; /* block where page will go */
bool rs_buffer_valid; /* T if any tuples in buffer */
bool rs_use_wal; /* must we WAL-log inserts? */
- bool rs_logical_rewrite; /* do we need to do logical rewriting */
+ bool rs_logical_rewrite; /* do we need to do logical rewriting */
TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
* determine tuple visibility */
TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
* point */
- TransactionId rs_logical_xmin; /* Xid that will be used as cutoff
- * point for logical rewrites */
+ TransactionId rs_logical_xmin; /* Xid that will be used as cutoff
+ * point for logical rewrites */
MultiXactId rs_cutoff_multi;/* MultiXactId that will be used as cutoff
* point for multixacts */
MemoryContext rs_cxt; /* for hash tables and entries and tuples in
@@ -158,7 +158,7 @@ typedef struct RewriteStateData
HTAB *rs_unresolved_tups; /* unmatched A tuples */
HTAB *rs_old_new_tid_map; /* unmatched B tuples */
HTAB *rs_logical_mappings; /* logical remapping files */
- uint32 rs_num_rewrite_mappings; /* # in memory mappings */
+ uint32 rs_num_rewrite_mappings; /* # in memory mappings */
} RewriteStateData;
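[Editor's note: the facility is driven through the functions shown in later
hunks; a minimal usage sketch, assuming the 9.4-era begin_heap_rewrite()
signature and placeholder relation/cutoff names:]

	RewriteState rws;

	rws = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin,
							 FreezeXid, MultiXactCutoff, use_wal);
	rewrite_heap_tuple(rws, old_tuple, new_tuple);	/* per live tuple */
	rewrite_heap_dead_tuple(rws, old_tuple);		/* per dead tuple */
	end_heap_rewrite(rws);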
/*
@@ -199,12 +199,12 @@ typedef OldToNewMappingData *OldToNewMapping;
*/
typedef struct RewriteMappingFile
{
- TransactionId xid; /* xid that might need to see the row */
- int vfd; /* fd of mappings file */
- off_t off; /* how far have we written yet */
- uint32 num_mappings; /* number of in-memory mappings */
- dlist_head mappings; /* list of in-memory mappings */
- char path[MAXPGPATH]; /* path, for error messages */
+ TransactionId xid; /* xid that might need to see the row */
+ int vfd; /* fd of mappings file */
+ off_t off; /* how far have we written yet */
+ uint32 num_mappings; /* number of in-memory mappings */
+ dlist_head mappings; /* list of in-memory mappings */
+ char path[MAXPGPATH]; /* path, for error messages */
} RewriteMappingFile;
/*
@@ -213,8 +213,8 @@ typedef struct RewriteMappingFile
*/
typedef struct RewriteMappingDataEntry
{
- LogicalRewriteMappingData map; /* map between old and new location of
- * the tuple */
+ LogicalRewriteMappingData map; /* map between old and new location of
+ * the tuple */
dlist_node node;
} RewriteMappingDataEntry;
@@ -346,7 +346,7 @@ end_heap_rewrite(RewriteState state)
}
/*
- * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+ * If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
@@ -617,7 +617,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
- * Insert a tuple to the new relation. This has to track heap_insert
+ * Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
@@ -866,13 +866,13 @@ logical_heap_rewrite_flush_mappings(RewriteState state)
hash_seq_init(&seq_status, state->rs_logical_mappings);
while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
{
- XLogRecData rdata[2];
- char *waldata;
- char *waldata_start;
+ XLogRecData rdata[2];
+ char *waldata;
+ char *waldata_start;
xl_heap_rewrite_mapping xlrec;
- Oid dboid;
- uint32 len;
- int written;
+ Oid dboid;
+ uint32 len;
+ int written;
/* this file hasn't got any new mappings */
if (src->num_mappings == 0)
@@ -962,14 +962,14 @@ logical_end_heap_rewrite(RewriteState state)
return;
/* write out remaining in-memory entries */
- if (state->rs_num_rewrite_mappings > 0 )
+ if (state->rs_num_rewrite_mappings > 0)
logical_heap_rewrite_flush_mappings(state);
/* Iterate over all mappings we have written and fsync the files. */
hash_seq_init(&seq_status, state->rs_logical_mappings);
while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
{
- if(FileSync(src->vfd) != 0)
+ if (FileSync(src->vfd) != 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not fsync file \"%s\": %m", src->path)));
@@ -985,10 +985,10 @@ static void
logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
LogicalRewriteMappingData *map)
{
- RewriteMappingFile *src;
- RewriteMappingDataEntry *pmap;
- Oid relid;
- bool found;
+ RewriteMappingFile *src;
+ RewriteMappingDataEntry *pmap;
+ Oid relid;
+ bool found;
relid = RelationGetRelid(state->rs_old_rel);
@@ -1027,7 +1027,7 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
if (src->vfd < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create file \"%s\": %m", path)));
+ errmsg("could not create file \"%s\": %m", path)));
}
pmap = MemoryContextAlloc(state->rs_cxt,
@@ -1041,7 +1041,7 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
* Write out the buffer every time we have accumulated too many in-memory
* entries across all mapping files.
*/
- if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */)
+ if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ )
logical_heap_rewrite_flush_mappings(state);
}
@@ -1054,11 +1054,11 @@ logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
HeapTuple new_tuple)
{
ItemPointerData new_tid = new_tuple->t_self;
- TransactionId cutoff = state->rs_logical_xmin;
- TransactionId xmin;
- TransactionId xmax;
- bool do_log_xmin = false;
- bool do_log_xmax = false;
+ TransactionId cutoff = state->rs_logical_xmin;
+ TransactionId xmin;
+ TransactionId xmax;
+ bool do_log_xmin = false;
+ bool do_log_xmax = false;
LogicalRewriteMappingData map;
/* no logical rewrite in progress, we don't need to log anything */
@@ -1147,7 +1147,8 @@ heap_xlog_logical_rewrite(XLogRecPtr lsn, XLogRecord *r)
if (fd < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create file \"%s\": %m", path)));
+ errmsg("could not create file \"%s\": %m", path)));
+
/*
* Truncate all data that's not guaranteed to have been safely fsynced (by
* previous record or by the last checkpoint).
@@ -1174,6 +1175,7 @@ heap_xlog_logical_rewrite(XLogRecPtr lsn, XLogRecord *r)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not write to file \"%s\": %m", path)));
+
/*
* Now fsync all previously written data. We could improve things and only
* do this for the last write to a file, but the required bookkeeping
@@ -1222,13 +1224,14 @@ CheckPointLogicalRewriteHeap(void)
mappings_dir = AllocateDir("pg_llog/mappings");
while ((mapping_de = ReadDir(mappings_dir, "pg_llog/mappings")) != NULL)
{
- struct stat statbuf;
+ struct stat statbuf;
Oid dboid;
Oid relid;
XLogRecPtr lsn;
TransactionId rewrite_xid;
TransactionId create_xid;
- uint32 hi, lo;
+ uint32 hi,
+ lo;
if (strcmp(mapping_de->d_name, ".") == 0 ||
strcmp(mapping_de->d_name, "..") == 0)
@@ -1244,7 +1247,7 @@ CheckPointLogicalRewriteHeap(void)
if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
&dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
- elog(ERROR,"could not parse filename \"%s\"", mapping_de->d_name);
+ elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);
lsn = ((uint64) hi) << 32 | lo;
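[Editor's note: the write side builds these file names the same way; roughly,
and assuming the state's rs_begin_lsn field holds the LSN baked into the name
and xid is the rewritten transaction:]

	snprintf(path, MAXPGPATH, "pg_llog/mappings/" LOGICAL_REWRITE_FORMAT,
			 dboid, relid,
			 (uint32) (state->rs_begin_lsn >> 32),
			 (uint32) state->rs_begin_lsn,
			 xid, GetCurrentTransactionId());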
@@ -1258,7 +1261,7 @@ CheckPointLogicalRewriteHeap(void)
}
else
{
- int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0);
+ int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0);
/*
* The file cannot vanish due to concurrency since this function
@@ -1269,6 +1272,7 @@ CheckPointLogicalRewriteHeap(void)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m", path)));
+
/*
* We could try to avoid fsyncing files that either haven't
* changed or have only been created since the checkpoint's start,
diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c
index edd0395d8e..7ea1ead543 100644
--- a/src/backend/access/heap/syncscan.c
+++ b/src/backend/access/heap/syncscan.c
@@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
- * to keep them synchronized to reduce the overall I/O needed. The goal is
+ * to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
- * tables in progress at any time. Therefore we just keep the scan positions
+ * tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
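[Editor's note: a sketch of how heapam.c consumes this module through its two
entry points, assuming the usual HeapScanDesc field names; not part of this
patch:]

	/* when a scan starts: join the pack where it currently is */
	scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);

	/* as the scan advances: advertise our position for others to join */
	ss_report_location(scan->rs_rd, blockno);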
@@ -243,7 +243,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
- * so that number is passed in rather than computing it again. The result
+ * so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index dde74d4797..4adfe8217b 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -53,11 +53,11 @@ static struct varlena *toast_fetch_datum(struct varlena * attr);
static struct varlena *toast_fetch_datum_slice(struct varlena * attr,
int32 sliceoffset, int32 length);
static int toast_open_indexes(Relation toastrel,
- LOCKMODE lock,
- Relation **toastidxs,
- int *num_indexes);
+ LOCKMODE lock,
+ Relation **toastidxs,
+ int *num_indexes);
static void toast_close_indexes(Relation *toastidxs, int num_indexes,
- LOCKMODE lock);
+ LOCKMODE lock);
/* ----------
@@ -91,8 +91,9 @@ heap_tuple_fetch_attr(struct varlena * attr)
* to persist a Datum for unusually long time, like in a HOLD cursor.
*/
struct varatt_indirect redirect;
+
VARATT_EXTERNAL_GET_POINTER(redirect, attr);
- attr = (struct varlena *)redirect.pointer;
+ attr = (struct varlena *) redirect.pointer;
/* nested indirect Datums aren't allowed */
Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr));
@@ -147,8 +148,9 @@ heap_tuple_untoast_attr(struct varlena * attr)
else if (VARATT_IS_EXTERNAL_INDIRECT(attr))
{
struct varatt_indirect redirect;
+
VARATT_EXTERNAL_GET_POINTER(redirect, attr);
- attr = (struct varlena *)redirect.pointer;
+ attr = (struct varlena *) redirect.pointer;
/* nested indirect Datums aren't allowed */
Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr));
@@ -217,6 +219,7 @@ heap_tuple_untoast_attr_slice(struct varlena * attr,
else if (VARATT_IS_EXTERNAL_INDIRECT(attr))
{
struct varatt_indirect redirect;
+
VARATT_EXTERNAL_GET_POINTER(redirect, attr);
/* nested indirect Datums aren't allowed */
@@ -299,6 +302,7 @@ toast_raw_datum_size(Datum value)
else if (VARATT_IS_EXTERNAL_INDIRECT(attr))
{
struct varatt_indirect toast_pointer;
+
VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);
/* nested indirect Datums aren't allowed */
@@ -354,6 +358,7 @@ toast_datum_size(Datum value)
else if (VARATT_IS_EXTERNAL_INDIRECT(attr))
{
struct varatt_indirect toast_pointer;
+
VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);
/* nested indirect Datums aren't allowed */
@@ -597,7 +602,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
- * PLAIN storage). If necessary, we'll push it out as a new
+ * PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@@ -740,7 +745,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
- * inline. But skip this if there's no toast table to push them to.
+ * inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@@ -850,7 +855,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
}
/*
- * Finally we store attributes of type 'm' externally. At this point we
+ * Finally we store attributes of type 'm' externally. At this point we
* increase the target tuple size, so that 'm' attributes aren't stored
* externally unless really necessary.
*/
@@ -1438,7 +1443,7 @@ toast_save_datum(Relation rel, Datum value,
* those versions could easily reference the same toast value.
* When we copy the second or later version of such a row,
* reusing the OID will mean we select an OID that's already
- * in the new toast table. Check for that, and if so, just
+ * in the new toast table. Check for that, and if so, just
* fall through without writing the data again.
*
* While annoying and ugly-looking, this is a good thing
@@ -1467,7 +1472,7 @@ toast_save_datum(Relation rel, Datum value,
{
toast_pointer.va_valueid =
GetNewOidWithIndex(toastrel,
- RelationGetRelid(toastidxs[validIndex]),
+ RelationGetRelid(toastidxs[validIndex]),
(AttrNumber) 1);
} while (toastid_valueid_exists(rel->rd_toastoid,
toast_pointer.va_valueid));
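The do/while above guards table rewrites that preserve toast OIDs: a freshly generated value ID can collide with one already copied into the new toast table, so generation repeats until an unused ID comes back. The same loop shape in a self-contained toy, with local stand-ins for GetNewOidWithIndex() and toastid_valueid_exists():

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

static Oid next_oid = 100;
static Oid get_new_oid(void) { return next_oid++; }			/* stand-in */
static bool value_id_exists(Oid id) { return id < 103; }	/* pretend 100-102 are taken */

int
main(void)
{
	Oid		valueid;

	do
		valueid = get_new_oid();
	while (value_id_exists(valueid));

	printf("chose value id %u\n", valueid);		/* prints 103 */
	return 0;
}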
@@ -1488,7 +1493,7 @@ toast_save_datum(Relation rel, Datum value,
*/
while (data_todo > 0)
{
- int i;
+ int i;
/*
* Calculate the size of this chunk
@@ -1506,7 +1511,7 @@ toast_save_datum(Relation rel, Datum value,
heap_insert(toastrel, toasttup, mycid, options, NULL);
/*
- * Create the index entry. We cheat a little here by not using
+ * Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table for all the
* indexes.
@@ -1656,8 +1661,8 @@ toastrel_valueid_exists(Relation toastrel, Oid valueid)
* Is there any such chunk?
*/
toastscan = systable_beginscan(toastrel,
- RelationGetRelid(toastidxs[validIndex]),
- true, SnapshotToast, 1, &toastkey);
+ RelationGetRelid(toastidxs[validIndex]),
+ true, SnapshotToast, 1, &toastkey);
if (systable_getnext(toastscan) != NULL)
result = true;
@@ -2126,7 +2131,8 @@ toast_open_indexes(Relation toastrel,
/* Fetch the first valid index in list */
for (i = 0; i < *num_indexes; i++)
{
- Relation toastidx = (*toastidxs)[i];
+ Relation toastidx = (*toastidxs)[i];
+
if (toastidx->rd_index->indisvalid)
{
res = i;
@@ -2136,14 +2142,14 @@ toast_open_indexes(Relation toastrel,
}
/*
- * Free index list, not necessary anymore as relations are opened
- * and a valid index has been found.
+ * Free index list, not necessary anymore as relations are opened and a
+ * valid index has been found.
*/
list_free(indexlist);
/*
- * The toast relation should have one valid index, so something is
- * going wrong if there is nothing.
+ * The toast relation should have one valid index, so something is going
+ * wrong if there is nothing.
*/
if (!found)
elog(ERROR, "no valid index found for toast relation with Oid %d",
@@ -2161,7 +2167,7 @@ toast_open_indexes(Relation toastrel,
static void
toast_close_indexes(Relation *toastidxs, int num_indexes, LOCKMODE lock)
{
- int i;
+ int i;
/* Close relations and clean up things */
for (i = 0; i < num_indexes; i++)
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 899ffacf1e..a0c0c7f2a6 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -27,7 +27,7 @@
* the sense that we make sure that whenever a bit is set, we know the
* condition is true, but if a bit is not set, it might or might not be true.
*
- * Clearing a visibility map bit is not separately WAL-logged. The callers
+ * Clearing a visibility map bit is not separately WAL-logged. The callers
* must make sure that whenever a bit is cleared, the bit is cleared on WAL
* replay of the updating operation as well.
*
@@ -36,9 +36,9 @@
* it may still be the case that every tuple on the page is visible to all
* transactions; we just don't know that for certain. The difficulty is that
* there are two bits which are typically set together: the PD_ALL_VISIBLE bit
- * on the page itself, and the visibility map bit. If a crash occurs after the
+ * on the page itself, and the visibility map bit. If a crash occurs after the
* visibility map page makes it to disk and before the updated heap page makes
- * it to disk, redo must set the bit on the heap page. Otherwise, the next
+ * it to disk, redo must set the bit on the heap page. Otherwise, the next
* insert, update, or delete on the heap page will fail to realize that the
* visibility map bit must be cleared, possibly causing index-only scans to
* return wrong answers.
@@ -59,10 +59,10 @@
* the buffer lock over any I/O that may be required to read in the visibility
* map page. To avoid this, we examine the heap page before locking it;
* if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
- * bit. Then, we lock the buffer. But this creates a race condition: there
+ * bit. Then, we lock the buffer. But this creates a race condition: there
* is a possibility that in the time it takes to lock the buffer, the
* PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
- * buffer, pin the visibility map page, and relock the buffer. This shouldn't
+ * buffer, pin the visibility map page, and relock the buffer. This shouldn't
* happen often, because only VACUUM currently sets visibility map bits,
* and the race will only occur if VACUUM processes a given page at almost
* exactly the same time that someone tries to further modify it.
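The comment above describes a small locking protocol. Sketched with no-op stand-ins for the real buffer-manager and visibility-map calls (none of the helper names below are backend functions), it looks roughly like this:

#include <stdbool.h>

typedef int Buffer;
typedef unsigned int BlockNumber;

/* no-op stand-ins for the real calls */
static bool heap_page_all_visible(Buffer buf) { (void) buf; return true; }
static void vm_pin(BlockNumber blk) { (void) blk; }
static bool vm_pin_ok(BlockNumber blk) { (void) blk; return true; }
static void lock_buffer(Buffer buf) { (void) buf; }
static void unlock_buffer(Buffer buf) { (void) buf; }

/*
 * Pin the VM page (which may require I/O) before taking the heap buffer
 * lock; afterwards, recheck and redo the pin if PD_ALL_VISIBLE appeared
 * while we were acquiring the lock.
 */
static void
lock_heap_page_with_vm_pin(Buffer buf, BlockNumber heapblk)
{
	if (heap_page_all_visible(buf))
		vm_pin(heapblk);			/* possible I/O: do it unlocked */

	lock_buffer(buf);

	while (heap_page_all_visible(buf) && !vm_pin_ok(heapblk))
	{
		unlock_buffer(buf);			/* lost the race: no VM I/O while locked */
		vm_pin(heapblk);
		lock_buffer(buf);
	}
}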
@@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
* visibilitymap_set - set a bit on a previously pinned page
*
* recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
- * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
+ * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* one provided; in normal running, we generate a new XLOG record and set the
- * page LSN to that value. cutoff_xid is the largest xmin on the page being
+ * page LSN to that value. cutoff_xid is the largest xmin on the page being
* marked all-visible; it is needed for Hot Standby, and can be
* InvalidTransactionId if the page contains no tuples.
*
@@ -320,10 +320,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
* releasing *buf after it's done testing and setting bits.
*
* NOTE: This function is typically called without a lock on the heap page,
- * so somebody else could change the bit just after we look at it. In fact,
+ * so somebody else could change the bit just after we look at it. In fact,
* since we don't lock the visibility map page either, it's even possible that
* someone else could have changed the bit just before we look at it, but yet
- * we might see the old value. It is the caller's responsibility to deal with
+ * we might see the old value. It is the caller's responsibility to deal with
* all concurrency issues!
*/
bool
@@ -526,7 +526,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
/*
* We might not have opened the relation at the smgr level yet, or we
- * might have been forced to close it by a sinval message. The code below
+ * might have been forced to close it by a sinval message. The code below
* won't necessarily notice relation extension immediately when extend =
* false, so we rely on sinval messages to ensure that our ideas about the
* size of the map aren't too far out of date.
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 50cb92a47b..850008b340 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -45,7 +45,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
- * routine does that. (We can't do it in the AM because index_endscan
+ * routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@@ -79,7 +79,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
scan->heapRelation = NULL; /* may be set later */
scan->indexRelation = indexRelation;
- scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */
+ scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */
scan->numberOfKeys = nkeys;
scan->numberOfOrderBys = norderbys;
@@ -188,7 +188,7 @@ BuildIndexValueDescription(Relation indexRelation,
* at rd_opcintype not the index tupdesc.
*
* Note: this is a bit shaky for opclasses that have pseudotype
- * input types such as ANYARRAY or RECORD. Currently, the
+ * input types such as ANYARRAY or RECORD. Currently, the
* typoutput functions associated with the pseudotypes will work
* okay, but we might have to try harder in future.
*/
@@ -269,7 +269,7 @@ systable_beginscan(Relation heapRelation,
if (snapshot == NULL)
{
- Oid relid = RelationGetRelid(heapRelation);
+ Oid relid = RelationGetRelid(heapRelation);
snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
sysscan->snapshot = snapshot;
@@ -442,7 +442,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
- * Currently we do not support non-index-based scans here. (In principle
+ * Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
@@ -475,7 +475,7 @@ systable_beginscan_ordered(Relation heapRelation,
if (snapshot == NULL)
{
- Oid relid = RelationGetRelid(heapRelation);
+ Oid relid = RelationGetRelid(heapRelation);
snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
sysscan->snapshot = snapshot;
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index a4b5f3d698..53cf96fc10 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -84,7 +84,7 @@
*
* Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
* to check that we don't try to scan or do retail insertions into an index
- * that is currently being rebuilt or pending rebuild. This helps to catch
+ * that is currently being rebuilt or pending rebuild. This helps to catch
* things that don't work when reindexing system catalogs. The assertion
* doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
* when calling the index AM's ambuild routine, and there is no reason for
@@ -149,7 +149,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@@ -414,7 +414,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -553,7 +553,7 @@ index_fetch_heap(IndexScanDesc scan)
/*
* If we scanned a whole HOT chain and found only dead tuples, tell index
* AM to kill its entry for that TID (this will take effect in the next
- * amgettuple call, in index_getnext_tid). We do not do this when in
+ * amgettuple call, in index_getnext_tid). We do not do this when in
* recovery because it may violate MVCC to do so. See comments in
* RelationGetIndexScan().
*/
@@ -590,7 +590,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
- * earlier member. Must still hold pin on current heap page.
+ * earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(&scan->xs_ctup.t_self) ==
@@ -760,7 +760,7 @@ index_can_return(Relation indexRelation)
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -794,7 +794,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 86ac7d3ec2..b1f9ae3685 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -25,7 +25,7 @@
* Although any negative int32 (except INT_MIN) is acceptable for reporting
* "<", and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
- * That could overflow and give the wrong answer. Also, one must not
+ * That could overflow and give the wrong answer. Also, one must not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
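In code, the rule above amounts to comparing explicitly instead of subtracting; a self-contained sketch for 32-bit keys:

#include <stdint.h>

/*
 * Correct three-way comparison: no "a - b" (which can overflow int32)
 * and never INT_MIN (which some callers negate).
 */
static int
int32_cmp(int32_t a, int32_t b)
{
	if (a > b)
		return 1;
	if (a < b)
		return -1;
	return 0;
}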
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 0d806af505..d64cbd9822 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -90,7 +90,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
* By here, itup is filled in, including the TID.
*
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
+ * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
* don't actually insert.
@@ -129,7 +129,7 @@ top:
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be the
* right place for the key we want to insert. In this case, we need to
- * move right in the tree. See Lehman and Yao for an excruciatingly
+ * move right in the tree. See Lehman and Yao for an excruciatingly
* precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false,
@@ -211,7 +211,7 @@ top:
* is the first tuple on the next page.
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
- * we must wait for to see if it commits a conflicting tuple. If an actual
+ * we must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just ereport().
*
* However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
@@ -293,7 +293,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* If we are doing a recheck, we expect to find the tuple we
- * are rechecking. It's not a duplicate, but we have to keep
+ * are rechecking. It's not a duplicate, but we have to keep
* scanning.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING &&
@@ -482,7 +482,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* If the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -494,7 +494,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
- * where the new tuple could be inserted. The caller should hold an
+ * where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@@ -564,7 +564,7 @@ _bt_findinsertloc(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
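A minimal sketch of that decision rule, with rand() standing in for the backend's random-number source:

#include <stdbool.h>
#include <stdlib.h>

/*
 * Decide whether to keep chasing free space to the right: stop as soon
 * as the current page fits the item; otherwise move right with
 * probability 0.99 and "get tired" (split here) the remaining 1%.
 */
static bool
keep_moving_right(int free_space, int item_size)
{
	if (free_space >= item_size)
		return false;				/* enough room: insert here */
	return rand() < (int) (0.99 * RAND_MAX);
}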
@@ -574,7 +574,7 @@ _bt_findinsertloc(Relation rel,
while (PageGetFreeSpace(page) < itemsz)
{
Buffer rbuf;
- BlockNumber rblkno;
+ BlockNumber rblkno;
/*
* before considering moving right, see if we can obtain enough space
@@ -620,10 +620,10 @@ _bt_findinsertloc(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * If this page was incompletely split, finish the split now.
- * We do this while holding a lock on the left sibling, which
- * is not good because finishing the split could be a fairly
- * lengthy operation. But this should happen very seldom.
+ * If this page was incompletely split, finish the split now. We
+ * do this while holding a lock on the left sibling, which is not
+ * good because finishing the split could be a fairly lengthy
+ * operation. But this should happen very seldom.
*/
if (P_INCOMPLETE_SPLIT(lpageop))
{
@@ -681,7 +681,7 @@ _bt_findinsertloc(Relation rel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the correct buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* When inserting to a non-leaf page, 'cbuf' is the left-sibling of the
@@ -978,7 +978,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
* origpage is the original page to be split. leftpage is a temporary
* buffer that receives the left-sibling data, which will be copied back
* into origpage on success. rightpage is the new page that receives the
- * right-sibling data. If we fail before reaching the critical section,
+ * right-sibling data. If we fail before reaching the critical section,
* origpage hasn't been modified and leftpage is only workspace. In
* principle we shouldn't need to worry about rightpage either, because it
* hasn't been linked into the btree page structure; but to avoid leaving
@@ -1196,7 +1196,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@@ -1330,11 +1330,10 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
lastrdata++;
/*
- * Although we don't need to WAL-log anything on the left page,
- * we still need XLogInsert to consider storing a full-page image
- * of the left page, so make an empty entry referencing that
- * buffer. This also ensures that the left page is always backup
- * block 1.
+ * Although we don't need to WAL-log anything on the left page, we
+ * still need XLogInsert to consider storing a full-page image of
+ * the left page, so make an empty entry referencing that buffer.
+ * This also ensures that the left page is always backup block 1.
*/
lastrdata->data = NULL;
lastrdata->len = 0;
@@ -1448,7 +1447,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -1684,7 +1683,7 @@ _bt_checksplitloc(FindSplitData *state,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1705,7 +1704,7 @@ _bt_insert_parent(Relation rel,
* Here we have to do something Lehman and Yao don't talk about: deal with
* a root split and construction of a new root. If our stack is empty
* then we have just split a node on what had been the root level when we
- * descended the tree. If it was still the root then we perform a
+ * descended the tree. If it was still the root then we perform a
* new-root construction. If it *wasn't* the root anymore, search to find
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
@@ -1917,7 +1916,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
* order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index c0ebb95ba8..d357b33bc0 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -12,7 +12,7 @@
* src/backend/access/nbtree/nbtpage.c
*
* NOTES
- * Postgres btree pages look like ordinary relation pages. The opaque
+ * Postgres btree pages look like ordinary relation pages. The opaque
* data at high addresses includes pointers to left and right siblings
* and flag data describing page state. The first page in a btree, page
* zero, is special -- it stores meta-information describing the tree.
@@ -36,7 +36,7 @@ static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf,
static bool _bt_lock_branch_parent(Relation rel, BlockNumber child,
BTStack stack, Buffer *topparent, OffsetNumber *topoff,
BlockNumber *target, BlockNumber *rightsib);
-static void _bt_log_reuse_page(Relation rel, BlockNumber blkno,
+static void _bt_log_reuse_page(Relation rel, BlockNumber blkno,
TransactionId latestRemovedXid);
/*
@@ -62,7 +62,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
@@ -80,7 +80,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@@ -197,7 +197,7 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
- * over again. (Is that really true? But it's hardly worth trying
+ * over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
@@ -254,7 +254,7 @@ _bt_getroot(Relation rel, int access)
END_CRIT_SECTION();
/*
- * swap root write lock for read lock. There is no danger of anyone
+ * swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
@@ -322,7 +322,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -497,7 +497,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -564,7 +564,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
- * blkno == P_NEW means to get an unallocated index page. The page
+ * blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@@ -595,7 +595,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check that
- * the page is still free. (For example, an already-free page could
+ * the page is still free. (For example, an already-free page could
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
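A sketch of the resulting loop shape; fsm_get_candidate() and page_still_recyclable() are illustrative stand-ins for the FSM lookup and the per-page recheck, not backend functions:

#include <stdbool.h>

typedef unsigned int BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

/* toy stand-ins */
static BlockNumber fsm_get_candidate(void) { return InvalidBlockNumber; }
static bool page_still_recyclable(BlockNumber blk) { (void) blk; return false; }

static BlockNumber
get_free_index_page(void)
{
	for (;;)
	{
		BlockNumber blkno = fsm_get_candidate();

		if (blkno == InvalidBlockNumber)
			return InvalidBlockNumber;	/* FSM empty: caller extends the index */
		if (page_still_recyclable(blkno))
			return blkno;				/* verified free: safe to reuse */
		/* stale FSM entry; try the next candidate */
	}
}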
@@ -774,7 +774,7 @@ _bt_page_recyclable(Page page)
/*
* Delete item(s) from a btree page during VACUUM.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -842,7 +842,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
/*
* The target-offsets array is not in the buffer, but pretend that it
- * is. When XLogInsert stores the whole buffer, the offsets array
+ * is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
@@ -1049,11 +1049,12 @@ _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack,
lbuf = _bt_getbuf(rel, leftsib, BT_READ);
lpage = BufferGetPage(lbuf);
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+
/*
* If the left sibling was concurrently split, so that its
- * next-pointer doesn't point to the current page anymore,
- * the split that created the current page must be completed.
- * (We don't allow splitting an incompletely split page again
+ * next-pointer doesn't point to the current page anymore, the
+ * split that created the current page must be completed. (We
+ * don't allow splitting an incompletely split page again
* until the previous split has been completed)
*/
if (lopaque->btpo_next == parent &&
@@ -1066,7 +1067,7 @@ _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack,
}
return _bt_lock_branch_parent(rel, parent, stack->bts_parent,
- topparent, topoff, target, rightsib);
+ topparent, topoff, target, rightsib);
}
else
{
@@ -1112,6 +1113,7 @@ _bt_pagedel(Relation rel, Buffer buf)
bool rightsib_empty;
Page page;
BTPageOpaque opaque;
+
/*
* "stack" is a search stack leading (approximately) to the target page.
* It is initially NULL, but when iterating, we keep it to avoid
@@ -1140,24 +1142,24 @@ _bt_pagedel(Relation rel, Buffer buf)
* was never supposed to leave half-dead pages in the tree, it was
* just a transient state, but it was nevertheless possible in
* error scenarios. We don't know how to deal with them here. They
- * are harmless as far as searches are considered, but inserts into
- * the deleted keyspace could add out-of-order downlinks in the
- * upper levels. Log a notice, hopefully the admin will notice and
- * reindex.
+ * are harmless as far as searches are considered, but inserts
+ * into the deleted keyspace could add out-of-order downlinks in
+ * the upper levels. Log a notice, hopefully the admin will notice
+ * and reindex.
*/
if (P_ISHALFDEAD(opaque))
ereport(LOG,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("index \"%s\" contains a half-dead internal page",
- RelationGetRelationName(rel)),
+ errmsg("index \"%s\" contains a half-dead internal page",
+ RelationGetRelationName(rel)),
errhint("This can be caused by an interrupt VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
_bt_relbuf(rel, buf);
return ndeleted;
}
/*
- * We can never delete rightmost pages nor root pages. While at
- * it, check that page is not already deleted and is empty.
+ * We can never delete rightmost pages nor root pages. While at it,
+ * check that page is not already deleted and is empty.
*
* To keep the algorithm simple, we also never delete an incompletely
* split page (they should be rare enough that this doesn't make any
@@ -1167,10 +1169,10 @@ _bt_pagedel(Relation rel, Buffer buf)
* left half of an incomplete split, but ensuring that it's not the
* right half is more complicated. For that, we have to check that
* the left sibling doesn't have its INCOMPLETE_SPLIT flag set. On
- * the first iteration, we temporarily release the lock on the
- * current page, and check the left sibling and also construct a
- * search stack to. On subsequent iterations, we know we stepped right
- * from a page that passed these tests, so it's OK.
+ * the first iteration, we temporarily release the lock on the current
+ * page, and check the left sibling and also construct a search stack
+ * to. On subsequent iterations, we know we stepped right from a page
+ * that passed these tests, so it's OK.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
@@ -1184,9 +1186,9 @@ _bt_pagedel(Relation rel, Buffer buf)
}
/*
- * First, remove downlink pointing to the page (or a parent of the page,
- * if we are going to delete a taller branch), and mark the page as
- * half-dead.
+ * First, remove downlink pointing to the page (or a parent of the
+ * page, if we are going to delete a taller branch), and mark the page
+ * as half-dead.
*/
if (!P_ISHALFDEAD(opaque))
{
@@ -1205,7 +1207,7 @@ _bt_pagedel(Relation rel, Buffer buf)
ItemId itemid;
IndexTuple targetkey;
Buffer lbuf;
- BlockNumber leftsib;
+ BlockNumber leftsib;
itemid = PageGetItemId(page, P_HIKEY);
targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
@@ -1219,9 +1221,9 @@ _bt_pagedel(Relation rel, Buffer buf)
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
/*
- * Fetch the left sibling, to check that it's not marked
- * with INCOMPLETE_SPLIT flag. That would mean that the
- * page to-be-deleted doesn't have a downlink, and the page
+ * Fetch the left sibling, to check that it's not marked with
+ * INCOMPLETE_SPLIT flag. That would mean that the page
+ * to-be-deleted doesn't have a downlink, and the page
* deletion algorithm isn't prepared to handle that.
*/
if (!P_LEFTMOST(opaque))
@@ -1267,7 +1269,7 @@ _bt_pagedel(Relation rel, Buffer buf)
/*
* Then unlink it from its siblings. Each call to
- *_bt_unlink_halfdead_page unlinks the topmost page from the branch,
+ * _bt_unlink_halfdead_page unlinks the topmost page from the branch,
* making it shallower. Iterate until the leaf page is gone.
*/
rightsib_empty = false;
@@ -1291,8 +1293,8 @@ _bt_pagedel(Relation rel, Buffer buf)
* is that it was the rightmost child of the parent. Now that we
* removed the downlink for this page, the right sibling might now be
* the only child of the parent, and could be removed. It would be
- * picked up by the next vacuum anyway, but might as well try to remove
- * it now, so loop back to process the right sibling.
+ * picked up by the next vacuum anyway, but might as well try to
+ * remove it now, so loop back to process the right sibling.
*/
if (!rightsib_empty)
break;
@@ -1310,9 +1312,9 @@ _bt_pagedel(Relation rel, Buffer buf)
static bool
_bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
{
- BlockNumber leafblkno;
+ BlockNumber leafblkno;
BlockNumber leafrightsib;
- BlockNumber target;
+ BlockNumber target;
BlockNumber rightsib;
ItemId itemid;
Page page;
@@ -1351,7 +1353,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
/*
* Check that the parent-page index items we're about to delete/overwrite
- * contain what we expect. This can fail if the index has become corrupt
+ * contain what we expect. This can fail if the index has become corrupt
* for some reason. We want to throw any error before entering the
* critical section --- otherwise it'd be a PANIC.
*
@@ -1490,9 +1492,9 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty)
BlockNumber leafblkno = BufferGetBlockNumber(leafbuf);
BlockNumber leafleftsib;
BlockNumber leafrightsib;
- BlockNumber target;
- BlockNumber leftsib;
- BlockNumber rightsib;
+ BlockNumber target;
+ BlockNumber leftsib;
+ BlockNumber rightsib;
Buffer lbuf = InvalidBuffer;
Buffer buf;
Buffer rbuf;
@@ -1506,7 +1508,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty)
int targetlevel;
ItemPointer leafhikey;
BlockNumber nextchild;
- BlockNumber topblkno;
+ BlockNumber topblkno;
page = BufferGetPage(leafbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -1596,7 +1598,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty)
lbuf = InvalidBuffer;
/*
- * Next write-lock the target page itself. It should be okay to take just
+ * Next write-lock the target page itself. It should be okay to take just
* a write lock not a superexclusive lock, since no scans would stop on an
* empty page.
*/
@@ -1605,9 +1607,9 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Check page is still empty etc, else abandon deletion. This is just
- * for paranoia's sake; a half-dead page cannot resurrect because there
- * can be only one vacuum process running at a time.
+ * Check page is still empty etc, else abandon deletion. This is just for
+ * paranoia's sake; a half-dead page cannot resurrect because there can be
+ * only one vacuum process running at a time.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque))
{
@@ -1733,7 +1735,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty)
* we're in VACUUM and would not otherwise have an XID. Having already
* updated links to the target, ReadNewTransactionId() suffices as an
* upper bound. Any scan having retained a now-stale link is advertising
- * in its PGXACT an xmin less than or equal to the value we read here. It
+ * in its PGXACT an xmin less than or equal to the value we read here. It
* will continue to do so, holding back RecentGlobalXmin, for the duration
* of that scan.
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 542ed43984..36dc6c278e 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -208,7 +208,7 @@ btbuildempty(PG_FUNCTION_ARGS)
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, P_NONE, 0);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
PageSetChecksumInplace(metapage, BTREE_METAPAGE);
smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
@@ -427,7 +427,7 @@ btbeginscan(PG_FUNCTION_ARGS)
/*
* We don't know yet whether the scan will be index-only, so we do not
- * allocate the tuple workspace arrays until btrescan. However, we set up
+ * allocate the tuple workspace arrays until btrescan. However, we set up
* scan->xs_itupdesc whether we'll need it or not, since that's so cheap.
*/
so->currTuples = so->markTuples = NULL;
@@ -472,7 +472,7 @@ btrescan(PG_FUNCTION_ARGS)
/*
* Allocate tuple workspace arrays, if needed for an index-only scan and
- * not already done in a previous rescan call. To save on palloc
+ * not already done in a previous rescan call. To save on palloc
* overhead, both workspaces are allocated as one palloc block; only this
* function and btendscan know that.
*
@@ -952,7 +952,7 @@ restart:
vstate->lastBlockLocked = blkno;
/*
- * Check whether we need to recurse back to earlier pages. What we
+ * Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 0bf12f0e10..203b9691ba 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -50,7 +50,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer. Also, in BT_WRITE mode,
* any incomplete splits encountered during the search will be finished.
*/
@@ -271,7 +271,7 @@ _bt_moveright(Relation rel,
* (or leaf keys > given scankey when nextkey is true).
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -403,7 +403,7 @@ _bt_compare(Relation rel,
/*
* The scan key is set up with the attribute number associated with each
* term in the key. It is important that, if the index is multi-key, the
- * scan contain the first k key attributes, and that they be in order. If
+ * scan contain the first k key attributes, and that they be in order. If
* you think about how multi-key ordering works, you'll understand why
* this is.
*
@@ -442,7 +442,7 @@ _bt_compare(Relation rel,
/*
* The sk_func needs to be passed the index value as left arg and
* the sk_argument as right arg (they might be of different
- * types). Since it is convenient for callers to think of
+ * types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item, we have
* to flip the sign of the comparison result. (Unless it's a DESC
* column, in which case we *don't* flip the sign.)
@@ -471,7 +471,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@@ -527,7 +527,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
- * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
+ * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@@ -742,7 +742,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
+ * scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
@@ -861,7 +861,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Find first item >= scankey, then back up one to arrive at last
- * item < scankey. (Note: this positioning strategy is only used
+ * item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
*/
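Over a plain sorted array, the positioning strategy above reduces to one primitive; a self-contained sketch (the real code does this over btree pages, not an array):

/* First slot whose value is >= key (n if none). */
static int
first_ge(const int *arr, int n, int key)
{
	int		lo = 0,
			hi = n;

	while (lo < hi)
	{
		int		mid = lo + (hi - lo) / 2;

		if (arr[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

/* Backward-scan start: "find first item >= scankey, then back up one",
 * i.e. the last item < key, or -1 if there is none. */
static int
last_lt(const int *arr, int n, int key)
{
	return first_ge(arr, n, key) - 1;
}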
@@ -910,7 +910,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTGreaterEqualStrategyNumber:
/*
- * Find first item >= scankey. (This is only used for forward
+ * Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
@@ -988,7 +988,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed. (If
+ * the last item on this page. Adjust the starting offset if needed. (If
* this results in an offset before the first item or after the last one,
* _bt_readpage will report no items found, and then we'll step to the
* next page as needed.)
@@ -1304,7 +1304,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
* than the walk-right case because of the possibility that the page
* to our left splits while we are in flight to it, plus the
* possibility that the page we were on gets deleted after we leave
- * it. See nbtree/README for details.
+ * it. See nbtree/README for details.
*/
for (;;)
{
@@ -1399,7 +1399,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
- * because half-dead pages are still in the sibling chain. Caller
+ * because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
@@ -1425,7 +1425,7 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page (there
+ * It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the deleted
* one's keyspace, so stepping left from it will take us where we
* want to be.
@@ -1469,7 +1469,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 9ddc275499..1281a120c5 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -7,7 +7,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -42,11 +42,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
- * steps of the build. After completing the index build, we can just sync
+ * steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
- * just after the index build. However, it is clearly not sufficient if the
+ * just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
@@ -89,7 +89,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -160,7 +160,7 @@ _bt_spoolinit(Relation heap, Relation index, bool isunique, bool isdead)
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
- * unique index actually requires two BTSpool objects. We expect that the
+ * unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
*/
@@ -298,7 +298,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
PageSetChecksumInplace(page, blkno);
/*
- * Now write the page. There's no need for smgr to schedule an fsync for
+ * Now write the page. There's no need for smgr to schedule an fsync for
* this write; we'll do it ourselves before ending the build.
*/
if (blkno == wstate->btws_pages_written)
@@ -423,14 +423,14 @@ _bt_sortaddtup(Page page,
* A leaf page being built looks like:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp0 linp1 linp2 ... |
+ * | PageHeaderData | linp0 linp1 linp2 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
* | ^ last |
* | |
* +-------------+------------------------------------+
- * | | itemN ... |
+ * | | itemN ... |
* +-------------+------------------+-----------------+
* | ... item3 item2 item1 | "special space" |
* +--------------------------------+-----------------+
@@ -492,9 +492,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
RelationGetRelationName(wstate->index))));
/*
- * Check to see if page is "full". It's definitely full if the item won't
+ * Check to see if page is "full". It's definitely full if the item won't
* fit. Otherwise, compare to the target freespace derived from the
- * fillfactor. However, we must put at least two items on each page, so
+ * fillfactor. However, we must put at least two items on each page, so
* disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
@@ -567,7 +567,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
}
/*
- * Write out the old page. We never need to touch it again, so we can
+ * Write out the old page. We never need to touch it again, so we can
* free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
@@ -804,7 +804,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
/*
* If the index is WAL-logged, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a non-WAL-logged index we don't
+ * safe to commit the transaction. (For a non-WAL-logged index we don't
* care since the index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build. It's
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 922e6a9cd4..f8f8e69be7 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
continue;
/*
- * First, deconstruct the array into elements. Anything allocated
+ * First, deconstruct the array into elements. Anything allocated
* here (including a possibly detoasted array value) is in the
* workspace context.
*/
@@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
&elem_values, &elem_nulls, &num_elems);
/*
- * Compress out any null elements. We can ignore them since we assume
+ * Compress out any null elements. We can ignore them since we assume
* all btree operators are strict.
*/
num_nonnulls = 0;
@@ -517,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg)
* _bt_start_array_keys() -- Initialize array keys at start of a scan
*
* Set up the cur_elem counters and fill in the first sk_argument value for
- * each array scankey. We can't do this until we know the scan direction.
+ * each array scankey. We can't do this until we know the scan direction.
*/
void
_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
@@ -670,8 +670,8 @@ _bt_restore_array_keys(IndexScanDesc scan)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover which scan keys must be
- * satisfied to continue the scan. It also attempts to eliminate redundant
- * keys and detect contradictory keys. (If the index opfamily provides
+ * satisfied to continue the scan. It also attempts to eliminate redundant
+ * keys and detect contradictory keys. (If the index opfamily provides
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
@@ -702,7 +702,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -737,7 +737,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* Note: the reason we have to copy the preprocessed scan keys into private
* storage is that we are modifying the array based on comparisons of the
* key argument values, which could change on a rescan or after moving to
- * new elements of array keys. Therefore we can't overwrite the source data.
+ * new elements of array keys. Therefore we can't overwrite the source data.
*/
void
_bt_preprocess_keys(IndexScanDesc scan)
@@ -919,7 +919,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@@ -1017,7 +1017,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
+ * may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -1043,7 +1043,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
StrategyNumber strat;
/*
- * First, deal with cases where one or both args are NULL. This should
+ * First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
@@ -1183,7 +1183,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied. We return TRUE if the
+ * a NULL means that the qual cannot be satisfied. We return TRUE if the
* comparison value isn't NULL, or FALSE if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
@@ -1212,7 +1212,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
- * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
+ * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*
@@ -1284,7 +1284,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns
@@ -1296,7 +1296,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
@@ -1482,7 +1482,7 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
@@ -1498,8 +1498,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
@@ -1593,7 +1593,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
@@ -1609,8 +1609,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
@@ -1631,7 +1631,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
+ * But it can never match. If all the earlier row comparison
* columns are required for the scan direction, we can stop the
* scan, because there can't be another tuple that will succeed.
*/
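The stop rule these comments keep restating is direction-symmetric. A minimal sketch of the test, assuming only the real SK_BT_REQFWD/SK_BT_REQBKWD flag bits from access/nbtree.h (the actual logic is inlined in _bt_checkkeys and _bt_check_rowcompare rather than factored out like this):

    /* Hedged sketch: may the scan stop once this required qual fails? */
    static bool
    scan_can_stop_on_failed_qual(ScanKey key, ScanDirection dir)
    {
        if (ScanDirectionIsForward(dir))
            return (key->sk_flags & SK_BT_REQFWD) != 0;
        return (key->sk_flags & SK_BT_REQBKWD) != 0;
    }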
@@ -1696,7 +1696,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Tuple fails this qual. If it's a required qual for the current
* scan direction, then we can conclude no further tuples will pass,
- * either. Note we have to look at the deciding column, not
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1722,7 +1722,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DEAD status (which is only a hint).
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@@ -1806,8 +1806,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
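The shared area described above reduces to a counter plus a small array of (index, cycle ID) entries. A hedged sketch of that shape, with illustrative names (the real structures, BTOneVacInfo and BTVacInfo, live further down in nbtutils.c together with the locking):

    /* Sketch of cycle-ID bookkeeping; caller is assumed to hold the lock. */
    typedef struct SketchVacInfo
    {
        BTCycleId   cycle_ctr;      /* most recently assigned cycle ID */
        int         num_vacuums;    /* active entries, at most MaxBackends */
        struct
        {
            Oid         relid;      /* index being vacuumed */
            BTCycleId   cycleid;    /* cycle ID assigned to that vacuum */
        }           vacuums[1];     /* variable-length array */
    } SketchVacInfo;

    static BTCycleId
    sketch_start_vacuum(SketchVacInfo *vac, Oid relid)
    {
        BTCycleId   result = vac->cycle_ctr + 1;

        if (result == 0)            /* skip 0, reserved for "no cycle" */
            result = 1;
        vac->cycle_ctr = result;
        vac->vacuums[vac->num_vacuums].relid = relid;
        vac->vacuums[vac->num_vacuums].cycleid = result;
        vac->num_vacuums++;
        return result;
    }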
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 86824f3495..640639c175 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -40,9 +40,9 @@ _bt_restore_page(Page page, char *from, int len)
int nitems;
/*
- * To get the items back in the original order, we add them to the page
- * in reverse. To figure out where one tuple ends and another begins,
- * we have to scan them in forward order first.
+ * To get the items back in the original order, we add them to the page in
+ * reverse. To figure out where one tuple ends and another begins, we
+ * have to scan them in forward order first.
*/
i = 0;
while (from < end)
@@ -97,7 +97,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
pageop->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) metapg)->pd_lower =
@@ -118,7 +118,7 @@ static void
_bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record,
RelFileNode rnode, BlockNumber cblock)
{
- Buffer buf;
+ Buffer buf;
buf = XLogReadBuffer(rnode, cblock, false);
if (BufferIsValid(buf))
@@ -128,6 +128,7 @@ _bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record,
if (lsn > PageGetLSN(page))
{
BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+
Assert((pageop->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0);
pageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
@@ -153,6 +154,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
datapos = (char *) xlrec + SizeOfBtreeInsert;
datalen = record->xl_len - SizeOfBtreeInsert;
+
/*
* if this insert finishes a split at lower level, extract the block
* number of the (left) child.
@@ -172,10 +174,10 @@ btree_xlog_insert(bool isleaf, bool ismeta,
}
/*
- * Insertion to an internal page finishes an incomplete split at the
- * child level. Clear the incomplete-split flag in the child. Note:
- * during normal operation, the child and parent pages are locked at the
- * same time, so that clearing the flag and inserting the downlink appear
+ * Insertion to an internal page finishes an incomplete split at the child
+ * level. Clear the incomplete-split flag in the child. Note: during
+ * normal operation, the child and parent pages are locked at the same
+ * time, so that clearing the flag and inserting the downlink appear
* atomic to other backends. We don't bother with that during replay,
* because readers don't care about the incomplete-split flag and there
* cannot be updates happening.
@@ -279,9 +281,10 @@ btree_xlog_split(bool onleft, bool isroot,
datapos += left_hikeysz;
datalen -= left_hikeysz;
}
+
/*
- * If this insertion finishes an incomplete split, get the block number
- * of the child.
+ * If this insertion finishes an incomplete split, get the block number of
+ * the child.
*/
if (!isleaf && !(record->xl_info & XLR_BKP_BLOCK(1)))
{
@@ -439,7 +442,7 @@ btree_xlog_split(bool onleft, bool isroot,
* the backup block containing right sibling is 2 or 3, depending
* whether this was a leaf or internal page.
*/
- int rnext_index = isleaf ? 2 : 3;
+ int rnext_index = isleaf ? 2 : 3;
if (record->xl_info & XLR_BKP_BLOCK(rnext_index))
(void) RestoreBackupBlock(lsn, record, rnext_index, false, false);
@@ -620,7 +623,7 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* In what follows, we have to examine the previous state of the index
- * page, as well as the heap page(s) it points to. This is only valid if
+ * page, as well as the heap page(s) it points to. This is only valid if
* WAL replay has reached a consistent database state; which means that
* the preceding check is not just an optimization, but is *necessary*. We
* won't have let in any user sessions before we reach consistency.
@@ -629,9 +632,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
- * Get index page. If the DB is consistent, this should not fail, nor
+ * Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
@@ -716,9 +719,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* If all heap tuples were LP_DEAD then we will be returning
* InvalidTransactionId here, which avoids conflicts. This matches
- * existing logic which assumes that LP_DEAD tuples must already be
- * older than the latestRemovedXid on the cleanup record that
- * set them as LP_DEAD, hence must already have generated a conflict.
+ * existing logic which assumes that LP_DEAD tuples must already be older
+ * than the latestRemovedXid on the cleanup record that set them as
+ * LP_DEAD, hence must already have generated a conflict.
*/
return latestRemovedXid;
}
@@ -735,7 +738,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
* If we have any conflict processing to do, it must happen before we
* update the page.
*
- * Btree delete records can conflict with standby queries. You might
+ * Btree delete records can conflict with standby queries. You might
* think that vacuum records would conflict as well, but we've handled
* that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
* cleaned by the vacuum of the heap and so we can resolve any conflicts
@@ -828,7 +831,7 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogRecPtr lsn, XLogRecord *record)
ItemId itemid;
IndexTuple itup;
OffsetNumber nextoffset;
- BlockNumber rightsib;
+ BlockNumber rightsib;
poffset = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
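The reflowed comment in the first hunk of this file describes a two-pass restore: scan forward to find tuple boundaries, then add in reverse. A hedged sketch of the pattern (the real _bt_restore_page computes explicit target offsets; here each earlier item is simply inserted at the head, which equally yields the original order; uses bufpage.h and itup.h):

    static void
    restore_items_reversed(Page page, char *from, char *end)
    {
        char       *items[MaxIndexTuplesPerPage];
        Size        sizes[MaxIndexTuplesPerPage];
        int         i,
                    nitems = 0;

        while (from < end)          /* forward pass: find boundaries */
        {
            IndexTuple  itup = (IndexTuple) from;
            Size        sz = MAXALIGN(IndexTupleSize(itup));

            items[nitems] = from;
            sizes[nitems] = sz;
            nitems++;
            from += sz;
        }
        for (i = nitems - 1; i >= 0; i--)   /* backward pass: re-add */
        {
            if (PageAddItem(page, (Item) items[i], sizes[i],
                            FirstOffsetNumber, false, false)
                == InvalidOffsetNumber)
                elog(PANIC, "restore_items_reversed: cannot add item");
        }
    }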
diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c
index aa60c8db65..cd1edfffa2 100644
--- a/src/backend/access/rmgrdesc/gindesc.c
+++ b/src/backend/access/rmgrdesc/gindesc.c
@@ -54,7 +54,7 @@ desc_recompress_leaf(StringInfo buf, ginxlogRecompressDataLeaf *insertData)
walbuf += nitems * sizeof(ItemPointerData);
}
- switch(a_action)
+ switch (a_action)
{
case GIN_SEGMENT_ADDITEMS:
appendStringInfo(buf, " %d (add %d items)", a_segno, nitems);
@@ -94,13 +94,13 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec)
case XLOG_GIN_INSERT:
{
ginxlogInsert *xlrec = (ginxlogInsert *) rec;
- char *payload = rec + sizeof(ginxlogInsert);
+ char *payload = rec + sizeof(ginxlogInsert);
appendStringInfoString(buf, "Insert item, ");
desc_node(buf, xlrec->node, xlrec->blkno);
appendStringInfo(buf, " isdata: %c isleaf: %c",
- (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F',
- (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F');
+ (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F',
+ (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F');
if (!(xlrec->flags & GIN_INSERT_ISLEAF))
{
BlockNumber leftChildBlkno;
@@ -115,11 +115,11 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec)
}
if (!(xlrec->flags & GIN_INSERT_ISDATA))
appendStringInfo(buf, " isdelete: %c",
- (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
+ (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
else if (xlrec->flags & GIN_INSERT_ISLEAF)
{
ginxlogRecompressDataLeaf *insertData =
- (ginxlogRecompressDataLeaf *) payload;
+ (ginxlogRecompressDataLeaf *) payload;
if (xl_info & XLR_BKP_BLOCK(0))
appendStringInfo(buf, " (full page image)");
@@ -129,10 +129,11 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec)
else
{
ginxlogInsertDataInternal *insertData = (ginxlogInsertDataInternal *) payload;
+
appendStringInfo(buf, " pitem: %u-%u/%u",
- PostingItemGetBlockNumber(&insertData->newitem),
- ItemPointerGetBlockNumber(&insertData->newitem.key),
- ItemPointerGetOffsetNumber(&insertData->newitem.key));
+ PostingItemGetBlockNumber(&insertData->newitem),
+ ItemPointerGetBlockNumber(&insertData->newitem.key),
+ ItemPointerGetOffsetNumber(&insertData->newitem.key));
}
}
break;
@@ -144,8 +145,8 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec)
desc_node(buf, ((ginxlogSplit *) rec)->node, ((ginxlogSplit *) rec)->lblkno);
appendStringInfo(buf, " isrootsplit: %c", (((ginxlogSplit *) rec)->flags & GIN_SPLIT_ROOT) ? 'T' : 'F');
appendStringInfo(buf, " isdata: %c isleaf: %c",
- (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F',
- (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F');
+ (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F',
+ (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F');
}
break;
case XLOG_GIN_VACUUM_PAGE:
@@ -155,6 +156,7 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec)
case XLOG_GIN_VACUUM_DATA_LEAF_PAGE:
{
ginxlogVacuumDataLeafPage *xlrec = (ginxlogVacuumDataLeafPage *) rec;
+
appendStringInfoString(buf, "Vacuum data leaf page, ");
desc_node(buf, xlrec->node, xlrec->blkno);
if (xl_info & XLR_BKP_BLOCK(0))
diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c
index af7663b8ca..a3c746f1a8 100644
--- a/src/backend/access/rmgrdesc/nbtdesc.c
+++ b/src/backend/access/rmgrdesc/nbtdesc.c
@@ -140,7 +140,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_btree_unlink_page *xlrec = (xl_btree_unlink_page *) rec;
appendStringInfo(buf, "unlink_page: rel %u/%u/%u; ",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
appendStringInfo(buf, "dead %u; left %u; right %u; btpo_xact %u; ",
xlrec->deadblk, xlrec->leftsib, xlrec->rightsib, xlrec->btpo_xact);
appendStringInfo(buf, "leaf %u; leafleft %u; leafright %u; topparent %u",

diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index 48f32cda24..c08d211104 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -25,7 +25,7 @@
/*
* SPPageDesc tracks all info about a page we are inserting into. In some
* situations it actually identifies a tuple, or even a specific node within
- * an inner tuple. But any of the fields can be invalid. If the buffer
+ * an inner tuple. But any of the fields can be invalid. If the buffer
* field is valid, it implies we hold pin and exclusive lock on that buffer.
* The page pointer should be valid exactly when buffer is.
*/
@@ -249,7 +249,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
else
{
/*
- * Tuple must be inserted into existing chain. We mustn't change the
+ * Tuple must be inserted into existing chain. We mustn't change the
* chain's head address, but we don't need to chase the entire chain
* to put the tuple at the end; we can insert it second.
*
@@ -814,7 +814,7 @@ doPickSplit(Relation index, SpGistState *state,
* We may not actually insert new tuple because another picksplit may be
* necessary due to too large value, but we will try to allocate enough
* space to include it; and in any case it has to be included in the input
- * for the picksplit function. So don't increment nToInsert yet.
+ * for the picksplit function. So don't increment nToInsert yet.
*/
in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@@ -872,7 +872,7 @@ doPickSplit(Relation index, SpGistState *state,
/*
* Check to see if the picksplit function failed to separate the values,
* ie, it put them all into the same child node. If so, select allTheSame
- * mode and create a random split instead. See comments for
+ * mode and create a random split instead. See comments for
* checkAllTheSame as to why we need to know if the new leaf tuples could
* fit on one page.
*/
@@ -1037,7 +1037,7 @@ doPickSplit(Relation index, SpGistState *state,
&xlrec.initDest);
/*
- * Attempt to assign node groups to the two pages. We might fail to
+ * Attempt to assign node groups to the two pages. We might fail to
* do so, even if totalLeafSizes is less than the available space,
* because we can't split a group across pages.
*/
@@ -1917,7 +1917,7 @@ spgdoinsert(Relation index, SpGistState *state,
if (current.blkno == InvalidBlockNumber)
{
/*
- * Create a leaf page. If leafSize is too large to fit on a page,
+ * Create a leaf page. If leafSize is too large to fit on a page,
* we won't actually use the page yet, but it simplifies the API
* for doPickSplit to always have a leaf page at hand; so just
* quietly limit our request to a page size.
@@ -2120,7 +2120,7 @@ spgdoinsert(Relation index, SpGistState *state,
out.result.addNode.nodeLabel);
/*
- * Retry insertion into the enlarged node. We assume that
+ * Retry insertion into the enlarged node. We assume that
* we'll get a MatchNode result this time.
*/
goto process_inner_tuple;
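Returning to the chain-insertion comment earlier in this file (addLeafTuple): inserting "second" keeps the chain head's offset stable. A hedged sketch of the trick; the field names and the SpGistPageAddNewItem signature are recalled from the SP-GiST sources, so treat as illustrative:

    static void
    add_second_in_chain(SpGistState *state, Page page,
                        OffsetNumber head_offnum, SpGistLeafTuple newTuple)
    {
        SpGistLeafTuple head = (SpGistLeafTuple)
            PageGetItem(page, PageGetItemId(page, head_offnum));
        OffsetNumber offnum;

        /* New tuple takes over the head's current successor ... */
        newTuple->nextOffset = head->nextOffset;
        offnum = SpGistPageAddNewItem(state, page, (Item) newTuple,
                                      newTuple->size, NULL, false);
        /* ... and the head links to it; the head's offset never changes */
        head->nextOffset = offnum;
    }

The real addLeafTuple is more careful than this: adding an item can shuffle the page, so the head pointer may need re-fetching before it is updated.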
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index 2b1b49348c..a4408f03bd 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -163,7 +163,7 @@ spgbuildempty(PG_FUNCTION_ARGS)
page = (Page) palloc(BLCKSZ);
SpGistInitMetapage(page);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
(char *) page, true);
@@ -232,7 +232,7 @@ spginsert(PG_FUNCTION_ARGS)
/*
* We might have to repeat spgdoinsert() multiple times, if conflicts
* occur with concurrent insertions. If so, reset the insertCtx each time
- * to avoid cumulative memory consumption. That means we also have to
+ * to avoid cumulative memory consumption. That means we also have to
* redo initSpGistState(), but it's cheap enough not to matter.
*/
while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull))
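The retry loop that comment describes, abridged from the surrounding function: each failed attempt throws away everything it allocated by resetting the private context, then redoes the cheap state setup (a sketch; the 9.4-era five-argument AllocSetContextCreate is assumed):

    MemoryContext insertCtx;
    MemoryContext oldCtx;
    SpGistState spgstate;

    insertCtx = AllocSetContextCreate(CurrentMemoryContext,
                                      "SP-GiST insert temporary context",
                                      ALLOCSET_DEFAULT_MINSIZE,
                                      ALLOCSET_DEFAULT_INITSIZE,
                                      ALLOCSET_DEFAULT_MAXSIZE);
    oldCtx = MemoryContextSwitchTo(insertCtx);
    initSpGistState(&spgstate, index);

    while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull))
    {
        MemoryContextReset(insertCtx);      /* drop this attempt's garbage */
        initSpGistState(&spgstate, index);  /* cheap to redo, per comment */
    }

    MemoryContextSwitchTo(oldCtx);
    MemoryContextDelete(insertCtx);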
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 0a1e09c51e..35cc41b3aa 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -103,7 +103,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
* Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
*
* The point here is to eliminate null-related considerations from what the
- * opclass consistent functions need to deal with. We assume all SPGiST-
+ * opclass consistent functions need to deal with. We assume all SPGiST-
* indexable operators are strict, so any null RHS value makes the scan
* condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL
* conditions; their effect is reflected into searchNulls/searchNonNulls.
@@ -600,7 +600,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
if (so->want_itup)
{
/*
- * Reconstruct desired IndexTuple. We have to copy the datum out of
+ * Reconstruct desired IndexTuple. We have to copy the datum out of
* the temp context anyway, so we may as well create the tuple here.
*/
so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,
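The strictness shortcut described in the first hunk of this file can be sketched with the real SK_* flag bits; the actual spgPrepareScanKeys additionally folds the null tests into searchNulls/searchNonNulls, which is omitted here:

    /* Return false if some ordinary qual compares against NULL. */
    static bool
    quals_satisfiable(IndexScanDesc scan)
    {
        int         i;

        for (i = 0; i < scan->numberOfKeys; i++)
        {
            ScanKey     skey = &scan->keyData[i];

            if (skey->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL))
                continue;       /* reflected into the two search flags */
            if (skey->sk_flags & SK_ISNULL)
                return false;   /* strict operator + NULL: never matches */
        }
        return true;
    }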
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index bcdd29362d..5b7a5a06a0 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -26,11 +26,11 @@
* In the worst case, an inner tuple in a text radix tree could have as many
* as 256 nodes (one for each possible byte value). Each node can take 16
* bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page
- * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
+ * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
* already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
* it is always possible to get "SPGiST inner tuple size exceeds maximum"
* if there are too many distinct next-byte values at a given place in the
@@ -327,7 +327,7 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
}
/*
- * Sort by label bytes so that we can group the values into nodes. This
+ * Sort by label bytes so that we can group the values into nodes. This
* also ensures that the nodes are ordered by label value, allowing the
* use of binary search in searchChar.
*/
@@ -377,7 +377,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
/*
* Reconstruct values represented at this tuple, including parent data,
- * prefix of this tuple if any, and the node label if any. in->level
+ * prefix of this tuple if any, and the node label if any. in->level
* should be the length of the previously reconstructed value, and the
* number of bytes added here is prefixSize or prefixSize + 1.
*
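Worked out for the default page size, the bound in the header comment of this file is 8192 - 256 * 16 - 100 = 3996 bytes, and it goes negative for 4kB pages, which is exactly the "no safe prefix length when BLCKSZ is less than 8K" caveat. A compilable restatement (the macro name is illustrative, not the one defined in this file):

    #include <assert.h>

    #define BLCKSZ_8K   8192
    #define BLCKSZ_4K   4096
    #define MAX_SAFE_PREFIX(blcksz) ((int) ((blcksz) - 256 * 16 - 100))

    int
    main(void)
    {
        assert(MAX_SAFE_PREFIX(BLCKSZ_8K) == 3996);     /* ample room */
        assert(MAX_SAFE_PREFIX(BLCKSZ_4K) < 0);         /* no safe length */
        return 0;
    }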
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 3cbad99e46..1a224ef7cc 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When requesting an inner page, if we get one with the wrong parity,
* we just release the buffer and try again. We will get a different page
- * because GetFreeIndexPage will have marked the page used in FSM. The page
+ * because GetFreeIndexPage will have marked the page used in FSM. The page
* is entered in our local lastUsedPages cache, so there's some hope of
* making use of it later in this session, but otherwise we rely on VACUUM
* to eventually re-enter the page in FSM, making it available for recycling.
@@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When we return a buffer to the caller, the page is *not* entered into
* the lastUsedPages cache; we expect the caller will do so after it's taken
- * whatever space it will use. This is because after the caller has used up
+ * whatever space it will use. This is because after the caller has used up
* some space, the page might have less space than whatever was cached already
* so we'd rather not trash the old cache entry.
*/
@@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
/*
* If possible, increase the space request to include relation's
- * fillfactor. This ensures that when we add unrelated tuples to a page,
+ * fillfactor. This ensures that when we add unrelated tuples to a page,
* we try to keep 100-fillfactor% available for adding tuples that are
* related to the ones already on it. But fillfactor mustn't cause an
* error for requests that would otherwise be legal.
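A hedged sketch of that adjustment; SpGistGetBuffer is the real home of this logic, and the rounding and cap below are assumptions rather than the exact code:

    /* Pad the request so ~(100 - fillfactor)% of the page stays free. */
    static Size
    pad_request_for_fillfactor(Size needSpace, int fillfactor)
    {
        Size        padded;
        Size        limit = BLCKSZ - SizeOfPageHeaderData;

        padded = needSpace + (BLCKSZ * (100 - fillfactor)) / 100;
        return Min(padded, limit);  /* mustn't make legal requests fail */
    }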
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 633cf7aeae..19a461be41 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -211,7 +211,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* Figure out exactly what we have to do. We do this separately from
* actually modifying the page, mainly so that we have a representation
* that can be dumped into WAL and then the replay code can do exactly
- * the same thing. The output of this step consists of six arrays
+ * the same thing. The output of this step consists of six arrays
* describing four kinds of operations, to be performed in this order:
*
* toDead[]: tuple numbers to be replaced with DEAD tuples
@@ -287,7 +287,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else
{
/*
- * Second or later live tuple. Arrange to re-chain it to the
+ * Second or later live tuple. Arrange to re-chain it to the
* previous live one, if there was a gap.
*/
if (interveningDeletable)
diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c
index 1689324f23..cc0184d174 100644
--- a/src/backend/access/spgist/spgxlog.c
+++ b/src/backend/access/spgist/spgxlog.c
@@ -41,7 +41,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
}
/*
- * Add a leaf tuple, or replace an existing placeholder tuple. This is used
+ * Add a leaf tuple, or replace an existing placeholder tuple. This is used
* to replay SpGistPageAddNewItem() operations. If the offset points at an
* existing tuple, it had better be a placeholder tuple.
*/
@@ -462,7 +462,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
}
/*
- * Update parent downlink. Since parent could be in either of the
+ * Update parent downlink. Since parent could be in either of the
* previous two buffers, it's a bit tricky to determine which BKP bit
* applies.
*/
@@ -799,7 +799,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
bbi++;
/*
- * Now we can release the leaf-page locks. It's okay to do this before
+ * Now we can release the leaf-page locks. It's okay to do this before
* updating the parent downlink.
*/
if (BufferIsValid(srcBuffer))
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 0eadd776af..27ca4c6567 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -11,15 +11,15 @@
* log can be broken into relatively small, independent segments.
*
* XLOG interactions: this module generates an XLOG record whenever a new
- * CLOG page is initialized to zeroes. Other writes of CLOG come from
+ * CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. For synchronous
+ * on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
- * XLOG that far and satisfy the WAL rule. We don't have to worry about this
+ * XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
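The bookkeeping that last rule calls for reduces to "remember the newest commit-record LSN touching the page, and flush XLOG that far before the page is written". A hedged, per-page reduction (the real code keeps one LSN per small group of xids in the SLRU's group_lsn[] array):

    static void
    remember_async_commit_lsn(XLogRecPtr *page_lsn, XLogRecPtr commit_lsn)
    {
        if (*page_lsn < commit_lsn)
            *page_lsn = commit_lsn;     /* keep only the newest LSN */
    }

    static void
    flush_xlog_before_page_write(XLogRecPtr page_lsn)
    {
        if (!XLogRecPtrIsInvalid(page_lsn))
            XLogFlush(page_lsn);        /* "write xlog before data" */
    }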
@@ -105,7 +105,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* in the tree of xid. In various cases nsubxids may be zero.
*
* lsn must be the WAL location of the commit record when recording an async
- * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
+ * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
@@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
* Testing during the PostgreSQL 9.2 development cycle revealed that on a
* large multi-processor system, it was possible to have more CLOG page
* requests in flight at one time than the number of CLOG buffers which existed
- * at that time, which was hardcoded to 8. Further testing revealed that
+ * at that time, which was hardcoded to 8. Further testing revealed that
* performance dropped off with more than 32 CLOG buffers, possibly because
* the linear buffer search algorithm doesn't scale well.
*
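The sizing rule that testing led to, as it stands in this era of clog.c (reproduced from memory, so verify against the tree): scale with shared_buffers between a floor of 4 and a ceiling of 32 buffers.

    Size
    CLOGShmemBuffers(void)
    {
        return Min(32, Max(4, NBuffers / 512));
    }

With the default 128MB of shared_buffers (NBuffers = 16384) this reaches the 32-buffer ceiling; only very small caches fall back toward the floor of 4.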
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 459f59cb4e..9da22c8bdf 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -5,7 +5,7 @@
*
* The pg_multixact manager is a pg_clog-like manager that stores an array of
* MultiXactMember for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. Each MultiXactMember is comprised of a
+ * shared-row-lock implementation. Each MultiXactMember is comprised of a
* TransactionId and a set of flag bits. The name is a bit historical:
* originally, a MultiXactId consisted of more than one TransactionId (except
* in rare corner cases), hence "multi". Nowadays, however, it's perfectly
@@ -18,7 +18,7 @@
*
* We use two SLRU areas, one for storing the offsets at which the data
* starts for each MultiXactId in the other one. This trick allows us to
- * store variable length arrays of TransactionIds. (We could alternatively
+ * store variable length arrays of TransactionIds. (We could alternatively
* use one area containing counts and TransactionIds, with valid MultiXactId
* values pointing at slots containing counts; but that way seems less robust
* since it would get completely confused if someone inquired about a bogus
@@ -38,7 +38,7 @@
*
* Like clog.c, and unlike subtrans.c, we have to preserve state across
* crashes and ensure that MXID and offset numbering increases monotonically
- * across a crash. We do this in the same way as it's done for transaction
+ * across a crash. We do this in the same way as it's done for transaction
* IDs: the WAL record is guaranteed to contain evidence of every MXID we
* could need to worry about, and we just make sure that at the end of
* replay, the next-MXID and next-offset counters are at least as large as
@@ -50,7 +50,7 @@
* The minimum value in each database is stored in pg_database, and the
* global minimum is part of pg_control. Any vacuum that is able to
* advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments. When new multixactid
+ * and uses this value to truncate older segments. When new multixactid
* values are to be created, care is taken that the counter does not
* fall within the wraparound horizon considering the global minimum value.
*
@@ -85,13 +85,13 @@
/*
- * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
+ * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* used everywhere else in Postgres.
*
* Note: because MultiXactOffsets are 32 bits and wrap around at 0xFFFFFFFF,
* MultiXact page numbering also wraps around at
* 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE, and segment numbering at
- * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_PAGES_PER_SEGMENT. We need
+ * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_PAGES_PER_SEGMENT. We need
* take no explicit notice of that fact in this module, except when comparing
* segment and page numbers in TruncateMultiXact (see
* MultiXactOffsetPagePrecedes).
@@ -110,7 +110,7 @@
* additional flag bits for each TransactionId. To do this without getting
* into alignment issues, we store four bytes of flags, and then the
* corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group", and
- * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
+ * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* per page. This wastes 12 bytes per page, but that's OK -- simplicity (and
* performance) trumps space efficiency here.
*
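The 409/12 figures follow directly from the group layout; a worked restatement with illustrative macro names (the real defines in this file are similar MULTIXACT_* macros):

    #define FLAG_BYTES_PER_GROUP    4
    #define MEMBERS_PER_GROUP       4
    #define XID_BYTES_PER_GROUP     (MEMBERS_PER_GROUP * sizeof(TransactionId))
    #define GROUP_SIZE              (FLAG_BYTES_PER_GROUP + XID_BYTES_PER_GROUP)

    /* With 8kB pages: 8192 / 20 = 409 groups; 8192 - 409 * 20 = 12 wasted. */
    #define GROUPS_PER_PAGE         (BLCKSZ / GROUP_SIZE)
    #define WASTED_BYTES_PER_PAGE   (BLCKSZ % GROUP_SIZE)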
@@ -161,7 +161,7 @@ static SlruCtlData MultiXactMemberCtlData;
#define MultiXactMemberCtl (&MultiXactMemberCtlData)
/*
- * MultiXact state shared across all backends. All this state is protected
+ * MultiXact state shared across all backends. All this state is protected
* by MultiXactGenLock. (We also use MultiXactOffsetControlLock and
* MultiXactMemberControlLock to guard accesses to the two sets of SLRU
* buffers. For concurrency's sake, we avoid holding more than one of these
@@ -179,7 +179,7 @@ typedef struct MultiXactStateData
MultiXactId lastTruncationPoint;
/*
- * oldest multixact that is still on disk. Anything older than this
+ * oldest multixact that is still on disk. Anything older than this
* should not be consulted.
*/
MultiXactId oldestMultiXactId;
@@ -269,8 +269,8 @@ typedef struct mXactCacheEnt
} mXactCacheEnt;
#define MAX_CACHE_ENTRIES 256
-static dlist_head MXactCache = DLIST_STATIC_INIT(MXactCache);
-static int MXactCacheMembers = 0;
+static dlist_head MXactCache = DLIST_STATIC_INIT(MXactCache);
+static int MXactCacheMembers = 0;
static MemoryContext MXactContext = NULL;
#ifdef MULTIXACT_DEBUG
@@ -528,7 +528,7 @@ MultiXactIdIsRunning(MultiXactId multi)
/*
* This could be made faster by having another entry point in procarray.c,
- * walking the PGPROC array only once for all the members. But in most
+ * walking the PGPROC array only once for all the members. But in most
* cases nmembers should be small enough that it doesn't much matter.
*/
for (i = 0; i < nmembers; i++)
@@ -579,9 +579,9 @@ MultiXactIdSetOldestMember(void)
* back. Which would be wrong.
*
* Note that a shared lock is sufficient, because it's enough to stop
- * someone from advancing nextMXact; and nobody else could be trying to
- * write to our OldestMember entry, only reading (and we assume storing
- * it is atomic.)
+ * someone from advancing nextMXact; and nobody else could be trying
+ * to write to our OldestMember entry, only reading (and we assume
+ * storing it is atomic.)
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -615,7 +615,7 @@ MultiXactIdSetOldestMember(void)
* The value to set is the oldest of nextMXact and all the valid per-backend
* OldestMemberMXactId[] entries. Because of the locking we do, we can be
* certain that no subsequent call to MultiXactIdSetOldestMember can set
- * an OldestMemberMXactId[] entry older than what we compute here. Therefore
+ * an OldestMemberMXactId[] entry older than what we compute here. Therefore
* there is no live transaction, now or later, that can be a member of any
* MultiXactId older than the OldestVisibleMXactId we compute here.
*/
@@ -751,7 +751,7 @@ MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
* heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
* an XLOG record that must follow ours. The normal LSN interlock between
* the data page and that XLOG record will ensure that our XLOG record
- * reaches disk first. If the SLRU members/offsets data reaches disk
+ * reaches disk first. If the SLRU members/offsets data reaches disk
* sooner than the XLOG record, we do not care because we'll overwrite it
* with zeroes unless the XLOG record is there too; see notes at top of
* this file.
@@ -882,7 +882,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
* GetNewMultiXactId
* Get the next MultiXactId.
*
- * Also, reserve the needed amount of space in the "members" area. The
+ * Also, reserve the needed amount of space in the "members" area. The
* starting offset of the reserved space is returned in *offset.
*
* This may generate XLOG records for expansion of the offsets and/or members
@@ -916,7 +916,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
/*----------
* Check to see if it's safe to assign another MultiXactId. This protects
- * against catastrophic data loss due to multixact wraparound. The basic
+ * against catastrophic data loss due to multixact wraparound. The basic
* rules are:
*
* If we're past multiVacLimit, start trying to force autovacuum cycles.
@@ -930,7 +930,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
{
/*
* For safety's sake, we release MultiXactGenLock while sending
- * signals, warnings, etc. This is not so much because we care about
+ * signals, warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
* possibility of deadlock while doing get_database_name(). First,
* copy all the shared values we'll need in this path.
@@ -981,8 +981,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errmsg_plural("database \"%s\" must be vacuumed before %u more MultiXactId is used",
"database \"%s\" must be vacuumed before %u more MultiXactIds are used",
multiWrapLimit - result,
- oldest_datname,
- multiWrapLimit - result),
+ oldest_datname,
+ multiWrapLimit - result),
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
else
@@ -990,8 +990,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used",
"database with OID %u must be vacuumed before %u more MultiXactIds are used",
multiWrapLimit - result,
- oldest_datoid,
- multiWrapLimit - result),
+ oldest_datoid,
+ multiWrapLimit - result),
errhint("Execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
}
@@ -1036,7 +1036,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
* until after file extension has succeeded!
*
* We don't care about MultiXactId wraparound here; it will be handled by
- * the next iteration. But note that nextMXact may be InvalidMultiXactId
+ * the next iteration. But note that nextMXact may be InvalidMultiXactId
* or the first value on a segment-beginning page after this routine
* exits, so anyone else looking at the variable must be prepared to deal
* with either case. Similarly, nextOffset may be zero, but we won't use
@@ -1114,16 +1114,16 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
+ * tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. This raises a
+ * seen, it implies undetected ID wraparound has occurred. This raises a
* hard error.
*
* Shared lock is enough here since we aren't modifying any global state.
- * Acquire it just long enough to grab the current counter values. We may
+ * Acquire it just long enough to grab the current counter values. We may
* need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -1151,12 +1151,12 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter as
+ * and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there is
- * no next one to look at. In this case the nextOffset value we just
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in: that
@@ -1167,11 +1167,11 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
- * have this case. We sleep for a bit and try again.
+ * have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
@@ -1297,8 +1297,8 @@ retry:
/*
* MultiXactHasRunningRemoteMembers
- * Does the given multixact have still-live members from
- * transactions other than our own?
+ * Does the given multixact have still-live members from
+ * transactions other than our own?
*/
bool
MultiXactHasRunningRemoteMembers(MultiXactId multi)
@@ -1694,7 +1694,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info,
/*
* Initialization of shared memory for MultiXact. We use two SLRU areas,
- * thus double memory. Also, reserve space for the shared MultiXactState
+ * thus double memory. Also, reserve space for the shared MultiXactState
* struct and the per-backend MultiXactId arrays (two of those, too).
*/
Size
@@ -1754,7 +1754,7 @@ MultiXactShmemInit(void)
/*
* This func must be called ONCE on system install. It creates the initial
- * MultiXact segments. (The MultiXacts directories are assumed to have been
+ * MultiXact segments. (The MultiXacts directories are assumed to have been
* created by initdb, and MultiXactShmemInit must have been called already.)
*/
void
@@ -1849,7 +1849,7 @@ MaybeExtendOffsetSlru(void)
if (!SimpleLruDoesPhysicalPageExist(MultiXactOffsetCtl, pageno))
{
- int slotno;
+ int slotno;
/*
* Fortunately for us, SimpleLruWritePage is already prepared to deal
@@ -1925,7 +1925,7 @@ TrimMultiXact(void)
MultiXactOffsetCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current offsets page. See notes in
+ * Zero out the remainder of the current offsets page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MultiXactIdToOffsetEntry(multi);
@@ -1955,7 +1955,7 @@ TrimMultiXact(void)
MultiXactMemberCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current members page. See notes in
+ * Zero out the remainder of the current members page. See notes in
* TrimCLOG() for motivation.
*/
flagsoff = MXOffsetToFlagsOffset(offset);
@@ -2097,7 +2097,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/*
* We'll start complaining loudly when we get within 10M multis of the
- * stop point. This is kind of arbitrary, but if you let your gas gauge
+ * stop point. This is kind of arbitrary, but if you let your gas gauge
* get down to 1% of full, would you be looking for the next gas station?
* We need to be fairly liberal about this number because there are lots
* of scenarios where most transactions are done by automatic clients that
@@ -2172,8 +2172,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
(errmsg_plural("database \"%s\" must be vacuumed before %u more MultiXactId is used",
"database \"%s\" must be vacuumed before %u more MultiXactIds are used",
multiWrapLimit - curMulti,
- oldest_datname,
- multiWrapLimit - curMulti),
+ oldest_datname,
+ multiWrapLimit - curMulti),
errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
else
@@ -2181,8 +2181,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
(errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used",
"database with OID %u must be vacuumed before %u more MultiXactIds are used",
multiWrapLimit - curMulti,
- oldest_datoid,
- multiWrapLimit - curMulti),
+ oldest_datoid,
+ multiWrapLimit - curMulti),
errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n"
"You might also need to commit or roll back old prepared transactions.")));
}
@@ -2375,16 +2375,16 @@ GetOldestMultiXactId(void)
/*
* SlruScanDirectory callback.
- * This callback deletes segments that are outside the range determined by
- * the given page numbers.
+ * This callback deletes segments that are outside the range determined by
+ * the given page numbers.
*
* Both range endpoints are exclusive (that is, segments containing any of
* those pages are kept.)
*/
typedef struct MembersLiveRange
{
- int rangeStart;
- int rangeEnd;
+ int rangeStart;
+ int rangeEnd;
} MembersLiveRange;
static bool
@@ -2392,15 +2392,15 @@ SlruScanDirCbRemoveMembers(SlruCtl ctl, char *filename, int segpage,
void *data)
{
MembersLiveRange *range = (MembersLiveRange *) data;
- MultiXactOffset nextOffset;
+ MultiXactOffset nextOffset;
if ((segpage == range->rangeStart) ||
(segpage == range->rangeEnd))
- return false; /* easy case out */
+ return false; /* easy case out */
/*
- * To ensure that no segment is spuriously removed, we must keep track
- * of new segments added since the start of the directory scan; to do this,
+ * To ensure that no segment is spuriously removed, we must keep track of
+ * new segments added since the start of the directory scan; to do this,
* we update our end-of-range point as we run.
*
* As an optimization, we can skip looking at shared memory if we know for
@@ -2473,10 +2473,10 @@ void
TruncateMultiXact(MultiXactId oldestMXact)
{
MultiXactOffset oldestOffset;
- MultiXactOffset nextOffset;
+ MultiXactOffset nextOffset;
mxtruncinfo trunc;
MultiXactId earliest;
- MembersLiveRange range;
+ MembersLiveRange range;
/*
* Note we can't just plow ahead with the truncation; it's possible that
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index b90db9a417..1f9a100da8 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
- * must be held to examine or modify any shared state. A process that is
+ * must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@@ -34,7 +34,7 @@
* could have happened while we didn't have the lock).
*
* As with the regular buffer manager, it is possible for another process
- * to re-dirty a page that is currently being written out. This is handled
+ * to re-dirty a page that is currently being written out. This is handled
* by re-setting the page's page_dirty flag.
*
*
@@ -96,7 +96,7 @@ typedef struct SlruFlushData *SlruFlush;
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict. The
+ * worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
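The hunk above shows only the macro's first line; its body (reconstructed from memory of slru.c, so treat as a hedged sketch) is what makes the unlocked usage tolerable: a slot's count is bumped only when it lags the global counter, so a delayed process can at worst store a slightly stale value:

    #define SlruRecentlyUsed(shared, slotno) \
        do { \
            int     new_lru_count = (shared)->cur_lru_count; \
            if (new_lru_count != (shared)->page_lru_count[slotno]) \
            { \
                (shared)->cur_lru_count = ++new_lru_count; \
                (shared)->page_lru_count[slotno] = new_lru_count; \
            } \
        } while (0)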
@@ -481,7 +481,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
- * the write). However, we *do* attempt a fresh write even if the page
+ * the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
@@ -634,7 +634,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* In a crash-and-restart situation, it's possible for us to receive
* commands to set the commit status of transactions whose bits are in
* already-truncated segments of the commit log (see notes in
- * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
+ * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
* where the file doesn't exist, and return zeroes instead.
*/
fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@@ -964,9 +964,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If we find any EMPTY slot, just select that one. Else choose a
- * victim page to replace. We normally take the least recently used
+ * victim page to replace. We normally take the least recently used
* valid page, but we will never take the slot containing
- * latest_page_number, even if it appears least recently used. We
+ * latest_page_number, even if it appears least recently used. We
* will select a slot that is already I/O busy only if there is no
* other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is
@@ -1041,7 +1041,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If all pages (except possibly the latest one) are I/O busy, we'll
- * have to wait for an I/O to complete and then retry. In that
+ * have to wait for an I/O to complete and then retry. In that
* unhappy case, we choose to wait for the I/O on the least recently
* used slot, on the assumption that it was likely initiated first of
* all the I/Os in progress and may therefore finish first.
@@ -1193,7 +1193,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
- * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
+ * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/
@@ -1293,7 +1293,7 @@ SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data)
cldir = AllocateDir(ctl->Dir);
while ((clde = ReadDir(cldir, ctl->Dir)) != NULL)
{
- size_t len;
+ size_t len;
len = strlen(clde->d_name);
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 2f5cfa0d22..bebaee9216 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -5,7 +5,7 @@
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
- * nested transactions implementation. A main transaction has a parent
+ * nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
@@ -191,7 +191,7 @@ SUBTRANSShmemInit(void)
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
- * since slru.c would create it on first write anyway. But we may as well
+ * since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void
diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c
index 319a218541..2d27b3ae31 100644
--- a/src/backend/access/transam/timeline.c
+++ b/src/backend/access/transam/timeline.c
@@ -66,7 +66,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
* Try to read a timeline's history file.
*
* If successful, return the list of component TLIs (the given TLI followed by
- * its ancestor TLIs). If we can't find the history file, assume that the
+ * its ancestor TLIs). If we can't find the history file, assume that the
* timeline has no parents, and return a list of just the specified timeline
* ID.
*/
@@ -150,7 +150,7 @@ readTimeLineHistory(TimeLineID targetTLI)
if (nfields != 3)
ereport(FATAL,
(errmsg("syntax error in history file: %s", fline),
- errhint("Expected a transaction log switchpoint location.")));
+ errhint("Expected a transaction log switchpoint location.")));
if (result && tli <= lasttli)
ereport(FATAL,
@@ -281,7 +281,7 @@ findNewestTimeLine(TimeLineID startTLI)
* reason: human-readable explanation of why the timeline was switched
*
* Currently this is only used at the end of recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
void
@@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
/*
* Prefer link() to rename() here just to be really sure that we don't
- * overwrite an existing file. However, there shouldn't be one, so
+ * overwrite an existing file. However, there shouldn't be one, so
* rename() is an acceptable substitute except for the truly paranoid.
*/
#if HAVE_WORKING_LINK
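A hedged sketch of the pattern named above, close to the surrounding function but abridged (tmppath and path are the function's locals; error handling condensed): link() fails with EEXIST rather than replacing an existing file, which rename() would do silently.

    #if HAVE_WORKING_LINK
        if (link(tmppath, path) < 0)        /* EEXIST: never overwrites */
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not link file \"%s\" to \"%s\": %m",
                            tmppath, path)));
        unlink(tmppath);
    #else
        if (rename(tmppath, path) < 0)      /* may silently replace */
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not rename file \"%s\" to \"%s\": %m",
                            tmppath, path)));
    #endif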
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 8965319551..12982d9b55 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -145,7 +145,7 @@ TransactionIdDidCommit(TransactionId transactionId)
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
- * zeroed. Since this case should not happen under normal conditions, it
+ * zeroed. Since this case should not happen under normal conditions, it
* seems reasonable to emit a WARNING for it.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
@@ -301,7 +301,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2)
{
/*
* If either ID is a permanent XID then we can just do unsigned
- * comparison. If both are normal, do a modulo-2^32 comparison.
+ * comparison. If both are normal, do a modulo-2^32 comparison.
*/
int32 diff;
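The function this hunk sits in is short enough to give whole (reconstructed from memory of transam.c; the TransactionIdIsNormal test is what routes permanent XIDs to the plain comparison):

    bool
    TransactionIdPrecedes(TransactionId id1, TransactionId id2)
    {
        int32       diff;

        if (!TransactionIdIsNormal(id1) || !TransactionIdIsNormal(id2))
            return (id1 < id2);     /* permanent XIDs: plain unsigned */

        diff = (int32) (id1 - id2); /* modulo-2^32 ranking */
        return (diff < 0);
    }

For example, with id1 = 3 and id2 = 4294967290, diff = (int32) (3 - 4294967290) = 9, so XID 3 is treated as the newer of the two: exactly the post-wraparound behavior the modulo comparison exists to provide.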
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 66dbf58456..70ca6ab67d 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -443,7 +443,7 @@ LockGXact(const char *gid, Oid user)
/*
* Note: it probably would be possible to allow committing from
* another database; but at the moment NOTIFY is known not to work and
- * there may be some other issues as well. Hence disallow until
+ * there may be some other issues as well. Hence disallow until
* someone gets motivated to make it work.
*/
if (MyDatabaseId != proc->databaseId)
@@ -1031,7 +1031,7 @@ EndPrepare(GlobalTransaction gxact)
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
- * the write --- then, WAL replay should repair the inconsistency. The
+ * the write --- then, WAL replay should repair the inconsistency. The
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
@@ -1069,7 +1069,7 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks
+ * Mark the prepared transaction as valid. As soon as xact.c marks
* MyPgXact as not running our XID (which it will do immediately after
* this function returns), others can commit/rollback the xact.
*
@@ -1336,7 +1336,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it can be recycled
- * properly later. It is still locked by our XID so it won't go away yet.
+ * properly later. It is still locked by our XID so it won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
*/
@@ -1540,7 +1540,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
- * to open its state file. We handle this by special-casing ENOENT
+ * to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
@@ -1621,7 +1621,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* We throw away any prepared xacts with main XID beyond nextXid --- if any
* are present, it suggests that the DBA has done a PITR recovery to an
- * earlier point in time without cleaning out pg_twophase. We dare not
+ * earlier point in time without cleaning out pg_twophase. We dare not
* try to recover such prepared xacts since they likely depend on database
* state that doesn't exist now.
*
@@ -1713,7 +1713,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
* XID, and they may force us to advance nextXid.
*
* We don't expect anyone else to modify nextXid, hence we don't
- * need to hold a lock while examining it. We still acquire the
+ * need to hold a lock while examining it. We still acquire the
* lock to modify it, though.
*/
subxids = (TransactionId *)
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 51b6b1a302..7013fb894b 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -39,7 +39,7 @@ VariableCache ShmemVariableCache = NULL;
*
* Note: when this is called, we are actually already inside a valid
* transaction, since XIDs are now not allocated until the transaction
- * does something. So it is safe to do a database lookup if we want to
+ * does something. So it is safe to do a database lookup if we want to
* issue a warning about XID wrap.
*/
TransactionId
@@ -165,20 +165,20 @@ GetNewTransactionId(bool isSubXact)
/*
* Now advance the nextXid counter. This must not happen until after we
* have successfully completed ExtendCLOG() --- if that routine fails, we
- * want the next incoming transaction to try it again. We cannot assign
+ * want the next incoming transaction to try it again. We cannot assign
* more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
* We must store the new XID into the shared ProcArray before releasing
- * XidGenLock. This ensures that every active XID older than
+ * XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
* are relying on fetch/store of an xid to be atomic, else other backends
- * might see a partially-set xid here. But holding both locks at once
+ * might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
* Note that readers of PGXACT xid fields should be careful to fetch the
@@ -289,7 +289,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
/*
* We'll start complaining loudly when we get within 10M transactions of
- * the stop point. This is kind of arbitrary, but if you let your gas
+ * the stop point. This is kind of arbitrary, but if you let your gas
* gauge get down to 1% of full, would you be looking for the next gas
* station? We need to be fairly liberal about this number because there
* are lots of scenarios where most transactions are done by automatic
@@ -390,7 +390,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
* We primarily check whether oldestXidDB is valid. The cases we have in
* mind are that that database was dropped, or the field was reset to zero
* by pg_resetxlog. In either case we should force recalculation of the
- * wrap limit. Also do it if oldestXid is old enough to be forcing
+ * wrap limit. Also do it if oldestXid is old enough to be forcing
* autovacuums or other actions; this ensures we update our state as soon
* as possible once extra overhead is being incurred.
*/
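The ordering those comments prescribe, reduced to a sketch; the real GetNewTransactionId() also extends pg_subtrans and handles subtransaction-cache overflow:

    ExtendCLOG(xid);                    /* must succeed before nextXid moves */
    TransactionIdAdvance(ShmemVariableCache->nextXid);
    MyPgXact->xid = xid;                /* publish while still holding XidGenLock */
    LWLockRelease(XidGenLock);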
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 9ee11f34f2..3e744097c7 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -270,7 +270,7 @@ static void CallSubXactCallbacks(SubXactEvent event,
SubTransactionId parentSubid);
static void CleanupTransaction(void);
static void CheckTransactionChain(bool isTopLevel, bool throwError,
- const char *stmtType);
+ const char *stmtType);
static void CommitTransaction(void);
static TransactionId RecordTransactionAbort(bool isSubXact);
static void StartTransaction(void);
@@ -450,7 +450,7 @@ AssignTransactionId(TransactionState s)
{
bool isSubXact = (s->parent != NULL);
ResourceOwner currentOwner;
- bool log_unknown_top = false;
+ bool log_unknown_top = false;
/* Assert that caller didn't screw up */
Assert(!TransactionIdIsValid(s->transactionId));
@@ -487,8 +487,8 @@ AssignTransactionId(TransactionState s)
/*
* When wal_level=logical, guarantee that a subtransaction's xid can only
- * be seen in the WAL stream if its toplevel xid has been logged
- * before. If necessary we log a xact_assignment record with fewer than
+ * be seen in the WAL stream if its toplevel xid has been logged before.
+ * If necessary we log a xact_assignment record with fewer than
* PGPROC_MAX_CACHED_SUBXIDS. Note that it is fine if didLogXid isn't set
* for a transaction even though it appears in a WAL record, we just might
* superfluously log something. That can happen when an xid is included
@@ -637,7 +637,7 @@ SubTransactionIsActive(SubTransactionId subxid)
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
- * for read-only purposes (ie, as a snapshot validity cutoff). See
+ * for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
CommandId
@@ -724,7 +724,7 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
/*
* We always say that BootstrapTransactionId is "not my transaction ID"
- * even when it is (ie, during bootstrap). Along with the fact that
+ * even when it is (ie, during bootstrap). Along with the fact that
* transam.c always treats BootstrapTransactionId as already committed,
* this causes the tqual.c routines to see all tuples as committed, which
* is what we need during bootstrap. (Bootstrap mode only inserts tuples,
@@ -866,7 +866,7 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
- * insulate AbortTransaction from out-of-memory scenarios. Like
+ * insulate AbortTransaction from out-of-memory scenarios. Like
* ErrorContext, we set it up with slow growth rate and a nonzero minimum
* size, so that space will be reserved immediately.
*/
@@ -969,7 +969,7 @@ AtSubStart_ResourceOwner(void)
Assert(s->parent != NULL);
/*
- * Create a resource owner for the subtransaction. We make it a child of
+ * Create a resource owner for the subtransaction. We make it a child of
* the immediate parent's resource owner.
*/
s->curTransactionOwner =
@@ -989,7 +989,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionCommit(void)
@@ -1034,7 +1034,7 @@ RecordTransactionCommit(void)
/*
* If we didn't create XLOG entries, we're done here; otherwise we
- * should flush those entries the same as a commit record. (An
+ * should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
* assigned is a sequence advance record due to nextval() --- we want
* to flush that to disk before reporting commit.)
@@ -1051,7 +1051,7 @@ RecordTransactionCommit(void)
BufmgrCommit();
/*
- * Mark ourselves as within our "commit critical section". This
+ * Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
* pg_clog. Without this, it is possible for the checkpoint to set
* REDO after the XLOG record but fail to flush the pg_clog update to
@@ -1059,7 +1059,7 @@ RecordTransactionCommit(void)
* crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
- * RecordTransactionAbort. That's because loss of a transaction abort
+ * RecordTransactionAbort. That's because loss of a transaction abort
* is noncritical; the presumption would be that it aborted, anyway.
*
* It's safe to change the delayChkpt flag of our own backend without
@@ -1168,15 +1168,15 @@ RecordTransactionCommit(void)
/*
* Check if we want to commit asynchronously. We can allow the XLOG flush
* to happen asynchronously if synchronous_commit=off, or if the current
- * transaction has not performed any WAL-logged operation. The latter
+ * transaction has not performed any WAL-logged operation. The latter
* case can arise if the current transaction wrote only to temporary
- * and/or unlogged tables. In case of a crash, the loss of such a
+ * and/or unlogged tables. In case of a crash, the loss of such a
* transaction will be irrelevant since temp tables will be lost anyway,
* and unlogged tables will be truncated. (Given the foregoing, you might
* think that it would be unnecessary to emit the XLOG record at all in
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
- * KnownAssignedXids machinery requires tracking every XID assignment. It
+ * KnownAssignedXids machinery requires tracking every XID assignment. It
* might be OK to skip it only when wal_level < hot_standby, but for now
* we don't.)
*
@@ -1423,7 +1423,7 @@ AtSubCommit_childXids(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@@ -1440,7 +1440,7 @@ RecordTransactionAbort(bool isSubXact)
/*
* If we haven't been assigned an XID, nobody will care whether we aborted
- * or not. Hence, we're done in that case. It does not matter if we have
+ * or not. Hence, we're done in that case. It does not matter if we have
* rels to delete (note that this routine is not responsible for actually
* deleting 'em). We cannot have any child XIDs, either.
*/
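For the top-level case, that early exit is essentially a two-liner; a sketch:

    xid = GetCurrentTransactionIdIfAny();
    if (!TransactionIdIsValid(xid))
        return InvalidTransactionId;    /* no XID assigned: nothing to record */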
@@ -1456,7 +1456,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
- * crash would be that we aborted, anyway. For the same reason, we don't
+ * crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@@ -1624,7 +1624,7 @@ AtSubAbort_childXids(void)
/*
* We keep the child-XID arrays in TopTransactionContext (see
- * AtSubCommit_childXids). This means we'd better free the array
+ * AtSubCommit_childXids). This means we'd better free the array
* explicitly at abort to avoid leakage.
*/
if (s->childXids != NULL)
@@ -1802,7 +1802,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
- * Advertise it in the proc array. We assume assignment of
+ * Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@@ -1899,7 +1899,7 @@ CommitTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -2104,7 +2104,7 @@ PrepareTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -2224,7 +2224,7 @@ PrepareTransaction(void)
XactLastRecEnd = 0;
/*
- * Let others know about no transaction in progress by me. This has to be
+ * Let others know about no transaction in progress by me. This has to be
* done *after* the prepared transaction has been marked valid, else
* someone may think it is unlocked and recyclable.
*/
@@ -2233,7 +2233,7 @@ PrepareTransaction(void)
/*
* This is all post-transaction cleanup. Note that if an error is raised
* here, it's too late to abort the transaction. This should be just
- * noncritical resource releasing. See notes in CommitTransaction.
+ * noncritical resource releasing. See notes in CommitTransaction.
*/
CallXactCallbacks(XACT_EVENT_PREPARE);
@@ -2411,7 +2411,7 @@ AbortTransaction(void)
ProcArrayEndTransaction(MyProc, latestXid);
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering. We can skip all of it if the transaction failed before
* creating a resource owner.
*/
@@ -2646,7 +2646,7 @@ CommitTransactionCommand(void)
/*
* Here we were in a perfectly good transaction block but the user
- * told us to ROLLBACK anyway. We have to abort the transaction
+ * told us to ROLLBACK anyway. We have to abort the transaction
* and then clean up.
*/
case TBLOCK_ABORT_PENDING:
@@ -2666,7 +2666,7 @@ CommitTransactionCommand(void)
/*
* We were just issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
+ * Start a subtransaction. (DefineSavepoint already did
* PushTransaction, so as to have someplace to put the SUBBEGIN
* state.)
*/
@@ -2870,7 +2870,7 @@ AbortCurrentTransaction(void)
break;
/*
- * Here, we failed while trying to COMMIT. Clean up the
+ * Here, we failed while trying to COMMIT. Clean up the
* transaction and return to idle state (we do not want to stay in
* the transaction).
*/
@@ -2932,7 +2932,7 @@ AbortCurrentTransaction(void)
/*
* If we failed while trying to create a subtransaction, clean up
- * the broken subtransaction and abort the parent. The same
+ * the broken subtransaction and abort the parent. The same
* applies if we get a failure while ending a subtransaction.
*/
case TBLOCK_SUBBEGIN:
@@ -3485,7 +3485,7 @@ UserAbortTransactionBlock(void)
break;
/*
- * We are inside a subtransaction. Mark everything up to top
+ * We are inside a subtransaction. Mark everything up to top
* level as exitable.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3619,7 +3619,7 @@ ReleaseSavepoint(List *options)
break;
/*
- * We are in a non-aborted subtransaction. This is the only valid
+ * We are in a non-aborted subtransaction. This is the only valid
* case.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3676,7 +3676,7 @@ ReleaseSavepoint(List *options)
/*
* Mark "commit pending" all subtransactions up to the target
- * subtransaction. The actual commits will happen when control gets to
+ * subtransaction. The actual commits will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -3775,7 +3775,7 @@ RollbackToSavepoint(List *options)
/*
* Mark "abort pending" all subtransactions up to the target
- * subtransaction. The actual aborts will happen when control gets to
+ * subtransaction. The actual aborts will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
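A rough sketch of that marking walk; the state names are from this file, but the loop shape and termination test here are illustrative only:

    while (xact != target)              /* assumed termination test */
    {
        xact->blockState = TBLOCK_SUBABORT_PENDING;
        xact = xact->parent;
    }
    xact->blockState = TBLOCK_SUBRESTART;   /* the named savepoint itself */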
@@ -4182,7 +4182,7 @@ CommitSubTransaction(void)
CommandCounterIncrement();
/*
- * Prior to 8.4 we marked subcommit in clog at this point. We now only
+ * Prior to 8.4 we marked subcommit in clog at this point. We now only
* perform that step, if required, as part of the atomic update of the
* whole transaction tree at top level commit or abort.
*/
@@ -4641,7 +4641,7 @@ TransStateAsString(TransState state)
/*
* xactGetCommittedChildren
*
- * Gets the list of committed children of the current transaction. The return
+ * Gets the list of committed children of the current transaction. The return
* value is the number of child transactions. *ptr is set to point to an
* array of TransactionIds. The array is allocated in TopTransactionContext;
* the caller should *not* pfree() it (this is a change from pre-8.4 code!).
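A caller-side sketch of that contract:

    TransactionId *children;
    int         nchildren;

    nchildren = xactGetCommittedChildren(&children);
    /* 'children' lives in TopTransactionContext: use it, never pfree() it */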
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index a636bb6d2b..3406fa5a29 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -101,7 +101,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it. This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
- * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
+ * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
@@ -190,7 +190,7 @@ static bool LocalHotStandbyActive = false;
* 0: unconditionally not allowed to insert XLOG
* -1: must check RecoveryInProgress(); disallow until it is false
* Most processes start with -1 and transition to 1 after seeing that recovery
- * is not in progress. But we can also force the value for special cases.
+ * is not in progress. But we can also force the value for special cases.
* The coding in XLogInsertAllowed() depends on the first two of these states
* being numerically the same as bool true and false.
*/
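Condensed into code, that state machine looks roughly like this; the caching transition is sketched here, and in reality happens as a side effect of RecoveryInProgress():

    if (LocalXLogInsertAllowed >= 0)
        return (bool) LocalXLogInsertAllowed;   /* 0 and 1 double as false/true */
    /* -1: recovery status must be (re)checked each time */
    return !RecoveryInProgress();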
@@ -223,7 +223,7 @@ static bool recoveryPauseAtTarget = true;
static TransactionId recoveryTargetXid;
static TimestampTz recoveryTargetTime;
static char *recoveryTargetName;
-static int min_recovery_apply_delay = 0;
+static int min_recovery_apply_delay = 0;
static TimestampTz recoveryDelayUntilTime;
/* options taken from recovery.conf for XLOG streaming */
@@ -261,7 +261,7 @@ static bool recoveryStopAfter;
*
* expectedTLEs: a list of TimeLineHistoryEntries for recoveryTargetTLI and the timelines of
* its known parents, newest first (so recoveryTargetTLI is always the
- * first list member). Only these TLIs are expected to be seen in the WAL
+ * first list member). Only these TLIs are expected to be seen in the WAL
* segments we read, and indeed only these TLIs will be considered as
* candidate WAL files to open at all.
*
@@ -290,7 +290,7 @@ XLogRecPtr XactLastRecEnd = InvalidXLogRecPtr;
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
* hold an insertion lock). See XLogInsert for details. We are also allowed
* to update from XLogCtl->RedoRecPtr if we hold the info_lck;
@@ -418,11 +418,11 @@ typedef struct XLogCtlInsert
slock_t insertpos_lck; /* protects CurrBytePos and PrevBytePos */
/*
- * CurrBytePos is the end of reserved WAL. The next record will be inserted
- * at that position. PrevBytePos is the start position of the previously
- * inserted (or rather, reserved) record - it is copied to the prev-link
- * of the next record. These are stored as "usable byte positions" rather
- * than XLogRecPtrs (see XLogBytePosToRecPtr()).
+ * CurrBytePos is the end of reserved WAL. The next record will be
+ * inserted at that position. PrevBytePos is the start position of the
+ * previously inserted (or rather, reserved) record - it is copied to the
+ * prev-link of the next record. These are stored as "usable byte
+ * positions" rather than XLogRecPtrs (see XLogBytePosToRecPtr()).
*/
uint64 CurrBytePos;
uint64 PrevBytePos;
@@ -464,7 +464,7 @@ typedef struct XLogCtlInsert
/*
* WAL insertion locks.
*/
- WALInsertLockPadded *WALInsertLocks;
+ WALInsertLockPadded *WALInsertLocks;
LWLockTranche WALInsertLockTranche;
int WALInsertLockTrancheId;
} XLogCtlInsert;
@@ -504,10 +504,11 @@ typedef struct XLogCtlData
* Latest initialized page in the cache (last byte position + 1).
*
* To change the identity of a buffer (and InitializedUpTo), you need to
- * hold WALBufMappingLock. To change the identity of a buffer that's still
- * dirty, the old page needs to be written out first, and for that you
- * need WALWriteLock, and you need to ensure that there are no in-progress
- * insertions to the page by calling WaitXLogInsertionsToFinish().
+ * hold WALBufMappingLock. To change the identity of a buffer that's
+ * still dirty, the old page needs to be written out first, and for that
+ * you need WALWriteLock, and you need to ensure that there are no
+ * in-progress insertions to the page by calling
+ * WaitXLogInsertionsToFinish().
*/
XLogRecPtr InitializedUpTo;
@@ -799,8 +800,8 @@ static void rm_redo_error_callback(void *arg);
static int get_sync_bit(int method);
static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch,
- XLogRecData *rdata,
- XLogRecPtr StartPos, XLogRecPtr EndPos);
+ XLogRecData *rdata,
+ XLogRecPtr StartPos, XLogRecPtr EndPos);
static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos,
XLogRecPtr *EndPos, XLogRecPtr *PrevPtr);
static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos,
@@ -860,6 +861,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
if (rechdr == NULL)
{
static char rechdrbuf[SizeOfXLogRecord + MAXIMUM_ALIGNOF];
+
rechdr = (XLogRecord *) MAXALIGN(&rechdrbuf);
MemSet(rechdr, 0, SizeOfXLogRecord);
}
@@ -1075,12 +1077,12 @@ begin:;
* record to the shared WAL buffer cache is a two-step process:
*
* 1. Reserve the right amount of space from the WAL. The current head of
- * reserved space is kept in Insert->CurrBytePos, and is protected by
- * insertpos_lck.
+ * reserved space is kept in Insert->CurrBytePos, and is protected by
+ * insertpos_lck.
*
* 2. Copy the record to the reserved WAL space. This involves finding the
- * correct WAL buffer containing the reserved space, and copying the
- * record in place. This can be done concurrently in multiple processes.
+ * correct WAL buffer containing the reserved space, and copying the
+ * record in place. This can be done concurrently in multiple processes.
*
* To keep track of which insertions are still in-progress, each concurrent
* inserter acquires an insertion lock. In addition to just indicating that
@@ -1232,6 +1234,7 @@ begin:;
{
TRACE_POSTGRESQL_XLOG_SWITCH();
XLogFlush(EndPos);
+
/*
* Even though we reserved the rest of the segment for us, which is
* reflected in EndPos, we return a pointer to just the end of the
@@ -1272,7 +1275,7 @@ begin:;
rdt_lastnormal->next = NULL;
initStringInfo(&recordbuf);
- for (;rdata != NULL; rdata = rdata->next)
+ for (; rdata != NULL; rdata = rdata->next)
appendBinaryStringInfo(&recordbuf, rdata->data, rdata->len);
appendStringInfoString(&buf, " - ");
@@ -1514,8 +1517,8 @@ CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata,
/*
* If this was an xlog-switch, it's not enough to write the switch record,
- * we also have to consume all the remaining space in the WAL segment.
- * We have already reserved it for us, but we still need to make sure it's
+ * we also have to consume all the remaining space in the WAL segment. We
+ * have already reserved it for us, but we still need to make sure it's
* allocated and zeroed in the WAL buffers so that when the caller (or
* someone else) does XLogWrite(), it can really write out all the zeros.
*/
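Returning to the two-step insertion protocol described a few hunks above: step 1 reduces to a short spinlock-protected critical section. A sketch using the XLogCtlInsert fields shown earlier:

    SpinLockAcquire(&Insert->insertpos_lck);
    startbytepos = Insert->CurrBytePos;     /* where our record will begin */
    endbytepos = startbytepos + size;
    prevbytepos = Insert->PrevBytePos;      /* becomes our record's prev-link */
    Insert->CurrBytePos = endbytepos;
    Insert->PrevBytePos = startbytepos;
    SpinLockRelease(&Insert->insertpos_lck);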
@@ -1556,14 +1559,14 @@ WALInsertLockAcquire(void)
/*
* It doesn't matter which of the WAL insertion locks we acquire, so try
- * the one we used last time. If the system isn't particularly busy,
- * it's a good bet that it's still available, and it's good to have some
+ * the one we used last time. If the system isn't particularly busy, it's
+ * a good bet that it's still available, and it's good to have some
* affinity to a particular lock so that you don't unnecessarily bounce
* cache lines between processes when there's no contention.
*
* If this is the first time through in this backend, pick a lock
- * (semi-)randomly. This allows the locks to be used evenly if you have
- * a lot of very short connections.
+ * (semi-)randomly. This allows the locks to be used evenly if you have a
+ * lot of very short connections.
*/
static int lockToTry = -1;
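The first-time (semi-)random pick mentioned above can be as simple as hashing the backend's own number; seeding from MyProc->pgprocno is an assumption here:

    if (lockToTry == -1)
        lockToTry = MyProc->pgprocno % num_xloginsert_locks;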
@@ -1583,10 +1586,10 @@ WALInsertLockAcquire(void)
/*
* If we couldn't get the lock immediately, try another lock next
* time. On a system with more insertion locks than concurrent
- * inserters, this causes all the inserters to eventually migrate
- * to a lock that no-one else is using. On a system with more
- * inserters than locks, it still helps to distribute the inserters
- * evenly across the locks.
+ * inserters, this causes all the inserters to eventually migrate to a
+ * lock that no-one else is using. On a system with more inserters
+ * than locks, it still helps to distribute the inserters evenly
+ * across the locks.
*/
lockToTry = (lockToTry + 1) % num_xloginsert_locks;
}
@@ -1604,8 +1607,8 @@ WALInsertLockAcquireExclusive(void)
/*
* When holding all the locks, we only update the last lock's insertingAt
* indicator. The others are set to 0xFFFFFFFFFFFFFFFF, which is higher
- * than any real XLogRecPtr value, to make sure that no-one blocks
- * waiting on those.
+ * than any real XLogRecPtr value, to make sure that no-one blocks waiting
+ * on those.
*/
for (i = 0; i < num_xloginsert_locks - 1; i++)
{
@@ -1655,7 +1658,7 @@ WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt)
* WALInsertLockAcquireExclusive.
*/
LWLockUpdateVar(&WALInsertLocks[num_xloginsert_locks - 1].l.lock,
- &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt,
+ &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt,
insertingAt);
}
else
@@ -1716,15 +1719,16 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
* Loop through all the locks, sleeping on any in-progress insert older
* than 'upto'.
*
- * finishedUpto is our return value, indicating the point upto which
- * all the WAL insertions have been finished. Initialize it to the head
- * of reserved WAL, and as we iterate through the insertion locks, back it
+ * finishedUpto is our return value, indicating the point upto which all
+ * the WAL insertions have been finished. Initialize it to the head of
+ * reserved WAL, and as we iterate through the insertion locks, back it
* out for any insertion that's still in progress.
*/
finishedUpto = reservedUpto;
for (i = 0; i < num_xloginsert_locks; i++)
{
- XLogRecPtr insertingat = InvalidXLogRecPtr;
+ XLogRecPtr insertingat = InvalidXLogRecPtr;
+
do
{
/*
@@ -1797,9 +1801,9 @@ GetXLogBuffer(XLogRecPtr ptr)
}
/*
- * The XLog buffer cache is organized so that a page is always loaded
- * to a particular buffer. That way we can easily calculate the buffer
- * a given page must be loaded into, from the XLogRecPtr alone.
+ * The XLog buffer cache is organized so that a page is always loaded to a
+ * particular buffer. That way we can easily calculate the buffer a given
+ * page must be loaded into, from the XLogRecPtr alone.
*/
idx = XLogRecPtrToBufIdx(ptr);
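Because of that fixed mapping, the buffer index is pure arithmetic on the pointer. A self-contained illustration (function and parameter names invented for the example):

    #include <stdint.h>

    /* one fixed buffer per page: the slot follows from the position alone */
    static int
    wal_ptr_to_buf_idx(uint64_t ptr, uint64_t page_size, int nbuffers)
    {
        return (int) ((ptr / page_size) % (uint64_t) nbuffers);
    }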
@@ -1827,8 +1831,8 @@ GetXLogBuffer(XLogRecPtr ptr)
if (expectedEndPtr != endptr)
{
/*
- * Let others know that we're finished inserting the record up
- * to the page boundary.
+ * Let others know that we're finished inserting the record up to the
+ * page boundary.
*/
WALInsertLockUpdateInsertingAt(expectedEndPtr - XLOG_BLCKSZ);
@@ -1837,7 +1841,7 @@ GetXLogBuffer(XLogRecPtr ptr)
if (expectedEndPtr != endptr)
elog(PANIC, "could not find WAL buffer for %X/%X",
- (uint32) (ptr >> 32) , (uint32) ptr);
+ (uint32) (ptr >> 32), (uint32) ptr);
}
else
{
@@ -1974,8 +1978,8 @@ XLogRecPtrToBytePos(XLogRecPtr ptr)
else
{
result = fullsegs * UsableBytesInSegment +
- (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
- (fullpages - 1) * UsableBytesInPage; /* full pages */
+ (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
+ (fullpages - 1) * UsableBytesInPage; /* full pages */
if (offset > 0)
{
Assert(offset >= SizeOfXLogShortPHD);
@@ -2170,8 +2174,8 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
}
/*
- * Now the next buffer slot is free and we can set it up to be the next
- * output page.
+ * Now the next buffer slot is free and we can set it up to be the
+ * next output page.
*/
NewPageBeginPtr = XLogCtl->InitializedUpTo;
NewPageEndPtr = NewPageBeginPtr + XLOG_BLCKSZ;
@@ -2194,7 +2198,8 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
/* NewPage->xlp_info = 0; */ /* done by memset */
NewPage ->xlp_tli = ThisTimeLineID;
NewPage ->xlp_pageaddr = NewPageBeginPtr;
- /* NewPage->xlp_rem_len = 0; */ /* done by memset */
+
+ /* NewPage->xlp_rem_len = 0; */ /* done by memset */
/*
* If online backup is not in progress, mark the header to indicate
@@ -2202,12 +2207,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
* blocks. This allows the WAL archiver to know whether it is safe to
* compress archived WAL data by transforming full-block records into
* the non-full-block format. It is sufficient to record this at the
- * page level because we force a page switch (in fact a segment switch)
- * when starting a backup, so the flag will be off before any records
- * can be written during the backup. At the end of a backup, the last
- * page will be marked as all unsafe when perhaps only part is unsafe,
- * but at worst the archiver would miss the opportunity to compress a
- * few records.
+ * page level because we force a page switch (in fact a segment
+ * switch) when starting a backup, so the flag will be off before any
+ * records can be written during the backup. At the end of a backup,
+ * the last page will be marked as all unsafe when perhaps only part
+ * is unsafe, but at worst the archiver would miss the opportunity to
+ * compress a few records.
*/
if (!Insert->forcePageWrites)
NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
@@ -2329,7 +2334,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
* if we're passed a bogus WriteRqst.Write that is past the end of the
* last page that's been initialized by AdvanceXLInsertBuffer.
*/
- XLogRecPtr EndPtr = XLogCtl->xlblocks[curridx];
+ XLogRecPtr EndPtr = XLogCtl->xlblocks[curridx];
+
if (LogwrtResult.Write >= EndPtr)
elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
(uint32) (LogwrtResult.Write >> 32),
@@ -2413,7 +2419,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
do
{
errno = 0;
- written = write(openLogFile, from, nleft);
+ written = write(openLogFile, from, nleft);
if (written <= 0)
{
if (errno == EINTR)
@@ -2422,7 +2428,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
(errcode_for_file_access(),
errmsg("could not write to log file %s "
"at offset %u, length %zu: %m",
- XLogFileNameP(ThisTimeLineID, openLogSegNo),
+ XLogFileNameP(ThisTimeLineID, openLogSegNo),
openLogOff, nbytes)));
}
nleft -= written;
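The surrounding loop is the classic short-write/EINTR retry pattern. A standalone rendering for clarity (the helper name is invented):

    #include <errno.h>
    #include <unistd.h>

    static int
    write_all(int fd, const char *from, size_t nleft)
    {
        while (nleft > 0)
        {
            ssize_t     written;

            errno = 0;
            written = write(fd, from, nleft);
            if (written <= 0)
            {
                if (errno == EINTR)
                    continue;       /* interrupted by a signal: just retry */
                return -1;          /* genuine write error */
            }
            nleft -= (size_t) written;
            from += written;
        }
        return 0;
    }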
@@ -2500,7 +2506,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
{
/*
* Could get here without iterating above loop, in which case we might
- * have no open file or the wrong one. However, we do not need to
+ * have no open file or the wrong one. However, we do not need to
* fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN &&
@@ -2569,7 +2575,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
/*
* If the WALWriter is sleeping, we should kick it to make it come out of
- * low-power mode. Otherwise, determine whether there's a full page of
+ * low-power mode. Otherwise, determine whether there's a full page of
* WAL available to write.
*/
if (!sleeping)
@@ -2616,7 +2622,8 @@ XLogGetReplicationSlotMinimumLSN(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr retval;
+ XLogRecPtr retval;
+
SpinLockAcquire(&xlogctl->info_lck);
retval = xlogctl->replicationSlotMinLSN;
SpinLockRelease(&xlogctl->info_lck);
@@ -2883,9 +2890,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those. Thus, if async commits
- * are not being used, we will flush complete blocks only. We can guarantee
+ * are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (When flushing complete blocks, we allow XLogWrite to write
+ * one or two. (When flushing complete blocks, we allow XLogWrite to write
* "flexibly", meaning it can stop at the end of the buffer ring; this makes a
* difference only with very high load or long wal_writer_delay, but imposes
* one extra cycle for the worst case for async commits.)
@@ -3060,7 +3067,7 @@ XLogNeedsFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted). On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock while moving file into
@@ -3127,11 +3134,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
errmsg("could not create file \"%s\": %m", tmppath)));
/*
- * Zero-fill the file. We have to do this the hard way to ensure that all
+ * Zero-fill the file. We have to do this the hard way to ensure that all
* the file space has really been allocated --- on platforms that allow
* "holes" in files, just seeking to the end doesn't allocate intermediate
* space. This way, we know that we have all the space and (after the
- * fsync below) that all the indirect blocks are down on disk. Therefore,
+ * fsync below) that all the indirect blocks are down on disk. Therefore,
* fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
* log file.
*
@@ -3223,7 +3230,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
* a different timeline)
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
@@ -3434,7 +3441,7 @@ XLogFileOpen(XLogSegNo segno)
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open transaction log file \"%s\": %m", path)));
+ errmsg("could not open transaction log file \"%s\": %m", path)));
return fd;
}
@@ -3541,13 +3548,13 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source)
* the timelines listed in expectedTLEs.
*
* We expect curFileTLI on entry to be the TLI of the preceding file in
- * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
+ * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
* to go backwards; this prevents us from picking up the wrong file when a
* parent timeline extends to higher segment numbers than the child we
* want to read.
*
* If we haven't read the timeline history file yet, read it now, so that
- * we know which TLIs to scan. We don't save the list in expectedTLEs,
+ * we know which TLIs to scan. We don't save the list in expectedTLEs,
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
@@ -3611,7 +3618,7 @@ XLogFileClose(void)
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * the OS to release any cached pages. But do not do so if WAL archiving
+ * the OS to release any cached pages. But do not do so if WAL archiving
* or streaming is active, because archiver and walsender process could
* use the cache to read the WAL segment.
*/
@@ -3777,7 +3784,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr)
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that we
+ * deciding whether a segment is still needed. This ensures that we
* won't prematurely remove a segment from a parent timeline. We could
* probably be a little more proactive about removing segments of
* non-parent timelines, but that would be a whole lot more
@@ -3828,6 +3835,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr)
xlde->d_name)));
#ifdef WIN32
+
/*
* On Windows, if another process (e.g another backend)
* holds the file open in FILE_SHARE_DELETE mode, unlink
@@ -4310,7 +4318,7 @@ rescanLatestTimeLine(void)
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control. WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -4715,7 +4723,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source)
{
/*
* If we haven't yet changed the boot_val default of -1, just let it
- * be. We'll fix it when XLOGShmemSize is called.
+ * be. We'll fix it when XLOGShmemSize is called.
*/
if (XLOGbuffers == -1)
return true;
@@ -4815,7 +4823,7 @@ XLOGShmemInit(void)
/* WAL insertion locks. Ensure they're aligned to the full padded size */
allocptr += sizeof(WALInsertLockPadded) -
- ((uintptr_t) allocptr) % sizeof(WALInsertLockPadded);
+ ((uintptr_t) allocptr) %sizeof(WALInsertLockPadded);
WALInsertLocks = XLogCtl->Insert.WALInsertLocks =
(WALInsertLockPadded *) allocptr;
allocptr += sizeof(WALInsertLockPadded) * num_xloginsert_locks;
@@ -4836,8 +4844,8 @@ XLOGShmemInit(void)
/*
* Align the start of the page buffers to a full xlog block size boundary.
- * This simplifies some calculations in XLOG insertion. It is also required
- * for O_DIRECT.
+ * This simplifies some calculations in XLOG insertion. It is also
+ * required for O_DIRECT.
*/
allocptr = (char *) TYPEALIGN(XLOG_BLCKSZ, allocptr);
XLogCtl->pages = allocptr;
@@ -5233,7 +5241,7 @@ readRecoveryCommandFile(void)
const char *hintmsg;
if (!parse_int(item->value, &min_recovery_apply_delay, GUC_UNIT_MS,
- &hintmsg))
+ &hintmsg))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("parameter \"%s\" requires a temporal value", "min_recovery_apply_delay"),
@@ -5271,7 +5279,7 @@ readRecoveryCommandFile(void)
/*
* If user specified recovery_target_timeline, validate it or compute the
- * "latest" value. We can't do this until after we've gotten the restore
+ * "latest" value. We can't do this until after we've gotten the restore
* command and set InArchiveRecovery, because we need to fetch timeline
* history files from the archive.
*/
@@ -5464,8 +5472,8 @@ recoveryStopsBefore(XLogRecord *record)
*
* when testing for an xid, we MUST test for equality only, since
* transactions are numbered in the order they start, not the order
- * they complete. A higher numbered xid will complete before you
- * about 50% of the time...
+ * they complete. A higher numbered xid will complete before you about
+ * 50% of the time...
*/
stopsHere = (record->xl_xid == recoveryTargetXid);
}
@@ -5525,8 +5533,8 @@ recoveryStopsAfter(XLogRecord *record)
record_info = record->xl_info & ~XLR_INFO_MASK;
/*
- * There can be many restore points that share the same name; we stop
- * at the first one.
+ * There can be many restore points that share the same name; we stop at
+ * the first one.
*/
if (recoveryTarget == RECOVERY_TARGET_NAME &&
record->xl_rmid == RM_XLOG_ID && record_info == XLOG_RESTORE_POINT)
@@ -5543,9 +5551,9 @@ recoveryStopsAfter(XLogRecord *record)
strlcpy(recoveryStopName, recordRestorePointData->rp_name, MAXFNAMELEN);
ereport(LOG,
- (errmsg("recovery stopping at restore point \"%s\", time %s",
- recoveryStopName,
- timestamptz_to_str(recoveryStopTime))));
+ (errmsg("recovery stopping at restore point \"%s\", time %s",
+ recoveryStopName,
+ timestamptz_to_str(recoveryStopTime))));
return true;
}
}
@@ -5688,10 +5696,10 @@ recoveryApplyDelay(XLogRecord *record)
/*
* Is it a COMMIT record?
*
- * We deliberately choose not to delay aborts since they have no effect
- * on MVCC. We already allow replay of records that don't have a
- * timestamp, so there is already opportunity for issues caused by early
- * conflicts on standbys.
+ * We deliberately choose not to delay aborts since they have no effect on
+ * MVCC. We already allow replay of records that don't have a timestamp,
+ * so there is already opportunity for issues caused by early conflicts on
+ * standbys.
*/
record_info = record->xl_info & ~XLR_INFO_MASK;
if (!(record->xl_rmid == RM_XACT_ID &&
@@ -5711,7 +5719,7 @@ recoveryApplyDelay(XLogRecord *record)
*/
TimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime,
&secs, &microsecs);
- if (secs <= 0 && microsecs <=0)
+ if (secs <= 0 && microsecs <= 0)
return false;
while (true)
@@ -5731,15 +5739,15 @@ recoveryApplyDelay(XLogRecord *record)
TimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime,
&secs, &microsecs);
- if (secs <= 0 && microsecs <=0)
+ if (secs <= 0 && microsecs <= 0)
break;
elog(DEBUG2, "recovery apply delay %ld seconds, %d milliseconds",
- secs, microsecs / 1000);
+ secs, microsecs / 1000);
WaitLatch(&XLogCtl->recoveryWakeupLatch,
- WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- secs * 1000L + microsecs / 1000);
+ WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ secs * 1000L + microsecs / 1000);
}
return true;
}
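The WaitLatch timeout above folds the remaining delay into milliseconds; a worked example:

    long    secs = 1;               /* suppose 1.5 seconds remain */
    int     microsecs = 500000;
    long    timeout_ms = secs * 1000L + microsecs / 1000;   /* 1000 + 500 = 1500 */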
@@ -5978,7 +5986,7 @@ StartupXLOG(void)
ValidateXLOGDirectoryStructure();
/*
- * Clear out any old relcache cache files. This is *necessary* if we do
+ * Clear out any old relcache cache files. This is *necessary* if we do
* any WAL replay, since that would probably result in the cache files
* being out of sync with database reality. In theory we could leave them
* in place if the database had been cleanly shut down, but it seems
@@ -6050,7 +6058,7 @@ StartupXLOG(void)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating an XLog reading processor.")));
+ errdetail("Failed while allocating an XLog reading processor.")));
xlogreader->system_identifier = ControlFile->system_identifier;
if (read_backup_label(&checkPointLoc, &backupEndRequired,
@@ -6261,9 +6269,9 @@ StartupXLOG(void)
StartupReorderBuffer();
/*
- * Startup MultiXact. We need to do this early for two reasons: one
- * is that we might try to access multixacts when we do tuple freezing,
- * and the other is we need its state initialized because we attempt
+ * Startup MultiXact. We need to do this early for two reasons: one is
+ * that we might try to access multixacts when we do tuple freezing, and
+ * the other is we need its state initialized because we attempt
* truncation during restartpoints.
*/
StartupMultiXact();
@@ -6517,9 +6525,9 @@ StartupXLOG(void)
}
/*
- * Initialize shared variables for tracking progress of WAL replay,
- * as if we had just replayed the record before the REDO location
- * (or the checkpoint record itself, if it's a shutdown checkpoint).
+ * Initialize shared variables for tracking progress of WAL replay, as
+ * if we had just replayed the record before the REDO location (or the
+ * checkpoint record itself, if it's a shutdown checkpoint).
*/
SpinLockAcquire(&xlogctl->info_lck);
if (checkPoint.redo < RecPtr)
@@ -6646,17 +6654,17 @@ StartupXLOG(void)
}
/*
- * If we've been asked to lag the master, wait on
- * latch until enough time has passed.
+ * If we've been asked to lag the master, wait on latch until
+ * enough time has passed.
*/
if (recoveryApplyDelay(record))
{
/*
- * We test for paused recovery again here. If
- * user sets delayed apply, it may be because
- * they expect to pause recovery in case of
- * problems, so we must test again here otherwise
- * pausing during the delay-wait wouldn't work.
+ * We test for paused recovery again here. If user sets
+ * delayed apply, it may be because they expect to pause
+ * recovery in case of problems, so we must test again
+ * here otherwise pausing during the delay-wait wouldn't
+ * work.
*/
if (xlogctl->recoveryPause)
recoveryPausesHere();
@@ -6893,8 +6901,8 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we are doing an archive recovery, we always assign a new ID. This
- * handles a couple of issues. If we stopped short of the end of WAL
+ * If we are doing an archive recovery, we always assign a new ID. This
+ * handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
* current last segment is problematic because it may result in trying to
@@ -6969,7 +6977,7 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in. The last block is indeed the
+ * record spans, not the one it starts in. The last block is indeed the
* one we want to use.
*/
if (EndOfLog % XLOG_BLCKSZ != 0)
@@ -6996,9 +7004,9 @@ StartupXLOG(void)
else
{
/*
- * There is no partial block to copy. Just set InitializedUpTo,
- * and let the first attempt to insert a log record to initialize
- * the next buffer.
+ * There is no partial block to copy. Just set InitializedUpTo, and
+ * let the first attempt to insert a log record to initialize the next
+ * buffer.
*/
XLogCtl->InitializedUpTo = EndOfLog;
}
@@ -7162,7 +7170,7 @@ StartupXLOG(void)
XLogReportParameters();
/*
- * All done. Allow backends to write WAL. (Although the bool flag is
+ * All done. Allow backends to write WAL. (Although the bool flag is
* probably atomic in itself, we use the info_lck here to ensure that
* there are no race conditions concerning visibility of other recent
* updates to shared memory.)
@@ -7200,7 +7208,7 @@ StartupXLOG(void)
static void
CheckRecoveryConsistency(void)
{
- XLogRecPtr lastReplayedEndRecPtr;
+ XLogRecPtr lastReplayedEndRecPtr;
/*
* During crash recovery, we don't reach a consistent state until we've
@@ -7322,7 +7330,7 @@ RecoveryInProgress(void)
/*
* Initialize TimeLineID and RedoRecPtr when we discover that recovery
* is finished. InitPostgres() relies upon this behaviour to ensure
- * that InitXLOGAccess() is called at backend startup. (If you change
+ * that InitXLOGAccess() is called at backend startup. (If you change
* this, see also LocalSetXLogInsertAllowed.)
*/
if (!LocalRecoveryInProgress)
@@ -7335,6 +7343,7 @@ RecoveryInProgress(void)
pg_memory_barrier();
InitXLOGAccess();
}
+
/*
* Note: We don't need a memory barrier when we're still in recovery.
* We might exit recovery immediately after return, so the caller
@@ -7594,7 +7603,7 @@ GetRedoRecPtr(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr ptr;
+ XLogRecPtr ptr;
/*
* The possibly not up-to-date copy in XlogCtl is enough. Even if we
@@ -7983,7 +7992,7 @@ CreateCheckPoint(int flags)
/*
* If this isn't a shutdown or forced checkpoint, and we have not inserted
* any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
+ * checkpoint. The idea here is to avoid inserting duplicate checkpoints
* when the system is idle. That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
@@ -8120,7 +8129,7 @@ CreateCheckPoint(int flags)
* performing those groups of actions.
*
* One example is end of transaction, so we must wait for any transactions
- * that are currently in commit critical sections. If an xact inserted
+ * that are currently in commit critical sections. If an xact inserted
* its commit record into XLOG just before the REDO point, then a crash
* restart from the REDO point would not replay that record, which means
* that our flushing had better include the xact's update of pg_clog. So
@@ -8131,9 +8140,8 @@ CreateCheckPoint(int flags)
* fuzzy: it is possible that we will wait for xacts we didn't really need
* to wait for. But the delay should be short and it seems better to make
* checkpoint take a bit longer than to hold off insertions longer than
- * necessary.
- * (In fact, the whole reason we have this issue is that xact.c does
- * commit record XLOG insertion and clog update as two separate steps
+ * necessary. (In fact, the whole reason we have this issue is that xact.c
+ * does commit record XLOG insertion and clog update as two separate steps
* protected by different locks, but again that seems best on grounds of
* minimizing lock contention.)
*
@@ -8280,9 +8288,9 @@ CreateCheckPoint(int flags)
/*
* Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction. No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). During recovery, though, we mustn't do this because
+ * in subtrans.c). During recovery, though, we mustn't do this because
* StartupSUBTRANS hasn't been called yet.
*/
if (!RecoveryInProgress())
@@ -8600,11 +8608,11 @@ CreateRestartPoint(int flags)
_logSegNo--;
/*
- * Try to recycle segments on a useful timeline. If we've been promoted
- * since the beginning of this restartpoint, use the new timeline
- * chosen at end of recovery (RecoveryInProgress() sets ThisTimeLineID
- * in that case). If we're still in recovery, use the timeline we're
- * currently replaying.
+ * Try to recycle segments on a useful timeline. If we've been
+ * promoted since the beginning of this restartpoint, use the new
+ * timeline chosen at end of recovery (RecoveryInProgress() sets
+ * ThisTimeLineID in that case). If we're still in recovery, use the
+ * timeline we're currently replaying.
*
* There is no guarantee that the WAL segments will be useful on the
* current timeline; if recovery proceeds to a new timeline right
@@ -8636,9 +8644,9 @@ CreateRestartPoint(int flags)
/*
* Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction. No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). When hot standby is disabled, though, we mustn't do
+ * in subtrans.c). When hot standby is disabled, though, we mustn't do
* this because StartupSUBTRANS hasn't been called yet.
*/
if (EnableHotStandby)
@@ -8697,7 +8705,7 @@ KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo)
/* then check whether slots limit removal further */
if (max_replication_slots > 0 && keep != InvalidXLogRecPtr)
{
- XLogRecPtr slotSegNo;
+ XLogRecPtr slotSegNo;
XLByteToSeg(keep, slotSegNo);
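The clamping that follows is short; a sketch consistent with the variables above (the underflow guard is an assumption):

    if (slotSegNo <= 0)
        *logSegNo = 1;              /* assumed guard against wraparound */
    else if (slotSegNo < *logSegNo)
        *logSegNo = slotSegNo;      /* keep the WAL the slots still need */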
@@ -8730,7 +8738,7 @@ XLogPutNextOid(Oid nextOid)
* We need not flush the NEXTOID record immediately, because any of the
* just-allocated OIDs could only reach disk as part of a tuple insert or
* update that would have its own XLOG record that must follow the NEXTOID
- * record. Therefore, the standard buffer LSN interlock applied to those
+ * record. Therefore, the standard buffer LSN interlock applied to those
* records will ensure no such OID reaches disk before the NEXTOID record
* does.
*
@@ -8859,8 +8867,9 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
* lsn updates. We assume pd_lower/upper cannot be changed without an
* exclusive lock, so the contents bkp are not racy.
*
- * With buffer_std set to false, XLogCheckBuffer() sets hole_length and
- * hole_offset to 0; so the following code is safe for either case.
+ * With buffer_std set to false, XLogCheckBuffer() sets hole_length
+ * and hole_offset to 0; so the following code is safe for either
+ * case.
*/
memcpy(copied_buffer, origdata, bkpb.hole_offset);
memcpy(copied_buffer + bkpb.hole_offset,
@@ -9072,7 +9081,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* We used to try to take the maximum of ShmemVariableCache->nextOid
* and the recorded nextOid, but that fails if the OID counter wraps
- * around. Since no OID allocation should be happening during replay
+ * around. Since no OID allocation should be happening during replay
* anyway, better to just believe the record exactly. We still take
* OidGenLock while setting the variable, just in case.
*/
@@ -9262,10 +9271,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
BkpBlock bkpb;
/*
- * Full-page image (FPI) records contain a backup block stored "inline"
- * in the normal data since the locking when writing hint records isn't
- * sufficient to use the normal backup block mechanism, which assumes
- * exclusive lock on the buffer supplied.
+ * Full-page image (FPI) records contain a backup block stored
+ * "inline" in the normal data since the locking when writing hint
+ * records isn't sufficient to use the normal backup block mechanism,
+ * which assumes exclusive lock on the buffer supplied.
*
* Since the only change in these backup block are hint bits, there
* are no recovery conflicts generated.
@@ -9415,7 +9424,7 @@ get_sync_bit(int method)
/*
* Optimize writes by bypassing kernel cache with O_DIRECT when using
- * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
+ * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
* disabled, otherwise the archive command or walsender process will read
* the WAL soon after writing it, which is guaranteed to cause a physical
* read if we bypassed the kernel cache. We also skip the
@@ -9619,7 +9628,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* during an on-line backup even if not doing so at other times, because
* it's quite possible for the backup dump to obtain a "torn" (partially
* written) copy of a database page if it reads the page concurrently with
- * our write to the same page. This can be fixed as long as the first
+ * our write to the same page. This can be fixed as long as the first
* write to the page in the WAL sequence is a full-page write. Hence, we
* turn on forcePageWrites and then force a CHECKPOINT, to ensure there
* are no dirty pages in shared memory that might get dumped while the
@@ -9663,7 +9672,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* old timeline IDs. That would otherwise happen if you called
* pg_start_backup() right after restoring from a PITR archive: the
* first WAL segment containing the startup checkpoint has pages in
- * the beginning with the old timeline ID. That can cause trouble at
+ * the beginning with the old timeline ID. That can cause trouble at
* recovery: we won't have a history file covering the old timeline if
* pg_xlog directory was not included in the base backup and the WAL
* archive was cleared too before starting the backup.
@@ -9686,7 +9695,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
bool checkpointfpw;
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
@@ -10339,7 +10348,7 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
*
* If we see a backup_label during recovery, we assume that we are recovering
* from a backup dump file, and we therefore roll forward from the checkpoint
- * identified by the label file, NOT what pg_control says. This avoids the
+ * identified by the label file, NOT what pg_control says. This avoids the
* problem that pg_control might have been archived one or more checkpoints
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
@@ -10686,7 +10695,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* Standby mode is implemented by a state machine:
*
* 1. Read from either archive or pg_xlog (XLOG_FROM_ARCHIVE), or just
- * pg_xlog (XLOG_FROM_XLOG)
+ * pg_xlog (XLOG_FROM_XLOG)
* 2. Check trigger file
* 3. Read from primary server via walreceiver (XLOG_FROM_STREAM)
* 4. Rescan timelines
@@ -10887,8 +10896,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* file from pg_xlog.
*/
readFile = XLogFileReadAnyTLI(readSegNo, DEBUG2,
- currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY :
- currentSource);
+ currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY :
+ currentSource);
if (readFile >= 0)
return true; /* success! */
@@ -10945,11 +10954,11 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
if (havedata)
{
/*
- * Great, streamed far enough. Open the file if it's
+ * Great, streamed far enough. Open the file if it's
* not open already. Also read the timeline history
* file if we haven't initialized timeline history
* yet; it should be streamed over and present in
- * pg_xlog by now. Use XLOG_FROM_STREAM so that
+ * pg_xlog by now. Use XLOG_FROM_STREAM so that
* source info is set correctly and XLogReceiptTime
* isn't changed.
*/
@@ -11014,7 +11023,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
HandleStartupProcInterrupts();
}
- return false; /* not reached */
+ return false; /* not reached */
}
/*
@@ -11022,9 +11031,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* in the current WAL page, previously read by XLogPageRead().
*
* 'emode' is the error mode that would be used to report a file-not-found
- * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
+ * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
* we're retrying the exact same record that we've tried previously, only
- * complain the first time to keep the noise down. However, we only do when
+ * complain the first time to keep the noise down. However, we only do when
* reading from pg_xlog, because we don't expect any invalid records in archive
* or in records streamed from master. Files in the archive should be complete,
* and we should never hit the end of WAL because we stop and wait for more WAL
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index a43793382e..37745dce89 100644
--- a/src/backend/access/transam/xlogarchive.c
+++ b/src/backend/access/transam/xlogarchive.c
@@ -300,8 +300,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
ereport(signaled ? FATAL : DEBUG2,
- (errmsg("could not restore file \"%s\" from archive: %s",
- xlogfname, wait_result_to_str(rc))));
+ (errmsg("could not restore file \"%s\" from archive: %s",
+ xlogfname, wait_result_to_str(rc))));
not_available:
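[Editor's note: the hunk above re-wraps the check that classifies a restore_command failure. A self-contained POSIX demonstration of the same test; "false" stands in for the configured restore_command.]

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int main(void)
{
    /* Run a stand-in restore_command; "false" simply exits nonzero. */
    int rc = system("false");

    /*
     * Mirror the test above: death by signal, or an exit status above 125
     * (the shell's "cannot execute"/"not found" range), is treated as
     * fatal rather than as a routine "file not in archive" miss.
     */
    int signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;

    printf(signaled ? "FATAL\n" : "DEBUG2: file not in archive\n");
    return 0;
}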
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index 5f8d65514c..8a87581e79 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -429,7 +429,7 @@ pg_is_in_recovery(PG_FUNCTION_ARGS)
Datum
pg_xlog_location_diff(PG_FUNCTION_ARGS)
{
- Datum result;
+ Datum result;
result = DirectFunctionCall2(pg_lsn_mi,
PG_GETARG_DATUM(0),
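[Editor's note: pg_xlog_location_diff, touched above, subtracts one LSN from another via pg_lsn_mi. A sketch of the underlying arithmetic in plain C, parsing the textual "xlogid/xrecoff" form into a flat 64-bit position; this is an illustration of the math, not the server's pg_lsn code.]

#include <inttypes.h>
#include <stdio.h>

/* Parse "xlogid/xrecoff" into a flat 64-bit WAL position. */
static uint64_t lsn_to_u64(const char *text)
{
    unsigned int hi, lo;

    sscanf(text, "%X/%X", &hi, &lo);
    return ((uint64_t) hi << 32) | lo;
}

int main(void)
{
    uint64_t a = lsn_to_u64("16/B374D848");
    uint64_t b = lsn_to_u64("16/B3749718");

    /* pg_lsn_mi yields a signed byte difference; sketched as a - b. */
    printf("%" PRId64 " bytes\n", (int64_t) (a - b));
    return 0;
}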
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index eff2081afe..f06daa2638 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -199,7 +199,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
randAccess = true;
/*
- * RecPtr is pointing to end+1 of the previous WAL record. If we're
+ * RecPtr is pointing to end+1 of the previous WAL record. If we're
* at a page boundary, no more records can fit on the current page. We
* must skip over the page header, but we can't do that until we've
* read in the page, since the header size is variable.
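[Editor's note: a toy version of the page-boundary skip the comment describes. The header sizes are illustrative placeholders, not the exact SizeOfXLogShortPHD/SizeOfXLogLongPHD values; the point is only that a record pointer landing exactly on a block boundary must be advanced past the page header.]

#include <stdint.h>
#include <stdio.h>

#define XLOG_BLCKSZ 8192           /* assumed WAL block size */
#define SHORT_PHD   24             /* illustrative short page header size */
#define LONG_PHD    40             /* illustrative long header (segment start) */

static uint64_t skip_page_header(uint64_t RecPtr, int first_page_of_segment)
{
    /* At a page boundary, the next record begins after the page header. */
    if (RecPtr % XLOG_BLCKSZ == 0)
        RecPtr += first_page_of_segment ? LONG_PHD : SHORT_PHD;
    return RecPtr;
}

int main(void)
{
    printf("%llu\n", (unsigned long long) skip_page_header(8192, 0));
    return 0;
}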
@@ -277,7 +277,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
/*
* If the whole record header is on this page, validate it immediately.
* Otherwise do just a basic sanity check on xl_tot_len, and validate the
- * rest of the header after reading it from the next page. The xl_tot_len
+ * rest of the header after reading it from the next page. The xl_tot_len
* check is necessary here to ensure that we enter the "Need to reassemble
* record" code path below; otherwise we might fail to apply
* ValidXLogRecordHeader at all.
@@ -572,7 +572,7 @@ err:
* Validate an XLOG record header.
*
* This is just a convenience subroutine to avoid duplicated code in
- * XLogReadRecord. It's not intended for use from anywhere else.
+ * XLogReadRecord. It's not intended for use from anywhere else.
*/
static bool
ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
@@ -661,7 +661,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
* data to read in) until we've checked the CRCs.
*
* We assume all of the record (that is, xl_tot_len bytes) has been read
- * into memory at *record. Also, ValidXLogRecordHeader() has accepted the
+ * into memory at *record. Also, ValidXLogRecordHeader() has accepted the
* record's header, which means in particular that xl_tot_len is at least
* SizeOfXlogRecord, so it is safe to fetch xl_len.
*/
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index c36e71d806..4a542e65ca 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -76,7 +76,7 @@ int numattr; /* number of attributes for cur. rel */
* in the core "bootstrapped" catalogs.
*
* XXX several of these input/output functions do catalog scans
- * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some
+ * (e.g., F_REGPROCIN scans pg_proc). This obviously creates some
* order dependencies in the catalog creation process.
*/
struct typinfo
@@ -374,9 +374,9 @@ AuxiliaryProcessMain(int argc, char *argv[])
#endif
/*
- * Assign the ProcSignalSlot for an auxiliary process. Since it
+ * Assign the ProcSignalSlot for an auxiliary process. Since it
* doesn't have a BackendId, the slot is statically allocated based on
- * the auxiliary process type (MyAuxProcType). Backends use slots
+ * the auxiliary process type (MyAuxProcType). Backends use slots
* indexed in the range from 1 to MaxBackends (inclusive), so we use
* MaxBackends + AuxProcType + 1 as the index of the slot for an
* auxiliary process.
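[Editor's note: the slot arithmetic described in the reflowed comment, reduced to a runnable snippet. MAX_BACKENDS and the AuxProcType values are illustrative assumptions standing in for MaxBackends and the server's enum.]

#include <stdio.h>

#define MAX_BACKENDS 100    /* stand-in for MaxBackends */

/* Illustrative subset; the real enum and its ordering differ. */
typedef enum { CheckpointerProcess, BgWriterProcess, WalWriterProcess } AuxProcType;

int main(void)
{
    /*
     * Backends occupy slots 1..MaxBackends; an auxiliary process has no
     * BackendId, so its slot is derived from its type, as above.
     */
    AuxProcType MyAuxProcType = BgWriterProcess;
    int slot = MAX_BACKENDS + MyAuxProcType + 1;

    printf("ProcSignal slot %d\n", slot);
    return 0;
}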
@@ -561,7 +561,7 @@ bootstrap_signals(void)
}
/*
- * Begin shutdown of an auxiliary process. This is approximately the equivalent
+ * Begin shutdown of an auxiliary process. This is approximately the equivalent
* of ShutdownPostgres() in postinit.c. We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
* but we do need to make sure we've released any LWLocks we are holding.
@@ -876,7 +876,7 @@ cleanup(void)
* and not an OID at all, until the first reference to a type not known in
* TypInfo[]. At that point it will read and cache pg_type in the Typ array,
* and subsequently return a real OID (and set the global pointer Ap to
- * point at the found row in Typ). So caller must check whether Typ is
+ * point at the found row in Typ). So caller must check whether Typ is
* still NULL to determine what the return value is!
* ----------------
*/
@@ -1073,9 +1073,9 @@ MapArrayTypeName(char *s)
*
* At bootstrap time, we define a bunch of indexes on system catalogs.
* We postpone actually building the indexes until just before we're
- * finished with initialization, however. This is because the indexes
+ * finished with initialization, however. This is because the indexes
* themselves have catalog entries, and those have to be included in the
- * indexes on those catalogs. Doing it in two phases is the simplest
+ * indexes on those catalogs. Doing it in two phases is the simplest
* way of making sure the indexes have the right contents at the end.
*/
void
@@ -1088,7 +1088,7 @@ index_register(Oid heap,
/*
* XXX mao 10/31/92 -- don't gc index reldescs, associated info at
- * bootstrap time. we'll declare the indexes now, but want to create them
+ * bootstrap time. we'll declare the indexes now, but want to create them
* later.
*/
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index f4fc12d83a..d9745cabd2 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -313,7 +313,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
/*
* Restrict the operation to what we can actually grant or revoke, and
- * issue a warning if appropriate. (For REVOKE this isn't quite what the
+ * issue a warning if appropriate. (For REVOKE this isn't quite what the
* spec says to do: the spec seems to want a warning only if no privilege
* bits actually change in the ACL. In practice that behavior seems much
* too noisy, as well as inconsistent with the GRANT case.)
@@ -1092,7 +1092,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
/*
* The default for a global entry is the hard-wired default ACL for the
- * particular object type. The default for non-global entries is an empty
+ * particular object type. The default for non-global entries is an empty
* ACL. This must be so because global entries replace the hard-wired
* defaults, while others are added on.
*/
@@ -1662,7 +1662,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname,
* If the updated ACL is empty, we can set attacl to null, and maybe even
* avoid an update of the pg_attribute row. This is worth testing because
* we'll come through here multiple times for any relation-level REVOKE,
- * even if there were never any column GRANTs. Note we are assuming that
+ * even if there were never any column GRANTs. Note we are assuming that
* the "default" ACL state for columns is empty.
*/
if (ACL_NUM(new_acl) > 0)
@@ -1787,7 +1787,7 @@ ExecGrant_Relation(InternalGrant *istmt)
{
/*
* Mention the object name because the user needs to know
- * which operations succeeded. This is required because
+ * which operations succeeded. This is required because
* WARNING allows the command to continue.
*/
ereport(WARNING,
@@ -1816,7 +1816,7 @@ ExecGrant_Relation(InternalGrant *istmt)
/*
* Set up array in which we'll accumulate any column privilege bits
- * that need modification. The array is indexed such that entry [0]
+ * that need modification. The array is indexed such that entry [0]
* corresponds to FirstLowInvalidHeapAttributeNumber.
*/
num_col_privileges = pg_class_tuple->relnatts - FirstLowInvalidHeapAttributeNumber + 1;
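[Editor's note: the array-indexing convention from the comment above, made concrete. FirstLowInvalidHeapAttributeNumber is assumed to be -8 for illustration; the rule is simply that entry [0] maps to that attnum, so every attribute number (system attnums are negative) yields a nonnegative index.]

#include <stdio.h>

#define FirstLowInvalidHeapAttributeNumber (-8)  /* value assumed */

static int col_priv_index(int attnum)
{
    /* Entry [0] corresponds to FirstLowInvalidHeapAttributeNumber. */
    return attnum - FirstLowInvalidHeapAttributeNumber;
}

int main(void)
{
    int relnatts = 3;
    int num_col_privileges = relnatts - FirstLowInvalidHeapAttributeNumber + 1;

    printf("array size %d, attnum 1 -> index %d\n",
           num_col_privileges, col_priv_index(1));
    return 0;
}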
@@ -3507,7 +3507,7 @@ pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, Oid roleid,
*
* Note: this considers only privileges granted specifically on the column.
* It is caller's responsibility to take relation-level privileges into account
- * as appropriate. (For the same reason, we have no special case for
+ * as appropriate. (For the same reason, we have no special case for
* superuser-ness here.)
*/
AclMode
@@ -3620,12 +3620,12 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
/*
* Deny anyone permission to update a system catalog unless
- * pg_authid.rolcatupdate is set. (This is to let superusers protect
+ * pg_authid.rolcatupdate is set. (This is to let superusers protect
* themselves from themselves.) Also allow it if allowSystemTableMods.
*
* As of 7.4 we have some updatable system views; those shouldn't be
* protected in this way. Assume the view rules can take care of
- * themselves. ACL_USAGE is if we ever have system sequences.
+ * themselves. ACL_USAGE is included in case we ever have system sequences.
*/
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE)) &&
IsSystemClass(table_oid, classForm) &&
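[Editor's note: a toy version of the mask stripping this hunk leads into — denying catalog modification unless rolcatupdate or allowSystemTableMods permits it. The ACL bit values are invented for the example; only the shape of the test is taken from the code above.]

#include <stdbool.h>
#include <stdio.h>

#define ACL_INSERT   (1 << 0)   /* illustrative bit assignments */
#define ACL_SELECT   (1 << 1)
#define ACL_UPDATE   (1 << 2)
#define ACL_DELETE   (1 << 3)
#define ACL_TRUNCATE (1 << 4)
#define ACL_USAGE    (1 << 5)

int main(void)
{
    int  mask = ACL_SELECT | ACL_UPDATE;
    bool is_system_class = true;
    bool rolcatupdate = false;          /* pg_authid.rolcatupdate */
    bool allowSystemTableMods = false;

    /* Strip the modification bits unless catalog updates are allowed. */
    if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE)) &&
        is_system_class && !rolcatupdate && !allowSystemTableMods)
        mask &= ~(ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE);

    printf("resulting mask: %#x\n", mask);  /* only ACL_SELECT survives */
    return 0;
}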
@@ -4331,7 +4331,7 @@ pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode,
ReleaseSysCache(classTuple);
/*
- * Initialize result in case there are no non-dropped columns. We want to
+ * Initialize result in case there are no non-dropped columns. We want to
* report failure in such cases for either value of 'how'.
*/
result = ACLCHECK_NO_PRIV;
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 3ec360c2be..2eb2c2fddf 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -48,7 +48,7 @@
* IsSystemRelation
* True iff the relation is either a system catalog or toast table.
* By a system catalog, we mean one that was created in the pg_catalog schema
- * during initdb. User-created relations in pg_catalog don't count as
+ * during initdb. User-created relations in pg_catalog don't count as
* system catalogs.
*
* NB: TOAST relations are considered system relations by this test
@@ -100,7 +100,7 @@ IsCatalogRelation(Relation relation)
bool
IsCatalogClass(Oid relid, Form_pg_class reltuple)
{
- Oid relnamespace = reltuple->relnamespace;
+ Oid relnamespace = reltuple->relnamespace;
/*
* Never consider relations outside pg_catalog/pg_toast to be catalog
@@ -268,7 +268,7 @@ IsSharedRelation(Oid relationId)
* Since the OID is not immediately inserted into the table, there is a
* race condition here; but a problem could occur only if someone else
* managed to cycle through 2^32 OIDs and generate the same OID before we
- * finish inserting our row. This seems unlikely to be a problem. Note
+ * finish inserting our row. This seems unlikely to be a problem. Note
* that if we had to *commit* the row to end the race condition, the risk
* would be rather higher; therefore we use SnapshotDirty in the test,
* so that we will see uncommitted rows.
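[Editor's note: the retry loop implied by the comment above, in miniature. The counter and oid_in_use() predicate are toys; the comment's key point survives in the loop condition — the probe must see even uncommitted rows (SnapshotDirty in the real code), since the new OID is not inserted immediately.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Oid;

/* Stand-in for an index probe under SnapshotDirty. */
static bool oid_in_use(Oid oid) { return oid < 16387; /* toy predicate */ }

static Oid get_new_oid(void)
{
    static Oid counter = 16384;     /* toy OID counter */
    Oid newOid;

    /* Loop until we draw a value no (possibly uncommitted) row uses. */
    do
        newOid = counter++;
    while (oid_in_use(newOid));
    return newOid;
}

int main(void) { printf("%u\n", get_new_oid()); return 0; }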
@@ -314,7 +314,7 @@ GetNewOid(Relation relation)
* This is exported separately because there are cases where we want to use
* an index that will not be recognized by RelationGetOidIndex: TOAST tables
* have indexes that are usable, but have multiple columns and are on
- * ordinary columns rather than a true OID column. This code will work
+ * ordinary columns rather than a true OID column. This code will work
* anyway, so long as the OID is the index's first column. The caller must
* pass in the actual heap attnum of the OID column, however.
*
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index e5116693cf..d41ba49f87 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -261,7 +261,7 @@ performDeletion(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object. (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object, 0);
@@ -373,7 +373,7 @@ performMultipleDeletions(const ObjectAddresses *objects,
/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
- * specified object, though not the object itself. Behavior is always
+ * specified object, though not the object itself. Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@@ -399,7 +399,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object. (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object, 0);
@@ -441,7 +441,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
* Since this function is currently only used to clean out temporary
* schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that
* the operation is an automatic system operation rather than a user
- * action. If, in the future, this function is used for other
+ * action. If, in the future, this function is used for other
* purposes, we might need to revisit this.
*/
deleteOneObject(thisobj, &depRel, PERFORM_DELETION_INTERNAL);
@@ -458,7 +458,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
*
* For every object that depends on the starting object, acquire a deletion
* lock on the object, add it to targetObjects (if not already there),
- * and recursively find objects that depend on it. An object's dependencies
+ * and recursively find objects that depend on it. An object's dependencies
* will be placed into targetObjects before the object itself; this means
* that the finished list's order represents a safe deletion order.
*
@@ -510,7 +510,7 @@ findDependentObjects(const ObjectAddress *object,
* will not break a loop at an internal dependency: if we enter the loop
* at an "owned" object we will switch and start at the "owning" object
* instead. We could probably hack something up to avoid breaking at an
- * auto dependency, too, if we had to. However there are no known cases
+ * auto dependency, too, if we had to. However there are no known cases
* where that would be necessary.
*/
if (stack_address_present_add_flags(object, flags, stack))
@@ -531,7 +531,7 @@ findDependentObjects(const ObjectAddress *object,
/*
* The target object might be internally dependent on some other object
* (its "owner"), and/or be a member of an extension (also considered its
- * owner). If so, and if we aren't recursing from the owning object, we
+ * owner). If so, and if we aren't recursing from the owning object, we
* have to transform this deletion request into a deletion request of the
* owning object. (We'll eventually recurse back to this object, but the
* owning object has to be visited first so it will be deleted after.) The
@@ -594,7 +594,7 @@ findDependentObjects(const ObjectAddress *object,
/*
* Exception 1a: if the owning object is listed in
* pendingObjects, just release the caller's lock and
- * return. We'll eventually complete the DROP when we
+ * return. We'll eventually complete the DROP when we
* reach that entry in the pending list.
*/
if (pendingObjects &&
@@ -647,7 +647,7 @@ findDependentObjects(const ObjectAddress *object,
* owning object.
*
* First, release caller's lock on this object and get
- * deletion lock on the owning object. (We must release
+ * deletion lock on the owning object. (We must release
* caller's lock to avoid deadlock against a concurrent
* deletion of the owning object.)
*/
@@ -809,7 +809,7 @@ findDependentObjects(const ObjectAddress *object,
systable_endscan(scan);
/*
- * Finally, we can add the target object to targetObjects. Be careful to
+ * Finally, we can add the target object to targetObjects. Be careful to
* include any flags that were passed back down to us from inner recursion
* levels.
*/
@@ -864,7 +864,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings. The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
@@ -897,7 +897,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
DEPFLAG_EXTENSION))
{
/*
- * auto-cascades are reported at DEBUG2, not msglevel. We don't
+ * auto-cascades are reported at DEBUG2, not msglevel. We don't
* try to combine them with the regular message because the
* results are too confusing when client_min_messages and
* log_min_messages are different.
@@ -1079,7 +1079,7 @@ deleteOneObject(const ObjectAddress *object, Relation *depRel, int flags)
systable_endscan(scan);
/*
- * Delete shared dependency references related to this object. Again, if
+ * Delete shared dependency references related to this object. Again, if
* subId = 0, remove records for sub-objects too.
*/
deleteSharedDependencyRecordsFor(object->classId, object->objectId,
@@ -1344,13 +1344,13 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
- * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
+ * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* range table. An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
*
* NOTE: the caller should ensure that a whole-table dependency on the
- * specified relation is created separately, if one is needed. In particular,
+ * specified relation is created separately, if one is needed. In particular,
* a whole-row Var "relation.*" will not cause this routine to emit any
* dependency item. This is appropriate behavior for subexpressions of an
* ordinary query, so other cases need to cope as necessary.
@@ -1470,7 +1470,7 @@ find_expr_references_walker(Node *node,
/*
* A whole-row Var references no specific columns, so adds no new
- * dependency. (We assume that there is a whole-table dependency
+ * dependency. (We assume that there is a whole-table dependency
* arising from each underlying rangetable entry. While we could
* record such a dependency when finding a whole-row Var that
* references a relation directly, it's quite unclear how to extend
@@ -1529,7 +1529,7 @@ find_expr_references_walker(Node *node,
/*
* If it's a regclass or similar literal referring to an existing
- * object, add a reference to that object. (Currently, only the
+ * object, add a reference to that object. (Currently, only the
* regclass and regconfig cases have any likely use, but we may as
* well handle all the OID-alias datatypes consistently.)
*/
@@ -2130,7 +2130,7 @@ object_address_present_add_flags(const ObjectAddress *object,
{
/*
* We get here if we find a need to delete a column after
- * having already decided to drop its whole table. Obviously
+ * having already decided to drop its whole table. Obviously
* we no longer need to drop the column. But don't plaster
* its flags on the table.
*/
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 2cf4bc033c..33eef9f1ca 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -21,7 +21,7 @@
* the old heap_create_with_catalog, amcreate, and amdestroy.
* those routines will soon call these routines using the function
* manager,
- * just like the poorly named "NewXXX" routines do. The
+ * just like the poorly named "NewXXX" routines do. The
* "New" routines are all going to die soon, once and for all!
* -cim 1/13/91
*
@@ -199,7 +199,7 @@ SystemAttributeDefinition(AttrNumber attno, bool relhasoids)
/*
* If the given name is a system attribute name, return a Form_pg_attribute
- * pointer for a prototype definition. If not, return NULL.
+ * pointer for a prototype definition. If not, return NULL.
*/
Form_pg_attribute
SystemAttributeByName(const char *attname, bool relhasoids)
@@ -527,7 +527,7 @@ CheckAttributeType(const char *attname,
int i;
/*
- * Check for self-containment. Eventually we might be able to allow
+ * Check for self-containment. Eventually we might be able to allow
* this (just return without complaint, if so) but it's not clear how
* many other places would require anti-recursion defenses before it
* would be safe to allow tables to contain their own rowtype.
@@ -590,7 +590,7 @@ CheckAttributeType(const char *attname,
* attribute to insert (but we ignore attacl and attoptions, which are always
* initialized to NULL).
*
- * indstate is the index state for CatalogIndexInsert. It can be passed as
+ * indstate is the index state for CatalogIndexInsert. It can be passed as
* NULL, in which case we'll fetch the necessary info. (Don't do this when
* inserting multiple attributes, because it's a tad more expensive.)
*/
@@ -757,7 +757,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
* Tuple data is taken from new_rel_desc->rd_rel, except for the
* variable-width fields which are not present in a cached reldesc.
* relacl and reloptions are passed in Datum form (to avoid having
- * to reference the data types in heap.h). Pass (Datum) 0 to set them
+ * to reference the data types in heap.h). Pass (Datum) 0 to set them
* to NULL.
* --------------------------------
*/
@@ -816,7 +816,7 @@ InsertPgClassTuple(Relation pg_class_desc,
tup = heap_form_tuple(RelationGetDescr(pg_class_desc), values, nulls);
/*
- * The new tuple must have the oid already chosen for the rel. Sure would
+ * The new tuple must have the oid already chosen for the rel. Sure would
* be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tup, new_rel_oid);
@@ -1372,8 +1372,8 @@ heap_create_init_fork(Relation rel)
* RelationRemoveInheritance
*
* Formerly, this routine checked for child relations and aborted the
- * deletion if any were found. Now we rely on the dependency mechanism
- * to check for or delete child relations. By the time we get here,
+ * deletion if any were found. Now we rely on the dependency mechanism
+ * to check for or delete child relations. By the time we get here,
* there are no children and we need only remove any pg_inherits rows
* linking this relation to its parent(s).
*/
@@ -1658,7 +1658,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum,
/*
* RemoveAttrDefaultById
*
- * Remove a pg_attrdef entry specified by OID. This is the guts of
+ * Remove a pg_attrdef entry specified by OID. This is the guts of
* attribute-default removal. Note it should be called via performDeletion,
* not directly.
*/
@@ -2065,7 +2065,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal)
/*
* Deparsing of constraint expressions will fail unless the just-created
- * pg_attribute tuples for this relation are made visible. So, bump the
+ * pg_attribute tuples for this relation are made visible. So, bump the
* command counter. CAUTION: this will cause a relcache entry rebuild.
*/
CommandCounterIncrement();
@@ -2117,7 +2117,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal)
* the default and constraint expressions added to the relation.
*
* NB: caller should have opened rel with AccessExclusiveLock, and should
- * hold that lock till end of transaction. Also, we assume the caller has
+ * hold that lock till end of transaction. Also, we assume the caller has
* done a CommandCounterIncrement if necessary to make the relation's catalog
* tuples visible.
*/
@@ -2262,7 +2262,7 @@ AddRelationNewConstraints(Relation rel,
checknames = lappend(checknames, ccname);
/*
- * Check against pre-existing constraints. If we are allowed to
+ * Check against pre-existing constraints. If we are allowed to
* merge with an existing constraint, there's no more to do here.
* (We omit the duplicate constraint from the result, which is
* what ATAddCheckConstraint wants.)
@@ -2279,7 +2279,7 @@ AddRelationNewConstraints(Relation rel,
* column constraint and "tab_check" for a table constraint. We
* no longer have any info about the syntactic positioning of the
* constraint phrase, so we approximate this by seeing whether the
- * expression references more than one column. (If the user
+ * expression references more than one column. (If the user
* played by the rules, the result is the same...)
*
* Note: pull_var_clause() doesn't descend into sublinks, but we
@@ -2664,7 +2664,7 @@ RemoveStatistics(Oid relid, AttrNumber attnum)
* with the heap relation to zero tuples.
*
* The routine will truncate and then reconstruct the indexes on
- * the specified relation. Caller must hold exclusive lock on rel.
+ * the specified relation. Caller must hold exclusive lock on rel.
*/
static void
RelationTruncateIndexes(Relation heapRelation)
@@ -2704,7 +2704,7 @@ RelationTruncateIndexes(Relation heapRelation)
* This routine deletes all data within all the specified relations.
*
* This is not transaction-safe! There is another, transaction-safe
- * implementation in commands/tablecmds.c. We now use this only for
+ * implementation in commands/tablecmds.c. We now use this only for
* ON COMMIT truncation of temporary tables, where it doesn't matter.
*/
void
@@ -2813,7 +2813,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
return;
/*
- * Otherwise, must scan pg_constraint. We make one pass with all the
+ * Otherwise, must scan pg_constraint. We make one pass with all the
* relations considered; if this finds nothing, then all is well.
*/
dependents = heap_truncate_find_FKs(oids);
@@ -2874,7 +2874,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
* behavior to change depending on chance locations of rows in pg_constraint.)
*
* Note: caller should already have appropriate lock on all rels mentioned
- * in relationIds. Since adding or dropping an FK requires exclusive lock
+ * in relationIds. Since adding or dropping an FK requires exclusive lock
* on both rels, this ensures that the answer will be stable.
*/
List *
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index c932c83342..80acc0ec27 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -411,7 +411,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/*
* We do not yet have the correct relation OID for the index, so just
- * set it invalid for now. InitializeAttributeOids() will fix it
+ * set it invalid for now. InitializeAttributeOids() will fix it
* later.
*/
to->attrelid = InvalidOid;
@@ -651,7 +651,7 @@ UpdateIndexRelation(Oid indexoid,
* heapRelation: table to build index on (suitably locked by caller)
* indexRelationName: what it says
* indexRelationId: normally, pass InvalidOid to let this routine
- * generate an OID for the index. During bootstrap this may be
+ * generate an OID for the index. During bootstrap this may be
* nonzero to specify a preselected OID.
* relFileNode: normally, pass InvalidOid to get new storage. May be
* nonzero to attach an existing valid build.
@@ -670,7 +670,7 @@ UpdateIndexRelation(Oid indexoid,
* allow_system_table_mods: allow table to be a system catalog
* skip_build: true to skip the index_build() step for the moment; caller
* must do it later (typically via reindex_index())
- * concurrent: if true, do not lock the table against writers. The index
+ * concurrent: if true, do not lock the table against writers. The index
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
* is_internal: if true, post creation hook for new index
@@ -960,7 +960,7 @@ index_create(Relation heapRelation,
/*
* If there are no simply-referenced columns, give the index an
- * auto dependency on the whole table. In most cases, this will
+ * auto dependency on the whole table. In most cases, this will
* be redundant, but it might not be if the index expressions and
* predicate contain no Vars or only whole-row Vars.
*/
@@ -1085,7 +1085,7 @@ index_create(Relation heapRelation,
/*
* Close the index; but we keep the lock that we acquired above until end
- * of transaction. Closing the heap is caller's responsibility.
+ * of transaction. Closing the heap is caller's responsibility.
*/
index_close(indexRelation, NoLock);
@@ -1243,7 +1243,7 @@ index_constraint_create(Relation heapRelation,
* have been so marked already, so no need to clear the flag in the other
* case.
*
- * Note: this might better be done by callers. We do it here to avoid
+ * Note: this might better be done by callers. We do it here to avoid
* exposing index_update_stats() globally, but that wouldn't be necessary
* if relhaspkey went away.
*/
@@ -1256,10 +1256,10 @@ index_constraint_create(Relation heapRelation,
/*
* If needed, mark the index as primary and/or deferred in pg_index.
*
- * Note: When making an existing index into a constraint, caller must
- * have a table lock that prevents concurrent table updates; otherwise,
- * there is a risk that concurrent readers of the table will miss seeing
- * this index at all.
+ * Note: When making an existing index into a constraint, caller must have
+ * a table lock that prevents concurrent table updates; otherwise, there
+ * is a risk that concurrent readers of the table will miss seeing this
+ * index at all.
*/
if (update_pgindex && (mark_as_primary || deferrable))
{
@@ -1336,7 +1336,7 @@ index_drop(Oid indexId, bool concurrent)
* in multiple steps and waiting out any transactions that might be using
* the index, so we don't need exclusive lock on the parent table. Instead
* we take ShareUpdateExclusiveLock, to ensure that two sessions aren't
- * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get
+ * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get
* AccessExclusiveLock on the index below, once we're sure nobody else is
* using it.)
*/
@@ -1376,7 +1376,7 @@ index_drop(Oid indexId, bool concurrent)
* non-concurrent case we can just do that now. In the concurrent case
* it's a bit trickier. The predicate locks must be moved when there are
* no index scans in progress on the index and no more can subsequently
- * start, so that no new predicate locks can be made on the index. Also,
+ * start, so that no new predicate locks can be made on the index. Also,
* they must be moved before heap inserts stop maintaining the index, else
* the conflict with the predicate lock on the index gap could be missed
* before the lock on the heap relation is in place to detect a conflict
@@ -1386,11 +1386,11 @@ index_drop(Oid indexId, bool concurrent)
{
/*
* We must commit our transaction in order to make the first pg_index
- * state update visible to other sessions. If the DROP machinery has
+ * state update visible to other sessions. If the DROP machinery has
* already performed any other actions (removal of other objects,
* pg_depend entries, etc), the commit would make those actions
* permanent, which would leave us with inconsistent catalog state if
- * we fail partway through the following sequence. Since DROP INDEX
+ * we fail partway through the following sequence. Since DROP INDEX
* CONCURRENTLY is restricted to dropping just one index that has no
* dependencies, we should get here before anything's been done ---
* but let's check that to be sure. We can verify that the current
@@ -1426,7 +1426,7 @@ index_drop(Oid indexId, bool concurrent)
* We must commit our current transaction so that the indisvalid
* update becomes visible to other transactions; then start another.
* Note that any previously-built data structures are lost in the
- * commit. The only data we keep past here are the relation IDs.
+ * commit. The only data we keep past here are the relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish. This
@@ -1443,10 +1443,10 @@ index_drop(Oid indexId, bool concurrent)
/*
* Now we must wait until no running transaction could be using the
* index for a query. Use AccessExclusiveLock here to check for
- * running transactions that hold locks of any kind on the table.
- * Note we do not need to worry about xacts that open the table for
- * reading after this point; they will see the index as invalid when
- * they open the relation.
+ * running transactions that hold locks of any kind on the table. Note
+ * we do not need to worry about xacts that open the table for reading
+ * after this point; they will see the index as invalid when they open
+ * the relation.
*
* Note: the reason we use actual lock acquisition here, rather than
* just checking the ProcArray and sleeping, is that deadlock is
@@ -1468,7 +1468,7 @@ index_drop(Oid indexId, bool concurrent)
/*
* Now we are sure that nobody uses the index for queries; they just
- * might have it open for updating it. So now we can unset indisready
+ * might have it open for updating it. So now we can unset indisready
* and indislive, then wait till nobody could be using it at all
* anymore.
*/
@@ -1599,7 +1599,7 @@ index_drop(Oid indexId, bool concurrent)
*
* IndexInfo stores the information about the index that's needed by
* FormIndexDatum, which is used for both index_build() and later insertion
- * of individual index tuples. Normally we build an IndexInfo for an index
+ * of individual index tuples. Normally we build an IndexInfo for an index
* just once per command, and then use it for (potentially) many tuples.
* ----------------
*/
@@ -1669,7 +1669,7 @@ BuildIndexInfo(Relation index)
* context must point to the heap tuple passed in.
*
* Notice we don't actually call index_form_tuple() here; we just prepare
- * its input arrays values[] and isnull[]. This is because the index AM
+ * its input arrays values[] and isnull[]. This is because the index AM
* may wish to alter the data before storage.
* ----------------
*/
@@ -1735,7 +1735,7 @@ FormIndexDatum(IndexInfo *indexInfo,
* index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX
*
* This routine updates the pg_class row of either an index or its parent
- * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
+ * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
* to ensure we can do all the necessary work in just one update.
*
* hasindex: set relhasindex to this value
@@ -1747,7 +1747,7 @@ FormIndexDatum(IndexInfo *indexInfo,
*
* NOTE: an important side-effect of this operation is that an SI invalidation
* message is sent out to all backends --- including me --- causing relcache
- * entries to be flushed or updated with the new data. This must happen even
+ * entries to be flushed or updated with the new data. This must happen even
* if we find that no change is needed in the pg_class row. When updating
* a heap entry, this ensures that other backends find out about the new
* index. When updating an index, it's important because some index AMs
@@ -1786,13 +1786,13 @@ index_update_stats(Relation rel,
* 4. Even with just a single CREATE INDEX, there's a risk factor because
* someone else might be trying to open the rel while we commit, and this
* creates a race condition as to whether he will see both or neither of
- * the pg_class row versions as valid. Again, a non-transactional update
+ * the pg_class row versions as valid. Again, a non-transactional update
* avoids the risk. It is indeterminate which state of the row the other
* process will see, but it doesn't matter (if he's only taking
* AccessShareLock, then it's not critical that he see relhasindex true).
*
* It is safe to use a non-transactional update even though our
- * transaction could still fail before committing. Setting relhasindex
+ * transaction could still fail before committing. Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
* it), likewise for relhaspkey. And of course the new relpages and
* reltuples counts are correct regardless. However, we don't want to
@@ -1804,7 +1804,7 @@ index_update_stats(Relation rel,
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * Make a copy of the tuple to update. Normally we use the syscache, but
+ * Make a copy of the tuple to update. Normally we use the syscache, but
* we can't rely on that during bootstrap or while reindexing pg_class
* itself.
*/
@@ -1903,7 +1903,7 @@ index_update_stats(Relation rel,
* index_build - invoke access-method-specific index build procedure
*
* On entry, the index's catalog entries are valid, and its physical disk
- * file has been created but is empty. We call the AM-specific build
+ * file has been created but is empty. We call the AM-specific build
* procedure to fill in the index contents. We then update the pg_class
* entries of the index and heap relation as needed, using statistics
* returned by ambuild as well as data passed by the caller.
@@ -2001,7 +2001,7 @@ index_build(Relation heapRelation,
* Therefore, this code path can only be taken during non-concurrent
* CREATE INDEX. Thus the fact that heap_update will set the pg_index
* tuple's xmin doesn't matter, because that tuple was created in the
- * current transaction anyway. That also means we don't need to worry
+ * current transaction anyway. That also means we don't need to worry
* about any concurrent readers of the tuple; no other transaction can see
* it yet.
*/
@@ -2050,7 +2050,7 @@ index_build(Relation heapRelation,
/*
* If it's for an exclusion constraint, make a second pass over the heap
- * to verify that the constraint is satisfied. We must not do this until
+ * to verify that the constraint is satisfied. We must not do this until
* the index is fully valid. (Broken HOT chains shouldn't matter, though;
* see comments for IndexCheckExclusion.)
*/
@@ -2075,8 +2075,8 @@ index_build(Relation heapRelation,
* things to add it to the new index. After we return, the AM's index
* build procedure does whatever cleanup it needs.
*
- * The total count of heap tuples is returned. This is for updating pg_class
- * statistics. (It's annoying not to be able to do that here, but we want
+ * The total count of heap tuples is returned. This is for updating pg_class
+ * statistics. (It's annoying not to be able to do that here, but we want
* to merge that update with others; see index_update_stats.) Note that the
* index AM itself must keep track of the number of index tuples; we don't do
* so here because the AM might reject some of the tuples for its own reasons,
@@ -2126,7 +2126,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2251,7 +2251,7 @@ IndexBuildHeapScan(Relation heapRelation,
* building it, and may need to see such tuples.)
*
* However, if it was HOT-updated then we must only index
- * the live tuple at the end of the HOT-chain. Since this
+ * the live tuple at the end of the HOT-chain. Since this
* breaks semantics for pre-existing snapshots, mark the
* index as unusable for them.
*/
@@ -2271,7 +2271,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* Since caller should hold ShareLock or better, normally
* the only way to see this is if it was inserted earlier
- * in our own transaction. However, it can happen in
+ * in our own transaction. However, it can happen in
* system catalogs, since we tend to release write lock
* before commit there. Give a warning if neither case
* applies.
@@ -2426,7 +2426,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* You'd think we should go ahead and build the index tuple here, but
- * some index AMs want to do further processing on the data first. So
+ * some index AMs want to do further processing on the data first. So
* pass the values[] and isnull[] arrays, instead.
*/
@@ -2517,7 +2517,7 @@ IndexCheckExclusion(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2597,11 +2597,11 @@ IndexCheckExclusion(Relation heapRelation,
* We do a concurrent index build by first inserting the catalog entry for the
* index via index_create(), marking it not indisready and not indisvalid.
* Then we commit our transaction and start a new one, then we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* honor its constraints on HOT updates; so while existing HOT-chains might
* be broken with respect to the index, no currently live tuple will have an
- * incompatible HOT update done to it. We now build the index normally via
+ * incompatible HOT update done to it. We now build the index normally via
* index_build(), while holding a weak lock that allows concurrent
* insert/update/delete. Also, we index only tuples that are valid
* as of the start of the scan (see IndexBuildHeapScan), whereas a normal
@@ -2615,13 +2615,13 @@ IndexCheckExclusion(Relation heapRelation,
*
* Next, we mark the index "indisready" (but still not "indisvalid") and
* commit the second transaction and start a third. Again we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* insert their new tuples into it. We then take a new reference snapshot
* which is passed to validate_index(). Any tuples that are valid according
* to this snap, but are not in the index, must be added to the index.
* (Any tuples committed live after the snap will be inserted into the
- * index by their originating transaction. Any tuples committed dead before
+ * index by their originating transaction. Any tuples committed dead before
* the snap need not be indexed, because we will wait out all transactions
* that might care about them before we mark the index valid.)
*
@@ -2630,7 +2630,7 @@ IndexCheckExclusion(Relation heapRelation,
* ever say "delete it". (This should be faster than a plain indexscan;
* also, not all index AMs support full-index indexscan.) Then we sort the
* TIDs, and finally scan the table doing a "merge join" against the TID list
- * to see which tuples are missing from the index. Thus we will ensure that
+ * to see which tuples are missing from the index. Thus we will ensure that
* all tuples valid according to the reference snapshot are in the index.
*
* Building a unique index this way is tricky: we might try to insert a
@@ -2646,7 +2646,7 @@ IndexCheckExclusion(Relation heapRelation,
* were alive at the time of the reference snapshot are gone; this is
* necessary to be sure there are none left with a transaction snapshot
* older than the reference (and hence possibly able to see tuples we did
- * not index). Then we mark the index "indisvalid" and commit. Subsequent
+ * not index). Then we mark the index "indisvalid" and commit. Subsequent
* transactions will be able to use it for queries.
*
* Doing two full table scans is a brute-force strategy. We could try to be
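[Editor's note: the "merge join" step from the validate_index() narrative above, sketched with plain integers standing in for sorted ItemPointer TIDs. Because both streams are sorted, one linear pass finds every heap tuple the index is missing.]

#include <stdio.h>

int main(void)
{
    int heap_tids[]  = {1, 2, 3, 5, 8, 9};  /* tuples valid per the snapshot */
    int index_tids[] = {1, 3, 5, 9};        /* TIDs already in the index */
    int nh = 6, ni = 4;
    int i = 0;

    for (int h = 0; h < nh; h++)
    {
        while (i < ni && index_tids[i] < heap_tids[h])
            i++;
        if (i >= ni || index_tids[i] != heap_tids[h])
            printf("insert TID %d into index\n", heap_tids[h]);
    }
    return 0;
}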
@@ -2672,7 +2672,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
indexRelation = index_open(indexId, RowExclusiveLock);
/*
- * Fetch info needed for index_insert. (You might think this should be
+ * Fetch info needed for index_insert. (You might think this should be
* passed in from DefineIndex, but its copy is long gone due to having
* been built in a previous transaction.)
*/
@@ -2789,7 +2789,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2838,7 +2838,7 @@ validate_index_heapscan(Relation heapRelation,
* visit the live tuples in order by their offsets, but the root
* offsets that we need to compare against the index contents might be
* ordered differently. So we might have to "look back" within the
- * tuplesort output, but only within the current page. We handle that
+ * tuplesort output, but only within the current page. We handle that
* by keeping a bool array in_index[] showing all the
* already-passed-over tuplesort output TIDs of the current page. We
* clear that array here, when advancing onto a new heap page.
@@ -2923,7 +2923,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* For the current heap tuple, extract all the attributes we use
- * in this index, and note which are null. This also performs
+ * in this index, and note which are null. This also performs
* evaluation of any expressions needed.
*/
FormIndexDatum(indexInfo,
@@ -2945,7 +2945,7 @@ validate_index_heapscan(Relation heapRelation,
* for a uniqueness check on the whole HOT-chain. That is, the
* tuple we have here could be dead because it was already
* HOT-updated, and if so the updating transaction will not have
- * thought it should insert index entries. The index AM will
+ * thought it should insert index entries. The index AM will
* check the whole HOT-chain and correctly detect a conflict if
* there is one.
*/
@@ -3068,7 +3068,7 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action)
/*
* IndexGetRelation: given an index's relation OID, get the OID of the
- * relation it is an index on. Uses the system cache.
+ * relation it is an index on. Uses the system cache.
*/
Oid
IndexGetRelation(Oid indexId, bool missing_ok)
@@ -3105,7 +3105,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
volatile bool skipped_constraint = false;
/*
- * Open and lock the parent heap relation. ShareLock is sufficient since
+ * Open and lock the parent heap relation. ShareLock is sufficient since
* we only need to be sure no schema or data changes are going on.
*/
heapId = IndexGetRelation(indexId, false);
@@ -3193,7 +3193,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
* chains, we had better force indcheckxmin true, because the normal
* argument that the HOT chains couldn't conflict with the index is
* suspect for an invalid index. (A conflict is definitely possible if
- * the index was dead. It probably shouldn't happen otherwise, but let's
+ * the index was dead. It probably shouldn't happen otherwise, but let's
* be conservative.) In this case advancing the usability horizon is
* appropriate.
*
@@ -3277,7 +3277,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
* the data in a manner that risks a change in constraint validity.
*
* Returns true if any indexes were rebuilt (including toast table's index
- * when relevant). Note that a CommandCounterIncrement will occur after each
+ * when relevant). Note that a CommandCounterIncrement will occur after each
* index rebuild.
*/
bool
@@ -3290,7 +3290,7 @@ reindex_relation(Oid relid, int flags)
bool result;
/*
- * Open and lock the relation. ShareLock is sufficient since we only need
+ * Open and lock the relation. ShareLock is sufficient since we only need
* to prevent schema and data changes in it. The lock level used here
* should match ReindexTable().
*/
@@ -3309,7 +3309,7 @@ reindex_relation(Oid relid, int flags)
* reindex_index will attempt to update the pg_class rows for the relation
* and index. If we are processing pg_class itself, we want to make sure
* that the updates do not try to insert index entries into indexes we
- * have not processed yet. (When we are trying to recover from corrupted
+ * have not processed yet. (When we are trying to recover from corrupted
* indexes, that could easily cause a crash.) We can accomplish this
* because CatalogUpdateIndexes will use the relcache's index list to know
* which indexes to update. We just force the index list to be only the
@@ -3318,7 +3318,7 @@ reindex_relation(Oid relid, int flags)
* It is okay to not insert entries into the indexes we have not processed
* yet because all of this is transaction-safe. If we fail partway
* through, the updated rows are dead and it doesn't matter whether they
- * have index entries. Also, a new pg_class index will be created with a
+ * have index entries. Also, a new pg_class index will be created with a
* correct entry for its own pg_class row because we do
* RelationSetNewRelfilenode() before we do index_build().
*
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index 4bf412fb0b..05aa56e859 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -149,7 +149,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple)
* CatalogUpdateIndexes - do all the indexing work for a new catalog tuple
*
* This is a convenience routine for the common case where we only need
- * to insert or update a single tuple in a system catalog. Avoid using it for
+ * to insert or update a single tuple in a system catalog. Avoid using it for
* multiple tuples, since opening the indexes and building the index info
* structures is moderately expensive.
*/
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 5bf6d289d8..89df585b87 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -66,10 +66,10 @@
* when we are obeying an override search path spec that says not to use the
* temp namespace, or the temp namespace is included in the explicit list.)
*
- * 2. The system catalog namespace is always searched. If the system
+ * 2. The system catalog namespace is always searched. If the system
* namespace is present in the explicit path then it will be searched in
* the specified order; otherwise it will be searched after TEMP tables and
- * *before* the explicit list. (It might seem that the system namespace
+ * *before* the explicit list. (It might seem that the system namespace
* should be implicitly last, but this behavior appears to be required by
* SQL99. Also, this provides a way to search the system namespace first
* without thereby making it the default creation target namespace.)
@@ -87,7 +87,7 @@
* to refer to the current backend's temp namespace. This is usually also
* ignorable if the temp namespace hasn't been set up, but there's a special
* case: if "pg_temp" appears first then it should be the default creation
- * target. We kluge this case a little bit so that the temp namespace isn't
+ * target. We kluge this case a little bit so that the temp namespace isn't
* set up until the first attempt to create something in it. (The reason for
* klugery is that we can't create the temp namespace outside a transaction,
* but initial GUC processing of search_path happens outside a transaction.)
@@ -98,7 +98,7 @@
* In bootstrap mode, the search path is set equal to "pg_catalog", so that
* the system namespace is the only one searched or inserted into.
* initdb is also careful to set search_path to "pg_catalog" for its
- * post-bootstrap standalone backend runs. Otherwise the default search
+ * post-bootstrap standalone backend runs. Otherwise the default search
* path is determined by GUC. The factory default path contains the PUBLIC
* namespace (if it exists), preceded by the user's personal namespace
* (if one exists).
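[Editor's note: a toy assembly of the effective search order per the rules above — the temp namespace first unless listed explicitly, then pg_catalog unless listed explicitly, then the explicit list. It prints names instead of resolving OIDs, and skips the permission and existence checks the real code performs.]

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *explicit_path[] = {"myschema", "public"};
    int n = 2;
    int has_temp = 0, has_catalog = 0;

    for (int i = 0; i < n; i++)
    {
        if (strcmp(explicit_path[i], "pg_temp") == 0)
            has_temp = 1;
        if (strcmp(explicit_path[i], "pg_catalog") == 0)
            has_catalog = 1;
    }

    if (!has_temp)
        printf("pg_temp ");      /* searched implicitly, first */
    if (!has_catalog)
        printf("pg_catalog ");   /* implicitly before the explicit list */
    for (int i = 0; i < n; i++)
        printf("%s ", explicit_path[i]);
    printf("\n");
    return 0;
}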
@@ -162,13 +162,13 @@ static List *overrideStack = NIL;
/*
* myTempNamespace is InvalidOid until and unless a TEMP namespace is set up
* in a particular backend session (this happens when a CREATE TEMP TABLE
- * command is first executed). Thereafter it's the OID of the temp namespace.
+ * command is first executed). Thereafter it's the OID of the temp namespace.
*
* myTempToastNamespace is the OID of the namespace for my temp tables' toast
- * tables. It is set when myTempNamespace is, and is InvalidOid before that.
+ * tables. It is set when myTempNamespace is, and is InvalidOid before that.
*
* myTempNamespaceSubID shows whether we've created the TEMP namespace in the
- * current subtransaction. The flag propagates up the subtransaction tree,
+ * current subtransaction. The flag propagates up the subtransaction tree,
* so the main transaction will correctly recognize the flag if all
* intermediate subtransactions commit. When it is InvalidSubTransactionId,
* we either haven't made the TEMP namespace yet, or have successfully
@@ -250,7 +250,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
}
/*
- * DDL operations can change the results of a name lookup. Since all such
+ * DDL operations can change the results of a name lookup. Since all such
* operations will generate invalidation messages, we keep track of
* whether any such messages show up while we're performing the operation,
* and retry until either (1) no more invalidation messages show up or (2)
@@ -259,7 +259,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
* But if lockmode = NoLock, then we assume that either the caller is OK
* with the answer changing under them, or that they already hold some
* appropriate lock, and therefore return the first answer we get without
- * checking for invalidation messages. Also, if the requested lock is
+ * checking for invalidation messages. Also, if the requested lock is
* already held, LockRelationOid will not AcceptInvalidationMessages,
* so we may fail to notice a change. We could protect against that case
* by calling AcceptInvalidationMessages() before beginning this loop, but
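[Editor's note: the invalidation-counted retry loop this comment describes, reduced to a skeleton. The helpers are hypothetical stubs; in the server, locking the relation is what processes pending invalidation messages, and the loop repeats the lookup until no message arrived in between.]

#include <stdint.h>
#include <stdio.h>

typedef uint32_t Oid;

static uint64_t inval_count;    /* bumped whenever an invalidation arrives */

static Oid  lookup_name(const char *name) { (void) name; return 16384; }
static void accept_invalidation_messages(void) { /* may bump inval_count */ }
static void lock_relation(Oid relid) { (void) relid; accept_invalidation_messages(); }

static Oid lookup_and_lock(const char *name)
{
    for (;;)
    {
        uint64_t count_before = inval_count;
        Oid relid = lookup_name(name);

        lock_relation(relid);           /* processes invalidations */
        if (inval_count == count_before)
            return relid;               /* mapping still current */
        /* else: something may have changed; repeat the name lookup */
    }
}

int main(void) { printf("%u\n", lookup_and_lock("mytable")); return 0; }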
@@ -396,7 +396,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
break;
/*
- * Something may have changed. Let's repeat the name lookup, to make
+ * Something may have changed. Let's repeat the name lookup, to make
* sure this name still references the same relation it did
* previously.
*/
@@ -869,7 +869,7 @@ TypeIsVisible(Oid typid)
* and the returned nvargs will always be zero.
*
* If expand_defaults is true, functions that could match after insertion of
- * default argument values will also be retrieved. In this case the returned
+ * default argument values will also be retrieved. In this case the returned
* structs could have nargs > passed-in nargs, and ndargs is set to the number
* of additional args (which can be retrieved from the function's
* proargdefaults entry).
@@ -1032,7 +1032,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
* Call uses positional notation
*
* Check if function is variadic, and get variadic element type if
- * so. If expand_variadic is false, we should just ignore
+ * so. If expand_variadic is false, we should just ignore
* variadic-ness.
*/
if (pronargs <= nargs && expand_variadic)
@@ -1162,7 +1162,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
if (prevResult)
{
/*
- * We have a match with a previous result. Decide which one
+ * We have a match with a previous result. Decide which one
* to keep, or mark it ambiguous if we can't decide. The
* logic here is preference > 0 means prefer the old result,
* preference < 0 means prefer the new, preference = 0 means
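[Editor's note: the sign convention from the comment above, in a toy tie-break function. The specific criteria here (prefer a non-variadic match, then fewer defaulted arguments) are a plausible reading of FuncnameGetCandidates, not a transcription of it; only the >0 / <0 / 0 convention is taken from the text.]

#include <stdio.h>

typedef struct Candidate
{
    int nvargs;     /* matched via variadic expansion? */
    int ndargs;     /* number of defaulted arguments used */
} Candidate;

/* > 0 keeps the old candidate, < 0 keeps the new, 0 is ambiguous. */
static int preference(const Candidate *oldc, const Candidate *newc)
{
    if (oldc->nvargs != newc->nvargs)
        return oldc->nvargs ? -1 : 1;   /* prefer the non-variadic match */
    if (oldc->ndargs != newc->ndargs)
        return (oldc->ndargs < newc->ndargs) ? 1 : -1;  /* fewer defaults */
    return 0;
}

int main(void)
{
    Candidate a = {0, 1}, b = {0, 0};

    printf("%d\n", preference(&a, &b)); /* -1: keep the new candidate */
    return 0;
}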
@@ -1553,7 +1553,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright)
* identical entries in later namespaces.
*
* The returned items always have two args[] entries --- one or the other
- * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
+ * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
*/
FuncCandidateList
OpernameGetCandidates(List *names, char oprkind, bool missing_schema_ok)
@@ -2536,7 +2536,7 @@ get_ts_config_oid(List *names, bool missing_ok)
/*
* TSConfigIsVisible
* Determine whether a text search configuration (identified by OID)
- * is visible in the current search path. Visible means "would be found
+ * is visible in the current search path. Visible means "would be found
* by searching for the unqualified text search configuration name".
*/
bool
@@ -2855,7 +2855,7 @@ QualifiedNameGetCreationNamespace(List *names, char **objname_p)
/*
* get_namespace_oid - given a namespace name, look up the OID
*
- * If missing_ok is false, throw an error if namespace name not found. If
+ * If missing_ok is false, throw an error if namespace name not found. If
* true, just return InvalidOid.
*/
Oid
@@ -3070,7 +3070,7 @@ GetTempNamespaceBackendId(Oid namespaceId)
/*
* GetTempToastNamespace - get the OID of my temporary-toast-table namespace,
- * which must already be assigned. (This is only used when creating a toast
+ * which must already be assigned. (This is only used when creating a toast
* table for a temp table, so we must have already done InitTempTableNamespace)
*/
Oid
@@ -3168,8 +3168,8 @@ OverrideSearchPathMatchesCurrent(OverrideSearchPath *path)
*
* It's possible that newpath->useTemp is set but there is no longer any
* active temp namespace, if the path was saved during a transaction that
- * created a temp namespace and was later rolled back. In that case we just
- * ignore useTemp. A plausible alternative would be to create a new temp
+ * created a temp namespace and was later rolled back. In that case we just
+ * ignore useTemp. A plausible alternative would be to create a new temp
* namespace, but for existing callers that's not necessary because an empty
* temp namespace wouldn't affect their results anyway.
*
@@ -3202,7 +3202,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -3525,7 +3525,7 @@ recomputeNamespacePath(void)
}
/*
- * Remember the first member of the explicit list. (Note: this is
+ * Remember the first member of the explicit list. (Note: this is
* nominally wrong if temp_missing, but we need it anyway to distinguish
* explicit from implicit mention of pg_catalog.)
*/
@@ -3535,7 +3535,7 @@ recomputeNamespacePath(void)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -3590,7 +3590,7 @@ InitTempTableNamespace(void)
/*
* First, do permission check to see if we are authorized to make temp
- * tables. We use a nonstandard error message here since "databasename:
+ * tables. We use a nonstandard error message here since "databasename:
* permission denied" might be a tad cryptic.
*
* Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask;
@@ -3609,9 +3609,9 @@ InitTempTableNamespace(void)
* Do not allow a Hot Standby slave session to make temp tables. Aside
* from problems with modifying the system catalogs, there is a naming
* conflict: pg_temp_N belongs to the session with BackendId N on the
- * master, not to a slave session with the same BackendId. We should not
+ * master, not to a slave session with the same BackendId. We should not
* be able to get here anyway due to XactReadOnly checks, but let's just
- * make real sure. Note that this also backstops various operations that
+ * make real sure. Note that this also backstops various operations that
* allow XactReadOnly transactions to modify temp tables; they'd need
* RecoveryInProgress checks if not for this.
*/
@@ -3967,7 +3967,7 @@ fetch_search_path(bool includeImplicit)
/*
* If the temp namespace should be first, force it to exist. This is so
* that callers can trust the result to reflect the actual default
- * creation namespace. It's a bit bogus to do this here, since
+ * creation namespace. It's a bit bogus to do this here, since
* current_schema() is supposedly a stable function without side-effects,
* but the alternatives seem worse.
*/
@@ -3989,7 +3989,7 @@ fetch_search_path(bool includeImplicit)
/*
* Fetch the active search path into a caller-allocated array of OIDs.
- * Returns the number of path entries. (If this is more than sarray_len,
+ * Returns the number of path entries. (If this is more than sarray_len,
* then the data didn't fit and is not all stored.)
*
* The returned list always includes the implicitly-prepended namespaces,
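
As an aside, the convention described above (fill a caller-allocated array,
return the count that would have been needed) lets callers detect truncation
and retry with a bigger buffer. A minimal standalone sketch of that contract,
with made-up names and OIDs rather than the backend's:

#include <stdio.h>

typedef unsigned int Oid;

/* Fill sarray with up to sarray_len entries; return the full count. */
static int
fetch_ids(Oid *sarray, int sarray_len)
{
	static const Oid path[] = {11, 2200, 16384};	/* made-up OIDs */
	int			n = (int) (sizeof(path) / sizeof(path[0]));

	for (int i = 0; i < n && i < sarray_len; i++)
		sarray[i] = path[i];
	return n;				/* may exceed sarray_len: data didn't all fit */
}

int
main(void)
{
	Oid			buf[2];
	int			n = fetch_ids(buf, 2);

	if (n > 2)
		printf("needed %d slots, only stored 2\n", n);
	return 0;
}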
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 2b837a99c1..c7c8f4b1a3 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -467,7 +467,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid);
* drop operation.
*
* Note: If the object is not found, we don't give any indication of the
- * reason. (It might have been a missing schema if the name was qualified, or
+ * reason. (It might have been a missing schema if the name was qualified, or
* a nonexistent type name in case of a cast, function or operator; etc).
* Currently there is only one caller that might be interested in such info, so
* we don't spend much effort here. If more callers start to care, it might be
@@ -665,7 +665,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
/*
* If we're dealing with a relation or attribute, then the relation is
- * already locked. Otherwise, we lock it now.
+ * already locked. Otherwise, we lock it now.
*/
if (address.classId != RelationRelationId)
{
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index d99c2e5eda..1ad923ca6c 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -152,10 +152,10 @@ AggregateCreate(const char *aggName,
errdetail("An aggregate using a polymorphic transition type must have at least one polymorphic argument.")));
/*
- * An ordered-set aggregate that is VARIADIC must be VARIADIC ANY. In
+ * An ordered-set aggregate that is VARIADIC must be VARIADIC ANY. In
* principle we could support regular variadic types, but it would make
* things much more complicated because we'd have to assemble the correct
- * subsets of arguments into array values. Since no standard aggregates
+ * subsets of arguments into array values. Since no standard aggregates
* have use for such a case, we aren't bothering for now.
*/
if (AGGKIND_IS_ORDERED_SET(aggKind) && OidIsValid(variadicArgType) &&
@@ -167,7 +167,7 @@ AggregateCreate(const char *aggName,
/*
* If it's a hypothetical-set aggregate, there must be at least as many
* direct arguments as aggregated ones, and the last N direct arguments
- * must match the aggregated ones in type. (We have to check this again
+ * must match the aggregated ones in type. (We have to check this again
* when the aggregate is called, in case ANY is involved, but it makes
* sense to reject the aggregate definition now if the declared arg types
* don't match up.) It's unconditionally OK if numDirectArgs == numArgs,
diff --git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c
index fb94705121..434dbce97f 100644
--- a/src/backend/catalog/pg_collation.c
+++ b/src/backend/catalog/pg_collation.c
@@ -78,7 +78,7 @@ CollationCreate(const char *collname, Oid collnamespace,
collname, pg_encoding_to_char(collencoding))));
/*
- * Also forbid matching an any-encoding entry. This test of course is not
+ * Also forbid matching an any-encoding entry. This test of course is not
* backed up by the unique index, but it's not a problem since we don't
* support adding any-encoding entries after initdb.
*/
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 5fd9822c6e..041f5ad686 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -38,7 +38,7 @@
* Create a constraint table entry.
*
* Subsidiary records (such as triggers or indexes to implement the
- * constraint) are *not* created here. But we do make dependency links
+ * constraint) are *not* created here. But we do make dependency links
* from the constraint to the things it depends on.
*/
Oid
@@ -305,7 +305,7 @@ CreateConstraintEntry(const char *constraintName,
{
/*
* Register normal dependency on the unique index that supports a
- * foreign-key constraint. (Note: for indexes associated with unique
+ * foreign-key constraint. (Note: for indexes associated with unique
* or primary-key constraints, the dependency runs the other way, and
* is not made here.)
*/
@@ -759,7 +759,7 @@ void
get_constraint_relation_oids(Oid constraint_oid, Oid *conrelid, Oid *confrelid)
{
HeapTuple tup;
- Form_pg_constraint con;
+ Form_pg_constraint con;
tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraint_oid));
if (!HeapTupleIsValid(tup)) /* should not happen */
@@ -895,10 +895,10 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok)
* the rel of interest are Vars with the indicated varno/varlevelsup.
*
* Currently we only check to see if the rel has a primary key that is a
- * subset of the grouping_columns. We could also use plain unique constraints
+ * subset of the grouping_columns. We could also use plain unique constraints
* if all their columns are known not null, but there's a problem: we need
* to be able to represent the not-null-ness as part of the constraints added
- * to *constraintDeps. FIXME whenever not-null constraints get represented
+ * to *constraintDeps. FIXME whenever not-null constraints get represented
* in pg_constraint.
*/
bool
diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c
index 9f9bbe2074..3e73e0f45b 100644
--- a/src/backend/catalog/pg_db_role_setting.c
+++ b/src/backend/catalog/pg_db_role_setting.c
@@ -172,7 +172,7 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt)
/*
* Drop some settings from the catalog. These can be for a particular
- * database, or for a particular role. (It is of course possible to do both
+ * database, or for a particular role. (It is of course possible to do both
* too, but it doesn't make sense for current uses.)
*/
void
diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c
index fabc51c35c..7b2d0a7649 100644
--- a/src/backend/catalog/pg_depend.c
+++ b/src/backend/catalog/pg_depend.c
@@ -50,7 +50,7 @@ recordDependencyOn(const ObjectAddress *depender,
/*
* Record multiple dependencies (of the same kind) for a single dependent
- * object. This has a little less overhead than recording each separately.
+ * object. This has a little less overhead than recording each separately.
*/
void
recordMultipleDependencies(const ObjectAddress *depender,
@@ -127,7 +127,7 @@ recordMultipleDependencies(const ObjectAddress *depender,
/*
* If we are executing a CREATE EXTENSION operation, mark the given object
- * as being a member of the extension. Otherwise, do nothing.
+ * as being a member of the extension. Otherwise, do nothing.
*
* This must be called during creation of any user-definable object type
* that could be a member of an extension.
@@ -186,7 +186,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object,
* (possibly with some differences from before).
*
* If skipExtensionDeps is true, we do not delete any dependencies that
- * show that the given object is a member of an extension. This avoids
+ * show that the given object is a member of an extension. This avoids
* needing a lot of extra logic to fetch and recreate that dependency.
*/
long
@@ -492,7 +492,7 @@ getExtensionOfObject(Oid classId, Oid objectId)
* Detect whether a sequence is marked as "owned" by a column
*
* An ownership marker is an AUTO dependency from the sequence to the
- * column. If we find one, store the identity of the owning column
+ * column. If we find one, store the identity of the owning column
* into *tableId and *colId and return TRUE; else return FALSE.
*
* Note: if there's more than one such pg_depend entry then you get
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 4168c0e84a..b4f2051749 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -465,7 +465,7 @@ restart:
* We avoid doing this unless absolutely necessary; in most installations
* it will never happen. The reason is that updating existing pg_enum
* entries creates hazards for other backends that are concurrently reading
- * pg_enum. Although system catalog scans now use MVCC semantics, the
+ * pg_enum. Although system catalog scans now use MVCC semantics, the
* syscache machinery might read different pg_enum entries under different
* snapshots, so some other backend might get confused about the proper
* ordering if a concurrent renumbering occurs.
diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c
index ed2a41bfd8..a54bc1b1fa 100644
--- a/src/backend/catalog/pg_largeobject.c
+++ b/src/backend/catalog/pg_largeobject.c
@@ -76,7 +76,7 @@ LargeObjectCreate(Oid loid)
}
/*
- * Drop a large object having the given LO identifier. Both the data pages
+ * Drop a large object having the given LO identifier. Both the data pages
* and metadata must be dropped.
*/
void
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 8faa015276..9a3e20a7ae 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -315,7 +315,7 @@ OperatorShellMake(const char *operatorName,
* specify operators that do not exist. For example, if operator
* "op" is being defined, the negator operator "negop" and the
* commutator "commop" can also be defined without specifying
- * any information other than their names. Since in order to
+ * any information other than their names. Since in order to
* add "op" to the PG_OPERATOR catalog, all the Oid's for these
* operators must be placed in the fields of "op", a forward
* declaration is done on the commutator and negator operators.
@@ -433,7 +433,7 @@ OperatorCreate(const char *operatorName,
operatorName);
/*
- * Set up the other operators. If they do not currently exist, create
+ * Set up the other operators. If they do not currently exist, create
* shells in order to get ObjectId's.
*/
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index abf2f497e4..0fa331ad18 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -229,7 +229,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
- * is polymorphic. ANYRANGE return type is even stricter: must have an
+ * is polymorphic. ANYRANGE return type is even stricter: must have an
* ANYRANGE input (since we can't deduce the specific range type from
* ANYELEMENT). Also, do not allow return type INTERNAL unless at least
* one input argument is INTERNAL.
@@ -676,7 +676,7 @@ ProcedureCreate(const char *procedureName,
/*
* Set per-function configuration parameters so that the validation is
- * done with the environment the function expects. However, if
+ * done with the environment the function expects. However, if
* check_function_bodies is off, we don't do this, because that would
* create dump ordering hazards that pg_dump doesn't know how to deal
* with. (For example, a SET clause might refer to a not-yet-created
@@ -948,7 +948,7 @@ sql_function_parse_error_callback(void *arg)
/*
* Adjust a syntax error occurring inside the function body of a CREATE
- * FUNCTION or DO command. This can be used by any function validator or
+ * FUNCTION or DO command. This can be used by any function validator or
* anonymous-block handler, not only for SQL-language functions.
* It is assumed that the syntax error position is initially relative to the
* function body string (as passed in). If possible, we adjust the position
@@ -1081,7 +1081,7 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
/*
* This implementation handles backslashes and doubled quotes in the
- * string literal. It does not handle the SQL syntax for literals
+ * string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
* We do the comparison a character at a time, not a byte at a time, so
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 8942441dc5..7aa70fa3b2 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -167,7 +167,7 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner)
* shdepChangeDep
*
* Update shared dependency records to account for an updated referenced
- * object. This is an internal workhorse for operations such as changing
+ * object. This is an internal workhorse for operations such as changing
* an object's owner.
*
* There must be no more than one existing entry for the given dependent
@@ -316,7 +316,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
* was previously granted some rights to the object.
*
* This step is analogous to aclnewowner's removal of duplicate entries
- * in the ACL. We have to do it to handle this scenario:
+ * in the ACL. We have to do it to handle this scenario:
* A grants some rights on an object to B
* ALTER OWNER changes the object's owner to B
* ALTER OWNER changes the object's owner to C
@@ -402,9 +402,9 @@ getOidListDiff(Oid *list1, int *nlist1, Oid *list2, int *nlist2)
* and then insert or delete from pg_shdepend as appropriate.
*
* Note that we can't just insert all referenced roles blindly during GRANT,
- * because we would end up with duplicate registered dependencies. We could
+ * because we would end up with duplicate registered dependencies. We could
* check for existence of the tuples before inserting, but that seems to be
- * more expensive than what we are doing here. Likewise we can't just delete
+ * more expensive than what we are doing here. Likewise we can't just delete
* blindly during REVOKE, because the user may still have other privileges.
* It is also possible that REVOKE actually adds dependencies, due to
* instantiation of a formerly implicit default ACL (although at present,
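
The safe approach, then, is to diff the old and new lists of referenced roles
and touch only the entries unique to each side. A standalone sketch of that
set-difference over two sorted OID arrays (a simplified stand-in for
getOidListDiff; names and values are illustrative):

#include <stdio.h>

typedef unsigned int Oid;

/*
 * Remove the OIDs common to both sorted lists, leaving only the entries
 * unique to each side; *n1 and *n2 are updated to the new lengths.
 */
static void
oid_list_diff(Oid *list1, int *n1, Oid *list2, int *n2)
{
	int			i = 0, j = 0, k1 = 0, k2 = 0;

	while (i < *n1 && j < *n2)
	{
		if (list1[i] < list2[j])
			list1[k1++] = list1[i++];	/* only in list1: keep */
		else if (list1[i] > list2[j])
			list2[k2++] = list2[j++];	/* only in list2: keep */
		else
		{
			i++;						/* common: drop from both */
			j++;
		}
	}
	while (i < *n1)
		list1[k1++] = list1[i++];
	while (j < *n2)
		list2[k2++] = list2[j++];
	*n1 = k1;
	*n2 = k2;
}

int
main(void)
{
	Oid			oldroles[] = {10, 20, 30};
	Oid			newroles[] = {20, 40};
	int			n1 = 3, n2 = 2;

	oid_list_diff(oldroles, &n1, newroles, &n2);
	/* oldroles = {10, 30}: deps to delete; newroles = {40}: deps to insert */
	printf("%d to delete, %d to insert\n", n1, n2);
	return 0;
}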
@@ -535,7 +535,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings. The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
@@ -616,7 +616,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
bool stored = false;
/*
- * XXX this info is kept on a simple List. Maybe it's not good
+ * XXX this info is kept on a simple List. Maybe it's not good
* for performance, but using a hash table seems needlessly
* complex. The expected number of databases is not high anyway,
* I suppose.
@@ -853,7 +853,7 @@ shdepAddDependency(Relation sdepRel,
/*
* Make sure the object doesn't go away while we record the dependency on
- * it. DROP routines should lock the object exclusively before they check
+ * it. DROP routines should lock the object exclusively before they check
* shared dependencies.
*/
shdepLockAndCheckObject(refclassId, refobjId);
@@ -1004,7 +1004,7 @@ shdepLockAndCheckObject(Oid classId, Oid objectId)
/*
* Currently, this routine need not support any other shared
- * object types besides roles. If we wanted to record explicit
+ * object types besides roles. If we wanted to record explicit
* dependencies on databases or tablespaces, we'd need code along
* these lines:
*/
@@ -1150,7 +1150,7 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
/*
* shdepDropOwned
*
- * Drop the objects owned by any one of the given RoleIds. If a role has
+ * Drop the objects owned by any one of the given RoleIds. If a role has
* access to an object, the grant will be removed as well (but the object
* will not, of course).
*
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 8e0e65b721..f614915abf 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -394,7 +394,7 @@ TypeCreate(Oid newTypeOid,
if (HeapTupleIsValid(tup))
{
/*
- * check that the type is not already defined. It may exist as a
+ * check that the type is not already defined. It may exist as a
* shell type, however.
*/
if (((Form_pg_type) GETSTRUCT(tup))->typisdefined)
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 85df9a1092..c3b2f072e4 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -35,7 +35,7 @@
* that have been created or deleted in the current transaction. When
* a relation is created, we create the physical file immediately, but
* remember it so that we can delete the file again if the current
- * transaction is aborted. Conversely, a deletion request is NOT
+ * transaction is aborted. Conversely, a deletion request is NOT
* executed immediately, but is just entered in the list. When and if
* the transaction commits, we can delete the physical file.
*
@@ -344,7 +344,7 @@ smgrDoPendingDeletes(bool isCommit)
if (maxrels == 0)
{
maxrels = 8;
- srels = palloc(sizeof(SMgrRelation) * maxrels );
+ srels = palloc(sizeof(SMgrRelation) * maxrels);
}
else if (maxrels <= nrels)
{
@@ -378,7 +378,7 @@ smgrDoPendingDeletes(bool isCommit)
* *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
* If there are no relations to be deleted, *ptr is set to NULL.
*
- * Only non-temporary relations are included in the returned list. This is OK
+ * Only non-temporary relations are included in the returned list. This is OK
* because the list is used only in contexts where temporary relations don't
* matter: we're either writing to the two-phase state file (and transactions
* that have touched temp tables can't be prepared) or we're writing to xlog
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index 5275e4bfdb..bdfeb90dd1 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -36,9 +36,9 @@
Oid binary_upgrade_next_toast_pg_type_oid = InvalidOid;
static void CheckAndCreateToastTable(Oid relOid, Datum reloptions,
- LOCKMODE lockmode, bool check);
+ LOCKMODE lockmode, bool check);
static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
- Datum reloptions, LOCKMODE lockmode, bool check);
+ Datum reloptions, LOCKMODE lockmode, bool check);
static bool needs_toast_table(Relation rel);
@@ -106,7 +106,7 @@ BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid)
/* create_toast_table does all the work */
if (!create_toast_table(rel, toastOid, toastIndexOid, (Datum) 0,
- AccessExclusiveLock, false))
+ AccessExclusiveLock, false))
elog(ERROR, "\"%s\" does not require a toast table",
relName);
@@ -177,8 +177,8 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
return false;
/*
- * If requested check lockmode is sufficient. This is a cross check
- * in case of errors or conflicting decisions in earlier code.
+ * If requested, check lockmode is sufficient. This is a cross check in
+ * case of errors or conflicting decisions in earlier code.
*/
if (check && lockmode != AccessExclusiveLock)
elog(ERROR, "AccessExclusiveLock required to add toast table.");
@@ -362,7 +362,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
}
/*
- * Check to see whether the table needs a TOAST table. It does only if
+ * Check to see whether the table needs a TOAST table. It does only if
* (1) there are any toastable attributes, and (2) the maximum length
* of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
* create a toast table for something like "f1 varchar(20)".)
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index a73d709437..fcf86dd0d9 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -296,7 +296,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
*
* transtype can't be a pseudo-type, since we need to be able to store
* values of the transtype. However, we can allow polymorphic transtype
- * in some cases (AggregateCreate will check). Also, we allow "internal"
+ * in some cases (AggregateCreate will check). Also, we allow "internal"
* for functions that want to pass pointers to private data structures;
* but allow that only to superusers, since you could crash the system (or
* worse) by connecting up incompatible internal-using functions in an
@@ -317,7 +317,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
}
/*
- * If a moving-aggregate transtype is specified, look that up. Same
+ * If a moving-aggregate transtype is specified, look that up. Same
* restrictions as for transtype.
*/
if (mtransType)
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index a43457bb57..80c9743a0d 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -296,7 +296,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
}
/*
- * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
+ * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
* type, the function appropriate to that type is executed.
*/
Oid
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index a04adeaac7..c09ca7e6db 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -409,7 +409,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Open all indexes of the relation, and see if there are any analyzable
- * columns in the indexes. We do not analyze index columns if there was
+ * columns in the indexes. We do not analyze index columns if there was
* an explicit column list in the ANALYZE command, however. If we are
* doing a recursive scan, we don't want to touch the parent's indexes at
* all.
@@ -466,7 +466,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Determine how many rows we need to sample, using the worst case from
- * all analyzable columns. We use a lower bound of 100 rows to avoid
+ * all analyzable columns. We use a lower bound of 100 rows to avoid
* possible overflow in Vitter's algorithm. (Note: that will also be the
* target in the corner case where there are no analyzable columns.)
*/
@@ -501,7 +501,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
&totalrows, &totaldeadrows);
/*
- * Compute the statistics. Temporary results during the calculations for
+ * Compute the statistics. Temporary results during the calculations for
* each column are stored in a child context. The calc routines are
* responsible to make sure that whatever they store into the VacAttrStats
* structure is allocated in anl_context.
@@ -558,7 +558,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns. (If there are stats in
+ * previous statistics for the target columns. (If there are stats in
* pg_statistic for columns we didn't process, we leave them alone.)
*/
update_attstats(RelationGetRelid(onerel), inh,
@@ -610,7 +610,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
}
/*
- * Report ANALYZE to the stats collector, too. However, if doing
+ * Report ANALYZE to the stats collector, too. However, if doing
* inherited stats we shouldn't report, because the stats collector only
* tracks per-table stats.
*/
@@ -872,7 +872,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
return NULL;
/*
- * Create the VacAttrStats struct. Note that we only have a copy of the
+ * Create the VacAttrStats struct. Note that we only have a copy of the
* fixed fields of the pg_attribute tuple.
*/
stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
@@ -882,7 +882,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
/*
* When analyzing an expression index, believe the expression tree's type
* not the column datatype --- the latter might be the opckeytype storage
- * type of the opclass, which is not interesting for our purposes. (Note:
+ * type of the opclass, which is not interesting for our purposes. (Note:
* if we did anything with non-expression index columns, we'd need to
* figure out where to get the correct type info from, but for now that's
* not a problem.) It's not clear whether anyone will care about the
@@ -921,7 +921,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
}
/*
- * Call the type-specific typanalyze function. If none is specified, use
+ * Call the type-specific typanalyze function. If none is specified, use
* std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
@@ -997,7 +997,7 @@ BlockSampler_Next(BlockSampler bs)
* If we are to skip, we should advance t (hence decrease K), and
* repeat the same probabilistic test for the next block. The naive
* implementation thus requires an anl_random_fract() call for each block
- * number. But we can reduce this to one anl_random_fract() call per
+ * number. But we can reduce this to one anl_random_fract() call per
* selected block, by noting that each time the while-test succeeds,
* we can reinterpret V as a uniform random number in the range 0 to p.
* Therefore, instead of choosing a new V, we just adjust p to be
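
A standalone sketch of that skip trick: draw V once, then keep skipping while
V < p, folding each successive block's conditional not-selected probability
into p instead of drawing a fresh random number (illustrative code mirroring
the logic of BlockSampler_Next, not the backend function itself):

#include <stdio.h>
#include <stdlib.h>

static double
rand_fract(void)
{
	return (double) rand() / ((double) RAND_MAX + 1.0);
}

/* Emit n block numbers chosen uniformly from 0..N-1, in ascending order. */
static void
sample_blocks(int N, int n)
{
	int			t = 0;			/* next block number to consider */
	int			K = N;			/* blocks remaining, K == N - t */
	int			k = n;			/* selections still needed */

	while (k > 0)
	{
		double		V = rand_fract();
		double		p = 1.0 - (double) k / (double) K;	/* P(skip block t) */

		while (V < p)
		{
			t++;
			K--;				/* skip this block */
			/* V is now uniform in [0,p): fold the next skip test into p */
			p *= 1.0 - (double) k / (double) K;
		}
		printf("%d\n", t);		/* select block t */
		t++;
		K--;
		k--;
	}
}

int
main(void)
{
	sample_blocks(1000, 5);
	return 0;
}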
@@ -1127,7 +1127,7 @@ acquire_sample_rows(Relation onerel, int elevel,
/*
* We ignore unused and redirect line pointers. DEAD line
* pointers should be counted as dead, because we need vacuum to
- * run to get rid of them. Note that this rule agrees with the
+ * run to get rid of them. Note that this rule agrees with the
* way that heap_page_prune() counts things.
*/
if (!ItemIdIsNormal(itemid))
@@ -1173,7 +1173,7 @@ acquire_sample_rows(Relation onerel, int elevel,
* is the safer option.
*
* A special case is that the inserting transaction might
- * be our own. In this case we should count and sample
+ * be our own. In this case we should count and sample
* the row, to accommodate users who load a table and
* analyze it in one transaction. (pgstat_report_analyze
* has to adjust the numbers we send to the stats
@@ -1215,7 +1215,7 @@ acquire_sample_rows(Relation onerel, int elevel,
/*
* The first targrows sample rows are simply copied into the
* reservoir. Then we start replacing tuples in the sample
- * until we reach the end of the relation. This algorithm is
+ * until we reach the end of the relation. This algorithm is
* from Jeff Vitter's paper (see full citation below). It
* works by repeatedly computing the number of tuples to skip
* before selecting a tuple, which replaces a randomly chosen
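
This is reservoir sampling; Vitter's refinement computes how many tuples to
skip between draws, but the simpler Algorithm R sketched below yields the same
uniform sample with one random draw per row (standalone and illustrative, not
the acquire_sample_rows code):

#include <stdio.h>
#include <stdlib.h>

/*
 * Keep a uniform sample of up to targrows values from a stream: the first
 * targrows rows fill the reservoir; row t (0-based) thereafter replaces a
 * random slot with probability targrows/(t+1).
 */
static void
reservoir_add(int *reservoir, int targrows, int t, int value)
{
	if (t < targrows)
		reservoir[t] = value;
	else
	{
		int			slot = rand() % (t + 1);

		if (slot < targrows)
			reservoir[slot] = value;
	}
}

int
main(void)
{
	int			sample[10];

	for (int t = 0; t < 1000; t++)
		reservoir_add(sample, 10, t, t);
	for (int i = 0; i < 10; i++)
		printf("%d\n", sample[i]);
	return 0;
}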
@@ -1274,7 +1274,7 @@ acquire_sample_rows(Relation onerel, int elevel,
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
/*
- * Estimate total numbers of rows in relation. For live rows, use
+ * Estimate total numbers of rows in relation. For live rows, use
* vac_estimate_reltuples; for dead rows, we have no source of old
* information, so we have to assume the density is the same in unseen
* pages as in the pages we scanned.
@@ -1597,7 +1597,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel,
* Statistics are stored in several places: the pg_class row for the
* relation has stats about the whole relation, and there is a
* pg_statistic row for each (non-system) attribute that has ever
- * been analyzed. The pg_class values are updated by VACUUM, not here.
+ * been analyzed. The pg_class values are updated by VACUUM, not here.
*
* pg_statistic rows are just added or updated normally. This means
* that pg_statistic will probably contain some deleted rows at the
@@ -2001,7 +2001,7 @@ compute_minimal_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
@@ -2121,7 +2121,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* We assume (not very reliably!) that all the multiply-occurring
* values are reflected in the final track[] list, and the other
* nonnull values all appeared but once. (XXX this usually
- * results in a drastic overestimate of ndistinct. Can we do
+ * results in a drastic overestimate of ndistinct. Can we do
* any better?)
*----------
*/
@@ -2158,7 +2158,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample.
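
The threshold arithmetic is short enough to show directly; a sketch assuming
ndistinct has already been estimated (the real code applies further caps, e.g.
from the statistics target):

#include <stdio.h>

/* Minimum number of sample occurrences for a value to be kept as an MCV. */
static double
mcv_min_count(int samplerows, double ndistinct)
{
	double		avgcount = (double) samplerows / ndistinct; /* typical count */
	double		mincount = avgcount * 1.25; /* 25% above average */

	if (mincount < 2)
		mincount = 2;			/* and at least 2 instances */
	return mincount;
}

int
main(void)
{
	printf("%.1f\n", mcv_min_count(30000, 1000.0));		/* prints 37.5 */
	return 0;
}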
@@ -2326,7 +2326,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
@@ -2371,7 +2371,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* accumulate ordering-correlation statistics.
*
* To determine which are most common, we first have to count the
- * number of duplicates of each value. The duplicates are adjacent in
+ * number of duplicates of each value. The duplicates are adjacent in
* the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
@@ -2380,7 +2380,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* that are adjacent in the sorted order; otherwise it could not know
* that it's ordered the pair correctly.) We exploit this by having
* compare_scalars remember the highest tupno index that each
- * ScalarItem has been found equal to. At the end of the sort, a
+ * ScalarItem has been found equal to. At the end of the sort, a
* ScalarItem's tupnoLink will still point to itself if and only if it
* is the last item of its group of duplicates (since the group will
* be ordered by tupno).
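
A standalone sketch of the tupnoLink scheme: initialize each link to its own
tupno, let the comparator raise the link to the highest equal tupno seen, and
afterwards an item is the last of its duplicate group exactly when its link
still points to itself (a global stands in for qsort_arg's extra argument;
the values are illustrative):

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	int			value;
	int			tupno;
} ScalarItem;

static int *tupnoLink;			/* stands in for qsort_arg's extra arg */

static int
compare_scalars(const void *a, const void *b)
{
	const ScalarItem *sa = (const ScalarItem *) a;
	const ScalarItem *sb = (const ScalarItem *) b;

	if (sa->value != sb->value)
		return (sa->value < sb->value) ? -1 : 1;
	/* equal datums: remember the highest tupno each was found equal to */
	if (tupnoLink[sa->tupno] < sb->tupno)
		tupnoLink[sa->tupno] = sb->tupno;
	if (tupnoLink[sb->tupno] < sa->tupno)
		tupnoLink[sb->tupno] = sa->tupno;
	return sa->tupno - sb->tupno;	/* break ties by tupno */
}

int
main(void)
{
	ScalarItem	items[] = {{7, 0}, {3, 1}, {7, 2}, {3, 3}, {9, 4}};
	int			links[5];
	int			n = 5;

	tupnoLink = links;
	for (int i = 0; i < n; i++)
		links[i] = i;
	qsort(items, n, sizeof(ScalarItem), compare_scalars);
	for (int i = 0; i < n; i++)
		if (links[items[i].tupno] == items[i].tupno)
			printf("value %d: last of its group\n", items[i].value);
	return 0;
}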
@@ -2500,7 +2500,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample. Also, we won't suppress values
@@ -2655,7 +2655,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* The object of this loop is to copy the first and last values[]
- * entries along with evenly-spaced values in between. So the
+ * entries along with evenly-spaced values in between. So the
* i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But
* computing that subscript directly risks integer overflow when
* the stats target is more than a couple thousand. Instead we
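
A standalone sketch of that incremental computation: carry delta and deltafrac
along, normalizing the fractional part against num_hist - 1, so pos always
equals (i * (nvals - 1)) / (num_hist - 1) without ever forming the large
product (sizes here are illustrative):

#include <stdio.h>

int
main(void)
{
	int			nvals = 10007;	/* sorted sample values available */
	int			num_hist = 101; /* histogram entries wanted */
	int			delta = (nvals - 1) / (num_hist - 1);
	int			deltafrac = (nvals - 1) % (num_hist - 1);
	int			pos = 0, posfrac = 0;

	for (int i = 0; i < num_hist; i++)
	{
		printf("%d\n", pos);	/* == (i * (nvals - 1)) / (num_hist - 1) */
		pos += delta;
		posfrac += deltafrac;
		if (posfrac >= num_hist - 1)
		{
			/* fractional part reached 1: carry into the integer part */
			pos++;
			posfrac -= num_hist - 1;
		}
	}
	return 0;
}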
@@ -2766,7 +2766,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* qsort_arg comparator for sorting ScalarItems
*
* Aside from sorting the items, we update the tupnoLink[] array
- * whenever two ScalarItems are found to contain equal datums. The array
+ * whenever two ScalarItems are found to contain equal datums. The array
* is indexed by tupno; for each ScalarItem, it contains the highest
* tupno that that item's datum has been found to be equal to. This allows
* us to avoid additional comparisons in compute_scalar_stats().
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 09fb99bb73..92f2077d48 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -151,7 +151,7 @@
*
* This struct declaration has the maximal length, but in a real queue entry
* the data area is only big enough for the actual channel and payload strings
- * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
+ * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
* entry size, if both channel and payload strings are empty (but note it
* doesn't include alignment padding).
*
@@ -265,7 +265,7 @@ static SlruCtlData AsyncCtlData;
*
* The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
* pages, because more than that would confuse slru.c into thinking there
- * was a wraparound condition. With the default BLCKSZ this means there
+ * was a wraparound condition. With the default BLCKSZ this means there
* can be up to 8GB of queued-and-not-read data.
*
* Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of
@@ -395,7 +395,7 @@ asyncQueuePagePrecedes(int p, int q)
int diff;
/*
- * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
+ * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
* in the range 0..QUEUE_MAX_PAGE.
*/
Assert(p >= 0 && p <= QUEUE_MAX_PAGE);
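
Standalone, the modulo comparison looks like this: map the raw difference into
-(QUEUE_MAX_PAGE+1)/2 .. +(QUEUE_MAX_PAGE+1)/2 and test its sign, which stays
correct across wraparound (same shape as asyncQueuePagePrecedes, with the
constant made local for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MAX_PAGE	8191	/* illustrative; pages live in 0..MAX */

/* Does page p logically precede page q on the circular queue? */
static bool
page_precedes(int p, int q)
{
	int			diff = p - q;

	assert(p >= 0 && p <= QUEUE_MAX_PAGE);
	assert(q >= 0 && q <= QUEUE_MAX_PAGE);
	if (diff >= (QUEUE_MAX_PAGE + 1) / 2)
		diff -= QUEUE_MAX_PAGE + 1;
	else if (diff < -((QUEUE_MAX_PAGE + 1) / 2))
		diff += QUEUE_MAX_PAGE + 1;
	return diff < 0;
}

int
main(void)
{
	printf("%d\n", page_precedes(10, 20));				/* 1: plainly before */
	printf("%d\n", page_precedes(QUEUE_MAX_PAGE, 5));	/* 1: wrapped around */
	return 0;
}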
@@ -826,7 +826,7 @@ PreCommit_Notify(void)
while (nextNotify != NULL)
{
/*
- * Add the pending notifications to the queue. We acquire and
+ * Add the pending notifications to the queue. We acquire and
* release AsyncQueueLock once per page, which might be overkill
* but it does allow readers to get in while we're doing this.
*
@@ -1042,12 +1042,12 @@ Exec_UnlistenAllCommit(void)
* The reason that this is not done in AtCommit_Notify is that there is
* a nonzero chance of errors here (for example, encoding conversion errors
* while trying to format messages to our frontend). An error during
- * AtCommit_Notify would be a PANIC condition. The timing is also arranged
+ * AtCommit_Notify would be a PANIC condition. The timing is also arranged
* to ensure that a transaction's self-notifies are delivered to the frontend
* before it gets the terminating ReadyForQuery message.
*
* Note that we send signals and process the queue even if the transaction
- * eventually aborted. This is because we need to clean out whatever got
+ * eventually aborted. This is because we need to clean out whatever got
* added to the queue.
*
* NOTE: we are outside of any transaction here.
@@ -1137,7 +1137,7 @@ IsListeningOn(const char *channel)
/*
* Remove our entry from the listeners array when we are no longer listening
- * on any channel. NB: must not fail if we're already not listening.
+ * on any channel. NB: must not fail if we're already not listening.
*/
static void
asyncQueueUnregister(void)
@@ -1179,7 +1179,7 @@ asyncQueueIsFull(void)
/*
* The queue is full if creating a new head page would create a page that
* logically precedes the current global tail pointer, ie, the head
- * pointer would wrap around compared to the tail. We cannot create such
+ * pointer would wrap around compared to the tail. We cannot create such
* a head page for fear of confusing slru.c. For safety we round the tail
* pointer back to a segment boundary (compare the truncation logic in
* asyncQueueAdvanceTail).
@@ -1198,7 +1198,7 @@ asyncQueueIsFull(void)
/*
* Advance the QueuePosition to the next entry, assuming that the current
- * entry is of length entryLength. If we jump to a new page the function
+ * entry is of length entryLength. If we jump to a new page the function
* returns true, else false.
*/
static bool
@@ -1267,7 +1267,7 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
* the last byte which simplifies reading the page later.
*
* We are passed the list cell containing the next notification to write
- * and return the first still-unwritten cell back. Eventually we will return
+ * and return the first still-unwritten cell back. Eventually we will return
* NULL indicating all is done.
*
* We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock
@@ -1344,7 +1344,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
* Page is full, so we're done here, but first fill the next page
* with zeroes. The reason to do this is to ensure that slru.c's
* idea of the head page is always the same as ours, which avoids
- * boundary problems in SimpleLruTruncate. The test in
+ * boundary problems in SimpleLruTruncate. The test in
* asyncQueueIsFull() ensured that there is room to create this
* page without overrunning the queue.
*/
@@ -1518,7 +1518,7 @@ AtAbort_Notify(void)
/*
* If we LISTEN but then roll back the transaction after PreCommit_Notify,
* we have registered as a listener but have not made any entry in
- * listenChannels. In that case, deregister again.
+ * listenChannels. In that case, deregister again.
*/
if (amRegisteredListener && listenChannels == NIL)
asyncQueueUnregister();
@@ -1771,7 +1771,7 @@ EnableNotifyInterrupt(void)
* is disabled until the next EnableNotifyInterrupt call.
*
* The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this,
- * so as to prevent conflicts if one signal interrupts the other. So we
+ * so as to prevent conflicts if one signal interrupts the other. So we
* must return the previous state of the flag.
*/
bool
@@ -1866,7 +1866,7 @@ asyncQueueReadAllNotifications(void)
/*
* We copy the data from SLRU into a local buffer, so as to avoid
* holding the AsyncCtlLock while we are examining the entries and
- * possibly transmitting them to our frontend. Copy only the part
+ * possibly transmitting them to our frontend. Copy only the part
* of the page we will actually inspect.
*/
slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage,
@@ -1940,7 +1940,7 @@ asyncQueueReadAllNotifications(void)
* and deliver relevant ones to my frontend.
*
* The current page must have been fetched into page_buffer from shared
- * memory. (We could access the page right in shared memory, but that
+ * memory. (We could access the page right in shared memory, but that
* would imply holding the AsyncCtlLock throughout this routine.)
*
* We stop if we reach the "stop" position, or reach a notification from an
@@ -2146,7 +2146,7 @@ NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid)
pq_endmessage(&buf);
/*
- * NOTE: we do not do pq_flush() here. For a self-notify, it will
+ * NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
* ProcessIncomingNotify will do it after finding all the notifies.
*/
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 4ac1e0b864..54a2753182 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* cluster.c
- * CLUSTER a table on an index. This is now also used for VACUUM FULL.
+ * CLUSTER a table on an index. This is now also used for VACUUM FULL.
*
* There is hardly anything left of Paul Brown's original implementation...
*
@@ -94,7 +94,7 @@ static void reform_and_rewrite_tuple(HeapTuple tuple,
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation to be specified without index. In that case,
+ * We also allow a relation to be specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -206,7 +206,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
+ * Build the list of relations to cluster. Note that this lives in
* cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -243,7 +243,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
*
* This clusters the table by creating a new, clustered table and
* swapping the relfilenodes of the new table and the old table, so
- * the OID of the original table is preserved. Thus we do not lose
+ * the OID of the original table is preserved. Thus we do not lose
* GRANT, inheritance, or references to this table (this was a bug
* in releases thru 7.3).
*
@@ -252,7 +252,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
* them incrementally while we load the table.
*
* If indexOid is InvalidOid, the table will be rewritten in physical order
- * instead of index order. This is the new implementation of VACUUM FULL,
+ * instead of index order. This is the new implementation of VACUUM FULL,
* and error messages should refer to the operation as VACUUM not CLUSTER.
*/
void
@@ -265,7 +265,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose)
/*
* We grab exclusive access to the target rel and index for the duration
- * of the transaction. (This is redundant for the single-transaction
+ * of the transaction. (This is redundant for the single-transaction
* case, since cluster() already did it.) The index lock is taken inside
* check_index_is_clusterable.
*/
@@ -300,7 +300,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose)
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
- * remote temp tables by name. There is another check in cluster_rel
+ * remote temp tables by name. There is another check in cluster_rel
* which is redundant, but we leave it for extra safety.
*/
if (RELATION_IS_OTHER_TEMP(OldHeap))
@@ -393,7 +393,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose)
/*
* All predicate locks on the tuples or pages are about to be made
- * invalid, because we move tuples around. Promote them to relation
+ * invalid, because we move tuples around. Promote them to relation
* locks. Predicate locks on indexes will be promoted when they are
* reindexed.
*/
@@ -440,7 +440,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMOD
/*
* Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
+ * every row of the relation). We could relax this by making a separate
* seqscan pass over the table to copy the missing rows, but that seems
* expensive and tedious.
*/
@@ -649,14 +649,14 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp,
/*
* Create the new heap, using a temporary name in the same namespace as
- * the existing table. NOTE: there is some risk of collision with user
+ * the existing table. NOTE: there is some risk of collision with user
* relnames. Working around this seems more trouble than it's worth; in
* particular, we can't create the new heap in a different namespace from
* the old, or we will have problems with the TEMP status of temp tables.
*
* Note: the new heap is not a shared relation, even if we are rebuilding
* a shared rel. However, we do make the new heap mapped if the source is
- * mapped. This simplifies swap_relation_files, and is absolutely
+ * mapped. This simplifies swap_relation_files, and is absolutely
* necessary for rebuilding pg_class, for reasons explained there.
*/
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap);
@@ -696,11 +696,11 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp,
*
* If the relation doesn't have a TOAST table already, we can't need one
* for the new relation. The other way around is possible though: if some
- * wide columns have been dropped, NewHeapCreateToastTable can decide
- * that no TOAST table is needed for the new table.
+ * wide columns have been dropped, NewHeapCreateToastTable can decide that
+ * no TOAST table is needed for the new table.
*
- * Note that NewHeapCreateToastTable ends with CommandCounterIncrement,
- * so that the TOAST table will be visible for insertion.
+ * Note that NewHeapCreateToastTable ends with CommandCounterIncrement, so
+ * that the TOAST table will be visible for insertion.
*/
toastid = OldHeap->rd_rel->reltoastrelid;
if (OidIsValid(toastid))
@@ -788,12 +788,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
/*
* If the OldHeap has a toast table, get lock on the toast table to keep
- * it from being vacuumed. This is needed because autovacuum processes
+ * it from being vacuumed. This is needed because autovacuum processes
* toast tables independently of their main tables, with no lock on the
- * latter. If an autovacuum were to start on the toast table after we
+ * latter. If an autovacuum were to start on the toast table after we
* compute our OldestXmin below, it would use a later OldestXmin, and then
* possibly remove as DEAD toast tuples belonging to main tuples we think
- * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
+ * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
* tuples.
*
* We don't need to open the toast relation here, just lock it. The lock
@@ -814,7 +814,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
/*
* If both tables have TOAST tables, perform toast swap by content. It is
* possible that the old table has a toast table but the new one doesn't,
- * if toastable columns have been dropped. In that case we have to do
+ * if toastable columns have been dropped. In that case we have to do
* swap by links. This is okay because swap by content is only essential
* for system catalogs, and we don't support schema changes for them.
*/
@@ -833,7 +833,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
*
* Note that we must hold NewHeap open until we are done writing data,
* since the relcache will not guarantee to remember this setting once
- * the relation is closed. Also, this technique depends on the fact
+ * the relation is closed. Also, this technique depends on the fact
* that no one will try to read from the NewHeap until after we've
* finished writing it and swapping the rels --- otherwise they could
* follow the toast pointers to the wrong place. (It would actually
@@ -929,7 +929,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
/*
* Scan through the OldHeap, either in OldIndex order or sequentially;
* copy each tuple into the NewHeap, or transiently to the tuplesort
- * module. Note that we don't bother sorting dead tuples (they won't get
+ * module. Note that we don't bother sorting dead tuples (they won't get
* to the new table anyway).
*/
for (;;)
@@ -1217,7 +1217,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
NameStr(relform2->relname), r2);
/*
- * Send replacement mappings to relmapper. Note these won't actually
+ * Send replacement mappings to relmapper. Note these won't actually
* take effect until CommandCounterIncrement.
*/
RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
@@ -1404,7 +1404,8 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
relform1->relkind == RELKIND_TOASTVALUE &&
relform2->relkind == RELKIND_TOASTVALUE)
{
- Oid toastIndex1, toastIndex2;
+ Oid toastIndex1,
+ toastIndex2;
/* Get valid index for each relation */
toastIndex1 = toast_get_valid_index(r1,
@@ -1440,7 +1441,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
* non-transient relation.)
*
* Caution: the placement of this step interacts with the decision to
- * handle toast rels by recursion. When we are trying to rebuild pg_class
+ * handle toast rels by recursion. When we are trying to rebuild pg_class
* itself, the smgr close on pg_class must happen after all accesses in
* this function.
*/
@@ -1487,9 +1488,9 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
/*
* Rebuild each index on the relation (but not the toast table, which is
- * all-new at this point). It is important to do this before the DROP
+ * all-new at this point). It is important to do this before the DROP
* step because if we are processing a system catalog that will be used
- * during DROP, we want to have its indexes available. There is no
+ * during DROP, we want to have its indexes available. There is no
* advantage to the other order anyway because this is all transactional,
* so no chance to reclaim disk space before commit. We do not need a
* final CommandCounterIncrement() because reindex_relation does it.
@@ -1511,11 +1512,11 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
* swap_relation_files()), thus relfrozenxid was not updated. That's
* annoying because a potential reason for doing a VACUUM FULL is an
* imminent or actual anti-wraparound shutdown. So, now that we can
- * access the new relation using it's indices, update
- * relfrozenxid. pg_class doesn't have a toast relation, so we don't need
- * to update the corresponding toast relation. Not that there's little
- * point moving all relfrozenxid updates here since swap_relation_files()
- * needs to write to pg_class for non-mapped relations anyway.
+ * access the new relation using its indices, update relfrozenxid.
+ * pg_class doesn't have a toast relation, so we don't need to update the
+ * corresponding toast relation. Note that there's little point moving all
+ * relfrozenxid updates here since swap_relation_files() needs to write to
+ * pg_class for non-mapped relations anyway.
*/
if (OIDOldHeap == RelationRelationId)
{
diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c
index 751f89e514..b0cad4634b 100644
--- a/src/backend/commands/constraint.c
+++ b/src/backend/commands/constraint.c
@@ -50,7 +50,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
bool isnull[INDEX_MAX_KEYS];
/*
- * Make sure this is being called as an AFTER ROW trigger. Note:
+ * Make sure this is being called as an AFTER ROW trigger. Note:
* translatable error strings are shared with ri_triggers.c, so resist the
* temptation to fold the function name into them.
*/
@@ -87,7 +87,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
* If the new_row is now dead (ie, inserted and then deleted within our
* transaction), we can skip the check. However, we have to be careful,
* because this trigger gets queued only in response to index insertions;
- * which means it does not get queued for HOT updates. The row we are
+ * which means it does not get queued for HOT updates. The row we are
* called for might now be dead, but have a live HOT child, in which case
* we still need to make the check. Therefore we have to use
* heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 70ee7e5048..fbd7492a73 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -125,8 +125,8 @@ typedef struct CopyStateData
bool *force_quote_flags; /* per-column CSV FQ flags */
List *force_notnull; /* list of column names */
bool *force_notnull_flags; /* per-column CSV FNN flags */
- List *force_null; /* list of column names */
- bool *force_null_flags; /* per-column CSV FN flags */
+ List *force_null; /* list of column names */
+ bool *force_null_flags; /* per-column CSV FN flags */
bool convert_selectively; /* do selective binary conversion? */
List *convert_select; /* list of column names (can be NIL) */
bool *convert_select_flags; /* per-column CSV/TEXT CS flags */
@@ -189,7 +189,7 @@ typedef struct CopyStateData
/*
* Finally, raw_buf holds raw data read from the data source (file or
- * client connection). CopyReadLine parses this data sufficiently to
+ * client connection). CopyReadLine parses this data sufficiently to
* locate line boundaries, then transfers the data to line_buf and
* converts it. Note: we guarantee that there is a \0 at
* raw_buf[raw_buf_len].
@@ -215,7 +215,7 @@ typedef struct
* function call overhead in tight COPY loops.
*
* We must use "if (1)" because the usual "do {...} while(0)" wrapper would
- * prevent the continue/break processing from working. We end the "if (1)"
+ * prevent the continue/break processing from working. We end the "if (1)"
* with "else ((void) 0)" to ensure the "if" does not unintentionally match
* any "else" in the calling code, and to avoid any compiler warnings about
* empty statements. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
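
A standalone demonstration of why that wrapper is used: a continue inside a
do {...} while (0) body binds to the macro's own loop and never reaches the
caller's, whereas the if (1) {...} else ((void) 0) form lets it through and
still requires the trailing semicolon (hypothetical macro, same shape as the
COPY ones):

#include <stdio.h>

/*
 * A macro whose body may execute "continue" that must affect the caller's
 * loop. Wrapping the body in do{...}while(0) would swallow the continue.
 */
#define SKIP_NEGATIVE(x) \
	if (1) \
	{ \
		if ((x) < 0) \
			continue; \
	} else ((void) 0)

int
main(void)
{
	int			vals[] = {3, -1, 4, -1, 5};

	for (int i = 0; i < 5; i++)
	{
		SKIP_NEGATIVE(vals[i]);
		printf("%d\n", vals[i]);	/* prints 3, 4, 5 */
	}
	return 0;
}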
@@ -549,7 +549,7 @@ CopySendEndOfRow(CopyState cstate)
* CopyGetData reads data from the source (file or frontend)
*
* We attempt to read at least minread, and at most maxread, bytes from
- * the source. The actual number of bytes read is returned; if this is
+ * the source. The actual number of bytes read is returned; if this is
* less than minread, EOF was detected.
*
* Note: when copying from the frontend, we expect a proper EOF mark per
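
A sketch of that contract over a plain file descriptor (the real CopyGetData
also speaks the frontend protocol; names here are illustrative): loop until at
least minread bytes have arrived, returning short only on EOF or error.

#include <unistd.h>

/*
 * Read at least minread and at most maxread bytes from fd into buf.
 * Returns the number of bytes read; a result below minread means EOF
 * (or an error) was hit first.
 */
static int
get_data(int fd, char *buf, int minread, int maxread)
{
	int			total = 0;

	while (total < minread)
	{
		ssize_t		n = read(fd, buf + total, (size_t) (maxread - total));

		if (n <= 0)
			break;				/* EOF or error: return what we have */
		total += (int) n;
	}
	return total;
}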
@@ -766,7 +766,7 @@ CopyLoadRawBuf(CopyState cstate)
* we also support copying the output of an arbitrary SELECT query.
*
* If <pipe> is false, transfer is between the table and the file named
- * <filename>. Otherwise, transfer is between the table and our regular
+ * <filename>. Otherwise, transfer is between the table and our regular
* input/output stream. The latter could be either stdin/stdout or a
* socket, depending on whether we're running under Postmaster control.
*
@@ -1203,7 +1203,7 @@ ProcessCopyOptions(CopyState cstate,
if (cstate->force_null != NIL && !is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force null only available using COPY FROM")));
+ errmsg("COPY force null only available using COPY FROM")));
/* Don't allow the delimiter to appear in the null string. */
if (strchr(cstate->null_print, cstate->delim[0]) != NULL)
@@ -1298,7 +1298,7 @@ BeginCopy(bool is_from,
errmsg("COPY (SELECT) WITH OIDS is not supported")));
/*
- * Run parse analysis and rewrite. Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
@@ -1428,8 +1428,8 @@ BeginCopy(bool is_from,
if (!list_member_int(cstate->attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE NULL column \"%s\" not referenced by COPY",
- NameStr(tupDesc->attrs[attnum - 1]->attname))));
+ errmsg("FORCE NULL column \"%s\" not referenced by COPY",
+ NameStr(tupDesc->attrs[attnum - 1]->attname))));
cstate->force_null_flags[attnum - 1] = true;
}
}
@@ -1730,7 +1730,7 @@ CopyTo(CopyState cstate)
* Create a temporary memory context that we can reset once per row to
* recover palloc'd memory. This avoids any problems with leaks inside
* datatype output routines, and should be faster than retail pfree's
- * anyway. (We don't need a whole econtext as CopyFrom does.)
+ * anyway. (We don't need a whole econtext as CopyFrom does.)
*/
cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY TO",
@@ -2248,8 +2248,8 @@ CopyFrom(CopyState cstate)
{
/*
* Reset the per-tuple exprcontext. We can only do this if the
- * tuple buffer is empty. (Calling the context the per-tuple memory
- * context is a bit of a misnomer now.)
+ * tuple buffer is empty. (Calling the context the per-tuple
+ * memory context is a bit of a misnomer now.)
*/
ResetPerTupleExprContext(estate);
}
@@ -2569,19 +2569,20 @@ BeginCopyFrom(Relation rel,
num_defaults++;
/*
- * If a default expression looks at the table being loaded, then
- * it could give the wrong answer when using multi-insert. Since
- * database access can be dynamic this is hard to test for
- * exactly, so we use the much wider test of whether the
- * default expression is volatile. We allow for the special case
- * of when the default expression is the nextval() of a sequence
- * which in this specific case is known to be safe for use with
- * the multi-insert optimisation. Hence we use this special case
- * function checker rather than the standard check for
+ * If a default expression looks at the table being loaded,
+ * then it could give the wrong answer when using
+ * multi-insert. Since database access can be dynamic this is
+ * hard to test for exactly, so we use the much wider test of
+ * whether the default expression is volatile. We allow for
+ * the special case of when the default expression is the
+ * nextval() of a sequence which in this specific case is
+ * known to be safe for use with the multi-insert
+ * optimisation. Hence we use this special case function
+ * checker rather than the standard check for
* contain_volatile_functions().
*/
if (!volatile_defexprs)
- volatile_defexprs = contain_volatile_functions_not_nextval((Node *)defexpr);
+ volatile_defexprs = contain_volatile_functions_not_nextval((Node *) defexpr);
}
}
}
@@ -2861,8 +2862,8 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
if (cstate->csv_mode)
{
- if(string == NULL &&
- cstate->force_notnull_flags[m])
+ if (string == NULL &&
+ cstate->force_notnull_flags[m])
{
/*
* FORCE_NOT_NULL option is set and column is NULL -
@@ -2870,14 +2871,14 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
*/
string = cstate->null_print;
}
- else if(string != NULL && cstate->force_null_flags[m]
- && strcmp(string,cstate->null_print) == 0 )
+ else if (string != NULL && cstate->force_null_flags[m]
+ && strcmp(string, cstate->null_print) == 0)
{
/*
- * FORCE_NULL option is set and column matches the NULL string.
- * It must have been quoted, or otherwise the string would already
- * have been set to NULL.
- * Convert it to NULL as specified.
+ * FORCE_NULL option is set and column matches the NULL
+ * string. It must have been quoted, or otherwise the
+ * string would already have been set to NULL. Convert it
+ * to NULL as specified.
*/
string = NULL;
}
@@ -2920,7 +2921,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
* if client chooses to send that now.
*
* Note that we MUST NOT try to read more data in an old-protocol
- * copy, since there is no protocol-level EOF marker then. We
+ * copy, since there is no protocol-level EOF marker then. We
* could go either way for copy from file, but choose to throw
* error if there's data after the EOF marker, for consistency
* with the new-protocol case.
@@ -2982,7 +2983,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
/*
* Now compute and insert any defaults available for the columns not
- * provided by the input data. Anything not processed here or above will
+ * provided by the input data. Anything not processed here or above will
* remain NULL.
*/
for (i = 0; i < num_defaults; i++)
@@ -3017,7 +3018,7 @@ EndCopyFrom(CopyState cstate)
* server encoding.
*
* Result is true if read was terminated by EOF, false if terminated
- * by newline. The terminating newline or EOF marker is not included
+ * by newline. The terminating newline or EOF marker is not included
* in the final value of line_buf.
*/
static bool
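[Editor's note: the calling pattern this result convention implies, sketched — the loop and the consumer are hypothetical:]

    for (;;)
    {
        bool hit_eof = CopyReadLine(cstate);    /* true: EOF, false: newline */

        if (cstate->line_buf.len > 0)
            process_line(&cstate->line_buf);    /* hypothetical consumer */
        if (hit_eof)
            break;              /* no terminator was left in line_buf */
    }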
@@ -3173,7 +3174,7 @@ CopyReadLineText(CopyState cstate)
* of read-ahead and avoid the many calls to
* IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
* does not allow us to read too far ahead or we might read into the
- * next data, so we read-ahead only as far as we know we can. One
+ * next data, so we read-ahead only as far as we know we can. One
* optimization would be to read-ahead four bytes here if
* cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
* considering the size of the buffer.
@@ -3183,7 +3184,7 @@ CopyReadLineText(CopyState cstate)
REFILL_LINEBUF;
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
@@ -3241,7 +3242,7 @@ CopyReadLineText(CopyState cstate)
/*
* Updating the line count for embedded CR and/or LF chars is
* necessarily a little fragile - this test is probably about the
- * best we can do. (XXX it's arguable whether we should do this
+ * best we can do. (XXX it's arguable whether we should do this
* at all --- is cur_lineno a physical or logical count?)
*/
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
@@ -3420,7 +3421,7 @@ CopyReadLineText(CopyState cstate)
* after a backslash is special, so we skip over that second
* character too. If we didn't do that \\. would be
* considered an end-of-copy marker, while in non-CSV mode it is a
- * literal backslash followed by a period. In CSV mode,
+ * literal backslash followed by a period. In CSV mode,
* backslashes are not special, so we want to process the
* character after the backslash just like a normal character,
* so we don't increment in those cases.
@@ -3523,7 +3524,7 @@ CopyReadAttributesText(CopyState cstate)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into cstate->raw_fields[].
*/
@@ -3753,7 +3754,7 @@ CopyReadAttributesCSV(CopyState cstate)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into cstate->raw_fields[].
*/
@@ -3968,7 +3969,7 @@ CopyAttributeOutText(CopyState cstate, char *string)
/*
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
- * are infrequent. To avoid overhead from calling CopySendData once per
+ * are infrequent. To avoid overhead from calling CopySendData once per
* character, we dump out all characters between escaped characters in a
* single call. The loop invariant is that the data from "start" to "ptr"
* can be sent literally, but hasn't yet been.
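[Editor's note: in isolation, the loop invariant described above looks like this — a sketch with hypothetical helpers, not the function's actual escaping rules:]

    char *start = string, *ptr = string;

    while (*ptr)
    {
        if (needs_escape(*ptr))                 /* hypothetical predicate */
        {
            CopySendData(cstate, start, ptr - start);   /* flush literal run */
            send_escaped(cstate, *ptr);                 /* hypothetical */
            start = ++ptr;          /* next literal run starts after it */
        }
        else
            ptr++;
    }
    CopySendData(cstate, start, ptr - start);   /* send the final run */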
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index e434d38702..96806eed98 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -104,7 +104,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/*
* For materialized views, lock down security-restricted operations and
- * arrange to make GUC variable changes local to this command. This is
+ * arrange to make GUC variable changes local to this command. This is
* not necessary for security, but this keeps the behavior similar to
* REFRESH MATERIALIZED VIEW. Otherwise, one could create a materialized
* view not possible to refresh.
@@ -124,9 +124,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
- * a preliminary copy of the source querytree. This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
* the case that CTAS is in a portal or plpgsql function and is executed
- * repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
+ * repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
*/
rewritten = QueryRewrite((Query *) copyObject(query));
@@ -141,7 +141,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/*
* Use a snapshot with an updated command ID to ensure this query sees
- * results of any previously executed queries. (This could only matter if
+ * results of any previously executed queries. (This could only matter if
* the planner executed an allegedly-stable function that changed the
* database contents, but let's do it anyway to be parallel to the EXPLAIN
* code path.)
@@ -359,8 +359,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
/*
* If necessary, create a TOAST table for the target table. Note that
- * NewRelationCreateToastTable ends with CommandCounterIncrement(), so that
- * the TOAST table will be visible for insertion.
+ * NewRelationCreateToastTable ends with CommandCounterIncrement(), so
+ * that the TOAST table will be visible for insertion.
*/
CommandCounterIncrement();
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 4996a2e7cd..5705889f31 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -265,7 +265,7 @@ createdb(const CreatedbStmt *stmt)
* To create a database, must have createdb privilege and must be able to
* become the target role (this does not imply that the target role itself
* must have createdb privilege). The latter provision guards against
- * "giveaway" attacks. Note that a superuser will always have both of
+ * "giveaway" attacks. Note that a superuser will always have both of
* these privileges a fortiori.
*/
if (!have_createdb_privilege())
@@ -397,7 +397,7 @@ createdb(const CreatedbStmt *stmt)
/*
* If we are trying to change the default tablespace of the template,
* we require that the template not have any files in the new default
- * tablespace. This is necessary because otherwise the copied
+ * tablespace. This is necessary because otherwise the copied
* database would contain pg_class rows that refer to its default
* tablespace both explicitly (by OID) and implicitly (as zero), which
* would cause problems. For example another CREATE DATABASE using
@@ -433,7 +433,7 @@ createdb(const CreatedbStmt *stmt)
}
/*
- * Check for db name conflict. This is just to give a more friendly error
+ * Check for db name conflict. This is just to give a more friendly error
* message than "unique index violation". There's a race condition but
* we're willing to accept the less friendly message in that case.
*/
@@ -498,7 +498,7 @@ createdb(const CreatedbStmt *stmt)
/*
* We deliberately set datacl to default (NULL), rather than copying it
- * from the template database. Copying it would be a bad idea when the
+ * from the template database. Copying it would be a bad idea when the
* owner is not the same as the template's owner.
*/
new_record_nulls[Anum_pg_database_datacl - 1] = true;
@@ -751,7 +751,8 @@ dropdb(const char *dbname, bool missing_ok)
HeapTuple tup;
int notherbackends;
int npreparedxacts;
- int nslots, nslots_active;
+ int nslots,
+ nslots_active;
/*
* Look up the target database's OID, and get exclusive lock on it. We
@@ -1160,7 +1161,7 @@ movedb(const char *dbname, const char *tblspcname)
/*
* Use an ENSURE block to make sure we remove the debris if the copy fails
- * (eg, due to out-of-disk-space). This is not a 100% solution, because
+ * (eg, due to out-of-disk-space). This is not a 100% solution, because
* of the possibility of failure during transaction commit, but it should
* handle most scenarios.
*/
@@ -1647,7 +1648,7 @@ get_db_info(const char *name, LOCKMODE lockmode,
LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);
/*
- * And now, re-fetch the tuple by OID. If it's still there and still
+ * And now, re-fetch the tuple by OID. If it's still there and still
* the same name, we win; else, drop the lock and loop back to try
* again.
*/
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index f0cb4f544e..dca6e952a5 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -202,7 +202,7 @@ defGetInt64(DefElem *def)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid int8
+ * constants by the lexer. Accept these if they are valid int8
* strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index 024a4778a9..96f926cbb2 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -606,7 +606,7 @@ filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
}
/*
- * Setup for running triggers for the given event. Return value is an OID list
+ * Setup for running triggers for the given event. Return value is an OID list
* of functions to run; if there are any, trigdata is filled with an
* appropriate EventTriggerData for them to receive.
*/
@@ -625,7 +625,7 @@ EventTriggerCommonSetup(Node *parsetree,
* invoked to match up exactly with the list that CREATE EVENT TRIGGER
* accepts. This debugging cross-check will throw an error if this
* function is invoked for a command tag that CREATE EVENT TRIGGER won't
- * accept. (Unfortunately, there doesn't seem to be any simple, automated
+ * accept. (Unfortunately, there doesn't seem to be any simple, automated
* way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that
* never reaches this control point.)
*
@@ -655,7 +655,7 @@ EventTriggerCommonSetup(Node *parsetree,
/*
* Filter list of event triggers by command tag, and copy them into our
- * memory context. Once we start running the command triggers, or indeed
+ * memory context. Once we start running the command triggers, or indeed
* once we do anything at all that touches the catalogs, an invalidation
* might leave cachelist pointing at garbage, so we must do this before we
* can do much else.
@@ -783,7 +783,7 @@ EventTriggerSQLDrop(Node *parsetree)
return;
/*
- * Use current state to determine whether this event fires at all. If
+ * Use current state to determine whether this event fires at all. If
* there are no triggers for the sql_drop event, then we don't have
* anything to do here. Note that dropped object collection is disabled
* if this is the case, so even if we were to try to run, the list would
@@ -798,7 +798,7 @@ EventTriggerSQLDrop(Node *parsetree)
&trigdata);
/*
- * Nothing to do if run list is empty. Note this shouldn't happen,
+ * Nothing to do if run list is empty. Note this shouldn't happen,
* because if there are no sql_drop events, then objects-to-drop wouldn't
* have been collected in the first place and we would have quit above.
*/
@@ -813,7 +813,7 @@ EventTriggerSQLDrop(Node *parsetree)
/*
* Make sure pg_event_trigger_dropped_objects only works when running
- * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when
+ * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when
* one trigger fails. (This is perhaps not necessary, as the currentState
* variable will be removed shortly by our caller, but it seems better to
* play safe.)
@@ -1053,7 +1053,7 @@ EventTriggerBeginCompleteQuery(void)
* returned false previously.
*
* Note: this might be called in the PG_CATCH block of a failing transaction,
- * so be wary of running anything unnecessary. (In particular, it's probably
+ * so be wary of running anything unnecessary. (In particular, it's probably
* unwise to try to allocate memory.)
*/
void
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 1104cc3631..794042b550 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -86,7 +86,7 @@ static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
static void show_sort_info(SortState *sortstate, ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
static void show_tidbitmap_info(BitmapHeapScanState *planstate,
- ExplainState *es);
+ ExplainState *es);
static void show_instrumentation_count(const char *qlabel, int which,
PlanState *planstate, ExplainState *es);
static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es);
@@ -197,7 +197,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
- * a preliminary copy of the source querytree. This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
* the case that the EXPLAIN is in a portal or plpgsql function and is
* executed repeatedly. (See also the same hack in DECLARE CURSOR and
* PREPARE.) XXX FIXME someday.
@@ -320,8 +320,9 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
(*ExplainOneQuery_hook) (query, into, es, queryString, params);
else
{
- PlannedStmt *plan;
- instr_time planstart, planduration;
+ PlannedStmt *plan;
+ instr_time planstart,
+ planduration;
INSTR_TIME_SET_CURRENT(planstart);
@@ -493,7 +494,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
if (es->costs && planduration)
{
- double plantime = INSTR_TIME_GET_DOUBLE(*planduration);
+ double plantime = INSTR_TIME_GET_DOUBLE(*planduration);
if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, "Planning time: %.3f ms\n",
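[Editor's note: the timing pattern used here, reduced to its parts — the INSTR_TIME_* macros are the portability macros from instr_time.h; the planning call itself is elided:]

    instr_time  planstart,
                planduration;

    INSTR_TIME_SET_CURRENT(planstart);
    /* ... plan the query ... */
    INSTR_TIME_SET_CURRENT(planduration);
    INSTR_TIME_SUBTRACT(planduration, planstart);   /* duration = end - start */

    double plantime = INSTR_TIME_GET_DOUBLE(planduration);  /* in seconds */
    /* reported above as milliseconds: plantime * 1000.0 */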
@@ -542,7 +543,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
* convert a QueryDesc's plan tree to text and append it to es->str
*
* The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str. Other fields in *es are
+ * initializing the output buffer es->str. Other fields in *es are
* initialized here.
*
* NB: will not work on utility statements
@@ -567,7 +568,7 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
* es->str
*
* The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str. Other fields in *es are
+ * initializing the output buffer es->str. Other fields in *es are
* initialized here.
*/
void
@@ -2193,7 +2194,7 @@ show_modifytable_info(ModifyTableState *mtstate, ExplainState *es)
/*
* If the first target relation is a foreign table, call its FDW to
- * display whatever additional fields it wants to. For now, we ignore the
+ * display whatever additional fields it wants to. For now, we ignore the
* possibility of other targets being foreign tables, although the API for
* ExplainForeignModify is designed to allow them to be processed.
*/
@@ -2692,7 +2693,7 @@ ExplainXMLTag(const char *tagname, int flags, ExplainState *es)
/*
* Emit a JSON line ending.
*
- * JSON requires a comma after each property but the last. To facilitate this,
+ * JSON requires a comma after each property but the last. To facilitate this,
* in JSON format, the text emitted for each property begins just prior to the
* preceding line-break (and comma, if applicable).
*/
@@ -2713,7 +2714,7 @@ ExplainJSONLineEnding(ExplainState *es)
* YAML lines are ordinarily indented by two spaces per indentation level.
* The text emitted for each property begins just prior to the preceding
* line-break, except for the first property in an unlabelled group, for which
- * it begins immediately after the "- " that introduces the group. The first
+ * it begins immediately after the "- " that introduces the group. The first
* property of the group appears on the same line as the opening "- ".
*/
static void
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 06bd90b9aa..9a0afa4b5d 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -108,7 +108,7 @@ static void ApplyExtensionUpdates(Oid extensionOid,
/*
* get_extension_oid - given an extension name, look up the OID
*
- * If missing_ok is false, throw an error if extension name not found. If
+ * If missing_ok is false, throw an error if extension name not found. If
* true, just return InvalidOid.
*/
Oid
@@ -257,9 +257,9 @@ check_valid_extension_name(const char *extensionname)
errdetail("Extension names must not contain \"--\".")));
/*
- * No leading or trailing dash either. (We could probably allow this, but
+ * No leading or trailing dash either. (We could probably allow this, but
* it would require much care in filename parsing and would make filenames
- * visually if not formally ambiguous. Since there's no real-world use
+ * visually if not formally ambiguous. Since there's no real-world use
* case, let's just forbid it.)
*/
if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
@@ -435,7 +435,7 @@ get_extension_script_filename(ExtensionControlFile *control,
/*
* Parse contents of primary or auxiliary control file, and fill in
- * fields of *control. We parse primary file if version == NULL,
+ * fields of *control. We parse primary file if version == NULL,
* else the optional auxiliary file for that version.
*
* Control files are supposed to be very short, half a dozen lines,
@@ -673,7 +673,7 @@ read_extension_script_file(const ExtensionControlFile *control,
* filename is used only to report errors.
*
* Note: it's tempting to just use SPI to execute the string, but that does
- * not work very well. The really serious problem is that SPI will parse,
+ * not work very well. The really serious problem is that SPI will parse,
* analyze, and plan the whole string before executing any of it; of course
* this fails if there are any plannable statements referring to objects
* created earlier in the script. A lesser annoyance is that SPI insists
@@ -848,7 +848,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
/*
* Set creating_extension and related variables so that
* recordDependencyOnCurrentExtension and other functions do the right
- * things. On failure, ensure we reset these variables.
+ * things. On failure, ensure we reset these variables.
*/
creating_extension = true;
CurrentExtensionObject = extensionOid;
@@ -1092,7 +1092,7 @@ identify_update_path(ExtensionControlFile *control,
* is still good.
*
* Result is a List of names of versions to transition through (the initial
- * version is *not* included). Returns NIL if no such path.
+ * version is *not* included). Returns NIL if no such path.
*/
static List *
find_update_path(List *evi_list,
@@ -1193,7 +1193,7 @@ CreateExtension(CreateExtensionStmt *stmt)
check_valid_extension_name(stmt->extname);
/*
- * Check for duplicate extension name. The unique index on
+ * Check for duplicate extension name. The unique index on
* pg_extension.extname would catch this anyway, and serves as a backstop
* in case of race conditions; but this is a friendlier error message, and
* besides we need a check to support IF NOT EXISTS.
@@ -1360,7 +1360,7 @@ CreateExtension(CreateExtensionStmt *stmt)
{
/*
* The extension is not relocatable and the author gave us a schema
- * for it. We create the schema here if it does not already exist.
+ * for it. We create the schema here if it does not already exist.
*/
schemaName = control->schema;
schemaOid = get_namespace_oid(schemaName, true);
@@ -1390,7 +1390,7 @@ CreateExtension(CreateExtensionStmt *stmt)
*/
List *search_path = fetch_search_path(false);
- if (search_path == NIL) /* nothing valid in search_path? */
+ if (search_path == NIL) /* nothing valid in search_path? */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
errmsg("no schema has been selected to create in")));
@@ -1589,7 +1589,7 @@ RemoveExtensionById(Oid extId)
* might write "DROP EXTENSION foo" in foo's own script files, as because
* errors in dependency management in extension script files could give
* rise to cases where an extension is dropped as a result of recursing
- * from some contained object. Because of that, we must test for the case
+ * from some contained object. Because of that, we must test for the case
* here, not at some higher level of the DROP EXTENSION command.
*/
if (extId == CurrentExtensionObject)
@@ -1620,7 +1620,7 @@ RemoveExtensionById(Oid extId)
/*
* This function lists the available extensions (one row per primary control
- * file in the control directory). We parse each control file and report the
+ * file in the control directory). We parse each control file and report the
* interesting fields.
*
* The system view pg_available_extensions provides a user interface to this
@@ -1729,7 +1729,7 @@ pg_available_extensions(PG_FUNCTION_ARGS)
/*
* This function lists the available extension versions (one row per
- * extension installation script). For each version, we parse the related
+ * extension installation script). For each version, we parse the related
* control file(s) and report the interesting fields.
*
* The system view pg_available_extension_versions provides a user interface
@@ -2517,7 +2517,7 @@ AlterExtensionNamespace(List *names, const char *newschema)
Oid dep_oldNspOid;
/*
- * Ignore non-membership dependencies. (Currently, the only other
+ * Ignore non-membership dependencies. (Currently, the only other
* case we could see here is a normal dependency from another
* extension.)
*/
@@ -2929,7 +2929,7 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
/*
* Prevent a schema from being added to an extension if the schema
- * contains the extension. That would create a dependency loop.
+ * contains the extension. That would create a dependency loop.
*/
if (object.classId == NamespaceRelationId &&
object.objectId == get_extension_schema(extension.objectId))
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index 7f007d7854..8ab9c439db 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -81,7 +81,7 @@ optionListToArray(List *options)
/*
- * Transform a list of DefElem into text array format. This is substantially
+ * Transform a list of DefElem into text array format. This is substantially
* the same thing as optionListToArray(), except we recognize SET/ADD/DROP
* actions for modifying an existing list of options, which is passed in
* Datum form as oldOptions. Also, if fdwvalidator isn't InvalidOid
@@ -125,7 +125,7 @@ transformGenericOptions(Oid catalogId,
/*
* It is possible to perform multiple SET/DROP actions on the same
- * option. The standard permits this, as long as the options to be
+ * option. The standard permits this, as long as the options to be
* added are unique. Note that an unspecified action is taken to be
* ADD.
*/
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 4c8119a474..470db5705c 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -74,7 +74,7 @@
* allow a shell type to be used, or even created if the specified return type
* doesn't exist yet. (Without this, there's no way to define the I/O procs
* for a new type.) But SQL function creation won't cope, so error out if
- * the target language is SQL. (We do this here, not in the SQL-function
+ * the target language is SQL. (We do this here, not in the SQL-function
* validator, so as not to produce a NOTICE and then an ERROR for the same
* condition.)
*/
@@ -451,7 +451,7 @@ interpret_function_parameter_list(List *parameters,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
- * raise a duplicate-clause error. (We don't try to detect duplicate
+ * raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@@ -760,7 +760,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName,
{
/*
* For "C" language, store the file name in probin and, when given,
- * the link symbol name in prosrc. If link symbol is omitted,
+ * the link symbol name in prosrc. If link symbol is omitted,
* substitute procedure name. We also allow link symbol to be
* specified as "-", since that was the habit in PG versions before
* 8.4, and there might be dump files out there that don't translate
@@ -1394,7 +1394,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Restricting the volatility of a cast function may or may not be a
* good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -1458,7 +1458,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* We know that composite, enum and array types are never binary-
- * compatible with each other. They all have OIDs embedded in them.
+ * compatible with each other. They all have OIDs embedded in them.
*
* Theoretically you could build a user-defined base type that is
* binary-compatible with a composite, enum, or array type. But we
@@ -1487,7 +1487,7 @@ CreateCast(CreateCastStmt *stmt)
* We also disallow creating binary-compatibility casts involving
* domains. Casting from a domain to its base type is already
* allowed, and casting the other way ought to go through domain
- * coercion to permit constraint checking. Again, if you're intent on
+ * coercion to permit constraint checking. Again, if you're intent on
* having your own semantics for that, create a no-op cast function.
*
* NOTE: if we were to relax this, the above checks for composites
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 38ce023a8a..fdfa6ca4f5 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -102,7 +102,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
* concrete benefit for core types.
* When a comparison or exclusion operator has a polymorphic input type, the
- * actual input types must also match. This defends against the possibility
+ * actual input types must also match. This defends against the possibility
* that operators could vary behavior in response to get_fn_expr_argtype().
* At present, this hazard is theoretical: check_exclusion_constraint() and
* all core index access methods decline to set fn_expr for such calls.
@@ -349,11 +349,11 @@ DefineIndex(Oid relationId,
* index build; but for concurrent builds we allow INSERT/UPDATE/DELETE
* (but not VACUUM).
*
- * NB: Caller is responsible for making sure that relationId refers
- * to the relation on which the index should be built; except in bootstrap
- * mode, this will typically require the caller to have already locked
- * the relation. To avoid lock upgrade hazards, that lock should be at
- * least as strong as the one we take here.
+ * NB: Caller is responsible for making sure that relationId refers to the
+ * relation on which the index should be built; except in bootstrap mode,
+ * this will typically require the caller to have already locked the
+ * relation. To avoid lock upgrade hazards, that lock should be at least
+ * as strong as the one we take here.
*/
lockmode = stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock;
rel = heap_open(relationId, lockmode);
@@ -433,7 +433,7 @@ DefineIndex(Oid relationId,
}
/*
- * Force shared indexes into the pg_global tablespace. This is a bit of a
+ * Force shared indexes into the pg_global tablespace. This is a bit of a
* hack but seems simpler than marking them in the BKI commands. On the
* other hand, if it's not shared, don't allow it to be placed there.
*/
@@ -628,7 +628,7 @@ DefineIndex(Oid relationId,
/*
* For a concurrent build, it's important to make the catalog entries
* visible to other transactions before we start to build the index. That
- * will prevent them from making incompatible HOT updates. The new index
+ * will prevent them from making incompatible HOT updates. The new index
* will be marked not indisready and not indisvalid, so that no one else
* tries to either insert into it or use it for queries.
*
@@ -676,7 +676,7 @@ DefineIndex(Oid relationId,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
- * deciding HOT-safety though. This arrangement ensures that no new HOT
+ * deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
@@ -736,7 +736,7 @@ DefineIndex(Oid relationId,
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. Beware! There might still be snapshots in
+ * to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
@@ -761,7 +761,7 @@ DefineIndex(Oid relationId,
* Drop the reference snapshot. We must do this before waiting out other
* snapshot holders, else we will deadlock against other processes also
* doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one
- * they must wait for. But first, save the snapshot's xmin to use as
+ * they must wait for. But first, save the snapshot's xmin to use as
* limitXmin for GetCurrentVirtualXIDs().
*/
limitXmin = snapshot->xmin;
@@ -771,7 +771,7 @@ DefineIndex(Oid relationId,
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted just
+ * interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots. Obtain a list of VXIDs
* of such transactions, and wait for them individually.
@@ -786,7 +786,7 @@ DefineIndex(Oid relationId,
*
* We can also exclude autovacuum processes and processes running manual
* lazy VACUUMs, because they won't be fazed by missing index entries
- * either. (Manual ANALYZEs, however, can't be excluded because they
+ * either. (Manual ANALYZEs, however, can't be excluded because they
* might be within transactions that are going to do arbitrary operations
* later.)
*
@@ -875,7 +875,7 @@ CheckMutability(Expr *expr)
{
/*
* First run the expression through the planner. This has a couple of
- * important consequences. First, function default arguments will get
+ * important consequences. First, function default arguments will get
* inserted, which may affect volatility (consider "default now()").
* Second, inline-able functions will get inlined, which may allow us to
* conclude that the function is really less volatile than it's marked. As
@@ -898,7 +898,7 @@ CheckMutability(Expr *expr)
* Checks that the given partial-index predicate is valid.
*
* This used to also constrain the form of the predicate to forms that
- * indxpath.c could do something with. However, that seems overly
+ * indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario
* any evaluatable predicate will work. So accept any predicate here
@@ -1009,7 +1009,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
attcollation = exprCollation(expr);
/*
- * Strip any top-level COLLATE clause. This ensures that we treat
+ * Strip any top-level COLLATE clause. This ensures that we treat
* "x COLLATE y" and "(x COLLATE y)" alike.
*/
while (IsA(expr, CollateExpr))
@@ -1215,7 +1215,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* 2000/07/30
*
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
- * too for awhile. I'm starting to think we need a better approach. tgl
+ * too for awhile. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
@@ -1284,7 +1284,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
NameListToString(opclass), accessMethodName)));
/*
- * Verify that the index operator class accepts this datatype. Note we
+ * Verify that the index operator class accepts this datatype. Note we
* will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
@@ -1305,7 +1305,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* GetDefaultOpClass
*
* Given the OIDs of a datatype and an access method, find the default
- * operator class, if any. Returns InvalidOid if there is none.
+ * operator class, if any. Returns InvalidOid if there is none.
*/
Oid
GetDefaultOpClass(Oid type_id, Oid am_id)
@@ -1400,7 +1400,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* Create a name for an implicitly created index, sequence, constraint, etc.
*
* The parameters are typically: the original table name, the original field
- * name, and a "type" string (such as "seq" or "pkey"). The field name
+ * name, and a "type" string (such as "seq" or "pkey"). The field name
* and/or type can be NULL if not relevant.
*
* The result is a palloc'd string.
@@ -1408,7 +1408,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* The basic result we want is "name1_name2_label", omitting "_name2" or
* "_label" when those parameters are NULL. However, we must generate
* a name with less than NAMEDATALEN characters! So, we truncate one or
- * both names if necessary to make a short-enough string. The label part
+ * both names if necessary to make a short-enough string. The label part
* is never truncated (so it had better be reasonably short).
*
* The caller is responsible for checking uniqueness of the generated
@@ -1603,7 +1603,7 @@ ChooseIndexNameAddition(List *colnames)
/*
* Select the actual names to be used for the columns of an index, given the
- * list of IndexElems for the columns. This is mostly about ensuring the
+ * list of IndexElems for the columns. This is mostly about ensuring the
* names are unique so we don't get a conflicting-attribute-names error.
*
* Returns a List of plain strings (char *, not String nodes).
@@ -1714,7 +1714,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
/*
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
- * lookup and now. In that case, there's nothing to do.
+ * lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index a301d65b60..5130d512a6 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -240,9 +240,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
owner = matviewRel->rd_rel->relowner;
/*
- * Create the transient table that will receive the regenerated data.
- * Lock it against access by any other process until commit (by which time
- * it will be gone).
+ * Create the transient table that will receive the regenerated data. Lock
+ * it against access by any other process until commit (by which time it
+ * will be gone).
*/
OIDNewHeap = make_new_heap(matviewOid, tableSpace, concurrent,
ExclusiveLock);
@@ -319,7 +319,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,
/*
* Use a snapshot with an updated command ID to ensure this query sees
- * results of any previously executed queries. (This could only matter if
+ * results of any previously executed queries. (This could only matter if
* the planner executed an allegedly-stable function that changed the
* database contents, but let's do it anyway to be safe.)
*/
@@ -495,9 +495,9 @@ mv_GenerateOper(StringInfo buf, Oid opoid)
*
* This is called after a new version of the data has been created in a
* temporary table. It performs a full outer join against the old version of
- * the data, producing "diff" results. This join cannot work if there are any
+ * the data, producing "diff" results. This join cannot work if there are any
* duplicated rows in either the old or new versions, in the sense that every
- * column would compare as equal between the two rows. It does work correctly
+ * column would compare as equal between the two rows. It does work correctly
* in the face of rows which have at least one NULL value, with all non-NULL
* columns equal. The behavior of NULLs on equality tests and on UNIQUE
* indexes turns out to be quite convenient here; the tests we need to make
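[Editor's note: for orientation, the "diff" join sketched as this file would build it — relation and column names are placeholders, and the real query compares every column and handles NULLs as the comment explains:]

    appendStringInfo(&querybuf,
                     "SELECT mv.ctid AS tid, newdata.* "
                     "  FROM matview_contents mv "
                     "  FULL JOIN new_contents newdata ON (mv.c1 = newdata.c1) "
                     " WHERE mv.ctid IS NULL OR newdata.c1 IS NULL");
    /* rows present on only one side of the join are the "diff" */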
@@ -561,7 +561,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid)
/*
* We need to ensure that there are not duplicate rows without NULLs in
- * the new data set before we can count on the "diff" results. Check for
+ * the new data set before we can count on the "diff" results. Check for
* that in a way that allows showing the first duplicated row found. Even
* after we pass this test, a unique index on the materialized view may
* find a duplicate key problem.
@@ -707,7 +707,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid)
/* Deletes must come before inserts; do them first. */
resetStringInfo(&querybuf);
appendStringInfo(&querybuf,
- "DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY "
+ "DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY "
"(SELECT diff.tid FROM %s diff "
"WHERE diff.tid IS NOT NULL "
"AND diff.newdata IS NULL)",
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 5d7b37c674..4b2baaceff 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -391,7 +391,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
- * require ownership of the functions. It's also not entirely clear what
+ * require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
@@ -673,7 +673,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);
/*
- * Create dependencies for the opclass proper. Note: we do not create a
+ * Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@@ -1090,7 +1090,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
if (OidIsValid(member->sortfamily))
{
/*
- * Ordering op, check index supports that. (We could perhaps also
+ * Ordering op, check index supports that. (We could perhaps also
* check that the operator returns a type supported by the sortfamily,
* but that seems more trouble than it's worth here. If it does not,
* the operator will never be matchable to any ORDER BY clause, but no
@@ -1219,7 +1219,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
/*
* The default in CREATE OPERATOR CLASS is to use the class' opcintype as
- * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
+ * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
* isn't available, so make the user specify the types.
*/
if (!OidIsValid(member->lefttype))
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index c2560cbce3..85b81b7928 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -211,7 +211,7 @@ DefineOperator(List *names, List *parameters)
functionOid = LookupFuncName(functionName, nargs, typeId, false);
/*
- * We require EXECUTE rights for the function. This isn't strictly
+ * We require EXECUTE rights for the function. This isn't strictly
* necessary, since EXECUTE will be checked at any attempted use of the
* operator, but it seems like a good idea anyway.
*/
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index e7c681ab7f..28e785afb8 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,7 +4,7 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
*
@@ -89,7 +89,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
/*----------
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case we
+ * memory context. We want to pass down the parameter values in case we
* had a command like
* DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
* This will have been parsed using the outer parameter set and the
@@ -106,7 +106,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
- * so. Also, we disallow scrolling for FOR UPDATE cursors.
+ * so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -365,7 +365,7 @@ PersistHoldablePortal(Portal portal)
ExecutorRewind(queryDesc);
/*
- * Change the destination to output to the tuplestore. Note we tell
+ * Change the destination to output to the tuplestore. Note we tell
* the tuplestore receiver to detoast all data passed through it.
*/
queryDesc->dest = CreateDestReceiver(DestTuplestore);
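[Editor's note: the redirection described above takes two calls — a sketch; SetTuplestoreDestReceiverParams is the receiver's setup function, with the final argument requesting detoasting:]

    queryDesc->dest = CreateDestReceiver(DestTuplestore);
    SetTuplestoreDestReceiverParams(queryDesc->dest,
                                    portal->holdStore,    /* where rows go */
                                    portal->holdContext,  /* memory context */
                                    true);                /* detoast data */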
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 65431b713d..10168e3e80 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
* ExecuteQuery --- implement the 'EXECUTE' utility statement.
*
* This code also supports CREATE TABLE ... AS EXECUTE. That case is
- * indicated by passing a non-null intoClause. The DestReceiver is already
+ * indicated by passing a non-null intoClause. The DestReceiver is already
* set up correctly for CREATE TABLE AS, but we still have to make a few
* other adjustments here.
*
@@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
- * of query, in case parameters are pass-by-reference. Note that the
+ * of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/
@@ -237,7 +237,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
/*
* For CREATE TABLE ... AS EXECUTE, we must verify that the prepared
* statement is one that produces tuples. Currently we insist that it be
- * a plain old SELECT. In future we might consider supporting other
+ * a plain old SELECT. In future we might consider supporting other
* things such as INSERT ... RETURNING, but there are a couple of issues
* to be settled first, notably how WITH NO DATA should be handled in such
* a case (do we really want to suppress execution?) and how to pass down
@@ -529,7 +529,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt)
/*
* Given a prepared statement that returns tuples, extract the query
- * targetlist. Returns NIL if the statement doesn't have a determinable
+ * targetlist. Returns NIL if the statement doesn't have a determinable
* targetlist.
*
* Note: this is pretty ugly, but since it's only used in corner cases like
@@ -644,7 +644,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
- * of query, in case parameters are pass-by-reference. Note that the
+ * of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 75b4ce56ae..6fb34637f8 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -260,7 +260,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER. (This is probably obsolete and removable?)
*/
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 2599e28cc4..03f5514d39 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -67,7 +67,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
* To create a schema, must have schema-create privilege on the current
* database and must be able to become the target role (this does not
* imply that the target role itself must have create-schema privilege).
- * The latter provision guards against "giveaway" attacks. Note that a
+ * The latter provision guards against "giveaway" attacks. Note that a
* superuser will always have both of these privileges a fortiori.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
@@ -132,7 +132,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
- * references. Note that the result is still a list of raw parsetrees ---
+ * references. Note that the result is still a list of raw parsetrees ---
* we cannot, in general, run parse analysis on one statement until we
* have actually executed the prior ones.
*/
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 2829b1e304..e6084203a8 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -279,7 +279,7 @@ ResetSequence(Oid seq_relid)
seq->log_cnt = 0;
/*
- * Create a new storage file for the sequence. We want to keep the
+ * Create a new storage file for the sequence. We want to keep the
* sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs.
* Same with relminmxid, since a sequence will never contain multixacts.
*/
@@ -325,9 +325,9 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/*
- * Since VACUUM does not process sequences, we have to force the tuple
- * to have xmin = FrozenTransactionId now. Otherwise it would become
- * invisible to SELECTs after 2G transactions. It is okay to do this
+ * Since VACUUM does not process sequences, we have to force the tuple to
+ * have xmin = FrozenTransactionId now. Otherwise it would become
+ * invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*/
@@ -487,7 +487,7 @@ nextval(PG_FUNCTION_ARGS)
* XXX: This is not safe in the presence of concurrent DDL, but acquiring
* a lock here is more expensive than letting nextval_internal do it,
* since the latter maintains a cache that keeps us from hitting the lock
- * manager more than once per transaction. It's not clear whether the
+ * manager more than once per transaction. It's not clear whether the
* performance penalty is material in practice, but for now, we do it this
* way.
*/
@@ -567,7 +567,7 @@ nextval_internal(Oid relid)
}
/*
- * Decide whether we should emit a WAL log record. If so, force up the
+ * Decide whether we should emit a WAL log record. If so, force up the
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
@@ -674,7 +674,7 @@ nextval_internal(Oid relid)
* We must mark the buffer dirty before doing XLogInsert(); see notes in
* SyncOneBuffer(). However, we don't apply the desired changes just yet.
* This looks like a violation of the buffer update protocol, but it is in
- * fact safe because we hold exclusive lock on the buffer. Any other
+ * fact safe because we hold exclusive lock on the buffer. Any other
* process, including a checkpoint, that tries to examine the buffer
* contents will block until we release the lock, and then will see the
* final state that we install below.
@@ -936,7 +936,7 @@ setval3_oid(PG_FUNCTION_ARGS)
* Open the sequence and acquire AccessShareLock if needed
*
* If we haven't touched the sequence already in this transaction,
- * we need to acquire AccessShareLock. We arrange for the lock to
+ * we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
@@ -1037,7 +1037,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
/*
* If the sequence has been transactionally replaced since we last saw it,
- * discard any cached-but-unissued values. We do not touch the currval()
+ * discard any cached-but-unissued values. We do not touch the currval()
* state, however.
*/
if (seqrel->rd_rel->relfilenode != elm->filenode)
@@ -1554,13 +1554,13 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
page = (Page) BufferGetPage(buffer);
/*
- * We always reinit the page. However, since this WAL record type is
- * also used for updating sequences, it's possible that a hot-standby
- * backend is examining the page concurrently; so we mustn't transiently
- * trash the buffer. The solution is to build the correct new page
- * contents in local workspace and then memcpy into the buffer. Then only
- * bytes that are supposed to change will change, even transiently. We
- * must palloc the local page for alignment reasons.
+ * We always reinit the page. However, since this WAL record type is also
+ * used for updating sequences, it's possible that a hot-standby backend
+ * is examining the page concurrently; so we mustn't transiently trash the
+ * buffer. The solution is to build the correct new page contents in
+ * local workspace and then memcpy into the buffer. Then only bytes that
+ * are supposed to change will change, even transiently. We must palloc
+ * the local page for alignment reasons.
*/
localpage = (Page) palloc(BufferGetPageSize(buffer));
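[Editor's note: the pattern in outline — a sketch; the page-API calls are from bufpage.h, the special-space size is assumed, and the fill step is elided:]

    Page localpage = (Page) palloc(BufferGetPageSize(buffer));

    PageInit(localpage, BufferGetPageSize(buffer),
             sizeof(sequence_magic));   /* special-space size assumed */
    /* ... write the new sequence tuple into localpage ... */

    /* Copy the whole page in one step: concurrent readers only ever see
     * bytes change that are actually supposed to change. */
    memcpy((char *) BufferGetPage(buffer), localpage,
           BufferGetPageSize(buffer));
    pfree(localpage);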
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 619aa78d80..341262b6fc 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -276,7 +276,7 @@ static void AlterSeqNamespaces(Relation classRel, Relation rel,
Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved,
LOCKMODE lockmode);
static void ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
- bool recurse, bool recursing, LOCKMODE lockmode);
+ bool recurse, bool recursing, LOCKMODE lockmode);
static void ATExecValidateConstraint(Relation rel, char *constrName,
bool recurse, bool recursing, LOCKMODE lockmode);
static int transformColumnNameList(Oid relId, List *colList,
@@ -557,7 +557,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
&inheritOids, &old_constraints, &parentOidCount);
/*
- * Create a tuple descriptor from the relation schema. Note that this
+ * Create a tuple descriptor from the relation schema. Note that this
* deals with column names, types, and NOT NULL constraints, but not
* default values or CHECK constraints; we handle those below.
*/
@@ -657,7 +657,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
CommandCounterIncrement();
/*
- * Open the new relation and acquire exclusive lock on it. This isn't
+ * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't see
* the new rel anyway until we commit), but it keeps the lock manager from
* complaining about deadlock risks.
@@ -702,7 +702,7 @@ DropErrorMsgNonExistent(RangeVar *rel, char rightkind, bool missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("schema \"%s\" does not exist", rel->schemaname)));
+ errmsg("schema \"%s\" does not exist", rel->schemaname)));
}
else
{
@@ -1022,10 +1022,10 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * In CASCADE mode, suck in all referencing relations as well. This
+ * In CASCADE mode, suck in all referencing relations as well. This
* requires multiple iterations to find indirectly-dependent relations. At
* each phase, we need to exclusive-lock new rels before looking for their
- * dependencies, else we might miss something. Also, we check each rel as
+ * dependencies, else we might miss something. Also, we check each rel as
* soon as we open it, to avoid a faux pas such as holding lock for a long
* time on a rel we have no permissions for.
*/
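The iteration that comment describes is a fixed-point loop over the locked set, roughly (a sketch; rels and relids are the lists the surrounding code accumulates):

    for (;;)
    {
        List       *newrelids = heap_truncate_find_FKs(relids);
        ListCell   *cell;

        if (newrelids == NIL)
            break;              /* closure reached, nothing new */

        foreach(cell, newrelids)
        {
            Oid         relid = lfirst_oid(cell);
            Relation    rel = heap_open(relid, AccessExclusiveLock);

            truncate_check_rel(rel);    /* check as soon as we open it */
            rels = lappend(rels, rel);
            relids = lappend_oid(relids, relid);
        }
    }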
@@ -1246,7 +1246,7 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
+ * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
@@ -1674,7 +1674,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Now copy the CHECK constraints of this parent, adjusting attnos
- * using the completed newattno[] map. Identically named constraints
+ * using the completed newattno[] map. Identically named constraints
* are merged if possible, else we throw error.
*/
if (constr && constr->num_check > 0)
@@ -1735,7 +1735,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
+ * commit. That will prevent someone else from deleting or ALTERing
* the parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -2243,7 +2243,7 @@ renameatt_internal(Oid myrelid,
oldattname)));
/*
- * if the attribute is inherited, forbid the renaming. if this is a
+ * If the attribute is inherited, forbid the renaming. If this is a
* top-level call to renameatt(), then expected_parents will be 0, so the
* effect of this code will be to prohibit the renaming if the attribute
 * is inherited at all. If this is a recursive call to renameatt(),
@@ -2547,7 +2547,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal)
newrelname)));
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup is OK
+ * Update pg_class tuple with new relname. (Scribbling on reltup is OK
* because it's a copy...)
*/
namestrcpy(&(relform->relname), newrelname);
@@ -2603,7 +2603,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal)
* We also reject these commands if there are any pending AFTER trigger events
* for the rel. This is certainly necessary for the rewriting variants of
* ALTER TABLE, because they don't preserve tuple TIDs and so the pending
- * events would try to fetch the wrong tuples. It might be overly cautious
+ * events would try to fetch the wrong tuples. It might be overly cautious
* in other cases, but again it seems better to err on the side of paranoia.
*
* REINDEX calls this with "rel" referencing the index to be rebuilt; here
@@ -2659,23 +2659,23 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
* 3. Scan table(s) to check new constraints, and optionally recopy
* the data into new table(s).
* Phase 3 is not performed unless one or more of the subcommands requires
- * it. The intention of this design is to allow multiple independent
+ * it. The intention of this design is to allow multiple independent
* updates of the table schema to be performed with only one pass over the
* data.
*
- * ATPrepCmd performs phase 1. A "work queue" entry is created for
+ * ATPrepCmd performs phase 1. A "work queue" entry is created for
* each table to be affected (there may be multiple affected tables if the
* commands traverse a table inheritance hierarchy). Also we do preliminary
* validation of the subcommands, including parse transformation of those
* expressions that need to be evaluated with respect to the old table
* schema.
*
- * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
+ * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
* phases 2 and 3 normally do no explicit recursion, since phase 1 already
* did it --- although some subcommands have to recurse in phase 2 instead.)
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
- * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
+ * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
* lists, one for each logical "pass" of phase 2.
*
* ATRewriteTables performs phase 3 for those tables that need it.
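Taken together, the driver for the three phases is essentially (an abridged sketch of ATController; recursion and error handling omitted):

    /* Phase 1: preliminary examination of commands, create work queue */
    foreach(lcmd, cmds)
        ATPrepCmd(&wqueue, rel, (AlterTableCmd *) lfirst(lcmd),
                  recurse, false, lockmode);

    /* Close the rel, but keep the lock until commit */
    relation_close(rel, NoLock);

    /* Phase 2: update system catalogs, one subcommand pass at a time */
    ATRewriteCatalogs(&wqueue, lockmode);

    /* Phase 3: scan/rewrite tables as needed */
    ATRewriteTables(&wqueue, lockmode);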
@@ -2782,17 +2782,18 @@ AlterTableGetLockLevel(List *cmds)
* to SELECT */
case AT_SetTableSpace: /* must rewrite heap */
case AT_AlterColumnType: /* must rewrite heap */
- case AT_AddOids: /* must rewrite heap */
+ case AT_AddOids: /* must rewrite heap */
cmd_lockmode = AccessExclusiveLock;
break;
/*
- * These subcommands may require addition of toast tables. If we
- * add a toast table to a table currently being scanned, we
+ * These subcommands may require addition of toast tables. If
+ * we add a toast table to a table currently being scanned, we
* might miss data added to the new toast table by concurrent
* insert transactions.
*/
- case AT_SetStorage: /* may add toast tables, see ATRewriteCatalogs() */
+ case AT_SetStorage:/* may add toast tables, see
+ * ATRewriteCatalogs() */
cmd_lockmode = AccessExclusiveLock;
break;
@@ -2808,12 +2809,12 @@ AlterTableGetLockLevel(List *cmds)
/*
* Subcommands that may be visible to concurrent SELECTs
*/
- case AT_DropColumn: /* change visible to SELECT */
+ case AT_DropColumn: /* change visible to SELECT */
case AT_AddColumnToView: /* CREATE VIEW */
- case AT_DropOids: /* calls AT_DropColumn */
+ case AT_DropOids: /* calls AT_DropColumn */
case AT_EnableAlwaysRule: /* may change SELECT rules */
case AT_EnableReplicaRule: /* may change SELECT rules */
- case AT_EnableRule: /* may change SELECT rules */
+ case AT_EnableRule: /* may change SELECT rules */
case AT_DisableRule: /* may change SELECT rules */
cmd_lockmode = AccessExclusiveLock;
break;
@@ -2834,8 +2835,8 @@ AlterTableGetLockLevel(List *cmds)
break;
/*
- * These subcommands affect write operations only.
- * XXX Theoretically, these could be ShareRowExclusiveLock.
+ * These subcommands affect write operations only. XXX
+ * Theoretically, these could be ShareRowExclusiveLock.
*/
case AT_ColumnDefault:
case AT_ProcessedConstraint: /* becomes AT_AddConstraint */
@@ -2872,9 +2873,9 @@ AlterTableGetLockLevel(List *cmds)
* Cases essentially the same as CREATE INDEX. We
* could reduce the lock strength to ShareLock if
* we can work out how to allow concurrent catalog
- * updates.
- * XXX Might be set down to ShareRowExclusiveLock
- * but requires further analysis.
+ * updates. XXX Might be set down to
+ * ShareRowExclusiveLock but requires further
+ * analysis.
*/
cmd_lockmode = AccessExclusiveLock;
break;
@@ -2883,10 +2884,9 @@ AlterTableGetLockLevel(List *cmds)
/*
* We add triggers to both tables when we add a
* Foreign Key, so the lock level must be at least
- * as strong as CREATE TRIGGER.
- * XXX Might be set down to ShareRowExclusiveLock
- * though trigger info is accessed by
- * pg_get_triggerdef
+ * as strong as CREATE TRIGGER. XXX Might be set
+ * down to ShareRowExclusiveLock though trigger
+ * info is accessed by pg_get_triggerdef
*/
cmd_lockmode = AccessExclusiveLock;
break;
@@ -2902,8 +2902,8 @@ AlterTableGetLockLevel(List *cmds)
* started before us will continue to see the old inheritance
* behaviour, while queries started after we commit will see
* new behaviour. No need to prevent reads or writes to the
- * subtable while we hook it up though.
- * Changing the TupDesc may be a problem, so keep highest lock.
+ * subtable while we hook it up though. Changing the TupDesc
+ * may be a problem, so keep highest lock.
*/
case AT_AddInherit:
case AT_DropInherit:
@@ -2912,9 +2912,9 @@ AlterTableGetLockLevel(List *cmds)
/*
* These subcommands affect implicit row type conversion. They
- * have affects similar to CREATE/DROP CAST on queries.
- * don't provide for invalidating parse trees as a result of
- * such changes, so we keep these at AccessExclusiveLock.
+ * have effects similar to CREATE/DROP CAST on queries. We don't
+ * provide for invalidating parse trees as a result of such
+ * changes, so we keep these at AccessExclusiveLock.
*/
case AT_AddOf:
case AT_DropOf:
@@ -2940,29 +2940,32 @@ AlterTableGetLockLevel(List *cmds)
* updates.
*/
case AT_SetStatistics: /* Uses MVCC in getTableAttrs() */
- case AT_ClusterOn: /* Uses MVCC in getIndexes() */
+ case AT_ClusterOn: /* Uses MVCC in getIndexes() */
case AT_DropCluster: /* Uses MVCC in getIndexes() */
- case AT_SetOptions: /* Uses MVCC in getTableAttrs() */
+ case AT_SetOptions: /* Uses MVCC in getTableAttrs() */
case AT_ResetOptions: /* Uses MVCC in getTableAttrs() */
cmd_lockmode = ShareUpdateExclusiveLock;
break;
- case AT_ValidateConstraint: /* Uses MVCC in getConstraints() */
+ case AT_ValidateConstraint: /* Uses MVCC in
+ * getConstraints() */
cmd_lockmode = ShareUpdateExclusiveLock;
break;
/*
* Rel options are more complex than first appears. Options
* are set here for tables, views and indexes; for historical
- * reasons these can all be used with ALTER TABLE, so we
- * can't decide between them using the basic grammar.
+ * reasons these can all be used with ALTER TABLE, so we can't
+ * decide between them using the basic grammar.
*
* XXX Look in detail at each option to determine lock level,
- * e.g.
- * cmd_lockmode = GetRelOptionsLockLevel((List *) cmd->def);
+ * e.g. cmd_lockmode = GetRelOptionsLockLevel((List *)
+ * cmd->def);
*/
- case AT_SetRelOptions: /* Uses MVCC in getIndexes() and getTables() */
- case AT_ResetRelOptions: /* Uses MVCC in getIndexes() and getTables() */
+ case AT_SetRelOptions: /* Uses MVCC in getIndexes() and
+ * getTables() */
+ case AT_ResetRelOptions: /* Uses MVCC in getIndexes() and
+ * getTables() */
cmd_lockmode = AccessExclusiveLock;
break;
@@ -3209,7 +3212,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
cmd->subtype = AT_ValidateConstraintRecurse;
pass = AT_PASS_MISC;
break;
- case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */
+ case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */
ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW);
pass = AT_PASS_MISC;
/* This command never recurses */
@@ -3258,7 +3261,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/*
* ATRewriteCatalogs
*
- * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
+ * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
* dispatched in a "safe" execution order (designed to avoid unnecessary
* conflicts).
*/
@@ -3604,8 +3607,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode)
if (RelationIsUsedAsCatalogTable(OldHeap))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot rewrite table \"%s\" used as a catalog table",
- RelationGetRelationName(OldHeap))));
+ errmsg("cannot rewrite table \"%s\" used as a catalog table",
+ RelationGetRelationName(OldHeap))));
/*
* Don't allow rewrite on temp tables of other backends ... their
@@ -3856,7 +3859,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
{
/*
* All predicate locks on the tuples or pages are about to be made
- * invalid, because we move tuples around. Promote them to
+ * invalid, because we move tuples around. Promote them to
* relation locks.
*/
TransferPredicateLocksToHeapRelation(oldrel);
@@ -3946,8 +3949,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
HeapTupleSetOid(tuple, tupOid);
/*
- * Constraints might reference the tableoid column, so initialize
- * t_tableOid before evaluating them.
+ * Constraints might reference the tableoid column, so
+ * initialize t_tableOid before evaluating them.
*/
tuple->t_tableOid = RelationGetRelid(oldrel);
}
@@ -4404,7 +4407,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
*
* Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
* isn't suitable, throw an error. Currently, we require that the type
- * originated with CREATE TYPE AS. We could support any row type, but doing so
+ * originated with CREATE TYPE AS. We could support any row type, but doing so
* would require handling a number of extra corner cases in the DDL commands.
*/
void
@@ -4423,7 +4426,7 @@ check_of_type(HeapTuple typetuple)
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
+ * commit. That will prevent someone else from deleting or ALTERing
* the type before the typed table creation/conversion commits.
*/
relation_close(typeRelation, NoLock);
@@ -4882,7 +4885,7 @@ add_column_collation_dependency(Oid relid, int32 attnum, Oid collid)
/*
* ALTER TABLE SET WITH OIDS
*
- * Basically this is an ADD COLUMN for the special OID column. We have
+ * Basically this is an ADD COLUMN for the special OID column. We have
* to cons up a ColumnDef node because the ADD COLUMN code needs one.
*/
static void
@@ -5352,7 +5355,7 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc
*
* DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism,
* because we have to decide at runtime whether to recurse or not depending
- * on whether attinhcount goes to zero or not. (We can't check this in a
+ * on whether attinhcount goes to zero or not. (We can't check this in a
* static pre-pass because it won't handle multiple inheritance situations
* correctly.)
*/
@@ -5600,7 +5603,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
/*
* If TryReuseIndex() stashed a relfilenode for us, we used it for the new
- * index instead of building from scratch. The DROP of the old edition of
+ * index instead of building from scratch. The DROP of the old edition of
* this index will have scheduled the storage for deletion at commit, so
* cancel that pending deletion.
*/
@@ -5642,7 +5645,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
elog(ERROR, "index \"%s\" is not unique", indexName);
/*
- * Determine name to assign to constraint. We require a constraint to
+ * Determine name to assign to constraint. We require a constraint to
* have the same name as the underlying index; therefore, use the index's
* existing name as the default constraint name, and if the user
* explicitly gives some other name for the constraint, rename the index
@@ -5851,7 +5854,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Check if ONLY was specified with ALTER TABLE. If so, allow the
- * contraint creation only if there are no children currently. Error out
+ * constraint creation only if there are no children currently. Error out
* otherwise.
*/
if (!recurse && children != NIL)
@@ -5883,7 +5886,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for ATExecAddConstraint. Must already hold exclusive
+ * Subroutine for ATExecAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity checks for it.
* We do permissions checks here, however.
*/
@@ -6022,7 +6025,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* Note that we have to be careful about the difference between the actual
* PK column type and the opclass' declared input type, which might be
- * only binary-compatible with it. The declared opcintype is the right
+ * only binary-compatible with it. The declared opcintype is the right
* thing to probe pg_amop with.
*/
if (numfks != numpks)
@@ -6179,7 +6182,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Upon a change to the cast from the FK column to its pfeqop
- * operand, revalidate the constraint. For this evaluation, a
+ * operand, revalidate the constraint. For this evaluation, a
* binary coercion cast is equivalent to no cast at all. While
* type implementors should design implicit casts with an eye
* toward consistency of operations like equality, we cannot
@@ -6197,7 +6200,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* Necessarily, the primary key column must then be of the domain
* type. Since the constraint was previously valid, all values on
* the foreign side necessarily exist on the primary side and in
- * turn conform to the domain. Consequently, we need not treat
+ * turn conform to the domain. Consequently, we need not treat
* domains specially here.
*
* Since we require that all collations share the same notion of
@@ -6207,7 +6210,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* We need not directly consider the PK type. It's necessarily
* binary coercible to the opcintype of the unique index column,
* and ri_triggers.c will only deal with PK datums in terms of
- * that opcintype. Changing the opcintype also changes pfeqop.
+ * that opcintype. Changing the opcintype also changes pfeqop.
*/
old_check_ok = (new_pathtype == old_pathtype &&
new_castfunc == old_castfunc &&
@@ -6300,14 +6303,14 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*/
static void
ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
- bool recurse, bool recursing, LOCKMODE lockmode)
+ bool recurse, bool recursing, LOCKMODE lockmode)
{
Relation conrel;
SysScanDesc scan;
ScanKeyData key;
HeapTuple contuple;
Form_pg_constraint currcon = NULL;
- Constraint *cmdcon = NULL;
+ Constraint *cmdcon = NULL;
bool found = false;
Assert(IsA(cmd->def, Constraint));
@@ -6374,8 +6377,8 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
heap_freetuple(copyTuple);
/*
- * Now we need to update the multiple entries in pg_trigger
- * that implement the constraint.
+ * Now we need to update the multiple entries in pg_trigger that
+ * implement the constraint.
*/
tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
@@ -6397,7 +6400,7 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd,
CatalogUpdateIndexes(tgrel, copyTuple);
InvokeObjectPostAlterHook(TriggerRelationId,
- HeapTupleGetOid(tgtuple), 0);
+ HeapTupleGetOid(tgtuple), 0);
heap_freetuple(copyTuple);
}
@@ -6619,10 +6622,10 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Also return the index OID and index opclasses of the
+ * for the pkrel. Also return the index OID and index opclasses of the
* index supporting the primary key.
*
- * All parameters except pkrel are output parameters. Also, the function
+ * All parameters except pkrel are output parameters. Also, the function
* return value is the number of attributes in the primary key.
*
* Used when the column list in the REFERENCES specification is omitted.
@@ -6662,7 +6665,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (indexStruct->indisprimary && IndexIsValid(indexStruct))
{
/*
- * Refuse to use a deferrable primary key. This is per SQL spec,
+ * Refuse to use a deferrable primary key. This is per SQL spec,
* and there would be a lot of interesting semantic problems if we
* tried to allow it.
*/
@@ -7592,7 +7595,7 @@ ATPrepAlterColumnType(List **wqueue,
tab->relkind == RELKIND_FOREIGN_TABLE)
{
/*
- * For composite types, do this check now. Tables will check it later
+ * For composite types, do this check now. Tables will check it later
* when the table is being rewritten.
*/
find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL);
@@ -7601,7 +7604,7 @@ ATPrepAlterColumnType(List **wqueue,
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However, if we are
+ * The recursion case is handled by ATSimpleRecursion. However, if we are
* told not to recurse, there had better not be any child tables; else the
* alter would put them out of step.
*/
@@ -7710,7 +7713,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
*
* We remove any implicit coercion steps at the top level of the old
* default expression; this has been agreed to satisfy the principle of
- * least surprise. (The conversion to the new column type should act like
+ * least surprise. (The conversion to the new column type should act like
* it started from what the user sees as the stored expression, and the
* implicit coercions aren't going to be shown.)
*/
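In code form, that step looks approximately like this (a sketch; targettype and targettypmod are the new column type being assigned):

    defaultexpr = build_column_default(rel, attnum);
    if (defaultexpr != NULL)
    {
        /* Strip implicit coercion steps at the top level of the old
         * default, per the principle-of-least-surprise note above. */
        defaultexpr = strip_implicit_coercions(defaultexpr);

        /* Then coerce the stripped expression to the new column type */
        defaultexpr = coerce_to_target_type(NULL,
                                            defaultexpr, exprType(defaultexpr),
                                            targettype, targettypmod,
                                            COERCION_ASSIGNMENT,
                                            COERCE_IMPLICIT_CAST,
                                            -1);
    }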
@@ -7739,7 +7742,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to save
+ * performed all the individual ALTER TYPE operations. We have to save
* the info before executing ALTER TYPE, though, else the deparser will
* get confused.
*
@@ -7868,7 +7871,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* used in the trigger's WHEN condition. The first case would
* not require any extra work, but the second case would
* require updating the WHEN expression, which will take a
- * significant amount of new code. Since we can't easily tell
+ * significant amount of new code. Since we can't easily tell
* which case applies, we punt for both. FIXME someday.
*/
ereport(ERROR,
@@ -8144,24 +8147,24 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
/*
* Re-parse the index and constraint definitions, and attach them to the
- * appropriate work queue entries. We do this before dropping because in
+ * appropriate work queue entries. We do this before dropping because in
* the case of a FOREIGN KEY constraint, we might not yet have exclusive
* lock on the table the constraint is attached to, and we need to get
* that before dropping. It's safe because the parser won't actually look
* at the catalogs to detect the existing entry.
*
- * We can't rely on the output of deparsing to tell us which relation
- * to operate on, because concurrent activity might have made the name
+ * We can't rely on the output of deparsing to tell us which relation to
+ * operate on, because concurrent activity might have made the name
* resolve differently. Instead, we've got to use the OID of the
- * constraint or index we're processing to figure out which relation
- * to operate on.
+ * constraint or index we're processing to figure out which relation to
+ * operate on.
*/
forboth(oid_item, tab->changedConstraintOids,
def_item, tab->changedConstraintDefs)
{
- Oid oldId = lfirst_oid(oid_item);
- Oid relid;
- Oid confrelid;
+ Oid oldId = lfirst_oid(oid_item);
+ Oid relid;
+ Oid confrelid;
get_constraint_relation_oids(oldId, &relid, &confrelid);
ATPostAlterTypeParse(oldId, relid, confrelid,
@@ -8171,8 +8174,8 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
forboth(oid_item, tab->changedIndexOids,
def_item, tab->changedIndexDefs)
{
- Oid oldId = lfirst_oid(oid_item);
- Oid relid;
+ Oid oldId = lfirst_oid(oid_item);
+ Oid relid;
relid = IndexGetRelation(oldId, false);
ATPostAlterTypeParse(oldId, relid, InvalidOid,
@@ -8238,9 +8241,9 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd,
cmd));
else if (IsA(stmt, AlterTableStmt))
querytree_list = list_concat(querytree_list,
- transformAlterTableStmt(oldRelId,
+ transformAlterTableStmt(oldRelId,
(AlterTableStmt *) stmt,
- cmd));
+ cmd));
else
querytree_list = lappend(querytree_list, stmt);
}
@@ -8925,13 +8928,13 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(view_query, true);
+ view_query_is_auto_updatable(view_query, true);
if (view_updatable_error)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("WITH CHECK OPTION is supported only on auto-updatable views"),
- errhint("%s", view_updatable_error)));
+ errmsg("WITH CHECK OPTION is supported only on auto-updatable views"),
+ errhint("%s", view_updatable_error)));
}
}
@@ -9098,7 +9101,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode)
/* Fetch the list of indexes on toast relation if necessary */
if (OidIsValid(reltoastrelid))
{
- Relation toastRel = relation_open(reltoastrelid, lockmode);
+ Relation toastRel = relation_open(reltoastrelid, lockmode);
+
reltoastidxids = RelationGetIndexList(toastRel);
relation_close(toastRel, lockmode);
}
@@ -9120,8 +9124,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode)
FlushRelationBuffers(rel);
/*
- * Relfilenodes are not unique in databases across tablespaces, so we
- * need to allocate a new one in the new tablespace.
+ * Relfilenodes are not unique in databases across tablespaces, so we need
+ * to allocate a new one in the new tablespace.
*/
newrelfilenode = GetNewRelFileNode(newTableSpace, NULL,
rel->rd_rel->relpersistence);
@@ -9236,9 +9240,9 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
forkNum))));
/*
- * WAL-log the copied page. Unfortunately we don't know what kind of
- * a page this is, so we have to log the full page including any
- * unused space.
+ * WAL-log the copied page. Unfortunately we don't know what kind of a
+ * page this is, so we have to log the full page including any unused
+ * space.
*/
if (use_wal)
log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false);
@@ -9246,7 +9250,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
PageSetChecksumInplace(page, blkno);
/*
- * Now write the page. We say isTemp = true even if it's not a temp
+ * Now write the page. We say isTemp = true even if it's not a temp
* rel, because there's no need for smgr to schedule an fsync for this
* write; we'll do it ourselves below.
*/
@@ -9256,7 +9260,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
pfree(buf);
/*
- * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+ * If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too. (For a temp or
* unlogged rel we don't care since the data will be gone after a crash
* anyway.)
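The per-block loop those comments annotate looks roughly like this (a sketch; buf is a palloc'd BLCKSZ buffer and page aliases it):

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        smgrread(src, forkNum, blkno, buf);

        /* WAL-log the full page, since we don't know its layout */
        if (use_wal)
            log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false);

        PageSetChecksumInplace(page, blkno);

        /* isTemp/skipFsync = true: we'll fsync ourselves below */
        smgrextend(dst, forkNum, blkno, buf, true);
    }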
@@ -9431,7 +9435,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
MergeConstraintsIntoExisting(child_rel, parent_rel);
/*
- * OK, it looks valid. Make the catalog entries that show inheritance.
+ * OK, it looks valid. Make the catalog entries that show inheritance.
*/
StoreCatalogInheritance1(RelationGetRelid(child_rel),
RelationGetRelid(parent_rel),
@@ -9907,7 +9911,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
* Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
* INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
* heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
- * be TypeRelationId). There's no convenient way to do this, so go trawling
+ * be TypeRelationId). There's no convenient way to do this, so go trawling
* through pg_depend.
*/
static void
@@ -10093,7 +10097,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
/*
* ALTER TABLE NOT OF
*
- * Detach a typed table from its originating type. Just clear reloftype and
+ * Detach a typed table from its originating type. Just clear reloftype and
* remove the dependency.
*/
static void
@@ -10155,7 +10159,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
pg_class_tuple = SearchSysCacheCopy1(RELOID,
- ObjectIdGetDatum(RelationGetRelid(rel)));
+ ObjectIdGetDatum(RelationGetRelid(rel)));
if (!HeapTupleIsValid(pg_class_tuple))
elog(ERROR, "cache lookup failed for relation \"%s\"",
RelationGetRelationName(rel));
@@ -10191,8 +10195,8 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
}
/*
- * Clear the indisreplident flag from any index that had it previously, and
- * set it for any index that should have it now.
+ * Clear the indisreplident flag from any index that had it previously,
+ * and set it for any index that should have it now.
*/
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
foreach(index, RelationGetIndexList(rel))
@@ -10201,7 +10205,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
bool dirty = false;
pg_index_tuple = SearchSysCacheCopy1(INDEXRELID,
- ObjectIdGetDatum(thisIndexOid));
+ ObjectIdGetDatum(thisIndexOid));
if (!HeapTupleIsValid(pg_index_tuple))
elog(ERROR, "cache lookup failed for index %u", thisIndexOid);
pg_index_form = (Form_pg_index) GETSTRUCT(pg_index_tuple);
@@ -10261,7 +10265,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
}
else if (stmt->identity_type == REPLICA_IDENTITY_INDEX)
{
- /* fallthrough */;
+ /* fallthrough */ ;
}
else
elog(ERROR, "unexpected identity type %u", stmt->identity_type);
@@ -10289,20 +10293,20 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
if (!indexRel->rd_am->amcanunique || !indexRel->rd_index->indisunique)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot use non-unique index \"%s\" as replica identity",
- RelationGetRelationName(indexRel))));
+ errmsg("cannot use non-unique index \"%s\" as replica identity",
+ RelationGetRelationName(indexRel))));
/* Deferred indexes are not guaranteed to be always unique. */
if (!indexRel->rd_index->indimmediate)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use non-immediate index \"%s\" as replica identity",
- RelationGetRelationName(indexRel))));
+ errmsg("cannot use non-immediate index \"%s\" as replica identity",
+ RelationGetRelationName(indexRel))));
/* Expression indexes aren't supported. */
if (RelationGetIndexExpressions(indexRel) != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use expression index \"%s\" as replica identity",
- RelationGetRelationName(indexRel))));
+ errmsg("cannot use expression index \"%s\" as replica identity",
+ RelationGetRelationName(indexRel))));
/* Predicate indexes aren't supported. */
if (RelationGetIndexPredicate(indexRel) != NIL)
ereport(ERROR,
@@ -10319,7 +10323,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
/* Check index for nullable columns. */
for (key = 0; key < indexRel->rd_index->indnatts; key++)
{
- int16 attno = indexRel->rd_index->indkey.values[key];
+ int16 attno = indexRel->rd_index->indkey.values[key];
Form_pg_attribute attr;
/* Of the system columns, only oid is indexable. */
@@ -10878,7 +10882,7 @@ AtEOXact_on_commit_actions(bool isCommit)
* Post-subcommit or post-subabort cleanup for ON COMMIT management.
*
* During subabort, we can immediately remove entries created during this
- * subtransaction. During subcommit, just relabel entries marked during
+ * subtransaction. During subcommit, just relabel entries marked during
* this subtransaction as being the parent's responsibility.
*/
void
@@ -10922,7 +10926,7 @@ AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid,
* This is intended as a callback for RangeVarGetRelidExtended(). It allows
* the relation to be locked only if (1) it's a plain table, materialized
* view, or TOAST table and (2) the current user is the owner (or the
- * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
+ * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
* TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be
* used by all.
*/
@@ -10939,7 +10943,7 @@ RangeVarCallbackOwnsTable(const RangeVar *relation,
/*
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
- * lookup and now. In that case, there's nothing to do.
+ * lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
@@ -11105,8 +11109,8 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table",
- rv->relname)));
+ errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table",
+ rv->relname)));
ReleaseSysCache(tuple);
}
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 357e6e1974..031be37a1e 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -31,7 +31,7 @@
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* provision that a zero in pg_class.reltablespace means the database's
- * default tablespace. Without this, CREATE DATABASE would have to go in
+ * default tablespace. Without this, CREATE DATABASE would have to go in
* and munge the system catalogs of the new database.
*
*
@@ -281,7 +281,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
* reference the whole path here, but mkdir() uses the first two parts.
*/
if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 +
- OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH)
+ OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("tablespace location \"%s\" is too long",
@@ -488,7 +488,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
* Not all files deleted? However, there can be lingering empty files
* in the directories, left behind by for example DROP TABLE, that
* have been scheduled for deletion at next checkpoint (see comments
- * in mdunlink() for details). We could just delete them immediately,
+ * in mdunlink() for details). We could just delete them immediately,
* but we can't tell them apart from important data files that we
* mustn't delete. So instead, we force a checkpoint which will clean
* out any lingering files, and try again.
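That checkpoint-and-retry looks roughly like this (a sketch following the surrounding DropTableSpace code):

    if (!destroy_tablespace_directories(tablespaceoid, false))
    {
        /* Force a checkpoint to flush any pending unlinks, then retry */
        RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE |
                          CHECKPOINT_WAIT);

        if (!destroy_tablespace_directories(tablespaceoid, false))
            ereport(ERROR,
                    (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                     errmsg("tablespace \"%s\" is not empty",
                            tablespacename)));
    }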
@@ -562,10 +562,10 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
linkloc = psprintf("pg_tblspc/%u", tablespaceoid);
location_with_version_dir = psprintf("%s/%s", location,
- TABLESPACE_VERSION_DIRECTORY);
+ TABLESPACE_VERSION_DIRECTORY);
/*
- * Attempt to coerce target directory to safe permissions. If this fails,
+ * Attempt to coerce target directory to safe permissions. If this fails,
* it doesn't exist or has the wrong owner.
*/
if (chmod(location, S_IRWXU) != 0)
@@ -666,7 +666,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
* Attempt to remove filesystem infrastructure for the tablespace.
*
* 'redo' indicates we are redoing a drop from XLOG; in that case we should
- * not throw an ERROR for problems, just LOG them. The worst consequence of
+ * not throw an ERROR for problems, just LOG them. The worst consequence of
* not removing files here would be failure to release some disk space, which
* does not justify throwing an error that would require manual intervention
* to get the database running again.
@@ -684,7 +684,7 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
struct stat st;
linkloc_with_version_dir = psprintf("pg_tblspc/%u/%s", tablespaceoid,
- TABLESPACE_VERSION_DIRECTORY);
+ TABLESPACE_VERSION_DIRECTORY);
/*
* Check if the tablespace still contains any files. We try to rmdir each
@@ -701,10 +701,10 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
*
* If redo is true then ENOENT is a likely outcome here, and we allow it
* to pass without comment. In normal operation we still allow it, but
- * with a warning. This is because even though ProcessUtility disallows
+ * with a warning. This is because even though ProcessUtility disallows
* DROP TABLESPACE in a transaction block, it's possible that a previous
* DROP failed and rolled back after removing the tablespace directories
- * and/or symlink. We want to allow a new DROP attempt to succeed at
+ * and/or symlink. We want to allow a new DROP attempt to succeed at
* removing the catalog entries (and symlink if still present), so we
* should not give a hard error here.
*/
@@ -1119,8 +1119,8 @@ AlterTableSpaceMove(AlterTableSpaceMoveStmt *stmt)
/*
* Handle permissions-checking here since we are locking the tables
- * and also to avoid doing a bunch of work only to fail part-way.
- * Note that permissions will also be checked by AlterTableInternal().
+ * and also to avoid doing a bunch of work only to fail part-way. Note
+ * that permissions will also be checked by AlterTableInternal().
*
* Caller must be considered an owner on the table to move it.
*/
@@ -1179,7 +1179,7 @@ check_default_tablespace(char **newval, void **extra, GucSource source)
{
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the name. Must accept the value on faith.
+ * cannot verify the name. Must accept the value on faith.
*/
if (IsTransactionState())
{
@@ -1290,7 +1290,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the individual names. Must accept the list on faith.
+ * cannot verify the individual names. Must accept the list on faith.
* Fortunately, there's then also no need to pass the data to fd.c.
*/
if (IsTransactionState())
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 5f1ccf02c2..9bf0098b6c 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -107,7 +107,7 @@ static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
*
* constraintOid, if nonzero, says that this trigger is being created
* internally to implement that constraint. A suitable pg_depend entry will
- * be made to link the trigger to that constraint. constraintOid is zero when
+ * be made to link the trigger to that constraint. constraintOid is zero when
* executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
* TRIGGER, we build a pg_constraint entry internally.)
*
@@ -418,7 +418,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we see a
+ * We allow OPAQUE just so we can load old dump files. When we see a
* trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -440,7 +440,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* references one of the built-in RI_FKey trigger functions, assume it is
* from a dump of a pre-7.3 foreign key constraint, and take steps to
* convert this legacy representation into a regular foreign key
- * constraint. Ugly, but necessary for loading old dump files.
+ * constraint. Ugly, but necessary for loading old dump files.
*/
if (stmt->isconstraint && !isInternal &&
list_length(stmt->args) >= 6 &&
@@ -503,7 +503,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
/*
* If trigger is internally generated, modify the provided trigger name to
- * ensure uniqueness by appending the trigger OID. (Callers will usually
+ * ensure uniqueness by appending the trigger OID. (Callers will usually
* supply a simple constant trigger name in these cases.)
*/
if (isInternal)
@@ -627,7 +627,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
int16 attnum;
int j;
- /* Lookup column name. System columns are not allowed */
+ /* Lookup column name. System columns are not allowed */
attnum = attnameAttNum(rel, name, false);
if (attnum == InvalidAttrNumber)
ereport(ERROR,
@@ -732,7 +732,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
else
{
/*
- * User CREATE TRIGGER, so place dependencies. We make trigger be
+ * User CREATE TRIGGER, so place dependencies. We make trigger be
* auto-dropped if its relation is dropped or if the FK relation is
* dropped. (Auto drop is compatible with our pre-7.3 behavior.)
*/
@@ -801,7 +801,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* full-fledged foreign key constraints.
*
* The conversion is complex because a pre-7.3 foreign key involved three
- * separate triggers, which were reported separately in dumps. While the
+ * separate triggers, which were reported separately in dumps. While the
* single trigger on the referencing table adds no new information, we need
* to know the trigger functions of both of the triggers on the referenced
* table to build the constraint declaration. Also, due to lack of proper
@@ -2038,7 +2038,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2113,7 +2113,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2503,7 +2503,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2599,7 +2599,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -3031,7 +3031,7 @@ typedef SetConstraintStateData *SetConstraintState;
* Although this is mutable state, we can keep it in AfterTriggerSharedData
* because all instances of the same type of event in a given event list will
* be fired at the same time, if they were queued between the same firing
- * cycles. So we need only ensure that ats_firing_id is zero when attaching
+ * cycles. So we need only ensure that ats_firing_id is zero when attaching
* a new event to an existing AfterTriggerSharedData record.
*/
typedef uint32 TriggerFlags;
@@ -3077,7 +3077,7 @@ typedef struct AfterTriggerEventDataOneCtid
typedef struct AfterTriggerEventDataZeroCtids
{
TriggerFlags ate_flags; /* status bits and offset to shared data */
-} AfterTriggerEventDataZeroCtids;
+} AfterTriggerEventDataZeroCtids;
#define SizeofTriggerEvent(evt) \
(((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
@@ -3092,7 +3092,7 @@ typedef struct AfterTriggerEventDataZeroCtids
/*
* To avoid palloc overhead, we keep trigger events in arrays in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). The space between CHUNK_DATA_START and freeptr is occupied by
+ * array). The space between CHUNK_DATA_START and freeptr is occupied by
* AfterTriggerEventData records; the space between endfree and endptr is
* occupied by AfterTriggerSharedData records.
*/
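The chunk header behind CHUNK_DATA_START, freeptr, endfree, and endptr is roughly (a sketch of the layout just described):

    typedef struct AfterTriggerEventChunk
    {
        struct AfterTriggerEventChunk *next;    /* list link */
        char       *freeptr;        /* start of free space within chunk */
        char       *endfree;        /* end of free space within chunk */
        char       *endptr;         /* end of chunk */
        /* AfterTriggerEventData records grow up from CHUNK_DATA_START
         * toward freeptr; AfterTriggerSharedData records grow down from
         * endptr toward endfree. */
    } AfterTriggerEventChunk;

    #define CHUNK_DATA_START(cptr) \
        ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))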
@@ -3134,7 +3134,7 @@ typedef struct AfterTriggerEventList
*
* firing_counter is incremented for each call of afterTriggerInvokeEvents.
* We mark firable events with the current firing cycle's ID so that we can
- * tell which ones to work on. This ensures sane behavior if a trigger
+ * tell which ones to work on. This ensures sane behavior if a trigger
* function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
* only fire those events that weren't already scheduled for firing.
*
@@ -3142,7 +3142,7 @@ typedef struct AfterTriggerEventList
* This is saved and restored across failed subtransactions.
*
* events is the current list of deferred events. This is global across
- * all subtransactions of the current transaction. In a subtransaction
+ * all subtransactions of the current transaction. In a subtransaction
* abort, we know that the events added by the subtransaction are at the
* end of the list, so it is relatively easy to discard them. The event
* list chunks themselves are stored in event_cxt.
@@ -3174,12 +3174,12 @@ typedef struct AfterTriggerEventList
* which we similarly use to clean up at subtransaction abort.
*
* firing_stack is a stack of copies of subtransaction-start-time
- * firing_counter. We use this to recognize which deferred triggers were
+ * firing_counter. We use this to recognize which deferred triggers were
* fired (or marked for firing) within an aborted subtransaction.
*
* We use GetCurrentTransactionNestLevel() to determine the correct array
* index in these stacks. maxtransdepth is the number of allocated entries in
- * each stack. (By not keeping our own stack pointer, we can avoid trouble
+ * each stack. (By not keeping our own stack pointer, we can avoid trouble
* in cases where errors during subxact abort cause multiple invocations
* of AfterTriggerEndSubXact() at the same nesting depth.)
*/
@@ -3490,7 +3490,7 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events,
* single trigger function.
*
* Frequently, this will be fired many times in a row for triggers of
- * a single relation. Therefore, we cache the open relation and provide
+ * a single relation. Therefore, we cache the open relation and provide
* fmgr lookup cache space at the caller level. (For triggers fired at
* the end of a query, we can even piggyback on the executor's state.)
*
@@ -3566,6 +3566,7 @@ AfterTriggerExecute(AfterTriggerEvent event,
}
/* fall through */
case AFTER_TRIGGER_FDW_REUSE:
+
/*
* Using ExecMaterializeSlot() rather than ExecFetchSlotTuple()
* ensures that tg_trigtuple does not reference tuplestore memory.
@@ -4093,7 +4094,7 @@ AfterTriggerFireDeferred(void)
}
/*
- * Run all the remaining triggers. Loop until they are all gone, in case
+ * Run all the remaining triggers. Loop until they are all gone, in case
* some trigger queues more for us to do.
*/
while (afterTriggerMarkEvents(events, NULL, false))
@@ -4156,7 +4157,7 @@ AfterTriggerBeginSubXact(void)
int my_level = GetCurrentTransactionNestLevel();
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* shouldn't happen?)
*/
if (afterTriggers == NULL)
@@ -4235,7 +4236,7 @@ AfterTriggerEndSubXact(bool isCommit)
CommandId subxact_firing_id;
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* unneeded)
*/
if (afterTriggers == NULL)
@@ -4378,7 +4379,7 @@ SetConstraintStateCopy(SetConstraintState origstate)
}
/*
- * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
+ * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
* pointer to the state object (it will change if we have to repalloc).
*/
static SetConstraintState
@@ -4463,7 +4464,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
* First, identify all the named constraints and make a list of their
* OIDs. Since, unlike the SQL spec, we allow multiple constraints of
* the same name within a schema, the specifications are not
- * necessarily unique. Our strategy is to target all matching
+ * necessarily unique. Our strategy is to target all matching
* constraints within the first search-path schema that has any
* matches, but disregard matches in schemas beyond the first match.
* (This is a bit odd but it's the historical behavior.)
@@ -4489,7 +4490,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema. If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
if (constraint->schemaname)
@@ -4593,7 +4594,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* Silently skip triggers that are marked as non-deferrable in
- * pg_trigger. This is not an error condition, since a
+ * pg_trigger. This is not an error condition, since a
* deferrable RI constraint may have some non-deferrable
* actions.
*/
@@ -4664,7 +4665,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* Make sure a snapshot has been established in case trigger
- * functions need one. Note that we avoid setting a snapshot if
+ * functions need one. Note that we avoid setting a snapshot if
* we don't find at least one trigger that has to be fired now.
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
* ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
@@ -4724,7 +4725,7 @@ AfterTriggerPendingOnRel(Oid relid)
AfterTriggerShared evtshared = GetTriggerSharedData(event);
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
+ * We can ignore completed events. (Even if a DONE flag is rolled
* back by subxact abort, it's OK because the effects of the TRUNCATE
* or whatever must get rolled back too.)
*/
@@ -4765,7 +4766,7 @@ AfterTriggerPendingOnRel(Oid relid)
* be fired for an event.
*
* NOTE: this is called whenever there are any triggers associated with
- * the event (even if they are disabled). This function decides which
+ * the event (even if they are disabled). This function decides which
* triggers actually need to be queued.
* ----------
*/
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index c1ee69b323..f377c19371 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -514,8 +514,8 @@ DefineType(List *names, List *parameters)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
/*
- * Check permissions on functions. We choose to require the creator/owner
- * of a type to also own the underlying functions. Since creating a type
+ * Check permissions on functions. We choose to require the creator/owner
+ * of a type to also own the underlying functions. Since creating a type
* is tantamount to granting public execute access on the functions, the
* minimum sane check would be for execute-with-grant-option. But we
* don't have a way to make the type go away if the grant option is
@@ -552,7 +552,7 @@ DefineType(List *names, List *parameters)
* now have TypeCreate do all the real work.
*
* Note: the pg_type.oid is stored in user tables as array elements (base
- * types) in ArrayType and in composite types in DatumTupleFields. This
+ * types) in ArrayType and in composite types in DatumTupleFields. This
* oid must be preserved by binary upgrades.
*/
typoid =
@@ -725,7 +725,7 @@ DefineDomain(CreateDomainStmt *stmt)
get_namespace_name(domainNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
@@ -1076,7 +1076,7 @@ DefineEnum(CreateEnumStmt *stmt)
get_namespace_name(enumNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
@@ -1193,7 +1193,7 @@ AlterEnum(AlterEnumStmt *stmt, bool isTopLevel)
/*
* Ordinarily we disallow adding values within transaction blocks, because
* we can't cope with enum OID values getting into indexes and then having
- * their defining pg_enum entries go away. However, it's okay if the enum
+ * their defining pg_enum entries go away. However, it's okay if the enum
* type was created in the current transaction, since then there can be no
* such indexes that wouldn't themselves go away on rollback. (We support
* this case because pg_dump --binary-upgrade needs it.) We test this by
@@ -1515,7 +1515,7 @@ DefineRange(CreateRangeStmt *stmt)
* impossible to define a polymorphic constructor; we have to generate new
* constructor functions explicitly for each range type.
*
- * We actually define 4 functions, with 0 through 3 arguments. This is just
+ * We actually define 4 functions, with 0 through 3 arguments. This is just
* to offer more convenience for the user.
*/
static void
@@ -2277,7 +2277,7 @@ AlterDomainNotNull(List *names, bool notNull)
/*
* In principle the auxiliary information for this
* error should be errdatatype(), but errtablecol()
- * seems considerably more useful in practice. Since
+ * seems considerably more useful in practice. Since
* this code only executes in an ALTER DOMAIN command,
* the client should already know which domain is in
* question.
@@ -2300,7 +2300,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's a
+ * Okay to update pg_type row. We can scribble on typTup because it's a
* copy.
*/
typTup->typnotnull = notNull;
@@ -2488,7 +2488,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
/*
* Since all other constraint types throw errors, this must be a check
- * constraint. First, process the constraint expression and add an entry
+ * constraint. First, process the constraint expression and add an entry
* to pg_constraint.
*/
@@ -2674,7 +2674,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin)
/*
* In principle the auxiliary information for this error
* should be errdomainconstraint(), but errtablecol()
- * seems considerably more useful in practice. Since this
+ * seems considerably more useful in practice. Since this
* code only executes in an ALTER DOMAIN command, the
* client should already know which domain is in question,
* and which constraint too.
@@ -2857,7 +2857,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
+ * Okay, add column to result. We store the columns in column-number
* order; this is just a hack to improve predictability of regression
* test output ...
*/
@@ -2944,7 +2944,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE in
- * the expression. Note that it will appear to have the type of the base
+ * the expression. Note that it will appear to have the type of the base
* type, not the domain. This seems correct since within the check
* expression, we should not assume the input value can be considered a
* member of the domain.
@@ -3317,7 +3317,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
/*
* If it's a composite type, invoke ATExecChangeOwner so that we fix
- * up the pg_class entry properly. That will call back to
+ * up the pg_class entry properly. That will call back to
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
@@ -3464,7 +3464,7 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved)
* Caller must have already checked privileges.
*
* The function automatically recurses to process the type's array type,
- * if any. isImplicitArray should be TRUE only when doing this internal
+ * if any. isImplicitArray should be TRUE only when doing this internal
* recursion (outside callers must never try to move an array type directly).
*
* If errorOnTableType is TRUE, the function errors out if the type is
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 7f5b8473d8..d3a2044191 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -995,7 +995,7 @@ DropRole(DropRoleStmt *stmt)
ReleaseSysCache(tuple);
/*
- * Remove role from the pg_auth_members table. We have to remove all
+ * Remove role from the pg_auth_members table. We have to remove all
* tuples that show it as either a role or a member.
*
* XXX what about grantor entries? Maybe we should do one heap scan.
@@ -1091,7 +1091,7 @@ RenameRole(const char *oldname, const char *newname)
* XXX Client applications probably store the session user somewhere, so
* renaming it could cause confusion. On the other hand, there may not be
* an actual problem besides a little confusion, so think about this and
- * decide. Same for SET ROLE ... we don't restrict renaming the current
+ * decide. Same for SET ROLE ... we don't restrict renaming the current
* effective userid, though.
*/
@@ -1347,7 +1347,7 @@ AddRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
@@ -1493,7 +1493,7 @@ DelRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index ded1841dc6..3d2c73902c 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -381,18 +381,18 @@ get_rel_oids(Oid relid, const RangeVar *vacrel)
*
* The output parameters are:
* - oldestXmin is the cutoff value used to distinguish whether tuples are
- * DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
+ * DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
* - freezeLimit is the Xid below which all Xids are replaced by
- * FrozenTransactionId during vacuum.
+ * FrozenTransactionId during vacuum.
* - xidFullScanLimit (computed from table_freeze_age parameter)
- * represents a minimum Xid value; a table whose relfrozenxid is older than
- * this will have a full-table vacuum applied to it, to freeze tuples across
- * the whole table. Vacuuming a table younger than this value can use a
- * partial scan.
+ * represents a minimum Xid value; a table whose relfrozenxid is older than
+ * this will have a full-table vacuum applied to it, to freeze tuples across
+ * the whole table. Vacuuming a table younger than this value can use a
+ * partial scan.
* - multiXactCutoff is the value below which all MultiXactIds are removed from
- * Xmax.
+ * Xmax.
* - mxactFullScanLimit is a value against which a table's relminmxid value is
- * compared to produce a full-table vacuum, as with xidFullScanLimit.
+ * compared to produce a full-table vacuum, as with xidFullScanLimit.
*
* xidFullScanLimit and mxactFullScanLimit can be passed as NULL if caller is
* not interested.
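As a rough illustration of how one of these cutoffs behaves under 32-bit XID arithmetic, here is a minimal standalone sketch; the type alias, the FIRST_NORMAL_XID constant, and the clamping rule are simplified stand-ins for illustration, not the server's actual code.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    #define FIRST_NORMAL_XID ((TransactionId) 3)    /* assumed: IDs 0-2 reserved */

    /*
     * Step a freeze cutoff back from oldestXmin by freeze_min_age transactions.
     * Unsigned arithmetic wraps modulo 2^32; if the result lands in the
     * reserved range we clamp it to the first normal XID.
     */
    static TransactionId
    freeze_limit(TransactionId oldestXmin, uint32_t freeze_min_age)
    {
        TransactionId limit = oldestXmin - freeze_min_age;

        if (limit < FIRST_NORMAL_XID)
            limit = FIRST_NORMAL_XID;
        return limit;
    }

    int
    main(void)
    {
        /* A young cluster: the subtraction wraps around the XID space. */
        printf("%u\n", (unsigned) freeze_limit(1000000u, 50000000u));
        return 0;
    }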
@@ -417,9 +417,9 @@ vacuum_set_xid_limits(Relation rel,
MultiXactId safeMxactLimit;
/*
- * We can always ignore processes running lazy vacuum. This is because we
+ * We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
+ * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
* ignore it. In theory it could be problematic to ignore lazy vacuums in
* a full vacuum, but keep in mind that only one vacuum process can be
* working on a particular table at any time, and that each vacuum is
@@ -566,7 +566,7 @@ vacuum_set_xid_limits(Relation rel,
* If we scanned the whole relation then we should just use the count of
* live tuples seen; but if we did not, we should not trust the count
* unreservedly, especially not in VACUUM, which may have scanned a quite
- * nonrandom subset of the table. When we have only partial information,
+ * nonrandom subset of the table. When we have only partial information,
* we take the old value of pg_class.reltuples as a measurement of the
* tuple density in the unscanned pages.
*
@@ -712,7 +712,7 @@ vac_update_relstats(Relation relation,
/*
* If we have discovered that there are no indexes, then there's no
- * primary key either. This could be done more thoroughly...
+ * primary key either. This could be done more thoroughly...
*/
if (pgcform->relhaspkey && !hasindex)
{
@@ -772,7 +772,7 @@ vac_update_relstats(Relation relation,
* truncate pg_clog and pg_multixact.
*
* We violate transaction semantics here by overwriting the database's
- * existing pg_database tuple with the new value. This is reasonably
+ * existing pg_database tuple with the new value. This is reasonably
* safe since the new value is correct whether or not this transaction
* commits. As with vac_update_relstats, this avoids leaving dead tuples
* behind after a VACUUM.
@@ -892,7 +892,7 @@ vac_update_datfrozenxid(void)
* Also update the XID wrap limit info maintained by varsup.c.
*
* The passed XID is simply the one I just wrote into my pg_database
- * entry. It's used to initialize the "min" calculation.
+ * entry. It's used to initialize the "min" calculation.
*
* This routine is only invoked when we've managed to change our
* DB's datfrozenxid entry, or we found that the shared XID-wrap-limit
@@ -976,7 +976,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti)
/*
* Update the wrap limit for GetNewTransactionId and creation of new
* MultiXactIds. Note: these functions will also signal the postmaster
- * for an(other) autovac cycle if needed. XXX should we avoid possibly
+ * for an(other) autovac cycle if needed. XXX should we avoid possibly
* signalling twice?
*/
SetTransactionIdLimit(frozenXID, oldestxid_datoid);
@@ -988,7 +988,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti)
* vacuum_rel() -- vacuum one heap relation
*
* Doing one heap at a time incurs extra overhead, since we need to
- * check that the heap exists again just before we vacuum it. The
+ * check that the heap exists again just before we vacuum it. The
* reason that we do this is so that vacuuming can be spread across
* many small transactions. Otherwise, two-phase locking would require
* us to lock the entire database during one pass of the vacuum cleaner.
@@ -1045,7 +1045,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
}
/*
- * Check for user-requested abort. Note we want this to be inside a
+ * Check for user-requested abort. Note we want this to be inside a
* transaction, so xact.c doesn't issue useless WARNING.
*/
CHECK_FOR_INTERRUPTS();
@@ -1092,7 +1092,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
*
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's not
- * a shared relation). pg_class_ownercheck includes the superuser case.
+ * a shared relation). pg_class_ownercheck includes the superuser case.
*
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
@@ -1220,7 +1220,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good, because
+ * "analyze" will not get done on the toast table. This is good, because
* the toaster always uses hardcoded index access and statistics are
* totally unimportant for toast relations.
*/
@@ -1239,7 +1239,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* Open all the vacuumable indexes of the given relation, obtaining the
- * specified kind of lock on each. Return an array of Relation pointers for
+ * specified kind of lock on each. Return an array of Relation pointers for
* the indexes into *Irel, and the number of indexes into *nindexes.
*
* We consider an index vacuumable if it is marked insertable (IndexIsReady).
@@ -1289,7 +1289,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode,
}
/*
- * Release the resources acquired by vac_open_indexes. Optionally release
+ * Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em).
*/
void
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 3870df606b..b4abeed5ac 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -473,7 +473,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Before entering the main loop, establish the invariant that
* next_not_all_visible_block is the next block number >= blkno that's not
* all-visible according to the visibility map, or nblocks if there's no
- * such block. Also, we set up the skipping_all_visible_blocks flag,
+ * such block. Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
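The hysteresis described here is easy to see in isolation. Below is a hedged, self-contained sketch of the skipping decision; the SKIP_PAGES_THRESHOLD value, the toy visibility map, and the helper name are illustrative stand-ins, not the function's actual code.

    #include <stdbool.h>
    #include <stdio.h>

    #define NBLOCKS 12
    #define SKIP_PAGES_THRESHOLD 4  /* assumed: only skip runs at least this long */

    /* Toy visibility map: vm[i] says block i is all-visible. */
    static const bool vm[NBLOCKS] =
    {true, true, false, true, true, true, true, true, false, true, false, true};

    /* Find the next block >= start that is not all-visible (NBLOCKS if none). */
    static unsigned
    next_unskippable(unsigned start)
    {
        while (start < NBLOCKS && vm[start])
            start++;
        return start;
    }

    int
    main(void)
    {
        unsigned next_not_all_visible_block = next_unskippable(0);
        bool skipping = (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD);

        for (unsigned blkno = 0; blkno < NBLOCKS; blkno++)
        {
            if (blkno == next_not_all_visible_block)
            {
                /* Not all-visible: scan it, then re-establish the invariant. */
                next_not_all_visible_block = next_unskippable(blkno + 1);
                skipping = (next_not_all_visible_block - blkno >
                            SKIP_PAGES_THRESHOLD);
            }
            else if (skipping)
                continue;       /* once committed, skip the whole stretch */
            printf("scan block %u\n", blkno);
        }
        return 0;
    }

With this data the short all-visible runs (blocks 9 and 11) are still scanned, while the long run 3..7 is skipped wholesale: exactly the hysteresis the comment calls for.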
@@ -706,10 +706,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* It's possible that another backend has extended the heap,
* initialized the page, and then failed to WAL-log the page
* due to an ERROR. Since heap extension is not WAL-logged,
- * recovery might try to replay our record setting the
- * page all-visible and find that the page isn't initialized,
- * which will cause a PANIC. To prevent that, check whether
- * the page has been previously WAL-logged, and if not, do that
+ * recovery might try to replay our record setting the page
+ * all-visible and find that the page isn't initialized, which
+ * will cause a PANIC. To prevent that, check whether the
+ * page has been previously WAL-logged, and if not, do that
* now.
*/
if (RelationNeedsWAL(onerel) &&
@@ -834,8 +834,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* NB: Like with per-tuple hint bits, we can't set the
* PD_ALL_VISIBLE flag if the inserter committed
* asynchronously. See SetHintBits for more info. Check
- * that the tuple is hinted xmin-committed because
- * of that.
+ * that the tuple is hinted xmin-committed because of
+ * that.
*/
if (all_visible)
{
@@ -972,7 +972,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* It should never be the case that the visibility map page is set
* while the page-level bit is clear, but the reverse is allowed
- * (if checksums are not enabled). Regardless, set the both bits
+ * (if checksums are not enabled). Regardless, set both bits
* so that we get back in sync.
*
* NB: If the heap page is all-visible but the VM bit is not set,
@@ -1034,8 +1034,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
- * its post-compaction free space. If not, then we're done with this
- * page, so remember its free space as-is. (This path will always be
+ * its post-compaction free space. If not, then we're done with this
+ * page, so remember its free space as-is. (This path will always be
* taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
@@ -1635,9 +1635,9 @@ static void
lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
{
long maxtuples;
- int vac_work_mem = IsAutoVacuumWorkerProcess() &&
- autovacuum_work_mem != -1 ?
- autovacuum_work_mem : maintenance_work_mem;
+ int vac_work_mem = IsAutoVacuumWorkerProcess() &&
+ autovacuum_work_mem != -1 ?
+ autovacuum_work_mem : maintenance_work_mem;
if (vacrelstats->hasindex)
{
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 18133242f7..f299738d66 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -176,7 +176,7 @@ check_datestyle(char **newval, void **extra, GucSource source)
}
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return. GUC wants it malloc'd.
*/
result = (char *) malloc(32);
if (!result)
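The malloc requirement here reflects the general shape of such a check hook: the canonical value must live outside any palloc context, and the visible `if (!result)` suggests allocation failure is reported rather than thrown. A minimal standalone sketch of that pattern, with an invented hook name and a trivial canonicalization:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <ctype.h>

    /*
     * Check-hook-style canonicalizer: on success, free the old value and
     * install a malloc'd canonical spelling; on allocation failure just
     * return false rather than erroring out.
     */
    static bool
    check_example_style(char **newval)
    {
        char   *result = malloc(strlen(*newval) + 1);
        size_t  i;

        if (!result)
            return false;

        /* "Canonical" here just means lower-cased; real hooks do far more. */
        for (i = 0; (*newval)[i] != '\0'; i++)
            result[i] = (char) tolower((unsigned char) (*newval)[i]);
        result[i] = '\0';

        free(*newval);
        *newval = result;
        return true;
    }

    int
    main(void)
    {
        char *val = malloc(6);

        if (!val)
            return 1;
        strcpy(val, "MiXeD");
        if (check_example_style(&val))
            puts(val);          /* prints "mixed" */
        free(val);
        return 0;
    }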
@@ -257,7 +257,7 @@ check_timezone(char **newval, void **extra, GucSource source)
if (pg_strncasecmp(*newval, "interval", 8) == 0)
{
/*
- * Support INTERVAL 'foo'. This is for SQL spec compliance, not
+ * Support INTERVAL 'foo'. This is for SQL spec compliance, not
* because it has any actual real-world usefulness.
*/
const char *valueptr = *newval;
@@ -281,7 +281,7 @@ check_timezone(char **newval, void **extra, GucSource source)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport(ERROR), which is not desirable for GUC. We did what we
+ * ereport(ERROR), which is not desirable for GUC. We did what we
* could to guard against this in flatten_set_variable_args, but a
* string coming in from postgresql.conf might contain anything.
*/
@@ -466,7 +466,7 @@ show_log_timezone(void)
* We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and
* we also always allow changes from read-write to read-only. However,
* read-only may be changed to read-write only when in a top-level transaction
- * that has not yet taken an initial snapshot. Can't do it in a hot standby
+ * that has not yet taken an initial snapshot. Can't do it in a hot standby
* slave, either.
*
* If we are not in a transaction at all, just allow the change; it means
@@ -627,7 +627,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source)
*
* We can't roll back the random sequence on error, and we don't want
* config file reloads to affect it, so we only want interactive SET SEED
- * commands to set it. We use the "extra" storage to ensure that rollbacks
+ * commands to set it. We use the "extra" storage to ensure that rollbacks
* don't try to do the operation again.
*/
@@ -903,7 +903,7 @@ const char *
show_role(void)
{
/*
- * Check whether SET ROLE is active; if not return "none". This is a
+ * Check whether SET ROLE is active; if not return "none". This is a
* kluge to deal with the fact that SET SESSION AUTHORIZATION logically
* resets SET ROLE to NONE, but we cannot set the GUC role variable from
* assign_session_authorization (because we haven't got enough info to
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index bc085666fb..683621c35e 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -52,7 +52,7 @@ validateWithCheckOption(char *value)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for \"check_option\" option"),
- errdetail("Valid values are \"local\", and \"cascaded\".")));
+ errdetail("Valid values are \"local\", and \"cascaded\".")));
}
}
@@ -344,11 +344,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
*rt_entry2;
/*
- * Make a copy of the given parsetree. It's not so much that we don't
+ * Make a copy of the given parsetree. It's not so much that we don't
* want to scribble on our input, it's that the parser has a bad habit of
* outputting multiple links to the same subtree for constructs like
* BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a
- * Var node twice. copyObject will expand any multiply-referenced subtree
+ * Var node twice. copyObject will expand any multiply-referenced subtree
* into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -460,13 +460,13 @@ DefineView(ViewStmt *stmt, const char *queryString)
}
/*
- * If the check option is specified, look to see if the view is
- * actually auto-updatable or not.
+ * If the check option is specified, look to see if the view is actually
+ * auto-updatable or not.
*/
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(viewParse, true);
+ view_query_is_auto_updatable(viewParse, true);
if (view_updatable_error)
ereport(ERROR,
@@ -513,7 +513,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
/*
* If the user didn't explicitly ask for a temporary view, check whether
- * we need one implicitly. We allow TEMP to be inserted automatically as
+ * we need one implicitly. We allow TEMP to be inserted automatically as
* long as the CREATE command is consistent with that --- no explicit
* schema name.
*/
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 8c01a63500..640964c5b7 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -317,7 +317,7 @@ ExecMarkPos(PlanState *node)
*
* NOTE: the semantics of this are that the first ExecProcNode following
* the restore operation will yield the same tuple as the first one following
- * the mark operation. It is unspecified what happens to the plan node's
+ * the mark operation. It is unspecified what happens to the plan node's
* result TupleTableSlot. (In most cases the result slot is unchanged by
* a restore, but the node may choose to clear it or to load it with the
* restored-to tuple.) Hence the caller should discard any previously
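Since this hunk pins down the mark/restore contract precisely, a toy cursor makes it concrete. This is a hedged sketch with invented names, not executor code:

    #include <stdio.h>

    /* Toy scan: a cursor over an int array with mark/restore support. */
    typedef struct ToyScan
    {
        const int  *data;
        int         n;
        int         pos;        /* next element to return */
        int         markpos;    /* position saved by mark() */
    } ToyScan;

    static int  next_tuple(ToyScan *s) { return s->pos < s->n ? s->data[s->pos++] : -1; }
    static void mark(ToyScan *s)       { s->markpos = s->pos; }
    static void restore(ToyScan *s)    { s->pos = s->markpos; }

    int
    main(void)
    {
        const int   rows[] = {10, 20, 30, 40};
        ToyScan     s = {rows, 4, 0, 0};

        next_tuple(&s);                 /* returns 10 */
        mark(&s);                       /* remember position before 20 */
        printf("%d\n", next_tuple(&s)); /* 20: first fetch after mark */
        next_tuple(&s);                 /* 30 */
        restore(&s);
        printf("%d\n", next_tuple(&s)); /* 20 again: same as after mark */
        return 0;
    }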
@@ -397,7 +397,7 @@ ExecSupportsMarkRestore(NodeTag plantype)
/*
* T_Result only supports mark/restore if it has a child plan that
* does, so we do not have enough information to give a really
- * correct answer. However, for current uses it's enough to
+ * correct answer. However, for current uses it's enough to
* always say "false", because this routine is not asked about
* gating Result plans, only base-case Results.
*/
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index 32d0718ec5..7ff3e1ece1 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -142,7 +142,7 @@ execCurrentOf(CurrentOfExpr *cexpr,
/*
* This table didn't produce the cursor's current row; some other
- * inheritance child of the same parent must have. Signal caller to
+ * inheritance child of the same parent must have. Signal caller to
* do nothing on this table.
*/
return false;
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index a9acd5b535..45d6477c2e 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -52,7 +52,7 @@
*
* Initialize the Junk filter.
*
- * The source targetlist is passed in. The output tuple descriptor is
+ * The source targetlist is passed in. The output tuple descriptor is
* built from the non-junk tlist entries, plus the passed specification
* of whether to include room for an OID or not.
* An optional resultSlot can be passed as well.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 886c75125d..072c7df0ad 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -19,7 +19,7 @@
* ExecutorRun accepts direction and count arguments that specify whether
* the plan is to be executed forwards, backwards, and for how many tuples.
* In some cases ExecutorRun may be called multiple times to process all
- * the tuples for a plan. It is also acceptable to stop short of executing
+ * the tuples for a plan. It is also acceptable to stop short of executing
* the whole plan (but only if it is a SELECT).
*
* ExecutorFinish must be called after the final ExecutorRun call and
@@ -329,12 +329,12 @@ standard_ExecutorRun(QueryDesc *queryDesc,
* ExecutorFinish
*
* This routine must be called after the last ExecutorRun call.
- * It performs cleanup such as firing AFTER triggers. It is
+ * It performs cleanup such as firing AFTER triggers. It is
* separate from ExecutorEnd because EXPLAIN ANALYZE needs to
* include these actions in the total runtime.
*
* We provide a function hook variable that lets loadable plugins
- * get control when ExecutorFinish is called. Such a plugin would
+ * get control when ExecutorFinish is called. Such a plugin would
* normally call standard_ExecutorFinish().
*
* ----------------------------------------------------------------
@@ -565,7 +565,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
* userid to check as: current user unless we have a setuid indication.
*
* Note: GetUserId() is presently fast enough that there's no harm in
- * calling it separately for each RTE. If that stops being true, we could
+ * calling it separately for each RTE. If that stops being true, we could
* call it once in ExecCheckRTPerms and pass the userid down from there.
* But for now, no need for the extra clutter.
*/
@@ -1184,7 +1184,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
* if so it doesn't matter which one we pick.) However, it is sometimes
* necessary to fire triggers on other relations; this happens mainly when an
* RI update trigger queues additional triggers on other relations, which will
- * be processed in the context of the outer query. For efficiency's sake,
+ * be processed in the context of the outer query. For efficiency's sake,
* we want to have a ResultRelInfo for those triggers too; that can avoid
* repeated re-opening of the relation. (It also provides a way for EXPLAIN
* ANALYZE to report the runtimes of such triggers.) So we make additional
@@ -1221,7 +1221,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
/*
* Open the target relation's relcache entry. We assume that an
* appropriate lock is still held by the backend from whenever the trigger
- * event got queued, so we need take no new lock here. Also, we need not
+ * event got queued, so we need take no new lock here. Also, we need not
* recheck the relkind, so no need for CheckValidResultRel.
*/
rel = heap_open(relid, NoLock);
@@ -1327,7 +1327,7 @@ ExecPostprocessPlan(EState *estate)
/*
* Run any secondary ModifyTable nodes to completion, in case the main
- * query did not fetch all rows from them. (We do this to ensure that
+ * query did not fetch all rows from them. (We do this to ensure that
* such nodes have predictable results.)
*/
foreach(lc, estate->es_auxmodifytables)
@@ -1639,7 +1639,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
TupleTableSlot *slot, EState *estate)
{
ExprContext *econtext;
- ListCell *l1, *l2;
+ ListCell *l1,
+ *l2;
/*
* We will use the EState's per-tuple context for evaluating constraint
@@ -1655,7 +1656,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
l2, resultRelInfo->ri_WithCheckOptionExprs)
{
WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
- ExprState *wcoExpr = (ExprState *) lfirst(l2);
+ ExprState *wcoExpr = (ExprState *) lfirst(l2);
/*
* WITH CHECK OPTION checks are intended to ensure that the new tuple
@@ -1667,8 +1668,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
if (!ExecQual((List *) wcoExpr, econtext, false))
ereport(ERROR,
(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
- errmsg("new row violates WITH CHECK OPTION for view \"%s\"",
- wco->viewname),
+ errmsg("new row violates WITH CHECK OPTION for view \"%s\"",
+ wco->viewname),
errdetail("Failing row contains %s.",
ExecBuildSlotValueDescription(slot,
RelationGetDescr(resultRelInfo->ri_RelationDesc),
@@ -1681,7 +1682,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
*
* This is intentionally very similar to BuildIndexValueDescription, but
* unlike that function, we truncate long field values (to at most maxfieldlen
- * bytes). That seems necessary here since heap field values could be very
+ * bytes). That seems necessary here since heap field values could be very
* long, whereas index entries typically aren't so wide.
*
* Also, unlike the case with index entries, we need to be prepared to ignore
@@ -1875,7 +1876,7 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
*tid = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
+ * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
*/
EvalPlanQualBegin(epqstate, estate);
@@ -1958,7 +1959,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
/*
* If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies that
+ * recycled and reused for an unrelated tuple. This implies that
* the latest version of the row was deleted, so we need do
* nothing. (Should be safe to examine xmin without getting
* buffer's content lock, since xmin never changes in an existing
@@ -2199,7 +2200,7 @@ EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
/*
* Fetch the current row values for any non-locked relations that need
- * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to be scanned by an EvalPlanQual operation. origslot must have been set
* to contain the current result row (top-level row) that we need to recheck.
*/
void
@@ -2428,7 +2429,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
/*
* Each EState must have its own es_epqScanDone state, but if we have
- * nested EPQ checks they should share es_epqTuple arrays. This allows
+ * nested EPQ checks they should share es_epqTuple arrays. This allows
* sub-rechecks to inherit the values being examined by an outer recheck.
*/
estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
@@ -2485,7 +2486,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
*
* This is a cut-down version of ExecutorEnd(); basically we want to do most
* of the normal cleanup, but *not* close result relations (which we are
- * just sharing from the outer query). We do, however, have to close any
+ * just sharing from the outer query). We do, however, have to close any
* trigger target relations that got opened, since those are not shared.
* (There probably shouldn't be any of the latter, but just in case...)
*/
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index c5ecd185b8..c0189eb5a1 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -52,7 +52,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
- * and so forth until the entire plan is initialized. The result
+ * and so forth until the entire plan is initialized. The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@@ -575,7 +575,7 @@ MultiExecProcNode(PlanState *node)
* at 'node'.
*
* After this operation, the query plan will not be able to be
- * processed any further. This should be called only after
+ * processed any further. This should be called only after
* the query plan has been fully executed.
* ----------------------------------------------------------------
*/
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 833c4ed6a4..f162e92fc7 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -26,7 +26,7 @@
* ExecProject() is used to make tuple projections. Rather then
* trying to speed it up, the execution plan should be pre-processed
* to facilitate attribute sharing between nodes wherever possible,
- * instead of doing needless copying. -cim 5/31/91
+ * instead of doing needless copying. -cim 5/31/91
*
* During expression evaluation, we check_stack_depth only in
* ExecMakeFunctionResult (and substitute routines) rather than at every
@@ -201,7 +201,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
*
* Note: for notational simplicity we declare these functions as taking the
* specific type of ExprState that they work on. This requires casting when
- * assigning the function pointer in ExecInitExpr. Be careful that the
+ * assigning the function pointer in ExecInitExpr. Be careful that the
* function signature is declared correctly, because the cast suppresses
* automatic checking!
*
@@ -236,7 +236,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
* The caller should already have switched into the temporary memory
* context econtext->ecxt_per_tuple_memory. The convenience entry point
* ExecEvalExprSwitchContext() is provided for callers who don't prefer to
- * do the switch in an outer loop. We do not do the switch in these routines
+ * do the switch in an outer loop. We do not do the switch in these routines
* because it'd be a waste of cycles during nested expression evaluation.
* ----------------------------------------------------------------
*/
@@ -366,7 +366,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
* We might have a nested-assignment situation, in which the
* refassgnexpr is itself a FieldStore or ArrayRef that needs to
* obtain and modify the previous value of the array element or slice
- * being replaced. If so, we have to extract that value from the
+ * being replaced. If so, we have to extract that value from the
* array and pass it down via the econtext's caseValue. It's safe to
* reuse the CASE mechanism because there cannot be a CASE between
* here and where the value would be needed, and an array assignment
@@ -439,7 +439,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
/*
* For assignment to varlena arrays, we handle a NULL original array
* by substituting an empty (zero-dimensional) array; insertion of the
- * new element will result in a singleton array value. It does not
+ * new element will result in a singleton array value. It does not
* matter whether the new element is NULL.
*/
if (*isNull)
@@ -829,11 +829,11 @@ ExecEvalWholeRowVar(WholeRowVarExprState *wrvstate, ExprContext *econtext,
* We really only care about numbers of attributes and data types.
* Also, we can ignore type mismatch on columns that are dropped in
* the destination type, so long as (1) the physical storage matches
- * or (2) the actual column value is NULL. Case (1) is helpful in
+ * or (2) the actual column value is NULL. Case (1) is helpful in
* some cases involving out-of-date cached plans, while case (2) is
* expected behavior in situations such as an INSERT into a table with
* dropped columns (the planner typically generates an INT4 NULL
- * regardless of the dropped column type). If we find a dropped
+ * regardless of the dropped column type). If we find a dropped
* column and cannot verify that case (1) holds, we have to use
* ExecEvalWholeRowSlow to check (2) for each row.
*/
@@ -1491,7 +1491,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* ExecPrepareTuplestoreResult
*
* Subroutine for ExecMakeFunctionResult: prepare to extract rows from a
- * tuplestore function result. We must set up a funcResultSlot (unless
+ * tuplestore function result. We must set up a funcResultSlot (unless
* already done in a previous call cycle) and verify that the function
* returned the expected tuple descriptor.
*/
@@ -1536,7 +1536,7 @@ ExecPrepareTuplestoreResult(FuncExprState *fcache,
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (resultDesc)
@@ -1719,7 +1719,7 @@ restart:
if (fcache->func.fn_retset || hasSetArg)
{
/*
- * We need to return a set result. Complain if caller not ready to
+ * We need to return a set result. Complain if caller not ready to
* accept one.
*/
if (isDone == NULL)
@@ -2046,7 +2046,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
/*
* Normally the passed expression tree will be a FuncExprState, since the
* grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set then
+ * function reference. However, if the function doesn't return set then
* the planner might have replaced the function call via constant-folding
* or inlining. So if we see any other kind of expression node, execute
* it via the general ExecEvalExpr() code; the only difference is that we
@@ -2085,7 +2085,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*
* Note: ideally, we'd do this in the per-tuple context, but then the
* argument values would disappear when we reset the context in the
- * inner loop. So do it in caller context. Perhaps we should make a
+ * inner loop. So do it in caller context. Perhaps we should make a
* separate context just to hold the evaluated arguments?
*/
argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext);
@@ -2171,7 +2171,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Can't do anything very useful with NULL rowtype values. For a
* function returning set, we consider this a protocol violation
* (but another alternative would be to just ignore the result and
- * "continue" to get another row). For a function not returning
+ * "continue" to get another row). For a function not returning
* set, we fall out of the loop; we'll cons up an all-nulls result
* row below.
*/
@@ -2305,7 +2305,7 @@ no_function_result:
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (rsinfo.setDesc)
@@ -2483,7 +2483,7 @@ ExecEvalDistinct(FuncExprState *fcache,
*
* Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean,
* and we combine the results across all array elements using OR and AND
- * (for ANY and ALL respectively). Of course we short-circuit as soon as
+ * (for ANY and ALL respectively). Of course we short-circuit as soon as
* the result is known.
*/
static Datum
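The OR/AND combining rule, including its treatment of SQL NULLs, can be restated in a few lines of standalone C. This sketch assumes integer equality as the operator and a three-valued result type; it illustrates the semantics, not the executor's implementation:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { TRI_FALSE, TRI_TRUE, TRI_NULL } Tri;

    /*
     * scalar = ANY(array): OR the per-element results; short-circuit on TRUE.
     * A NULL element means the result can no longer be FALSE, only NULL.
     */
    static Tri
    scalar_eq_any(int scalar, const int *elems, const bool *nulls, int n)
    {
        bool saw_null = false;

        for (int i = 0; i < n; i++)
        {
            if (nulls[i]) { saw_null = true; continue; }
            if (elems[i] == scalar)
                return TRI_TRUE;    /* short-circuit: OR is decided */
        }
        return saw_null ? TRI_NULL : TRI_FALSE;
    }

    /*
     * scalar = ALL(array): AND the per-element results; short-circuit on FALSE.
     */
    static Tri
    scalar_eq_all(int scalar, const int *elems, const bool *nulls, int n)
    {
        bool saw_null = false;

        for (int i = 0; i < n; i++)
        {
            if (nulls[i]) { saw_null = true; continue; }
            if (elems[i] != scalar)
                return TRI_FALSE;   /* short-circuit: AND is decided */
        }
        return saw_null ? TRI_NULL : TRI_TRUE;
    }

    int
    main(void)
    {
        int     elems[] = {1, 2, 3};
        bool    nulls[] = {false, true, false};

        printf("%d %d\n",
               scalar_eq_any(3, elems, nulls, 3),   /* TRI_TRUE  (1) */
               scalar_eq_all(3, elems, nulls, 3));  /* TRI_FALSE (0) */
        return 0;
    }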
@@ -2670,7 +2670,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
* qualification to conjunctive normal form. If we ever get
* an AND to evaluate, we can be sure that it's not a top-level
* clause in the qualification, but appears lower (as a function
- * argument, for example), or in the target list. Not that you
+ * argument, for example), or in the target list. Not that you
* need to know this, mind you...
* ----------------------------------------------------------------
*/
@@ -2801,7 +2801,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
/* ----------------------------------------------------------------
* ExecEvalConvertRowtype
*
- * Evaluate a rowtype coercion operation. This may require
+ * Evaluate a rowtype coercion operation. This may require
* rearranging field positions.
* ----------------------------------------------------------------
*/
@@ -2930,7 +2930,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
/*
* if we have a true test, then we return the result, since the case
- * statement is satisfied. A NULL result from the test is not
+ * statement is satisfied. A NULL result from the test is not
* considered true.
*/
if (DatumGetBool(clause_value) && !*isNull)
@@ -3144,7 +3144,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
* If all items were null or empty arrays, return an empty array;
* otherwise, if some were and some weren't, raise error. (Note: we
* must special-case this somehow to avoid trying to generate a 1-D
- * array formed from empty arrays. It's not ideal...)
+ * array formed from empty arrays. It's not ideal...)
*/
if (haveempty)
{
@@ -4315,7 +4315,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* ExecInitExpr: prepare an expression tree for execution
*
* This function builds and returns an ExprState tree paralleling the given
- * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
+ * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
* for execution. Because the Expr tree itself is read-only as far as
* ExecInitExpr and ExecEvalExpr are concerned, several different executions
* of the same plan tree can occur concurrently.
@@ -4326,9 +4326,9 @@ ExecEvalExprSwitchContext(ExprState *expression,
*
* Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to the
* lists of such nodes held by the parent PlanState. Otherwise, we do very
- * little initialization here other than building the state-node tree. Any
+ * little initialization here other than building the state-node tree. Any
* nontrivial work associated with initializing runtime info for a node should
- * happen during the first actual evaluation of that node. (This policy lets
+ * happen during the first actual evaluation of that node. (This policy lets
* us avoid work if the node is never actually evaluated.)
*
* Note: there is no ExecEndExpr function; we assume that any resource
@@ -5133,7 +5133,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Evaluate the qual conditions one at a time. If we find a FALSE result,
+ * Evaluate the qual conditions one at a time. If we find a FALSE result,
* we can stop evaluating and return FALSE --- the AND result must be
* FALSE. Also, if we find a NULL result when resultForNull is FALSE, we
* can stop and return FALSE --- the AND result must be FALSE or NULL in
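The stopping rule spelled out here is compact enough to restate as standalone C. A hedged sketch of the implicit-AND loop, modeling each qual as a precomputed (value, isnull) pair rather than an expression tree:

    #include <stdbool.h>

    typedef struct { bool value; bool isnull; } QualResult;

    /*
     * AND a list of qual results. A definite FALSE wins immediately; a NULL
     * result is treated as resultForNull (the WITH CHECK OPTION call above
     * passes false, so a NULL qual result counts as a violation).
     */
    static bool
    exec_qual_sketch(const QualResult *quals, int n, bool resultForNull)
    {
        for (int i = 0; i < n; i++)
        {
            if (quals[i].isnull)
            {
                if (!resultForNull)
                    return false;   /* NULL counts against the qual */
            }
            else if (!quals[i].value)
                return false;       /* definite FALSE: AND is decided */
        }
        return true;
    }

    int
    main(void)
    {
        QualResult quals[] = {{true, false}, {false, true}}; /* TRUE, NULL */

        return exec_qual_sketch(quals, 2, false) ? 0 : 1;    /* exits 1 */
    }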
@@ -5292,7 +5292,7 @@ ExecTargetList(List *targetlist,
else
{
/*
- * We have some done and some undone sets. Restart the done ones
+ * We have some done and some undone sets. Restart the done ones
* so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 5e4538fa5e..869abbecbd 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -30,7 +30,7 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple
* ExecScanFetch -- fetch next potential tuple
*
* This routine is concerned with substituting a test tuple if we are
- * inside an EvalPlanQual recheck. If we aren't, just execute
+ * inside an EvalPlanQual recheck. If we aren't, just execute
* the access method's next-tuple routine.
*/
static inline TupleTableSlot *
@@ -155,7 +155,7 @@ ExecScan(ScanState *node,
ResetExprContext(econtext);
/*
- * get a tuple from the access method. Loop until we obtain a tuple that
+ * get a tuple from the access method. Loop until we obtain a tuple that
* passes the qualification.
*/
for (;;)
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 928b5e3178..66515f71a2 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -4,7 +4,7 @@
* Routines dealing with TupleTableSlots. These are used for resource
* management associated with tuples (eg, releasing buffer pins for
* tuples in disk buffers, or freeing the memory occupied by transient
- * tuples). Slots also provide access abstraction that lets us implement
+ * tuples). Slots also provide access abstraction that lets us implement
* "virtual" tuples to reduce data-copying overhead.
*
* Routines dealing with the type information for tuples. Currently,
@@ -261,7 +261,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
ExecClearTuple(slot);
/*
- * Release any old descriptor. Also release old Datum/isnull arrays if
+ * Release any old descriptor. Also release old Datum/isnull arrays if
* present (we don't bother to check if they could be re-used).
*/
if (slot->tts_tupleDescriptor)
@@ -311,7 +311,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
* Another case where it is 'false' is when the referenced tuple is held
* in a tuple table slot belonging to a lower-level executor Proc node.
* In this case the lower-level slot retains ownership and responsibility
- * for eventually releasing the tuple. When this method is used, we must
+ * for eventually releasing the tuple. When this method is used, we must
* be certain that the upper-level Proc node will lose interest in the tuple
* sooner than the lower-level one does! If you're not certain, copy the
* lower-level tuple with heap_copytuple and let the upper-level table
@@ -650,7 +650,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot)
* Fetch the slot's minimal physical tuple.
*
* If the slot contains a virtual tuple, we convert it to minimal
- * physical form. The slot retains ownership of the minimal tuple.
+ * physical form. The slot retains ownership of the minimal tuple.
* If it contains a regular tuple we convert to minimal form and store
* that in addition to the regular tuple (not instead of, because
* callers may hold pointers to Datums within the regular tuple).
@@ -829,7 +829,7 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
* ExecInit{Result,Scan,Extra}TupleSlot
*
* These are convenience routines to initialize the specified slot
- * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
+ * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
* is used for initializing special-purpose slots.
* --------------------------------
*/
@@ -1147,7 +1147,7 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values)
* code would have no way to obtain a tupledesc for the tuple.
*
* Note that if we do build a new tuple, it's palloc'd in the current
- * memory context. Beware of code that changes context between the initial
+ * memory context. Beware of code that changes context between the initial
* heap_form_tuple/etc call and calling HeapTuple(Header)GetDatum.
*
* For performance-critical callers, it could be worthwhile to take extra
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index fc71d852be..d5e1273e91 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -105,7 +105,7 @@ CreateExecutorState(void)
* Initialize all fields of the Executor State structure
*/
estate->es_direction = ForwardScanDirection;
- estate->es_snapshot = InvalidSnapshot; /* caller must initialize this */
+ estate->es_snapshot = InvalidSnapshot; /* caller must initialize this */
estate->es_crosscheck_snapshot = InvalidSnapshot; /* no crosscheck */
estate->es_range_table = NIL;
estate->es_plannedstmt = NULL;
@@ -342,7 +342,7 @@ CreateStandaloneExprContext(void)
* any previously computed pass-by-reference expression result will go away!
*
* If isCommit is false, we are being called in error cleanup, and should
- * not call callbacks but only release memory. (It might be better to call
+ * not call callbacks but only release memory. (It might be better to call
* the callbacks and pass the isCommit flag to them, but that would require
* more invasive code changes than currently seems justified.)
*
@@ -371,7 +371,7 @@ FreeExprContext(ExprContext *econtext, bool isCommit)
* ReScanExprContext
*
* Reset an expression context in preparation for a rescan of its
- * plan node. This requires calling any registered shutdown callbacks,
+ * plan node. This requires calling any registered shutdown callbacks,
* since any partially complete set-returning-functions must be canceled.
*
* Note we make no assumption about the caller's memory context.
@@ -412,7 +412,7 @@ MakePerTupleExprContext(EState *estate)
/* ----------------
* ExecAssignExprContext
*
- * This initializes the ps_ExprContext field. It is only necessary
+ * This initializes the ps_ExprContext field. It is only necessary
* to do this for nodes which use ExecQual or ExecProject
* because those routines require an econtext. Other nodes that
* don't have to evaluate expressions don't need to do this.
@@ -458,7 +458,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
/*
* ExecTypeFromTL needs the parse-time representation of the tlist, not a
- * list of ExprStates. This is good because some plan nodes don't bother
+ * list of ExprStates. This is good because some plan nodes don't bother
* to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
@@ -486,7 +486,7 @@ ExecGetResultType(PlanState *planstate)
* the given tlist should be a list of ExprState nodes, not Expr nodes.
*
* inputDesc can be NULL, but if it is not, we check to see whether simple
- * Vars in the tlist match the descriptor. It is important to provide
+ * Vars in the tlist match the descriptor. It is important to provide
* inputDesc for relation-scan plan nodes, as a cross check that the relation
* hasn't been changed since the plan was made. At higher levels of a plan,
* there is no need to recheck.
@@ -692,7 +692,7 @@ ExecAssignProjectionInfo(PlanState *planstate,
*
* However ... there is no particular need to do it during ExecEndNode,
* because FreeExecutorState will free any remaining ExprContexts within
- * the EState. Letting FreeExecutorState do it allows the ExprContexts to
+ * the EState. Letting FreeExecutorState do it allows the ExprContexts to
* be freed in reverse order of creation, rather than order of creation as
* will happen if we delete them here, which saves O(N^2) work in the list
* cleanup inside FreeExprContext.
@@ -712,7 +712,7 @@ ExecFreeExprContext(PlanState *planstate)
* the following scan type support functions are for
* those nodes which are stubborn and return tuples in
* their Scan tuple slot instead of their Result tuple
- * slot.. luck fur us, these nodes do not do projections
+ * slot.. lucky for us, these nodes do not do projections
* so we don't have to worry about getting the ProjectionInfo
* right for them... -cim 6/3/91
* ----------------------------------------------------------------
@@ -1111,7 +1111,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* If the index has an associated exclusion constraint, check that.
* This is simpler than the process for uniqueness checks since we
- * always insert first and then check. If the constraint is deferred,
+ * always insert first and then check. If the constraint is deferred,
* we check now anyway, but don't throw error on violation; instead
* we'll queue a recheck event.
*
@@ -1295,7 +1295,7 @@ retry:
/*
* If an in-progress transaction is affecting the visibility of this
- * tuple, we need to wait for it to complete and then recheck. For
+ * tuple, we need to wait for it to complete and then recheck. For
* simplicity we do rechecking by just restarting the whole scan ---
* this case probably doesn't happen often enough to be worth trying
* harder, and anyway we don't want to hold any index internal locks
@@ -1357,7 +1357,7 @@ retry:
/*
* Check existing tuple's index values to see if it really matches the
- * exclusion condition against the new_values. Returns true if conflict.
+ * exclusion condition against the new_values. Returns true if conflict.
*/
static bool
index_recheck_constraint(Relation index, Oid *constr_procs,
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index f0a89d23b8..4d112604bb 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -47,7 +47,7 @@ typedef struct
} DR_sqlfunction;
/*
- * We have an execution_state record for each query in a function. Each
+ * We have an execution_state record for each query in a function. Each
* record contains a plantree for its query. If the query is currently in
* F_EXEC_RUN state then there's a QueryDesc too.
*
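A rough picture of what such a record holds; the field names below are illustrative guesses at the shape described, not the file's actual declaration:

    /* Sketch of a per-query record; one chain per original query boundary. */
    typedef enum { F_EXEC_START, F_EXEC_RUN, F_EXEC_DONE } ExecStatus;

    typedef struct execution_state
    {
        struct execution_state *next;   /* next query in the same sublist */
        ExecStatus  status;
        void       *plantree;           /* planned statement for this query */
        void       *qd;                 /* QueryDesc; non-NULL only in F_EXEC_RUN */
    } execution_state;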
@@ -466,7 +466,7 @@ sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo,
* Set up the per-query execution_state records for a SQL function.
*
* The input is a List of Lists of parsed and rewritten, but not planned,
- * querytrees. The sublist structure denotes the original query boundaries.
+ * querytrees. The sublist structure denotes the original query boundaries.
*/
static List *
init_execution_state(List *queryTree_list,
@@ -590,7 +590,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
bool isNull;
/*
- * Create memory context that holds all the SQLFunctionCache data. It
+ * Create memory context that holds all the SQLFunctionCache data. It
* must be a child of whatever context holds the FmgrInfo.
*/
fcontext = AllocSetContextCreate(finfo->fn_mcxt,
@@ -602,7 +602,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
oldcontext = MemoryContextSwitchTo(fcontext);
/*
- * Create the struct proper, link it to fcontext and fn_extra. Once this
+ * Create the struct proper, link it to fcontext and fn_extra. Once this
* is done, we'll be able to recover the memory after failure, even if the
* FmgrInfo is long-lived.
*/
@@ -672,7 +672,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
fcache->src = TextDatumGetCString(tmp);
/*
- * Parse and rewrite the queries in the function text. Use sublists to
+ * Parse and rewrite the queries in the function text. Use sublists to
* keep track of the original query boundaries. But we also build a
* "flat" list of the rewritten queries to pass to check_sql_fn_retval.
* This is because the last canSetTag query determines the result type
@@ -712,7 +712,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
* any polymorphic arguments.
*
* Note: we set fcache->returnsTuple according to whether we are returning
- * the whole tuple result or just a single column. In the latter case we
+ * the whole tuple result or just a single column. In the latter case we
* clear returnsTuple because we need not act different from the scalar
* result case, even if it's a rowtype column. (However, we have to force
* lazy eval mode in that case; otherwise we'd need extra code to expand
@@ -944,7 +944,7 @@ postquel_get_single_result(TupleTableSlot *slot,
/*
* Set up to return the function value. For pass-by-reference datatypes,
* be sure to allocate the result in resultcontext, not the current memory
- * context (which has query lifespan). We can't leave the data in the
+ * context (which has query lifespan). We can't leave the data in the
* TupleTableSlot because we intend to clear the slot before returning.
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
@@ -1052,7 +1052,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
/*
* Switch to context in which the fcache lives. This ensures that our
* tuplestore etc will have sufficient lifetime. The sub-executor is
- * responsible for deleting per-tuple information. (XXX in the case of a
+ * responsible for deleting per-tuple information. (XXX in the case of a
* long-lived FmgrInfo, this policy represents more memory leakage, but
* it's not entirely clear where to keep stuff instead.)
*/
@@ -1106,7 +1106,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
* suspend execution before completion is if we are returning a row from a
* lazily-evaluated SELECT. So, when first entering this loop, we'll
* either start a new query (and push a fresh snapshot) or re-establish
- * the active snapshot from the existing query descriptor. If we need to
+ * the active snapshot from the existing query descriptor. If we need to
* start a new query in a subsequent execution of the loop, either we need
* a fresh snapshot (and pushed_snapshot is false) or the existing
* snapshot is on the active stack and we can just bump its command ID.
@@ -1162,7 +1162,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
* Break from loop if we didn't shut down (implying we got a
* lazily-evaluated row). Otherwise we'll press on till the whole
* function is done, relying on the tuplestore to keep hold of the
- * data to eventually be returned. This is necessary since an
+ * data to eventually be returned. This is necessary since an
* INSERT/UPDATE/DELETE RETURNING that sets the result might be
* followed by additional rule-inserted commands, and we want to
* finish doing all those commands before we return anything.
@@ -1184,7 +1184,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
/*
* Flush the current snapshot so that we will take a new one for
- * the new query list. This ensures that new snaps are taken at
+ * the new query list. This ensures that new snaps are taken at
* original-query boundaries, matching the behavior of interactive
* execution.
*/
@@ -1242,7 +1242,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
else if (fcache->lazyEval)
{
/*
- * We are done with a lazy evaluation. Clean up.
+ * We are done with a lazy evaluation. Clean up.
*/
tuplestore_clear(fcache->tstore);
@@ -1266,8 +1266,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
else
{
/*
- * We are done with a non-lazy evaluation. Return whatever is in
- * the tuplestore. (It is now caller's responsibility to free the
+ * We are done with a non-lazy evaluation. Return whatever is in
+ * the tuplestore. (It is now caller's responsibility to free the
* tuplestore when done.)
*/
rsi->returnMode = SFRM_Materialize;
@@ -1379,7 +1379,7 @@ sql_exec_error_callback(void *arg)
/*
* Try to determine where in the function we failed. If there is a query
- * with non-null QueryDesc, finger it. (We check this rather than looking
+ * with non-null QueryDesc, finger it. (We check this rather than looking
* for F_EXEC_RUN state, so that errors during ExecutorStart or
* ExecutorEnd are blamed on the appropriate query; see postquel_start and
* postquel_end.)
@@ -1671,7 +1671,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
* the function that's calling it.
*
* XXX Note that if rettype is RECORD, the IsBinaryCoercible check
- * will succeed for any composite restype. For the moment we rely on
+ * will succeed for any composite restype. For the moment we rely on
* runtime type checking to catch any discrepancy, but it'd be nice to
* do better at parse time.
*/
@@ -1717,7 +1717,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
/*
* Verify that the targetlist matches the return tuple type. We scan
* the non-deleted attributes to ensure that they match the datatypes
- * of the non-resjunk columns. For deleted attributes, insert NULL
+ * of the non-resjunk columns. For deleted attributes, insert NULL
* result columns if the caller asked for that.
*/
tupnatts = tupdesc->natts;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 186c319a3a..09ff03543d 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -25,7 +25,7 @@
* The agg's first input type and transtype must be the same in this case!
*
* If transfunc is marked "strict" then NULL input_values are skipped,
- * keeping the previous transvalue. If transfunc is not strict then it
+ * keeping the previous transvalue. If transfunc is not strict then it
* is called for every input tuple and must deal with NULL initcond
* or NULL input_values for itself.
*
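To see what "strict" buys the transition function, consider this standalone sketch of a sum aggregate; the names and the driver loop are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { long value; bool isnull; } TransValue;

    /* A strict transition function: never sees NULL inputs or a NULL state. */
    static long
    sum_transfn(long state, long input)
    {
        return state + input;
    }

    /*
     * Driver implementing the strictness rule: skip NULL inputs outright,
     * and let the first non-NULL input seed a NULL (uninitialized) transvalue.
     */
    static void
    advance(TransValue *trans, long input, bool inputnull)
    {
        if (inputnull)
            return;                 /* strict: keep the previous transvalue */
        if (trans->isnull)
        {
            trans->value = input;   /* first non-NULL input becomes the state */
            trans->isnull = false;
            return;
        }
        trans->value = sum_transfn(trans->value, input);
    }

    int
    main(void)
    {
        TransValue  t = {0, true};  /* NULL initcond */
        long        in[]   = {5, 0, 7};
        bool        null[] = {false, true, false};

        for (int i = 0; i < 3; i++)
            advance(&t, in[i], null[i]);
        if (t.isnull)
            puts("NULL");
        else
            printf("%ld\n", t.value);   /* prints 12 */
        return 0;
    }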
@@ -66,7 +66,7 @@
* it is completely forbidden for functions to modify pass-by-ref inputs,
* but in the aggregate case we know the left input is either the initial
* transition value or a previous function result, and in either case its
- * value need not be preserved. See int8inc() for an example. Notice that
+ * value need not be preserved. See int8inc() for an example. Notice that
* advance_transition_function() is coded to avoid a data copy step when
* the previous transition value pointer is returned. Also, some
* transition functions want to store working state in addition to the
@@ -132,14 +132,14 @@ typedef struct AggStatePerAggData
Aggref *aggref;
/*
- * Nominal number of arguments for aggregate function. For plain aggs,
- * this excludes any ORDER BY expressions. For ordered-set aggs, this
+ * Nominal number of arguments for aggregate function. For plain aggs,
+ * this excludes any ORDER BY expressions. For ordered-set aggs, this
* counts both the direct and aggregated (ORDER BY) arguments.
*/
int numArguments;
/*
- * Number of aggregated input columns. This includes ORDER BY expressions
+ * Number of aggregated input columns. This includes ORDER BY expressions
* in both the plain-agg and ordered-set cases. Ordered-set direct args
* are not counted, though.
*/
@@ -153,7 +153,7 @@ typedef struct AggStatePerAggData
int numTransInputs;
/*
- * Number of arguments to pass to the finalfn. This is always at least 1
+ * Number of arguments to pass to the finalfn. This is always at least 1
* (the transition state value) plus any ordered-set direct args. If the
* finalfn wants extra args then we pass nulls corresponding to the
* aggregated input columns.
@@ -216,7 +216,7 @@ typedef struct AggStatePerAggData
transtypeByVal;
/*
- * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
+ * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
* with the addition of ORDER BY we now need at least a slot for passing
* data to the sort object, which requires a tupledesc, so we might as
* well go whole hog and use ExecProject too.
@@ -236,7 +236,7 @@ typedef struct AggStatePerAggData
* input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input
- * values straight to the transition function. If it's DISTINCT or
+ * values straight to the transition function. If it's DISTINCT or
* requires ORDER BY, we pass the input values into a Tuplesort object;
* then at completion of the input tuple group, we scan the sorted values,
* eliminate duplicates if needed, and run the transition function on the
@@ -279,7 +279,7 @@ typedef struct AggStatePerGroupData
/*
* Note: noTransValue initially has the same value as transValueIsNull,
- * and if true both are cleared to false at the same time. They are not
+ * and if true both are cleared to false at the same time. They are not
* the same though: if transfn later returns a NULL, we want to keep that
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
@@ -289,7 +289,7 @@ typedef struct AggStatePerGroupData
/*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
- * distinct set of GROUP BY column values. We compute the hash key from
+ * distinct set of GROUP BY column values. We compute the hash key from
* the GROUP BY columns.
*/
typedef struct AggHashEntryData *AggHashEntry;
@@ -416,7 +416,7 @@ initialize_aggregates(AggState *aggstate,
*
* The new values (and null flags) have been preloaded into argument positions
* 1 and up in peraggstate->transfn_fcinfo, so that we needn't copy them again
- * to pass to the transition function. We also expect that the static fields
+ * to pass to the transition function. We also expect that the static fields
* of the fcinfo are already initialized; that was done by ExecInitAgg().
*
* It doesn't matter which memory context this is called in.
@@ -495,7 +495,7 @@ advance_transition_function(AggState *aggstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
@@ -519,7 +519,7 @@ advance_transition_function(AggState *aggstate,
}
/*
- * Advance all the aggregates for one input tuple. The input tuple
+ * Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_outertuple, so that it is accessible
* to ExecEvalExpr. pergroup is the array of per-group structs to use
* (this might be in a hashtable entry).
@@ -609,7 +609,7 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
/*
* Run the transition function for a DISTINCT or ORDER BY aggregate
* with only one input. This is called after we have completed
- * entering all the input values into the sort object. We complete the
+ * entering all the input values into the sort object. We complete the
* sort, read out the values in sorted order, and run the transition
* function on each value (applying DISTINCT if appropriate).
*
@@ -705,7 +705,7 @@ process_ordered_aggregate_single(AggState *aggstate,
/*
* Run the transition function for a DISTINCT or ORDER BY aggregate
* with more than one input. This is called after we have completed
- * entering all the input values into the sort object. We complete the
+ * entering all the input values into the sort object. We complete the
* sort, read out the values in sorted order, and run the transition
* function on each value (applying DISTINCT if appropriate).
*
@@ -1070,9 +1070,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
* the appropriate attribute for each aggregate function use (Aggref
* node) appearing in the targetlist or qual of the node. The number
* of tuples to aggregate over depends on whether grouped or plain
- * aggregation is selected. In grouped aggregation, we produce a result
+ * aggregation is selected. In grouped aggregation, we produce a result
* row for each group; in plain aggregation there's a single result row
- * for the whole query. In either case, the value of each aggregate is
+ * for the whole query. In either case, the value of each aggregate is
* stored in the expression context to be used when ExecProject evaluates
* the result tuple.
*/
@@ -1097,7 +1097,7 @@ ExecAgg(AggState *node)
}
/*
- * Exit if nothing left to do. (We must do the ps_TupFromTlist check
+ * Exit if nothing left to do. (We must do the ps_TupFromTlist check
* first, because in some cases agg_done gets set before we emit the final
* aggregate tuple, and we have to finish running SRFs for it.)
*/
@@ -1181,11 +1181,11 @@ agg_retrieve_direct(AggState *aggstate)
/*
* Clear the per-output-tuple context for each group, as well as
* aggcontext (which contains any pass-by-ref transvalues of the old
- * group). We also clear any child contexts of the aggcontext; some
+ * group). We also clear any child contexts of the aggcontext; some
* aggregate functions store working state in such contexts.
*
* We use ReScanExprContext not just ResetExprContext because we want
- * any registered shutdown callbacks to be called. That allows
+ * any registered shutdown callbacks to be called. That allows
* aggregate functions to ensure they've cleaned up any non-memory
* resources.
*/
@@ -1518,8 +1518,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggstate->hashtable = NULL;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
@@ -1552,7 +1552,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no sense
+ * contain other agg calls in their arguments. This would make no sense
* under SQL semantics anyway (and it's forbidden by the spec). Because
* that is true, we don't need to worry about evaluating the aggs in any
* particular order.
@@ -1599,7 +1599,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* This is not an error condition: we might be using the Agg node just
* to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of the
- * Aggrefs in the targetlist and qual. So keep going, but force local
+ * Aggrefs in the targetlist and qual. So keep going, but force local
* copy of numaggs positive so that palloc()s below don't choke.
*/
numaggs = 1;
@@ -1760,7 +1760,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
}
/*
- * Get actual datatypes of the (nominal) aggregate inputs. These
+ * Get actual datatypes of the (nominal) aggregate inputs. These
* could be different from the agg's declared input types, when the
* agg accepts ANY or a polymorphic type.
*/
@@ -1852,7 +1852,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary-compatible), so
* that it's OK to use the first aggregated input value as the initial
- * transValue. This should have been checked at agg definition time,
+ * transValue. This should have been checked at agg definition time,
* but we must check again in case the transfn's strictness property
* has been changed.
*/
@@ -1885,7 +1885,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/*
* If we're doing either DISTINCT or ORDER BY for a plain agg, then we
* have a list of SortGroupClause nodes; fish out the data in them and
- * stick them into arrays. We ignore ORDER BY for an ordered-set agg,
+ * stick them into arrays. We ignore ORDER BY for an ordered-set agg,
* however; the agg's transfn and finalfn are responsible for that.
*
* Note that by construction, if there is a DISTINCT clause then the
@@ -2144,8 +2144,8 @@ ExecReScanAgg(AggState *node)
*
* The transition and/or final functions of an aggregate may want to verify
* that they are being called as aggregates, rather than as plain SQL
- * functions. They should use this function to do so. The return value
- * is nonzero if being called as an aggregate, or zero if not. (Specific
+ * functions. They should use this function to do so. The return value
+ * is nonzero if being called as an aggregate, or zero if not. (Specific
* nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
* values could conceivably appear in future.)
*
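
The strictness rules described in the nodeAgg.c hunks above (NULL inputs are skipped, keeping the previous transvalue; the first non-NULL input is auto-substituted for a NULL initial transvalue) can be illustrated outside the executor. A minimal standalone C sketch with invented names and a sum-style transition step, not the backend's actual code:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of strict-transition-function semantics for a sum()-like agg:
 * NULL inputs are skipped, and a NULL initial state is replaced by the
 * first non-NULL input rather than being fed to the transition step. */
typedef struct { long state; bool isnull; } TransValue;

static void advance(TransValue *tv, long input, bool inputnull)
{
    if (inputnull)
        return;             /* strict transfn: keep previous transvalue */
    if (tv->isnull)
    {
        tv->state = input;  /* first non-NULL input is auto-substituted */
        tv->isnull = false;
        return;
    }
    tv->state += input;     /* ordinary transition step */
}

int main(void)
{
    long vals[] = {1, 0, 7, 4};
    bool nulls[] = {false, true, false, false};
    TransValue tv = {0, true};      /* NULL initcond */

    for (int i = 0; i < 4; i++)
        advance(&tv, vals[i], nulls[i]);
    printf("%ld\n", tv.isnull ? -1 : tv.state);     /* prints 12 */
    return 0;
}
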
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index 6185c1d0d1..ef121c420d 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -33,7 +33,7 @@
* /
* Append -------+------+------+--- nil
* / \ | | |
- * nil nil ... ... ...
+ * nil nil ... ... ...
* subplans
*
* Append nodes are currently used for unions, and to support
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 7d8a3f2c24..9b1e97578d 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -5,7 +5,7 @@
*
* NOTE: it is critical that this plan type only be used with MVCC-compliant
* snapshots (ie, regular snapshots, not SnapshotAny or one of the other
- * special snapshots). The reason is that since index and heap scans are
+ * special snapshots). The reason is that since index and heap scans are
* decoupled, there can be no assurance that the index tuple prompting a
* visit to a particular heap TID still exists when the visit is made.
* Therefore the tuple might not exist anymore either (which is OK because
@@ -340,7 +340,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index ac135d9ba8..9cc5345952 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -147,7 +147,7 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
scanstate->ss.ss_currentRelation = currentRelation;
/*
- * get the scan type from the relation descriptor. (XXX at some point we
+ * get the scan type from the relation descriptor. (XXX at some point we
* might want to let the FDW editorialize on the scan tupdesc.)
*/
ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation));
diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c
index 0300941a52..da5d8c114d 100644
--- a/src/backend/executor/nodeFunctionscan.c
+++ b/src/backend/executor/nodeFunctionscan.c
@@ -232,7 +232,7 @@ FunctionNext(FunctionScanState *node)
}
/*
- * If alldone, we just return the previously-cleared scanslot. Otherwise,
+ * If alldone, we just return the previously-cleared scanslot. Otherwise,
* finish creating the virtual tuple.
*/
if (!alldone)
@@ -449,8 +449,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags)
* Create the combined TupleDesc
*
* If there is just one function without ordinality, the scan result
- * tupdesc is the same as the function result tupdesc --- except that
- * we may stuff new names into it below, so drop any rowtype label.
+ * tupdesc is the same as the function result tupdesc --- except that we
+ * may stuff new names into it below, so drop any rowtype label.
*/
if (scanstate->simple)
{
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 95ed9bd9d0..589b2f1509 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -365,7 +365,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls)
/*
* Set up for skew optimization, if possible and there's a need for more
- * than one batch. (In a one-batch join, there's no point in it.)
+ * than one batch. (In a one-batch join, there's no point in it.)
*/
if (nbatch > 1)
ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
@@ -407,7 +407,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Estimate tupsize based on footprint of tuple in hashtable... note this
- * does not allow for any palloc overhead. The manipulations of spaceUsed
+ * does not allow for any palloc overhead. The manipulations of spaceUsed
* don't count palloc overhead either.
*/
tupsize = HJTUPLE_OVERHEAD +
@@ -459,7 +459,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
* memory is filled. Set nbatch to the smallest power of 2 that appears
- * sufficient. The Min() steps limit the results so that the pointer
+ * sufficient. The Min() steps limit the results so that the pointer
* arrays we'll try to allocate do not exceed work_mem.
*/
max_pointers = (work_mem * 1024L) / sizeof(void *);
@@ -498,8 +498,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Both nbuckets and nbatch must be powers of 2 to make
- * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
- * nbuckets to the next larger power of 2. We also force nbuckets to not
+ * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
+ * nbuckets to the next larger power of 2. We also force nbuckets to not
* be real small, by starting the search at 2^10. (Note: above we made
* sure that nbuckets is not more than INT_MAX / 2, so this loop cannot
* overflow, nor can the final shift to recalculate nbuckets.)
@@ -817,7 +817,7 @@ ExecHashGetHashValue(HashJoinTable hashtable,
* the hash support function as strict even if the operator is not.
*
* Note: currently, all hashjoinable operators must be strict since
- * the hash index AM assumes that. However, it takes so little extra
+ * the hash index AM assumes that. However, it takes so little extra
* code here to allow non-strict that we may as well do it.
*/
if (isNull)
@@ -1237,7 +1237,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
/*
* While we have not hit a hole in the hashtable and have not hit
* the desired bucket, we have collided with some previous hash
- * value, so try the next bucket location. NB: this code must
+ * value, so try the next bucket location. NB: this code must
* match ExecHashGetSkewBucket.
*/
bucket = hashvalue & (nbuckets - 1);
@@ -1435,7 +1435,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
* NOTE: this is not nearly as simple as it looks on the surface, because
* of the possibility of collisions in the hashtable. Suppose that hash
* values A and B collide at a particular hashtable entry, and that A was
- * entered first so B gets shifted to a different table entry. If we were
+ * entered first so B gets shifted to a different table entry. If we were
* to remove A first then ExecHashGetSkewBucket would mistakenly start
* reporting that B is not in the hashtable, because it would hit the NULL
* before finding B. However, we always remove entries in the reverse
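
The power-of-2 constraint on nbuckets and nbatch noted in the hunks above exists so that bucket and batch numbers can be extracted with masks and shifts instead of division. A standalone sketch in the spirit of ExecHashGetBucketAndBatch; my_log2 and the example sizes are illustrative assumptions (a real implementation would precompute the log):

#include <stdint.h>
#include <stdio.h>

/* With nbuckets and nbatch both powers of 2, bucket and batch numbers
 * are just disjoint bit fields of the 32-bit hash value. */
static int my_log2(long n)
{
    int i = 0;

    while ((1L << i) < n)
        i++;
    return i;
}

static void get_bucket_and_batch(uint32_t hashvalue,
                                 int nbuckets, int nbatch,
                                 int *bucketno, int *batchno)
{
    *bucketno = hashvalue & (nbuckets - 1);
    *batchno = (hashvalue >> my_log2(nbuckets)) & (nbatch - 1);
}

int main(void)
{
    int bucket, batch;

    get_bucket_and_batch(0xDEADBEEF, 1024, 4, &bucket, &batch);
    printf("bucket=%d batch=%d\n", bucket, batch);
    return 0;
}
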
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 9c8398a9cf..7eec3f333d 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -126,7 +126,7 @@ ExecHashJoin(HashJoinState *node)
* check this when the outer relation's startup cost is less
* than the projected cost of building the hash table.
* Otherwise it's best to build the hash table first and see
- * if the inner relation is empty. (When it's a left join, we
+ * if the inner relation is empty. (When it's a left join, we
* should always make this check, since we aren't going to be
* able to skip the join on the strength of an empty inner
* relation anyway.)
@@ -530,7 +530,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
* tuple slot of the Hash node (which is our inner plan). we can do this
* because Hash nodes don't return tuples via ExecProcNode() -- instead
* the hash join node uses ExecScanHashBucket() to get at the contents of
- * the hash table. -cim 6/9/91
+ * the hash table. -cim 6/9/91
*/
{
HashState *hashstate = (HashState *) innerPlanState(hjstate);
@@ -896,7 +896,7 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
/*
* ExecHashJoinGetSavedTuple
- * read the next tuple from a batch file. Return NULL if no more.
+ * read the next tuple from a batch file. Return NULL if no more.
*
* On success, *hashvalue is set to the tuple's hash value, and the tuple
* itself is stored in the given slot.
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 8d5c3544d5..c55723608d 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -88,7 +88,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
* Note on Memory Ordering Effects: visibilitymap_test does not lock
* the visibility map buffer, and therefore the result we read here
* could be slightly stale. However, it can't be stale enough to
- * matter. It suffices to show that (1) there is a read barrier
+ * matter. It suffices to show that (1) there is a read barrier
* between the time we read the index TID and the time we test the
* visibility map; and (2) there is a write barrier between the time
* some other concurrent process clears the visibility map bit and the
@@ -113,7 +113,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
/*
* Only MVCC snapshots are supported here, so there should be no
* need to keep following the HOT chain once a visible entry has
- * been found. If we did want to allow that, we'd need to keep
+ * been found. If we did want to allow that, we'd need to keep
* more state to remember not to call index_getnext_tid next time.
*/
if (scandesc->xs_continue_hot)
@@ -122,7 +122,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
/*
* Note: at this point we are holding a pin on the heap page, as
* recorded in scandesc->xs_cbuf. We could release that pin now,
- * but it's not clear whether it's a win to do so. The next index
+ * but it's not clear whether it's a win to do so. The next index
* entry might require a visit to the same heap page.
*/
}
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 4bde1e3afe..2b89dc60f6 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -216,7 +216,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
/*
* For each run-time key, extract the run-time expression and evaluate
- * it with respect to the current context. We then stick the result
+ * it with respect to the current context. We then stick the result
* into the proper scan key.
*
* Note: the result of the eval could be a pass-by-ref value that's
@@ -349,7 +349,7 @@ ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys)
/*
* Note we advance the rightmost array key most quickly, since it will
* correspond to the lowest-order index column among the available
- * qualifications. This is hypothesized to result in better locality of
+ * qualifications. This is hypothesized to result in better locality of
* access in the index.
*/
for (j = numArrayKeys - 1; j >= 0; j--)
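
The "advance the rightmost array key most quickly" rule quoted above is essentially a mixed-radix odometer: the rightmost key cycles fastest and carries leftward. A minimal sketch of that carry scheme, with invented arrays standing in for the per-key scan state:

#include <stdbool.h>
#include <stdio.h>

/* Returns false once every key has wrapped, i.e. all combinations of
 * array-key values have been produced. */
static bool advance_keys(int *cur, const int *nvalues, int nkeys)
{
    for (int j = nkeys - 1; j >= 0; j--)
    {
        if (++cur[j] < nvalues[j])
            return true;    /* advanced this key; done */
        cur[j] = 0;         /* wrapped around; carry to the key at left */
    }
    return false;
}

int main(void)
{
    int cur[2] = {0, 0};
    int nvalues[2] = {2, 3};    /* e.g. col1 IN (a,b), col2 IN (x,y,z) */

    do
        printf("(%d,%d)\n", cur[0], cur[1]);
    while (advance_keys(cur, nvalues, 2));
    return 0;   /* six combinations, rightmost key varying fastest */
}
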
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index ba65047103..0c723ac224 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -113,7 +113,7 @@ ExecLimit(LimitState *node)
/*
* The subplan is known to return no tuples (or not more than
- * OFFSET tuples, in general). So we return no tuples.
+ * OFFSET tuples, in general). So we return no tuples.
*/
return NULL;
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index ae107961ba..298d4b4d01 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -182,7 +182,7 @@ lnext:
tuple.t_self = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Initialize EPQ state if we
+ * Need to run a recheck subquery. Initialize EPQ state if we
* didn't do so already.
*/
if (!epq_started)
@@ -213,7 +213,7 @@ lnext:
{
/*
* First, fetch a copy of any rows that were successfully locked
- * without any update having occurred. (We do this in a separate pass
+ * without any update having occurred. (We do this in a separate pass
* so as to avoid overhead in the common case where there are no
* concurrent updates.)
*/
@@ -318,7 +318,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
/*
* Locate the ExecRowMark(s) that this node is responsible for, and
- * construct ExecAuxRowMarks for them. (InitPlan should already have
+ * construct ExecAuxRowMarks for them. (InitPlan should already have
* built the global list of ExecRowMarks.)
*/
lrstate->lr_arowMarks = NIL;
@@ -340,7 +340,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
/*
- * Only locking rowmarks go into our own list. Non-locking marks are
+ * Only locking rowmarks go into our own list. Non-locking marks are
* passed off to the EvalPlanQual machinery. This is because we don't
* want to bother fetching non-locked rows unless we actually have to
* do an EPQ recheck.
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 13002bf9b4..4a632ee686 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -185,7 +185,7 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
/*
* Tuplestore's interpretation of the flag bits is subtly different from
* the general executor meaning: it doesn't think BACKWARD necessarily
- * means "backwards all the way to start". If told to support BACKWARD we
+ * means "backwards all the way to start". If told to support BACKWARD we
* must include REWIND in the tuplestore eflags, else tuplestore_trim
* might throw away too much.
*/
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index 74fa40da74..47ed068c7b 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -32,7 +32,7 @@
* /
* MergeAppend---+------+------+--- nil
* / \ | | |
- * nil nil ... ... ...
+ * nil nil ... ... ...
* subplans
*/
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 2a1b4ed8b6..bc036a30b0 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -41,7 +41,7 @@
*
* Therefore, rather than directly executing the merge join clauses,
* we evaluate the left and right key expressions separately and then
- * compare the columns one at a time (see MJCompare). The planner
+ * compare the columns one at a time (see MJCompare). The planner
* passes us enough information about the sort ordering of the inputs
* to allow us to determine how to make the comparison. We may use the
* appropriate btree comparison function, since Postgres' only notion
@@ -269,7 +269,7 @@ MJExamineQuals(List *mergeclauses,
* input, since we assume mergejoin operators are strict. If the NULL
* is in the first join column, and that column sorts nulls last, then
* we can further conclude that no following tuple can match anything
- * either, since they must all have nulls in the first column. However,
+ * either, since they must all have nulls in the first column. However,
* that case is only interesting if we're not in FillOuter mode, else
* we have to visit all the tuples anyway.
*
@@ -325,7 +325,7 @@ MJEvalOuterValues(MergeJoinState *mergestate)
/*
* MJEvalInnerValues
*
- * Same as above, but for the inner tuple. Here, we have to be prepared
+ * Same as above, but for the inner tuple. Here, we have to be prepared
* to load data from either the true current inner, or the marked inner,
* so caller must tell us which slot to load from.
*/
@@ -736,7 +736,7 @@ ExecMergeJoin(MergeJoinState *node)
case MJEVAL_MATCHABLE:
/*
- * OK, we have the initial tuples. Begin by skipping
+ * OK, we have the initial tuples. Begin by skipping
* non-matching tuples.
*/
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
@@ -1131,7 +1131,7 @@ ExecMergeJoin(MergeJoinState *node)
* which means that all subsequent outer tuples will be
* larger than our marked inner tuples. So we need not
* revisit any of the marked tuples but can proceed to
- * look for a match to the current inner. If there's
+ * look for a match to the current inner. If there's
* no more inners, no more matches are possible.
* ----------------
*/
@@ -1522,7 +1522,7 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
* For certain types of inner child nodes, it is advantageous to issue
* MARK every time we advance past an inner tuple we will never return to.
* For other types, MARK on a tuple we cannot return to is a waste of
- * cycles. Detect which case applies and set mj_ExtraMarks if we want to
+ * cycles. Detect which case applies and set mj_ExtraMarks if we want to
* issue "unnecessary" MARK calls.
*
* Currently, only Material wants the extra MARKs, and it will be helpful
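
The column-at-a-time comparison attributed to MJCompare above can be approximated as: apply a per-column btree-style comparator, flip the sign for descending sort order, and stop at the first column that decides the ordering. A standalone simplification with invented names (compare_long, mj_compare), not the executor's real data structures:

#include <stdio.h>

typedef int (*CompareFn)(long a, long b);

static int compare_long(long a, long b)
{
    return (a < b) ? -1 : (a > b) ? 1 : 0;
}

static int mj_compare(const long *outer, const long *inner,
                      const CompareFn *cmp, const int *descending,
                      int nkeys)
{
    for (int i = 0; i < nkeys; i++)
    {
        int r = cmp[i](outer[i], inner[i]);

        if (descending[i])
            r = -r;
        if (r != 0)
            return r;       /* this column decides the ordering */
    }
    return 0;               /* all key columns equal: a join match */
}

int main(void)
{
    long outer[] = {1, 9}, inner[] = {1, 5};
    CompareFn cmp[] = {compare_long, compare_long};
    int desc[] = {0, 0};

    printf("%d\n", mj_compare(outer, inner, cmp, desc, 2));  /* prints 1 */
    return 0;
}
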
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index fca7a2581f..8ac60477fb 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -30,7 +30,7 @@
*
* If the query specifies RETURNING, then the ModifyTable returns a
* RETURNING tuple after completing each row insert, update, or delete.
- * It must be called again to continue the operation. Without RETURNING,
+ * It must be called again to continue the operation. Without RETURNING,
* we just loop within the node until all the work is done, then
* return NULL. This avoids useless call/return overhead.
*/
@@ -419,7 +419,7 @@ ldelete:;
* proceed. We don't want to discard the original DELETE
* while keeping the triggered actions based on its deletion;
* and it would be no better to allow the original DELETE
- * while discarding updates that it triggered. The row update
+ * while discarding updates that it triggered. The row update
* carries some information that might be important according
* to business rules; so throwing an error is the only safe
* course.
@@ -491,7 +491,7 @@ ldelete:;
{
/*
* We have to put the target tuple into a slot, which means first we
- * gotta fetch it. We can use the trigger tuple slot.
+ * gotta fetch it. We can use the trigger tuple slot.
*/
TupleTableSlot *rslot;
HeapTupleData deltuple;
@@ -549,7 +549,7 @@ ldelete:;
* note: we can't run UPDATE queries with transactions
* off because UPDATEs are actually INSERTs and our
* scan will mistakenly loop forever, updating the tuple
- * it just inserted.. This should be fixed but until it
+ * it just inserted.. This should be fixed but until it
* is, we don't want to get stuck in an infinite loop
* which corrupts your database..
*
@@ -657,7 +657,7 @@ ExecUpdate(ItemPointer tupleid,
*
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck constraints. (We don't need to
- * redo triggers, however. If there are any BEFORE triggers then
+ * redo triggers, however. If there are any BEFORE triggers then
* trigger.c will have done heap_lock_tuple to lock the correct tuple,
* so there's no need to do them again.)
*/
@@ -900,7 +900,7 @@ ExecModifyTable(ModifyTableState *node)
/*
* es_result_relation_info must point to the currently active result
- * relation while we are within this ModifyTable node. Even though
+ * relation while we are within this ModifyTable node. Even though
* ModifyTable nodes can't be nested statically, they can be nested
* dynamically (since our subplan could include a reference to a modifying
* CTE). So we have to save and restore the caller's value.
@@ -916,7 +916,7 @@ ExecModifyTable(ModifyTableState *node)
for (;;)
{
/*
- * Reset the per-output-tuple exprcontext. This is needed because
+ * Reset the per-output-tuple exprcontext. This is needed because
* triggers expect to use that context as workspace. It's a bit ugly
* to do this below the top level of the plan, however. We might need
* to rethink this later.
@@ -973,6 +973,7 @@ ExecModifyTable(ModifyTableState *node)
* ctid!! */
tupleid = &tuple_ctid;
}
+
/*
* Use the wholerow attribute, when available, to reconstruct
* the old relation tuple.
@@ -1105,7 +1106,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* call ExecInitNode on each of the plans to be executed and save the
* results into the array "mt_plans". This is also a convenient place to
* verify that the proposed target relations are valid and open their
- * indexes for insertion of new index entries. Note we *must* set
+ * indexes for insertion of new index entries. Note we *must* set
* estate->es_result_relation_info correctly while we initialize each
* sub-plan; ExecContextForcesOids depends on that!
*/
@@ -1125,7 +1126,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do this
+ * index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes. Also,
* inside an EvalPlanQual operation, the indexes might be open
* already, since we share the resultrel state with the original
@@ -1175,6 +1176,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
ExprState *wcoExpr = ExecInitExpr((Expr *) wco->qual,
mtstate->mt_plans[i]);
+
wcoExprs = lappend(wcoExprs, wcoExpr);
}
@@ -1194,7 +1196,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* Initialize result tuple slot and assign its rowtype using the first
- * RETURNING list. We assume the rest will look the same.
+ * RETURNING list. We assume the rest will look the same.
*/
tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
false);
@@ -1240,7 +1242,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* If we have any secondary relations in an UPDATE or DELETE, they need to
* be treated like non-locked relations in SELECT FOR UPDATE, ie, the
- * EvalPlanQual mechanism needs to be told about them. Locate the
+ * EvalPlanQual mechanism needs to be told about them. Locate the
* relevant ExecRowMarks.
*/
foreach(l, node->rowMarks)
@@ -1281,7 +1283,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* attribute present --- no need to look first.
*
* If there are multiple result relations, each one needs its own junk
- * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
+ * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
* can't be fooled by some needing a filter and some not.
*
* This section of code is also a convenient place to verify that the
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 32c859c1a2..de3d87a5d6 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -316,7 +316,7 @@ ExecReScanRecursiveUnion(RecursiveUnionState *node)
/*
* if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. Because of above, we only have to do this to the
+ * first ExecProcNode. Because of above, we only have to do this to the
* non-recursive term.
*/
if (outerPlan->chgParam == NULL)
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 1f32c93848..75f6ed9883 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -5,7 +5,7 @@
*
* The input of a SetOp node consists of tuples from two relations,
* which have been combined into one dataset, with a junk attribute added
- * that shows which relation each tuple came from. In SETOP_SORTED mode,
+ * that shows which relation each tuple came from. In SETOP_SORTED mode,
* the input has furthermore been sorted according to all the grouping
* columns (ie, all the non-junk attributes). The SetOp node scans each
* group of identical tuples to determine how many came from each input
@@ -18,7 +18,7 @@
* relation is the left-hand one for EXCEPT, and tries to make the smaller
* input relation come first for INTERSECT. We build a hash table in memory
* with one entry for each group of identical tuples, and count the number of
- * tuples in the group from each relation. After seeing all the input, we
+ * tuples in the group from each relation. After seeing all the input, we
* scan the hashtable and generate the correct output using those counts.
* We can avoid making hashtable entries for any tuples appearing only in the
* second input relation, since they cannot result in any output.
@@ -268,7 +268,7 @@ setop_retrieve_direct(SetOpState *setopstate)
/*
* Store the copied first input tuple in the tuple table slot reserved
- * for it. The tuple will be deleted when it is cleared from the
+ * for it. The tuple will be deleted when it is cleared from the
* slot.
*/
ExecStoreTuple(setopstate->grp_firstTuple,
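
The counting scheme described in the SETOP_HASHED comments above can be shown with a toy INTERSECT ALL: tally how many copies of each value each input produced, then emit min(lcount, rcount) copies. The fixed-size array is a stand-in for the real hashtable of arbitrary tuples, and, unlike the optimization noted above, this sketch does count keys that appear only in the second input:

#include <stdio.h>

#define NKEYS 8

int main(void)
{
    int left[]  = {1, 1, 2, 5};
    int right[] = {1, 2, 2, 7};
    int lcount[NKEYS] = {0}, rcount[NKEYS] = {0};

    for (int i = 0; i < 4; i++)
        lcount[left[i]]++;
    for (int i = 0; i < 4; i++)
        rcount[right[i]]++;     /* keys absent from the first input can
                                 * never appear in the output */

    for (int k = 0; k < NKEYS; k++)
    {
        int n = (lcount[k] < rcount[k]) ? lcount[k] : rcount[k];

        while (n-- > 0)
            printf("%d\n", k);  /* prints 1 and 2 */
    }
    return 0;
}
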
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 49d193bbae..5d02d9420b 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -261,12 +261,12 @@ ExecScanSubPlan(SubPlanState *node,
* semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
* (ROWCOMPARE_SUBLINK doesn't allow multiple tuples from the subplan.)
* NULL results from the combining operators are handled according to the
- * usual SQL semantics for OR and AND. The result for no input tuples is
+ * usual SQL semantics for OR and AND. The result for no input tuples is
* FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* ROWCOMPARE_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. If zero tuples are produced, we return
+ * tuple, else an error is raised. If zero tuples are produced, we return
* NULL. Assuming we get a tuple, we just use its first column (there can
* be only one non-junk column in this case).
*
@@ -409,7 +409,7 @@ ExecScanSubPlan(SubPlanState *node,
else if (!found)
{
/*
- * deal with empty subplan result. result/isNull were previously
+ * deal with empty subplan result. result/isNull were previously
* initialized correctly for all sublink types except EXPR and
* ROWCOMPARE; for those, return NULL.
*/
@@ -894,7 +894,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
*
* This is called from ExecEvalParamExec() when the value of a PARAM_EXEC
* parameter is requested and the param's execPlan field is set (indicating
- * that the param has not yet been evaluated). This allows lazy evaluation
+ * that the param has not yet been evaluated). This allows lazy evaluation
* of initplans: we don't run the subplan until/unless we need its output.
* Note that this routine MUST clear the execPlan fields of the plan's
* output parameters after evaluating them!
@@ -1122,7 +1122,7 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent)
/*
* Select the one to be used. For this, we need an estimate of the number
* of executions of the subplan. We use the number of output rows
- * expected from the parent plan node. This is a good estimate if we are
+ * expected from the parent plan node. This is a good estimate if we are
* in the parent's targetlist, and an underestimate (but probably not by
* more than a factor of 2) if we are in the qual.
*/
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index c69534da77..3d7cce2c9e 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -194,7 +194,7 @@ ExecReScanSubqueryScan(SubqueryScanState *node)
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well, because the
+ * changed-parameter signaling myself. This is just as well, because the
* subplan has its own memory context in which its chgParam state lives.
*/
if (node->ss.ps.chgParam != NULL)
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index 597a26018a..ab3ec9735f 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -4,7 +4,7 @@
* Routines to handle unique'ing of queries where appropriate
*
* Unique is a very simple node type that just filters out duplicate
- * tuples from a stream of sorted tuples from its subplan. It's essentially
+ * tuples from a stream of sorted tuples from its subplan. It's essentially
* a dumbed-down form of Group: the duplicate-removal functionality is
* identical. However, Unique doesn't do projection nor qual checking,
* so it's marginally more efficient for cases where neither is needed.
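
Unique's duplicate filter, as described above, needs no hashing or buffering because its input is sorted, so equal tuples are always adjacent. A minimal sketch of that one-comparison-per-row loop:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    int sorted[] = {1, 1, 2, 4, 4, 4, 9};
    int prev = 0;
    bool have_prev = false;

    for (int i = 0; i < 7; i++)
    {
        if (have_prev && sorted[i] == prev)
            continue;               /* duplicate of previous row: skip */
        printf("%d\n", sorted[i]);  /* prints 1 2 4 9 */
        prev = sorted[i];
        have_prev = true;
    }
    return 0;
}
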
diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c
index 3016a6b072..83b1324abc 100644
--- a/src/backend/executor/nodeValuesscan.c
+++ b/src/backend/executor/nodeValuesscan.c
@@ -215,7 +215,7 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
planstate = &scanstate->ss.ps;
/*
- * Create expression contexts. We need two, one for per-sublist
+ * Create expression contexts. We need two, one for per-sublist
* processing and one for execScan.c to use for quals and projections. We
* cheat a little by using ExecAssignExprContext() to build both.
*/
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 40a925331c..a0470d3eab 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -4,7 +4,7 @@
* routines to handle WindowAgg nodes.
*
* A WindowAgg node evaluates "window functions" across suitable partitions
- * of the input tuple set. Any one WindowAgg works for just a single window
+ * of the input tuple set. Any one WindowAgg works for just a single window
* specification, though it can evaluate multiple window functions sharing
* identical window specifications. The input tuples are required to be
* delivered in sorted order, with the PARTITION BY columns (if any) as
@@ -14,7 +14,7 @@
*
* Since window functions can require access to any or all of the rows in
* the current partition, we accumulate rows of the partition into a
- * tuplestore. The window functions are called using the WindowObject API
+ * tuplestore. The window functions are called using the WindowObject API
* so that they can access those rows as needed.
*
* We also support using plain aggregate functions as window functions.
@@ -280,7 +280,7 @@ advance_windowaggregate(WindowAggState *winstate,
{
/*
* For a strict transfn, nothing happens when there's a NULL input; we
- * just keep the prior transValue. Note transValueCount doesn't
+ * just keep the prior transValue. Note transValueCount doesn't
* change either.
*/
for (i = 1; i <= numArguments; i++)
@@ -330,7 +330,7 @@ advance_windowaggregate(WindowAggState *winstate,
}
/*
- * OK to call the transition function. Set winstate->curaggcontext while
+ * OK to call the transition function. Set winstate->curaggcontext while
* calling it, for possible use by AggCheckCallContext.
*/
InitFunctionCallInfoData(*fcinfo, &(peraggstate->transfn),
@@ -362,7 +362,7 @@ advance_windowaggregate(WindowAggState *winstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
@@ -485,7 +485,7 @@ advance_windowaggregate_base(WindowAggState *winstate,
}
/*
- * OK to call the inverse transition function. Set
+ * OK to call the inverse transition function. Set
* winstate->curaggcontext while calling it, for possible use by
* AggCheckCallContext.
*/
@@ -513,7 +513,7 @@ advance_windowaggregate_base(WindowAggState *winstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if invtransfn returned a pointer to
+ * pfree the prior transValue. But if invtransfn returned a pointer to
* its first input, we don't need to do anything.
*
* Note: the checks for null values here will never fire, but it seems
@@ -827,7 +827,7 @@ eval_windowaggregates(WindowAggState *winstate)
*
* We assume that aggregates using the shared context always restart if
* *any* aggregate restarts, and we may thus clean up the shared
- * aggcontext if that is the case. Private aggcontexts are reset by
+ * aggcontext if that is the case. Private aggcontexts are reset by
* initialize_windowaggregate() if their owning aggregate restarts. If we
* aren't restarting an aggregate, we need to free any previously saved
* result for it, else we'll leak memory.
@@ -864,9 +864,9 @@ eval_windowaggregates(WindowAggState *winstate)
* (i.e., frameheadpos) and aggregatedupto, while restarted aggregates
* contain no rows. If there are any restarted aggregates, we must thus
* begin aggregating anew at frameheadpos, otherwise we may simply
- * continue at aggregatedupto. We must remember the old value of
+ * continue at aggregatedupto. We must remember the old value of
* aggregatedupto to know how long to skip advancing non-restarted
- * aggregates. If we modify aggregatedupto, we must also clear
+ * aggregates. If we modify aggregatedupto, we must also clear
* agg_row_slot, per the loop invariant below.
*/
aggregatedupto_nonrestarted = winstate->aggregatedupto;
@@ -881,7 +881,7 @@ eval_windowaggregates(WindowAggState *winstate)
* Advance until we reach a row not in frame (or end of partition).
*
* Note the loop invariant: agg_row_slot is either empty or holds the row
- * at position aggregatedupto. We advance aggregatedupto after processing
+ * at position aggregatedupto. We advance aggregatedupto after processing
* a row.
*/
for (;;)
@@ -1142,7 +1142,7 @@ spool_tuples(WindowAggState *winstate, int64 pos)
/*
* If the tuplestore has spilled to disk, alternate reading and writing
- * becomes quite expensive due to frequent buffer flushes. It's cheaper
+ * becomes quite expensive due to frequent buffer flushes. It's cheaper
* to force the entire partition to get spooled in one go.
*
* XXX this is a horrid kluge --- it'd be better to fix the performance
@@ -1239,7 +1239,7 @@ release_partition(WindowAggState *winstate)
* to our window framing rule
*
* The caller must have already determined that the row is in the partition
- * and fetched it into a slot. This function just encapsulates the framing
+ * and fetched it into a slot. This function just encapsulates the framing
* rules.
*/
static bool
@@ -1341,7 +1341,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot)
*
* Uses the winobj's read pointer for any required fetches; hence, if the
* frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame head. Also uses the specified slot
+ * not be past the currently known frame head. Also uses the specified slot
* for any required fetches.
*/
static void
@@ -1446,7 +1446,7 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot)
*
* Uses the winobj's read pointer for any required fetches; hence, if the
* frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame tail. Also uses the specified slot
+ * not be past the currently known frame tail. Also uses the specified slot
* for any required fetches.
*/
static void
@@ -1789,8 +1789,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
winstate->ss.ps.state = estate;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &winstate->ss.ps);
@@ -2288,7 +2288,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
/*
* Insist that forward and inverse transition functions have the same
- * strictness setting. Allowing them to differ would require handling
+ * strictness setting. Allowing them to differ would require handling
* more special cases in advance_windowaggregate and
* advance_windowaggregate_base, for no discernible benefit. This should
* have been checked at agg definition time, but we must check again in
@@ -2467,7 +2467,7 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot)
* requested amount of space. Subsequent calls just return the same chunk.
*
* Memory obtained this way is normally used to hold state that should be
- * automatically reset for each new partition. If a window function wants
+ * automatically reset for each new partition. If a window function wants
* to hold state across the whole query, fcinfo->fn_extra can be used in the
* usual way for that.
*/
diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c
index 2138ce78cf..94ecf754fb 100644
--- a/src/backend/executor/nodeWorktablescan.c
+++ b/src/backend/executor/nodeWorktablescan.c
@@ -82,7 +82,7 @@ ExecWorkTableScan(WorkTableScanState *node)
{
/*
* On the first call, find the ancestor RecursiveUnion's state via the
- * Param slot reserved for it. (We can't do this during node init because
+ * Param slot reserved for it. (We can't do this during node init because
* there are corner cases where we'll get the init call before the
* RecursiveUnion does.)
*/
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index e0325c4a7d..7ba1fd9066 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -256,7 +256,7 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
}
/*
- * Pop the stack entry and reset global variables. Unlike
+ * Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that might
* be already gone.
*/
@@ -1306,7 +1306,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
}
/*
- * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
+ * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
* check in transformDeclareCursorStmt because the cursor options might
* not have come through there.
*/
@@ -1560,7 +1560,7 @@ SPI_plan_is_valid(SPIPlanPtr plan)
/*
* SPI_result_code_string --- convert any SPI return code to a string
*
- * This is often useful in error messages. Most callers will probably
+ * This is often useful in error messages. Most callers will probably
* only pass negative (error-case) codes, but for generality we recognize
* the success codes too.
*/
@@ -1630,7 +1630,7 @@ SPI_result_code_string(int code)
* CachedPlanSources.
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
List *
@@ -1646,7 +1646,7 @@ SPI_plan_get_plan_sources(SPIPlanPtr plan)
* return NULL. Caller is responsible for doing ReleaseCachedPlan().
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
CachedPlan *
@@ -2204,7 +2204,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
/*
* The last canSetTag query sets the status values returned to the
- * caller. Be careful to free any tuptables not returned, to
+ * caller. Be careful to free any tuptables not returned, to
* avoid intratransaction memory leak.
*/
if (canSetTag)
diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c
index 23f11360c3..c15c99a1f4 100644
--- a/src/backend/executor/tstoreReceiver.c
+++ b/src/backend/executor/tstoreReceiver.c
@@ -5,7 +5,7 @@
* a Tuplestore.
*
* Optionally, we can force detoasting (but not decompression) of out-of-line
- * toasted values. This is to support cursors WITH HOLD, which must retain
+ * toasted values. This is to support cursors WITH HOLD, which must retain
* data even if the underlying table is dropped.
*
*
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index 9b9ba0a22e..7d0309079d 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -99,7 +99,7 @@ appendStringInfo(StringInfo str, const char *fmt,...)
* appendStringInfoVA
*
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return zero; if not (because there's not enough space), return an estimate
* of the space needed, without modifying str. Typically the caller should
* pass the return value to enlargeStringInfo() before trying again; see
@@ -247,7 +247,7 @@ enlargeStringInfo(StringInfo str, int needed)
int newlen;
/*
- * Guard against out-of-range "needed" values. Without this, we can get
+ * Guard against out-of-range "needed" values. Without this, we can get
* an overflow or infinite loop in the following.
*/
if (needed < 0) /* should not happen */
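
The overflow guard in the enlargeStringInfo hunk above matters because a naive doubling loop can wrap or spin forever on a bad request. A standalone sketch of the pattern; Buf, MAX_ALLOC, and the 64-byte floor are assumptions, not the backend's actual types or limits:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ALLOC (INT_MAX / 2)     /* illustrative cap */

typedef struct { char *data; int len; int maxlen; } Buf;

static void enlarge(Buf *buf, int needed)
{
    int newlen;

    /* Reject bad requests first, so the doubling loop below can
     * neither overflow nor run forever. */
    if (needed < 0 || needed > MAX_ALLOC - buf->len)
    {
        fprintf(stderr, "invalid enlargement request: %d\n", needed);
        exit(EXIT_FAILURE);
    }
    if (buf->len + needed <= buf->maxlen)
        return;                     /* already big enough */

    newlen = (buf->maxlen > 0) ? 2 * buf->maxlen : 64;
    while (buf->len + needed > newlen)
        newlen = 2 * newlen;
    if (newlen > MAX_ALLOC)
        newlen = MAX_ALLOC;         /* still >= len + needed, per guard */

    buf->data = realloc(buf->data, newlen);
    if (buf->data == NULL)
        exit(EXIT_FAILURE);
    buf->maxlen = newlen;
}

int main(void)
{
    Buf b = {NULL, 0, 0};

    enlarge(&b, 6);
    memcpy(b.data, "hello", 6);
    b.len = 6;
    printf("%s (maxlen=%d)\n", b.data, b.maxlen);
    free(b.data);
    return 0;
}
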
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 8fa9aa797f..70b0b93982 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -49,7 +49,7 @@ static int recv_and_check_password_packet(Port *port, char **logdetail);
/* Max size of username ident server can return */
#define IDENT_USERNAME_MAX 512
-/* Standard TCP port number for Ident service. Assigned by IANA */
+/* Standard TCP port number for Ident service. Assigned by IANA */
#define IDENT_PORT 113
static int ident_inet(hbaPort *port);
@@ -677,7 +677,7 @@ recv_password_packet(Port *port)
(errmsg("received password packet")));
/*
- * Return the received string. Note we do not attempt to do any
+ * Return the received string. Note we do not attempt to do any
* character-set conversion on it; since we don't yet know the client's
* encoding, there wouldn't be much point.
*/
@@ -1387,7 +1387,7 @@ interpret_ident_response(const char *ident_response,
/*
* Talk to the ident server on host "remote_ip_addr" and find out who
* owns the tcp connection from his port "remote_port" to port
- * "local_port_addr" on host "local_ip_addr". Return the user name the
+ * "local_port_addr" on host "local_ip_addr". Return the user name the
* ident server gives as "*ident_user".
*
* IP addresses and port numbers are in network byte order.
@@ -1591,7 +1591,7 @@ auth_peer(hbaPort *port)
{
ereport(LOG,
(errmsg("failed to look up local user id %ld: %s",
- (long) uid, errno ? strerror(errno) : _("user does not exist"))));
+ (long) uid, errno ? strerror(errno) : _("user does not exist"))));
return STATUS_ERROR;
}
@@ -2006,8 +2006,8 @@ CheckLDAPAuth(Port *port)
attributes[1] = NULL;
filter = psprintf("(%s=%s)",
- attributes[0],
- port->user_name);
+ attributes[0],
+ port->user_name);
r = ldap_search_s(ldap,
port->hba->ldapbasedn,
@@ -2095,9 +2095,9 @@ CheckLDAPAuth(Port *port)
}
else
fulluser = psprintf("%s%s%s",
- port->hba->ldapprefix ? port->hba->ldapprefix : "",
- port->user_name,
- port->hba->ldapsuffix ? port->hba->ldapsuffix : "");
+ port->hba->ldapprefix ? port->hba->ldapprefix : "",
+ port->user_name,
+ port->hba->ldapsuffix ? port->hba->ldapsuffix : "");
r = ldap_simple_bind_s(ldap, fulluser, passwd);
ldap_unbind(ldap);
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 827d4c5888..4a6bcf5598 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -768,7 +768,7 @@ lo_get_fragment_internal(Oid loOid, int64 offset, int32 nbytes)
LargeObjectDesc *loDesc;
int64 loSize;
int64 result_length;
- int total_read PG_USED_FOR_ASSERTS_ONLY;
+ int total_read PG_USED_FOR_ASSERTS_ONLY;
bytea *result = NULL;
/*
@@ -870,7 +870,7 @@ lo_create_bytea(PG_FUNCTION_ARGS)
Oid loOid = PG_GETARG_OID(0);
bytea *str = PG_GETARG_BYTEA_PP(1);
LargeObjectDesc *loDesc;
- int written PG_USED_FOR_ASSERTS_ONLY;
+ int written PG_USED_FOR_ASSERTS_ONLY;
CreateFSContext();
@@ -893,7 +893,7 @@ lo_put(PG_FUNCTION_ARGS)
int64 offset = PG_GETARG_INT64(1);
bytea *str = PG_GETARG_BYTEA_PP(2);
LargeObjectDesc *loDesc;
- int written PG_USED_FOR_ASSERTS_ONLY;
+ int written PG_USED_FOR_ASSERTS_ONLY;
CreateFSContext();
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 56ad6ab424..59204cfe80 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -30,13 +30,13 @@
* impersonations.
*
* Another benefit of EDH is that it allows the backend and
- * clients to use DSA keys. DSA keys can only provide digital
+ * clients to use DSA keys. DSA keys can only provide digital
* signatures, not encryption, and are often acceptable in
* jurisdictions where RSA keys are unacceptable.
*
* The downside to EDH is that it makes it impossible to
* use ssldump(1) if there's a problem establishing an SSL
- * session. In this case you'll need to temporarily disable
+ * session. In this case you'll need to temporarily disable
* EDH by commenting out the callback.
*
* ...
@@ -119,7 +119,7 @@ char *SSLCipherSuites = NULL;
char *SSLECDHCurve;
/* GUC variable: if false, prefer client ciphers */
-bool SSLPreferServerCiphers;
+bool SSLPreferServerCiphers;
/* ------------------------------------------------------------ */
/* Hardcoded values */
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index b070bfeda3..fd98c60ddb 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -1758,7 +1758,7 @@ check_hba(hbaPort *port)
* Read the config file and create a List of HbaLine records for the contents.
*
* The configuration is read into a temporary list, and if any parse error
- * occurs the old list is kept in place and false is returned. Only if the
+ * occurs the old list is kept in place and false is returned. Only if the
* whole file parses OK is the list replaced, and the function returns true.
*
* On a false result, caller will take care of reporting a FATAL error in case
@@ -2244,7 +2244,7 @@ load_ident(void)
/*
* Determine what authentication method should be used when accessing database
- * "database" from frontend "raddr", user "user". Return the method and
+ * "database" from frontend "raddr", user "user". Return the method and
* an optional argument (stored in fields of *port), and STATUS_OK.
*
* If the file does not contain any entry matching the request, we return
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index e2c929fb52..90bc113681 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -2,7 +2,7 @@
* md5.c
*
* Implements the MD5 Message-Digest Algorithm as specified in
- * RFC 1321. This implementation is a simple one, in that it
+ * RFC 1321. This implementation is a simple one, in that it
* needs every input byte to be buffered before doing any
* calculations. I do not expect this file to be used for
* general purpose MD5'ing of large amounts of data, only for
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 0179451f08..605d8913b1 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -447,7 +447,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
/*
* Note: This might fail on some OS's, like Linux older than
* 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map
- * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
+ * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
* connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
@@ -692,6 +692,7 @@ StreamConnection(pgsocket server_fd, Port *port)
}
#ifdef WIN32
+
/*
* This is a Win32 socket optimization. The ideal size is 32k.
* http://support.microsoft.com/kb/823764/EN-US/
@@ -1126,7 +1127,7 @@ pq_getmessage(StringInfo s, int maxlen)
if (len > 0)
{
/*
- * Allocate space for message. If we run out of room (ridiculously
+ * Allocate space for message. If we run out of room (ridiculously
* large message), we will elog(ERROR), but we want to discard the
* message body so as not to lose communication sync.
*/
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index ba9598a8c6..dfe3a646a1 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -120,7 +120,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen)
* pq_sendcountedtext - append a counted text string (with character set conversion)
*
* The data sent to the frontend by this routine is a 4-byte count field
- * followed by the string. The count includes itself or not, as per the
+ * followed by the string. The count includes itself or not, as per the
* countincludesself flag (pre-3.0 protocol requires it to include itself).
* The passed text string need not be null-terminated, and the data sent
* to the frontend isn't either.
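[Editor's note: the countincludesself rule amounts to a four-byte adjustment on the length word. A sketch of the framing arithmetic (character-set conversion, which the real routine also performs, is omitted here):

    /* Frame a counted text string: 4-byte count, then the bytes.
     * Under the pre-3.0 rule the count field counts itself too. */
    uint32      n = (uint32) datalen + (countincludesself ? 4 : 0);

    pq_sendint(buf, n, 4);                       /* the count field */
    appendBinaryStringInfo(buf, data, datalen);  /* no trailing NUL */
]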
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 1b9cbd1de3..4a563741e9 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -69,7 +69,7 @@ main(int argc, char *argv[])
/*
* Remember the physical location of the initially given argv[] array for
- * possible use by ps display. On some platforms, the argv[] storage must
+ * possible use by ps display. On some platforms, the argv[] storage must
* be overwritten in order to set the process title for ps. In such cases
* save_ps_display_args makes and returns a new copy of the argv[] array.
*
@@ -98,10 +98,10 @@ main(int argc, char *argv[])
MemoryContextInit();
/*
- * Set up locale information from environment. Note that LC_CTYPE and
+ * Set up locale information from environment. Note that LC_CTYPE and
* LC_COLLATE will be overridden later from pg_control if we are in an
* already-initialized database. We set them here so that they will be
- * available to fill pg_control during initdb. LC_MESSAGES will get set
+ * available to fill pg_control during initdb. LC_MESSAGES will get set
* later during GUC option processing, but we set it here to allow startup
* error messages to be localized.
*/
@@ -109,6 +109,7 @@ main(int argc, char *argv[])
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("postgres"));
#ifdef WIN32
+
/*
* Windows uses codepages rather than the environment, so we work around
* that by querying the environment explicitly first for LC_COLLATE and
@@ -202,6 +203,7 @@ main(int argc, char *argv[])
#endif
#ifdef WIN32
+
/*
* Start our win32 signal implementation
*
@@ -227,9 +229,9 @@ main(int argc, char *argv[])
/*
- * Place platform-specific startup hacks here. This is the right
+ * Place platform-specific startup hacks here. This is the right
* place to put code that must be executed early in the launch of any new
- * server process. Note that this code will NOT be executed when a backend
+ * server process. Note that this code will NOT be executed when a backend
* or sub-bootstrap process is forked, unless we are in a fork/exec
* environment (ie EXEC_BACKEND is defined).
*
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index 3a6d0fb236..c927b7891f 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -38,7 +38,7 @@
* where x's are unspecified bits. The two's complement negative is formed
* by inverting all the bits and adding one. Inversion gives
* yyyyyy01111
- * where each y is the inverse of the corresponding x. Incrementing gives
+ * where each y is the inverse of the corresponding x. Incrementing gives
* yyyyyy10000
* and then ANDing with the original value gives
* 00000010000
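[Editor's note: the derivation above is the standard isolate-the-rightmost-one-bit identity; in C it collapses to a single expression:

    uint32      x = 0x34;           /* ...0110100 */
    uint32      r = x & (~x + 1);   /* == x & -x == 0x04, the rightmost 1 bit */
]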
@@ -796,7 +796,7 @@ bms_join(Bitmapset *a, Bitmapset *b)
/*----------
* bms_first_member - find and remove first member of a set
*
- * Returns -1 if set is empty. NB: set is destructively modified!
+ * Returns -1 if set is empty. NB: set is destructively modified!
*
* This is intended as support for iterating through the members of a set.
* The typical pattern is
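[Editor's note: the hunk ends just before the pattern itself; for reference, the destructive iteration the comment is introducing conventionally looks like this (a sketch over the bms_* API):

    Bitmapset  *tmp = bms_copy(inputset);   /* copy first: iteration destroys it */
    int         x;

    while ((x = bms_first_member(tmp)) >= 0)
    {
        /* process member x */
    }
    bms_free(tmp);
]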
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 98ad91078e..43530aa24a 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -4,7 +4,7 @@
* Copy functions for Postgres tree nodes.
*
* NOTE: we currently support copying all node types found in parse and
- * plan trees. We do not support copying executor state trees; there
+ * plan trees. We do not support copying executor state trees; there
* is no need for that, and no point in maintaining all the code that
* would be needed. We also do not support copying Path trees, mainly
* because the circular linkages between RelOptInfo and Path nodes can't
@@ -30,7 +30,7 @@
/*
* Macros to simplify copying of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in a Copy routine are
* named 'newnode' and 'from'.
*/
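[Editor's note: for readers new to this file, a representative macro in that style (shown as a sketch; it works only because of exactly the naming convention the comment describes):

    /* Copy a simple scalar field (int, bool, enum, ...).  Relies on the
     * local variables being named 'newnode' and 'from'. */
    #define COPY_SCALAR_FIELD(fldname) \
        (newnode->fldname = from->fldname)
]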
@@ -1038,7 +1038,7 @@ _copyIntoClause(const IntoClause *from)
/*
* We don't need a _copyExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out copying the common fields...
*/
@@ -3300,7 +3300,7 @@ _copyReplicaIdentityStmt(const ReplicaIdentityStmt *from)
}
static AlterSystemStmt *
-_copyAlterSystemStmt(const AlterSystemStmt * from)
+_copyAlterSystemStmt(const AlterSystemStmt *from)
{
AlterSystemStmt *newnode = makeNode(AlterSystemStmt);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 9901d231cd..2407cb73a3 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -11,7 +11,7 @@
* be handled easily in a simple depth-first traversal.
*
* Currently, in fact, equal() doesn't know how to compare Plan trees
- * either. This might need to be fixed someday.
+ * either. This might need to be fixed someday.
*
* NOTE: it is intentional that parse location fields (in nodes that have
* one) are not compared. This is because we want, for example, a variable
@@ -34,8 +34,8 @@
/*
- * Macros to simplify comparison of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify comparison of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in an Equal routine are
* named 'a' and 'b'.
*/
@@ -131,7 +131,7 @@ _equalIntoClause(const IntoClause *a, const IntoClause *b)
/*
* We don't need an _equalExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out comparing the common fields...
*/
@@ -764,9 +764,9 @@ static bool
_equalPlaceHolderVar(const PlaceHolderVar *a, const PlaceHolderVar *b)
{
/*
- * We intentionally do not compare phexpr. Two PlaceHolderVars with the
+ * We intentionally do not compare phexpr. Two PlaceHolderVars with the
* same ID and levelsup should be considered equal even if the contained
- * expressions have managed to mutate to different states. This will
+ * expressions have managed to mutate to different states. This will
* happen during final plan construction when there are nested PHVs, since
* the inner PHV will get replaced by a Param in some copies of the outer
* PHV. Another way in which it can happen is that initplan sublinks
@@ -1551,7 +1551,7 @@ _equalReplicaIdentityStmt(const ReplicaIdentityStmt *a, const ReplicaIdentityStm
}
static bool
-_equalAlterSystemStmt(const AlterSystemStmt * a, const AlterSystemStmt * b)
+_equalAlterSystemStmt(const AlterSystemStmt *a, const AlterSystemStmt *b)
{
COMPARE_NODE_FIELD(setstmt);
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index aebc5b60c2..f32124bedf 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -796,7 +796,7 @@ list_union_oid(const List *list1, const List *list2)
* "intersection" if list1 is known unique beforehand.
*
* This variant works on lists of pointers, and determines list
- * membership via equal(). Note that the list1 member will be pointed
+ * membership via equal(). Note that the list1 member will be pointed
* to in the result.
*/
List *
@@ -988,7 +988,7 @@ list_append_unique_oid(List *list, Oid datum)
* via equal().
*
* This is almost the same functionality as list_union(), but list1 is
- * modified in-place rather than being copied. Note also that list2's cells
+ * modified in-place rather than being copied. Note also that list2's cells
* are not inserted in list1, so the analogy to list_concat() isn't perfect.
*/
List *
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index 664670d82a..da59c580b0 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -535,7 +535,7 @@ makeDefElemExtended(char *nameSpace, char *name, Node *arg,
* makeFuncCall -
*
* Initialize a FuncCall struct with the information every caller must
- * supply. Any non-default parameters have to be inserted by the caller.
+ * supply. Any non-default parameters have to be inserted by the caller.
*/
FuncCall *
makeFuncCall(List *name, List *args, int location)
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 1e48a7f889..5a98bfbc11 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -239,7 +239,7 @@ exprType(const Node *expr)
/*
* exprTypmod -
* returns the type-specific modifier of the expression's result type,
- * if it can be determined. In many cases, it can't and we return -1.
+ * if it can be determined. In many cases, it can't and we return -1.
*/
int32
exprTypmod(const Node *expr)
@@ -1543,8 +1543,8 @@ leftmostLoc(int loc1, int loc2)
*
* The walker routine should return "false" to continue the tree walk, or
* "true" to abort the walk and immediately return "true" to the top-level
- * caller. This can be used to short-circuit the traversal if the walker
- * has found what it came for. "false" is returned to the top-level caller
+ * caller. This can be used to short-circuit the traversal if the walker
+ * has found what it came for. "false" is returned to the top-level caller
* iff no invocation of the walker returned "true".
*
* The node types handled by expression_tree_walker include all those
@@ -1582,7 +1582,7 @@ leftmostLoc(int loc1, int loc2)
*
* expression_tree_walker will handle SubPlan nodes by recursing normally
* into the "testexpr" and the "args" list (which are expressions belonging to
- * the outer plan). It will not touch the completed subplan, however. Since
+ * the outer plan). It will not touch the completed subplan, however. Since
* there is no link to the original Query, it is not possible to recurse into
* subselects of an already-planned expression tree. This is OK for current
* uses, but may need to be revisited in future.
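[Editor's note: a minimal walker illustrating the true/false protocol spelled out above (the callback name is hypothetical; the tail call into expression_tree_walker is the documented recursion idiom):

    /* Abort the walk, returning true, as soon as any Var is seen. */
    static bool
    contains_var_walker(Node *node, void *context)
    {
        if (node == NULL)
            return false;               /* keep walking */
        if (IsA(node, Var))
            return true;                /* found what we came for */
        return expression_tree_walker(node, contains_var_walker, context);
    }
]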
@@ -2154,8 +2154,8 @@ expression_tree_mutator(Node *node,
return (Node *) copyObject(node);
case T_WithCheckOption:
{
- WithCheckOption *wco = (WithCheckOption *) node;
- WithCheckOption *newnode;
+ WithCheckOption *wco = (WithCheckOption *) node;
+ WithCheckOption *newnode;
FLATCOPY(newnode, wco, WithCheckOption);
MUTATE(newnode->qual, wco->qual, Node *);
@@ -2658,7 +2658,7 @@ expression_tree_mutator(Node *node,
* This routine exists just to reduce the number of places that need to know
* where all the expression subtrees of a Query are. Note it can be used
* for starting a walk at top level of a Query regardless of whether the
- * mutator intends to descend into subqueries. It is also useful for
+ * mutator intends to descend into subqueries. It is also useful for
* descending into subqueries within a mutator.
*
* Some callers want to suppress mutating of certain items in the Query,
@@ -2668,7 +2668,7 @@ expression_tree_mutator(Node *node,
* indicated items. (More flag bits may be added as needed.)
*
* Normally the Query node itself is copied, but some callers want it to be
- * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
+ * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
* modified substructure is safely copied in any case.
*/
Query *
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 10e81391b1..11c7486007 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -13,7 +13,7 @@
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
* have an output function defined here (as well as an input function
- * in readfuncs.c). For use in debugging, we also provide output
+ * in readfuncs.c). For use in debugging, we also provide output
* functions for nodes that appear in raw parsetrees, path, and plan trees.
* These nodes however need not have input functions.
*
@@ -30,8 +30,8 @@
/*
- * Macros to simplify output of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify output of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in an Out
* routine.
*/
diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c
index 3916412dd1..b21d651f95 100644
--- a/src/backend/nodes/params.c
+++ b/src/backend/nodes/params.c
@@ -27,7 +27,7 @@
*
* Note: the intent of this function is to make a static, self-contained
* set of parameter values. If dynamic parameter hooks are present, we
- * intentionally do not copy them into the result. Rather, we forcibly
+ * intentionally do not copy them into the result. Rather, we forcibly
* instantiate all available parameter values and copy the datum values.
*/
ParamListInfo
diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c
index 7a88a0d46a..2c0edff6c1 100644
--- a/src/backend/nodes/read.c
+++ b/src/backend/nodes/read.c
@@ -85,21 +85,21 @@ stringToNode(char *str)
* Backslashes themselves must also be backslashed for consistency.
* Any other character can be, but need not be, backslashed as well.
* * If the resulting token is '<>' (with no backslash), it is returned
- * as a non-NULL pointer to the token but with length == 0. Note that
+ * as a non-NULL pointer to the token but with length == 0. Note that
* there is no other way to get a zero-length token.
*
* Returns a pointer to the start of the next token, and the length of the
- * token (including any embedded backslashes!) in *length. If there are
+ * token (including any embedded backslashes!) in *length. If there are
* no more tokens, NULL and 0 are returned.
*
* NOTE: this routine doesn't remove backslashes; the caller must do so
* if necessary (see "debackslash").
*
* NOTE: prior to release 7.0, this routine also had a special case to treat
- * a token starting with '"' as extending to the next '"'. This code was
+ * a token starting with '"' as extending to the next '"'. This code was
* broken, however, since it would fail to cope with a string containing an
* embedded '"'. I have therefore removed this special case, and instead
- * introduced rules for using backslashes to quote characters. Higher-level
+ * introduced rules for using backslashes to quote characters. Higher-level
* code should add backslashes to a string constant to ensure it is treated
* as a single token.
*/
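[Editor's note: a sketch of how a caller consumes tokens under these rules, using the pg_strtok/debackslash routines this file provides (calling details hedged):

    int         length;
    char       *token = pg_strtok(&length);

    if (token == NULL)
        ;                                   /* no more tokens */
    else if (length == 0)
        ;                                   /* the special <> zero-length token */
    else
        token = debackslash(token, length); /* caller strips backslashes */
]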
@@ -259,7 +259,7 @@ nodeTokenType(char *token, int length)
* Slightly higher-level reader.
*
* This routine applies some semantic knowledge on top of the purely
- * lexical tokenizer pg_strtok(). It can read
+ * lexical tokenizer pg_strtok(). It can read
* * Value token nodes (integers, floats, or strings);
* * General nodes (via parseNodeString() from readfuncs.c);
* * Lists of the above;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index ef1eae91bf..1ec4f3c695 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -12,7 +12,7 @@
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
- * never have occasion to read them in. (There was once code here that
+ * never have occasion to read them in. (There was once code here that
* claimed to read them, but it was broken as well as unused.) We
* never read executor state trees, either.
*
@@ -34,7 +34,7 @@
/*
* Macros to simplify reading of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in a Read
* routine.
*/
@@ -130,7 +130,7 @@
/*
* NOTE: use atoi() to read values written with %d, or atoui() to read
* values written with %u in outfuncs.c. An exception is OID values,
- * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
+ * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
* but this will probably change in the future.)
*/
#define atoui(x) ((unsigned int) strtoul((x), NULL, 10))
@@ -601,7 +601,7 @@ _readOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -632,7 +632,7 @@ _readDistinctExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -663,7 +663,7 @@ _readNullIfExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -694,7 +694,7 @@ _readScalarArrayOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index df3ae93b1d..a880c81cf1 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -19,7 +19,7 @@
* of lossiness. In theory we could fall back to page ranges at some
* point, but for now that seems useless complexity.
*
- * We also support the notion of candidate matches, or rechecking. This
+ * We also support the notion of candidate matches, or rechecking. This
* means we know that a search need visit only some tuples on a page,
* but we are not certain that all of those tuples are real matches.
* So the eventual heap scan must recheck the quals for these tuples only,
@@ -48,7 +48,7 @@
/*
* The maximum number of tuples per page is not large (typically 256 with
* 8K pages, or 1024 with 32K pages). So there's not much point in making
- * the per-page bitmaps variable size. We just legislate that the size
+ * the per-page bitmaps variable size. We just legislate that the size
* is this:
*/
#define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage
@@ -61,10 +61,10 @@
* for that page in the page table.
*
* We actually store both exact pages and lossy chunks in the same hash
- * table, using identical data structures. (This is because dynahash.c's
+ * table, using identical data structures. (This is because dynahash.c's
* memory management doesn't allow space to be transferred easily from one
* hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the
- * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
+ * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
* also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer
* remainder operations. So, define it like this:
*/
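[Editor's note: the power-of-2 requirement buys a cheap remainder; for unsigned page numbers the modulo reduces to a mask:

    /* With PAGES_PER_CHUNK a power of 2, the chunk-local bit number is a
     * mask away, and the chunk header page follows by subtraction: */
    bitno = pageno & (PAGES_PER_CHUNK - 1);     /* == pageno % PAGES_PER_CHUNK */
    chunk_pageno = pageno - bitno;
]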
@@ -142,7 +142,7 @@ struct TIDBitmap
/*
* When iterating over a bitmap in sorted order, a TBMIterator is used to
- * track our progress. There can be several iterators scanning the same
+ * track our progress. There can be several iterators scanning the same
* bitmap concurrently. Note that the bitmap becomes read-only as soon as
* any iterator is created.
*/
@@ -790,7 +790,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno)
*
* If new, the entry is marked as an exact (non-chunk) entry.
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static PagetableEntry *
@@ -870,7 +870,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno)
/*
* tbm_mark_page_lossy - mark the page number as lossily stored
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static void
@@ -891,7 +891,7 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno)
chunk_pageno = pageno - bitno;
/*
- * Remove any extant non-lossy entry for the page. If the page is its own
+ * Remove any extant non-lossy entry for the page. If the page is its own
* chunk header, however, we skip this and handle the case below.
*/
if (bitno != 0)
@@ -956,7 +956,7 @@ tbm_lossify(TIDBitmap *tbm)
*
* Since we are called as soon as nentries exceeds maxentries, we should
* push nentries down to significantly less than maxentries, or else we'll
- * just end up doing this again very soon. We shoot for maxentries/2.
+ * just end up doing this again very soon. We shoot for maxentries/2.
*/
Assert(!tbm->iterating);
Assert(tbm->status == TBM_HASH);
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index 6ceb090e85..de2a6709dd 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -82,11 +82,11 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
* not already contain some entries. The newly added entries will be
* recycled by the MemoryContextDelete below, so we must ensure that the
* list is restored to its former state before exiting. We can do this by
- * truncating the list to its original length. NOTE this assumes that any
+ * truncating the list to its original length. NOTE this assumes that any
* added entries are appended at the end!
*
* We also must take care not to mess up the outer join_rel_hash, if there
- * is one. We can do this by just temporarily setting the link to NULL.
+ * is one. We can do this by just temporarily setting the link to NULL.
* (If we are dealing with enough join rels, which we very likely are, a
* new hash table will get built and used locally.)
*
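[Editor's note: the save-and-restore protocol described above is short enough to sketch (list_length/list_truncate are the real List API; the surrounding field names follow the comment):

    int         savelength = list_length(root->join_rel_list);
    struct HTAB *savehash = root->join_rel_hash;

    root->join_rel_hash = NULL;     /* keep the outer hash table out of play */

    /* ... build and cost a trial join tree here ... */

    /* entries appended by the trial are reclaimed with the memory context;
     * the list itself is restored by truncating to its old length */
    root->join_rel_list = list_truncate(root->join_rel_list, savelength);
    root->join_rel_hash = savehash;
]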
@@ -217,7 +217,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
* Merge a "clump" into the list of existing clumps for gimme_tree.
*
* We try to merge the clump into some existing clump, and repeat if
- * successful. When no more merging is possible, insert the clump
+ * successful. When no more merging is possible, insert the clump
* into the list, preserving the list ordering rule (namely, that
* clumps of larger size appear earlier).
*
@@ -268,7 +268,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* Recursively try to merge the enlarged old_clump with
- * others. When no further merge is possible, we'll reinsert
+ * others. When no further merge is possible, we'll reinsert
* it into the list.
*/
return merge_clump(root, clumps, old_clump, force);
@@ -279,7 +279,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* No merging is possible, so add new_clump as an independent clump, in
- * proper order according to size. We can be fast for the common case
+ * proper order according to size. We can be fast for the common case
* where it has size 1 --- it should always go at the end.
*/
if (clumps == NIL || new_clump->size == 1)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 5777cb2ff0..41eaa2653a 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -425,7 +425,7 @@ set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
* set_append_rel_size
* Set size estimates for an "append relation"
*
- * The passed-in rel and RTE represent the entire append relation. The
+ * The passed-in rel and RTE represent the entire append relation. The
* relation's contents are computed by appending together the output of
* the individual member relations. Note that in the inheritance case,
* the first member relation is actually the same table as is mentioned in
@@ -489,7 +489,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to copy the parent's targetlist and quals to the child,
- * with appropriate substitution of variables. However, only the
+ * with appropriate substitution of variables. However, only the
* baserestrictinfo quals are needed before we can check for
* constraint exclusion; so do that first and then check to see if we
* can disregard this child.
@@ -553,7 +553,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to make child entries in the EquivalenceClass data
- * structures as well. This is needed either if the parent
+ * structures as well. This is needed either if the parent
* participates in some eclass joins (because we will want to consider
* inner-indexscan joins on the individual children) or if the parent
* has useful pathkeys (because we should try to build MergeAppend
@@ -594,7 +594,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* Accumulate per-column estimates too. We need not do anything
- * for PlaceHolderVars in the parent list. If child expression
+ * for PlaceHolderVars in the parent list. If child expression
* isn't a Var, or we didn't record a width estimate for it, we
* have to fall back on a datatype-based estimate.
*
@@ -670,7 +670,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Generate access paths for each member relation, and remember the
- * cheapest path for each one. Also, identify all pathkeys (orderings)
+ * cheapest path for each one. Also, identify all pathkeys (orderings)
* and parameterizations (required_outer sets) available for the member
* relations.
*/
@@ -720,7 +720,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Collect lists of all the available path orderings and
- * parameterizations for all the children. We use these as a
+ * parameterizations for all the children. We use these as a
* heuristic to indicate which sort orderings and parameterizations we
* should build Append and MergeAppend paths for.
*/
@@ -806,7 +806,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
* so that not that many cases actually get considered here.)
*
* The Append node itself cannot enforce quals, so all qual checking must
- * be done in the child paths. This means that to have a parameterized
+ * be done in the child paths. This means that to have a parameterized
* Append path, we must have the exact same parameterization for each
* child path; otherwise some children might be failing to check the
* moved-down quals. To make them match up, we can try to increase the
@@ -977,7 +977,7 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
* joinquals to be checked within the path's scan. However, some existing
* paths might check the available joinquals already while others don't;
* therefore, it's not clear which existing path will be cheapest after
- * reparameterization. We have to go through them all and find out.
+ * reparameterization. We have to go through them all and find out.
*/
cheapest = NULL;
foreach(lc, rel->pathlist)
@@ -1103,7 +1103,7 @@ has_multiple_baserels(PlannerInfo *root)
*
* We don't currently support generating parameterized paths for subqueries
* by pushing join clauses down into them; it seems too expensive to re-plan
- * the subquery multiple times to consider different alternatives. So the
+ * the subquery multiple times to consider different alternatives. So the
* subquery will have exactly one path. (The path will be parameterized
* if the subquery contains LATERAL references, otherwise not.) Since there's
* no freedom of action here, there's no need for a separate set_subquery_size
@@ -1560,7 +1560,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
- * jointree item. These are the components to be joined together.
+ * jointree item. These are the components to be joined together.
* Note that levels_needed == list_length(initial_rels).
*
* Returns the final level of join relations, i.e., the relation that is
@@ -1576,7 +1576,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* needed for these paths need have been instantiated.
*
* Note to plugin authors: the functions invoked during standard_join_search()
- * modify root->join_rel_list and root->join_rel_hash. If you want to do more
+ * modify root->join_rel_list and root->join_rel_hash. If you want to do more
* than one join-order search, you'll probably need to save and restore the
* original states of those data structures. See geqo_eval() for an example.
*/
@@ -1675,7 +1675,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* column k is found to be unsafe to reference, we set unsafeColumns[k] to
* TRUE, but we don't reject the subquery overall since column k might
* not be referenced by some/all quals. The unsafeColumns[] array will be
- * consulted later by qual_is_pushdown_safe(). It's better to do it this
+ * consulted later by qual_is_pushdown_safe(). It's better to do it this
* way than to make the checks directly in qual_is_pushdown_safe(), because
* when the subquery involves set operations we have to check the output
* expressions in each arm of the set op.
@@ -1768,7 +1768,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* check_output_expressions - check subquery's output expressions for safety
*
* There are several cases in which it's unsafe to push down an upper-level
- * qual if it references a particular output column of a subquery. We check
+ * qual if it references a particular output column of a subquery. We check
* each output column of the subquery and set unsafeColumns[k] to TRUE if
* that column is unsafe for a pushed-down qual to reference. The conditions
* checked here are:
@@ -1786,7 +1786,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* of rows returned. (This condition is vacuous for DISTINCT, because then
* there are no non-DISTINCT output columns, so we needn't check. But note
* we are assuming that the qual can't distinguish values that the DISTINCT
- * operator sees as equal. This is a bit shaky but we have no way to test
+ * operator sees as equal. This is a bit shaky but we have no way to test
* for the case, and it's unlikely enough that we shouldn't refuse the
* optimization just because it could theoretically happen.)
*/
@@ -1903,7 +1903,7 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
/*
* It would be unsafe to push down window function calls, but at least for
- * the moment we could never see any in a qual anyhow. (The same applies
+ * the moment we could never see any in a qual anyhow. (The same applies
* to aggregates, which we check for in pull_var_clause below.)
*/
Assert(!contain_window_function(qual));
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index efeea374c2..9b657fb21f 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -58,7 +58,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* See clause_selectivity() for the meaning of the additional parameters.
*
* Our basic approach is to take the product of the selectivities of the
- * subclauses. However, that's only right if the subclauses have independent
+ * subclauses. However, that's only right if the subclauses have independent
* probabilities, and in reality they are often NOT independent. So,
* we want to be smarter where we can.
@@ -75,12 +75,12 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* see that hisel is the fraction of the range below the high bound, while
* losel is the fraction above the low bound; so hisel can be interpreted
* directly as a 0..1 value but we need to convert losel to 1-losel before
- * interpreting it as a value. Then the available range is 1-losel to hisel.
+ * interpreting it as a value. Then the available range is 1-losel to hisel.
* However, this calculation double-excludes nulls, so really we need
* hisel + losel + null_frac - 1.)
*
* If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
- * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
* yields an impossible (negative) result.
*
* A free side-effect is that we can recognize redundant inequalities such
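[Editor's note: as a worked instance of the equation above: hisel = 0.7 and losel = 0.6 give a selected range from 1 - 0.6 = 0.4 up to 0.7, i.e. selectivity 0.7 + 0.6 - 1 = 0.3; had the sum fallen below 1, the impossible negative result would be replaced by DEFAULT_RANGE_INEQ_SEL as described.]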
@@ -174,7 +174,7 @@ clauselist_selectivity(PlannerInfo *root,
{
/*
* If it's not a "<" or ">" operator, just merge the
- * selectivity in generically. But if it's the right oprrest,
+ * selectivity in generically. But if it's the right oprrest,
* add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
@@ -459,14 +459,14 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo,
* nestloop join's inner relation --- varRelid should then be the ID of the
* inner relation.
*
- * When varRelid is 0, all variables are treated as variables. This
+ * When varRelid is 0, all variables are treated as variables. This
* is appropriate for ordinary join clauses and restriction clauses.
*
* jointype is the join type, if the clause is a join clause. Pass JOIN_INNER
* if the clause isn't a join clause.
*
* sjinfo is NULL for a non-join clause, otherwise it provides additional
- * context information about the join being performed. There are some
+ * context information about the join being performed. There are some
* special cases:
* 1. For a special (not INNER) join, sjinfo is always a member of
* root->join_info_list.
@@ -501,7 +501,7 @@ clause_selectivity(PlannerInfo *root,
/*
* If the clause is marked pseudoconstant, then it will be used as a
* gating qual and should not affect selectivity estimates; hence
- * return 1.0. The only exception is that a constant FALSE may be
+ * return 1.0. The only exception is that a constant FALSE may be
* taken as having selectivity 0.0, since it will surely mean no rows
* out of the plan. This case is simple enough that we need not
* bother caching the result.
@@ -520,11 +520,11 @@ clause_selectivity(PlannerInfo *root,
/*
* If possible, cache the result of the selectivity calculation for
- * the clause. We can cache if varRelid is zero or the clause
+ * the clause. We can cache if varRelid is zero or the clause
* contains only vars of that relid --- otherwise varRelid will affect
* the result, so mustn't cache. Outer join quals might be examined
* with either their join's actual jointype or JOIN_INNER, so we need
- * two cache variables to remember both cases. Note: we assume the
+ * two cache variables to remember both cases. Note: we assume the
* result won't change if we are switching the input relations or
* considering a unique-ified case, so we only need one cache variable
* for all non-JOIN_INNER cases.
@@ -685,7 +685,7 @@ clause_selectivity(PlannerInfo *root,
/*
* This is not an operator, so we guess at the selectivity. THIS IS A
* HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
- * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
+ * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
*/
s1 = (Selectivity) 0.3333333;
}
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 326794acb8..848065ee7b 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -24,7 +24,7 @@
*
* Obviously, taking constants for these values is an oversimplification,
* but it's tough enough to get any useful estimates even at this level of
- * detail. Note that all of these parameters are user-settable, in case
+ * detail. Note that all of these parameters are user-settable, in case
* the default values are drastically off for a particular platform.
*
* seq_page_cost and random_page_cost can also be overridden for an individual
@@ -493,7 +493,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
* computed for us by query_planner.
*
* Caller is expected to have ensured that tuples_fetched is greater than zero
- * and rounded to integer (see clamp_row_est). The result will likewise be
+ * and rounded to integer (see clamp_row_est). The result will likewise be
* greater than zero and integral.
*/
double
@@ -694,7 +694,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* For small numbers of pages we should charge spc_random_page_cost
* apiece, while if nearly all the table's pages are being read, it's more
- * appropriate to charge spc_seq_page_cost apiece. The effect is
+ * appropriate to charge spc_seq_page_cost apiece. The effect is
* nonlinear, too. For lack of a better idea, interpolate like this to
* determine the cost per page.
*/
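[Editor's note: the interpolation referred to can be sketched as a square-root blend between the two per-page costs (a sketch of the formula's shape; T is the table's total page count):

    /* Charge close to random cost when few pages are fetched, approaching
     * sequential cost as pages_fetched nears T; sqrt makes it nonlinear. */
    cost_per_page = spc_random_page_cost -
        (spc_random_page_cost - spc_seq_page_cost) *
        sqrt(pages_fetched / T);
]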
@@ -769,7 +769,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
* Estimate the cost of a BitmapAnd node
*
* Note that this considers only the costs of index scanning and bitmap
- * creation, not the eventual heap access. In that sense the object isn't
+ * creation, not the eventual heap access. In that sense the object isn't
* truly a Path, but it has enough path-like properties (costs in particular)
* to warrant treating it as one. We don't bother to set the path rows field,
* however.
@@ -828,7 +828,7 @@ cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
/*
* We estimate OR selectivity on the assumption that the inputs are
* non-overlapping, since that's often the case in "x IN (list)" type
- * situations. Of course, we clamp to 1.0 at the end.
+ * situations. Of course, we clamp to 1.0 at the end.
*
* The runtime cost of the BitmapOr itself is estimated at 100x
* cpu_operator_cost for each tbm_union needed. Probably too small,
@@ -917,7 +917,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
/*
* We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
- * understands how to do it correctly. Therefore, honor enable_tidscan
+ * understands how to do it correctly. Therefore, honor enable_tidscan
* only when CURRENT OF isn't present. Also note that cost_qual_eval
* counts a CurrentOfExpr as having startup cost disable_cost, which we
* subtract off here; that's to prevent other plan types such as seqscan
@@ -1036,7 +1036,7 @@ cost_functionscan(Path *path, PlannerInfo *root,
*
* Currently, nodeFunctionscan.c always executes the functions to
* completion before returning any rows, and caches the results in a
- * tuplestore. So the function eval cost is all startup cost, and per-row
+ * tuplestore. So the function eval cost is all startup cost, and per-row
* costs are minimal.
*
* XXX in principle we ought to charge tuplestore spill costs if the
@@ -1108,7 +1108,7 @@ cost_valuesscan(Path *path, PlannerInfo *root,
*
* Note: this is used for both self-reference and regular CTEs; the
* possible cost differences are below the threshold of what we could
- * estimate accurately anyway. Note that the costs of evaluating the
+ * estimate accurately anyway. Note that the costs of evaluating the
* referenced CTE query are added into the final plan as initplan costs,
* and should NOT be counted here.
*/
@@ -1202,7 +1202,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
* If the total volume exceeds sort_mem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
- * merge pass. We expect about ceil(logM(r)) merge passes where r is the
+ * merge pass. We expect about ceil(logM(r)) merge passes where r is the
* number of initial runs formed and M is the merge order used by tuplesort.c.
* Since the average initial run should be about twice sort_mem, we have
* disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem)))
@@ -1216,7 +1216,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
* accesses (XXX can't we refine that guess?)
*
* By default, we charge two operator evals per tuple comparison, which should
- * be in the right ballpark in most cases. The caller can tweak this by
+ * be in the right ballpark in most cases. The caller can tweak this by
* specifying nonzero comparison_cost; typically that's used for any extra
* work that has to be done to prepare the inputs to the comparison operators.
*
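[Editor's note: worked example of the disk-traffic formula above: a 10,000-page input sorted with sort_mem worth 250 pages forms initial runs of about 2 * 250 = 500 pages, so r = 10000 / 500 = 20 runs; with merge order M = 6, ceil(log6(20)) = 2 merge passes, giving disk traffic of roughly 2 * 10000 * 2 = 40,000 page transfers.]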
@@ -1340,7 +1340,7 @@ cost_sort(Path *path, PlannerInfo *root,
* Determines and returns the cost of a MergeAppend node.
*
* MergeAppend merges several pre-sorted input streams, using a heap that
- * at any given instant holds the next tuple from each stream. If there
+ * at any given instant holds the next tuple from each stream. If there
* are N streams, we need about N*log2(N) tuple comparisons to construct
* the heap at startup, and then for each output tuple, about log2(N)
* comparisons to delete the top heap entry and another log2(N) comparisons
@@ -1499,7 +1499,7 @@ cost_agg(Path *path, PlannerInfo *root,
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
* input path is already sorted appropriately, AGG_SORTED should be
* preferred (since it has no risk of memory overflow). This will happen
* as long as the computed total costs are indeed exactly equal --- but if
@@ -2107,10 +2107,10 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
* Unlike other costsize functions, this routine makes one actual decision:
* whether we should materialize the inner path. We do that either because
* the inner path can't support mark/restore, or because it's cheaper to
- * use an interposed Material node to handle mark/restore. When the decision
+ * use an interposed Material node to handle mark/restore. When the decision
* is cost-based it would be logically cleaner to build and cost two separate
* paths with and without that flag set; but that would require repeating most
- * of the cost calculations, which are not all that cheap. Since the choice
+ * of the cost calculations, which are not all that cheap. Since the choice
* will not affect output pathkeys or startup cost, only total cost, there is
* no possibility of wanting to keep both paths. So it seems best to make
* the decision here and record it in the path's materialize_inner field.
@@ -2174,7 +2174,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
/*
- * Get approx # tuples passing the mergequals. We use approx_tuple_count
+ * Get approx # tuples passing the mergequals. We use approx_tuple_count
* here because we need an estimate done with JOIN_INNER semantics.
*/
mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
@@ -2188,7 +2188,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
* estimated approximately as size of merge join output minus size of
* inner relation. Assume that the distinct key values are 1, 2, ..., and
* denote the number of values of each key in the outer relation as m1,
- * m2, ...; in the inner relation, n1, n2, ... Then we have
+ * m2, ...; in the inner relation, n1, n2, ... Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
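[Editor's note: a small worked instance of the equation: with outer multiplicities m = (2, 1) and inner multiplicities n = (3, 2), size of join = 2*3 + 1*2 = 8 while the inner relation holds 5 tuples, so about 8 - 5 = 3 re-fetches: the three inner tuples of the first key are each read one extra time.]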
@@ -2199,7 +2199,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
* This equation works correctly for outer tuples having no inner match
* (nk = 0), but not for inner tuples having no outer match (mk = 0); we
* are effectively subtracting those from the number of rescanned tuples,
- * when we should not. Can we do better without expensive selectivity
+ * when we should not. Can we do better without expensive selectivity
* computations?
*
* The whole issue is moot if we are working from a unique-ified outer
@@ -2219,7 +2219,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
/*
* Decide whether we want to materialize the inner input to shield it from
- * mark/restore and performing re-fetches. Our cost model for regular
+ * mark/restore and performing re-fetches. Our cost model for regular
* re-fetches is that a re-fetch costs the same as an original fetch,
* which is probably an overestimate; but on the other hand we ignore the
* bookkeeping costs of mark/restore. Not clear if it's worth developing
@@ -2312,7 +2312,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*
* Note: we could adjust for SEMI/ANTI joins skipping some qual
@@ -2464,7 +2464,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
* If inner relation is too big then we will need to "batch" the join,
* which implies writing and reading most of the tuples to disk an extra
* time. Charge seq_page_cost per page, since the I/O should be nice and
- * sequential. Writing the inner rel counts as startup cost, all the rest
+ * sequential. Writing the inner rel counts as startup cost, all the rest
* as run cost.
*/
if (numbatches > 1)
@@ -2695,7 +2695,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
@@ -2748,7 +2748,7 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we will
+ * evaluation. We need to estimate how much of the output we will
* actually need to scan. NOTE: this logic should agree with the
* tuple_fraction estimates used by make_subplan() in
* plan/subselect.c.
@@ -2796,10 +2796,10 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
/*
* cost_rescan
* Given a finished Path, estimate the costs of rescanning it after
- * having done so the first time. For some Path types a rescan is
+ * having done so the first time. For some Path types a rescan is
* cheaper than an original scan (if no parameters change), and this
* function embodies knowledge about that. The default is to return
- * the same costs stored in the Path. (Note that the cost estimates
+ * the same costs stored in the Path. (Note that the cost estimates
* actually stored in Paths are always for first scans.)
*
* This function is not currently intended to model effects such as rescans
@@ -2840,7 +2840,7 @@ cost_rescan(PlannerInfo *root, Path *path,
{
/*
* These plan types materialize their final result in a
- * tuplestore or tuplesort object. So the rescan cost is only
+ * tuplestore or tuplesort object. So the rescan cost is only
* cpu_tuple_cost per tuple, unless the result is large enough
* to spill to disk.
*/
@@ -2865,8 +2865,8 @@ cost_rescan(PlannerInfo *root, Path *path,
{
/*
* These plan types not only materialize their results, but do
- * not implement qual filtering or projection. So they are
- * even cheaper to rescan than the ones above. We charge only
+ * not implement qual filtering or projection. So they are
+ * even cheaper to rescan than the ones above. We charge only
* cpu_operator_cost per tuple. (Note: keep that in sync with
* the run_cost charge in cost_sort, and also see comments in
* cost_material before you change it.)
@@ -3007,7 +3007,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
* evaluation of AND/OR? Probably *not*, because that would make the
* results depend on the clause ordering, and we are not in any position
* to expect that the current ordering of the clauses is the one that's
- * going to end up being used. The above per-RestrictInfo caching would
+ * going to end up being used. The above per-RestrictInfo caching would
* not mix well with trying to re-order clauses anyway.
*
* Another issue that is entirely ignored here is that if a set-returning
@@ -3129,7 +3129,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
else if (IsA(node, AlternativeSubPlan))
{
/*
- * Arbitrarily use the first alternative plan for costing. (We should
+ * Arbitrarily use the first alternative plan for costing. (We should
* certainly only include one alternative, and we don't yet have
* enough information to know which one the executor is most likely to
* use.)
@@ -3273,13 +3273,13 @@ compute_semi_anti_join_factors(PlannerInfo *root,
/*
* jselec can be interpreted as the fraction of outer-rel rows that have
* any matches (this is true for both SEMI and ANTI cases). And nselec is
- * the fraction of the Cartesian product that matches. So, the average
+ * the fraction of the Cartesian product that matches. So, the average
* number of matches for each outer-rel row that has at least one match is
* nselec * inner_rows / jselec.
*
* Note: it is correct to use the inner rel's "rows" count here, even
* though we might later be considering a parameterized inner path with
- * fewer rows. This is because we have included all the join clauses in
+ * fewer rows. This is because we have included all the join clauses in
* the selectivity estimate.
*/
if (jselec > 0) /* protect against zero divide */
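[Editor's note: numerically, with jselec = 0.2 (one outer row in five has a match) and nselec = 0.001 over a 10,000-row inner relation, each matched outer row averages 0.001 * 10000 / 0.2 = 50 inner matches.]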
@@ -3607,7 +3607,7 @@ calc_joinrel_size_estimate(PlannerInfo *root,
double nrows;
/*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity. Note that we are only considering
* clauses that become restriction clauses at this join level; we are not
* double-counting them because they were not considered in estimating the
* sizes of the component rels.
@@ -3665,7 +3665,7 @@ calc_joinrel_size_estimate(PlannerInfo *root,
*
* If we are doing an outer join, take that into account: the joinqual
* selectivity has to be clamped using the knowledge that the output must
- * be at least as large as the non-nullable input. However, any
+ * be at least as large as the non-nullable input. However, any
* pushed-down quals are applied after the outer join, so their
* selectivity applies fully.
*
@@ -3736,7 +3736,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
/*
* Compute per-output-column width estimates by examining the subquery's
- * targetlist. For any output that is a plain Var, get the width estimate
+ * targetlist. For any output that is a plain Var, get the width estimate
* that was made while planning the subquery. Otherwise, we leave it to
* set_rel_width to fill in a datatype-based default estimate.
*/
@@ -3755,7 +3755,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
* The subquery could be an expansion of a view that's had columns
* added to it since the current query was parsed, so that there are
* non-junk tlist columns in it that don't correspond to any column
- * visible at our query level. Ignore such columns.
+ * visible at our query level. Ignore such columns.
*/
if (te->resno < rel->min_attr || te->resno > rel->max_attr)
continue;
@@ -3904,7 +3904,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
* of estimating baserestrictcost, so we set that, and we also set up width
* using what will be purely datatype-driven estimates from the targetlist.
* There is no way to do anything sane with the rows value, so we just put
- * a default estimate and hope that the wrapper can improve on it. The
+ * a default estimate and hope that the wrapper can improve on it. The
* wrapper's GetForeignRelSize function will be called momentarily.
*
* The rel's targetlist and restrictinfo list must have been constructed
@@ -4025,7 +4025,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
/*
* We could be looking at an expression pulled up from a subquery,
- * or a ROW() representing a whole-row child Var, etc. Do what we
+ * or a ROW() representing a whole-row child Var, etc. Do what we
* can using the expression type information.
*/
int32 item_width;
@@ -4132,7 +4132,7 @@ void
set_default_effective_cache_size(void)
{
/*
- * We let check_effective_cache_size() compute the actual setting. Note
+ * We let check_effective_cache_size() compute the actual setting. Note
* that this call is a no-op if the user has supplied a setting (since
* that will have a higher priority than PGC_S_DYNAMIC_DEFAULT).
*/
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index ac12f84fd5..b7aff3775e 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -74,7 +74,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root,
*
* If below_outer_join is true, then the clause was found below the nullable
* side of an outer join, so its sides might validly be both NULL rather than
- * strictly equal. We can still deduce equalities in such cases, but we take
+ * strictly equal. We can still deduce equalities in such cases, but we take
* care to mark an EquivalenceClass if it came from any such clauses. Also,
* we have to check that both sides are either pseudo-constants or strict
* functions of Vars, else they might not both go to NULL above the outer
@@ -141,9 +141,9 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
collation);
/*
- * Reject clauses of the form X=X. These are not as redundant as they
+ * Reject clauses of the form X=X. These are not as redundant as they
* might seem at first glance: assuming the operator is strict, this is
- * really an expensive way to write X IS NOT NULL. So we must not risk
+ * really an expensive way to write X IS NOT NULL. So we must not risk
* just losing the clause, which would be possible if there is already a
* single-element EquivalenceClass containing X. The case is not common
* enough to be worth contorting the EC machinery for, so just reject the
@@ -187,14 +187,14 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* Sweep through the existing EquivalenceClasses looking for matches to
* item1 and item2. These are the possible outcomes:
*
- * 1. We find both in the same EC. The equivalence is already known, so
+ * 1. We find both in the same EC. The equivalence is already known, so
* there's nothing to do.
*
* 2. We find both in different ECs. Merge the two ECs together.
*
* 3. We find just one. Add the other to its EC.
*
- * 4. We find neither. Make a new, two-entry EC.
+ * 4. We find neither. Make a new, two-entry EC.
*
* Note: since all ECs are built through this process or the similar
* search in get_eclass_for_sort_expr(), it's impossible that we'd match
@@ -294,7 +294,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
/*
* We add ec2's items to ec1, then set ec2's ec_merged link to point
- * to ec1 and remove ec2 from the eq_classes list. We cannot simply
+ * to ec1 and remove ec2 from the eq_classes list. We cannot simply
* delete ec2 because that could leave dangling pointers in existing
* PathKeys. We leave it behind with a link so that the merged EC can
* be found.
@@ -406,7 +406,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* Also, the expression's exposed collation must match the EC's collation.
* This is important because in comparisons like "foo < bar COLLATE baz",
* only one of the expressions has the correct exposed collation as we receive
- * it from the parser. Forcing both of them to have it ensures that all
+ * it from the parser. Forcing both of them to have it ensures that all
* variant spellings of such a construct behave the same. Again, we can
* stick on a RelabelType to force the right exposed collation. (It might
* work to not label the collation at all in EC members, but this is risky
@@ -511,22 +511,22 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
* single-member EquivalenceClass for it.
*
* expr is the expression, and nullable_relids is the set of base relids
- * that are potentially nullable below it. We actually only care about
+ * that are potentially nullable below it. We actually only care about
* the set of such relids that are used in the expression; but for caller
* convenience, we perform that intersection step here. The caller need
* only be sure that nullable_relids doesn't omit any nullable rels that
* might appear in the expr.
*
* sortref is the SortGroupRef of the originating SortGroupClause, if any,
- * or zero if not. (It should never be zero if the expression is volatile!)
+ * or zero if not. (It should never be zero if the expression is volatile!)
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (Note: since child EC
+ * considered. Otherwise child members are ignored. (Note: since child EC
* members aren't guaranteed unique, a non-NULL value means that there could
* be more than one EC that matches the expression; if so it's order-dependent
* which one you get. This is annoying but it only happens in corner cases,
- * so for now we live with just reporting the first match. See also
+ * so for now we live with just reporting the first match. See also
* generate_implied_equalities_for_column and match_pathkeys_to_index.)
*
* If create_it is TRUE, we'll build a new EquivalenceClass when there is no
@@ -680,7 +680,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
*
* When an EC contains pseudoconstants, our strategy is to generate
* "member = const1" clauses where const1 is the first constant member, for
- * every other member (including other constants). If we are able to do this
+ * every other member (including other constants). If we are able to do this
* then we don't need any "var = var" comparisons because we've successfully
* constrained all the vars at their points of creation. If we fail to
* generate any of these clauses due to lack of cross-type operators, we fall
@@ -705,7 +705,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
* "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot
* generate "a.x = a.z" as a restriction clause for A.) In this case we mark
* the EC "ec_broken" and fall back to regurgitating its original source
- * RestrictInfos at appropriate times. We do not try to retract any derived
+ * RestrictInfos at appropriate times. We do not try to retract any derived
* clauses already generated from the broken EC, so the resulting plan could
* be poor due to bad selectivity estimates caused by redundant clauses. But
* the correct solution to that is to fix the opfamilies ...
@@ -968,8 +968,8 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* built any join RelOptInfos.
*
* An annoying special case for parameterized scans is that the inner rel can
- * be an appendrel child (an "other rel"). In this case we must generate
- * appropriate clauses using child EC members. add_child_rel_equivalences
+ * be an appendrel child (an "other rel"). In this case we must generate
+ * appropriate clauses using child EC members. add_child_rel_equivalences
* must already have been done for the child rel.
*
* The results are sufficient for use in merge, hash, and plain nestloop join
@@ -983,7 +983,7 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* we consider different join paths, we avoid generating multiple copies:
* whenever we select a particular pair of EquivalenceMembers to join,
* we check to see if the pair matches any original clause (in ec_sources)
- * or previously-built clause (in ec_derives). This saves memory and allows
+ * or previously-built clause (in ec_derives). This saves memory and allows
* re-use of information cached in RestrictInfos.
*
* join_relids should always equal bms_union(outer_relids, inner_rel->relids).
@@ -1079,7 +1079,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
* First, scan the EC to identify member values that are computable at the
* outer rel, at the inner rel, or at this relation but not in either
* input rel. The outer-rel members should already be enforced equal,
- * likewise for the inner-rel members. We'll need to create clauses to
+ * likewise for the inner-rel members. We'll need to create clauses to
* enforce that any newly computable members are all equal to each other
* as well as to at least one input member, plus enforce at least one
* outer-rel member equal to at least one inner-rel member.
@@ -1105,7 +1105,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
}
/*
- * First, select the joinclause if needed. We can equate any one outer
+ * First, select the joinclause if needed. We can equate any one outer
* member to any one inner member, but we have to find a datatype
* combination for which an opfamily member operator exists. If we have
* choices, we prefer simple Var members (possibly with RelabelType) since
@@ -1323,8 +1323,8 @@ create_join_clause(PlannerInfo *root,
/*
* Search to see if we already built a RestrictInfo for this pair of
- * EquivalenceMembers. We can use either original source clauses or
- * previously-derived clauses. The check on opno is probably redundant,
+ * EquivalenceMembers. We can use either original source clauses or
+ * previously-derived clauses. The check on opno is probably redundant,
* but be safe ...
*/
foreach(lc, ec->ec_sources)
@@ -1455,7 +1455,7 @@ create_join_clause(PlannerInfo *root,
*
* Outer join clauses that are marked outerjoin_delayed are special: this
* condition means that one or both VARs might go to null due to a lower
- * outer join. We can still push a constant through the clause, but only
+ * outer join. We can still push a constant through the clause, but only
* if its operator is strict; and we *have to* throw the clause back into
* regular joinclause processing. By keeping the strict join clause,
* we ensure that any null-extended rows that are mistakenly generated due
@@ -1649,7 +1649,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
/*
* Yes it does! Try to generate a clause INNERVAR = CONSTANT for each
- * CONSTANT in the EC. Note that we must succeed with at least one
+ * CONSTANT in the EC. Note that we must succeed with at least one
* constant before we can decide to throw away the outer-join clause.
*/
match = false;
@@ -1938,8 +1938,8 @@ add_child_rel_equivalences(PlannerInfo *root,
continue;
/*
- * No point in searching if parent rel not mentioned in eclass; but
- * we can't tell that for sure if parent rel is itself a child.
+ * No point in searching if parent rel not mentioned in eclass; but we
+ * can't tell that for sure if parent rel is itself a child.
*/
if (parent_rel->reloptkind == RELOPT_BASEREL &&
!bms_is_subset(parent_rel->relids, cur_ec->ec_relids))
@@ -2055,7 +2055,7 @@ mutate_eclass_expressions(PlannerInfo *root,
* is a redundant list of clauses equating the table/index column to each of
* the other-relation values it is known to be equal to. Any one of
* these clauses can be used to create a parameterized path, and there
- * is no value in using more than one. (But it *is* worthwhile to create
+ * is no value in using more than one. (But it *is* worthwhile to create
* a separate parameterized path for each one, since that leads to different
* join orders.)
*
@@ -2102,12 +2102,12 @@ generate_implied_equalities_for_column(PlannerInfo *root,
continue;
/*
- * Scan members, looking for a match to the target column. Note that
+ * Scan members, looking for a match to the target column. Note that
* child EC members are considered, but only when they belong to the
* target relation. (Unlike regular members, the same expression
* could be a child member of more than one EC. Therefore, it's
* potentially order-dependent which EC a child relation's target
- * column gets matched to. This is annoying but it only happens in
+ * column gets matched to. This is annoying but it only happens in
* corner cases, so for now we live with just reporting the first
* match. See also get_eclass_for_sort_expr.)
*/
@@ -2186,7 +2186,7 @@ generate_implied_equalities_for_column(PlannerInfo *root,
* a joinclause involving the two given relations.
*
* This is essentially a very cut-down version of
- * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
+ * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
* incorrectly. Hence we don't bother with details like whether the lack of a
* cross-type operator might prevent the clause from actually being generated.
*/
@@ -2222,7 +2222,7 @@ have_relevant_eclass_joinclause(PlannerInfo *root,
* OK as a possibly-overoptimistic heuristic.
*
* We don't test ec_has_const either, even though a const eclass won't
- * generate real join clauses. This is because if we had "WHERE a.x =
+ * generate real join clauses. This is because if we had "WHERE a.x =
* b.y and a.x = 42", it is worth considering a join between a and b,
* since the join result is likely to be small even though it'll end
* up being an unqualified nestloop.
@@ -2279,7 +2279,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* against the specified relation.
*
* This is just a heuristic test and doesn't have to be exact; it's better
- * to say "yes" incorrectly than "no". Hence we don't bother with details
+ * to say "yes" incorrectly than "no". Hence we don't bother with details
* like whether the lack of a cross-type operator might prevent the clause
* from actually being generated.
*/
@@ -2300,7 +2300,7 @@ eclass_useful_for_merging(EquivalenceClass *eclass,
/*
* Note we don't test ec_broken; if we did, we'd need a separate code path
- * to look through ec_sources. Checking the members anyway is OK as a
+ * to look through ec_sources. Checking the members anyway is OK as a
* possibly-overoptimistic heuristic.
*/
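
The process_equivalence() comments above enumerate four outcomes when a new equality arrives, plus the merge strategy of leaving ec_merged behind as a forward link instead of deleting the absorbed class. A toy sketch of those outcomes; fixed-size arrays and hypothetical names stand in for the real List-based machinery, and capacity checks are omitted for brevity:

#include <stdlib.h>
#include <string.h>

typedef struct EC
{
    const char *members[8];
    int         nmembers;
    struct EC  *ec_merged;      /* set when this EC was folded into another */
} EC;

static EC *classes[16];
static int nclasses = 0;

static EC *
find_ec(const char *item)
{
    for (int i = 0; i < nclasses; i++)
    {
        EC *ec = classes[i];

        if (ec->ec_merged)      /* skip merged-away ECs, as the planner does */
            continue;
        for (int j = 0; j < ec->nmembers; j++)
            if (strcmp(ec->members[j], item) == 0)
                return ec;
    }
    return NULL;
}

static void
process_equality(const char *a, const char *b)
{
    EC *ec1 = find_ec(a);
    EC *ec2 = find_ec(b);

    if (ec1 && ec1 == ec2)
        return;                 /* 1. equivalence already known */
    if (ec1 && ec2)
    {
        /* 2. merge: move ec2's members into ec1, leave a forward link */
        for (int j = 0; j < ec2->nmembers; j++)
            ec1->members[ec1->nmembers++] = ec2->members[j];
        ec2->ec_merged = ec1;
        return;
    }
    if (ec1 || ec2)
    {
        /* 3. found just one: add the other item to its EC */
        EC *ec = ec1 ? ec1 : ec2;

        ec->members[ec->nmembers++] = ec1 ? b : a;
        return;
    }
    /* 4. found neither: make a new two-entry EC */
    EC *ec = calloc(1, sizeof(EC));

    ec->members[0] = a;
    ec->members[1] = b;
    ec->nmembers = 2;
    classes[nclasses++] = ec;
}

Leaving the absorbed class in place with a link mirrors the reason given in the hunk above: existing PathKeys may still point at it.
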
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index a912174fb0..42dcb111ae 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -222,7 +222,7 @@ static Const *string_to_const(const char *str, Oid datatype);
* Note: in cases involving LATERAL references in the relation's tlist, it's
* possible that rel->lateral_relids is nonempty. Currently, we include
* lateral_relids into the parameterization reported for each path, but don't
- * take it into account otherwise. The fact that any such rels *must* be
+ * take it into account otherwise. The fact that any such rels *must* be
* available as parameter sources perhaps should influence our choices of
* index quals ... but for now, it doesn't seem worth troubling over.
* In particular, comments below about "unparameterized" paths should be read
@@ -270,7 +270,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
match_restriction_clauses_to_index(rel, index, &rclauseset);
/*
- * Build index paths from the restriction clauses. These will be
+ * Build index paths from the restriction clauses. These will be
* non-parameterized paths. Plain paths go directly to add_path(),
* bitmap paths are added to bitindexpaths to be handled below.
*/
@@ -278,10 +278,10 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
&bitindexpaths);
/*
- * Identify the join clauses that can match the index. For the moment
- * we keep them separate from the restriction clauses. Note that this
+ * Identify the join clauses that can match the index. For the moment
+ * we keep them separate from the restriction clauses. Note that this
* step finds only "loose" join clauses that have not been merged into
- * EquivalenceClasses. Also, collect join OR clauses for later.
+ * EquivalenceClasses. Also, collect join OR clauses for later.
*/
MemSet(&jclauseset, 0, sizeof(jclauseset));
match_join_clauses_to_index(root, rel, index,
@@ -343,9 +343,9 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
/*
* Likewise, if we found anything usable, generate BitmapHeapPaths for the
- * most promising combinations of join bitmap index paths. Our strategy
+ * most promising combinations of join bitmap index paths. Our strategy
* is to generate one such path for each distinct parameterization seen
- * among the available bitmap index paths. This may look pretty
+ * among the available bitmap index paths. This may look pretty
* expensive, but usually there won't be very many distinct
* parameterizations. (This logic is quite similar to that in
* consider_index_join_clauses, but we're working with whole paths not
@@ -461,7 +461,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
*
* For simplicity in selecting relevant clauses, we represent each set of
* outer rels as a maximum set of clause_relids --- that is, the indexed
- * relation itself is also included in the relids set. considered_relids
+ * relation itself is also included in the relids set. considered_relids
* lists all relids sets we've already tried.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -550,7 +550,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
/*
* If this clause was derived from an equivalence class, the
* clause list may contain other clauses derived from the same
- * eclass. We should not consider that combining this clause with
+ * eclass. We should not consider that combining this clause with
* one of those clauses generates a usefully different
* parameterization; so skip if any clause derived from the same
* eclass would already have been included when using oldrelids.
@@ -633,9 +633,9 @@ get_join_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Add applicable eclass join clauses. The clauses generated for each
+ * Add applicable eclass join clauses. The clauses generated for each
* column are redundant (cf generate_implied_equalities_for_column),
- * so we need at most one. This is the only exception to the general
+ * so we need at most one. This is the only exception to the general
* rule of using all available index clauses.
*/
foreach(lc, eclauseset->indexclauses[indexcol])
@@ -722,7 +722,7 @@ bms_equal_any(Relids relids, List *relids_list)
* bitmap indexpaths are added to *bitindexpaths for later processing.
*
* This is a fairly simple frontend to build_index_paths(). Its reason for
- * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
+ * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
* index AM supports them natively, we should just include them in simple
* index paths. If not, we should exclude them while building simple index
* paths, and then make a separate attempt to include them in bitmap paths.
@@ -736,7 +736,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
ListCell *lc;
/*
- * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
+ * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
* clauses only if the index AM supports them natively.
*/
indexpaths = build_index_paths(root, rel,
@@ -748,7 +748,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
* Submit all the ones that can form plain IndexScan plans to add_path. (A
* plain IndexPath can represent either a plain IndexScan or an
* IndexOnlyScan, but for our purposes here that distinction does not
- * matter. However, some of the indexes might support only bitmap scans,
+ * matter. However, some of the indexes might support only bitmap scans,
* and those we mustn't submit to add_path here.)
*
* Also, pick out the ones that are usable as bitmap scans. For that, we
@@ -792,7 +792,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
* We return a list of paths because (1) this routine checks some cases
* that should cause us to not generate any IndexPath, and (2) in some
* cases we want to consider both a forward and a backward scan, so as
- * to obtain both sort orders. Note that the paths are just returned
+ * to obtain both sort orders. Note that the paths are just returned
* to the caller and not immediately fed to add_path().
*
* At top level, useful_predicate should be exactly the index's predOK flag
@@ -975,7 +975,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * 3. Check if an index-only scan is possible. If we're not building
+ * 3. Check if an index-only scan is possible. If we're not building
* plain indexscans, this isn't relevant since bitmap scans don't support
* index data retrieval anyway.
*/
@@ -1080,13 +1080,13 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
continue;
/*
- * Ignore partial indexes that do not match the query. If a partial
+ * Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK. Otherwise, we have to
* test whether the added clauses are sufficient to imply the
* predicate. If so, we can use the index in the current context.
*
* We set useful_predicate to true iff the predicate was proven using
- * the current set of clauses. This is needed to prevent matching a
+ * the current set of clauses. This is needed to prevent matching a
* predOK index to an arm of an OR, which would be a legal but
* pointlessly inefficient plan. (A better plan will be generated by
* just scanning the predOK index alone, no OR.)
@@ -1256,7 +1256,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
- * given path set. We want to choose a good tradeoff between selectivity
+ * given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
@@ -1283,12 +1283,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. Moreover, it's completely impractical if there are a large
+ * OR clauses. Moreover, it's completely impractical if there are a large
* number of paths, since the work would grow as O(2^N).
*
* As a heuristic, we first check for paths using exactly the same sets of
* WHERE clauses + index predicate conditions, and reject all but the
- * cheapest-to-scan in any such group. This primarily gets rid of indexes
+ * cheapest-to-scan in any such group. This primarily gets rid of indexes
* that include the interesting columns but also irrelevant columns. (In
* situations where the DBA has gone overboard on creating variant
* indexes, this can make for a very large reduction in the number of
@@ -1308,7 +1308,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* costsize.c and clausesel.c aren't very smart about redundant clauses.
* They will usually double-count the redundant clauses, producing a
* too-small selectivity that makes a redundant AND step look like it
- * reduces the total cost. Perhaps someday that code will be smarter and
+ * reduces the total cost. Perhaps someday that code will be smarter and
* we can remove this limitation. (But note that this also defends
* against flat-out duplicate input paths, which can happen because
* match_join_clauses_to_index will find the same OR join clauses that
@@ -1316,7 +1316,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* of.)
*
* For the same reason, we reject AND combinations in which an index
- * predicate clause duplicates another clause. Here we find it necessary
+ * predicate clause duplicates another clause. Here we find it necessary
* to be even stricter: we'll reject a partial index if any of its
* predicate clauses are implied by the set of WHERE clauses and predicate
* clauses used so far. This covers cases such as a condition "x = 42"
@@ -1379,7 +1379,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
/*
* For each surviving index, consider it as an "AND group leader", and see
* whether adding on any of the later indexes results in an AND path with
- * cheaper total cost than before. Then take the cheapest AND group.
+ * cheaper total cost than before. Then take the cheapest AND group.
*/
for (i = 0; i < npaths; i++)
{
@@ -1711,7 +1711,7 @@ find_indexpath_quals(Path *bitmapqual, List **quals, List **preds)
/*
* find_list_position
* Return the given node's position (counting from 0) in the given
- * list of nodes. If it's not equal() to any existing list member,
+ * list of nodes. If it's not equal() to any existing list member,
* add it at the end, and return that position.
*/
static int
@@ -1817,7 +1817,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
* Since we produce parameterized paths before we've begun to generate join
* relations, it's impossible to predict exactly how many times a parameterized
* path will be iterated; we don't know the size of the relation that will be
- * on the outside of the nestloop. However, we should try to account for
+ * on the outside of the nestloop. However, we should try to account for
* multiple iterations somehow in costing the path. The heuristic embodied
* here is to use the rowcount of the smallest other base relation needed in
* the join clauses used by the path. (We could alternatively consider the
@@ -2032,7 +2032,7 @@ match_clause_to_index(IndexOptInfo *index,
* doesn't involve a volatile function or a Var of the index's relation.
* In particular, Vars belonging to other relations of the query are
* accepted here, since a clause of that form can be used in a
- * parameterized indexscan. It's the responsibility of higher code levels
+ * parameterized indexscan. It's the responsibility of higher code levels
* to manage restriction and join clauses appropriately.
*
* Note: we do need to check for Vars of the index's relation on the
@@ -2056,7 +2056,7 @@ match_clause_to_index(IndexOptInfo *index,
* It is also possible to match RowCompareExpr clauses to indexes (but
* currently, only btree indexes handle this). In this routine we will
* report a match if the first column of the row comparison matches the
- * target index column. This is sufficient to guarantee that some index
+ * target index column. This is sufficient to guarantee that some index
* condition can be constructed from the RowCompareExpr --- whether the
* remaining columns match the index too is considered in
* adjust_rowcompare_for_index().
@@ -2094,7 +2094,7 @@ match_clause_to_indexcol(IndexOptInfo *index,
bool plain_op;
/*
- * Never match pseudoconstants to indexes. (Normally this could not
+ * Never match pseudoconstants to indexes. (Normally this could not
* happen anyway, since a pseudoconstant clause couldn't contain a Var,
* but what if someone builds an expression index on a constant? It's not
* totally unreasonable to do so with a partial index, either.)
@@ -2378,7 +2378,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
* We allow any column of the index to match each pathkey; they
* don't have to match left-to-right as you might expect. This is
* correct for GiST, which is the sole existing AM supporting
- * amcanorderbyop. We might need different logic in future for
+ * amcanorderbyop. We might need different logic in future for
* other implementations.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -2429,7 +2429,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
* Note that we currently do not consider the collation of the ordering
* operator's result. In practical cases the result type will be numeric
* and thus have no collation, and it's not very clear what to match to
- * if it did have a collation. The index's collation should match the
+ * if it did have a collation. The index's collation should match the
* ordering operator's input collation, not its result.
*
* If successful, return 'clause' as-is if the indexkey is on the left,
@@ -2679,7 +2679,7 @@ ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel,
* if it is true.
* 2. A list of expressions in this relation, and a corresponding list of
* equality operators. The caller must have already checked that the operators
- * represent equality. (Note: the operators could be cross-type; the
+ * represent equality. (Note: the operators could be cross-type; the
* expressions should correspond to their RHS inputs.)
*
* The caller need only supply equality conditions arising from joins;
@@ -2868,7 +2868,7 @@ match_index_to_operand(Node *operand,
int indkey;
/*
- * Ignore any RelabelType node above the operand. This is needed to be
+ * Ignore any RelabelType node above the operand. This is needed to be
* able to apply indexscanning in binary-compatible-operator cases. Note:
* we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
@@ -2935,10 +2935,10 @@ match_index_to_operand(Node *operand,
* indexscan machinery. The key idea is that these operators allow us
* to derive approximate indexscan qual clauses, such that any tuples
* that pass the operator clause itself must also satisfy the simpler
- * indexscan condition(s). Then we can use the indexscan machinery
+ * indexscan condition(s). Then we can use the indexscan machinery
* to avoid scanning as much of the table as we'd otherwise have to,
* while applying the original operator as a qpqual condition to ensure
- * we deliver only the tuples we want. (In essence, we're using a regular
+ * we deliver only the tuples we want. (In essence, we're using a regular
* index as if it were a lossy index.)
*
* An example of what we're doing is
@@ -2952,7 +2952,7 @@ match_index_to_operand(Node *operand,
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
- * that support boolean equality). We can transform a plain reference
+ * that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
@@ -3374,7 +3374,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily, Oid idxcollation)
/*
* LIKE and regex operators are not members of any btree index opfamily,
* but they can be members of opfamilies for more exotic index types such
- * as GIN. Therefore, we should only do expansion if the operator is
+ * as GIN. Therefore, we should only do expansion if the operator is
* actually not in the opfamily. But checking that requires a syscache
* lookup, so it's best to first see if the operator is one we are
* interested in.
@@ -3492,7 +3492,7 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
* column matches) or a simple OpExpr (if the first-column match is all
* there is). In these cases the modified clause is always "<=" or ">="
* even when the original was "<" or ">" --- this is necessary to match all
- * the rows that could match the original. (We are essentially building a
+ * the rows that could match the original. (We are essentially building a
* lossy version of the row comparison when we do this.)
*
* *indexcolnos receives an integer list of the index column numbers (zero
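
Several hunks above concern choose_bitmap_and()'s heuristic: after rejecting duplicate-clause combinations, try each surviving path as an "AND group leader" and add later paths only when they lower the estimated total cost, then take the cheapest group. A simplified sketch of that greedy step, with a toy cost model standing in for the real estimates from costsize.c:

typedef struct
{
    const char *name;
    double      scan_cost;      /* cost to build this bitmap */
    double      selectivity;    /* fraction of heap left after ANDing it in */
} BitmapPath;

/* toy cost: pay for every index scan, then for the surviving heap fraction */
static double
group_cost(const BitmapPath **g, int n, double heap_cost)
{
    double cost = 0.0;
    double sel = 1.0;

    for (int i = 0; i < n; i++)
    {
        cost += g[i]->scan_cost;
        sel *= g[i]->selectivity;
    }
    return cost + sel * heap_cost;
}

static double
choose_and_group(const BitmapPath *paths, int npaths, double heap_cost)
{
    double best = -1.0;

    for (int i = 0; i < npaths; i++)
    {
        const BitmapPath *group[16] = {&paths[i]};
        int         n = 1;
        double      cost = group_cost(group, n, heap_cost);

        for (int j = i + 1; j < npaths; j++)
        {
            group[n] = &paths[j];
            double with = group_cost(group, n + 1, heap_cost);

            if (with < cost)    /* keep path j only if it helps */
            {
                cost = with;
                n++;
            }
        }
        if (best < 0 || cost < best)
            best = cost;        /* take the cheapest AND group overall */
    }
    return best;
}
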
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index a9961161db..be54f3de0b 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -107,7 +107,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* If it's SEMI or ANTI join, compute correction factors for cost
- * estimation. These will be the same for all paths.
+ * estimation. These will be the same for all paths.
*/
if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
compute_semi_anti_join_factors(root, outerrel, innerrel,
@@ -122,7 +122,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* to the parameter source rel instead of joining to the other input rel.
* This restriction reduces the number of parameterized paths we have to
* deal with at higher join levels, without compromising the quality of
- * the resulting plan. We express the restriction as a Relids set that
+ * the resulting plan. We express the restriction as a Relids set that
* must overlap the parameterization of any proposed join path.
*/
foreach(lc, root->join_info_list)
@@ -155,7 +155,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* However, when a LATERAL subquery is involved, we have to be a bit
* laxer, because there will simply not be any paths for the joinrel that
* aren't parameterized by whatever the subquery is parameterized by,
- * unless its parameterization is resolved within the joinrel. Hence, add
+ * unless its parameterization is resolved within the joinrel. Hence, add
* to param_source_rels anything that is laterally referenced in either
* input and is not in the join already.
*/
@@ -208,7 +208,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 1. Consider mergejoin paths where both relations must be explicitly
- * sorted. Skip this if we can't mergejoin.
+ * sorted. Skip this if we can't mergejoin.
*/
if (mergejoin_allowed)
sort_inner_and_outer(root, joinrel, outerrel, innerrel,
@@ -233,7 +233,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 3. Consider paths where the inner relation need not be explicitly
- * sorted. This includes mergejoins only (nestloops were already built in
+ * sorted. This includes mergejoins only (nestloops were already built in
* match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
@@ -507,7 +507,7 @@ try_hashjoin_path(PlannerInfo *root,
* We already know that the clause is a binary opclause referencing only the
* rels in the current join. The point here is to check whether it has the
* form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
- * rather than mixing outer and inner vars on either side. If it matches,
+ * rather than mixing outer and inner vars on either side. If it matches,
* we set the transient flag outer_is_left to identify which side is which.
*/
static inline bool
@@ -572,7 +572,7 @@ sort_inner_and_outer(PlannerInfo *root,
* sort.
*
* This function intentionally does not consider parameterized input
- * paths, except when the cheapest-total is parameterized. If we did so,
+ * paths, except when the cheapest-total is parameterized. If we did so,
* we'd have a combinatorial explosion of mergejoin paths of dubious
* value. This interacts with decisions elsewhere that also discriminate
* against mergejoins with parameterized inputs; see comments in
@@ -619,7 +619,7 @@ sort_inner_and_outer(PlannerInfo *root,
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * partially redundant (refer to the same EquivalenceClasses). Therefore,
+ * partially redundant (refer to the same EquivalenceClasses). Therefore,
* what we do is convert the mergeclause list to a list of canonical
* pathkeys, and then consider different orderings of the pathkeys.
*
@@ -713,7 +713,7 @@ sort_inner_and_outer(PlannerInfo *root,
* cheapest-total inner-indexscan path (if any), and one on the
* cheapest-startup inner-indexscan path (if different).
*
- * We also consider mergejoins if mergejoin clauses are available. We have
+ * We also consider mergejoins if mergejoin clauses are available. We have
* two ways to generate the inner path for a mergejoin: sort the cheapest
* inner path, or use an inner path that is already suitably ordered for the
* merge. If we have several mergeclauses, it could be that there is no inner
@@ -845,8 +845,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* If we need to unique-ify the outer path, it's pointless to consider
- * any but the cheapest outer. (XXX we don't consider parameterized
- * outers, nor inners, for unique-ified cases. Should we?)
+ * any but the cheapest outer. (XXX we don't consider parameterized
+ * outers, nor inners, for unique-ified cases. Should we?)
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -887,7 +887,7 @@ match_unsorted_outer(PlannerInfo *root,
{
/*
* Consider nestloop joins using this outer path and various
- * available paths for the inner relation. We consider the
+ * available paths for the inner relation. We consider the
* cheapest-total paths for each available parameterization of the
* inner relation, including the unparameterized case.
*/
@@ -1042,7 +1042,7 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
* destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
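
The clause-shape test described above ("outerrel_expr op innerrel_expr" or the reverse, never mixing sides) reduces to two subset tests on relid sets, recording which side turned out to be the outer one. A sketch with relid sets modelled as plain bitmasks, a hypothetical simplification of the real Bitmapset type:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t Relids;

typedef struct
{
    Relids      left_relids;    /* rels referenced by the left operand */
    Relids      right_relids;   /* rels referenced by the right operand */
    bool        outer_is_left;  /* transient flag, set on success */
} Clause;

static bool
is_subset(Relids a, Relids b)
{
    return (a & ~b) == 0;
}

static bool
clause_sides_match_join(Clause *c, Relids outerrelids, Relids innerrelids)
{
    if (is_subset(c->left_relids, outerrelids) &&
        is_subset(c->right_relids, innerrelids))
    {
        c->outer_is_left = true;    /* lefthand side is the outer rel */
        return true;
    }
    if (is_subset(c->left_relids, innerrelids) &&
        is_subset(c->right_relids, outerrelids))
    {
        c->outer_is_left = false;   /* righthand side is the outer rel */
        return true;
    }
    return false;                   /* mixes outer and inner vars on a side */
}
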
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 05eaef525d..610892890f 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -213,7 +213,7 @@ join_search_one_level(PlannerInfo *root, int level)
/*----------
* When special joins are involved, there may be no legal way
- * to make an N-way join for some values of N. For example consider
+ * to make an N-way join for some values of N. For example consider
*
* SELECT ... FROM t1 WHERE
* x IN (SELECT ... FROM t2,t3 WHERE ...) AND
@@ -337,7 +337,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
ListCell *l;
/*
- * Ensure output params are set on failure return. This is just to
+ * Ensure output params are set on failure return. This is just to
* suppress uninitialized-variable warnings from overly anal compilers.
*/
*sjinfo_p = NULL;
@@ -345,7 +345,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* If we have any special joins, the proposed join might be illegal; and
- * in any case we have to determine its join type. Scan the join info
+ * in any case we have to determine its join type. Scan the join info
* list for conflicts.
*/
match_sjinfo = NULL;
@@ -609,7 +609,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
/*
* If it's a plain inner join, then we won't have found anything in
- * join_info_list. Make up a SpecialJoinInfo so that selectivity
+ * join_info_list. Make up a SpecialJoinInfo so that selectivity
* estimation functions will know what's being joined.
*/
if (sjinfo == NULL)
@@ -916,7 +916,7 @@ have_join_order_restriction(PlannerInfo *root,
*
* Essentially, this tests whether have_join_order_restriction() could
* succeed with this rel and some other one. It's OK if we sometimes
- * say "true" incorrectly. (Therefore, we don't bother with the relatively
+ * say "true" incorrectly. (Therefore, we don't bother with the relatively
* expensive has_legal_joinclause test.)
*/
static bool
@@ -1027,7 +1027,7 @@ is_dummy_rel(RelOptInfo *rel)
* dummy.
*
* Also, when called during GEQO join planning, we are in a short-lived
- * memory context. We must make sure that the dummy path attached to a
+ * memory context. We must make sure that the dummy path attached to a
* baserel survives the GEQO cycle, else the baserel is trashed for future
* GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
* we don't want the dummy path to clutter the main planning context. Upshot
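
join_search_one_level(), whose comments appear above, forms level-k join relations from smaller ones, skipping pairs that overlap and preferring pairs connected by a join clause. A left-deep-only toy sketch (the real code also considers bushy joins, deduplicates relid sets via a lookup, and applies the special-join legality rules discussed above):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t Relids;

#define MAX_RELS 8
#define MAX_PER_LEVEL 64

static Relids levels[MAX_RELS + 1][MAX_PER_LEVEL];
static int    nlevel[MAX_RELS + 1];

static bool
have_relevant_joinclause(Relids a, Relids b)
{
    /* stand-in: the real test consults join clauses and eclasses */
    (void) a;
    (void) b;
    return true;
}

static void
join_search_one_level(int k)
{
    for (int i = 0; i < nlevel[k - 1]; i++)
    {
        for (int j = 0; j < nlevel[1]; j++)
        {
            Relids a = levels[k - 1][i];
            Relids b = levels[1][j];

            if ((a & b) != 0)
                continue;       /* overlapping relid sets can't be joined */
            if (!have_relevant_joinclause(a, b))
                continue;       /* prefer clause-connected pairs */
            levels[k][nlevel[k]++] = a | b;
        }
    }
}
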
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 9179c61cbd..5d953dfb45 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -46,7 +46,7 @@ static bool right_merge_direction(PlannerInfo *root, PathKey *pathkey);
* entry if there's not one already.
*
* Note that this function must not be used until after we have completed
- * merging EquivalenceClasses. (We don't try to enforce that here; instead,
+ * merging EquivalenceClasses. (We don't try to enforce that here; instead,
* equivclass.c will complain if a merge occurs after root->canon_pathkeys
* has become nonempty.)
*/
@@ -120,7 +120,7 @@ make_canonical_pathkey(PlannerInfo *root,
*
* Both the given pathkey and the list members must be canonical for this
* to work properly, but that's okay since we no longer ever construct any
- * non-canonical pathkeys. (Note: the notion of a pathkey *list* being
+ * non-canonical pathkeys. (Note: the notion of a pathkey *list* being
* canonical includes the additional requirement of no redundant entries,
* which is exactly what we are checking for here.)
*
@@ -162,7 +162,7 @@ pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys)
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (See the comments for
+ * considered. Otherwise child members are ignored. (See the comments for
* get_eclass_for_sort_expr.)
*
* create_it is TRUE if we should create any missing EquivalenceClass
@@ -192,7 +192,7 @@ make_pathkey_from_sortinfo(PlannerInfo *root,
/*
* EquivalenceClasses need to contain opfamily lists based on the family
* membership of mergejoinable equality operators, which could belong to
- * more than one opfamily. So we have to look up the opfamily's equality
+ * more than one opfamily. So we have to look up the opfamily's equality
* operator and get its membership.
*/
equality_op = get_opfamily_member(opfamily,
@@ -355,7 +355,7 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
- * that first. (XXX is that still true?)
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
@@ -397,7 +397,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
- * that first. (XXX is that still true?)
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_fractional_path_costs(matched_path, path, fraction) <= 0)
@@ -555,7 +555,7 @@ build_expression_pathkey(PlannerInfo *root,
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
- * result, in the terms of the outer query. This is essentially a
+ * result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
@@ -608,7 +608,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Note: it might look funny to be setting sortref = 0 for a
- * reference to a volatile sub_eclass. However, the
+ * reference to a volatile sub_eclass. However, the
* expression is *not* volatile in the outer query: it's just
* a Var referencing whatever the subquery emitted. (IOW, the
* outer query isn't going to re-execute the volatile
@@ -645,7 +645,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Otherwise, the sub_pathkey's EquivalenceClass could contain
* multiple elements (representing knowledge that multiple items
- * are effectively equal). Each element might match none, one, or
+ * are effectively equal). Each element might match none, one, or
* more of the output columns that are visible to the outer query.
* This means we may have multiple possible representations of the
* sub_pathkey in the context of the outer query. Ideally we
@@ -873,7 +873,7 @@ make_pathkeys_for_sortclauses(PlannerInfo *root,
* right sides.
*
* Note this is called before EC merging is complete, so the links won't
- * necessarily point to canonical ECs. Before they are actually used for
+ * necessarily point to canonical ECs. Before they are actually used for
* anything, update_mergeclause_eclasses must be called to ensure that
* they've been updated to point to canonical ECs.
*/
@@ -1007,7 +1007,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* It's possible that multiple matching clauses might have different
* ECs on the other side, in which case the order we put them into our
* result makes a difference in the pathkeys required for the other
- * input path. However this routine hasn't got any info about which
+ * input path. However this routine hasn't got any info about which
* order would be best, so we don't worry about that.
*
* It's also possible that the selected mergejoin clauses produce
@@ -1038,7 +1038,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
@@ -1070,7 +1070,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* Returns a pathkeys list that can be applied to the outer relation.
*
* Since we assume here that a sort is required, there is no particular use
- * in matching any available ordering of the outerrel. (joinpath.c has an
+ * in matching any available ordering of the outerrel. (joinpath.c has an
* entirely separate code path for considering sort-free mergejoins.) Rather,
* it's interesting to try to match the requested query_pathkeys so that a
* second output sort may be avoided; and failing that, we try to list "more
@@ -1401,7 +1401,7 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched)
@@ -1431,7 +1431,7 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey)
pathkey->pk_opfamily == query_pathkey->pk_opfamily)
{
/*
- * Found a matching query sort column. Prefer this pathkey's
+ * Found a matching query sort column. Prefer this pathkey's
* direction iff it matches. Note that we ignore pk_nulls_first,
* which means that a sort might be needed anyway ... but we still
* want to prefer only one of the two possible directions, and we
@@ -1507,13 +1507,13 @@ truncate_useless_pathkeys(PlannerInfo *root,
* useful according to truncate_useless_pathkeys().
*
* This is a cheap test that lets us skip building pathkeys at all in very
- * simple queries. It's OK to err in the direction of returning "true" when
+ * simple queries. It's OK to err in the direction of returning "true" when
* there really aren't any usable pathkeys, but erring in the other direction
* is bad --- so keep this in sync with the routines above!
*
* We could make the test more complex, for example checking to see if any of
* the joinclauses are really mergejoinable, but that likely wouldn't win
- * often enough to repay the extra cycles. Queries with neither a join nor
+ * often enough to repay the extra cycles. Queries with neither a join nor
* a sort are reasonably common, though, so this much work seems worthwhile.
*/
bool
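
The cost-first comparison flagged above with "(XXX is that still true?)" orders the two tests in get_cheapest_path_for_pathkeys(): compare costs before doing the more expensive pathkey-containment check. A toy sketch of that loop, with pathkeys reduced to short zero-terminated lists of sort-column numbers (a hypothetical simplification):

#include <stdbool.h>
#include <stddef.h>

typedef struct
{
    double      total_cost;
    int         sortkeys[4];    /* leading sort columns, 0-terminated */
} Path;

/* does 'keys' appear as a prefix of the path's sort order? */
static bool
pathkeys_contained_in(const int *keys, const Path *path)
{
    for (int i = 0; keys[i] != 0; i++)
        if (path->sortkeys[i] != keys[i])
            return false;
    return true;
}

static const Path *
cheapest_path_for_pathkeys(const Path *paths, int npaths, const int *keys)
{
    const Path *matched = NULL;

    for (int i = 0; i < npaths; i++)
    {
        const Path *p = &paths[i];

        /* cost test first: much cheaper than the pathkey comparison */
        if (matched && matched->total_cost <= p->total_cost)
            continue;
        if (pathkeys_contained_in(keys, p))
            matched = p;
    }
    return matched;
}
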
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index a751a7d36c..a31d67493b 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -19,7 +19,7 @@
* representation all the way through to execution.
*
* There is currently no special support for joins involving CTID; in
- * particular nothing corresponding to best_inner_indexscan(). Since it's
+ * particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
@@ -57,7 +57,7 @@ static List *TidQualFromRestrictinfo(List *restrictinfo, int varno);
* or
* pseudoconstant = CTID
*
- * We check that the CTID Var belongs to relation "varno". That is probably
+ * We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/
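
Per the header comment above, TidQualFromRestrictinfo() accepts "CTID = pseudoconstant" with either operand order, checking that the CTID Var really belongs to the relation under consideration. A sketch of that shape test on a toy expression node; SelfItemPointerAttributeNumber is CTID's actual system attribute number in PostgreSQL, while the rest of the names are simplifications:

#include <stdbool.h>

typedef enum { T_VAR, T_CONST } NodeTag;

typedef struct
{
    NodeTag     tag;
    int         varno;          /* for T_VAR: which relation */
    int         varattno;       /* for T_VAR: which column */
} Node;

#define SelfItemPointerAttributeNumber (-1)     /* CTID's attno */

static bool
is_ctid_var(const Node *n, int varno)
{
    return n->tag == T_VAR &&
        n->varno == varno &&
        n->varattno == SelfItemPointerAttributeNumber;
}

/* accept "CTID = pseudoconstant" with either operand order */
static bool
is_tid_qual(const Node *lhs, const Node *rhs, int varno)
{
    if (is_ctid_var(lhs, varno) && rhs->tag == T_CONST)
        return true;
    if (is_ctid_var(rhs, varno) && lhs->tag == T_CONST)
        return true;
    return false;
}
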
diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c
index 523a1e75f8..129fc3dfae 100644
--- a/src/backend/optimizer/plan/analyzejoins.c
+++ b/src/backend/optimizer/plan/analyzejoins.c
@@ -40,7 +40,7 @@ static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved);
* Check for relations that don't actually need to be joined at all,
* and remove them from the query.
*
- * We are passed the current joinlist and return the updated list. Other
+ * We are passed the current joinlist and return the updated list. Other
* data structures that have to be updated are accessible via "root".
*/
List *
@@ -90,7 +90,7 @@ restart:
* Restart the scan. This is necessary to ensure we find all
* removable joins independently of ordering of the join_info_list
* (note that removal of attr_needed bits may make a join appear
- * removable that did not before). Also, since we just deleted the
+ * removable that did not before). Also, since we just deleted the
* current list cell, we'd have to have some kluge to continue the
* list scan anyway.
*/
@@ -107,7 +107,7 @@ restart:
* We already know that the clause is a binary opclause referencing only the
* rels in the current join. The point here is to check whether it has the
* form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
- * rather than mixing outer and inner vars on either side. If it matches,
+ * rather than mixing outer and inner vars on either side. If it matches,
* we set the transient flag outer_is_left to identify which side is which.
*/
static inline bool
@@ -154,7 +154,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* Currently, we only know how to remove left joins to a baserel with
- * unique indexes. We can check most of these criteria pretty trivially
+ * unique indexes. We can check most of these criteria pretty trivially
* to avoid doing useless extra work. But checking whether any of the
* indexes are unique would require iterating over the indexlist, so for
* now we just make sure there are indexes of some sort or other. If none
@@ -203,7 +203,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo)
* actually references some inner-rel attributes; but the correct check
* for that is relatively expensive, so we first check against ph_eval_at,
* which must mention the inner rel if the PHV uses any inner-rel attrs as
- * non-lateral references. Note that if the PHV's syntactic scope is just
+ * non-lateral references. Note that if the PHV's syntactic scope is just
* the inner rel, we can't drop the rel even if the PHV is variable-free.
*/
foreach(l, root->placeholder_list)
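
remove_useless_joins(), discussed above, restarts its scan after each removal: dropping attr_needed bits can make a previously unremovable join removable, and the list cell just deleted cannot be iterated past anyway. A sketch of that restart pattern on a toy singly linked list, with the removability analysis reduced to a flag:

#include <stdbool.h>
#include <stdlib.h>

typedef struct Join
{
    int         id;
    bool        removable;      /* stand-in for the real join_is_removable() */
    struct Join *next;
} Join;

static Join *
remove_useless_joins(Join *list)
{
restart:
    for (Join **link = &list; *link != NULL; link = &(*link)->next)
    {
        Join *j = *link;

        if (j->removable)
        {
            *link = j->next;    /* unlink the current cell */
            free(j);

            /*
             * Restart the scan: the removal may have made earlier joins
             * removable, and the cell we were on no longer exists.
             */
            goto restart;
        }
    }
    return list;
}
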
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 784805fbf4..4b641a2ca1 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -171,7 +171,7 @@ static Material *make_material(Plan *lefttree);
/*
* create_plan
* Creates the access plan for a query by recursively processing the
- * desired tree of pathnodes, starting at the node 'best_path'. For
+ * desired tree of pathnodes, starting at the node 'best_path'. For
* every pathnode found, we create a corresponding plan node containing
* appropriate id, target list, and qualification information.
*
@@ -286,7 +286,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
/*
* For table scans, rather than using the relation targetlist (which is
* only those Vars actually needed by the query), we prefer to generate a
- * tlist containing all Vars in order. This will allow the executor to
+ * tlist containing all Vars in order. This will allow the executor to
* optimize away projection of the table tuples, if possible. (Note that
* planner.c may replace the tlist we generate here, forcing projection to
* occur.)
@@ -523,7 +523,7 @@ use_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
*
* If the plan node immediately above a scan would prefer to get only
* needed Vars and not a physical tlist, it must call this routine to
- * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
+ * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* and Material nodes want this, so they don't have to store useless columns.
*/
static void
@@ -654,7 +654,7 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
/*
* * Expensive function pullups may have pulled local predicates * into
- * this path node. Put them in the qpqual of the plan node. * JMH,
+ * this path node. Put them in the qpqual of the plan node. * JMH,
* 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
@@ -1170,10 +1170,10 @@ create_indexscan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by nodeIndexscan.c),
* but if there are any "special" operators involved then they must be
- * included in qpqual. The upshot is that qpqual must contain
+ * included in qpqual. The upshot is that qpqual must contain
* scan_clauses minus whatever appears in indexquals.
*
* In normal cases simple pointer equality checks will be enough to spot
@@ -1310,15 +1310,15 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by
* nodeBitmapHeapscan.c), but if there are any "special" operators
- * involved then they must be added to qpqual. The upshot is that qpqual
+ * involved then they must be added to qpqual. The upshot is that qpqual
* must contain scan_clauses minus whatever appears in indexquals.
*
* This loop is similar to the comparable code in create_indexscan_plan(),
* but with some differences because it has to compare the scan clauses to
- * stripped (no RestrictInfos) indexquals. See comments there for more
+ * stripped (no RestrictInfos) indexquals. See comments there for more
* info.
*
* In normal cases simple equal() checks will be enough to spot duplicate
@@ -1363,7 +1363,7 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* When dealing with special operators, we will at this point have
- * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
+ * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
* 'em from bitmapqualorig, since there's no point in making the tests
* twice.
*/
@@ -1475,7 +1475,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -1571,7 +1571,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* We know that the index predicate must have been implied by the
* query condition as a whole, but it may or may not be implied by
- * the conditions that got pushed into the bitmapqual. Avoid
+ * the conditions that got pushed into the bitmapqual. Avoid
* generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
@@ -1954,14 +1954,14 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
Assert(rte->rtekind == RTE_RELATION);
/*
- * Sort clauses into best execution order. We do this first since the FDW
+ * Sort clauses into best execution order. We do this first since the FDW
* might have more info than we do and wish to adjust the ordering.
*/
scan_clauses = order_qual_clauses(root, scan_clauses);
/*
* Let the FDW perform its processing on the restriction clauses and
- * generate the plan node. Note that the FDW might remove restriction
+ * generate the plan node. Note that the FDW might remove restriction
* clauses that it intends to execute remotely, or even add more (if it
* has selected some join clauses for remote use but also wants them
* rechecked locally).
@@ -2615,7 +2615,7 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
*
* Note that after doing this, we might have different
* representations of the contents of the same PHV in different
- * parts of the plan tree. This is OK because equal() will just
+ * parts of the plan tree. This is OK because equal() will just
* match on phid/phlevelsup, so setrefs.c will still recognize an
* upper-level reference to a lower-level copy of the same PHV.
*/
@@ -2793,7 +2793,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path)
/*
* Check to see if the indexkey is on the right; if so, commute
- * the clause. The indexkey should be the side that refers to
+ * the clause. The indexkey should be the side that refers to
* (only) the base relation.
*/
if (!bms_equal(rinfo->left_relids, index->rel->relids))
@@ -2887,7 +2887,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path)
*
* This is a simplified version of fix_indexqual_references. The input does
* not have RestrictInfo nodes, and we assume that indxpath.c already
- * commuted the clauses to put the index keys on the left. Also, we don't
+ * commuted the clauses to put the index keys on the left. Also, we don't
* bother to support any cases except simple OpExprs, since nothing else
* is allowed for ordering operators.
*/
@@ -3126,7 +3126,7 @@ order_qual_clauses(PlannerInfo *root, List *clauses)
/*
* Sort. We don't use qsort() because it's not guaranteed stable for
- * equal keys. The expected number of entries is small enough that a
+ * equal keys. The expected number of entries is small enough that a
* simple insertion sort should be good enough.
*/
for (i = 1; i < nitems; i++)
@@ -3771,7 +3771,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
* prepare_sort_from_pathkeys
* Prepare to sort according to given pathkeys
*
- * This is used to set up for both Sort and MergeAppend nodes. It calculates
+ * This is used to set up for both Sort and MergeAppend nodes. It calculates
* the executor's representation of the sort key information, and adjusts the
* plan targetlist if needed to add resjunk sort columns.
*
@@ -3784,7 +3784,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
*
* We must convert the pathkey information into arrays of sort key column
* numbers, sort operator OIDs, collation OIDs, and nulls-first flags,
- * which is the representation the executor wants. These are returned into
+ * which is the representation the executor wants. These are returned into
* the output parameters *p_numsortkeys etc.
*
* When looking for matches to an EquivalenceClass's members, we will only
@@ -4229,7 +4229,7 @@ make_material(Plan *lefttree)
* materialize_finished_plan: stick a Material node atop a completed plan
*
* There are a couple of places where we want to attach a Material node
- * after completion of subquery_planner(). This currently requires hackery.
+ * after completion of subquery_planner().  This currently requires hackery.
* Since subquery_planner has already run SS_finalize_plan on the subplan
* tree, we have to kluge up parameter lists for the Material node.
* Possibly this could be fixed by postponing SS_finalize_plan processing
@@ -4435,7 +4435,7 @@ make_group(PlannerInfo *root,
/*
* distinctList is a list of SortGroupClauses, identifying the targetlist items
- * that should be considered by the Unique filter. The input path must
+ * that should be considered by the Unique filter.  The input path must
* already be sorted accordingly.
*/
Unique *
@@ -4453,7 +4453,7 @@ make_unique(Plan *lefttree, List *distinctList)
/*
* Charge one cpu_operator_cost per comparison per input tuple. We assume
- * all columns get compared at most of the tuples. (XXX probably this is
+ * all columns get compared at most of the tuples.  (XXX probably this is
* an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
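
[As a worked instance of the formula on the line above: with the default cpu_operator_cost of 0.0025, and purely illustrative row and column counts, the added cost works out as follows.]

    #include <stdio.h>

    int
    main(void)
    {
        double      cpu_operator_cost = 0.0025; /* PostgreSQL default */
        double      plan_rows = 100000.0;       /* made-up input row estimate */
        int         numCols = 3;                /* columns in the Unique list */

        /* one comparison per column per input tuple, per the comment above */
        double      added_cost = cpu_operator_cost * plan_rows * numCols;

        printf("Unique adds %.1f cost units\n", added_cost);    /* 750.0 */
        return 0;
    }
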
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index b57bfd2176..f88e493edb 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -87,12 +87,12 @@ static void check_hashjoinable(RestrictInfo *restrictinfo);
* appearing in the jointree.
*
* The initial invocation must pass root->parse->jointree as the value of
- * jtnode. Internally, the function recurses through the jointree.
+ * jtnode.  Internally, the function recurses through the jointree.
*
* At the end of this process, there should be one baserel RelOptInfo for
* every non-join RTE that is used in the query. Therefore, this routine
* is the only place that should call build_simple_rel with reloptkind
- * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
+ * RELOPT_BASEREL.  (Note: build_simple_rel recurses internally to build
* "other rel" RelOptInfos for the members of any appendrels we find here.)
*/
void
@@ -234,10 +234,10 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
* means setting suitable where_needed values for them.
*
* Note that this only deals with lateral references in unflattened LATERAL
- * subqueries. When we flatten a LATERAL subquery, its lateral references
+ * subqueries.  When we flatten a LATERAL subquery, its lateral references
* become plain Vars in the parent query, but they may have to be wrapped in
* PlaceHolderVars if they need to be forced NULL by outer joins that don't
- * also null the LATERAL subquery. That's all handled elsewhere.
+ * also null the LATERAL subquery.  That's all handled elsewhere.
*
* This has to run before deconstruct_jointree, since it might result in
* creation of PlaceHolderInfos.
@@ -360,7 +360,7 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex)
/*
* We mark the Vars as being "needed" at the LATERAL RTE. This is a bit
* of a cheat: a more formal approach would be to mark each one as needed
- * at the join of the LATERAL RTE with its source RTE. But it will work,
+ * at the join of the LATERAL RTE with its source RTE.  But it will work,
* and it's much less tedious than computing a separate where_needed for
* each Var.
*/
@@ -568,7 +568,7 @@ create_lateral_join_info(PlannerInfo *root)
* add_lateral_info
* Add a LateralJoinInfo to root->lateral_info_list, if needed
*
- * We suppress redundant list entries. The passed Relids are copied if saved.
+ * We suppress redundant list entries.  The passed Relids are copied if saved.
*/
static void
add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs)
@@ -615,7 +615,7 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs)
* deconstruct_jointree
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate restrictinfo and joininfo
- * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
+ * lists belonging to base RelOptInfos.  Also, add SpecialJoinInfo nodes
* to root->join_info_list for any outer joins appearing in the query tree.
* Return a "joinlist" data structure showing the join order decisions
* that need to be made by make_one_rel().
@@ -632,9 +632,9 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs)
* be evaluated at the lowest level where all the variables it mentions are
* available. However, we cannot push a qual down into the nullable side(s)
* of an outer join since the qual might eliminate matching rows and cause a
- * NULL row to be incorrectly emitted by the join. Therefore, we artificially
+ * NULL row to be incorrectly emitted by the join.  Therefore, we artificially
* OR the minimum-relids of such an outer join into the required_relids of
- * clauses appearing above it. This forces those clauses to be delayed until
+ * clauses appearing above it.  This forces those clauses to be delayed until
* application of the outer join (or maybe even higher in the join tree).
*/
List *
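
[To make the OR-the-relids-in idea above concrete: PostgreSQL's Relids are Bitmapsets, but the mechanism can be sketched with a plain bitmask. Toy types only, not the real Bitmapset API.]

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t Relids;    /* toy stand-in: one bit per base rel index */

    #define RELID_BIT(n) ((Relids) 1 << (n))

    int
    main(void)
    {
        Relids      clause_relids = RELID_BIT(3);   /* qual mentions only rel 3 */
        Relids      oj_min_relids = RELID_BIT(1) | RELID_BIT(2);   /* lower OJ */

        /*
         * OR the outer join's minimum relids into the clause's required set,
         * so the clause cannot be evaluated until that join has been formed.
         */
        Relids      required_relids = clause_relids | oj_min_relids;

        printf("required = 0x%llx\n", (unsigned long long) required_relids);
        return 0;               /* prints 0xe: bits 1, 2, 3 set */
    }
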
@@ -755,7 +755,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
*inner_join_rels = *qualscope;
/*
- * Try to process any quals postponed by children. If they need
+ * Try to process any quals postponed by children.  If they need
* further postponement, add them to my output postponed_qual_list.
*/
foreach(l, child_postponed_quals)
@@ -807,7 +807,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
* regard for whether this level is an outer join, which is correct.
* Then we place our own join quals, which are restricted by lower
* outer joins in any case, and are forced to this level if this is an
- * outer join and they mention the outer side. Finally, if this is an
+ * outer join and they mention the outer side.  Finally, if this is an
* outer join, we create a join_info_list entry for the join. This
* will prevent quals above us in the join tree that use those rels
* from being pushed down below this level. (It's okay for upper
@@ -897,7 +897,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
nullable_rels);
/*
- * Try to process any quals postponed by children. If they need
+ * Try to process any quals postponed by children.  If they need
* further postponement, add them to my output postponed_qual_list.
* Quals that can be processed now must be included in my_quals, so
* that they'll be handled properly in make_outerjoininfo.
@@ -1059,7 +1059,7 @@ make_outerjoininfo(PlannerInfo *root,
* complain if any nullable rel is FOR [KEY] UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
- * parser. It's because the parser hasn't got enough info --- consider
+ * parser.  It's because the parser hasn't got enough info --- consider
* FOR UPDATE applied to a view. Only after rewriting and flattening do
* we know whether the view contains an outer join.
*
@@ -1074,8 +1074,8 @@ make_outerjoininfo(PlannerInfo *root,
(jointype == JOIN_FULL && bms_is_member(rc->rti, left_rels)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s cannot be applied to the nullable side of an outer join",
LCS_asString(rc->strength))));
}
@@ -1117,7 +1117,7 @@ make_outerjoininfo(PlannerInfo *root,
min_lefthand = bms_intersect(clause_relids, left_rels);
/*
- * Similarly for required RHS. But here, we must also include any lower
+ * Similarly for required RHS.  But here, we must also include any lower
* inner joins, to ensure we don't try to commute with any of them.
*/
min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels),
@@ -1169,7 +1169,7 @@ make_outerjoininfo(PlannerInfo *root,
* Here, we have to consider that "our join condition" includes any
* clauses that syntactically appeared above the lower OJ and below
* ours; those are equivalent to degenerate clauses in our OJ and must
- * be treated as such. Such clauses obviously can't reference our
+ * be treated as such.  Such clauses obviously can't reference our
* LHS, and they must be non-strict for the lower OJ's RHS (else
* reduce_outer_joins would have reduced the lower OJ to a plain
* join). Hence the other ways in which we handle clauses within our
@@ -1248,7 +1248,7 @@ make_outerjoininfo(PlannerInfo *root,
* distribute_qual_to_rels
* Add clause information to either the baserestrictinfo or joininfo list
* (depending on whether the clause is a join) of each base relation
- * mentioned in the clause. A RestrictInfo node is created and added to
+ * mentioned in the clause.  A RestrictInfo node is created and added to
* the appropriate list for each rel. Alternatively, if the clause uses a
* mergejoinable operator and is not delayed by outer-join rules, enter
* the left- and right-side expressions into the query's list of
@@ -1313,7 +1313,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* In ordinary SQL, a WHERE or JOIN/ON clause can't reference any rels
* that aren't within its syntactic scope; however, if we pulled up a
* LATERAL subquery then we might find such references in quals that have
- * been pulled up. We need to treat such quals as belonging to the join
+ * been pulled up.  We need to treat such quals as belonging to the join
* level that includes every rel they reference. Although we could make
* pull_up_subqueries() place such quals correctly to begin with, it's
* easier to handle it here. When we find a clause that contains Vars
@@ -1357,10 +1357,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* gating Result plan node. We put such a clause into the regular
* RestrictInfo lists for the moment, but eventually createplan.c will
* pull it out and make a gating Result node immediately above whatever
- * plan node the pseudoconstant clause is assigned to. It's usually best
+ * plan node the pseudoconstant clause is assigned to.  It's usually best
* to put a gating node as high in the plan tree as possible. If we are
* not below an outer join, we can actually push the pseudoconstant qual
- * all the way to the top of the tree. If we are below an outer join, we
+ * all the way to the top of the tree.  If we are below an outer join, we
* leave the qual at its original syntactic level (we could push it up to
* just below the outer join, but that seems more complex than it's
* worth).
@@ -1414,7 +1414,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* Note: it is not immediately obvious that a simple boolean is enough
* for this: if for some reason we were to attach a degenerate qual to
* its original join level, it would need to be treated as an outer join
- * qual there. However, this cannot happen, because all the rels the
+ * qual there.  However, this cannot happen, because all the rels the
* clause mentions must be in the outer join's min_righthand, therefore
* the join it needs must be formed before the outer join; and we always
* attach quals to the lowest level where they can be evaluated. But
@@ -1448,7 +1448,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* We can't use such a clause to deduce equivalence (the left and
* right sides might be unequal above the join because one of them has
* gone to NULL) ... but we might be able to use it for more limited
- * deductions, if it is mergejoinable. So consider adding it to the
+ * deductions, if it is mergejoinable.  So consider adding it to the
* lists of set-aside outer-join clauses.
*/
is_pushed_down = false;
@@ -1478,7 +1478,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
- * Normal qual clause or degenerate outer-join clause. Either way, we
+ * Normal qual clause or degenerate outer-join clause.  Either way, we
* can mark it as pushed-down.
*/
is_pushed_down = true;
@@ -1598,7 +1598,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
*
* In all cases, it's important to initialize the left_ec and right_ec
* fields of a mergejoinable clause, so that all possibly mergejoinable
- * expressions have representations in EquivalenceClasses. If
+ * expressions have representations in EquivalenceClasses.  If
* process_equivalence is successful, it will take care of that;
* otherwise, we have to call initialize_mergeclause_eclasses to do it.
*/
@@ -1674,7 +1674,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have
* all the rels it mentions, and (2) we are at or above any outer joins that
* can null any of these rels and are below the syntactic location of the
- * given qual. We must enforce (2) because pushing down such a clause below
+ * given qual.  We must enforce (2) because pushing down such a clause below
* the OJ might cause the OJ to emit null-extended rows that should not have
* been formed, or that should have been rejected by the clause. (This is
* only an issue for non-strict quals, since if we can prove a qual mentioning
@@ -1700,7 +1700,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* required relids overlap the LHS too) causes that OJ's delay_upper_joins
* flag to be set TRUE. This will prevent any higher-level OJs from
* being interchanged with that OJ, which would result in not having any
- * correct place to evaluate the qual. (The case we care about here is a
+ * correct place to evaluate the qual.  (The case we care about here is a
* sub-select WHERE clause within the RHS of some outer join. The WHERE
* clause must effectively be treated as a degenerate clause of that outer
* join's condition. Rather than trying to match such clauses with joins
@@ -1928,7 +1928,7 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
* that provides all its variables.
*
* "nullable_relids" is the set of relids used in the expressions that are
- * potentially nullable below the expressions. (This has to be supplied by
+ * potentially nullable below the expressions.  (This has to be supplied by
* caller because this function is used after deconstruct_jointree, so we
* don't have knowledge of where the clause items came from.)
*
@@ -2098,7 +2098,7 @@ check_mergejoinable(RestrictInfo *restrictinfo)
* info fields in the restrictinfo.
*
* Currently, we support hashjoin for binary opclauses where
- * the operator is a hashjoinable operator. The arguments can be
+ * the operator is a hashjoinable operator.  The arguments can be
* anything --- as long as there are no volatile functions in them.
*/
static void
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 7937ff00e0..94ca92d78e 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -10,9 +10,9 @@
* ORDER BY col ASC/DESC
* LIMIT 1)
* Given a suitable index on tab.col, this can be much faster than the
- * generic scan-all-the-rows aggregation plan. We can handle multiple
+ * generic scan-all-the-rows aggregation plan.  We can handle multiple
* MIN/MAX aggregates by generating multiple subqueries, and their
- * orderings can be different. However, if the query contains any
+ * orderings can be different.  However, if the query contains any
* non-optimizable aggregates, there's no point since we'll have to
* scan all the rows anyway.
*
@@ -128,7 +128,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
/*
* Scan the tlist and HAVING qual to find all the aggregates and verify
- * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
+ * all are MIN/MAX aggregates.  Stop as soon as we find one that isn't.
*/
aggs_list = NIL;
if (find_minmax_aggs_walker((Node *) tlist, &aggs_list))
@@ -163,7 +163,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
* We can use either an ordering that gives NULLS FIRST or one that
* gives NULLS LAST; furthermore there's unlikely to be much
* performance difference between them, so it doesn't seem worth
- * costing out both ways if we get a hit on the first one. NULLS
+ * costing out both ways if we get a hit on the first one.  NULLS
* FIRST is more likely to be available if the operator is a
* reverse-sort operator, so try that first if reverse.
*/
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 3ea916f166..93484a0cd5 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -33,7 +33,7 @@
* which may involve joins but not any fancier features.
*
* Since query_planner does not handle the toplevel processing (grouping,
- * sorting, etc) it cannot select the best path by itself. Instead, it
+ * sorting, etc) it cannot select the best path by itself.  Instead, it
* returns the RelOptInfo for the top level of joining, and the caller
* (grouping_planner) can choose one of the surviving paths for the rel.
* Normally it would choose either the rel's cheapest path, or the cheapest
@@ -63,7 +63,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If the query has an empty join tree, then it's something easy like
- * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
+ * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
*/
if (parse->jointree->fromlist == NIL)
{
@@ -129,7 +129,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Examine the targetlist and join tree, adding entries to baserel
* targetlists for all referenced Vars, and generating PlaceHolderInfo
- * entries for all referenced PlaceHolderVars. Restrict and join clauses
+ * entries for all referenced PlaceHolderVars.  Restrict and join clauses
* are added to appropriate lists belonging to the mentioned relations. We
* also build EquivalenceClasses for provably equivalent expressions. The
* SpecialJoinInfo list is also built to hold information about join order
@@ -153,7 +153,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If we formed any equivalence classes, generate additional restriction
- * clauses as appropriate. (Implied join clauses are formed on-the-fly
+ * clauses as appropriate.  (Implied join clauses are formed on-the-fly
* later.)
*/
generate_base_implied_equalities(root);
@@ -168,14 +168,14 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Examine any "placeholder" expressions generated during subquery pullup.
* Make sure that the Vars they need are marked as needed at the relevant
- * join level. This must be done before join removal because it might
+ * join level.  This must be done before join removal because it might
* cause Vars or placeholders to be needed above a join when they weren't
* so marked before.
*/
fix_placeholder_input_needed_levels(root);
/*
- * Remove any useless outer joins. Ideally this would be done during
+ * Remove any useless outer joins.  Ideally this would be done during
* jointree preprocessing, but the necessary information isn't available
* until we've built baserel data structures and classified qual clauses.
*/
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 0508d16902..0f1e2e4680 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -192,7 +192,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
/*
* We document cursor_tuple_fraction as simply being a fraction, which
- * means the edge cases 0 and 1 have to be treated specially here. We
+ * means the edge cases 0 and 1 have to be treated specially here.  We
* convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
*/
if (tuple_fraction >= 1.0)
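
[A sketch of that edge-case handling, assuming the internal convention that 0.0 means "fetch everything"; the tiny replacement constant is illustrative.]

    #include <stdio.h>

    /*
     * Clamp a user-visible fraction to the internal convention: the
     * documented edge cases 1.0 and 0.0 get special treatment.
     */
    static double
    normalize_tuple_fraction(double tuple_fraction)
    {
        if (tuple_fraction >= 1.0)
            tuple_fraction = 0.0;       /* "all the tuples" */
        else if (tuple_fraction <= 0.0)
            tuple_fraction = 1e-10;     /* nonzero, but very small */
        return tuple_fraction;
    }

    int
    main(void)
    {
        printf("%g %g %g\n",
               normalize_tuple_fraction(1.0),   /* 0 */
               normalize_tuple_fraction(0.0),   /* 1e-10 */
               normalize_tuple_fraction(0.1));  /* 0.1, unchanged */
        return 0;
    }
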
@@ -386,7 +386,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
}
/*
- * Preprocess RowMark information. We need to do this after subquery
+ * Preprocess RowMark information.  We need to do this after subquery
* pullup (so that all non-inherited RTEs are present) and before
* inheritance expansion (so that the info is available for
* expand_inherited_tables to examine and modify).
@@ -506,7 +506,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
* to execute that we're better off doing it only once per group, despite
* the loss of selectivity. This is hard to estimate short of doing the
* entire planning process twice, so we use a heuristic: clauses
- * containing subplans are left in HAVING. Otherwise, we move or copy the
+ * containing subplans are left in HAVING.  Otherwise, we move or copy the
* HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
@@ -916,8 +916,8 @@ inheritance_planner(PlannerInfo *root)
subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ );
/*
- * Planning may have modified the query result relation (if there
- * were security barrier quals on the result RTE).
+ * Planning may have modified the query result relation (if there were
+ * security barrier quals on the result RTE).
*/
appinfo->child_relid = subroot.parse->resultRelation;
@@ -940,7 +940,8 @@ inheritance_planner(PlannerInfo *root)
else
{
List *tmp_rtable = NIL;
- ListCell *cell1, *cell2;
+ ListCell *cell1,
+ *cell2;
/*
* Check to see if any of the original RTEs were turned into
@@ -1108,7 +1109,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* If there's a top-level ORDER BY, assume we have to fetch all the
- * tuples. This might be too simplistic given all the hackery below
+ * tuples.  This might be too simplistic given all the hackery below
* to possibly avoid the sort; but the odds of accurate estimates here
* are pretty low anyway.
*/
@@ -1135,7 +1136,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* We should not need to call preprocess_targetlist, since we must be
- * in a SELECT query node. Instead, use the targetlist returned by
+ * in a SELECT query node.  Instead, use the targetlist returned by
* plan_set_operations (since this tells whether it returned any
* resjunk columns!), and transfer any sort key information from the
* original tlist.
@@ -1152,11 +1153,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
if (parse->rowMarks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
LCS_asString(((RowMarkClause *)
- linitial(parse->rowMarks))->strength))));
+ linitial(parse->rowMarks))->strength))));
/*
* Calculate pathkeys that represent result ordering requirements
@@ -1279,7 +1280,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Generate the best unsorted and presorted paths for this Query (but
- * note there may not be any presorted paths). We also generate (in
+ * note there may not be any presorted paths).  We also generate (in
* standard_qp_callback) pathkey representations of the query's sort
* clause, distinct clause, etc.
*/
@@ -1314,7 +1315,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* In GROUP BY mode, an absolute LIMIT is relative to the number
- * of groups not the number of tuples. If the caller gave us a
+ * of groups not the number of tuples.  If the caller gave us a
* fraction, keep it as-is. (In both cases, we are effectively
* assuming that all the groups are about the same size.)
*/
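
[A sketch of the adjustment described above, under the comment's own equal-size-groups assumption; values >= 1.0 are treated as absolute row counts, smaller values as fractions. The exact planner arithmetic may differ.]

    #include <stdio.h>

    static double
    adjust_fraction_for_grouping(double tuple_fraction, double dNumGroups)
    {
        if (tuple_fraction >= 1.0)
            tuple_fraction /= dNumGroups;   /* absolute limit counts groups */
        return tuple_fraction;              /* fractions stay as-is */
    }

    int
    main(void)
    {
        printf("%g\n", adjust_fraction_for_grouping(100.0, 1000.0));    /* 0.1 */
        printf("%g\n", adjust_fraction_for_grouping(0.25, 1000.0));     /* 0.25 */
        return 0;
    }
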
@@ -1673,7 +1674,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Furthermore, there cannot be any variables in either HAVING
* or the targetlist, so we actually do not need the FROM
* table at all! We can just throw away the plan-so-far and
- * generate a Result node. This is a sufficiently unusual
+ * generate a Result node.  This is a sufficiently unusual
* corner case that it's not worth contorting the structure of
* this routine to avoid having to generate the plan in the
* first place.
@@ -1717,14 +1718,14 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* The "base" targetlist for all steps of the windowing process is
- * a flat tlist of all Vars and Aggs needed in the result. (In
+ * a flat tlist of all Vars and Aggs needed in the result.  (In
* some cases we wouldn't need to propagate all of these all the
* way to the top, since they might only be needed as inputs to
* WindowFuncs. It's probably not worth trying to optimize that
* though.) We also add window partitioning and sorting
* expressions to the base tlist, to ensure they're computed only
* once at the bottom of the stack (that's critical for volatile
- * functions). As we climb up the stack, we'll add outputs for
+ * functions).  As we climb up the stack, we'll add outputs for
* the WindowFuncs computed at each level.
*/
window_tlist = make_windowInputTargetList(root,
@@ -1733,7 +1734,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* The copyObject steps here are needed to ensure that each plan
- * node has a separately modifiable tlist. (XXX wouldn't a
+ * node has a separately modifiable tlist.  (XXX wouldn't a
* shallow list copy do for that?)
*/
result_plan->targetlist = (List *) copyObject(window_tlist);
@@ -2018,7 +2019,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*
* Once grouping_planner() has applied a general tlist to the topmost
* scan/join plan node, any tlist eval cost for added-on nodes should be
- * accounted for as we create those nodes. Presently, of the node types we
+ * accounted for as we create those nodes.  Presently, of the node types we
* can add on later, only Agg, WindowAgg, and Group project new tlists (the
* rest just copy their input tuples) --- so make_agg(), make_windowagg() and
* make_group() are responsible for calling this function to account for their
@@ -2150,7 +2151,7 @@ preprocess_rowmarks(PlannerInfo *root)
* insufficient because of rule substitution, query pullup, etc.
*/
CheckSelectLocking(parse, ((RowMarkClause *)
- linitial(parse->rowMarks))->strength);
+ linitial(parse->rowMarks))->strength);
}
else
{
@@ -2184,7 +2185,7 @@ preprocess_rowmarks(PlannerInfo *root)
/*
* Currently, it is syntactically impossible to have FOR UPDATE et al
- * applied to an update/delete target rel. If that ever becomes
+ * applied to an update/delete target rel.  If that ever becomes
* possible, we should drop the target from the PlanRowMark list.
*/
Assert(rc->rti != parse->resultRelation);
@@ -2268,7 +2269,7 @@ preprocess_rowmarks(PlannerInfo *root)
* preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
*
* We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
- * results back in *count_est and *offset_est. These variables are set to
+ * results back in *count_est and *offset_est.  These variables are set to
* 0 if the corresponding clause is not present, and -1 if it's present
* but we couldn't estimate the value for it. (The "0" convention is OK
* for OFFSET but a little bit bogus for LIMIT: effectively we estimate
@@ -2277,7 +2278,7 @@ preprocess_rowmarks(PlannerInfo *root)
* be passed to make_limit, which see if you change this code.
*
* The return value is the suitably adjusted tuple_fraction to use for
- * planning the query. This adjustment is not overridable, since it reflects
+ * planning the query.  This adjustment is not overridable, since it reflects
* plan actions that grouping_planner() will certainly take, not assumptions
* about context.
*/
@@ -2401,7 +2402,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
else if (*offset_est != 0 && tuple_fraction > 0.0)
{
/*
- * We have an OFFSET but no LIMIT. This acts entirely differently
+ * We have an OFFSET but no LIMIT.  This acts entirely differently
* from the LIMIT case: here, we need to increase rather than decrease
* the caller's tuple_fraction, because the OFFSET acts to cause more
* tuples to be fetched instead of fewer. This only matters if we got
@@ -2416,7 +2417,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
/*
* If we have absolute counts from both caller and OFFSET, add them
- * together; likewise if they are both fractional. If one is
+ * together; likewise if they are both fractional.  If one is
* fractional and the other absolute, we want to take the larger, and
* we heuristically assume that's the fractional one.
*/
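
[The combination rule in that comment can be sketched in a few lines: values >= 1.0 are absolute row counts, values in (0,1) are fractions, and in the mixed case the fractional estimate is kept, since it is heuristically assumed to represent more rows. Illustrative only.]

    #include <stdio.h>

    static double
    combine_fractions(double a, double b)
    {
        int         a_absolute = (a >= 1.0);
        int         b_absolute = (b >= 1.0);

        if (a_absolute == b_absolute)
            return a + b;           /* both absolute, or both fractional */
        return a_absolute ? b : a;  /* mixed: keep the fractional one */
    }

    int
    main(void)
    {
        printf("%g\n", combine_fractions(100.0, 50.0));  /* 150: added */
        printf("%g\n", combine_fractions(0.1, 0.2));     /* 0.3: added */
        printf("%g\n", combine_fractions(100.0, 0.25));  /* 0.25: fraction wins */
        return 0;
    }
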
@@ -2457,7 +2458,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
*
* If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
* a Limit node. This is worth checking for because "OFFSET 0" is a common
- * locution for an optimization fence. (Because other places in the planner
+ * locution for an optimization fence.  (Because other places in the planner
* merely check whether parse->limitOffset isn't NULL, it will still work as
* an optimization fence --- we're just suppressing unnecessary run-time
* overhead.)
@@ -2700,7 +2701,7 @@ choose_hashed_grouping(PlannerInfo *root,
/*
* Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
- * aggregates. (Doing so would imply storing *all* the input values in
+ * aggregates.  (Doing so would imply storing *all* the input values in
* the hash table, and/or running many sorts in parallel, either of which
* seems like a certain loser.) We similarly don't support ordered-set
* aggregates in hashed aggregation, but that case is included in the
@@ -2840,7 +2841,7 @@ choose_hashed_grouping(PlannerInfo *root,
* pass in the costs as individual variables.)
*
* But note that making the two choices independently is a bit bogus in
- * itself. If the two could be combined into a single choice operation
+ * itself.  If the two could be combined into a single choice operation
* it'd probably be better, but that seems far too unwieldy to be practical,
* especially considering that the combination of GROUP BY and DISTINCT
* isn't very common in real queries. By separating them, we are giving
@@ -2937,7 +2938,7 @@ choose_hashed_distinct(PlannerInfo *root,
0.0, work_mem, limit_tuples);
/*
- * Now for the GROUP case. See comments in grouping_planner about the
+ * Now for the GROUP case.  See comments in grouping_planner about the
* sorting choices here --- this code should match that code.
*/
sorted_p.startup_cost = sorted_startup_cost;
@@ -3127,7 +3128,7 @@ make_subplanTargetList(PlannerInfo *root,
* add them to the result tlist if not already present. (A Var used
* directly as a GROUP BY item will be present already.) Note this
* includes Vars used in resjunk items, so we are covering the needs of
- * ORDER BY and window specifications. Vars used within Aggrefs will be
+ * ORDER BY and window specifications.  Vars used within Aggrefs will be
* pulled out here, too.
*/
non_group_vars = pull_var_clause((Node *) non_group_cols,
@@ -3178,7 +3179,7 @@ get_grouping_column_index(Query *parse, TargetEntry *tle)
* Locate grouping columns in the tlist chosen by create_plan.
*
* This is only needed if we don't use the sub_tlist chosen by
- * make_subplanTargetList. We have to forget the column indexes found
+ * make_subplanTargetList.  We have to forget the column indexes found
* by that routine and re-locate the grouping exprs in the real sub_tlist.
* We assume the grouping exprs are just Vars (see make_subplanTargetList).
*/
@@ -3209,11 +3210,11 @@ locate_grouping_columns(PlannerInfo *root,
/*
* The grouping column returned by create_plan might not have the same
- * typmod as the original Var. (This can happen in cases where a
+ * typmod as the original Var.  (This can happen in cases where a
* set-returning function has been inlined, so that we now have more
* knowledge about what it returns than we did when the original Var
* was created.) So we can't use tlist_member() to search the tlist;
- * instead use tlist_member_match_var. For safety, still check that
+ * instead use tlist_member_match_var.  For safety, still check that
* the vartype matches.
*/
if (!(groupexpr && IsA(groupexpr, Var)))
@@ -3339,7 +3340,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
*
* When grouping_planner inserts one or more WindowAgg nodes into the plan,
* this function computes the initial target list to be computed by the node
- * just below the first WindowAgg. This list must contain all values needed
+ * just below the first WindowAgg.  This list must contain all values needed
* to evaluate the window functions, compute the final target list, and
* perform any required final sort step. If multiple WindowAggs are needed,
* each intermediate one adds its window function results onto this tlist;
@@ -3347,7 +3348,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
*
* This function is much like make_subplanTargetList, though not quite enough
* like it to share code. As in that function, we flatten most expressions
- * into their component variables. But we do not want to flatten window
+ * into their component variables.  But we do not want to flatten window
* PARTITION BY/ORDER BY clauses, since that might result in multiple
* evaluations of them, which would be bad (possibly even resulting in
* inconsistent answers, if they contain volatile functions). Also, we must
@@ -3520,7 +3521,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* This depends on the behavior of make_pathkeys_for_window()!
*
* We are given the target WindowClause and an array of the input column
- * numbers associated with the resulting pathkeys. In the easy case, there
+ * numbers associated with the resulting pathkeys.  In the easy case, there
* are the same number of pathkey columns as partitioning + ordering columns
* and we just have to copy some data around. However, it's possible that
* some of the original partitioning + ordering columns were eliminated as
@@ -3532,7 +3533,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* determine which keys are significant.
*
* The method used here is a bit brute-force: add the sort columns to a list
- * one at a time and note when the resulting pathkey list gets longer. But
+ * one at a time and note when the resulting pathkey list gets longer.  But
* it's a sufficiently uncommon case that a faster way doesn't seem worth
* the amount of code refactoring that'd be needed.
*----------
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 46affe7dad..768c5c7670 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -145,7 +145,7 @@ static bool extract_query_dependencies_walker(Node *node,
/*
* set_plan_references
*
- * This is the final processing pass of the planner/optimizer. The plan
+ * This is the final processing pass of the planner/optimizer.  The plan
* tree is complete; we just have to adjust some representational details
* for the convenience of the executor:
*
@@ -189,7 +189,7 @@ static bool extract_query_dependencies_walker(Node *node,
* and root->glob->invalItems (for everything else).
*
* Notice that we modify Plan nodes in-place, but use expression_tree_mutator
- * to process targetlist and qual expressions. We can assume that the Plan
+ * to process targetlist and qual expressions.  We can assume that the Plan
* nodes were just built by the planner and are not multiply referenced, but
* it's not so safe to assume that for expression tree nodes.
*/
@@ -262,7 +262,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing)
/*
* If there are any dead subqueries, they are not referenced in the Plan
* tree, so we must add RTEs contained in them to the flattened rtable
- * separately. (If we failed to do this, the executor would not perform
+ * separately.  (If we failed to do this, the executor would not perform
* expected permission checks for tables mentioned in such subqueries.)
*
* Note: this pass over the rangetable can't be combined with the previous
@@ -292,7 +292,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing)
/*
* The subquery might never have been planned at all, if it
* was excluded on the basis of self-contradictory constraints
- * in our query level. In this case apply
+ * in our query level.  In this case apply
* flatten_unplanned_rtes.
*
* If it was planned but the plan is dummy, we assume that it
@@ -591,7 +591,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
/*
* These plan types don't actually bother to evaluate their
* targetlists, because they just return their unmodified input
- * tuples. Even though the targetlist won't be used by the
+ * tuples.  Even though the targetlist won't be used by the
* executor, we fix it up for possible use by EXPLAIN (not to
* mention ease of debugging --- wrong varnos are very confusing).
*/
@@ -609,7 +609,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
/*
* Like the plan types above, LockRows doesn't evaluate its
- * tlist or quals. But we have to fix up the RT indexes in
+ * tlist or quals.  But we have to fix up the RT indexes in
* its rowmarks.
*/
set_dummy_tlist_references(plan, rtoffset);
@@ -727,7 +727,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
* Set up the visible plan targetlist as being the same as
* the first RETURNING list. This is for the use of
* EXPLAIN; the executor won't pay any attention to the
- * targetlist. We postpone this step until here so that
+ * targetlist.  We postpone this step until here so that
* we don't have to do set_returning_clause_references()
* twice on identical targetlists.
*/
@@ -953,7 +953,7 @@ set_subqueryscan_references(PlannerInfo *root,
else
{
/*
- * Keep the SubqueryScan node. We have to do the processing that
+ * Keep the SubqueryScan node.  We have to do the processing that
* set_plan_references would otherwise have done on it. Notice we do
* not do set_upper_references() here, because a SubqueryScan will
* always have been created with correct references to its subplan's
@@ -1425,7 +1425,7 @@ set_dummy_tlist_references(Plan *plan, int rtoffset)
*
* In most cases, subplan tlists will be "flat" tlists with only Vars,
* so we try to optimize that case by extracting information about Vars
- * in advance. Matching a parent tlist to a child is still an O(N^2)
+ * in advance.  Matching a parent tlist to a child is still an O(N^2)
* operation, but at least with a much smaller constant factor than plain
* tlist_member() searches.
*
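
[The precomputation trick described above reads roughly like this in isolation; FlatVar is a hypothetical type standing in for the arrays the real code builds from the child tlist's Vars.]

    #include <stdio.h>

    /* Hypothetical flattened tlist entry: a bare Var reference. */
    typedef struct { int varno; int varattno; } FlatVar;

    /*
     * With the child tlist's Vars extracted into a plain array up front,
     * matching each parent Var is a tight scan over ints instead of a full
     * expression comparison per entry: same O(N^2), smaller constant.
     */
    static int
    find_var(const FlatVar *child, int n, int varno, int varattno)
    {
        for (int i = 0; i < n; i++)
            if (child[i].varno == varno && child[i].varattno == varattno)
                return i + 1;       /* resno is 1-based */
        return 0;                   /* not found */
    }

    int
    main(void)
    {
        FlatVar     child[] = {{1, 1}, {1, 2}, {2, 1}};

        printf("%d\n", find_var(child, 3, 1, 2));   /* prints 2 */
        return 0;
    }
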
@@ -1870,7 +1870,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
* adjust any Vars that refer to other tables to reference junk tlist
* entries in the top subplan's targetlist. Vars referencing the result
* table should be left alone, however (the executor will evaluate them
- * using the actual heap tuple, after firing triggers if any). In the
+ * using the actual heap tuple, after firing triggers if any).  In the
* adjusted RETURNING list, result-table Vars will have their original
* varno (plus rtoffset), but Vars for other rels will have varno OUTER_VAR.
*
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index a3f358377d..be92049ec4 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -434,7 +434,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
Node *result;
/*
- * Copy the source Query node. This is a quick and dirty kluge to resolve
+ * Copy the source Query node.  This is a quick and dirty kluge to resolve
* the fact that the parser can generate trees with multiple links to the
* same sub-Query node, but the planner wants to scribble on the Query.
* Try to clean this up when we do querytree redesign...
@@ -459,7 +459,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
* path/costsize.c.
*
* XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash
- * its output. In that case it would've been better to specify full
+ * its output.  In that case it would've been better to specify full
* retrieval. At present, however, we can only check hashability after
* we've made the subplan :-(. (Determining whether it'll fit in work_mem
* is the really hard part.) Therefore, we don't want to be too
@@ -496,7 +496,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
/*
* If it's a correlated EXISTS with an unimportant targetlist, we might be
* able to transform it to the equivalent of an IN and then implement it
- * by hashing. We don't have enough information yet to tell which way is
+ * by hashing.  We don't have enough information yet to tell which way is
* likely to be better (it depends on the expected number of executions of
* the EXISTS qual, and we are much too early in planning the outer query
* to be able to guess that). So we generate both plans, if possible, and
@@ -724,7 +724,7 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
* Otherwise, we have the option to tack a Material node onto the top
* of the subplan, to reduce the cost of reading it repeatedly. This
* is pointless for a direct-correlated subplan, since we'd have to
- * recompute its results each time anyway. For uncorrelated/undirect
+ * recompute its results each time anyway.  For uncorrelated/undirect
* correlated subplans, we add Material unless the subplan's top plan
* node would materialize its output anyway. Also, if enable_material
* is false, then the user does not want us to materialize anything
@@ -750,10 +750,10 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
/*
* A parameterless subplan (not initplan) should be prepared to handle
- * REWIND efficiently. If it has direct parameters then there's no point
+ * REWIND efficiently.  If it has direct parameters then there's no point
* since it'll be reset on each scan anyway; and if it's an initplan then
* there's no point since it won't get re-run without parameter changes
- * anyway. The input of a hashed subplan doesn't need REWIND either.
+ * anyway.  The input of a hashed subplan doesn't need REWIND either.
*/
if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable)
root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs,
@@ -850,7 +850,7 @@ generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno)
/*
* convert_testexpr: convert the testexpr given by the parser into
* actually executable form. This entails replacing PARAM_SUBLINK Params
- * with Params or Vars representing the results of the sub-select. The
+ * with Params or Vars representing the results of the sub-select.  The
* nodes to be substituted are passed in as the List result from
* generate_subquery_params or generate_subquery_vars.
*/
@@ -952,7 +952,7 @@ testexpr_is_hashable(Node *testexpr)
*
* The combining operators must be hashable and strict. The need for
* hashability is obvious, since we want to use hashing. Without
- * strictness, behavior in the presence of nulls is too unpredictable. We
+ * strictness, behavior in the presence of nulls is too unpredictable.  We
* actually must assume even more than plain strictness: they can't yield
* NULL for non-null inputs, either (see nodeSubplan.c). However, hash
* indexes and hash joins assume that too.
@@ -1060,7 +1060,7 @@ SS_process_ctes(PlannerInfo *root)
}
/*
- * Copy the source Query node. Probably not necessary, but let's keep
+ * Copy the source Query node.  Probably not necessary, but let's keep
* this similar to make_subplan.
*/
subquery = (Query *) copyObject(cte->ctequery);
@@ -1086,7 +1086,7 @@ SS_process_ctes(PlannerInfo *root)
elog(ERROR, "unexpected outer reference in CTE query");
/*
- * Make a SubPlan node for it. This is just enough unlike
+ * Make a SubPlan node for it.  This is just enough unlike
* build_subplan that we can't share code.
*
* Note plan_id, plan_name, and cost fields are set further down.
@@ -1309,7 +1309,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
/*
* See if the subquery can be simplified based on the knowledge that it's
- * being used in EXISTS(). If we aren't able to get rid of its
+ * being used in EXISTS().  If we aren't able to get rid of its
* targetlist, we have to fail, because the pullup operation leaves us
* with noplace to evaluate the targetlist.
*/
@@ -1358,9 +1358,9 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
* what pull_up_subqueries has to go through.
*
* In fact, it's even easier than what convert_ANY_sublink_to_join has to
- * do. The machinations of simplify_EXISTS_query ensured that there is
+ * do.  The machinations of simplify_EXISTS_query ensured that there is
* nothing interesting in the subquery except an rtable and jointree, and
- * even the jointree FromExpr no longer has quals. So we can just append
+ * even the jointree FromExpr no longer has quals.  So we can just append
* the rtable to our own and use the FromExpr in our jointree. But first,
* adjust all level-zero varnos in the subquery to account for the rtable
* merger.
@@ -1491,7 +1491,7 @@ simplify_EXISTS_query(Query *query)
*
* On success, the modified subselect is returned, and we store a suitable
* upper-level test expression at *testexpr, plus a list of the subselect's
- * output Params at *paramIds. (The test expression is already Param-ified
+ * output Params at *paramIds.  (The test expression is already Param-ified
* and hence need not go through convert_testexpr, which is why we have to
* deal with the Param IDs specially.)
*
@@ -1654,7 +1654,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
return NULL;
/*
- * Also reject sublinks in the stuff we intend to pull up. (It might be
+ * Also reject sublinks in the stuff we intend to pull up.  (It might be
* possible to support this, but doesn't seem worth the complication.)
*/
if (contain_subplans((Node *) leftargs))
@@ -1856,7 +1856,7 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
* is needed for a bare List.)
*
* Anywhere within the top-level AND/OR clause structure, we can tell
- * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
+ * make_subplan() that NULL and FALSE are interchangeable.  So isTopQual
* propagates down in both cases. (Note that this is unlike the meaning
* of "top level qual" used in most other places in Postgres.)
*/
@@ -1962,7 +1962,7 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
* Now determine the set of params that are validly referenceable in this
* query level; to wit, those available from outer query levels plus the
* output parameters of any local initPlans. (We do not include output
- * parameters of regular subplans. Those should only appear within the
+ * parameters of regular subplans.  Those should only appear within the
* testexpr of SubPlan nodes, and are taken care of locally within
* finalize_primnode. Likewise, special parameters that are generated by
* nodes such as ModifyTable are handled within finalize_plan.)
@@ -2138,7 +2138,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
/*
* In a SubqueryScan, SS_finalize_plan has already been run on the
* subplan by the inner invocation of subquery_planner, so there's
- * no need to do it again. Instead, just pull out the subplan's
+ * no need to do it again.  Instead, just pull out the subplan's
* extParams list, which represents the params it needs from my
* level and higher levels.
*/
@@ -2500,7 +2500,7 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
/*
* Remove any param IDs of output parameters of the subplan that were
- * referenced in the testexpr. These are not interesting for
+ * referenced in the testexpr.  These are not interesting for
* parameter change signaling since we always re-evaluate the subplan.
* Note that this wouldn't work too well if there might be uses of the
* same param IDs elsewhere in the plan, but that can't happen because
@@ -2598,7 +2598,7 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
/* Label the subplan for EXPLAIN purposes */
node->plan_name = psprintf("InitPlan %d (returns $%d)",
- node->plan_id, prm->paramid);
+ node->plan_id, prm->paramid);
return prm;
}
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 812e56d4c1..776fe426c3 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -116,7 +116,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
*
* A clause "foo op ANY (sub-SELECT)" can be processed by pulling the
* sub-SELECT up to become a rangetable entry and treating the implied
- * comparisons as quals of a semijoin. However, this optimization *only*
+ * comparisons as quals of a semijoin.  However, this optimization *only*
* works at the top level of WHERE or a JOIN/ON clause, because we cannot
* distinguish whether the ANY ought to return FALSE or NULL in cases
* involving NULL inputs. Also, in an outer join's ON clause we can only
@@ -133,7 +133,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
* transformations if any are found.
*
* This routine has to run before preprocess_expression(), so the quals
- * clauses are not yet reduced to implicit-AND format. That means we need
+ * clauses are not yet reduced to implicit-AND format.  That means we need
* to recursively search through explicit AND clauses, which are
* probably only binary ANDs. We stop as soon as we hit a non-AND item.
*/
@@ -287,7 +287,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/*
* Although we could include the pulled-up subqueries in the returned
* relids, there's no need since upper quals couldn't refer to their
- * outputs anyway. But we *do* need to include the join's own rtindex
+ * outputs anyway.  But we *do* need to include the join's own rtindex
* because we haven't yet collapsed join alias variables, so upper
* levels would mistakenly think they couldn't use references to this
* join.
@@ -609,7 +609,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode)
*
* If this jointree node is within either side of an outer join, then
* lowest_outer_join references the lowest such JoinExpr node; otherwise
- * it is NULL. We use this to constrain the effects of LATERAL subqueries.
+ * it is NULL.  We use this to constrain the effects of LATERAL subqueries.
*
* If this jointree node is within the nullable side of an outer join, then
* lowest_nulling_outer_join references the lowest such JoinExpr node;
@@ -759,7 +759,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
* Attempt to pull up a single simple subquery.
*
* jtnode is a RangeTblRef that has been tentatively identified as a simple
- * subquery by pull_up_subqueries. We return the replacement jointree node,
+ * subquery by pull_up_subqueries.  We return the replacement jointree node,
* or jtnode itself if we determine that the subquery can't be pulled up after
* all.
*
@@ -792,7 +792,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Create a PlannerInfo data structure for this subquery.
*
* NOTE: the next few steps should match the first processing in
- * subquery_planner(). Can we refactor to avoid code duplication, or
+ * subquery_planner().  Can we refactor to avoid code duplication, or
* would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
@@ -842,7 +842,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
/*
* Now we must recheck whether the subquery is still simple enough to pull
- * up. If not, abandon processing it.
+ * up.  If not, abandon processing it.
*
* We don't really need to recheck all the conditions involved, but it's
* easier just to keep this "if" looking the same as the one in
@@ -859,7 +859,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Give up, return unmodified RangeTblRef.
*
* Note: The work we just did will be redone when the subquery gets
- * planned on its own. Perhaps we could avoid that by storing the
+ * planned on its own.  Perhaps we could avoid that by storing the
* modified subquery back into the rangetable, but I'm not gonna risk
* it now.
*/
@@ -900,7 +900,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* non-nullable items and lateral references may have to be turned into
* PlaceHolderVars. If we are dealing with an appendrel member then
* anything that's not a simple Var has to be turned into a
- * PlaceHolderVar. Set up required context data for pullup_replace_vars.
+ * PlaceHolderVar.  Set up required context data for pullup_replace_vars.
*/
rvcontext.root = root;
rvcontext.targetlist = subquery->targetList;
@@ -925,7 +925,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* replace any of the jointree structure. (This'd be a lot cleaner if we
* could use query_tree_mutator.) We have to use PHVs in the targetList,
* returningList, and havingQual, since those are certainly above any
- * outer join. replace_vars_in_jointree tracks its location in the
+ * outer join.  replace_vars_in_jointree tracks its location in the
* jointree and uses PHVs or not appropriately.
*/
parse->targetList = (List *)
@@ -1084,7 +1084,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Pull up a single simple UNION ALL subquery.
*
* jtnode is a RangeTblRef that has been identified as a simple UNION ALL
- * subquery by pull_up_subqueries. We pull up the leaf subqueries and
+ * subquery by pull_up_subqueries.  We pull up the leaf subqueries and
* build an "append relation" for the union set. The result value is just
* jtnode, since we don't actually need to change the query jointree.
*/
@@ -1098,7 +1098,7 @@ pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
/*
* Make a modifiable copy of the subquery's rtable, so we can adjust
- * upper-level Vars in it. There are no such Vars in the setOperations
+ * upper-level Vars in it.  There are no such Vars in the setOperations
* tree proper, so fixing the rtable should be sufficient.
*/
rtable = copyObject(subquery->rtable);
@@ -1370,7 +1370,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
/*
* Don't pull up a subquery that has any set-returning functions in its
- * targetlist. Otherwise we might well wind up inserting set-returning
+ * targetlist.  Otherwise we might well wind up inserting set-returning
* functions into places where they mustn't go, such as quals of higher
* queries.
*/
@@ -1379,7 +1379,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
/*
* Don't pull up a subquery that has any volatile functions in its
- * targetlist. Otherwise we might introduce multiple evaluations of these
+ * targetlist.  Otherwise we might introduce multiple evaluations of these
* functions, if they get copied to multiple places in the upper query,
* leading to surprising results. (Note: the PlaceHolderVar mechanism
* doesn't quite guarantee single evaluation; else we could pull up anyway
@@ -1609,7 +1609,7 @@ replace_vars_in_jointree(Node *jtnode,
/*
* If the RangeTblRef refers to a LATERAL subquery (that isn't the
* same subquery we're pulling up), it might contain references to the
- * target subquery, which we must replace. We drive this from the
+ * target subquery, which we must replace.  We drive this from the
* jointree scan, rather than a scan of the rtable, for a couple of
* reasons: we can avoid processing no-longer-referenced RTEs, and we
* can use the appropriate setting of need_phvs depending on whether
@@ -1770,7 +1770,7 @@ pullup_replace_vars_callback(Var *var,
/*
* Insert PlaceHolderVar if needed. Notice that we are wrapping one
* PlaceHolderVar around the whole RowExpr, rather than putting one
- * around each element of the row. This is because we need the
+ * around each element of the row.  This is because we need the
* expression to yield NULL, not ROW(NULL,NULL,...) when it is forced
* to null by an outer join.
*/
@@ -1872,7 +1872,7 @@ pullup_replace_vars_callback(Var *var,
/*
* Cache it if possible (ie, if the attno is in range, which it
- * probably always should be). We can cache the value even if we
+ * probably always should be).  We can cache the value even if we
* decided we didn't need a PHV, since this result will be
* suitable for any request that has need_phvs.
*/
@@ -1915,7 +1915,7 @@ pullup_replace_vars_subquery(Query *query,
*
* If a query's setOperations tree consists entirely of simple UNION ALL
* operations, flatten it into an append relation, which we can process more
- * intelligently than the general setops case. Otherwise, do nothing.
+ * intelligently than the general setops case.  Otherwise, do nothing.
*
* In most cases, this can succeed only for a top-level query, because for a
* subquery in FROM, the parent query's invocation of pull_up_subqueries would
@@ -2027,7 +2027,7 @@ flatten_simple_union_all(PlannerInfo *root)
* SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL;
* If the join clause is strict for b.y, then only null-extended rows could
* pass the upper WHERE, and we can conclude that what the query is really
- * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
+ * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
* to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be
* removed to prevent bogus selectivity calculations, but we leave it to
* distribute_qual_to_rels to get rid of such clauses.
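
The reduction described above is easy to sanity-check standalone (illustrative C, not backend code): because the qual a.x = b.y is strict in b.y, a matched left-join row can never have b.y NULL, so the upper IS NULL filter keeps exactly the rows an anti-join would emit.

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        int a[] = {1, 2, 3, 4};        /* outer rel values of a.x */
        int b[] = {2, 4};              /* inner rel values of b.y */
        int na = 4, nb = 2;

        for (int i = 0; i < na; i++)
        {
            bool matched = false;

            for (int j = 0; j < nb; j++)
                if (a[i] == b[j])
                    matched = true;    /* joined row has b.y = a[i], non-null */

            /* LEFT JOIN output filtered by "b.y IS NULL" ... */
            bool survives_isnull_filter = !matched;
            /* ... is exactly the anti-join membership test. */
            bool emitted_by_anti_join = !matched;

            if (survives_isnull_filter && emitted_by_anti_join)
                printf("both forms keep a.x = %d\n", a[i]);
        }
        return 0;
    }
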
@@ -2267,7 +2267,7 @@ reduce_outer_joins_pass2(Node *jtnode,
/*
* See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if
* the join's own quals are strict for any var that was forced null by
- * higher qual levels. NOTE: there are other ways that we could
+ * higher qual levels. NOTE: there are other ways that we could
* detect an anti-join, in particular if we were to check whether Vars
* coming from the RHS must be non-null because of table constraints.
* That seems complicated and expensive though (in particular, one
@@ -2425,7 +2425,7 @@ reduce_outer_joins_pass2(Node *jtnode,
* pulled-up relid, and change them to reference the replacement relid(s).
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. This should be OK since the tree was copied by
+ * nodes in-place. This should be OK since the tree was copied by
* pullup_replace_vars earlier. Avoid scribbling on the original values of
* the bitmapsets, though, because expression_tree_mutator doesn't copy those.
*/
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index 812fbaddba..2a24938d84 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -54,12 +54,12 @@ static Expr *process_duplicate_ors(List *orlist);
* Although this can be invoked on its own, it's mainly intended as a helper
* for eval_const_expressions(), and that context drives several design
* decisions. In particular, if the input is already AND/OR flat, we must
- * preserve that property. We also don't bother to recurse in situations
+ * preserve that property. We also don't bother to recurse in situations
* where we can assume that lower-level executions of eval_const_expressions
* would already have simplified sub-clauses of the input.
*
* The difference between this and a simple make_notclause() is that this
- * tries to get rid of the NOT node by logical simplification. It's clearly
+ * tries to get rid of the NOT node by logical simplification. It's clearly
* always a win if the NOT node can be eliminated altogether. However, our
* use of DeMorgan's laws could result in having more NOT nodes rather than
* fewer. We do that unconditionally anyway, because in WHERE clauses it's
@@ -152,7 +152,7 @@ negate_clause(Node *node)
* those properties. For example, if no direct child of
* the given AND clause is an AND or a NOT-above-OR, then
* the recursive calls of negate_clause() can't return any
- * OR clauses. So we needn't call pull_ors() before
+ * OR clauses. So we needn't call pull_ors() before
* building a new OR clause. Similarly for the OR case.
*--------------------
*/
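
negate_clause's DeMorgan push-down, sketched over a toy expression type (E_VAR/E_AND/E_OR/E_NOT are illustrative stand-ins for the planner's Node trees): NOT over AND becomes OR of negated children and vice versa, and NOT(NOT x) collapses to x, so explicit NOT nodes survive only over bare variables.

    #include <stdio.h>
    #include <stdlib.h>

    typedef enum { E_VAR, E_AND, E_OR, E_NOT } Kind;

    typedef struct Expr
    {
        Kind kind;
        const char *name;       /* E_VAR only */
        struct Expr *l, *r;     /* children; E_NOT uses l */
    } Expr;

    static Expr *mk(Kind k, const char *name, Expr *l, Expr *r)
    {
        Expr *e = malloc(sizeof(Expr));

        e->kind = k; e->name = name; e->l = l; e->r = r;
        return e;
    }

    static Expr *negate(Expr *e)
    {
        switch (e->kind)
        {
            case E_AND: return mk(E_OR, NULL, negate(e->l), negate(e->r));
            case E_OR:  return mk(E_AND, NULL, negate(e->l), negate(e->r));
            case E_NOT: return e->l;                  /* double negation */
            default:    return mk(E_NOT, NULL, e, NULL);
        }
    }

    static void show(const Expr *e)
    {
        switch (e->kind)
        {
            case E_VAR: printf("%s", e->name); break;
            case E_NOT: printf("NOT "); show(e->l); break;
            case E_AND: printf("("); show(e->l); printf(" AND "); show(e->r); printf(")"); break;
            case E_OR:  printf("("); show(e->l); printf(" OR ");  show(e->r); printf(")"); break;
        }
    }

    int main(void)
    {
        Expr *e = mk(E_AND, NULL, mk(E_VAR, "a", NULL, NULL),
                                  mk(E_VAR, "b", NULL, NULL));

        show(negate(e));        /* prints (NOT a OR NOT b) */
        printf("\n");
        return 0;
    }
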
@@ -293,7 +293,7 @@ canonicalize_qual(Expr *qual)
/*
* Pull up redundant subclauses in OR-of-AND trees. We do this only
* within the top-level AND/OR structure; there's no point in looking
- * deeper. Also remove any NULL constants in the top-level structure.
+ * deeper. Also remove any NULL constants in the top-level structure.
*/
newqual = find_duplicate_ors(qual);
@@ -374,7 +374,7 @@ pull_ors(List *orlist)
*
* This may seem like a fairly useless activity, but it turns out to be
* applicable to many machine-generated queries, and there are also queries
- * in some of the TPC benchmarks that need it. This was in fact almost the
+ * in some of the TPC benchmarks that need it. This was in fact almost the
* sole useful side-effect of the old prepqual code that tried to force
* the query into canonical AND-of-ORs form: the canonical equivalent of
* ((A AND B) OR (A AND C))
@@ -400,7 +400,7 @@ pull_ors(List *orlist)
* results, so it's valid to treat NULL::boolean the same as FALSE and then
* simplify AND/OR accordingly.
*
- * Returns the modified qualification. AND/OR flatness is preserved.
+ * Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_duplicate_ors(Expr *qual)
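
A quick exhaustive check of the factoring find_duplicate_ors performs, over all eight boolean assignments (standalone sketch):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        for (int bits = 0; bits < 8; bits++)
        {
            bool A = bits & 1, B = bits & 2, C = bits & 4;

            /* ((A AND B) OR (A AND C))  ==  (A AND (B OR C)) */
            assert(((A && B) || (A && C)) == (A && (B || C)));
        }
        printf("factoring holds for all assignments\n");
        return 0;
    }
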
diff --git a/src/backend/optimizer/prep/prepsecurity.c b/src/backend/optimizer/prep/prepsecurity.c
index 7daaa3349e..dd7f9003a2 100644
--- a/src/backend/optimizer/prep/prepsecurity.c
+++ b/src/backend/optimizer/prep/prepsecurity.c
@@ -33,7 +33,7 @@ typedef struct
Relation rel; /* RTE relation at rt_index */
List *targetlist; /* Targetlist for new subquery RTE */
List *colnames; /* Column names in subquery RTE */
- List *vars_processed; /* List of Vars already processed */
+ List *vars_processed; /* List of Vars already processed */
} security_barrier_replace_vars_context;
static void expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
@@ -43,7 +43,7 @@ static void security_barrier_replace_vars(Node *node,
security_barrier_replace_vars_context *context);
static bool security_barrier_replace_vars_walker(Node *node,
- security_barrier_replace_vars_context *context);
+ security_barrier_replace_vars_context *context);
/*
@@ -97,6 +97,7 @@ expand_security_quals(PlannerInfo *root, List *tlist)
if (rt_index == parse->resultRelation)
{
RangeTblEntry *newrte = copyObject(rte);
+
parse->rtable = lappend(parse->rtable, newrte);
parse->resultRelation = list_length(parse->rtable);
@@ -117,11 +118,11 @@ expand_security_quals(PlannerInfo *root, List *tlist)
rte->modifiedCols = NULL;
/*
- * For the most part, Vars referencing the original relation should
- * remain as they are, meaning that they pull OLD values from the
- * expanded RTE. But in the RETURNING list and in any WITH CHECK
- * OPTION quals, we want such Vars to represent NEW values, so
- * change them to reference the new RTE.
+ * For the most part, Vars referencing the original relation
+ * should remain as they are, meaning that they pull OLD values
+ * from the expanded RTE. But in the RETURNING list and in any
+ * WITH CHECK OPTION quals, we want such Vars to represent NEW
+ * values, so change them to reference the new RTE.
*/
ChangeVarNodes((Node *) parse->returningList, rt_index,
parse->resultRelation, 0);
@@ -141,7 +142,8 @@ expand_security_quals(PlannerInfo *root, List *tlist)
*/
while (rte->securityQuals != NIL)
{
- Node *qual = (Node *) linitial(rte->securityQuals);
+ Node *qual = (Node *) linitial(rte->securityQuals);
+
rte->securityQuals = list_delete_first(rte->securityQuals);
ChangeVarNodes(qual, rt_index, 1, 0);
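
Each trip through this loop peels one qual off the front of securityQuals and wraps the current RTE in a fresh subquery, so the first qual in the list ends up innermost and is enforced first. A string-building sketch of the resulting nesting (the qual names are invented):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* list order = enforcement order, innermost first */
        const char *quals[] = {"row_security_qual", "barrier_view_qual"};
        char query[256] = "SELECT * FROM t";

        for (int i = 0; i < 2; i++)
        {
            char next[256];

            snprintf(next, sizeof(next),
                     "SELECT * FROM (%s) s WHERE %s", query, quals[i]);
            strcpy(query, next);
        }
        /* SELECT * FROM (SELECT * FROM (SELECT * FROM t) s WHERE
         * row_security_qual) s WHERE barrier_view_qual */
        printf("%s\n", query);
        return 0;
    }
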
@@ -160,14 +162,14 @@ static void
expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
RangeTblEntry *rte, Node *qual)
{
- Query *parse = root->parse;
- Oid relid = rte->relid;
- Query *subquery;
- RangeTblEntry *subrte;
- RangeTblRef *subrtr;
- PlanRowMark *rc;
+ Query *parse = root->parse;
+ Oid relid = rte->relid;
+ Query *subquery;
+ RangeTblEntry *subrte;
+ RangeTblRef *subrtr;
+ PlanRowMark *rc;
security_barrier_replace_vars_context context;
- ListCell *cell;
+ ListCell *cell;
/*
* There should only be 2 possible cases:
@@ -182,6 +184,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
switch (rte->rtekind)
{
case RTE_RELATION:
+
/*
* Turn the relation RTE into a security barrier subquery RTE,
* moving all permissions checks down into the subquery.
@@ -204,7 +207,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
rte->relid = InvalidOid;
rte->subquery = subquery;
rte->security_barrier = true;
- rte->inh = false; /* must not be set for a subquery */
+ rte->inh = false; /* must not be set for a subquery */
/* the permissions checks have now been moved down */
rte->requiredPerms = 0;
@@ -219,9 +222,9 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
* Note that we can't push the user-defined quals down since they
* may include untrusted functions and that means that we will
* end up locking all rows which pass the securityQuals, even if
- * those rows don't pass the user-defined quals. This is currently
- * documented behavior, but it'd be nice to come up with a better
- * solution some day.
+ * those rows don't pass the user-defined quals. This is
+ * currently documented behavior, but it'd be nice to come up with
+ * a better solution some day.
*/
rc = get_plan_rowmark(root->rowMarks, rt_index);
if (rc != NULL)
@@ -277,6 +280,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
break;
case RTE_SUBQUERY:
+
/*
* Build a new subquery that includes all the same columns as the
* original subquery.
@@ -288,8 +292,8 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
foreach(cell, rte->subquery->targetList)
{
- TargetEntry *tle;
- Var *var;
+ TargetEntry *tle;
+ Var *var;
tle = (TargetEntry *) lfirst(cell);
var = makeVarFromTargetEntry(1, tle);
@@ -333,7 +337,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index,
* variable that needs to be exposed by the security barrier subquery RTE.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
static void
@@ -355,7 +359,7 @@ security_barrier_replace_vars(Node *node,
static bool
security_barrier_replace_vars_walker(Node *node,
- security_barrier_replace_vars_context *context)
+ security_barrier_replace_vars_context *context)
{
if (node == NULL)
return false;
@@ -405,7 +409,7 @@ security_barrier_replace_vars_walker(Node *node,
Form_pg_attribute att_tup;
att_tup = SystemAttributeDefinition(var->varattno,
- context->rel->rd_rel->relhasoids);
+ context->rel->rd_rel->relhasoids);
attname = NameStr(att_tup->attname);
}
else if (var->varattno == InvalidAttrNumber)
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index ee773b834e..4ab12e51df 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -4,7 +4,7 @@
* Routines to preprocess the parse tree target list
*
* For INSERT and UPDATE queries, the targetlist must contain an entry for
- * each attribute of the target relation in the correct order. For all query
+ * each attribute of the target relation in the correct order. For all query
* types, we may need to add junk tlist entries for Vars used in the RETURNING
* list and row ID information needed for SELECT FOR UPDATE locking and/or
* EvalPlanQual checking.
@@ -79,7 +79,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* Add necessary junk columns for rowmarked rels. These values are needed
* for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
- * rechecking. See comments for PlanRowMark in plannodes.h.
+ * rechecking. See comments for PlanRowMark in plannodes.h.
*/
foreach(lc, root->rowMarks)
{
@@ -144,7 +144,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* If the query has a RETURNING list, add resjunk entries for any Vars
* used in RETURNING that belong to other relations. We need to do this
- * to make these Vars available for the RETURNING calculation. Vars that
+ * to make these Vars available for the RETURNING calculation. Vars that
* belong to the result rel don't need to be added, because they will be
* made to refer to the actual heap tuple.
*/
@@ -252,9 +252,9 @@ expand_targetlist(List *tlist, int command_type,
* When generating a NULL constant for a dropped column, we label
* it INT4 (any other guaranteed-to-exist datatype would do as
* well). We can't label it with the dropped column's datatype
- * since that might not exist anymore. It does not really matter
+ * since that might not exist anymore. It does not really matter
* what we claim the type is, since NULL is NULL --- its
- * representation is datatype-independent. This could perhaps
+ * representation is datatype-independent. This could perhaps
* confuse code comparing the finished plan to the target
* relation, however.
*/
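
A minimal stand-in for the constant this comment describes (the Const struct here is an illustrative two-field reduction of the real parse node; the one borrowed fact is that INT4's pg_type OID is 23):

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int Oid;
    #define INT4OID 23

    typedef struct
    {
        Oid  consttype;
        bool constisnull;
    } Const;

    /* A NULL has no datatype-specific representation, so any
     * guaranteed-to-exist type label will do for a dropped column. */
    static Const null_for_dropped_column(void)
    {
        Const c = {INT4OID, true};

        return c;
    }

    int main(void)
    {
        Const c = null_for_dropped_column();

        printf("type oid %u, isnull %d\n", c.consttype, (int) c.constisnull);
        return 0;
    }
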
@@ -336,7 +336,7 @@ expand_targetlist(List *tlist, int command_type,
/*
* The remaining tlist entries should be resjunk; append them all to the
* end of the new tlist, making sure they have resnos higher than the last
- * real attribute. (Note: although the rewriter already did such
+ * real attribute. (Note: although the rewriter already did such
* renumbering, we have to do it again here in case we are doing an UPDATE
* in a table with dropped columns, or an inheritance child table with
* extra columns.)
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index cdf541d34d..0410fddc54 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -6,14 +6,14 @@
*
* There are two code paths in the planner for set-operation queries.
* If a subquery consists entirely of simple UNION ALL operations, it
- * is converted into an "append relation". Otherwise, it is handled
+ * is converted into an "append relation". Otherwise, it is handled
* by the general code in this module (plan_set_operations and its
* subroutines). There is some support code here for the append-relation
* case, but most of the heavy lifting for that is done elsewhere,
* notably in prepjointree.c and allpaths.c.
*
* There is also some code here to support planning of queries that use
- * inheritance (SELECT FROM foo*). Inheritance trees are converted into
+ * inheritance (SELECT FROM foo*). Inheritance trees are converted into
* append relations, and thenceforth share code with the UNION ALL case.
*
*
@@ -577,7 +577,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
*
* The tlist for an Append plan isn't important as far as the Append is
* concerned, but we must make it look real anyway for the benefit of the
- * next plan level up. In fact, it has to be real enough that the flag
+ * next plan level up. In fact, it has to be real enough that the flag
* column is shown as a variable not a constant, else setrefs.c will get
* confused.
*/
@@ -970,7 +970,7 @@ generate_setop_tlist(List *colTypes, List *colCollations,
* Ensure the tlist entry's exposed collation matches the set-op. This
* is necessary because plan_set_operations() reports the result
* ordering as a list of SortGroupClauses, which don't carry collation
- * themselves but just refer to tlist entries. If we don't show the
+ * themselves but just refer to tlist entries. If we don't show the
* right collation then planner.c might do the wrong thing in
* higher-level queries.
*
@@ -1184,7 +1184,7 @@ generate_setop_grouplist(SetOperationStmt *op, List *targetlist)
/*
* expand_inherited_tables
* Expand each rangetable entry that represents an inheritance set
- * into an "append relation". At the conclusion of this process,
+ * into an "append relation". At the conclusion of this process,
* the "inh" flag is set in all and only those RTEs that are append
* relation parents.
*/
@@ -1216,7 +1216,7 @@ expand_inherited_tables(PlannerInfo *root)
* Check whether a rangetable entry represents an inheritance set.
* If so, add entries for all the child tables to the query's
* rangetable, and build AppendRelInfo nodes for all the child tables
- * and add them to root->append_rel_list. If not, clear the entry's
+ * and add them to root->append_rel_list. If not, clear the entry's
* "inh" flag to prevent later code from looking for AppendRelInfos.
*
* Note that the original RTE is considered to represent the whole
@@ -1527,7 +1527,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation,
* parent rel's attribute numbering to the child's.
*
* The only surprise here is that we don't translate a parent whole-row
- * reference into a child whole-row reference. That would mean requiring
+ * reference into a child whole-row reference. That would mean requiring
* permissions on all child columns, which is overly strict, since the
* query is really only going to reference the inherited columns. Instead
* we set the per-column bits for all inherited columns.
@@ -1708,6 +1708,7 @@ adjust_appendrel_attrs_mutator(Node *node,
foreach(lc, fields)
{
Var *field = (Var *) lfirst(lc);
+
field->varlevelsup += context->sublevels_up;
}
rowexpr = makeNode(RowExpr);
@@ -1887,7 +1888,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid)
*
* The expressions have already been fixed, but we have to make sure that
* the target resnos match the child table (they may not, in the case of
- * a column that was added after-the-fact by ALTER TABLE). In some cases
+ * a column that was added after-the-fact by ALTER TABLE). In some cases
* this can force us to re-order the tlist to preserve resno ordering.
* (We do all this work in special cases so that preptlist.c is fast for
* the typical case.)
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 3f307e6464..97dacaaac1 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -540,7 +540,7 @@ count_agg_clauses_walker(Node *node, count_agg_clauses_context *context)
/*
* If the transition type is pass-by-value then it doesn't add
- * anything to the required size of the hashtable. If it is
+ * anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of the
* value itself, plus palloc overhead.
*/
@@ -835,7 +835,7 @@ contain_subplans_walker(Node *node, void *context)
* Recursively search for mutable functions within a clause.
*
* Returns true if any mutable function (or operator implemented by a
- * mutable function) is found. This test is needed so that we don't
+ * mutable function) is found. This test is needed so that we don't
* mistakenly think that something like "WHERE random() < 0.5" can be treated
* as a constant qualification.
*
@@ -962,7 +962,7 @@ contain_mutable_functions_walker(Node *node, void *context)
* invalid conversions of volatile expressions into indexscan quals.
*
* We will recursively look into Query nodes (i.e., SubLink sub-selects)
- * but not into SubPlans. This is a bit odd, but intentional. If we are
+ * but not into SubPlans. This is a bit odd, but intentional. If we are
* looking at a SubLink, we are probably deciding whether a query tree
* transformation is safe, and a contained sub-select should affect that;
* for example, duplicating a sub-select containing a volatile function
@@ -1207,7 +1207,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context)
* The idea here is that the caller has verified that the expression contains
* one or more Var or Param nodes (as appropriate for the caller's need), and
* now wishes to prove that the expression result will be NULL if any of these
- * inputs is NULL. If we return false, then the proof succeeded.
+ * inputs is NULL. If we return false, then the proof succeeded.
*/
bool
contain_nonstrict_functions(Node *clause)
@@ -1326,7 +1326,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
* Recursively search for leaky functions within a clause.
*
* Returns true if any function call with side-effect may be present in the
- * clause. Qualifiers from outside the a security_barrier view should not
+ * clause. Qualifiers from outside a security_barrier view should not
* be pushed down into the view, lest the contents of tuples intended to be
* filtered out be revealed via side effects.
*/
@@ -1465,7 +1465,7 @@ contain_leaky_functions_walker(Node *node, void *context)
*
* Returns the set of all Relids that are referenced in the clause in such
* a way that the clause cannot possibly return TRUE if any of these Relids
- * is an all-NULL row. (It is OK to err on the side of conservatism; hence
+ * is an all-NULL row. (It is OK to err on the side of conservatism; hence
* the analysis here is simplistic.)
*
* The semantics here are subtly different from contain_nonstrict_functions:
@@ -1571,7 +1571,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable rels, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
@@ -1779,7 +1779,7 @@ find_nonnullable_vars_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable vars, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
@@ -2049,7 +2049,7 @@ is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK)
* variables of the current query level and no uses of volatile functions.
* Such an expr is not necessarily a true constant: it can still contain
* Params and outer-level Vars, not to mention functions whose results
- * may vary from one statement to the next. However, the expr's value
+ * may vary from one statement to the next. However, the expr's value
* will be constant over any one scan of the current query, so it can be
* used as, eg, an indexscan key.
*
@@ -2255,7 +2255,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
* expression tree, for example "2 + 2" => "4". More interestingly,
* we can reduce certain boolean expressions even when they contain
* non-constant subexpressions: "x OR true" => "true" no matter what
- * the subexpression x is. (XXX We assume that no such subexpression
+ * the subexpression x is. (XXX We assume that no such subexpression
* will have important side-effects, which is not necessarily a good
* assumption in the presence of user-defined functions; do we need a
* pg_proc flag that prevents discarding the execution of a function?)
@@ -2268,7 +2268,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
*
* Whenever a function is eliminated from the expression by means of
* constant-expression evaluation or inlining, we add the function to
- * root->glob->invalItems. This ensures the plan is known to depend on
+ * root->glob->invalItems. This ensures the plan is known to depend on
* such functions, even though they aren't referenced anymore.
*
* We assume that the tree has already been type-checked and contains
@@ -2451,7 +2451,7 @@ eval_const_expressions_mutator(Node *node,
/*
* Code for op/func reduction is pretty bulky, so split it out
- * as a separate function. Note: exprTypmod normally returns
+ * as a separate function. Note: exprTypmod normally returns
* -1 for a FuncExpr, but not when the node is recognizably a
* length coercion; we want to preserve the typmod in the
* eventual Const if so.
@@ -2495,7 +2495,7 @@ eval_const_expressions_mutator(Node *node,
OpExpr *newexpr;
/*
- * Need to get OID of underlying function. Okay to scribble
+ * Need to get OID of underlying function. Okay to scribble
* on input to this extent.
*/
set_opfuncid(expr);
@@ -2598,7 +2598,7 @@ eval_const_expressions_mutator(Node *node,
/* (NOT okay to try to inline it, though!) */
/*
- * Need to get OID of underlying function. Okay to
+ * Need to get OID of underlying function. Okay to
* scribble on input to this extent.
*/
set_opfuncid((OpExpr *) expr); /* rely on struct
@@ -2963,13 +2963,13 @@ eval_const_expressions_mutator(Node *node,
* TRUE: drop all remaining alternatives
* If the first non-FALSE alternative is a constant TRUE,
* we can simplify the entire CASE to that alternative's
- * expression. If there are no non-FALSE alternatives,
+ * expression. If there are no non-FALSE alternatives,
* we simplify the entire CASE to the default result (ELSE).
*
* If we have a simple-form CASE with constant test
* expression, we substitute the constant value for contained
* CaseTestExpr placeholder nodes, so that we have the
- * opportunity to reduce constant test conditions. For
+ * opportunity to reduce constant test conditions. For
* example this allows
* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
* to reduce to 1 rather than drawing a divide-by-0 error.
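
A standalone model of that simple-form reduction: folding selects the arm by comparing already-constant values, so the ELSE arm's 1/0 is never evaluated.

    #include <stdio.h>

    typedef struct
    {
        int when_value;         /* constant the test expr is compared to */
        int result;
    } CaseArm;

    static int fold_case(int testval, const CaseArm *arms, int narms,
                         int (*else_fn)(void))
    {
        for (int i = 0; i < narms; i++)
            if (arms[i].when_value == testval)
                return arms[i].result;
        return else_fn();       /* reached only when no arm matches */
    }

    static int divide_by_zero(void)
    {
        int zero = 0;

        return 1 / zero;        /* would trap if ever called */
    }

    int main(void)
    {
        CaseArm arms[] = {{0, 1}};

        /* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END  =>  1 */
        printf("%d\n", fold_case(0, arms, 1, divide_by_zero));
        return 0;
    }
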
@@ -3191,7 +3191,7 @@ eval_const_expressions_mutator(Node *node,
{
/*
* We can optimize field selection from a whole-row Var into a
- * simple Var. (This case won't be generated directly by the
+ * simple Var. (This case won't be generated directly by the
* parser, because ParseComplexProjection short-circuits it.
* But it can arise while simplifying functions.) Also, we
* can optimize field selection from a RowExpr construct.
@@ -3449,7 +3449,7 @@ simplify_or_arguments(List *args,
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
- * argument lists of a single OR operator. To avoid blowing out the stack
+ * argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
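
The to-do-list technique described above, in standalone form: nested ORs are flattened by prepending children to an explicit worklist rather than recursing, so arbitrarily deep trees cost heap rather than stack.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Node
    {
        int is_or;              /* 1 = OR over children, 0 = leaf */
        int leaf_value;
        struct Node *l, *r;
    } Node;

    typedef struct Todo
    {
        Node *node;
        struct Todo *next;
    } Todo;

    static Node *leaf(int v)
    {
        Node *n = calloc(1, sizeof(Node));

        n->leaf_value = v;
        return n;
    }

    static Node *or2(Node *l, Node *r)
    {
        Node *n = calloc(1, sizeof(Node));

        n->is_or = 1; n->l = l; n->r = r;
        return n;
    }

    static Todo *push(Todo *head, Node *n)
    {
        Todo *t = malloc(sizeof(Todo));

        t->node = n; t->next = head;
        return t;
    }

    int main(void)
    {
        /* ((1 OR 2) OR (3 OR 4)): nested input, flat output wanted */
        Node *tree = or2(or2(leaf(1), leaf(2)), or2(leaf(3), leaf(4)));
        Todo *todo = push(NULL, tree);

        while (todo)
        {
            Node *n = todo->node;
            Todo *spent = todo;

            todo = todo->next;
            free(spent);
            if (n->is_or)
            {
                /* prepend children instead of recursing; right first
                 * so the left child is processed next */
                todo = push(todo, n->r);
                todo = push(todo, n->l);
            }
            else
                printf("arg %d\n", n->leaf_value);
        }
        return 0;
    }
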
@@ -3497,7 +3497,7 @@ simplify_or_arguments(List *args,
}
/*
- * OK, we have a const-simplified non-OR argument. Process it per
+ * OK, we have a const-simplified non-OR argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
@@ -3732,7 +3732,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
* deliver a constant result, use a transform function to generate a
* substitute node tree, or expand in-line the body of the function
* definition (which only works for simple SQL-language functions, but
- * that is a common case). Each case needs access to the function's
+ * that is a common case). Each case needs access to the function's
* pg_proc tuple, so fetch it just once.
*
* Note: the allow_non_const flag suppresses both the second and third
@@ -3770,7 +3770,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
if (!newexpr && allow_non_const && OidIsValid(func_form->protransform))
{
/*
- * Build a dummy FuncExpr node containing the simplified arg list. We
+ * Build a dummy FuncExpr node containing the simplified arg list. We
* use this approach to present a uniform interface to the transform
* function regardless of how the function is actually being invoked.
*/
@@ -3978,7 +3978,7 @@ fetch_function_defaults(HeapTuple func_tuple)
*
* It is possible for some of the defaulted arguments to be polymorphic;
* therefore we can't assume that the default expressions have the correct
- * data types already. We have to re-resolve polymorphics and do coercion
+ * data types already. We have to re-resolve polymorphics and do coercion
* just like the parser did.
*
* This should be a no-op if there are no polymorphic arguments,
@@ -4141,7 +4141,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod,
* do not re-expand them. Also, if a parameter is used more than once
* in the SQL-function body, we require it not to contain any volatile
* functions (volatiles might deliver inconsistent answers) nor to be
- * unreasonably expensive to evaluate. The expensiveness check not only
+ * unreasonably expensive to evaluate. The expensiveness check not only
* prevents us from doing multiple evaluations of an expensive parameter
* at runtime, but is a safety valve to limit growth of an expression due
* to repeated inlining.
@@ -4184,7 +4184,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. (The nargs check is just paranoia.)
+ * properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -4262,7 +4262,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* We just do parsing and parse analysis, not rewriting, because rewriting
* will not affect table-free-SELECT-only queries, which is all that we
- * care about. Also, we can punt as soon as we detect more than one
+ * care about. Also, we can punt as soon as we detect more than one
* command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
@@ -4304,7 +4304,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* a RelabelType if needed to make the tlist expression match the declared
* type of the function.
*
@@ -4349,7 +4349,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* We may be able to do it; there are still checks on parameter usage to
* make, but those are most easily done in combination with the actual
- * substitution of the inputs. So start building expression with inputs
+ * substitution of the inputs. So start building expression with inputs
* substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
@@ -4549,7 +4549,7 @@ evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
fix_opfuncids((Node *) expr);
/*
- * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
+ * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
* because it'd result in recursively invoking eval_const_expressions.)
*/
exprstate = ExecInitExpr(expr, NULL);
@@ -4671,7 +4671,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
* Refuse to inline if the arguments contain any volatile functions or
* sub-selects. Volatile functions are rejected because inlining may
* result in the arguments being evaluated multiple times, risking a
- * change in behavior. Sub-selects are rejected partly for implementation
+ * change in behavior. Sub-selects are rejected partly for implementation
* reasons (pushing them down another level might change their behavior)
* and partly because they're likely to be expensive and so multiple
* evaluation would be bad.
@@ -4698,7 +4698,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. In particular it mustn't be declared STRICT, since we
+ * properties. In particular it mustn't be declared STRICT, since we
* couldn't enforce that. It also mustn't be VOLATILE, because that is
* supposed to cause it to be executed with its own snapshot, rather than
* sharing the snapshot of the calling query. (Rechecking proretset is
@@ -4728,9 +4728,9 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* When we call eval_const_expressions below, it might try to add items to
- * root->glob->invalItems. Since it is running in the temp context, those
+ * root->glob->invalItems. Since it is running in the temp context, those
* items will be in that context, and will need to be copied out if we're
- * successful. Temporarily reset the list so that we can keep those items
+ * successful. Temporarily reset the list so that we can keep those items
* separate from the pre-existing list contents.
*/
saveInvalItems = root->glob->invalItems;
@@ -4760,7 +4760,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Run eval_const_expressions on the function call. This is necessary to
* ensure that named-argument notation is converted to positional notation
- * and any default arguments are inserted. It's a bit of overkill for the
+ * and any default arguments are inserted. It's a bit of overkill for the
* arguments, since they'll get processed again later, but no harm will be
* done.
*/
@@ -4812,7 +4812,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* RelabelType(s) and/or NULL columns if needed to make the tlist
* expression(s) match the declared type of the function.
*
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index a6421580f9..0418946d71 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -83,7 +83,7 @@ have_relevant_joinclause(PlannerInfo *root,
* Add 'restrictinfo' to the joininfo list of each relation it requires.
*
* Note that the same copy of the restrictinfo node is linked to by all the
- * lists it is in. This allows us to exploit caching of information about
+ * lists it is in. This allows us to exploit caching of information about
* the restriction clause (but we must be careful that the information does
* not depend on context).
*
diff --git a/src/backend/optimizer/util/orclauses.c b/src/backend/optimizer/util/orclauses.c
index e9fd47bffb..9e954d0d35 100644
--- a/src/backend/optimizer/util/orclauses.c
+++ b/src/backend/optimizer/util/orclauses.c
@@ -50,7 +50,7 @@ static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel,
*
* The added quals are partially redundant with the original OR, and therefore
* would cause the size of the joinrel to be underestimated when it is finally
- * formed. (This would be true of a full transformation to CNF as well; the
+ * formed. (This would be true of a full transformation to CNF as well; the
* fault is not really in the transformation, but in clauselist_selectivity's
* inability to recognize redundant conditions.) We can compensate for this
* redundancy by changing the cached selectivity of the original OR clause,
@@ -60,10 +60,10 @@ static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel,
* and on the fact that the same RestrictInfo node will appear in every
* joininfo list that might be used when the joinrel is formed.
* And it doesn't work in cases where the size estimation is nonlinear
- * (i.e., outer and IN joins). But it beats not doing anything.
+ * (i.e., outer and IN joins). But it beats not doing anything.
*
* We examine each base relation to see if join clauses associated with it
- * contain extractable restriction conditions. If so, add those conditions
+ * contain extractable restriction conditions. If so, add those conditions
* to the rel's baserestrictinfo and update the cached selectivities of the
* join clauses. Note that the same join clause will be examined afresh
* from the point of view of each baserel that participates in it, so its
@@ -129,7 +129,7 @@ static bool
is_safe_restriction_clause_for(RestrictInfo *rinfo, RelOptInfo *rel)
{
/*
- * We want clauses that mention the rel, and only the rel. So in
+ * We want clauses that mention the rel, and only the rel. So in
* particular pseudoconstant clauses can be rejected quickly. Then check
* the clause's Var membership.
*/
@@ -168,7 +168,7 @@ extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel)
* in those nodes to make is_safe_restriction_clause_for()'s checks
* cheaper. We'll strip those nodes from the returned tree, though,
* meaning that fresh ones will be built if the clause is accepted as a
- * restriction clause. This might seem wasteful --- couldn't we re-use
+ * restriction clause. This might seem wasteful --- couldn't we re-use
* the existing RestrictInfos? But that'd require assuming that
* selectivity and other cached data is computed exactly the same way for
* a restriction clause as for a join clause, which seems undesirable.
@@ -193,7 +193,7 @@ extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel)
if (restriction_is_or_clause(rinfo))
{
/*
- * Recurse to deal with nested OR. Note we *must* recurse
+ * Recurse to deal with nested OR. Note we *must* recurse
* here, this isn't just overly-tense optimization: we
* have to descend far enough to find and strip all
* RestrictInfos in the expression.
@@ -314,7 +314,7 @@ consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel,
SpecialJoinInfo sjinfo;
/*
- * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare
+ * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare
* approx_tuple_count() in costsize.c.)
*/
sjinfo.type = T_SpecialJoinInfo;
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index b79af7af4e..4e05dcd246 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -127,11 +127,11 @@ compare_fractional_path_costs(Path *path1, Path *path2,
*
* The fuzz_factor argument must be 1.0 plus delta, where delta is the
* fraction of the smaller cost that is considered to be a significant
- * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
+ * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
* be 1% of the smaller cost.
*
* The two paths are said to have "equal" costs if both startup and total
- * costs are fuzzily the same. Path1 is said to be better than path2 if
+ * costs are fuzzily the same. Path1 is said to be better than path2 if
* it has fuzzily better startup cost and fuzzily no worse total cost,
* or if it has fuzzily better total cost and fuzzily no worse startup cost.
* Path2 is better than path1 if the reverse holds. Finally, if one path
@@ -207,12 +207,12 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor,
*
* cheapest_total_path is normally the cheapest-total-cost unparameterized
* path; but if there are no unparameterized paths, we assign it to be the
- * best (cheapest least-parameterized) parameterized path. However, only
+ * best (cheapest least-parameterized) parameterized path. However, only
* unparameterized paths are considered candidates for cheapest_startup_path,
* so that will be NULL if there are no unparameterized paths.
*
* The cheapest_parameterized_paths list collects all parameterized paths
- * that have survived the add_path() tournament for this relation. (Since
+ * that have survived the add_path() tournament for this relation. (Since
* add_path ignores pathkeys and startup cost for a parameterized path,
* these will be paths that have best total cost or best row count for their
* parameterization.) cheapest_parameterized_paths always includes the
@@ -431,7 +431,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
p1_next = lnext(p1);
/*
- * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
+ * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
* percentage need to be user-configurable?)
*/
costcmp = compare_path_costs_fuzzily(new_path, old_path, 1.01,
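
A standalone rendering of that 1% rule over bare (startup, total) cost pairs (illustrative; the real function works on Path nodes, and its verdict is only one input to add_path's pathkeys and parameterization logic):

    #include <stdio.h>

    typedef struct
    {
        double startup_cost;
        double total_cost;
    } Cost2;

    typedef enum { COSTS_EQUAL, COSTS_BETTER1, COSTS_BETTER2,
                   COSTS_DIFFERENT } CostCmp;

    /* "fuzzily greater": more than fuzz_factor times the other cost */
    static int fuzzy_gt(double a, double b, double fuzz) { return a > b * fuzz; }

    static CostCmp compare_fuzzily(Cost2 p1, Cost2 p2, double fuzz_factor)
    {
        if (fuzzy_gt(p1.total_cost, p2.total_cost, fuzz_factor))
        {
            if (!fuzzy_gt(p2.startup_cost, p1.startup_cost, fuzz_factor))
                return COSTS_BETTER2;   /* cheaper total, startup no worse */
            return COSTS_DIFFERENT;     /* each wins on one measure */
        }
        if (fuzzy_gt(p2.total_cost, p1.total_cost, fuzz_factor))
        {
            if (!fuzzy_gt(p1.startup_cost, p2.startup_cost, fuzz_factor))
                return COSTS_BETTER1;
            return COSTS_DIFFERENT;
        }
        /* total costs fuzzily equal: let startup cost decide */
        if (fuzzy_gt(p1.startup_cost, p2.startup_cost, fuzz_factor))
            return COSTS_BETTER2;
        if (fuzzy_gt(p2.startup_cost, p1.startup_cost, fuzz_factor))
            return COSTS_BETTER1;
        return COSTS_EQUAL;
    }

    int main(void)
    {
        Cost2 a = {10.0, 100.0};
        Cost2 b = {10.05, 100.5};   /* within 1% on both measures */

        printf("%d\n", compare_fuzzily(a, b, 1.01) == COSTS_EQUAL);
        return 0;
    }
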
@@ -607,7 +607,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
* and have lower bounds for its costs.
*
* Note that we do not know the path's rowcount, since getting an estimate for
- * that is too expensive to do before prechecking. We assume here that paths
+ * that is too expensive to do before prechecking. We assume here that paths
* of a superset parameterization will generate fewer rows; if that holds,
* then paths with different parameterizations cannot dominate each other
* and so we can simply ignore existing paths of another parameterization.
@@ -907,7 +907,7 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer)
* Compute rows and costs as sums of subplan rows and costs. We charge
* nothing extra for the Append itself, which perhaps is too optimistic,
* but since it doesn't do any selection or projection, it is a pretty
- * cheap node. If you change this, see also make_append().
+ * cheap node. If you change this, see also make_append().
*/
pathnode->path.rows = 0;
pathnode->path.startup_cost = 0;
@@ -1456,7 +1456,7 @@ translate_sub_tlist(List *tlist, int relid)
*
* colnos is an integer list of output column numbers (resno's). We are
* interested in whether rows consisting of just these columns are certain
- * to be distinct. "Distinctness" is defined according to whether the
+ * to be distinct. "Distinctness" is defined according to whether the
* corresponding upper-level equality operators listed in opids would think
* the values are distinct. (Note: the opids entries could be cross-type
* operators, and thus not exactly the equality operators that the subquery
@@ -1577,7 +1577,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
* distinct_col_search - subroutine for query_is_distinct_for
*
* If colno is in colnos, return the corresponding element of opids,
- * else return InvalidOid. (We expect colnos does not contain duplicates,
+ * else return InvalidOid. (We expect colnos does not contain duplicates,
* so the result is well-defined.)
*/
static Oid
@@ -1977,10 +1977,10 @@ create_hashjoin_path(PlannerInfo *root,
/*
* A hashjoin never has pathkeys, since its output ordering is
- * unpredictable due to possible batching. XXX If the inner relation is
+ * unpredictable due to possible batching. XXX If the inner relation is
* small enough, we could instruct the executor that it must not batch,
* and then we could assume that the output inherits the outer relation's
- * ordering, which might save a sort step. However there is considerable
+ * ordering, which might save a sort step. However there is considerable
* downside if our estimate of the inner relation size is badly off. For
* the moment we don't risk it. (Note also that if we wanted to take this
* seriously, joinpath.c would have to consider many more paths for the
@@ -2007,7 +2007,7 @@ create_hashjoin_path(PlannerInfo *root,
* same parameterization level, ensuring that they all enforce the same set
* of join quals (and thus that that parameterization can be attributed to
* an append path built from such paths). Currently, only a few path types
- * are supported here, though more could be added at need. We return NULL
+ * are supported here, though more could be added at need. We return NULL
* if we can't reparameterize the given path.
*
* Note: we intentionally do not pass created paths to add_path(); it would
@@ -2039,7 +2039,7 @@ reparameterize_path(PlannerInfo *root, Path *path,
/*
* We can't use create_index_path directly, and would not want
* to because it would re-compute the indexqual conditions
- * which is wasted effort. Instead we hack things a bit:
+ * which is wasted effort. Instead we hack things a bit:
* flat-copy the path node, revise its param_info, and redo
* the cost estimate.
*/
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
index 1172d24b9a..8d7c4feca4 100644
--- a/src/backend/optimizer/util/placeholder.c
+++ b/src/backend/optimizer/util/placeholder.c
@@ -60,7 +60,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels)
* We build PlaceHolderInfos only for PHVs that are still present in the
* simplified query passed to query_planner().
*
- * Note: this should only be called after query_planner() has started. Also,
+ * Note: this should only be called after query_planner() has started. Also,
* create_new_ph must not be TRUE after deconstruct_jointree begins, because
* make_outerjoininfo assumes that we already know about all placeholders.
*/
@@ -94,7 +94,7 @@ find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv,
/*
* Any referenced rels that are outside the PHV's syntactic scope are
* LATERAL references, which should be included in ph_lateral but not in
- * ph_eval_at. If no referenced rels are within the syntactic scope,
+ * ph_eval_at. If no referenced rels are within the syntactic scope,
* force evaluation at the syntactic location.
*/
rels_used = pull_varnos((Node *) phv->phexpr);
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 73ba2f60b2..b2becfa676 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -427,12 +427,12 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* minimum size estimate of 10 pages. The idea here is to avoid
* assuming a newly-created table is really small, even if it
* currently is, because that may not be true once some data gets
- * loaded into it. Once a vacuum or analyze cycle has been done
+ * loaded into it. Once a vacuum or analyze cycle has been done
* on it, it's more reasonable to believe the size is somewhat
* stable.
*
* (Note that this is only an issue if the plan gets cached and
- * used again after the table has been filled. What we're trying
+ * used again after the table has been filled. What we're trying
* to avoid is using a nestloop-type plan on a table that has
* grown substantially since the plan was made. Normally,
* autovacuum/autoanalyze will occur once enough inserts have
@@ -441,7 +441,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* such as temporary tables.)
*
* We approximate "never vacuumed" by "has relpages = 0", which
- * means this will also fire on genuinely empty relations. Not
+ * means this will also fire on genuinely empty relations. Not
* great, but fortunately that's a seldom-seen case in the real
* world, and it shouldn't degrade the quality of the plan too
* much anyway to err in this direction.
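
The never-vacuumed heuristic, reduced to a standalone helper (names illustrative; the backend also exempts cases autovacuum cannot help, such as temp tables, which this sketch omits): relpages == 0 is read as "never vacuumed or analyzed", and a tiny current size is bumped to the 10-page floor.

    #include <stdio.h>

    typedef unsigned int BlockNumber;

    /* Don't trust a tiny current size for a table that looks brand
     * new; it may be about to be filled. */
    static BlockNumber clamp_rel_pages(BlockNumber curpages,
                                       BlockNumber relpages)
    {
        if (curpages < 10 && relpages == 0)
            return 10;
        return curpages;
    }

    int main(void)
    {
        printf("%u\n", clamp_rel_pages(0, 0));    /* 10: never vacuumed */
        printf("%u\n", clamp_rel_pages(3, 3));    /* 3: vacuumed, trust it */
        printf("%u\n", clamp_rel_pages(500, 0));  /* 500: already large */
        return 0;
    }
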
@@ -786,7 +786,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
return false;
/*
- * OK to fetch the constraint expressions. Include "col IS NOT NULL"
+ * OK to fetch the constraint expressions. Include "col IS NOT NULL"
* expressions for attnotnull columns, in case we can refute those.
*/
constraint_pred = get_relation_constraints(root, rte->relid, rel, true);
@@ -834,7 +834,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
* Exception: if there are any dropped columns, we punt and return NIL.
* Ideally we would like to handle the dropped-column case too. However this
* creates problems for ExecTypeFromTL, which may be asked to build a tupdesc
- * for a tlist that includes vars of no-longer-existent types. In theory we
+ * for a tlist that includes vars of no-longer-existent types. In theory we
* could dig out the required info from the pg_attribute entries of the
* relation, but that data is not readily available to ExecTypeFromTL.
* For now, we don't apply the physical-tlist optimization when there are
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index eadd2d5104..9d61a4d71c 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -133,7 +133,7 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -191,7 +191,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -225,7 +225,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
- * An "atom" is anything other than an AND or OR node. Notice that we don't
+ * An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
@@ -658,7 +658,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
* We cannot make the stronger conclusion that B is refuted if B
* implies A's arg; that would only prove that B is not-TRUE, not
* that it's not NULL either. Hence use equal() rather than
- * predicate_implied_by_recurse(). We could do the latter if we
+ * predicate_implied_by_recurse(). We could do the latter if we
* ever had a need for the weak form of refutation.
*/
not_arg = extract_strong_not_arg(clause);
@@ -820,7 +820,7 @@ predicate_classify(Node *clause, PredIterInfo info)
}
/*
- * PredIterInfo routines for iterating over regular Lists. The iteration
+ * PredIterInfo routines for iterating over regular Lists. The iteration
* state variable is the next ListCell to visit.
*/
static void
@@ -1014,13 +1014,13 @@ arrayexpr_cleanup_fn(PredIterInfo info)
* implies another:
*
* A simple and general way is to see if they are equal(); this works for any
- * kind of expression. (Actually, there is an implied assumption that the
+ * kind of expression. (Actually, there is an implied assumption that the
* functions in the expression are immutable, ie dependent only on their input
* arguments --- but this was checked for the predicate by the caller.)
*
* When the predicate is of the form "foo IS NOT NULL", we can conclude that
* the predicate is implied if the clause is a strict operator or function
- * that has "foo" as an input. In this case the clause must yield NULL when
+ * that has "foo" as an input. In this case the clause must yield NULL when
* "foo" is NULL, which we can take as equivalent to FALSE because we know
* we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is
* already known immutable, so the clause will certainly always fail.)
@@ -1244,7 +1244,7 @@ list_member_strip(List *list, Expr *datum)
*
* The strategy numbers defined by btree indexes (see access/skey.h) are:
* (1) < (2) <= (3) = (4) >= (5) >
- * and in addition we use (6) to represent <>. <> is not a btree-indexable
+ * and in addition we use (6) to represent <>. <> is not a btree-indexable
* operator, but we assume here that if an equality operator of a btree
* opfamily has a negator operator, the negator behaves as <> for the opfamily.
* (This convention is also known to get_op_btree_interpretation().)
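
The strategy-number machinery, reduced to the equal-constants case as a standalone table plus a brute-force consistency check (the real BT_implic_table/BT_refute_table are keyed on both operators and a comparison of the two constants): with the numbering (1) < (2) <= (3) = (4) >= (5) > (6) <>, "x OP1 c" implies "x OP2 c" per this table.

    #include <assert.h>
    #include <stdio.h>

    enum { LT = 1, LE, EQ, GE, GT, NE };    /* btree strategies + 6 for <> */

    /* implies[clause][pred]: does "x CLAUSE c" imply "x PRED c"
     * for the same constant c?  Index 0 is padding. */
    static const int implies[7][7] = {
        /*          LT LE EQ GE GT NE */
        [LT] = {0,  1, 1, 0, 0, 0, 1},  /* x<c  => x<c, x<=c, x<>c */
        [LE] = {0,  0, 1, 0, 0, 0, 0},  /* x<=c => x<=c            */
        [EQ] = {0,  0, 1, 1, 1, 0, 0},  /* x=c  => x<=c, x=c, x>=c */
        [GE] = {0,  0, 0, 0, 1, 0, 0},  /* x>=c => x>=c            */
        [GT] = {0,  0, 0, 0, 1, 1, 1},  /* x>c  => x>=c, x>c, x<>c */
        [NE] = {0,  0, 0, 0, 0, 0, 1},  /* x<>c => x<>c            */
    };

    int main(void)
    {
        int c = 5;

        /* spot-check against direct evaluation near the constant */
        for (int x = c - 2; x <= c + 2; x++)
        {
            int truth[7] = {0, x < c, x <= c, x == c, x >= c, x > c, x != c};

            for (int cl = LT; cl <= NE; cl++)
                for (int pr = LT; pr <= NE; pr++)
                    if (implies[cl][pr] && truth[cl])
                        assert(truth[pr]);
        }
        printf("implication table consistent\n");
        return 0;
    }
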
@@ -1328,7 +1328,7 @@ static const StrategyNumber BT_refute_table[6][6] = {
* if not able to prove it.
*
* What we look for here is binary boolean opclauses of the form
- * "foo op constant", where "foo" is the same in both clauses. The operators
+ * "foo op constant", where "foo" is the same in both clauses. The operators
* and constants can be different but the operators must be in the same btree
* operator family. We use the above operator implication tables to
* derive implications between nonidentical clauses. (Note: "foo" is known
@@ -1418,7 +1418,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
/*
* Check for matching subexpressions on the non-Const sides. We used to
* only allow a simple Var, but it's about as easy to allow any
- * expression. Remember we already know that the pred expression does not
+ * expression. Remember we already know that the pred expression does not
* contain any non-immutable functions, so identical expressions should
* yield identical results.
*/
@@ -1690,7 +1690,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
* Last check: test_op must be immutable.
*
* Note that we require only the test_op to be immutable, not the
- * original clause_op. (pred_op is assumed to have been checked
+ * original clause_op. (pred_op is assumed to have been checked
* immutable by the caller.) Essentially we are assuming that the
* opfamily is consistent even if it contains operators that are
* merely stable.
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 8ae8f55121..c938c2700f 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -262,7 +262,7 @@ RelOptInfo *
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
- * Switch to using hash lookup when list grows "too long". The threshold
+ * Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
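
The same switch-when-long pattern in standalone form (threshold, table size, and key type all illustrative): scan a short collection linearly, but pay for a hash table once it outgrows a fixed cutoff.

    #include <stdio.h>
    #include <stdlib.h>

    #define LIST_THRESHOLD 32       /* arbitrary cutoff, as above */
    #define TABLE_SIZE 128

    typedef struct
    {
        int keys[TABLE_SIZE];
        int vals[TABLE_SIZE];
        int used[TABLE_SIZE];
    } Hash;

    static void hash_put(Hash *h, int key, int val)
    {
        unsigned slot = (unsigned) key % TABLE_SIZE;

        while (h->used[slot])                   /* linear probing */
            slot = (slot + 1) % TABLE_SIZE;
        h->used[slot] = 1; h->keys[slot] = key; h->vals[slot] = val;
    }

    static int *hash_get(Hash *h, int key)
    {
        unsigned slot = (unsigned) key % TABLE_SIZE;

        while (h->used[slot])
        {
            if (h->keys[slot] == key)
                return &h->vals[slot];
            slot = (slot + 1) % TABLE_SIZE;
        }
        return NULL;
    }

    int main(void)
    {
        int keys[64], vals[64], n = 64;
        Hash *h = NULL;

        for (int i = 0; i < n; i++) { keys[i] = i * 7; vals[i] = i; }

        /* build the hash table only once the list is "too long" */
        if (n > LIST_THRESHOLD)
        {
            h = calloc(1, sizeof(Hash));
            for (int i = 0; i < n; i++)
                hash_put(h, keys[i], vals[i]);
        }

        int lookup = 21 * 7;
        if (h)
            printf("hash: %d\n", *hash_get(h, lookup));
        else
            for (int i = 0; i < n; i++)
                if (keys[i] == lookup) { printf("list: %d\n", vals[i]); break; }

        free(h);
        return 0;
    }
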
@@ -448,7 +448,7 @@ build_join_rel(PlannerInfo *root,
/*
* Also, if dynamic-programming join search is active, add the new joinrel
- * to the appropriate sublist. Note: you might think the Assert on number
+ * to the appropriate sublist. Note: you might think the Assert on number
* of members should be for equality, but some of the level 1 rels might
* have been joinrels already, so we can only assert <=.
*/
@@ -529,7 +529,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* the join list need only be computed once for any join RelOptInfo.
* The join list is fully determined by the set of rels making up the
* joinrel, so we should get the same results (up to ordering) from any
- * candidate pair of sub-relations. But the restriction list is whatever
+ * candidate pair of sub-relations. But the restriction list is whatever
* is not handled in the sub-relations, so it depends on which
* sub-relations are considered.
*
@@ -538,7 +538,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* we put it into the joininfo list for the joinrel. Otherwise,
* the clause is now a restrict clause for the joined relation, and we
* return it to the caller of build_joinrel_restrictlist() to be stored in
- * join paths made from this pair of sub-relations. (It will not need to
+ * join paths made from this pair of sub-relations. (It will not need to
* be considered further up the join tree.)
*
* In many cases we will find the same RestrictInfos in both input
@@ -557,7 +557,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
*
* NB: Formerly, we made deep(!) copies of each input RestrictInfo to pass
* up to the join relation. I believe this is no longer necessary, because
- * RestrictInfo nodes are no longer context-dependent. Instead, just include
+ * RestrictInfo nodes are no longer context-dependent. Instead, just include
* the original nodes in the lists made for the join relation.
*/
static List *
@@ -577,7 +577,7 @@ build_joinrel_restrictlist(PlannerInfo *root,
result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result);
/*
- * Add on any clauses derived from EquivalenceClasses. These cannot be
+ * Add on any clauses derived from EquivalenceClasses. These cannot be
* redundant with the clauses in the joininfo lists, so don't bother
* checking.
*/
@@ -945,7 +945,7 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
*restrict_clauses);
/*
- * And now we can build the ParamPathInfo. No point in saving the
+ * And now we can build the ParamPathInfo. No point in saving the
* input-pair-dependent clause list, though.
*
* Note: in GEQO mode, we'll be called in a temporary memory context, but
@@ -965,8 +965,8 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
* Get the ParamPathInfo for a parameterized path for an append relation.
*
* For an append relation, the rowcount estimate will just be the sum of
- * the estimates for its children. However, we still need a ParamPathInfo
- * to flag the fact that the path requires parameters. So this just creates
+ * the estimates for its children. However, we still need a ParamPathInfo
+ * to flag the fact that the path requires parameters. So this just creates
* a suitable struct with zero ppi_rows (and no ppi_clauses either, since
* the Append node isn't responsible for checking quals).
*/
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 62de590523..e861ce6657 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -210,7 +210,7 @@ make_restrictinfo_internal(Expr *clause,
/*
* Fill in all the cacheable fields with "not yet set" markers. None of
- * these will be computed until/unless needed. Note in particular that we
+ * these will be computed until/unless needed. Note in particular that we
* don't mark a binary opclause as mergejoinable or hashjoinable here;
* that happens only if it appears in the right context (top level of a
* joinclause list).
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index 5e26f3b57e..f1f1be1b7f 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -26,7 +26,7 @@
/*
* tlist_member
* Finds the (first) member of the given tlist whose expression is
- * equal() to the given expression. Result is NULL if no such member.
+ * equal() to the given expression. Result is NULL if no such member.
*/
TargetEntry *
tlist_member(Node *node, List *targetlist)
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index d629fcd90d..d4f46b8d46 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -165,7 +165,7 @@ pull_varnos_walker(Node *node, pull_varnos_context *context)
* lower than that if it references only a subset of the rels in its
* syntactic scope. It might also contain lateral references, but we
* should ignore such references when computing the set of varnos in
- * an expression tree. Also, if the PHV contains no variables within
+ * an expression tree. Also, if the PHV contains no variables within
* its syntactic scope, it will be forced to be evaluated exactly at
* the syntactic scope, so take that as the relid set.
*/
@@ -364,7 +364,7 @@ contain_var_clause_walker(Node *node, void *context)
*
* Returns true if any such Var found.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_of_level(Node *node, int levelsup)
@@ -424,10 +424,10 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up)
* Find the parse location of any Var of the specified query level.
*
* Returns -1 if no such Var is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*
* Note: it might seem appropriate to merge this functionality into
* contain_vars_of_level, but that would complicate that function's API.
@@ -514,7 +514,7 @@ locate_var_of_level_walker(Node *node,
* Upper-level vars (with varlevelsup > 0) should not be seen here,
* likewise for upper-level Aggrefs and PlaceHolderVars.
*
- * Returns list of nodes found. Note the nodes themselves are not
+ * Returns list of nodes found. Note the nodes themselves are not
* copied, only referenced.
*
* Does not examine subqueries, therefore must only be used after reduction
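The "not copied, only referenced" behavior of pull_var_clause can be illustrated with a toy tree walker; NodeSketch and pull_vars_sketch below are hypothetical, self-contained stand-ins rather than backend code:

    #include <stddef.h>

    typedef struct NodeSketch
    {
        int         is_var;         /* leaf playing the role of a Var */
        struct NodeSketch *left;
        struct NodeSketch *right;
    } NodeSketch;

    /*
     * Collect pointers to every var-like leaf into out[]; the result
     * references nodes of the original tree rather than copying them.
     * Returns the updated count.
     */
    int
    pull_vars_sketch(NodeSketch *node, NodeSketch **out, int n)
    {
        if (node == NULL)
            return n;
        if (node->is_var)
            out[n++] = node;        /* reference, not a copy */
        n = pull_vars_sketch(node->left, out, n);
        n = pull_vars_sketch(node->right, out, n);
        return n;
    }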
@@ -591,7 +591,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
* flatten_join_alias_vars
* Replace Vars that reference JOIN outputs with references to the original
* relation variables instead. This allows quals involving such vars to be
- * pushed down. Whole-row Vars that reference JOIN relations are expanded
+ * pushed down. Whole-row Vars that reference JOIN relations are expanded
* into RowExpr constructs that name the individual output Vars. This
* is necessary since we will not scan the JOIN as a base relation, which
* is the only way that the executor can directly handle whole-row Vars.
@@ -603,7 +603,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
* entries might now be arbitrary expressions, not just Vars. This affects
* this function in one important way: we might find ourselves inserting
* SubLink expressions into subqueries, and we must make sure that their
- * Query.hasSubLinks fields get set to TRUE if so. If there are any
+ * Query.hasSubLinks fields get set to TRUE if so. If there are any
* SubLinks in the join alias lists, the outer Query should already have
* hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries.
*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 7225bb62ab..fb6c44c11c 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -79,7 +79,7 @@ static void transformLockingClause(ParseState *pstate, Query *qry,
* Optionally, information about $n parameter types can be supplied.
* References to $n indexes not defined by paramTypes[] are disallowed.
*
- * The result is a Query node. Optimizable statements require considerable
+ * The result is a Query node. Optimizable statements require considerable
* transformation, while utility-type statements are simply hung off
* a dummy CMD_UTILITY Query node.
*/
@@ -457,7 +457,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
/*
* If a non-nil rangetable/namespace was passed in, and we are doing
* INSERT/SELECT, arrange to pass the rangetable/namespace down to the
- * SELECT. This can only happen if we are inside a CREATE RULE, and in
+ * SELECT. This can only happen if we are inside a CREATE RULE, and in
* that case we want the rule's OLD and NEW rtable entries to appear as
* part of the SELECT's rtable, not as outer references for it. (Kluge!)
* The SELECT's joinlist is not affected however. We must do this before
@@ -642,7 +642,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* We must assign collations now because assign_query_collations
* doesn't process rangetable entries. We just assign all the
* collations independently in each row, and don't worry about
- * whether they are consistent vertically. The outer INSERT query
+ * whether they are consistent vertically. The outer INSERT query
* isn't going to care about the collations of the VALUES columns,
* so it's not worth the effort to identify a common collation for
* each one here. (But note this does have one user-visible
@@ -691,7 +691,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
else
{
/*
- * Process INSERT ... VALUES with a single VALUES sublist. We treat
+ * Process INSERT ... VALUES with a single VALUES sublist. We treat
* this case separately for efficiency. The sublist is just computed
* directly as the Query's targetlist, with no VALUES RTE. So it
* works just like a SELECT without any FROM.
@@ -789,7 +789,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* Check length of expr list. It must not have more expressions than
* there are target columns. We allow fewer, but only if no explicit
* columns list was given (the remaining columns are implicitly
- * defaulted). Note we must check this *after* transformation because
+ * defaulted). Note we must check this *after* transformation because
* that could expand '*' into multiple items.
*/
if (list_length(exprlist) > list_length(icolumns))
@@ -859,7 +859,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* return -1 if expression isn't a RowExpr or a Var referencing one.
*
* This is currently used only for hint purposes, so we aren't terribly
- * tense about recognizing all possible cases. The Var case is interesting
+ * tense about recognizing all possible cases. The Var case is interesting
* because that's what we'll get in the INSERT ... SELECT (...) case.
*/
static int
@@ -1191,7 +1191,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
/*
* Ordinarily there can't be any current-level Vars in the expression
* lists, because the namespace was empty ... but if we're inside CREATE
- * RULE, then NEW/OLD references might appear. In that case we have to
+ * RULE, then NEW/OLD references might appear. In that case we have to
* mark the VALUES RTE as LATERAL.
*/
if (pstate->p_rtable != NIL &&
@@ -1234,11 +1234,11 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
if (stmt->lockingClause)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s cannot be applied to VALUES",
LCS_asString(((LockingClause *)
- linitial(stmt->lockingClause))->strength))));
+ linitial(stmt->lockingClause))->strength))));
qry->rtable = pstate->p_rtable;
qry->jointree = makeFromExpr(pstate->p_joinlist, NULL);
@@ -1329,8 +1329,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
if (lockingClause)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
LCS_asString(((LockingClause *)
linitial(lockingClause))->strength))));
@@ -1413,7 +1413,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* As a first step towards supporting sort clauses that are expressions
* using the output columns, generate a namespace entry that makes the
- * output columns visible. A Join RTE node is handy for this, since we
+ * output columns visible. A Join RTE node is handy for this, since we
* can easily control the Vars generated upon matches.
*
* Note: we don't yet do anything useful with such cases, but at least
@@ -1493,7 +1493,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
* Recursively transform leaves and internal nodes of a set-op tree
*
* In addition to returning the transformed node, if targetlist isn't NULL
- * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
+ * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
* set-op node these are the actual targetlist entries; otherwise they are
* dummy entries created to carry the type, typmod, collation, and location
* (for error messages) of each output column of the set-op node. This info
@@ -1527,16 +1527,16 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
if (stmt->lockingClause)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
LCS_asString(((LockingClause *)
- linitial(stmt->lockingClause))->strength))));
+ linitial(stmt->lockingClause))->strength))));
/*
* If an internal node of a set-op tree has ORDER BY, LIMIT, FOR UPDATE,
* or WITH clauses attached, we need to treat it like a leaf node to
- * generate an independent sub-Query tree. Otherwise, it can be
+ * generate an independent sub-Query tree. Otherwise, it can be
* represented by a SetOperationStmt node underneath the parent Query.
*/
if (stmt->op == SETOP_NONE)
@@ -1712,7 +1712,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
rescoltypmod = -1;
/*
- * Verify the coercions are actually possible. If not, we'd fail
+ * Verify the coercions are actually possible. If not, we'd fail
* later anyway, but we want to fail now while we have sufficient
* context to produce an error cursor position.
*
@@ -1721,7 +1721,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
* child query's semantics.
*
* If a child expression is an UNKNOWN-type Const or Param, we
- * want to replace it with the coerced expression. This can only
+ * want to replace it with the coerced expression. This can only
* happen when the child is a leaf set-op node. It's safe to
* replace the expression because if the child query's semantics
* depended on the type of this output column, it'd have already
@@ -2113,8 +2113,8 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt)
if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_HOLD))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("DECLARE CURSOR WITH HOLD ... %s is not supported",
LCS_asString(((RowMarkClause *)
linitial(result->rowMarks))->strength)),
@@ -2124,8 +2124,8 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt)
if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_SCROLL))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("DECLARE SCROLL CURSOR ... %s is not supported",
LCS_asString(((RowMarkClause *)
linitial(result->rowMarks))->strength)),
@@ -2135,8 +2135,8 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt)
if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_INSENSITIVE))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("DECLARE INSENSITIVE CURSOR ... %s is not supported",
LCS_asString(((RowMarkClause *)
linitial(result->rowMarks))->strength)),
@@ -2220,7 +2220,7 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt)
/*
* A materialized view would either need to save parameters for use in
- * maintaining/loading the data or prohibit them entirely. The latter
+ * maintaining/loading the data or prohibit them entirely. The latter
* seems safer and more sane.
*/
if (query_contains_extern_params(query))
@@ -2272,7 +2272,7 @@ LCS_asString(LockClauseStrength strength)
case LCS_FORUPDATE:
return "FOR UPDATE";
}
- return "FOR some"; /* shouldn't happen */
+ return "FOR some"; /* shouldn't happen */
}
/*
@@ -2286,50 +2286,50 @@ CheckSelectLocking(Query *qry, LockClauseStrength strength)
if (qry->setOperations)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
LCS_asString(strength))));
if (qry->distinctClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with DISTINCT clause",
LCS_asString(strength))));
if (qry->groupClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with GROUP BY clause",
LCS_asString(strength))));
if (qry->havingQual != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with HAVING clause",
LCS_asString(strength))));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with aggregate functions",
LCS_asString(strength))));
if (qry->hasWindowFuncs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with window functions",
LCS_asString(strength))));
if (expression_returns_set((Node *) qry->targetList))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s is not allowed with set-returning functions in the target list",
LCS_asString(strength))));
}
@@ -2407,8 +2407,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
if (thisrel->catalogname || thisrel->schemaname)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s must specify unqualified relation names",
LCS_asString(lc->strength)),
parser_errposition(pstate, thisrel->location)));
@@ -2440,8 +2440,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
case RTE_JOIN:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s cannot be applied to a join",
LCS_asString(lc->strength)),
parser_errposition(pstate, thisrel->location)));
@@ -2449,17 +2449,17 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
case RTE_FUNCTION:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
- errmsg("%s cannot be applied to a function",
- LCS_asString(lc->strength)),
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
+ errmsg("%s cannot be applied to a function",
+ LCS_asString(lc->strength)),
parser_errposition(pstate, thisrel->location)));
break;
case RTE_VALUES:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("%s cannot be applied to VALUES",
LCS_asString(lc->strength)),
parser_errposition(pstate, thisrel->location)));
@@ -2467,10 +2467,10 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
case RTE_CTE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
- errmsg("%s cannot be applied to a WITH query",
- LCS_asString(lc->strength)),
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
+ errmsg("%s cannot be applied to a WITH query",
+ LCS_asString(lc->strength)),
parser_errposition(pstate, thisrel->location)));
break;
default:
@@ -2484,8 +2484,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
if (rt == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- /*------
- translator: %s is a SQL row locking clause such as FOR UPDATE */
+ /*------
+ translator: %s is a SQL row locking clause such as FOR UPDATE */
errmsg("relation \"%s\" in %s clause not found in FROM clause",
thisrel->relname,
LCS_asString(lc->strength)),
diff --git a/src/backend/parser/kwlookup.c b/src/backend/parser/kwlookup.c
index 5b28ddecce..af05aa70ea 100644
--- a/src/backend/parser/kwlookup.c
+++ b/src/backend/parser/kwlookup.c
@@ -52,7 +52,7 @@ ScanKeywordLookup(const char *text,
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
* produce the wrong translation in some locales (eg, Turkish).
*/
for (i = 0; i < len; i++)
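The locale hazard mentioned here (tolower() mapping 'I' to a dotless i under Turkish rules) is why the loop uses a hand-rolled mapping. A self-contained sketch of ASCII-only downcasing, assuming a simple demo rather than the backend's actual buffer handling:

    #include <stdio.h>

    /* Map only A-Z, so no locale setting can interfere. */
    char
    ascii_tolower_sketch(char ch)
    {
        if (ch >= 'A' && ch <= 'Z')
            ch += 'a' - 'A';
        return ch;
    }

    int
    main(void)
    {
        char        word[] = "SELECT";

        for (int i = 0; word[i] != '\0'; i++)
            word[i] = ascii_tolower_sketch(word[i]);
        printf("%s\n", word);       /* prints "select" */
        return 0;
    }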
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 9af43d2a32..c984b7d5e4 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -437,7 +437,7 @@ check_agg_arguments(ParseState *pstate,
/*
* Now check for vars/aggs in the direct arguments, and throw error if
- * needed. Note that we allow a Var of the agg's semantic level, but not
+ * needed. Note that we allow a Var of the agg's semantic level, but not
* an Agg of that level. In principle such Aggs could probably be
* supported, but it would create an ordering dependency among the
* aggregates at execution time. Since the case appears neither to be
@@ -815,7 +815,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* If there are join alias vars involved, we have to flatten them to the
* underlying vars, so that aliased and unaliased vars will be correctly
- * taken as equal. We can skip the expense of doing this if no rangetable
+ * taken as equal. We can skip the expense of doing this if no rangetable
* entries are RTE_JOIN kind. We use the planner's flatten_join_alias_vars
* routine to do the flattening; it wants a PlannerInfo root node, which
* fortunately can be mostly dummy.
@@ -853,7 +853,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
*
* Note: because we check resjunk tlist elements as well as regular ones,
* this will also find ungrouped variables that came from ORDER BY and
- * WINDOW clauses. For that matter, it's also going to examine the
+ * WINDOW clauses. For that matter, it's also going to examine the
* grouping expressions themselves --- but they'll all pass the test ...
*/
clause = (Node *) qry->targetList;
@@ -984,7 +984,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* If we have an ungrouped Var of the original query level, we have a
* failure. Vars below the original query level are not a problem, and
- * neither are Vars from above it. (If such Vars are ungrouped as far as
+ * neither are Vars from above it. (If such Vars are ungrouped as far as
* their own query level is concerned, that's someone else's problem...)
*/
if (IsA(node, Var))
@@ -1015,7 +1015,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* Check whether the Var is known functionally dependent on the GROUP
- * BY columns. If so, we can allow the Var to be used, because the
+ * BY columns. If so, we can allow the Var to be used, because the
* grouping is really a no-op for this table. However, this deduction
* depends on one or more constraints of the table, so we have to add
* those constraints to the query's constraintDeps list, because it's
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index aa704bb441..fcee1379c0 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -149,7 +149,7 @@ transformFromClause(ParseState *pstate, List *frmList)
*
* If alsoSource is true, add the target to the query's joinlist and
* namespace. For INSERT, we don't want the target to be joined to;
- * it's a destination of tuples, not a source. For UPDATE/DELETE,
+ * it's a destination of tuples, not a source. For UPDATE/DELETE,
* we do need to scan or join the target. (NOTE: we do not bother
* to check for namespace conflict; we assume that the namespace was
* initially empty in these cases.)
@@ -219,7 +219,7 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
* Simplify InhOption (yes/no/default) into boolean yes/no.
*
* The reason we do things this way is that we don't want to examine the
- * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
+ * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
* we'd do the wrong thing with query strings that intermix SET commands
* with queries.
*/
@@ -396,7 +396,7 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j, List *namespace)
/*
* The namespace that the join expression should see is just the two
* subtrees of the JOIN plus any outer references from upper pstate
- * levels. Temporarily set this pstate's namespace accordingly. (We need
+ * levels. Temporarily set this pstate's namespace accordingly. (We need
* not check for refname conflicts, because transformFromClauseItem()
* already did.) All namespace items are marked visible regardless of
* LATERAL state.
@@ -490,7 +490,7 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
pstate->p_expr_kind = EXPR_KIND_NONE;
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(query, Query) ||
@@ -526,7 +526,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* We make lateral_only names of this level visible, whether or not the
- * RangeFunction is explicitly marked LATERAL. This is needed for SQL
+ * RangeFunction is explicitly marked LATERAL. This is needed for SQL
* spec compliance in the case of UNNEST(), and seems useful on
* convenience grounds for all functions in FROM.
*
@@ -546,7 +546,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* node types.
*
* We have to get this info now, because FigureColname only works on raw
- * parsetrees. Actually deciding what to do with the names is left up to
+ * parsetrees. Actually deciding what to do with the names is left up to
* addRangeTableEntryForFunction.
*
* Likewise, collect column definition lists if there were any. But
@@ -570,7 +570,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* other ways of implementing the SQL-standard UNNEST() syntax.
*
* If there is any decoration (including a coldeflist), we don't
- * transform, which probably means a no-such-function error later. We
+ * transform, which probably means a no-such-function error later. We
* could alternatively throw an error right now, but that doesn't seem
* tremendously helpful. If someone is using any such decoration,
* then they're not using the SQL-standard syntax, and they're more
@@ -682,7 +682,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("WITH ORDINALITY cannot be used with a column definition list"),
- errhint("Put the column definition list inside ROWS FROM()."),
+ errhint("Put the column definition list inside ROWS FROM()."),
parser_errposition(pstate,
exprLocation((Node *) r->coldeflist))));
@@ -721,10 +721,10 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* (We could extract this from the function return node, but it saves cycles
* to pass it back separately.)
*
- * *top_rti: receives the rangetable index of top_rte. (Ditto.)
+ * *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *namespace: receives a List of ParseNamespaceItems for the RTEs exposed
- * as table/column names by this item. (The lateral_only flags in these items
+ * as table/column names by this item. (The lateral_only flags in these items
* are indeterminate and should be explicitly set by the caller before use.)
*/
static Node *
@@ -837,7 +837,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
* right side, by temporarily adding them to the pstate's namespace
* list. Per SQL:2008, if the join type is not INNER or LEFT then the
* left-side names must still be exposed, but it's an error to
- * reference them. (Stupid design, but that's what it says.) Hence,
+ * reference them. (Stupid design, but that's what it says.) Hence,
* we always push them into the namespace, but mark them as not
* lateral_ok if the jointype is wrong.
*
@@ -1101,7 +1101,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
*
* Note: if there are nested alias-less JOINs, the lower-level ones
* will remain in the list although they have neither p_rel_visible
- * nor p_cols_visible set. We could delete such list items, but it's
+ * nor p_cols_visible set. We could delete such list items, but it's
* unclear that it's worth expending cycles to do so.
*/
if (j->alias != NULL)
@@ -1438,9 +1438,9 @@ checkTargetlistEntrySQL92(ParseState *pstate, TargetEntry *tle,
*
* This function supports the old SQL92 ORDER BY interpretation, where the
* expression is an output column name or number. If we fail to find a
- * match of that sort, we fall through to the SQL99 rules. For historical
+ * match of that sort, we fall through to the SQL99 rules. For historical
* reasons, Postgres also allows this interpretation for GROUP BY, though
- * the standard never did. However, for GROUP BY we prefer a SQL99 match.
+ * the standard never did. However, for GROUP BY we prefer a SQL99 match.
* This function is *not* used for WINDOW definitions.
*
* node the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched
@@ -1458,7 +1458,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
*
* 1. Bare ColumnName (no qualifier or subscripts)
* For a bare identifier, we search for a matching column name
- * in the existing target list. Multiple matches are an error
+ * in the existing target list. Multiple matches are an error
* unless they refer to identical values; for example,
* we allow SELECT a, a FROM table ORDER BY a
* but not SELECT a AS b, b FROM table ORDER BY b
@@ -1467,7 +1467,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
* For GROUP BY, it is incorrect to match the grouping item against
* targetlist entries: according to SQL92, an identifier in GROUP BY
* is a reference to a column name exposed by FROM, not to a target
- * list column. However, many implementations (including pre-7.0
+ * list column. However, many implementations (including pre-7.0
* PostgreSQL) accept this anyway. So for GROUP BY, we look first
* to see if the identifier matches any FROM column name, and only
* try for a targetlist name if it doesn't. This ensures that we
@@ -1625,7 +1625,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
/*
* Convert the untransformed node to a transformed expression, and search
* for a match in the tlist. NOTE: it doesn't really matter whether there
- * is more than one match. Also, we are willing to match an existing
+ * is more than one match. Also, we are willing to match an existing
* resjunk target here, though the SQL92 cases above must ignore resjunk
* targets.
*/
@@ -1653,7 +1653,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
/*
* If no matches, construct a new target entry which is appended to the
- * end of the target list. This target is given resjunk = TRUE so that it
+ * end of the target list. This target is given resjunk = TRUE so that it
* will not be projected into the final tuple.
*/
target_result = transformTargetEntry(pstate, node, expr, exprKind,
@@ -1864,7 +1864,7 @@ transformWindowDefinitions(ParseState *pstate,
* <window clause> syntax rule 10 and general rule 1. The frame
* clause rule is especially bizarre because it makes "OVER foo"
* different from "OVER (foo)", and requires the latter to throw an
- * error if foo has a nondefault frame clause. Well, ours not to
+ * error if foo has a nondefault frame clause. Well, ours not to
* reason why, but we do go out of our way to throw a useful error
* message for such cases.
*/
@@ -1967,7 +1967,7 @@ transformDistinctClause(ParseState *pstate,
/*
* The distinctClause should consist of all ORDER BY items followed by all
- * other non-resjunk targetlist items. There must not be any resjunk
+ * other non-resjunk targetlist items. There must not be any resjunk
* ORDER BY items --- that would imply that we are sorting by a value that
* isn't necessarily unique within a DISTINCT group, so the results
* wouldn't be well-defined. This construction ensures we follow the rule
@@ -2023,7 +2023,7 @@ transformDistinctClause(ParseState *pstate,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
is_agg ?
- errmsg("an aggregate with DISTINCT must have at least one argument") :
+ errmsg("an aggregate with DISTINCT must have at least one argument") :
errmsg("SELECT DISTINCT must have at least one column")));
return result;
@@ -2104,7 +2104,7 @@ transformDistinctOnClause(ParseState *pstate, List *distinctlist,
/*
* Now add any remaining DISTINCT ON items, using default sort/group
- * semantics for their data types. (Note: this is pretty questionable; if
+ * semantics for their data types. (Note: this is pretty questionable; if
* the ORDER BY list doesn't include all the DISTINCT ON items and more
* besides, you certainly aren't using DISTINCT ON in the intended way,
* and you probably aren't going to get consistent results. It might be
@@ -2131,7 +2131,8 @@ transformDistinctOnClause(ParseState *pstate, List *distinctlist,
}
/*
- * An empty result list is impossible here because of grammar restrictions.
+ * An empty result list is impossible here because of grammar
+ * restrictions.
*/
Assert(result != NIL);
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 854d723221..8416d3675b 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -56,12 +56,12 @@ static bool typeIsOfTypedTable(Oid reltypeId, Oid reloftypeId);
* Convert an expression to a target type and typmod.
*
* This is the general-purpose entry point for arbitrary type coercion
- * operations. Direct use of the component operations can_coerce_type,
+ * operations. Direct use of the component operations can_coerce_type,
* coerce_type, and coerce_type_typmod should be restricted to special
* cases (eg, when the conversion is expected to succeed).
*
* Returns the possibly-transformed expression tree, or NULL if the type
- * conversion is not possible. (We do this, rather than ereport'ing directly,
+ * conversion is not possible. (We do this, rather than ereport'ing directly,
* so that callers can generate custom error messages indicating context.)
*
* pstate - parse state (can be NULL, see coerce_type)
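The return-NULL-instead-of-ereport contract lets each caller word its own error. A toy model of that division of labor, with coerce_sketch as a hypothetical coercion routine:

    #include <stdio.h>
    #include <string.h>

    /* Toy coercion: only int -> double is possible in this model. */
    const char *
    coerce_sketch(const char *srctype, const char *dsttype)
    {
        if (strcmp(srctype, "int") == 0 && strcmp(dsttype, "double") == 0)
            return "int::double";
        return NULL;                /* not possible: let the caller complain */
    }

    int
    main(void)
    {
        /* The caller owns the error message, so it can name its own
         * context (a DEFAULT expression, a CHECK constraint, etc.). */
        if (coerce_sketch("text", "double") == NULL)
            fprintf(stderr, "DEFAULT clause: cannot cast text to double\n");
        return 0;
    }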
@@ -145,7 +145,7 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
* already be properly coerced to the specified typmod.
*
* pstate is only used in the case that we are able to resolve the type of
- * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
+ * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
* caller does not want type information updated for Params.
*
* Note: this function must not modify the given expression tree, only add
@@ -175,7 +175,7 @@ coerce_type(ParseState *pstate, Node *node,
*
* Note: by returning the unmodified node here, we are saying that
* it's OK to treat an UNKNOWN constant as a valid input for a
- * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
+ * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
* all right, since an UNKNOWN value is still a perfectly valid Datum.
*
* NB: we do NOT want a RelabelType here: the exposed type of the
@@ -250,7 +250,7 @@ coerce_type(ParseState *pstate, Node *node,
/*
* If the target type is a domain, we want to call its base type's
- * input routine, not domain_in(). This is to avoid premature failure
+ * input routine, not domain_in(). This is to avoid premature failure
* when the domain applies a typmod: existing input routines follow
* implicit-coercion semantics for length checks, which is not always
* what we want here. The needed check will be applied properly
@@ -263,7 +263,7 @@ coerce_type(ParseState *pstate, Node *node,
* For most types we pass typmod -1 to the input routine, because
* existing input routines follow implicit-coercion semantics for
* length checks, which is not always what we want here. Any length
- * constraint will be applied later by our caller. An exception
+ * constraint will be applied later by our caller. An exception
* however is the INTERVAL type, for which we *must* pass the typmod
* or it won't be able to obey the bizarre SQL-spec input rules. (Ugly
* as sin, but so is this part of the spec...)
@@ -343,7 +343,7 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* If we have a COLLATE clause, we have to push the coercion
- * underneath the COLLATE. This is really ugly, but there is little
+ * underneath the COLLATE. This is really ugly, but there is little
* choice because the above hacks on Consts and Params wouldn't happen
* otherwise. This kluge has consequences in coerce_to_target_type.
*/
@@ -366,7 +366,7 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* Generate an expression tree representing run-time application
- * of the conversion function. If we are dealing with a domain
+ * of the conversion function. If we are dealing with a domain
* target type, the conversion function will yield the base type,
* and we need to extract the correct typmod to use from the
* domain's typtypmod.
@@ -402,7 +402,7 @@ coerce_type(ParseState *pstate, Node *node,
* to have the intended type when inspected by higher-level code.
*
* Also, domains may have value restrictions beyond the base type
- * that must be accounted for. If the destination is a domain
+ * that must be accounted for. If the destination is a domain
* then we won't need a RelabelType node.
*/
result = coerce_to_domain(node, InvalidOid, -1, targetTypeId,
@@ -649,7 +649,7 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId,
}
/*
- * Now build the domain coercion node. This represents run-time checking
+ * Now build the domain coercion node. This represents run-time checking
* of any constraints currently attached to the domain. This also ensures
* that the expression is properly labeled as to result type.
*/
@@ -722,7 +722,7 @@ coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod,
* Mark a coercion node as IMPLICIT so it will never be displayed by
* ruleutils.c. We use this when we generate a nest of coercion nodes
* to implement what is logically one conversion; the inner nodes are
- * forced to IMPLICIT_CAST format. This does not change their semantics,
+ * forced to IMPLICIT_CAST format. This does not change their semantics,
* only display behavior.
*
* It is caller error to call this on something that doesn't have a
@@ -1181,7 +1181,7 @@ select_common_type(ParseState *pstate, List *exprs, const char *context,
}
/*
- * Nope, so set up for the full algorithm. Note that at this point, lc
+ * Nope, so set up for the full algorithm. Note that at this point, lc
* points to the first list item with type different from pexpr's; we need
* not re-examine any items the previous loop advanced over.
*/
@@ -1476,7 +1476,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
*
* If any polymorphic pseudotype is used in a function's arguments or
* return type, we make sure the actual data types are consistent with
- * each other. The argument consistency rules are shown above for
+ * each other. The argument consistency rules are shown above for
* check_generic_type_consistency().
*
* If we have UNKNOWN input (ie, an untyped literal) for any polymorphic
@@ -1498,7 +1498,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
* impossible to determine the range type from the subtype alone.)
* 4) If return type is ANYARRAY, but no argument is ANYARRAY or ANYELEMENT,
* generate an error. Similarly, if return type is ANYRANGE, but no
- * argument is ANYRANGE, generate an error. (These conditions are
+ * argument is ANYRANGE, generate an error. (These conditions are
* prevented by CREATE FUNCTION and therefore are not expected here.)
* 5) If return type is ANYELEMENT, and any argument is ANYELEMENT, use the
* argument's actual type as the function's return type.
@@ -1508,7 +1508,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
* type or the range type's corresponding subtype (or both, in which case
* they must match).
* 7) If return type is ANYELEMENT, no argument is ANYELEMENT, ANYARRAY, or
- * ANYRANGE, generate an error. (This condition is prevented by CREATE
+ * ANYRANGE, generate an error. (This condition is prevented by CREATE
* FUNCTION and therefore is not expected here.)
* 8) ANYENUM is treated the same as ANYELEMENT except that if it is used
* (alone or in combination with plain ANYELEMENT), we add the extra
@@ -1525,14 +1525,14 @@ check_generic_type_consistency(Oid *actual_arg_types,
*
* When allow_poly is false, we are not expecting any of the actual_arg_types
* to be polymorphic, and we should not return a polymorphic result type
- * either. When allow_poly is true, it is okay to have polymorphic "actual"
+ * either. When allow_poly is true, it is okay to have polymorphic "actual"
* arg types, and we can return ANYARRAY, ANYRANGE, or ANYELEMENT as the
- * result. (This case is currently used only to check compatibility of an
+ * result. (This case is currently used only to check compatibility of an
* aggregate's declaration with the underlying transfn.)
*
* A special case is that we could see ANYARRAY as an actual_arg_type even
* when allow_poly is false (this is possible only because pg_statistic has
- * columns shown as anyarray in the catalogs). We allow this to match a
+ * columns shown as anyarray in the catalogs). We allow this to match a
* declared ANYARRAY argument, but only if there is no ANYELEMENT argument
* or result (since we can't determine a specific element type to match to
* ANYELEMENT). Note this means that functions taking ANYARRAY had better
@@ -1638,7 +1638,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
/*
* Fast Track: if none of the arguments are polymorphic, return the
- * unmodified rettype. We assume it can't be polymorphic either.
+ * unmodified rettype. We assume it can't be polymorphic either.
*/
if (!have_generics)
return rettype;
@@ -1981,8 +1981,8 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* Check if srctype is binary-coercible to targettype.
*
* This notion allows us to cheat and directly exchange values without
- * going through the trouble of calling a conversion function. Note that
- * in general, this should only be an implementation shortcut. Before 7.4,
+ * going through the trouble of calling a conversion function. Note that
+ * in general, this should only be an implementation shortcut. Before 7.4,
* this was also used as a heuristic for resolving overloaded functions and
* operators, but that's basically a bad idea.
*
@@ -1995,7 +1995,7 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* types.
*
* This function replaces IsBinaryCompatible(), which was an inherently
- * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
+ * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
* the order of the operands is now significant.
*/
bool
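Because the cast catalog is consulted as ordered (source, target) pairs, coercibility is one-way unless both directions are entered. A sketch with a hypothetical cast table (the pairs shown are illustrative, not a dump of pg_cast):

    #include <stdbool.h>
    #include <string.h>

    /* One-way cast table: (source, target) order is significant. */
    static const char *binary_casts_sketch[][2] = {
        {"varchar", "text"},        /* varchar -> text needs no conversion */
        {"xml", "text"},            /* xml -> text is free, but there is no
                                     * {"text", "xml"} entry: that direction
                                     * must validate the document */
    };

    bool
    is_binary_coercible_sketch(const char *src, const char *dst)
    {
        int         n = sizeof(binary_casts_sketch) / sizeof(binary_casts_sketch[0]);

        for (int i = 0; i < n; i++)
        {
            if (strcmp(binary_casts_sketch[i][0], src) == 0 &&
                strcmp(binary_casts_sketch[i][1], dst) == 0)
                return true;
        }
        return false;
    }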
@@ -2181,7 +2181,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
* Hack: disallow coercions to oidvector and int2vector, which
* otherwise tend to capture coercions that should go to "real" array
* types. We want those types to be considered "real" arrays for many
- * purposes, but not this one. (Also, ArrayCoerceExpr isn't
+ * purposes, but not this one. (Also, ArrayCoerceExpr isn't
* guaranteed to produce an output that meets the restrictions of
* these datatypes, such as being 1-dimensional.)
*/
diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c
index aa30864fc2..bbd10304cc 100644
--- a/src/backend/parser/parse_collate.c
+++ b/src/backend/parser/parse_collate.c
@@ -14,19 +14,19 @@
* 1. The output collation of each expression node, or InvalidOid if it
* returns a noncollatable data type. This can also be InvalidOid if the
* result type is collatable but the collation is indeterminate.
- * 2. The collation to be used in executing each function. InvalidOid means
+ * 2. The collation to be used in executing each function. InvalidOid means
* that there are no collatable inputs or their collation is indeterminate.
* This value is only stored in node types that might call collation-using
* functions.
*
* You might think we could get away with storing only one collation per
- * node, but the two concepts really need to be kept distinct. Otherwise
+ * node, but the two concepts really need to be kept distinct. Otherwise
* it's too confusing when a function produces a collatable output type but
* has no collatable inputs or produces noncollatable output from collatable
* inputs.
*
* Cases with indeterminate collation might result in an error being thrown
- * at runtime. If we knew exactly which functions require collation
+ * at runtime. If we knew exactly which functions require collation
* information, we could throw those errors at parse time instead.
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
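The two collation slots described above can be pictured as two independent fields on every expression node. A simplified stand-in follows, with 0 playing the role of InvalidOid:

    typedef unsigned int OidSketch;

    typedef struct ExprCollationsSketch
    {
        OidSketch   result_collation;   /* collation of the node's output, or 0
                                         * if noncollatable or indeterminate */
        OidSketch   exec_collation;     /* collation its function should use at
                                         * runtime, or 0 if no collatable input */
    } ExprCollationsSketch;

    /*
     * Why both?  'foo' = 'bar' returns boolean, so result_collation is 0,
     * yet the comparison itself still runs under some text collation, so
     * exec_collation is not.  Conversely, a function with no collatable
     * inputs can still return a collatable type whose output collation
     * stays indeterminate.
     */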
@@ -245,7 +245,7 @@ select_common_collation(ParseState *pstate, List *exprs, bool none_ok)
* Recursive guts of collation processing.
*
* Nodes with no children (eg, Vars, Consts, Params) must have been marked
- * when built. All upper-level nodes are marked here.
+ * when built. All upper-level nodes are marked here.
*
* Note: if this is invoked directly on a List, it will attempt to infer a
* common collation for all the list members. In particular, it will throw
@@ -448,7 +448,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
/*
* TargetEntry can have only one child, and should bubble that
- * state up to its parent. We can't use the general-case code
+ * state up to its parent. We can't use the general-case code
* below because exprType and friends don't work on TargetEntry.
*/
collation = loccontext.collation;
@@ -463,7 +463,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
* There are some cases where there might not be a failure, for
* example if the planner chooses to use hash aggregation instead
* of sorting for grouping; but it seems better to predictably
- * throw an error. (Compare transformSetOperationTree, which will
+ * throw an error. (Compare transformSetOperationTree, which will
* throw error for indeterminate collation of set-op columns, even
* though the planner might be able to implement the set-op
* without sorting.)
@@ -501,7 +501,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
* SubLink. Act as though the Query returns its first output
* column, which indeed is what it does for EXPR_SUBLINK and
* ARRAY_SUBLINK cases. In the cases where the SubLink
- * returns boolean, this info will be ignored. Special case:
+ * returns boolean, this info will be ignored. Special case:
* in EXISTS, the Query might return no columns, in which case
* we need do nothing.
*
@@ -961,7 +961,7 @@ assign_hypothetical_collations(Aggref *aggref,
/*
* Assign collations internally in this pair of expressions, then
- * choose a common collation for them. This should match
+ * choose a common collation for them. This should match
* select_common_collation(), but we can't use that function as-is
* because we need access to the whole collation state so we can
* bubble it up to the aggregate function's level.
diff --git a/src/backend/parser/parse_cte.c b/src/backend/parser/parse_cte.c
index 76eb418d1b..04b585d1e2 100644
--- a/src/backend/parser/parse_cte.c
+++ b/src/backend/parser/parse_cte.c
@@ -181,7 +181,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
checkWellFormedRecursion(&cstate);
/*
- * Set up the ctenamespace for parse analysis. Per spec, all the WITH
+ * Set up the ctenamespace for parse analysis. Per spec, all the WITH
* items are visible to all others, so stuff them all in before parse
* analysis. We build the list in safe processing order so that the
* planner can process the queries in sequence.
@@ -207,7 +207,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
{
/*
* For non-recursive WITH, just analyze each CTE in sequence and then
- * add it to the ctenamespace. This corresponds to the spec's
+ * add it to the ctenamespace. This corresponds to the spec's
* definition of the scope of each WITH name. However, to allow error
* reports to be aware of the possibility of an erroneous reference,
* we maintain a list in p_future_ctes of the not-yet-visible CTEs.
@@ -245,7 +245,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
cte->ctequery = (Node *) query;
/*
- * Check that we got something reasonable. These first two cases should
+ * Check that we got something reasonable. These first two cases should
* be prevented by the grammar.
*/
if (!IsA(query, Query))
@@ -393,7 +393,7 @@ analyzeCTETargetList(ParseState *pstate, CommonTableExpr *cte, List *tlist)
/*
* If the CTE is recursive, force the exposed column type of any
- * "unknown" column to "text". This corresponds to the fact that
+ * "unknown" column to "text". This corresponds to the fact that
* SELECT 'foo' UNION SELECT 'bar' will ultimately produce text. We
* might see "unknown" as a result of an untyped literal in the
* non-recursive term's select list, and if we don't convert to text
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 81c9338054..088224573f 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -506,7 +506,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
} crerr = CRERR_NO_COLUMN;
/*
- * Give the PreParseColumnRefHook, if any, first shot. If it returns
+ * Give the PreParseColumnRefHook, if any, first shot. If it returns
* non-null then that's all, folks.
*/
if (pstate->p_pre_columnref_hook != NULL)
@@ -577,7 +577,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
}
/*
- * Try to find the name as a relation. Note that only
+ * Try to find the name as a relation. Note that only
* relations already entered into the rangetable will be
* recognized.
*
@@ -808,7 +808,7 @@ transformParamRef(ParseState *pstate, ParamRef *pref)
Node *result;
/*
- * The core parser knows nothing about Params. If a hook is supplied,
+ * The core parser knows nothing about Params. If a hook is supplied,
* call it. If not, or if the hook returns NULL, throw a generic error.
*/
if (pstate->p_paramref_hook != NULL)
@@ -1108,7 +1108,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
* We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is only
* possible if there is a suitable array type available. If not, we fall
* back to a boolean condition tree with multiple copies of the lefthand
- * expression. Also, any IN-list items that contain Vars are handled as
+ * expression. Also, any IN-list items that contain Vars are handled as
* separate boolean conditions, because that gives the planner more scope
* for optimization on such clauses.
*
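The split described above -- constants gathered into one array condition, Var-bearing items kept as separate conditions -- can be sketched with a toy partitioner; InItemSketch and the printed SQL shapes are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct InItemSketch
    {
        const char *text;
        bool        contains_var;   /* would the real code find Vars inside? */
    } InItemSketch;

    void
    split_in_list_sketch(const InItemSketch *items, int n)
    {
        int         printed = 0;

        printf("x = ANY (ARRAY[");
        for (int i = 0; i < n; i++)
            if (!items[i].contains_var)
                printf(printed++ ? ", %s" : "%s", items[i].text);
        printf("])\n");

        for (int i = 0; i < n; i++)
            if (items[i].contains_var)
                printf("OR x = %s\n", items[i].text);
    }

    int
    main(void)
    {
        InItemSketch items[] = {{"1", false}, {"2", false}, {"y", true}};

        split_in_list_sketch(items, 3);     /* x IN (1, 2, y) */
        return 0;
    }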
@@ -1139,7 +1139,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
Oid array_type;
/*
- * Try to select a common type for the array elements. Note that
+ * Try to select a common type for the array elements. Note that
* since the LHS' type is first in the list, it will be preferred when
* there is doubt (eg, when all the RHS items are unknown literals).
*
@@ -1254,8 +1254,8 @@ transformFuncCall(ParseState *pstate, FuncCall *fn)
/*
* When WITHIN GROUP is used, we treat its ORDER BY expressions as
* additional arguments to the function, for purposes of function lookup
- * and argument type coercion. So, transform each such expression and add
- * them to the targs list. We don't explicitly mark where each argument
+ * and argument type coercion. So, transform each such expression and add
+ * them to the targs list. We don't explicitly mark where each argument
* came from, but ParseFuncOrColumn can tell what's what by reference to
* list_length(fn->agg_order).
*/
@@ -1510,7 +1510,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
qtree = parse_sub_analyze(sublink->subselect, pstate, NULL, false);
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(qtree, Query) ||
@@ -1925,7 +1925,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
newx->location = x->location;
/*
- * gram.y built the named args as a list of ResTarget. Transform each,
+ * gram.y built the named args as a list of ResTarget. Transform each,
* and break the names out as a separate list.
*/
newx->named_args = NIL;
@@ -2188,9 +2188,9 @@ transformWholeRowRef(ParseState *pstate, RangeTblEntry *rte, int location)
vnum = RTERangeTablePosn(pstate, rte, &sublevels_up);
/*
- * Build the appropriate referencing node. Note that if the RTE is a
+ * Build the appropriate referencing node. Note that if the RTE is a
* function returning scalar, we create just a plain reference to the
- * function value, not a composite containing a single column. This is
+ * function value, not a composite containing a single column. This is
* pretty inconsistent at first sight, but it's what we've done
* historically. One argument for it is that "rel" and "rel.*" mean the
* same thing for composite relations, so why not for scalar functions...
@@ -2374,7 +2374,7 @@ make_row_comparison_op(ParseState *pstate, List *opname,
/*
* Now we must determine which row comparison semantics (= <> < <= > >=)
- * apply to this set of operators. We look for btree opfamilies
+ * apply to this set of operators. We look for btree opfamilies
* containing the operators, and see which interpretations (strategy
* numbers) exist for each operator.
*/
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index cc4608417b..9ebd3fd43b 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -104,7 +104,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Most of the rest of the parser just assumes that functions do not have
- * more than FUNC_MAX_ARGS parameters. We have to test here to protect
+ * more than FUNC_MAX_ARGS parameters. We have to test here to protect
* against array overruns, etc. Of course, this may not be a function,
* but the test doesn't hurt.
*/
@@ -520,7 +520,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* If there are default arguments, we have to include their types in
* actual_arg_types for the purpose of checking generic type consistency.
* However, we do NOT put them into the generated parse node, because
- * their actual values might change before the query gets run. The
+ * their actual values might change before the query gets run. The
* planner has to insert the up-to-date values at plan time.
*/
nargsplusdefs = nargs;
@@ -653,7 +653,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (fargs == NIL && !agg_star && !agg_within_group)
ereport(ERROR,
@@ -672,7 +672,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* We might want to support named arguments later, but disallow it for
* now. We'd need to figure out the parsed representation (should the
* NamedArgExprs go above or below the TargetEntry nodes?) and then
- * teach the planner to reorder the list properly. Or maybe we could
+ * teach the planner to reorder the list properly. Or maybe we could
* make transformAggregateCall do that? However, if you'd also like
* to allow default arguments for aggregates, we'd need to do it in
* planning to avoid semantic problems.
@@ -717,7 +717,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (wfunc->winagg && fargs == NIL && !agg_star)
ereport(ERROR,
@@ -895,7 +895,7 @@ func_select_candidate(int nargs,
* matches" in the exact-match heuristic; it also makes it possible to do
* something useful with the type-category heuristics. Note that this
* makes it difficult, but not impossible, to use functions declared to
- * take a domain as an input datatype. Such a function will be selected
+ * take a domain as an input datatype. Such a function will be selected
* over the base-type function only if it is an exact match at all
* argument positions, and so was already chosen by our caller.
*
@@ -1019,7 +1019,7 @@ func_select_candidate(int nargs,
/*
* The next step examines each unknown argument position to see if we can
- * determine a "type category" for it. If any candidate has an input
+ * determine a "type category" for it. If any candidate has an input
* datatype of STRING category, use STRING category (this bias towards
* STRING is appropriate since unknown-type literals look like strings).
* Otherwise, if all the candidates agree on the type category of this
@@ -1030,7 +1030,7 @@ func_select_candidate(int nargs,
* the candidates takes a preferred datatype within the category.
*
* Having completed this examination, remove candidates that accept the
- * wrong category at any unknown position. Also, if at least one
+ * wrong category at any unknown position. Also, if at least one
* candidate accepted a preferred type at a position, remove candidates
* that accept non-preferred types. If just one candidate remains, return
* that one. However, if this rule turns out to reject all candidates,
@@ -1159,7 +1159,7 @@ func_select_candidate(int nargs,
* type, and see if that gives us a unique match. If so, use that match.
*
* NOTE: for a binary operator with one unknown and one non-unknown input,
- * we already tried this heuristic in binary_oper_exact(). However, that
+ * we already tried this heuristic in binary_oper_exact(). However, that
* code only finds exact matches, whereas here we will handle matches that
* involve coercion, polymorphic type resolution, etc.
*/
@@ -1328,7 +1328,7 @@ func_get_detail(List *funcname,
*
* NB: it's important that this code does not exceed what coerce_type
* can do, because the caller will try to apply coerce_type if we
- * return FUNCDETAIL_COERCION. If we return that result for something
+ * return FUNCDETAIL_COERCION. If we return that result for something
* coerce_type can't handle, we'll cause infinite recursion between
* this module and coerce_type!
*/
@@ -1506,7 +1506,7 @@ func_get_detail(List *funcname,
{
/*
* This is a bit tricky in named notation, since the supplied
- * arguments could replace any subset of the defaults. We
+ * arguments could replace any subset of the defaults. We
* work by making a bitmapset of the argnumbers of defaulted
* arguments, then scanning the defaults list and selecting
* the needed items. (This assumes that defaulted arguments
@@ -1751,7 +1751,7 @@ FuncNameAsType(List *funcname)
* ParseComplexProjection -
* handles function calls with a single argument that is of complex type.
* If the function call is actually a column projection, return a suitably
- * transformed expression tree. If not, return NULL.
+ * transformed expression tree. If not, return NULL.
*/
static Node *
ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
@@ -1825,7 +1825,7 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
* The result is something like "foo(integer)".
*
* If argnames isn't NIL, it is a list of C strings representing the actual
- * arg names for the last N arguments. This must be considered part of the
+ * arg names for the last N arguments. This must be considered part of the
* function signature too, when dealing with named-notation function calls.
*
* This is typically used in the construction of function-not-found error
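
Building a "foo(integer)" style signature as described above can be sketched with the backend's StringInfo machinery (funcname, nargs, and argtypes are assumed to be the surrounding function's parameters; the named-argument decoration is omitted):

    StringInfoData buf;
    int         i;

    initStringInfo(&buf);
    appendStringInfo(&buf, "%s(", funcname);
    for (i = 0; i < nargs; i++)
    {
        if (i > 0)
            appendStringInfoString(&buf, ", ");
        appendStringInfoString(&buf, format_type_be(argtypes[i]));
    }
    appendStringInfoChar(&buf, ')');
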
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index fc9e53a41d..1e3d1f68fa 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -99,8 +99,8 @@ free_parsestate(ParseState *pstate)
* is a dummy (always 0, in fact).
*
* The locations stored in raw parsetrees are byte offsets into the source
- * string. We have to convert them to 1-based character indexes for reporting
- * to clients. (We do things this way to avoid unnecessary overhead in the
+ * string. We have to convert them to 1-based character indexes for reporting
+ * to clients. (We do things this way to avoid unnecessary overhead in the
* normal non-error case: computing character indexes would be much more
* expensive than storing token offsets.)
*/
@@ -129,7 +129,7 @@ parser_errposition(ParseState *pstate, int location)
* Sometimes the parser calls functions that aren't part of the parser
* subsystem and can't reasonably be passed a ParseState; yet we would
* like any errors thrown in those functions to be tagged with a parse
- * error location. Use this function to set up an error context stack
+ * error location. Use this function to set up an error context stack
* entry that will accomplish that. Usage pattern:
*
* declare a local variable "ParseCallbackState pcbstate"
@@ -221,7 +221,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
* If the input is a domain, smash to base type, and extract the actual
* typmod to be applied to the base type. Subscripting a domain is an
* operation that necessarily works on the base array type, not the domain
- * itself. (Note that we provide no method whereby the creator of a
+ * itself. (Note that we provide no method whereby the creator of a
* domain over an array type could hide its ability to be subscripted.)
*/
*arrayType = getBaseTypeAndTypmod(*arrayType, arrayTypmod);
@@ -269,7 +269,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
*
* In an array assignment, we are given a destination array value plus a
* source value that is to be assigned to a single element or a slice of
- * that array. We produce an expression that represents the new array value
+ * that array. We produce an expression that represents the new array value
* with the source data inserted into the right part of the array.
*
* For both cases, if the source array is of a domain-over-array type,
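
The usage pattern that the parser_errposition comment above describes comes down to this (setup_parser_errposition_callback() and cancel_parser_errposition_callback() are the real entry points; the lookup in the middle is just a stand-in for any code that might ereport() without a ParseState):

    ParseCallbackState pcbstate;

    setup_parser_errposition_callback(&pcbstate, pstate, location);
    relid = RangeVarGetRelid(rel, NoLock, false);   /* may throw */
    cancel_parser_errposition_callback(&pcbstate);
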
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index a2b712d516..b65b632f17 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -447,7 +447,7 @@ oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId,
*
* This is tighter than oper() because it will not return an operator that
* requires coercion of the input datatypes (but binary-compatible operators
- * are accepted). Otherwise, the semantics are the same.
+ * are accepted). Otherwise, the semantics are the same.
*/
Operator
compatible_oper(ParseState *pstate, List *op, Oid arg1, Oid arg2,
@@ -980,7 +980,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
* mapping is pretty expensive to compute, especially for ambiguous operators;
* this is mainly because there are a *lot* of instances of popular operator
* names such as "=", and we have to check each one to see which is the
- * best match. So once we have identified the correct mapping, we save it
+ * best match. So once we have identified the correct mapping, we save it
* in a cache that need only be flushed on pg_operator or pg_cast change.
* (pg_cast must be considered because changes in the set of implicit casts
* affect the set of applicable operators for any given input datatype.)
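
Flushing that cache on pg_operator or pg_cast change is done with syscache invalidation callbacks; a minimal sketch of the registration (CacheRegisterSyscacheCallback(), OPEROID, and CASTSOURCETARGET are real identifiers; the callback body is reduced to a comment):

    static void
    InvalidateOprCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
    {
        /* conservatively flush every cached operator mapping */
    }

    /* ... during cache initialization: */
    CacheRegisterSyscacheCallback(OPEROID,
                                  InvalidateOprCacheCallBack, (Datum) 0);
    CacheRegisterSyscacheCallback(CASTSOURCETARGET,
                                  InvalidateOprCacheCallBack, (Datum) 0);
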
diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c
index c5c034b2d8..41b755a1fa 100644
--- a/src/backend/parser/parse_param.c
+++ b/src/backend/parser/parse_param.c
@@ -256,7 +256,7 @@ variable_coerce_param_hook(ParseState *pstate, Param *param,
* of parsing with parse_variable_parameters.
*
* Note: this code intentionally does not check that all parameter positions
- * were used, nor that all got non-UNKNOWN types assigned. Caller of parser
+ * were used, nor that all got non-UNKNOWN types assigned. Caller of parser
* should enforce that if it's important.
*/
void
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 8760952dfe..478584d946 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -71,7 +71,7 @@ static bool isQueryUsingTempRelation_walker(Node *node, void *context);
*
* A qualified refname (schemaname != NULL) can only match a relation RTE
* that (a) has no alias and (b) is for the same relation identified by
- * schemaname.refname. In this case we convert schemaname.refname to a
+ * schemaname.refname. In this case we convert schemaname.refname to a
* relation OID and search by relid, rather than by alias name. This is
* peculiar, but it's what SQL says to do.
*/
@@ -181,7 +181,7 @@ scanNameSpaceForRefname(ParseState *pstate, const char *refname, int location)
/*
* Search the query's table namespace for a relation RTE matching the
- * given relation OID. Return the RTE if a unique match, or NULL
+ * given relation OID. Return the RTE if a unique match, or NULL
* if no match. Raise error if multiple matches.
*
* See the comments for refnameRangeTblEntry to understand why this
@@ -285,7 +285,7 @@ isFutureCTE(ParseState *pstate, const char *refname)
*
* This is different from refnameRangeTblEntry in that it considers every
* entry in the ParseState's rangetable(s), not only those that are currently
- * visible in the p_namespace list(s). This behavior is invalid per the SQL
+ * visible in the p_namespace list(s). This behavior is invalid per the SQL
* spec, and it may give ambiguous results (there might be multiple equally
* valid matches, but only one will be returned). This must be used ONLY
* as a heuristic in giving suitable error messages. See errorMissingRTE.
@@ -308,8 +308,8 @@ searchRangeTableForRel(ParseState *pstate, RangeVar *relation)
* relation.
*
* NB: It's not critical that RangeVarGetRelid return the correct answer
- * here in the face of concurrent DDL. If it doesn't, the worst case
- * scenario is a less-clear error message. Also, the tables involved in
+ * here in the face of concurrent DDL. If it doesn't, the worst case
+ * scenario is a less-clear error message. Also, the tables involved in
* the query are already locked, which reduces the number of cases in
* which surprising behavior can occur. So we do the name lookup
* unlocked.
@@ -431,7 +431,7 @@ check_lateral_ref_ok(ParseState *pstate, ParseNamespaceItem *nsitem,
/*
* given an RTE, return RT index (starting with 1) of the entry,
- * and optionally get its nesting depth (0 = current). If sublevels_up
+ * and optionally get its nesting depth (0 = current). If sublevels_up
* is NULL, only consider rels at the current nesting level.
* Raises error if RTE not found.
*/
@@ -585,11 +585,11 @@ scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, char *colname,
/* In constraint check, no system column is allowed except tableOid */
if (pstate->p_expr_kind == EXPR_KIND_CHECK_CONSTRAINT &&
- attnum < InvalidAttrNumber && attnum != TableOidAttributeNumber)
+ attnum < InvalidAttrNumber && attnum != TableOidAttributeNumber)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("system column \"%s\" reference in check constraint is invalid",
- colname),
+ colname),
parser_errposition(pstate, location)));
if (attnum != InvalidAttrNumber)
@@ -673,7 +673,7 @@ colNameToVar(ParseState *pstate, char *colname, bool localonly,
*
* This is different from colNameToVar in that it considers every entry in
* the ParseState's rangetable(s), not only those that are currently visible
- * in the p_namespace list(s). This behavior is invalid per the SQL spec,
+ * in the p_namespace list(s). This behavior is invalid per the SQL spec,
* and it may give ambiguous results (there might be multiple equally valid
* matches, but only one will be returned). This must be used ONLY as a
* heuristic in giving suitable error messages. See errorMissingColumn.
@@ -1016,7 +1016,7 @@ addRangeTableEntry(ParseState *pstate,
/*
* Get the rel's OID. This access also ensures that we have an up-to-date
- * relcache entry for the rel. Since this is typically the first access
+ * relcache entry for the rel. Since this is typically the first access
* to a rel in a statement, be careful to get the right access level
* depending on whether we're doing SELECT FOR UPDATE/SHARE.
*/
@@ -2580,7 +2580,7 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum)
* Dropped attributes are only possible with functions that
* return named composite types. In such a case we have to
* look up the result type to see if it currently has this
- * column dropped. So first, loop over the funcs until we
+ * column dropped. So first, loop over the funcs until we
* find the one that covers the requested column.
*/
foreach(lc, rte->functions)
@@ -2811,7 +2811,7 @@ errorMissingRTE(ParseState *pstate, RangeVar *relation)
/*
* Check to see if there are any potential matches in the query's
- * rangetable. (Note: cases involving a bad schema name in the RangeVar
+ * rangetable. (Note: cases involving a bad schema name in the RangeVar
* will throw error immediately here. That seems OK.)
*/
rte = searchRangeTableForRel(pstate, relation);
@@ -2865,7 +2865,7 @@ errorMissingColumn(ParseState *pstate,
RangeTblEntry *rte;
/*
- * If relname was given, just play dumb and report it. (In practice, a
+ * If relname was given, just play dumb and report it. (In practice, a
* bad qualification name should end up at errorMissingRTE, not here, so
* no need to work hard on this case.)
*/
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index f971c71a92..2ee1270ec5 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -182,7 +182,7 @@ transformTargetList(ParseState *pstate, List *targetlist,
* This is the identical transformation to transformTargetList, except that
* the input list elements are bare expressions without ResTarget decoration,
* and the output elements are likewise just expressions without TargetEntry
- * decoration. We use this for ROW() and VALUES() constructs.
+ * decoration. We use this for ROW() and VALUES() constructs.
*/
List *
transformExpressionList(ParseState *pstate, List *exprlist,
@@ -348,7 +348,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
/*
* transformAssignedExpr()
- * This is used in INSERT and UPDATE statements only. It prepares an
+ * This is used in INSERT and UPDATE statements only. It prepares an
* expression for assignment to a column of the target table.
* This includes coercing the given value to the target column's type
* (if necessary), and dealing with any subfield names or subscripts
@@ -367,7 +367,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
*
* Note: location points at the target column name (SET target or INSERT
* column name list entry), and must therefore be -1 in an INSERT that
- * omits the column name list. So we should usually prefer to use
+ * omits the column name list. So we should usually prefer to use
* exprLocation(expr) for errors that can happen in a default INSERT.
*/
Expr *
@@ -442,7 +442,7 @@ transformAssignedExpr(ParseState *pstate,
/*
* If there is indirection on the target column, prepare an array or
- * subfield assignment expression. This will generate a new column value
+ * subfield assignment expression. This will generate a new column value
* that the source value has been inserted into, which can then be placed
* in the new tuple constructed by INSERT or UPDATE.
*/
@@ -550,7 +550,7 @@ updateTargetListEntry(ParseState *pstate,
/*
* Set the resno to identify the target column --- the rewriter and
- * planner depend on this. We also set the resname to identify the target
+ * planner depend on this. We also set the resname to identify the target
* column, but this is only for debugging purposes; it should not be
* relied on. (In particular, it might be out of date in a stored rule.)
*/
@@ -998,7 +998,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
*
* Note: this code is a lot like transformColumnRef; it's tempting to
* call that instead and then replace the resulting whole-row Var with
- * a list of Vars. However, that would leave us with the RTE's
+ * a list of Vars. However, that would leave us with the RTE's
* selectedCols bitmap showing the whole row as needing select
* permission, as well as the individual columns. That would be
* incorrect (since columns added later shouldn't need select
@@ -1017,7 +1017,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
} crserr = CRSERR_NO_RTE;
/*
- * Give the PreParseColumnRefHook, if any, first shot. If it returns
+ * Give the PreParseColumnRefHook, if any, first shot. If it returns
* non-null then we should use that expression.
*/
if (pstate->p_pre_columnref_hook != NULL)
@@ -1133,7 +1133,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
* Transforms '*' (in the target list) into a list of targetlist entries.
*
* tlist entries are generated for each relation visible for unqualified
- * column name access. We do not consider qualified-name-only entries because
+ * column name access. We do not consider qualified-name-only entries because
* that would include input tables of aliasless JOINs, NEW/OLD pseudo-entries,
* etc.
*
@@ -1280,7 +1280,7 @@ ExpandRowReference(ParseState *pstate, Node *expr,
/*
* If the rowtype expression is a whole-row Var, we can expand the fields
- * as simple Vars. Note: if the RTE is a relation, this case leaves us
+ * as simple Vars. Note: if the RTE is a relation, this case leaves us
* with the RTE's selectedCols bitmap showing the whole row as needing
* select permission, as well as the individual columns. However, we can
* only get here for weird notations like (table.*).*, so it's not worth
@@ -1362,7 +1362,7 @@ ExpandRowReference(ParseState *pstate, Node *expr,
* Get the tuple descriptor for a Var of type RECORD, if possible.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
* the tupdesc from it. We ereport if we can't determine the tupdesc.
*
@@ -1445,7 +1445,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of ParseState
+ * to. We have to build an additional level of ParseState
* to keep in step with varlevelsup in the subselect.
*/
ParseState mypstate;
@@ -1519,7 +1519,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index b8c10e11c9..d0803dfafd 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -35,7 +35,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
/*
* LookupTypeName
* Given a TypeName object, lookup the pg_type syscache entry of the type.
- * Returns NULL if no such type can be found. If the type is found,
+ * Returns NULL if no such type can be found. If the type is found,
* the typmod value represented in the TypeName struct is computed and
* stored into *typmod_p.
*
@@ -48,7 +48,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
*
* typmod_p can be passed as NULL if the caller does not care to know the
* typmod value, but the typmod decoration (if any) will be validated anyway,
- * except in the case where the type is not found. Note that if the type is
+ * except in the case where the type is not found. Note that if the type is
* found but is a shell, and there is typmod decoration, an error will be
* thrown --- this is intentional.
*
@@ -113,7 +113,7 @@ LookupTypeName(ParseState *pstate, const TypeName *typeName,
* Look up the field.
*
* XXX: As no lock is taken here, this might fail in the presence of
- * concurrent DDL. But taking a lock would carry a performance
+ * concurrent DDL. But taking a lock would carry a performance
* penalty and would also require a permissions check.
*/
relid = RangeVarGetRelid(rel, NoLock, missing_ok);
@@ -625,7 +625,7 @@ typeTypeCollation(Type typ)
/*
* Given a type structure and a string, returns the internal representation
- * of that string. The "string" can be NULL to perform conversion of a NULL
+ * of that string. The "string" can be NULL to perform conversion of a NULL
* (which might result in failure, if the input function rejects NULLs).
*/
Datum
@@ -649,7 +649,7 @@ stringTypeDatum(Type tp, char *string, int32 atttypmod)
* instability in the input function is that comparison of Const nodes
* relies on bytewise comparison of the datums, so if the input function
* leaves garbage then subexpressions that should be identical may not get
- * recognized as such. See pgsql-hackers discussion of 2008-04-04.
+ * recognized as such. See pgsql-hackers discussion of 2008-04-04.
*/
if (string && !typform->typbyval)
{
@@ -696,7 +696,7 @@ pts_error_callback(void *arg)
/*
* Currently we just suppress any syntax error position report, rather
- * than transforming to an "internal query" error. It's unlikely that a
+ * than transforming to an "internal query" error. It's unlikely that a
* type name is complex enough to need positioning.
*/
errposition(0);
@@ -792,9 +792,9 @@ parseTypeString(const char *str, Oid *typeid_p, int32 *typmod_p,
if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("type \"%s\" does not exist",
+ errmsg("type \"%s\" does not exist",
TypeNameToString(typeName)),
- parser_errposition(NULL, typeName->location)));
+ parser_errposition(NULL, typeName->location)));
*typeid_p = InvalidOid;
}
else
@@ -802,9 +802,9 @@ parseTypeString(const char *str, Oid *typeid_p, int32 *typmod_p,
if (!((Form_pg_type) GETSTRUCT(tup))->typisdefined)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("type \"%s\" is only a shell",
+ errmsg("type \"%s\" is only a shell",
TypeNameToString(typeName)),
- parser_errposition(NULL, typeName->location)));
+ parser_errposition(NULL, typeName->location)));
*typeid_p = HeapTupleGetOid(tup);
ReleaseSysCache(tup);
}
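
A caller-side sketch of the function patched just above, with the signature taken from the hunk header (the type string itself is arbitrary):

    Oid         typoid;
    int32       typmod;

    parseTypeString("numeric(10,2)", &typoid, &typmod, false);
    /* typoid is now NUMERICOID; typmod encodes precision 10, scale 2 */
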
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 1e071d7908..7c1939f9c4 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -157,7 +157,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
stmt = (CreateStmt *) copyObject(stmt);
/*
- * Look up the creation namespace. This also checks permissions on the
+ * Look up the creation namespace. This also checks permissions on the
* target namespace, locks it against concurrent drops, checks for a
* preexisting relation in that namespace with the same name, and updates
* stmt->relation->relpersistence if the select namespace is temporary.
@@ -183,7 +183,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
* If the target relation name isn't schema-qualified, make it so. This
* prevents some corner cases in which added-on rewritten commands might
* think they should apply to other relations that have the same name and
- * are earlier in the search path. But a local temp table is effectively
+ * are earlier in the search path. But a local temp table is effectively
* specified to be in pg_temp, so no need for anything extra in that case.
*/
if (stmt->relation->schemaname == NULL
@@ -672,7 +672,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
if (cxt->isforeign)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("LIKE is not supported for creating foreign tables")));
+ errmsg("LIKE is not supported for creating foreign tables")));
relation = relation_openrv(table_like_clause->relation, AccessShareLock);
@@ -712,7 +712,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
constr = tupleDesc->constr;
/*
- * Initialize column number map for map_variable_attnos(). We need this
+ * Initialize column number map for map_variable_attnos(). We need this
* since dropped columns in the source table aren't copied, so the new
* table can have different column numbers.
*/
@@ -927,7 +927,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing the
+ * commit. That will prevent someone else from deleting or ALTERing the
* parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -1608,7 +1608,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
parser_errposition(cxt->pstate, constraint->location)));
/*
- * Insist on it being a btree. That's the only kind that supports
+ * Insist on it being a btree. That's the only kind that supports
* uniqueness at the moment anyway; but we must have an index that
* exactly matches what you'd get from plain ADD CONSTRAINT syntax,
* else dump and reload will produce a different index (breaking
@@ -1635,7 +1635,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
/*
* We shouldn't see attnum == 0 here, since we already rejected
- * expression indexes. If we do, SystemAttributeDefinition will
+ * expression indexes. If we do, SystemAttributeDefinition will
* throw an error.
*/
if (attnum > 0)
@@ -1649,7 +1649,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
attname = pstrdup(NameStr(attform->attname));
/*
- * Insist on default opclass and sort options. While the index
+ * Insist on default opclass and sort options. While the index
* would still work as a constraint with non-default settings, it
* might not provide exactly the same uniqueness semantics as
* you'd get from a normally-created constraint; and there's also
@@ -1900,7 +1900,7 @@ transformFKConstraints(CreateStmtContext *cxt,
* transformIndexStmt - parse analysis for CREATE INDEX and ALTER TABLE
*
* Note: this is a no-op for an index not using either index expressions or
- * a predicate expression. There are several code paths that create indexes
+ * a predicate expression. There are several code paths that create indexes
* without bothering to call this, because they know they don't have any
* such expressions to deal with.
*
@@ -2023,7 +2023,7 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
/*
* To avoid deadlock, make sure the first thing we do is grab
- * AccessExclusiveLock on the target relation. This will be needed by
+ * AccessExclusiveLock on the target relation. This will be needed by
* DefineQueryRewrite(), and we don't want to grab a lesser lock
* beforehand.
*/
diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c
index a439e8b199..663296683a 100644
--- a/src/backend/parser/parser.c
+++ b/src/backend/parser/parser.c
@@ -65,7 +65,7 @@ raw_parser(const char *str)
* Intermediate filter between parser and core lexer (core_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
diff --git a/src/backend/parser/scansup.c b/src/backend/parser/scansup.c
index b9871bb297..e9fa5dd0b0 100644
--- a/src/backend/parser/scansup.c
+++ b/src/backend/parser/scansup.c
@@ -132,7 +132,7 @@ downcase_truncate_identifier(const char *ident, int len, bool warn)
{
char *result;
int i;
- bool enc_is_single_byte;
+ bool enc_is_single_byte;
result = palloc(len + 1);
enc_is_single_byte = pg_database_encoding_max_length() == 1;
@@ -143,8 +143,8 @@ downcase_truncate_identifier(const char *ident, int len, bool warn)
* locale-aware translation. However, there are some locales where this
* is not right either (eg, Turkish may do strange things with 'i' and
* 'I'). Our current compromise is to use tolower() for characters with
- * the high bit set, as long as they aren't part of a multi-byte character,
- * and use an ASCII-only downcasing for 7-bit characters.
+ * the high bit set, as long as they aren't part of a multi-byte
+ * character, and use an ASCII-only downcasing for 7-bit characters.
*/
for (i = 0; i < len; i++)
{
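
In sketch form, the body of the loop shown above does essentially this (it mirrors what downcase_truncate_identifier really does; IS_HIGHBIT_SET() is a real helper, and the truncation/warning logic is omitted):

    unsigned char ch = (unsigned char) ident[i];

    if (ch >= 'A' && ch <= 'Z')
        ch += 'a' - 'A';        /* ASCII-only downcasing */
    else if (enc_is_single_byte && IS_HIGHBIT_SET(ch) && isupper(ch))
        ch = tolower(ch);       /* locale-aware, single-byte encodings only */
    result[i] = (char) ch;
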
diff --git a/src/backend/port/darwin/system.c b/src/backend/port/darwin/system.c
index d571f26ef8..1cd5266929 100644
--- a/src/backend/port/darwin/system.c
+++ b/src/backend/port/darwin/system.c
@@ -24,7 +24,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c
index 484eb43b5c..ccd92c39d4 100644
--- a/src/backend/port/dynloader/darwin.c
+++ b/src/backend/port/dynloader/darwin.c
@@ -47,7 +47,7 @@ pg_dlerror(void)
/*
* These routines were taken from the Apache source, but were made
- * available with a PostgreSQL-compatible license. Kudos Wilfredo
+ * available with a PostgreSQL-compatible license. Kudos Wilfredo
* Sánchez <wsanchez@apple.com>.
*/
diff --git a/src/backend/port/dynloader/freebsd.c b/src/backend/port/dynloader/freebsd.c
index 53af482f07..60d8654818 100644
--- a/src/backend/port/dynloader/freebsd.c
+++ b/src/backend/port/dynloader/freebsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/netbsd.c b/src/backend/port/dynloader/netbsd.c
index 1333cbcc58..9af0467347 100644
--- a/src/backend/port/dynloader/netbsd.c
+++ b/src/backend/port/dynloader/netbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/openbsd.c b/src/backend/port/dynloader/openbsd.c
index 4a04b12fcd..41459db388 100644
--- a/src/backend/port/dynloader/openbsd.c
+++ b/src/backend/port/dynloader/openbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index c6ac713bc5..1aafd31e1f 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -138,7 +138,7 @@ PosixSemaphoreKill(sem_t * sem)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
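
The resource-release contract described above reduces to a pattern like this (sem_open() is the POSIX call; ReleaseSemaphores is a hypothetical cleanup routine registered through the real on_shmem_exit()):

    sem_t      *mySem;

    mySem = sem_open(semname, O_CREAT | O_EXCL, 0600, 1);
    if (mySem == SEM_FAILED)
        elog(FATAL, "sem_open(\"%s\") failed: %m", semname);

    on_shmem_exit(ReleaseSemaphores, 0);    /* give them back at exit */
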
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index d5d66edcd3..9f72ed3115 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -253,7 +253,7 @@ IpcSemaphoreCreate(int numSems)
/*
* Can only get here if some other process managed to create the same
- * sema key before we did. Let him have that one, loop around to try
+ * sema key before we did. Let him have that one, loop around to try
* next key.
*/
}
@@ -278,12 +278,12 @@ IpcSemaphoreCreate(int numSems)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting semaphore key). In a standalone backend,
+ * it to generate the starting semaphore key). In a standalone backend,
* zero will be passed.
*
* In the SysV implementation, we acquire semaphore sets on-demand; the
@@ -378,7 +378,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* from the operation prematurely because we were sent a signal. So we
* try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. On
+ * Each time around the loop, we check for a cancel/die interrupt. On
* some platforms, if such an interrupt comes in while we are waiting, it
* will cause the semop() call to exit with errno == EINTR, allowing us to
* service the interrupt (if not in a critical section already) during the
@@ -396,7 +396,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
* execute directly. However, there is a huge pitfall: there is another
* window of a few instructions after the semop() before we are able to
- * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
* control, which means that the lock has been acquired but our caller did
* not get a chance to record the fact. Therefore, we only set
* ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
@@ -409,9 +409,9 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* On some platforms, signals marked SA_RESTART (which is most, for us)
* will not interrupt the semop(); it will just keep waiting. Therefore
* it's necessary for cancel/die interrupts to be serviced directly by the
- * signal handler. On these platforms the behavior is really the same
+ * signal handler. On these platforms the behavior is really the same
* whether the signal arrives just before the semop() begins, or while it
- * is waiting. The loop on EINTR is thus important only for platforms
+ * is waiting. The loop on EINTR is thus important only for platforms
* without SA_RESTART.
*/
do
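
The loop being introduced by that "do" is essentially the following (semop() is the SysV call; the ImmediateInterruptOK bookkeeping discussed in the comment is omitted from this sketch):

    struct sembuf sops;
    int         errStatus;

    sops.sem_op = -1;           /* decrement, i.e. acquire */
    sops.sem_flg = 0;
    sops.sem_num = semNum;

    do
    {
        errStatus = semop(semId, &sops, 1);
    } while (errStatus < 0 && errno == EINTR);

    if (errStatus < 0)
        elog(FATAL, "semop(id=%d) failed: %m", semId);
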
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 5e3850b024..7430757c75 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -228,7 +228,7 @@ IpcMemoryDelete(int status, Datum shmId)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
@@ -374,8 +374,8 @@ CreateAnonymousSegment(Size *size)
(huge_pages == HUGE_PAGES_TRY && ptr == MAP_FAILED))
{
/*
- * use the original size, not the rounded up value, when falling
- * back to non-huge pages.
+ * use the original size, not the rounded up value, when falling back
+ * to non-huge pages.
*/
allocsize = *size;
ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
@@ -411,14 +411,14 @@ CreateAnonymousSegment(Size *size)
* the storage.
*
* Dead Postgres segments are recycled if found, but we do not fail upon
- * collision with non-Postgres shmem segments. The idea here is to detect and
+ * collision with non-Postgres shmem segments. The idea here is to detect and
* re-use keys that may have been assigned by a crashed postmaster or backend.
*
* makePrivate means to always create a new segment, rather than attach to
* or recycle any existing segment.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting shmem key). In a standalone backend,
+ * it to generate the starting shmem key). In a standalone backend,
* zero will be passed.
*/
PGShmemHeader *
@@ -512,9 +512,9 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port,
/*
* The segment appears to be from a dead Postgres process, or from a
* previous cycle of life in this same process. Zap it, if possible,
- * and any associated dynamic shared memory segments, as well.
- * This probably shouldn't fail, but if it does, assume the segment
- * belongs to someone else after all, and continue quietly.
+ * and any associated dynamic shared memory segments, as well. This
+ * probably shouldn't fail, but if it does, assume the segment belongs
+ * to someone else after all, and continue quietly.
*/
if (hdr->dsm_control != 0)
dsm_cleanup_using_control_segment(hdr->dsm_control);
@@ -583,7 +583,7 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port,
/*
* PGSharedMemoryReAttach
*
- * Re-attach to an already existing shared memory segment. In the non
+ * Re-attach to an already existing shared memory segment. In the non
* EXEC_BACKEND case this is not used, because postmaster children inherit
* the shared memory segment attachment via fork().
*
@@ -626,7 +626,7 @@ PGSharedMemoryReAttach(void)
*
* Detach from the shared memory segment, if still attached. This is not
* intended for use by the process that originally created the segment
- * (it will have an on_shmem_exit callback registered to do that). Rather,
+ * (it will have an on_shmem_exit callback registered to do that). Rather,
* this is for subprocesses that have inherited an attachment and want to
* get rid of it.
*/
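
The "still existing and in use" test mentioned at the top of this file's hunks can be sketched with the standard shmctl() interface; the shm_nattch check is the essential part, and the data-directory validation the real code also performs is left out (segment_in_use is a hypothetical name):

    static bool
    segment_in_use(int shmId)
    {
        struct shmid_ds shmStat;

        if (shmctl(shmId, IPC_STAT, &shmStat) < 0)
            return false;       /* segment no longer exists */
        return shmStat.shm_nattch != 0;     /* someone still attached? */
    }
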
diff --git a/src/backend/port/unix_latch.c b/src/backend/port/unix_latch.c
index 4d1a305177..d0e928f8c4 100644
--- a/src/backend/port/unix_latch.c
+++ b/src/backend/port/unix_latch.c
@@ -239,7 +239,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
/*
* Initialize timeout if requested. We must record the current time so
* that we can determine the remaining timeout if the poll() or select()
- * is interrupted. (On some platforms, select() will update the contents
+ * is interrupted. (On some platforms, select() will update the contents
* of "tv" for us, but unfortunately we can't rely on that.)
*/
if (wakeEvents & WL_TIMEOUT)
@@ -500,7 +500,7 @@ SetLatch(volatile Latch *latch)
/*
* XXX there really ought to be a memory barrier operation right here, to
* ensure that any flag variables we might have changed get flushed to
- * main memory before we check/set is_set. Without that, we have to
+ * main memory before we check/set is_set. Without that, we have to
* require that callers provide their own synchronization for machines
* with weak memory ordering (see latch.h).
*/
@@ -559,7 +559,7 @@ ResetLatch(volatile Latch *latch)
/*
* XXX there really ought to be a memory barrier operation right here, to
* ensure that the write to is_set gets flushed to main memory before we
- * examine any flag variables. Otherwise a concurrent SetLatch might
+ * examine any flag variables. Otherwise a concurrent SetLatch might
* falsely conclude that it needn't signal us, even though we have missed
* seeing some flag updates that SetLatch was supposed to inform us of.
* For the moment, callers must supply their own synchronization of flag
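
Recomputing the remaining timeout after an interrupted wait, as the WaitLatchOrSocket comment earlier in this file prescribes, looks roughly like this portable sketch (plain gettimeofday() arithmetic rather than the backend's instr_time macros; pfds, nfds, and timeout_ms are assumed inputs):

    struct timeval start_time, now;
    long        remaining_ms = timeout_ms;
    int         rc;

    gettimeofday(&start_time, NULL);
    for (;;)
    {
        rc = poll(pfds, nfds, (int) remaining_ms);
        if (rc >= 0 || errno != EINTR)
            break;              /* events, timeout, or hard error */
        gettimeofday(&now, NULL);
        remaining_ms = timeout_ms -
            ((now.tv_sec - start_time.tv_sec) * 1000 +
             (now.tv_usec - start_time.tv_usec) / 1000);
        if (remaining_ms <= 0)
            break;              /* interrupted past the deadline */
    }
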
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index adc0e02335..7b0f71b65d 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -151,7 +151,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
(errmsg_internal("could not reset socket waiting event: error code %lu", GetLastError())));
/*
- * Track whether socket is UDP or not. (NB: most likely, this is both
+ * Track whether socket is UDP or not. (NB: most likely, this is both
* useless and wrong; there is no reason to think that the behavior of
* WSAEventSelect is different for TCP and UDP.)
*/
@@ -160,7 +160,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
current_socket = s;
/*
- * Attach event to socket. NOTE: we must detach it again before
+ * Attach event to socket. NOTE: we must detach it again before
* returning, since other bits of code may try to attach other events to
* the socket.
*/
diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c
index b5b7195d44..6c50dbbe01 100644
--- a/src/backend/port/win32_latch.c
+++ b/src/backend/port/win32_latch.c
@@ -246,7 +246,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
rc == WAIT_OBJECT_0 + pmdeath_eventno)
{
/*
- * Postmaster apparently died. Since the consequences of falsely
+ * Postmaster apparently died. Since the consequences of falsely
* returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
* take the trouble to positively verify this with
* PostmasterIsAlive(), even though there is no known reason to
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 8d0cc89840..d144edaa19 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -79,7 +79,7 @@ GetSharedMemName(void)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 8926325faa..b53cfdbf6d 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -21,21 +21,21 @@
* There is an autovacuum shared memory area, where the launcher stores
* information about the database it wants vacuumed. When it wants a new
* worker to start, it sets a flag in shared memory and sends a signal to the
- * postmaster. Then postmaster knows nothing more than it must start a worker;
- * so it forks a new child, which turns into a worker. This new process
+ * postmaster. The postmaster then knows no more than that it must start a worker;
+ * so it forks a new child, which turns into a worker. This new process
* connects to shared memory, and there it can inspect the information that the
* launcher has set up.
*
* If the fork() call fails in the postmaster, it sets a flag in the shared
* memory area, and sends a signal to the launcher. The launcher, upon
* noticing the flag, can try starting the worker again by resending the
- * signal. Note that the failure can only be transient (fork failure due to
+ * signal. Note that the failure can only be transient (fork failure due to
* high load, memory pressure, too many processes, etc); more permanent
* problems, like failure to connect to a database, are detected later in the
* worker and dealt with just by having the worker exit normally. The launcher
* will launch a new worker again later, per schedule.
*
- * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The
+ * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The
* launcher then wakes up and is able to launch another worker, if the schedule
* is so tight that a new worker is needed immediately. At this time the
* launcher can also balance the settings for the various remaining workers'
@@ -244,7 +244,7 @@ typedef enum
/*-------------
* The main autovacuum shmem struct. On shared memory we store this main
- * struct and the array of WorkerInfo structs. This struct keeps:
+ * struct and the array of WorkerInfo structs. This struct keeps:
*
* av_signal set by other processes to indicate various conditions
* av_launcherpid the PID of the autovacuum launcher
@@ -429,7 +429,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -439,7 +439,7 @@ AutoVacLauncherMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. We operate on databases much like a regular
+ * Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*/
@@ -546,7 +546,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* Force zero_damaged_pages OFF in the autovac process, even if it is set
- * in postgresql.conf. We don't really want such a dangerous option being
+ * in postgresql.conf. We don't really want such a dangerous option being
* applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -869,7 +869,7 @@ launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval * nap)
* this the "new" database, because when the database was already present on
* the list, we expect that this function is not called at all). The
* preexisting list, if any, will be used to preserve the order of the
- * databases in the autovacuum_naptime period. The new database is put at the
+ * databases in the autovacuum_naptime period. The new database is put at the
* end of the interval. The actual values are not saved, which should not be
* much of a problem.
*/
@@ -1073,7 +1073,7 @@ db_comparator(const void *a, const void *b)
*
* Bare-bones procedure for starting an autovacuum worker from the launcher.
* It determines what database to work on, sets up shared memory stuff and
- * signals postmaster to start the worker. It fails gracefully if invoked when
+ * signals postmaster to start the worker. It fails gracefully if invoked when
* autovacuum_workers are already active.
*
* Return value is the OID of the database that the worker is going to process,
@@ -1345,7 +1345,7 @@ launch_worker(TimestampTz now)
/*
* Called from postmaster to signal a failure to fork a process to become
- * worker. The postmaster should kill(SIGUSR2) the launcher shortly
+ * worker. The postmaster should kill(SIGUSR2) the launcher shortly
* after calling this function.
*/
void
@@ -1497,7 +1497,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -1507,7 +1507,7 @@ AutoVacWorkerMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. We operate on databases much like a regular
+ * Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*
@@ -1558,7 +1558,7 @@ AutoVacWorkerMain(int argc, char *argv[])
EmitErrorReport();
/*
- * We can now go away. Note that because we called InitProcess, a
+ * We can now go away. Note that because we called InitProcess, a
* callback was registered to do ProcKill, which will clean up
* necessary state.
*/
@@ -1572,7 +1572,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* Force zero_damaged_pages OFF in the autovac process, even if it is set
- * in postgresql.conf. We don't really want such a dangerous option being
+ * in postgresql.conf. We don't really want such a dangerous option being
* applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -1700,7 +1700,7 @@ FreeWorkerInfo(int code, Datum arg)
/*
* Wake the launcher up so that he can launch a new worker immediately
* if required. We only save the launcher's PID in local memory here;
- * the actual signal will be sent when the PGPROC is recycled. Note
+ * the actual signal will be sent when the PGPROC is recycled. Note
* that we always do this, so that the launcher can rebalance the cost
* limit setting of the remaining workers.
*
@@ -1808,7 +1808,7 @@ autovac_balance_cost(void)
/*
* We put a lower bound of 1 on the cost_limit, to avoid division-
- * by-zero in the vacuum code. Also, in case of roundoff trouble
+ * by-zero in the vacuum code. Also, in case of roundoff trouble
* in these calculations, let's be sure we don't ever set
* cost_limit to more than the base value.
*/
@@ -1851,7 +1851,7 @@ get_database_list(void)
/*
* Start a transaction so we can access pg_database, and get a snapshot.
* We don't have a use for the snapshot itself, but we're interested in
- * the secondary effect that it sets RecentGlobalXmin. (This is critical
+ * the secondary effect that it sets RecentGlobalXmin. (This is critical
* for anything that reads heap pages, because HOT may decide to prune
* them even if the process doesn't attempt to modify any tuples.)
*/
@@ -2266,14 +2266,14 @@ do_autovacuum(void)
}
/*
- * Ok, good to go. Store the table in shared memory before releasing
+ * Ok, good to go. Store the table in shared memory before releasing
* the lock so that other workers don't vacuum it concurrently.
*/
MyWorkerInfo->wi_tableoid = relid;
LWLockRelease(AutovacuumScheduleLock);
/*
- * Remember the prevailing values of the vacuum cost GUCs. We have to
+ * Remember the prevailing values of the vacuum cost GUCs. We have to
* restore these at the bottom of the loop, else we'll compute wrong
* values in the next iteration of autovac_balance_cost().
*/
@@ -2302,7 +2302,7 @@ do_autovacuum(void)
/*
* Save the relation name for a possible error message, to avoid a
- * catalog lookup in case of an error. If any of these return NULL,
+ * catalog lookup in case of an error. If any of these return NULL,
* then the relation has been dropped since last we checked; skip it.
* Note: they must live in a long-lived memory context because we call
* vacuum and analyze in different transactions.
@@ -2744,7 +2744,7 @@ relation_needs_vacanalyze(Oid relid,
{
/*
* Skip a table not found in stat hash, unless we have to force vacuum
- * for anti-wrap purposes. If it's not acted upon, there's no need to
+ * for anti-wrap purposes. If it's not acted upon, there's no need to
* vacuum it.
*/
*dovacuum = force_vacuum;
@@ -2946,7 +2946,7 @@ AutoVacuumShmemInit(void)
* Refresh pgstats data for an autovacuum process
*
* Cause the next pgstats read operation to obtain fresh data, but throttle
- * such refreshing in the autovacuum launcher. This is mostly to avoid
+ * such refreshing in the autovacuum launcher. This is mostly to avoid
* rereading the pgstats files too many times in quick succession when there
* are many databases.
*
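
For reference, the clamping that the autovac_balance_cost() hunk above talks about amounts to something like this (cost_avail, cost_total, and cost_limit_base stand in for that function's local state; Min and Max are the real backend macros):

    int         limit;

    limit = (int) (cost_avail * cost_limit_base / cost_total);
    limit = Min(limit, cost_limit_base);    /* guard against roundoff */
    limit = Max(limit, 1);                  /* avoid division by zero */
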
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index f65a80374c..a6b25d8494 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -35,7 +35,7 @@
/*
* The postmaster's list of registered background workers, in private memory.
*/
-slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList);
+slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList);
/*
* BackgroundWorkerSlots exist in shared memory and can be accessed (via
@@ -71,23 +71,23 @@ slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList);
*/
typedef struct BackgroundWorkerSlot
{
- bool in_use;
- bool terminate;
- pid_t pid; /* InvalidPid = not started yet; 0 = dead */
- uint64 generation; /* incremented when slot is recycled */
+ bool in_use;
+ bool terminate;
+ pid_t pid; /* InvalidPid = not started yet; 0 = dead */
+ uint64 generation; /* incremented when slot is recycled */
BackgroundWorker worker;
} BackgroundWorkerSlot;
typedef struct BackgroundWorkerArray
{
- int total_slots;
+ int total_slots;
BackgroundWorkerSlot slot[FLEXIBLE_ARRAY_MEMBER];
} BackgroundWorkerArray;
struct BackgroundWorkerHandle
{
- int slot;
- uint64 generation;
+ int slot;
+ uint64 generation;
};
static BackgroundWorkerArray *BackgroundWorkerData;
@@ -127,10 +127,10 @@ BackgroundWorkerShmemInit(void)
BackgroundWorkerData->total_slots = max_worker_processes;
/*
- * Copy contents of worker list into shared memory. Record the
- * shared memory slot assigned to each worker. This ensures
- * a 1-to-1 correspondence betwen the postmaster's private list and
- * the array in shared memory.
+ * Copy contents of worker list into shared memory. Record the shared
+ * memory slot assigned to each worker. This ensures a 1-to-1
+ * correspondence between the postmaster's private list and the array
+ * in shared memory.
*/
slist_foreach(siter, &BackgroundWorkerList)
{
@@ -144,7 +144,7 @@ BackgroundWorkerShmemInit(void)
slot->pid = InvalidPid;
slot->generation = 0;
rw->rw_shmem_slot = slotno;
- rw->rw_worker.bgw_notify_pid = 0; /* might be reinit after crash */
+ rw->rw_worker.bgw_notify_pid = 0; /* might be reinit after crash */
memcpy(&slot->worker, &rw->rw_worker, sizeof(BackgroundWorker));
++slotno;
}
@@ -194,27 +194,27 @@ FindRegisteredWorkerBySlotNumber(int slotno)
void
BackgroundWorkerStateChange(void)
{
- int slotno;
+ int slotno;
/*
* The total number of slots stored in shared memory should match our
* notion of max_worker_processes. If it does not, something is very
* wrong. Further down, we always refer to this value as
- * max_worker_processes, in case shared memory gets corrupted while
- * we're looping.
+ * max_worker_processes, in case shared memory gets corrupted while we're
+ * looping.
*/
if (max_worker_processes != BackgroundWorkerData->total_slots)
{
elog(LOG,
"inconsistent background worker state (max_worker_processes=%d, total_slots=%d",
- max_worker_processes,
- BackgroundWorkerData->total_slots);
+ max_worker_processes,
+ BackgroundWorkerData->total_slots);
return;
}
/*
- * Iterate through slots, looking for newly-registered workers or
- * workers who must die.
+ * Iterate through slots, looking for newly-registered workers or workers
+ * who must die.
*/
for (slotno = 0; slotno < max_worker_processes; ++slotno)
{
@@ -267,8 +267,8 @@ BackgroundWorkerStateChange(void)
}
/*
- * Copy strings in a paranoid way. If shared memory is corrupted,
- * the source data might not even be NUL-terminated.
+ * Copy strings in a paranoid way. If shared memory is corrupted, the
+ * source data might not even be NUL-terminated.
*/
ascii_safe_strlcpy(rw->rw_worker.bgw_name,
slot->worker.bgw_name, BGW_MAXLEN);
@@ -280,10 +280,10 @@ BackgroundWorkerStateChange(void)
/*
* Copy various fixed-size fields.
*
- * flags, start_time, and restart_time are examined by the
- * postmaster, but nothing too bad will happen if they are
- * corrupted. The remaining fields will only be examined by the
- * child process. It might crash, but we won't.
+ * flags, start_time, and restart_time are examined by the postmaster,
+ * but nothing too bad will happen if they are corrupted. The
+ * remaining fields will only be examined by the child process. It
+ * might crash, but we won't.
*/
rw->rw_worker.bgw_flags = slot->worker.bgw_flags;
rw->rw_worker.bgw_start_time = slot->worker.bgw_start_time;
@@ -292,13 +292,13 @@ BackgroundWorkerStateChange(void)
rw->rw_worker.bgw_main_arg = slot->worker.bgw_main_arg;
/*
- * Copy the PID to be notified about state changes, but only if
- * the postmaster knows about a backend with that PID. It isn't
- * an error if the postmaster doesn't know about the PID, because
- * the backend that requested the worker could have died (or been
- * killed) just after doing so. Nonetheless, at least until we get
- * some experience with how this plays out in the wild, log a message
- * at a relative high debug level.
+ * Copy the PID to be notified about state changes, but only if the
+ * postmaster knows about a backend with that PID. It isn't an error
+ * if the postmaster doesn't know about the PID, because the backend
+ * that requested the worker could have died (or been killed) just
+ * after doing so. Nonetheless, at least until we get some experience
+ * with how this plays out in the wild, log a message at a relatively
+ * high debug level.
*/
rw->rw_worker.bgw_notify_pid = slot->worker.bgw_notify_pid;
if (!PostmasterMarkPIDForWorkerNotify(rw->rw_worker.bgw_notify_pid))
@@ -319,7 +319,7 @@ BackgroundWorkerStateChange(void)
/* Log it! */
ereport(LOG,
(errmsg("registering background worker \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
slist_push_head(&BackgroundWorkerList, &rw->rw_lnode);
}
@@ -348,7 +348,7 @@ ForgetBackgroundWorker(slist_mutable_iter *cur)
ereport(LOG,
(errmsg("unregistering background worker \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
slist_delete_current(cur);
free(rw);
@@ -458,7 +458,7 @@ SanityCheckBackgroundWorker(BackgroundWorker *worker, int elevel)
static void
bgworker_quickdie(SIGNAL_ARGS)
{
- sigaddset(&BlockSig, SIGQUIT); /* prevent nested calls */
+ sigaddset(&BlockSig, SIGQUIT); /* prevent nested calls */
PG_SETMASK(&BlockSig);
/*
@@ -504,7 +504,7 @@ bgworker_die(SIGNAL_ARGS)
static void
bgworker_sigusr1_handler(SIGNAL_ARGS)
{
- int save_errno = errno;
+ int save_errno = errno;
latch_sigusr1_handler();
@@ -581,7 +581,7 @@ StartBackgroundWorker(void)
pqsignal(SIGHUP, SIG_IGN);
pqsignal(SIGQUIT, bgworker_quickdie);
- InitializeTimeouts(); /* establishes SIGALRM handler */
+ InitializeTimeouts(); /* establishes SIGALRM handler */
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR2, SIG_IGN);
@@ -633,11 +633,11 @@ StartBackgroundWorker(void)
/*
* If bgw_main is set, we use that value as the initial entrypoint.
* However, if the library containing the entrypoint wasn't loaded at
- * postmaster startup time, passing it as a direct function pointer is
- * not possible. To work around that, we allow callers for whom a
- * function pointer is not available to pass a library name (which will
- * be loaded, if necessary) and a function name (which will be looked up
- * in the named library).
+ * postmaster startup time, passing it as a direct function pointer is not
+ * possible. To work around that, we allow callers for whom a function
+ * pointer is not available to pass a library name (which will be loaded,
+ * if necessary) and a function name (which will be looked up in the named
+ * library).
*/
if (worker->bgw_main != NULL)
entrypt = worker->bgw_main;
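In code, the fallback this comment describes is a two-way dispatch; a sketch follows (the wrapper name is hypothetical; load_external_function() is the dynamic loader's by-name lookup from src/backend/utils/fmgr/dfmgr.c):

#include "postgres.h"
#include "fmgr.h"
#include "postmaster/bgworker.h"

static bgworker_main_type
resolve_entrypoint(BackgroundWorker *worker)
{
    if (worker->bgw_main != NULL)
        return worker->bgw_main;    /* library loaded at postmaster start */

    /* Load bgw_library_name if necessary, then look the symbol up. */
    return (bgworker_main_type)
        load_external_function(worker->bgw_library_name,
                               worker->bgw_function_name,
                               true, NULL);
}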
@@ -677,7 +677,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
if (!IsUnderPostmaster)
ereport(LOG,
- (errmsg("registering background worker \"%s\"", worker->bgw_name)));
+ (errmsg("registering background worker \"%s\"", worker->bgw_name)));
if (!process_shared_preload_libraries_in_progress)
{
@@ -697,7 +697,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("background worker \"%s\": only dynamic background workers can request notification",
- worker->bgw_name)));
+ worker->bgw_name)));
return;
}
@@ -756,17 +756,17 @@ bool
RegisterDynamicBackgroundWorker(BackgroundWorker *worker,
BackgroundWorkerHandle **handle)
{
- int slotno;
- bool success = false;
- uint64 generation = 0;
+ int slotno;
+ bool success = false;
+ uint64 generation = 0;
/*
- * We can't register dynamic background workers from the postmaster.
- * If this is a standalone backend, we're the only process and can't
- * start any more. In a multi-process environement, it might be
- * theoretically possible, but we don't currently support it due to
- * locking considerations; see comments on the BackgroundWorkerSlot
- * data structure.
+ * We can't register dynamic background workers from the postmaster. If
+ * this is a standalone backend, we're the only process and can't start
+ * any more. In a multi-process environment, it might be theoretically
+ * possible, but we don't currently support it due to locking
+ * considerations; see comments on the BackgroundWorkerSlot data
+ * structure.
*/
if (!IsUnderPostmaster)
return false;
@@ -792,8 +792,8 @@ RegisterDynamicBackgroundWorker(BackgroundWorker *worker,
generation = slot->generation;
/*
- * Make sure postmaster doesn't see the slot as in use before
- * it sees the new contents.
+ * Make sure postmaster doesn't see the slot as in use before it
+ * sees the new contents.
*/
pg_write_barrier();
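To make the ordering requirement concrete, here is a standalone sketch using a C11 release store as a stand-in for the backend's pg_write_barrier()-plus-plain-store idiom (types and names are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

typedef struct
{
    char        payload[64];    /* stand-in for the copied worker struct */
    atomic_bool in_use;         /* the flag a concurrent reader tests */
} Slot;

/*
 * Release-store publication: every payload write above is guaranteed
 * to be visible to any reader that subsequently observes in_use == true.
 */
static void
publish_slot(Slot *slot, const char *data)
{
    strncpy(slot->payload, data, sizeof(slot->payload) - 1);
    slot->payload[sizeof(slot->payload) - 1] = '\0';
    atomic_store_explicit(&slot->in_use, true, memory_order_release);
}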
@@ -839,16 +839,16 @@ BgwHandleStatus
GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
{
BackgroundWorkerSlot *slot;
- pid_t pid;
+ pid_t pid;
Assert(handle->slot < max_worker_processes);
slot = &BackgroundWorkerData->slot[handle->slot];
/*
- * We could probably arrange to synchronize access to data using
- * memory barriers only, but for now, let's just keep it simple and
- * grab the lock. It seems unlikely that there will be enough traffic
- * here to result in meaningful contention.
+ * We could probably arrange to synchronize access to data using memory
+ * barriers only, but for now, let's just keep it simple and grab the
+ * lock. It seems unlikely that there will be enough traffic here to
+ * result in meaningful contention.
*/
LWLockAcquire(BackgroundWorkerLock, LW_SHARED);
@@ -887,9 +887,9 @@ GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
BgwHandleStatus
WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *pidp)
{
- BgwHandleStatus status;
- int rc;
- bool save_set_latch_on_sigusr1;
+ BgwHandleStatus status;
+ int rc;
+ bool save_set_latch_on_sigusr1;
save_set_latch_on_sigusr1 = set_latch_on_sigusr1;
set_latch_on_sigusr1 = true;
@@ -898,7 +898,7 @@ WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *pidp)
{
for (;;)
{
- pid_t pid;
+ pid_t pid;
CHECK_FOR_INTERRUPTS();
@@ -942,7 +942,7 @@ void
TerminateBackgroundWorker(BackgroundWorkerHandle *handle)
{
BackgroundWorkerSlot *slot;
- bool signal_postmaster = false;
+ bool signal_postmaster = false;
Assert(handle->slot < max_worker_processes);
slot = &BackgroundWorkerData->slot[handle->slot];
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 1ec66c221f..780ee3bdcb 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -2,11 +2,11 @@
*
* bgwriter.c
*
- * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
+ * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
* to keep regular backends from having to write out dirty shared buffers
* (which they would only do when needing to free a shared buffer to read in
* another page). In the best scenario all writes from shared buffers will
- * be issued by the background writer process. However, regular backends are
+ * be issued by the background writer process. However, regular backends are
* still empowered to issue writes if the bgwriter fails to maintain enough
* clean shared buffers.
*
@@ -115,7 +115,7 @@ BackgroundWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (bgwriter probably never has any
+ * can signal any child processes too. (bgwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
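As a sketch of what that step amounts to (error handling simplified; the backend treats a setsid() failure as FATAL):

#include <stdio.h>
#include <unistd.h>

/*
 * Become a session (and process group) leader, so that a later
 * kill(-pid, sig) from the parent reaches this process and any
 * children it may spawn.
 */
static void
become_group_leader(void)
{
#ifdef HAVE_SETSID
    if (setsid() < 0)
        perror("setsid");
#endif
}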
@@ -194,7 +194,7 @@ BackgroundWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in bgwriter, but we do have LWLocks, buffers, and temp files.
*/
LWLockReleaseAll();
@@ -291,18 +291,18 @@ BackgroundWriterMain(void)
if (FirstCallSinceLastCheckpoint())
{
/*
- * After any checkpoint, close all smgr files. This is so we
+ * After any checkpoint, close all smgr files. This is so we
* won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
}
/*
- * Log a new xl_running_xacts every now and then so replication can get
- * into a consistent state faster (think of suboverflowed snapshots)
- * and clean up resources (locks, KnownXids*) more frequently. The
- * costs of this are relatively low, so doing it 4 times
- * (LOG_SNAPSHOT_INTERVAL_MS) a minute seems fine.
+ * Log a new xl_running_xacts every now and then so replication can
+ * get into a consistent state faster (think of suboverflowed
+ * snapshots) and clean up resources (locks, KnownXids*) more
+ * frequently. The costs of this are relatively low, so doing it 4
+ * times (LOG_SNAPSHOT_INTERVAL_MS) a minute seems fine.
*
* We assume the interval for writing xl_running_xacts is
* significantly bigger than BgWriterDelay, so we don't complicate the
@@ -314,20 +314,21 @@ BackgroundWriterMain(void)
* we've logged a running xacts.
*
* We do this logging in the bgwriter as it's the only process that
- * run regularly and returns to its mainloop all the
- * time. E.g. Checkpointer, when active, is barely ever in its
- * mainloop and thus makes it hard to log regularly.
+ * runs regularly and returns to its mainloop all the time. E.g.
+ * Checkpointer, when active, is barely ever in its mainloop and thus
+ * makes it hard to log regularly.
*/
if (XLogStandbyInfoActive() && !RecoveryInProgress())
{
TimestampTz timeout = 0;
TimestampTz now = GetCurrentTimestamp();
+
timeout = TimestampTzPlusMilliseconds(last_snapshot_ts,
LOG_SNAPSHOT_INTERVAL_MS);
/*
- * only log if enough time has passed and some xlog record has been
- * inserted.
+ * only log if enough time has passed and some xlog record has
+ * been inserted.
*/
if (now >= timeout &&
last_snapshot_lsn != GetXLogInsertRecPtr())
@@ -366,7 +367,7 @@ BackgroundWriterMain(void)
* and the time we call StrategyNotifyBgWriter. While it's not
* critical that we not hibernate anyway, we try to reduce the odds of
* that by only hibernating when BgBufferSync says nothing's happening
- * for two consecutive cycles. Also, we mitigate any possible
+ * for two consecutive cycles. Also, we mitigate any possible
* consequences of a missed wakeup by not hibernating forever.
*/
if (rc == WL_TIMEOUT && can_hibernate && prev_hibernate)
@@ -420,7 +421,7 @@ bg_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index e544c1f6d2..2ac3061d97 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -2,7 +2,7 @@
*
* checkpointer.c
*
- * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
+ * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
* Checkpoints are automatically dispatched after a certain amount of time has
* elapsed since the last one, and it can be signaled to perform requested
* checkpoints as well. (The GUC parameter that mandates a checkpoint every
@@ -14,7 +14,7 @@
* subprocess finishes, or as soon as recovery begins if we are doing archive
* recovery. It remains alive until the postmaster commands it to terminate.
* Normal termination is by SIGUSR2, which instructs the checkpointer to
- * execute a shutdown checkpoint and then exit(0). (All backends must be
+ * execute a shutdown checkpoint and then exit(0). (All backends must be
* stopped before SIGUSR2 is issued!) Emergency termination is by SIGQUIT;
* like any backend, the checkpointer will simply abort and exit on SIGQUIT.
*
@@ -198,7 +198,7 @@ CheckpointerMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (checkpointer probably never has
+ * can signal any child processes too. (checkpointer probably never has
* any child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -211,7 +211,7 @@ CheckpointerMain(void)
* Properly accept or ignore signals the postmaster might send us
*
* Note: we deliberately ignore SIGTERM, because during a standard Unix
- * system shutdown cycle, init will SIGTERM all processes at once. We
+ * system shutdown cycle, init will SIGTERM all processes at once. We
* want to wait for the backends to exit, whereupon the postmaster will
* tell us it's okay to shut down (via SIGUSR2).
*/
@@ -279,7 +279,7 @@ CheckpointerMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in checkpointer, but we do have LWLocks, buffers, and temp
* files.
*/
@@ -506,7 +506,7 @@ CheckpointerMain(void)
ckpt_performed = CreateRestartPoint(flags);
/*
- * After any checkpoint, close all smgr files. This is so we
+ * After any checkpoint, close all smgr files. This is so we
* won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -639,7 +639,7 @@ CheckArchiveTimeout(void)
}
/*
- * Returns true if an immediate checkpoint request is pending. (Note that
+ * Returns true if an immediate checkpoint request is pending. (Note that
* this does not check the *current* checkpoint's IMMEDIATE flag, but whether
* there is one pending behind it.)
*/
@@ -826,7 +826,7 @@ chkpt_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -977,7 +977,7 @@ RequestCheckpoint(int flags)
CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
/*
- * After any checkpoint, close all smgr files. This is so we won't
+ * After any checkpoint, close all smgr files. This is so we won't
* hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -1108,7 +1108,7 @@ RequestCheckpoint(int flags)
* to the requests[] queue without checking for duplicates. The checkpointer
* will have to eliminate dups internally anyway. However, if we discover
* that the queue is full, we make a pass over the entire queue to compact
- * it. This is somewhat expensive, but the alternative is for the backend
+ * it. This is somewhat expensive, but the alternative is for the backend
* to perform its own fsync, which is far more expensive in practice. It
* is theoretically possible a backend fsync might still be necessary, if
* the queue is full and contains no duplicate entries. In that case, we
@@ -1134,7 +1134,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* If the checkpointer isn't running or the request queue is full, the
- * backend will have to perform its own fsync request. But before forcing
+ * backend will have to perform its own fsync request. But before forcing
* that to happen, we can try to compact the request queue.
*/
if (CheckpointerShmem->checkpointer_pid == 0 ||
@@ -1178,7 +1178,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
* Although a full fsync request queue is not common, it can lead to severe
* performance problems when it does happen. So far, this situation has
* only been observed to occur when the system is under heavy write load,
- * and especially during the "sync" phase of a checkpoint. Without this
+ * and especially during the "sync" phase of a checkpoint. Without this
* logic, each backend begins doing an fsync for every block written, which
* gets very expensive and can slow down the whole system.
*
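A sketch of the duplicate-elimination pass described above, under simplified assumptions: requests live in a flat array and equality is a byte comparison. The real CompactCheckpointerRequestQueue() uses a hash table and runs with the queue lock held; this is only the shape of the idea.

#include <stdbool.h>
#include <string.h>

typedef struct
{
    int         rnode;          /* stand-ins for RelFileNode/fork/segno */
    int         forknum;
    int         segno;
} Req;

/* Drop duplicate requests in place; returns the compacted length. */
static int
compact_queue(Req *queue, int n)
{
    int         kept = 0;

    for (int i = 0; i < n; i++)
    {
        bool        dup = false;

        for (int j = 0; j < kept; j++)
        {
            if (memcmp(&queue[i], &queue[j], sizeof(Req)) == 0)
            {
                dup = true;
                break;
            }
        }
        if (!dup)
            queue[kept++] = queue[i];   /* keep first occurrence */
    }
    return kept;
}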
diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c
index 3e2acdd0f5..f6df2de870 100644
--- a/src/backend/postmaster/fork_process.c
+++ b/src/backend/postmaster/fork_process.c
@@ -101,7 +101,7 @@ fork_process(void)
#endif /* LINUX_OOM_SCORE_ADJ */
/*
- * Older Linux kernels have oom_adj not oom_score_adj. This works
+ * Older Linux kernels have oom_adj not oom_score_adj. This works
* similarly except with a different scale of adjustment values. If
* it's necessary to build Postgres to work with either API, you can
* define both LINUX_OOM_SCORE_ADJ and LINUX_OOM_ADJ.
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 815316055a..6a5c5b0713 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -487,14 +487,20 @@ pgarch_ArchiverCopyLoop(void)
/* successful */
pgarch_archiveDone(xlog);
- /* Tell the collector about the WAL file that we successfully archived */
+ /*
+ * Tell the collector about the WAL file that we successfully
+ * archived
+ */
pgstat_send_archiver(xlog, false);
break; /* out of inner retry loop */
}
else
{
- /* Tell the collector about the WAL file that we failed to archive */
+ /*
+ * Tell the collector about the WAL file that we failed to
+ * archive
+ */
pgstat_send_archiver(xlog, true);
if (++failures >= NUM_ARCHIVE_RETRIES)
@@ -590,9 +596,9 @@ pgarch_archiveXlog(char *xlog)
{
/*
* If either the shell itself, or a called command, died on a signal,
- * abort the archiver. We do this because system() ignores SIGINT and
+ * abort the archiver. We do this because system() ignores SIGINT and
* SIGQUIT while waiting; so a signal is very likely something that
- * should have interrupted us too. If we overreact it's no big deal,
+ * should have interrupted us too. If we overreact it's no big deal,
* the postmaster will just start the archiver again.
*
* Per the Single Unix Spec, shells report exit status > 128 when a
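The check this comment describes reduces to a few lines against the status returned by a POSIX system(); a hedged sketch with a hypothetical helper name:

#include <stdbool.h>
#include <sys/wait.h>

/*
 * Decide whether the archive command's death looks signal-related:
 * either the shell itself died on a signal, or it exited with a
 * status above 128, the shell convention for "child killed by
 * signal (status - 128)".
 */
static bool
died_on_signal(int rc)
{
    if (WIFSIGNALED(rc))
        return true;
    if (WIFEXITED(rc) && WEXITSTATUS(rc) > 128)
        return true;
    return false;
}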
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 479dfa7d3c..f86481665f 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -368,7 +368,7 @@ pgstat_init(void)
* On some platforms, pg_getaddrinfo_all() may return multiple addresses
* only one of which will actually work (eg, both IPv6 and IPv4 addresses
* when kernel will reject IPv6). Worse, the failure may occur at the
- * bind() or perhaps even connect() stage. So we must loop through the
+ * bind() or perhaps even connect() stage. So we must loop through the
* results till we find a working combination. We will generate LOG
* messages, but no error, for bogus combinations.
*/
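The loop this comment calls for looks roughly like the following, written against plain POSIX getaddrinfo() rather than pg_getaddrinfo_all() and greatly simplified (the real code also test-sends a packet before accepting a candidate):

#include <netdb.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Try every returned address until one survives socket/bind/connect. */
static int
connect_any(const char *host, const char *port)
{
    struct addrinfo hints;
    struct addrinfo *list,
               *ai;
    int         fd = -1;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_DGRAM; /* the stats socket is UDP */

    if (getaddrinfo(host, port, &hints, &list) != 0)
        return -1;

    for (ai = list; ai != NULL; ai = ai->ai_next)
    {
        fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
        if (fd < 0)
            continue;           /* try the next candidate */
        if (bind(fd, ai->ai_addr, ai->ai_addrlen) == 0 &&
            connect(fd, ai->ai_addr, ai->ai_addrlen) == 0)
            break;              /* found a working combination */
        close(fd);
        fd = -1;
    }
    freeaddrinfo(list);
    return fd;                  /* -1 if no combination worked */
}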
@@ -616,7 +616,7 @@ pgstat_reset_remove_files(const char *directory)
/*
* pgstat_reset_all() -
*
- * Remove the stats files. This is currently used only if WAL
+ * Remove the stats files. This is currently used only if WAL
* recovery is needed after a crash.
*/
void
@@ -677,7 +677,7 @@ pgstat_start(void)
/*
* Do nothing if too soon since last collector start. This is a safety
* valve to protect against continuous respawn attempts if the collector
- * is dying immediately at launch. Note that since we will be re-called
+ * is dying immediately at launch. Note that since we will be re-called
* from the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
@@ -1122,7 +1122,7 @@ pgstat_vacuum_stat(void)
*
* Collect the OIDs of all objects listed in the specified system catalog
* into a temporary hash table. Caller should hash_destroy the result
- * when done with it. (However, we make the table in CurrentMemoryContext
+ * when done with it. (However, we make the table in CurrentMemoryContext
* so that it will be freed properly in event of an error.)
* ----------
*/
@@ -1374,7 +1374,7 @@ pgstat_report_analyze(Relation rel,
* have counted such rows as live or dead respectively. Because we will
* report our counts of such rows at transaction end, we should subtract
* off these counts from what we send to the collector now, else they'll
- * be double-counted after commit. (This approach also ensures that the
+ * be double-counted after commit. (This approach also ensures that the
* collector ends up with the right numbers if we abort instead of
* committing.)
*/
@@ -1605,7 +1605,7 @@ pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize)
/*
* Compute the new f_total_time as the total elapsed time added to the
- * pre-call value of f_total_time. This is necessary to avoid
+ * pre-call value of f_total_time. This is necessary to avoid
* double-counting any time taken by recursive calls of myself. (We do
* not need any similar kluge for self time, since that already excludes
* any recursive calls.)
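The no-double-counting rule works because the end-of-call update overwrites rather than accumulates; a standalone sketch with hypothetical names:

#include <time.h>

typedef struct
{
    double      total_time;     /* inclusive ("total") time, in seconds */
} FuncStats;

typedef struct
{
    FuncStats  *fs;
    double      save_total;     /* fs->total_time as of call entry */
    struct timespec start;
} CallUsage;

static void
call_begin(FuncStats *fs, CallUsage *u)
{
    u->fs = fs;
    u->save_total = fs->total_time;
    clock_gettime(CLOCK_MONOTONIC, &u->start);
}

static void
call_end(CallUsage *u)
{
    struct timespec now;
    double      elapsed;

    clock_gettime(CLOCK_MONOTONIC, &now);
    elapsed = (double) (now.tv_sec - u->start.tv_sec) +
        (now.tv_nsec - u->start.tv_nsec) / 1e9;

    /* pre-call value + this frame's elapsed: recursion counted once */
    u->fs->total_time = u->save_total + elapsed;
}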
@@ -2091,7 +2091,7 @@ AtPrepare_PgStat(void)
* Clean up after successful PREPARE.
*
* All we need do here is unlink the transaction stats state from the
- * nontransactional state. The nontransactional action counts will be
+ * nontransactional state. The nontransactional action counts will be
* reported to the stats collector immediately, while the effects on live
* and dead tuple counts are preserved in the 2PC state file.
*
@@ -2317,8 +2317,8 @@ pgstat_fetch_stat_beentry(int beid)
/* ----------
* pgstat_fetch_stat_local_beentry() -
*
- * Like pgstat_fetch_stat_beentry() but with locally computed addtions (like
- * xid and xmin values of the backend)
+ * Like pgstat_fetch_stat_beentry() but with locally computed additions (like
+ * xid and xmin values of the backend)
*
* NB: caller is responsible for a check if the user is permitted to see
* this info (especially the querystring).
@@ -2670,7 +2670,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
{
/*
* track_activities is disabled, but we last reported a
- * non-disabled state. As our final update, change the state and
+ * non-disabled state. As our final update, change the state and
* clear fields we will not be updating anymore.
*/
beentry->st_changecount++;
@@ -2895,12 +2895,12 @@ pgstat_read_current_status(void)
* pgstat_get_backend_current_activity() -
*
* Return a string representing the current activity of the backend with
- * the specified PID. This looks directly at the BackendStatusArray,
+ * the specified PID. This looks directly at the BackendStatusArray,
* and so will provide current information regardless of the age of our
* transaction's snapshot of the status array.
*
* It is the caller's responsibility to invoke this only for backends whose
- * state is expected to remain stable while the result is in use. The
+ * state is expected to remain stable while the result is in use. The
* only current use is in deadlock reporting, where we can expect that
* the target backend is blocked on a lock. (There are corner cases
* where the target's wait could get aborted while we are looking at it,
@@ -2968,7 +2968,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser)
* pgstat_get_crashed_backend_activity() -
*
* Return a string representing the current activity of the backend with
- * the specified PID. Like the function above, but reads shared memory with
+ * the specified PID. Like the function above, but reads shared memory with
* the expectation that it may be corrupt. On success, copy the string
* into the "buffer" argument and return that pointer. On failure,
* return NULL.
@@ -2977,7 +2977,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser)
* query that crashed a backend. In particular, no attempt is made to
* follow the correct concurrency protocol when accessing the
* BackendStatusArray. But that's OK, in the worst case we'll return a
- * corrupted message. We also must take care not to trip on ereport(ERROR).
+ * corrupted message. We also must take care not to trip on ereport(ERROR).
* ----------
*/
const char *
@@ -3097,7 +3097,7 @@ pgstat_send(void *msg, int len)
void
pgstat_send_archiver(const char *xlog, bool failed)
{
- PgStat_MsgArchiver msg;
+ PgStat_MsgArchiver msg;
/*
* Prepare and send the message
@@ -3145,7 +3145,7 @@ pgstat_send_bgwriter(void)
/* ----------
* PgstatCollectorMain() -
*
- * Start up the statistics collector process. This is the body of the
+ * Start up the statistics collector process. This is the body of the
* postmaster child process.
*
* The argc/argv parameters are valid only in EXEC_BACKEND case.
@@ -3166,7 +3166,7 @@ PgstatCollectorMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (pgstat probably never has any
+ * can signal any child processes too. (pgstat probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -3395,7 +3395,7 @@ PgstatCollectorMain(int argc, char *argv[])
/*
* Windows, at least in its Windows Server 2003 R2 incarnation,
- * sometimes loses FD_READ events. Waking up and retrying the recv()
+ * sometimes loses FD_READ events. Waking up and retrying the recv()
* fixes that, so don't sleep indefinitely. This is a crock of the
* first water, but until somebody wants to debug exactly what's
* happening there, this is the best we can do. The two-second
@@ -3912,8 +3912,8 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
/*
- * Clear out global and archiver statistics so they start from zero
- * in case we can't load an existing statsfile.
+ * Clear out global and archiver statistics so they start from zero in
+ * case we can't load an existing statsfile.
*/
memset(&globalStats, 0, sizeof(globalStats));
memset(&archiverStats, 0, sizeof(archiverStats));
@@ -4271,7 +4271,7 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent,
const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
/*
- * Try to open the stats file. As above, anything but ENOENT is worthy of
+ * Try to open the stats file. As above, anything but ENOENT is worthy of
* complaining about.
*/
if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
@@ -4419,7 +4419,7 @@ backend_read_statsfile(void)
*
* We don't recompute min_ts after sleeping, except in the
* unlikely case that cur_ts went backwards. So we might end up
- * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In
+ * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In
* practice that shouldn't happen, though, as long as the sleep
* time is less than PGSTAT_STAT_INTERVAL; and we don't want to
* tell the collector that our cutoff time is less than what we'd
@@ -4512,7 +4512,7 @@ pgstat_setup_memcxt(void)
/* ----------
* pgstat_clear_snapshot() -
*
- * Discard any data collected in the current transaction. Any subsequent
+ * Discard any data collected in the current transaction. Any subsequent
* request will cause new snapshots to be read.
*
* This is also invoked during transaction commit or abort to discard
@@ -4996,7 +4996,7 @@ pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len)
/* Failed archival attempt */
++archiverStats.failed_count;
memcpy(archiverStats.last_failed_wal, msg->m_xlog,
- sizeof(archiverStats.last_failed_wal));
+ sizeof(archiverStats.last_failed_wal));
archiverStats.last_failed_timestamp = msg->m_timestamp;
}
else
@@ -5004,7 +5004,7 @@ pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len)
/* Successful archival operation */
++archiverStats.archived_count;
memcpy(archiverStats.last_archived_wal, msg->m_xlog,
- sizeof(archiverStats.last_archived_wal));
+ sizeof(archiverStats.last_archived_wal));
archiverStats.last_archived_timestamp = msg->m_timestamp;
}
}
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index b573fd82b6..6d098874d9 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2,7 +2,7 @@
*
* postmaster.c
* This program acts as a clearing house for requests to the
- * POSTGRES system. Frontend programs send a startup message
+ * POSTGRES system. Frontend programs send a startup message
* to the Postmaster and the postmaster uses the info in the
* message to set up a backend process.
*
@@ -15,7 +15,7 @@
* The postmaster process creates the shared memory and semaphore
* pools during startup, but as a rule does not touch them itself.
* In particular, it is not a member of the PGPROC array of backends
- * and so it cannot participate in lock-manager operations. Keeping
+ * and so it cannot participate in lock-manager operations. Keeping
* the postmaster away from shared memory operations makes it simpler
* and more reliable. The postmaster is almost always able to recover
* from crashes of individual backends by resetting shared memory;
@@ -144,7 +144,7 @@
* children we have and send them appropriate signals when necessary.
*
* "Special" children such as the startup, bgwriter and autovacuum launcher
- * tasks are not in this list. Autovacuum worker and walsender are in it.
+ * tasks are not in this list. Autovacuum worker and walsender are in it.
* Also, "dead_end" children are in it: these are children launched just for
* the purpose of sending a friendly rejection message to a would-be client.
* We must track them because they are attached to shared memory, but we know
@@ -161,13 +161,13 @@ typedef struct bkend
int child_slot; /* PMChildSlot for this backend, if any */
/*
- * Flavor of backend or auxiliary process. Note that BACKEND_TYPE_WALSND
+ * Flavor of backend or auxiliary process. Note that BACKEND_TYPE_WALSND
* backends initially announce themselves as BACKEND_TYPE_NORMAL, so if
* bkend_type is normal, you should check for a recent transition.
*/
int bkend_type;
bool dead_end; /* is it going to send an error and quit? */
- bool bgworker_notify; /* gets bgworker start/stop notifications */
+ bool bgworker_notify; /* gets bgworker start/stop notifications */
dlist_node elem; /* list link in BackendList */
} Backend;
@@ -212,10 +212,10 @@ static char ExtraOptions[MAXPGPATH];
/*
* These globals control the behavior of the postmaster in case some
- * backend dumps core. Normally, it kills all peers of the dead backend
+ * backend dumps core. Normally, it kills all peers of the dead backend
* and reinitializes shared memory. By specifying -s or -n, we can have
* the postmaster stop (rather than kill) peers and not reinitialize
- * shared data structures. (Reinit is currently dead code, though.)
+ * shared data structures. (Reinit is currently dead code, though.)
*/
static bool Reinit = true;
static int SendStop = false;
@@ -264,7 +264,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */
* state and the startup process is launched. The startup process begins by
* reading the control file and other preliminary initialization steps.
* In a normal startup, or after crash recovery, the startup process exits
- * with exit code 0 and we switch to PM_RUN state. However, archive recovery
+ * with exit code 0 and we switch to PM_RUN state. However, archive recovery
* is handled specially since it takes much longer and we would like to support
* hot standby during archive recovery.
*
@@ -273,7 +273,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */
* checkpointer are launched, while the startup process continues applying WAL.
* If Hot Standby is enabled, then, after reaching a consistent point in WAL
* redo, startup process signals us again, and we switch to PM_HOT_STANDBY
- * state and begin accepting connections to perform read-only queries. When
+ * state and begin accepting connections to perform read-only queries. When
* archive recovery is finished, the startup process exits with exit code 0
* and we switch to PM_RUN state.
*
@@ -456,7 +456,7 @@ typedef struct
VariableCache ShmemVariableCache;
Backend *ShmemBackendArray;
#ifndef HAVE_SPINLOCKS
- PGSemaphore SpinlockSemaArray;
+ PGSemaphore SpinlockSemaArray;
#endif
LWLockPadded *MainLWLockArray;
slock_t *ProcStructLock;
@@ -599,7 +599,7 @@ PostmasterMain(int argc, char *argv[])
opterr = 1;
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* tcop/postgres.c (the option sets should not conflict) and with the
* common help() function in main/main.c.
*/
@@ -1093,6 +1093,7 @@ PostmasterMain(int argc, char *argv[])
InitPostmasterDeathWatchHandle();
#ifdef WIN32
+
/*
* Initialize I/O completion port used to deliver list of dead children.
*/
@@ -1157,8 +1158,8 @@ PostmasterMain(int argc, char *argv[])
if (!(Log_destination & LOG_DESTINATION_STDERR))
ereport(LOG,
(errmsg("ending log output to stderr"),
- errhint("Future log output will go to log destination \"%s\".",
- Log_destination_string)));
+ errhint("Future log output will go to log destination \"%s\".",
+ Log_destination_string)));
whereToSendOutput = DestNone;
@@ -1197,7 +1198,7 @@ PostmasterMain(int argc, char *argv[])
/*
- * Remove old temporary files. At this point there can be no other
+ * Remove old temporary files. At this point there can be no other
* Postgres processes running in this directory, so this should be safe.
*/
RemovePgTempFiles();
@@ -1427,11 +1428,11 @@ DetermineSleepTime(struct timeval * timeout)
if (HaveCrashedWorker)
{
- slist_mutable_iter siter;
+ slist_mutable_iter siter;
/*
* When there are crashed bgworkers, we sleep just long enough that
- * they are restarted when they request to be. Scan the list to
+ * they are restarted when they request to be. Scan the list to
* determine the minimum of all wakeup times according to most recent
* crash time and requested restart interval.
*/
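The scan amounts to a minimum over per-worker wakeup times; a sketch with hypothetical field names (the real code works in TimestampTz, honors BGW_NEVER_RESTART, and caps the sleep):

typedef struct CrashedWorker
{
    double      crashed_at;         /* seconds; 0 means "not crashed" */
    double      restart_interval;   /* seconds; < 0 means "never restart" */
    struct CrashedWorker *next;
} CrashedWorker;

static double
earliest_wakeup(const CrashedWorker *list, double now)
{
    double      best = now + 60.0;  /* never sleep longer than the cap */

    for (const CrashedWorker *w = list; w != NULL; w = w->next)
    {
        double      wakeup;

        if (w->crashed_at == 0 || w->restart_interval < 0)
            continue;
        wakeup = w->crashed_at + w->restart_interval;
        if (wakeup < best)
            best = wakeup;
    }
    return (best > now) ? best : now;   /* already due: don't sleep */
}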
@@ -1655,9 +1656,9 @@ ServerLoop(void)
/*
* If we already sent SIGQUIT to children and they are slow to shut
- * down, it's time to send them SIGKILL. This doesn't happen normally,
- * but under certain conditions backends can get stuck while shutting
- * down. This is a last measure to get them unwedged.
+ * down, it's time to send them SIGKILL. This doesn't happen
+ * normally, but under certain conditions backends can get stuck while
+ * shutting down. This is a last measure to get them unwedged.
*
* Note we also do this during recovery from a process crash.
*/
@@ -1671,8 +1672,8 @@ ServerLoop(void)
AbortStartTime = 0;
/*
- * Additionally, unless we're recovering from a process crash, it's
- * now the time for postmaster to abandon ship.
+ * Additionally, unless we're recovering from a process crash,
+ * it's now the time for postmaster to abandon ship.
*/
if (!FatalError)
ExitPostmaster(1);
@@ -1731,7 +1732,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
/*
* EOF after SSLdone probably means the client didn't like our
- * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
+ * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
* don't clutter the log with a complaint.
*/
if (!SSLdone)
@@ -1856,7 +1857,7 @@ retry1:
int32 offset = sizeof(ProtocolVersion);
/*
- * Scan packet body for name/option pairs. We can assume any string
+ * Scan packet body for name/option pairs. We can assume any string
* beginning within the packet body is null-terminated, thanks to
* zeroing extra byte above.
*/
@@ -1898,7 +1899,7 @@ retry1:
else if (!parse_bool(valptr, &am_walsender))
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"replication\""),
+ errmsg("invalid value for parameter \"replication\""),
errhint("Valid values are: false, 0, true, 1, database.")));
}
else
@@ -2291,7 +2292,7 @@ reset_shared(int port)
*
* Note: in each "cycle of life" we will normally assign the same IPC keys
* (if using SysV shmem and/or semas), since the port number is used to
- * determine IPC keys. This helps ensure that we will clean up dead IPC
+ * determine IPC keys. This helps ensure that we will clean up dead IPC
* objects if the postmaster crashes and is restarted.
*/
CreateSharedMemoryAndSemaphores(false, port);
@@ -2650,7 +2651,7 @@ reaper(SIGNAL_ARGS)
/*
* OK, we saw normal exit of the checkpointer after it's been
* told to shut down. We expect that it wrote a shutdown
- * checkpoint. (If for some reason it didn't, recovery will
+ * checkpoint. (If for some reason it didn't, recovery will
* occur on next postmaster start.)
*
* At this point we should have no normal backend children
@@ -2726,7 +2727,7 @@ reaper(SIGNAL_ARGS)
/*
* Was it the autovacuum launcher? Normal exit can be ignored; we'll
* start a new one at the next iteration of the postmaster's main
- * loop, if necessary. Any other exit condition is treated as a
+ * loop, if necessary. Any other exit condition is treated as a
* crash.
*/
if (pid == AutoVacPID)
@@ -2868,7 +2869,7 @@ CleanupBackgroundWorker(int pid,
if (!ReleasePostmasterChildSlot(rw->rw_child_slot))
{
/*
- * Uh-oh, the child failed to clean itself up. Treat as a crash
+ * Uh-oh, the child failed to clean itself up. Treat as a crash
* after all.
*/
rw->rw_crashed_at = GetCurrentTimestamp();
@@ -2884,6 +2885,7 @@ CleanupBackgroundWorker(int pid,
#ifdef EXEC_BACKEND
ShmemBackendArrayRemove(rw->rw_backend);
#endif
+
/*
* It's possible that this background worker started some OTHER
* background worker and asked to be notified when that worker
@@ -2897,7 +2899,7 @@ CleanupBackgroundWorker(int pid,
}
rw->rw_pid = 0;
rw->rw_child_slot = 0;
- ReportBackgroundWorkerPID(rw); /* report child death */
+ ReportBackgroundWorkerPID(rw); /* report child death */
LogChildExit(LOG, namebuf, pid, exitstatus);
@@ -2930,6 +2932,7 @@ CleanupBackend(int pid,
*/
#ifdef WIN32
+
/*
* On win32, also treat ERROR_WAIT_NO_CHILDREN (128) as nonfatal case,
* since that sometimes happens under load when the process fails to start
@@ -2961,7 +2964,7 @@ CleanupBackend(int pid,
if (!ReleasePostmasterChildSlot(bp->child_slot))
{
/*
- * Uh-oh, the child failed to clean itself up. Treat as a
+ * Uh-oh, the child failed to clean itself up. Treat as a
* crash after all.
*/
HandleChildCrash(pid, exitstatus, _("server process"));
@@ -2974,12 +2977,12 @@ CleanupBackend(int pid,
if (bp->bgworker_notify)
{
/*
- * This backend may have been slated to receive SIGUSR1
- * when some background worker started or stopped. Cancel
- * those notifications, as we don't want to signal PIDs that
- * are not PostgreSQL backends. This gets skipped in the
- * (probably very common) case where the backend has never
- * requested any such notifications.
+ * This backend may have been slated to receive SIGUSR1 when
+ * some background worker started or stopped. Cancel those
+ * notifications, as we don't want to signal PIDs that are not
+ * PostgreSQL backends. This gets skipped in the (probably
+ * very common) case where the backend has never requested any
+ * such notifications.
*/
BackgroundWorkerStopNotifications(bp->pid);
}
@@ -3006,10 +3009,11 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
bool take_action;
/*
- * We only log messages and send signals if this is the first process crash
- * and we're not doing an immediate shutdown; otherwise, we're only here to
- * update postmaster's idea of live processes. If we have already signalled
- * children, nonzero exit status is to be expected, so don't clutter log.
+ * We only log messages and send signals if this is the first process
+ * crash and we're not doing an immediate shutdown; otherwise, we're only
+ * here to update postmaster's idea of live processes. If we have already
+ * signalled children, nonzero exit status is to be expected, so don't
+ * clutter log.
*/
take_action = !FatalError && Shutdown != ImmediateShutdown;
@@ -3052,7 +3056,7 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
else
{
/*
- * This worker is still alive. Unless we did so already, tell it
+ * This worker is still alive. Unless we did so already, tell it
* to commit hara-kiri.
*
* SIGQUIT is the special signal that says exit without proc_exit
@@ -3366,13 +3370,13 @@ PostmasterStateMachine(void)
* PM_WAIT_BACKENDS state ends when we have no regular backends
* (including autovac workers), no bgworkers (including unconnected
* ones), and no walwriter, autovac launcher or bgwriter. If we are
- * doing crash recovery or an immediate shutdown then we expect
- * the checkpointer to exit as well, otherwise not. The archiver,
- * stats, and syslogger processes are disregarded since
- * they are not connected to shared memory; we also disregard
- * dead_end children here. Walsenders are also disregarded,
- * they will be terminated later after writing the checkpoint record,
- * like the archiver process.
+ * doing crash recovery or an immediate shutdown then we expect the
+ * checkpointer to exit as well, otherwise not. The archiver, stats,
+ * and syslogger processes are disregarded since they are not
+ * connected to shared memory; we also disregard dead_end children
+ * here. Walsenders are also disregarded; they will be terminated
+ * later after writing the checkpoint record, like the archiver
+ * process.
*/
if (CountChildren(BACKEND_TYPE_NORMAL | BACKEND_TYPE_WORKER) == 0 &&
CountUnconnectedWorkers() == 0 &&
@@ -3387,7 +3391,7 @@ PostmasterStateMachine(void)
if (Shutdown >= ImmediateShutdown || FatalError)
{
/*
- * Start waiting for dead_end children to die. This state
+ * Start waiting for dead_end children to die. This state
* change causes ServerLoop to stop creating new ones.
*/
pmState = PM_WAIT_DEAD_END;
@@ -3487,7 +3491,7 @@ PostmasterStateMachine(void)
/*
* If we've been told to shut down, we exit as soon as there are no
- * remaining children. If there was a crash, cleanup will occur at the
+ * remaining children. If there was a crash, cleanup will occur at the
* next startup. (Before PostgreSQL 8.3, we tried to recover from the
* crash before exiting, but that seems unwise if we are quitting because
* we got SIGTERM from init --- there may well not be time for recovery
@@ -3565,7 +3569,7 @@ PostmasterStateMachine(void)
* system().
*
* There is a race condition for recently-forked children: they might not
- * have executed setsid() yet. So we signal the child directly as well as
+ * have executed setsid() yet. So we signal the child directly as well as
* the group. We assume such a child will handle the signal before trying
* to spawn any grandchild processes. We also assume that signaling the
* child twice will not cause any problems.
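Concretely, the double delivery looks like this (a sketch; the real helper is postmaster.c's static signal_child()):

#include <signal.h>
#include <sys/types.h>

/*
 * Signal the child by PID, then its process group via the negative
 * PID, so a child that has not yet called setsid() is still reached.
 * Receiving the same signal twice is harmless for the signals used.
 */
static void
signal_child_and_group(pid_t pid, int sig)
{
    kill(pid, sig);
#ifdef HAVE_SETSID
    kill(-pid, sig);
#endif
}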
@@ -3817,7 +3821,7 @@ BackendStartup(Port *port)
/*
* Try to report backend fork() failure to client before we close the
- * connection. Since we do not care to risk blocking the postmaster on
+ * connection. Since we do not care to risk blocking the postmaster on
* this connection, we set the connection to non-blocking and try only once.
*
* This is grungy special-purpose code; we cannot use backend libpq since
@@ -3871,7 +3875,7 @@ BackendInitialize(Port *port)
/*
* PreAuthDelay is a debugging aid for investigating problems in the
* authentication cycle: it can be set in postgresql.conf to allow time to
- * attach to the newly-forked backend with a debugger. (See also
+ * attach to the newly-forked backend with a debugger. (See also
* PostAuthDelay, which we allow clients to pass through PGOPTIONS, but it
* is not honored until after authentication.)
*/
@@ -3898,7 +3902,7 @@ BackendInitialize(Port *port)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (We do this now on the off chance
+ * can signal any child processes too. (We do this now on the off chance
* that something might spawn a child process during authentication.)
*/
#ifdef HAVE_SETSID
@@ -3908,7 +3912,7 @@ BackendInitialize(Port *port)
/*
* We arrange for a simple exit(1) if we receive SIGTERM or SIGQUIT or
- * timeout while trying to collect the startup packet. Otherwise the
+ * timeout while trying to collect the startup packet. Otherwise the
* postmaster cannot shutdown the database FAST or IMMED cleanly if a
* buggy client fails to send the packet promptly.
*/
@@ -3995,7 +3999,7 @@ BackendInitialize(Port *port)
status = ProcessStartupPacket(port, false);
/*
- * Stop here if it was bad or a cancel packet. ProcessStartupPacket
+ * Stop here if it was bad or a cancel packet. ProcessStartupPacket
* already did any appropriate error reporting.
*/
if (status != STATUS_OK)
@@ -4546,7 +4550,7 @@ SubPostmasterMain(int argc, char *argv[])
read_nondefault_variables();
/*
- * Reload any libraries that were preloaded by the postmaster. Since we
+ * Reload any libraries that were preloaded by the postmaster. Since we
* exec'd this process, those libraries didn't come along with us; but we
* should load them into all child processes to be consistent with the
* non-EXEC_BACKEND behavior.
@@ -4599,7 +4603,7 @@ SubPostmasterMain(int argc, char *argv[])
*
* This prevents a randomized stack base address that causes child
* shared memory to be at a different address than the parent, making
- * it impossible to attached to shared memory. Return the value to
+ * it impossible to attach to shared memory. Return the value to
* '1' when finished.
*/
CreateSharedMemoryAndSemaphores(false, 0);
@@ -4719,7 +4723,7 @@ ExitPostmaster(int status)
/* should cleanup shared memory and kill all backends */
/*
- * Not sure of the semantics here. When the Postmaster dies, should the
+ * Not sure of the semantics here. When the Postmaster dies, should the
* backends all be killed? probably not.
*
* MUST -- vadim 05-10-1999
@@ -5028,7 +5032,7 @@ CountChildren(int target)
/*
* StartChildProcess -- start an auxiliary process for the postmaster
*
- * xlop determines what kind of child will be started. All child types
+ * xlop determines what kind of child will be started. All child types
* initially go to AuxiliaryProcessMain, which will handle common setup.
*
* Return value of StartChildProcess is subprocess' PID, or 0 if failed
@@ -5253,7 +5257,7 @@ CreateOptsFile(int argc, char *argv[], char *fullprogname)
* These arrays include regular backends, autovac workers, walsenders
* and background workers, but not special children nor dead_end children.
* This allows the arrays to have a fixed maximum size, to wit the same
- * too-many-children limit enforced by canAcceptConnections(). The exact value
+ * too-many-children limit enforced by canAcceptConnections(). The exact value
* isn't too critical as long as it's more than MaxBackends.
*/
int
@@ -5468,7 +5472,7 @@ assign_backendlist_entry(RegisteredBgWorker *rw)
static void
maybe_start_bgworker(void)
{
- slist_mutable_iter iter;
+ slist_mutable_iter iter;
TimestampTz now = 0;
if (FatalError)
@@ -5544,7 +5548,7 @@ maybe_start_bgworker(void)
else
rw->rw_child_slot = MyPMChildSlot = AssignPostmasterChildSlot();
- do_start_bgworker(rw); /* sets rw->rw_pid */
+ do_start_bgworker(rw); /* sets rw->rw_pid */
if (rw->rw_backend)
{
@@ -5955,7 +5959,7 @@ ShmemBackendArrayRemove(Backend *bn)
#ifdef WIN32
/*
- * Subset implementation of waitpid() for Windows. We assume pid is -1
+ * Subset implementation of waitpid() for Windows. We assume pid is -1
* (that is, check all child processes) and options is WNOHANG (don't wait).
*/
static pid_t
diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c
index 5673c8c20f..a116d029f2 100644
--- a/src/backend/postmaster/startup.c
+++ b/src/backend/postmaster/startup.c
@@ -81,7 +81,7 @@ startupproc_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index 4731ab73fe..f89a5339e0 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -67,7 +67,7 @@
/*
- * GUC parameters. Logging_collector cannot be changed after postmaster
+ * GUC parameters. Logging_collector cannot be changed after postmaster
* start, but the rest can change at SIGHUP.
*/
bool Logging_collector = false;
@@ -193,7 +193,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If we restarted, our stderr is already redirected into our own input
* pipe. This is of course pretty useless, not to mention that it
- * interferes with detecting pipe EOF. Point stderr to /dev/null. This
+ * interferes with detecting pipe EOF. Point stderr to /dev/null. This
* assumes that all interesting messages generated in the syslogger will
* come through elog.c and will be sent to write_syslogger_file.
*/
@@ -203,7 +203,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* The closes might look redundant, but they are not: we want to be
- * darn sure the pipe gets closed even if the open failed. We can
+ * darn sure the pipe gets closed even if the open failed. We can
* survive running with stderr pointing nowhere, but we can't afford
* to have extra pipe input descriptors hanging around.
*
@@ -249,7 +249,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (syslogger probably never has any
+ * can signal any child processes too. (syslogger probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -419,7 +419,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* Calculate time till next time-based rotation, so that we don't
- * sleep longer than that. We assume the value of "now" obtained
+ * sleep longer than that. We assume the value of "now" obtained
* above is still close enough. Note we can't make this calculation
* until after calling logfile_rotate(), since it will advance
* next_rotation_time.
@@ -523,7 +523,7 @@ SysLoggerMain(int argc, char *argv[])
(errmsg("logger shutting down")));
/*
- * Normal exit from the syslogger is here. Note that we
+ * Normal exit from the syslogger is here. Note that we
* deliberately do not close syslogFile before exiting; this is to
* allow for the possibility of elog messages being generated
* inside proc_exit. Regular exit() will take care of flushing
@@ -652,8 +652,8 @@ SysLogger_Start(void)
*/
ereport(LOG,
(errmsg("redirecting log output to logging collector process"),
- errhint("Future log output will appear in directory \"%s\".",
- Log_directory)));
+ errhint("Future log output will appear in directory \"%s\".",
+ Log_directory)));
#ifndef WIN32
fflush(stdout);
@@ -670,6 +670,7 @@ SysLogger_Start(void)
close(syslogPipe[1]);
syslogPipe[1] = -1;
#else
+
/*
* open the pipe in binary mode and make sure stderr is binary
* after it's been dup'ed into, to avoid disturbing the pipe
@@ -1354,7 +1355,7 @@ set_next_rotation_time(void)
/*
* The requirements here are to choose the next time > now that is a
* "multiple" of the log rotation interval. "Multiple" can be interpreted
- * fairly loosely. In this version we align to log_timezone rather than
+ * fairly loosely. In this version we align to log_timezone rather than
* GMT.
*/
rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
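In plain time_t arithmetic (ignoring the log_timezone alignment the comment mentions), the computation is a floor-to-boundary followed by one step forward:

#include <time.h>

/* Next multiple of rotinterval that is strictly later than now. */
static time_t
next_rotation(time_t now, time_t rotinterval)
{
    time_t      next = now - (now % rotinterval);   /* floor to boundary */

    return next + rotinterval;                      /* strictly > now */
}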
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index f8b19c2aa8..0826f8874c 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -103,7 +103,7 @@ WalWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walwriter probably never has any
+ * can signal any child processes too. (walwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -176,7 +176,7 @@ WalWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in walwriter, but we do have LWLocks, and perhaps buffers?
*/
LWLockReleaseAll();
@@ -250,7 +250,7 @@ WalWriterMain(void)
int rc;
/*
- * Advertise whether we might hibernate in this cycle. We do this
+ * Advertise whether we might hibernate in this cycle. We do this
* before resetting the latch to ensure that any async commits will
* see the flag set if they might possibly need to wake us up, and
* that we won't miss any signal they send us. (If we discover work
@@ -341,7 +341,7 @@ wal_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c
index e6aa899518..c495cee300 100644
--- a/src/backend/regex/regc_color.c
+++ b/src/backend/regex/regc_color.c
@@ -2,7 +2,7 @@
* colorings of characters
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c
index 580a693161..921a7d7f92 100644
--- a/src/backend/regex/regc_cvec.c
+++ b/src/backend/regex/regc_cvec.c
@@ -2,7 +2,7 @@
* Utility functions for handling cvecs
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c
index c4095e98cb..6f2c0cb3eb 100644
--- a/src/backend/regex/regc_lex.c
+++ b/src/backend/regex/regc_lex.c
@@ -2,7 +2,7 @@
* lexical analyzer
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index da59705344..e7bbb50ef4 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -30,7 +30,7 @@
*
* THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
* IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
* NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS.
@@ -38,7 +38,7 @@
* GOVERNMENT USE: If you are acquiring this software on behalf of the
* U.S. government, the Government shall have only "Restricted Rights"
* in the software and related documentation as defined in the Federal
- * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+ * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
* are acquiring the software on behalf of the Department of Defense, the
* software shall be classified as "Commercial Computer Software" and the
* Government shall have only "Restricted Rights" as defined in Clause
@@ -667,7 +667,7 @@ allcases(struct vars * v, /* context */
/*
* cmp - chr-substring compare
*
- * Backrefs need this. It should preferably be efficient.
+ * Backrefs need this. It should preferably be efficient.
* Note that it does not need to report anything except equal/unequal.
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c
index f6dad013b5..3487734a64 100644
--- a/src/backend/regex/regc_nfa.c
+++ b/src/backend/regex/regc_nfa.c
@@ -2,7 +2,7 @@
* NFA utilities.
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -1304,7 +1304,7 @@ fixempties(struct nfa * nfa,
}
/*
- * And remove any states that have become useless. (This cleanup is not
+ * And remove any states that have become useless. (This cleanup is not
* very thorough, and would be even less so if we tried to combine it with
* the previous step; but cleanup() will take care of anything we miss.)
*/
@@ -1372,7 +1372,7 @@ replaceempty(struct nfa * nfa,
* non-EMPTY out-arcs), we must keep it so, so always push forward in that
* case.
*
- * The fan-out/fan-in comparison should count only non-EMPTY arcs. If
+ * The fan-out/fan-in comparison should count only non-EMPTY arcs. If
* "from" is doomed, we can skip counting "to"'s arcs, since we want to
* force taking the copyins path in that case.
*/
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index 425c278de4..6b2e38e165 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -24,7 +24,7 @@
* several implementation strategies depending on the situation:
*
* 1. In C/POSIX collations, we use hard-wired code. We can't depend on
- * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
+ * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
* collations don't give a fig about multibyte characters.
*
* 2. In the "default" collation (which is supposed to obey LC_CTYPE):
@@ -36,10 +36,10 @@
*
* 2b. In all other encodings, or on machines that lack <wctype.h>, we use
* the <ctype.h> functions for pg_wchar values up to 255, and punt for values
- * above that. This is only 100% correct in single-byte encodings such as
- * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
+ * above that. This is only 100% correct in single-byte encodings such as
+ * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
* character sets for which the properties being tested here aren't very
- * relevant for higher code values anyway. The difficulty with using the
+ * relevant for higher code values anyway. The difficulty with using the
* <wctype.h> functions with non-Unicode multibyte encodings is that we can
* have no certainty that the platform's wchar_t representation matches
* what we do in pg_wchar conversions.
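A minimal sketch of strategy 2b in the terms used above (the type name here is an illustrative stand-in, not the real pg_wchar): classify code points up to 255 with the <ctype.h> functions and punt, i.e. report false, for anything higher.

    #include <ctype.h>
    #include <stdbool.h>

    typedef unsigned int pg_wchar_ish;      /* stand-in for pg_wchar */

    static bool
    isalpha_punting(pg_wchar_ish c)
    {
        if (c <= 255)
            return isalpha((unsigned char) c) != 0;
        return false;                       /* punt for values above 255 */
    }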
@@ -730,7 +730,7 @@ store_match(pg_ctype_cache *pcc, pg_wchar chr1, int nchrs)
/*
* Given a probe function (e.g., pg_wc_isalpha) get a struct cvec for all
- * chrs satisfying the probe function. The active collation is the one
+ * chrs satisfying the probe function. The active collation is the one
* previously set by pg_set_regex_collation. Return NULL if out of memory.
*
* Note that the result must not be freed or modified by caller.
@@ -777,7 +777,7 @@ pg_ctype_get_cache(pg_wc_probefunc probefunc)
* UTF8 go up to 0x7FF, which is a pretty arbitrary cutoff but we cannot
* extend it as far as we'd like (say, 0xFFFF, the end of the Basic
* Multilingual Plane) without creating significant performance issues due
- * to too many characters being fed through the colormap code. This will
+ * to too many characters being fed through the colormap code. This will
* need redesign to fix reasonably, but at least for the moment we have
* all common European languages covered. Otherwise (not C, not UTF8) go
* up to 255. These limits are interrelated with restrictions discussed
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index d31d7f7b72..bfe6edd3e1 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -2,7 +2,7 @@
* re_*comp and friends - compile REs
* This file #includes several others (see the bottom).
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -564,7 +564,7 @@ makesearch(struct vars * v,
* constraints, often knowing when you were in the pre state tells you
* little; it's the next state(s) that are informative. But some of them
* may have other inarcs, i.e. it may be possible to make actual progress
- * and then return to one of them. We must de-optimize such cases,
+ * and then return to one of them. We must de-optimize such cases,
* splitting each such state into progress and no-progress states.
*/
@@ -610,7 +610,7 @@ makesearch(struct vars * v,
* parse - parse an RE
*
* This is actually just the top level, which parses a bunch of branches
- * tied together with '|'. They appear in the tree as the left children
+ * tied together with '|'. They appear in the tree as the left children
* of a chain of '|' subres.
*/
static struct subre *
@@ -1352,7 +1352,7 @@ bracket(struct vars * v,
/*
* cbracket - handle complemented bracket expression
* We do it by calling bracket() with dummy endpoints, and then complementing
- * the result. The alternative would be to invoke rainbow(), and then delete
+ * the result. The alternative would be to invoke rainbow(), and then delete
* arcs as the b.e. is seen... but that gets messy.
*/
static void
diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c
index 7a7ba5b89c..d367a77e85 100644
--- a/src/backend/regex/rege_dfa.c
+++ b/src/backend/regex/rege_dfa.c
@@ -2,7 +2,7 @@
* DFA routines
* This file is #included by regexec.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c
index 4b2573e625..f863ee7344 100644
--- a/src/backend/regex/regerror.c
+++ b/src/backend/regex/regerror.c
@@ -1,7 +1,7 @@
/*
* regerror - error-code expansion
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index 2e976627f5..7f41437cb5 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -1,7 +1,7 @@
/*
* re_*exec and friends - match REs
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -955,7 +955,7 @@ citerdissect(struct vars * v,
}
/*
- * We need workspace to track the endpoints of each sub-match. Normally
+ * We need workspace to track the endpoints of each sub-match. Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
@@ -984,8 +984,8 @@ citerdissect(struct vars * v,
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
- * each sub-match to confirm validity. If any validity check fails,
- * backtrack the last sub-match and try again. And, when we next try for
+ * each sub-match to confirm validity. If any validity check fails,
+ * backtrack the last sub-match and try again. And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
@@ -1036,7 +1036,7 @@ citerdissect(struct vars * v,
/*
* We've identified a way to divide the string into k sub-matches that
- * works so far as the child DFA can tell. If k is an allowed number
+ * works so far as the child DFA can tell. If k is an allowed number
* of matches, start the slow part: recurse to verify each sub-match.
* We always have k <= max_matches, needn't check that.
*/
@@ -1140,7 +1140,7 @@ creviterdissect(struct vars * v,
}
/*
- * We need workspace to track the endpoints of each sub-match. Normally
+ * We need workspace to track the endpoints of each sub-match. Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
@@ -1169,8 +1169,8 @@ creviterdissect(struct vars * v,
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
- * each sub-match to confirm validity. If any validity check fails,
- * backtrack the last sub-match and try again. And, when we next try for
+ * each sub-match to confirm validity. If any validity check fails,
+ * backtrack the last sub-match and try again. And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
@@ -1223,7 +1223,7 @@ creviterdissect(struct vars * v,
/*
* We've identified a way to divide the string into k sub-matches that
- * works so far as the child DFA can tell. If k is an allowed number
+ * works so far as the child DFA can tell. If k is an allowed number
* of matches, start the slow part: recurse to verify each sub-match.
* We always have k <= max_matches, needn't check that.
*/
diff --git a/src/backend/regex/regfree.c b/src/backend/regex/regfree.c
index b291749bd1..ae17ae70eb 100644
--- a/src/backend/regex/regfree.c
+++ b/src/backend/regex/regfree.c
@@ -1,7 +1,7 @@
/*
* regfree - free an RE
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regprefix.c b/src/backend/regex/regprefix.c
index 3b205e22dc..9234b4c20a 100644
--- a/src/backend/regex/regprefix.c
+++ b/src/backend/regex/regprefix.c
@@ -38,7 +38,7 @@ static int findprefix(struct cnfa * cnfa, struct colormap * cm,
*
* This function does not analyze all complex cases (such as lookahead
* constraints) exactly. Therefore it is possible that some strings matching
- * the reported prefix or exact-match string do not satisfy the regex. But
+ * the reported prefix or exact-match string do not satisfy the regex. But
* it should never be the case that a string satisfying the regex does not
* match the reported prefix or exact-match string.
*/
@@ -150,7 +150,7 @@ findprefix(struct cnfa * cnfa,
* We could find a state with multiple out-arcs that are all labeled with
* the same singleton color; this comes from patterns like "^ab(cde|cxy)".
* In that case we add the chr "c" to the output string but then exit the
- * loop with nextst == -1. This leaves a little bit on the table: if the
+ * loop with nextst == -1. This leaves a little bit on the table: if the
* pattern is like "^ab(cde|cdy)", we won't notice that "d" could be added
* to the prefix. But chasing multiple parallel state chains doesn't seem
* worth the trouble.
@@ -201,14 +201,14 @@ findprefix(struct cnfa * cnfa,
/*
* Identify the color's sole member chr and add it to the prefix
- * string. In general the colormap data structure doesn't provide a
+ * string. In general the colormap data structure doesn't provide a
* way to find color member chrs, except by trying GETCOLOR() on each
* possible chr value, which won't do at all. However, for the cases
* we care about it should be sufficient to test the "firstchr" value,
* that is the first chr ever added to the color. There are cases
* where this might no longer be a member of the color (so we do need
* to test), but none of them are likely to arise for a character that
- * is a member of a common prefix. If we do hit such a corner case,
+ * is a member of a common prefix. If we do hit such a corner case,
* we just fall out without adding anything to the prefix string.
*/
c = cm->cd[thiscolor].firstchr;
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index 42e66f2fed..a3bf5001ec 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -137,8 +137,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
SendXlogRecPtrResult(startptr, starttli);
/*
- * Calculate the relative path of temporary statistics directory
- * in order to skip the files which are located in that directory later.
+ * Calculate the relative path of temporary statistics directory in order
+ * to skip the files which are located in that directory later.
*/
if (is_absolute_path(pgstat_stat_directory) &&
strncmp(pgstat_stat_directory, DataDir, datadirpathlen) == 0)
@@ -231,8 +231,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
(int64) opt->maxrate * (int64) 1024 / THROTTLING_FREQUENCY;
/*
- * The minimum amount of time for throttling_sample
- * bytes to be transfered.
+ * The minimum amount of time for throttling_sample bytes to be
+ * transfered.
*/
elapsed_min_unit = USECS_PER_SEC / THROTTLING_FREQUENCY;
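For reference, a self-contained sketch of the arithmetic in this hunk, with assumed constant values (not taken from the tree): a MAX_RATE of N kB/s translates into a chunk size (throttling_sample) and a minimum elapsed time per chunk (elapsed_min_unit).

    #include <stdint.h>
    #include <stdio.h>

    #define THROTTLING_FREQUENCY 8          /* assumed wakeups per second */
    #define USECS_PER_SEC        1000000

    int main(void)
    {
        int64_t maxrate_kb = 4096;          /* hypothetical MAX_RATE, in kB/s */

        /* bytes we may send between two consecutive throttling points */
        int64_t throttling_sample =
            maxrate_kb * (int64_t) 1024 / THROTTLING_FREQUENCY;

        /* minimum time those bytes are allowed to take */
        int64_t elapsed_min_unit = USECS_PER_SEC / THROTTLING_FREQUENCY;

        printf("%lld bytes per at least %lld us\n",
               (long long) throttling_sample, (long long) elapsed_min_unit);
        return 0;
    }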
@@ -613,7 +613,7 @@ parse_basebackup_options(List *options, basebackup_options *opt)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)",
- (int) maxrate, "MAX_RATE", MAX_RATE_LOWER, MAX_RATE_UPPER)));
+ (int) maxrate, "MAX_RATE", MAX_RATE_LOWER, MAX_RATE_UPPER)));
opt->maxrate = (uint32) maxrate;
o_maxrate = true;
@@ -841,7 +841,7 @@ sendFileWithContent(const char *filename, const char *content)
/*
* Include the tablespace directory pointed to by 'path' in the output tar
- * stream. If 'sizeonly' is true, we just calculate a total length and return
+ * stream. If 'sizeonly' is true, we just calculate a total length and return
* it, without actually sending anything.
*
* Only used to send auxiliary tablespaces, not PGDATA.
@@ -975,7 +975,7 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces)
* always created there.
*/
if ((statrelpath != NULL && strcmp(pathbuf, statrelpath) == 0) ||
- strncmp(de->d_name, PG_STAT_TMP_DIR, strlen(PG_STAT_TMP_DIR)) == 0)
+ strncmp(de->d_name, PG_STAT_TMP_DIR, strlen(PG_STAT_TMP_DIR)) == 0)
{
if (!sizeonly)
_tarWriteHeader(pathbuf + basepathlen + 1, NULL, &statbuf);
@@ -1270,14 +1270,14 @@ throttle(size_t increment)
* the maximum time to sleep. Thus the cast to long is safe.
*/
wait_result = WaitLatch(&MyWalSnd->latch,
- WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
(long) (sleep / 1000));
}
else
{
/*
- * The actual transfer rate is below the limit. A negative value would
- * distort the adjustment of throttled_last.
+ * The actual transfer rate is below the limit. A negative value
+ * would distort the adjustment of throttled_last.
*/
wait_result = 0;
sleep = 0;
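A hedged sketch of the decision this hunk's comments describe, with all names hypothetical: sleep only while ahead of schedule, and clamp a negative remainder to zero so a below-limit transfer rate cannot distort the book-keeping of the last throttling point.

    #include <stdint.h>

    /* Microseconds to sleep after sending one throttling_sample's worth
     * of data; 0 when the actual transfer rate is below the limit. */
    static int64_t
    throttle_sleep(int64_t now_us, int64_t throttled_last_us,
                   int64_t elapsed_min_unit)
    {
        int64_t elapsed = now_us - throttled_last_us;
        int64_t sleep_us = elapsed_min_unit - elapsed;

        if (sleep_us <= 0)
            return 0;           /* below the limit: no sleep, no distortion */
        return sleep_us;
    }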
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 88d27c7690..7bc761db8f 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -50,7 +50,7 @@ static void libpqrcv_connect(char *conninfo);
static void libpqrcv_identify_system(TimeLineID *primary_tli);
static void libpqrcv_readtimelinehistoryfile(TimeLineID tli, char **filename, char **content, int *len);
static bool libpqrcv_startstreaming(TimeLineID tli, XLogRecPtr startpoint,
- char *slotname);
+ char *slotname);
static void libpqrcv_endstreaming(TimeLineID *next_tli);
static int libpqrcv_receive(int timeout, char **buffer);
static void libpqrcv_send(const char *buffer, int nbytes);
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 414cfa9558..7b6114a209 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -9,12 +9,12 @@
*
* NOTE:
* This basically tries to handle all low level xlog stuff for
- * reorderbuffer.c and snapbuild.c. There's some minor leakage where a
- * specific record's struct is used to pass data along, but those just
- * happen to contain the right amount of data in a convenient
- * format. There isn't and shouldn't be much intelligence about the
- * contents of records in here except turning them into a more usable
- * format.
+ * reorderbuffer.c and snapbuild.c. There's some minor leakage where a
+ * specific record's struct is used to pass data along, but those just
+ * happen to contain the right amount of data in a convenient
+ * format. There isn't and shouldn't be much intelligence about the
+ * contents of records in here except turning them into a more usable
+ * format.
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -44,10 +44,10 @@
typedef struct XLogRecordBuffer
{
- XLogRecPtr origptr;
- XLogRecPtr endptr;
- XLogRecord record;
- char *record_data;
+ XLogRecPtr origptr;
+ XLogRecPtr endptr;
+ XLogRecord record;
+ char *record_data;
} XLogRecordBuffer;
/* RMGR Handlers */
@@ -63,10 +63,10 @@ static void DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
static void DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
- TransactionId xid, Oid dboid,
- TimestampTz commit_time,
- int nsubxacts, TransactionId *sub_xids,
- int ninval_msgs, SharedInvalidationMessage *msg);
+ TransactionId xid, Oid dboid,
+ TimestampTz commit_time,
+ int nsubxacts, TransactionId *sub_xids,
+ int ninval_msgs, SharedInvalidationMessage *msg);
static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecPtr lsn,
TransactionId xid, TransactionId *sub_xids, int nsubxacts);
@@ -91,10 +91,10 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogRecord *record)
/* cast so we get a warning when new rmgrs are added */
switch ((RmgrIds) buf.record.xl_rmid)
{
- /*
- * Rmgrs we care about for logical decoding. Add new rmgrs in
- * rmgrlist.h's order.
- */
+ /*
+ * Rmgrs we care about for logical decoding. Add new rmgrs in
+ * rmgrlist.h's order.
+ */
case RM_XLOG_ID:
DecodeXLogOp(ctx, &buf);
break;
@@ -115,11 +115,11 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogRecord *record)
DecodeHeapOp(ctx, &buf);
break;
- /*
- * Rmgrs irrelevant for logical decoding; they describe stuff not
- * represented in logical decoding. Add new rmgrs in rmgrlist.h's
- * order.
- */
+ /*
+ * Rmgrs irrelevant for logical decoding; they describe stuff not
+ * represented in logical decoding. Add new rmgrs in rmgrlist.h's
+ * order.
+ */
case RM_SMGR_ID:
case RM_CLOG_ID:
case RM_DBASE_ID:
@@ -149,13 +149,14 @@ DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
switch (info)
{
- /* this is also used in END_OF_RECOVERY checkpoints */
+ /* this is also used in END_OF_RECOVERY checkpoints */
case XLOG_CHECKPOINT_SHUTDOWN:
case XLOG_END_OF_RECOVERY:
SnapBuildSerializationPoint(builder, buf->origptr);
break;
case XLOG_CHECKPOINT_ONLINE:
+
/*
* a RUNNING_XACTS record will have been logged near to this, we
* can restart from there.
@@ -181,9 +182,9 @@ DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
static void
DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
{
- SnapBuild *builder = ctx->snapshot_builder;
- ReorderBuffer *reorder = ctx->reorder;
- XLogRecord *r = &buf->record;
+ SnapBuild *builder = ctx->snapshot_builder;
+ ReorderBuffer *reorder = ctx->reorder;
+ XLogRecord *r = &buf->record;
uint8 info = r->xl_info & ~XLR_INFO_MASK;
/* no point in doing anything yet, data could not be decoded anyway */
@@ -280,7 +281,7 @@ DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
int i;
TransactionId *sub_xid;
- xlrec = (xl_xact_assignment *) buf->record_data;
+ xlrec = (xl_xact_assignment *) buf->record_data;
sub_xid = &xlrec->xsub[0];
@@ -292,6 +293,7 @@ DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
break;
}
case XLOG_XACT_PREPARE:
+
/*
* Currently decoding ignores PREPARE TRANSACTION and will just
* decode the transaction when the COMMIT PREPARED is sent or
@@ -321,7 +323,9 @@ DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
case XLOG_RUNNING_XACTS:
{
xl_running_xacts *running = (xl_running_xacts *) buf->record_data;
+
SnapBuildProcessRunningXacts(builder, buf->origptr, running);
+
/*
* Abort all transactions that we keep track of, that are
* older than the record's oldestRunningXid. This is the most
@@ -364,22 +368,25 @@ DecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
case XLOG_HEAP2_NEW_CID:
{
xl_heap_new_cid *xlrec;
+
xlrec = (xl_heap_new_cid *) buf->record_data;
SnapBuildProcessNewCid(builder, xid, buf->origptr, xlrec);
break;
}
case XLOG_HEAP2_REWRITE:
+
/*
* Although these records only exist to serve the needs of logical
* decoding, all the work happens as part of crash or archive
* recovery, so we don't need to do anything here.
*/
break;
- /*
- * Everything else here is just low level physical stuff we're
- * not interested in.
- */
+
+ /*
+ * Everything else here is just low level physical stuff we're not
+ * interested in.
+ */
case XLOG_HEAP2_FREEZE_PAGE:
case XLOG_HEAP2_CLEAN:
case XLOG_HEAP2_CLEANUP_INFO:
@@ -429,6 +436,7 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
break;
case XLOG_HEAP_NEWPAGE:
+
/*
* This is only used in places like indexams and CLUSTER which
* don't contain changes relevant for logical replication.
@@ -436,6 +444,7 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
break;
case XLOG_HEAP_INPLACE:
+
/*
* Inplace updates are only ever performed on catalog tuples and
* can, per definition, not change tuple visibility. Since we
@@ -503,8 +512,8 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
* There are basically two reasons we might not be interested in this
* transaction:
* 1) We might not be interested in decoding transactions up to this
- * LSN. This can happen because we previously decoded it and now just
- * are restarting or if we haven't assembled a consistent snapshot yet.
+ * LSN. This can happen because we previously decoded it and now just
+ * are restarting or if we haven't assembled a consistent snapshot yet.
* 2) The transaction happened in another database.
*
* We can't just use ReorderBufferAbort() here, because we need to execute
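The two skip reasons read as a simple predicate; a self-contained sketch under stated assumptions (the types and helper below are illustrative stand-ins, not this file's real ones):

    #include <stdbool.h>

    typedef unsigned long XLogRecPtr_ish;   /* illustrative stand-ins */
    typedef unsigned int  Oid_ish;
    #define INVALID_OID 0

    static bool
    skip_commit(XLogRecPtr_ish commit_lsn, XLogRecPtr_ish decode_from,
                Oid_ish txn_dboid, Oid_ish slot_dboid)
    {
        /* 1) not interested in decoding transactions up to this LSN */
        if (commit_lsn < decode_from)
            return true;
        /* 2) the transaction happened in another database */
        if (txn_dboid != INVALID_OID && txn_dboid != slot_dboid)
            return true;
        return false;
    }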
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 1d08b50da3..438a3fb152 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -8,21 +8,21 @@
* src/backend/replication/logical/logical.c
*
* NOTES
- * This file coordinates interaction between the various modules that
- * together provide logical decoding, primarily by providing so
- * called LogicalDecodingContexts. The goal is to encapsulate most of the
- * internal complexity for consumers of logical decoding, so they can
- * create and consume a changestream with a low amount of code. Builtin
- * consumers are the walsender and SQL SRF interface, but it's possible to
- * add further ones without changing core code, e.g. to consume changes in
- * a bgworker.
+ * This file coordinates interaction between the various modules that
+ * together provide logical decoding, primarily by providing so
+ * called LogicalDecodingContexts. The goal is to encapsulate most of the
+ * internal complexity for consumers of logical decoding, so they can
+ * create and consume a changestream with a low amount of code. Builtin
+ * consumers are the walsender and SQL SRF interface, but it's possible to
+ * add further ones without changing core code, e.g. to consume changes in
+ * a bgworker.
*
- * The idea is that a consumer provides three callbacks, one to read WAL,
- * one to prepare a data write, and a final one for actually writing since
- * their implementation depends on the type of consumer. Check
- * logicalfuncs.c for an example implementation of a fairly simple consumer
- * and a implementation of a WAL reading callback that's suitable for
- * simple consumers.
+ * The idea is that a consumer provides three callbacks, one to read WAL,
+ * one to prepare a data write, and a final one for actually writing since
+ * their implementation depends on the type of consumer. Check
+ * logicalfuncs.c for an example implementation of a fairly simple consumer
+ * and an implementation of a WAL reading callback that's suitable for
+ * simple consumers.
*-------------------------------------------------------------------------
*/
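As a rough illustration of the NOTES paragraph, a consumer supplies three callbacks. The signatures below are simplified assumptions shown only to make the division of labor concrete; the real typedefs live in the logical decoding headers.

    #include <stdio.h>
    #include <string.h>

    typedef struct MyConsumer { FILE *out; } MyConsumer;

    /* 1) read WAL: hand the decoder a chunk of WAL bytes (placeholder body) */
    static int
    my_read_wal(MyConsumer *c, char *buf, int count)
    {
        (void) c;
        memset(buf, 0, count);
        return count;
    }

    /* 2) prepare a data write: per-message setup such as framing */
    static void
    my_prepare_write(MyConsumer *c)
    {
        (void) c;
    }

    /* 3) actually write: emit the decoded change to the consumer's sink */
    static void
    my_write(MyConsumer *c, const char *data, int len)
    {
        fwrite(data, 1, (size_t) len, c->out);
    }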
@@ -56,13 +56,13 @@ typedef struct LogicalErrorCallbackState
/* wrappers around output plugin callbacks */
static void output_plugin_error_callback(void *arg);
static void startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
- bool is_init);
+ bool is_init);
static void shutdown_cb_wrapper(LogicalDecodingContext *ctx);
static void begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn);
static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
- XLogRecPtr commit_lsn);
+ XLogRecPtr commit_lsn);
static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
- Relation relation, ReorderBufferChange *change);
+ Relation relation, ReorderBufferChange *change);
static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, char *plugin);
@@ -90,18 +90,18 @@ CheckLogicalDecodingRequirements(void)
*
* There are basically three things missing to allow this:
* 1) We need to be able to correctly and quickly identify the timeline a
- * LSN belongs to
+ * LSN belongs to
* 2) We need to force hot_standby_feedback to be enabled at all times so
- * the primary cannot remove rows we need.
+ * the primary cannot remove rows we need.
* 3) support dropping replication slots referring to a database, in
- * dbase_redo. There can't be any active ones due to HS recovery
- * conflicts, so that should be relatively easy.
+ * dbase_redo. There can't be any active ones due to HS recovery
+ * conflicts, so that should be relatively easy.
* ----
*/
if (RecoveryInProgress())
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("logical decoding cannot be used while in recovery")));
+ errmsg("logical decoding cannot be used while in recovery")));
}
/*
@@ -117,7 +117,8 @@ StartupDecodingContext(List *output_plugin_options,
LogicalOutputPluginWriterWrite do_write)
{
ReplicationSlot *slot;
- MemoryContext context, old_context;
+ MemoryContext context,
+ old_context;
LogicalDecodingContext *ctx;
/* shorter lines... */
@@ -133,7 +134,10 @@ StartupDecodingContext(List *output_plugin_options,
ctx->context = context;
- /* (re-)load output plugins, so we detect a bad (removed) output plugin now. */
+ /*
+ * (re-)load output plugins, so we detect a bad (removed) output plugin
+ * now.
+ */
LoadOutputPlugin(&ctx->callbacks, NameStr(slot->data.plugin));
/*
@@ -195,10 +199,10 @@ CreateInitDecodingContext(char *plugin,
LogicalOutputPluginWriterPrepareWrite prepare_write,
LogicalOutputPluginWriterWrite do_write)
{
- TransactionId xmin_horizon = InvalidTransactionId;
+ TransactionId xmin_horizon = InvalidTransactionId;
ReplicationSlot *slot;
LogicalDecodingContext *ctx;
- MemoryContext old_context;
+ MemoryContext old_context;
/* shorter lines... */
slot = MyReplicationSlot;
@@ -219,8 +223,8 @@ CreateInitDecodingContext(char *plugin,
if (slot->data.database != MyDatabaseId)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("replication slot \"%s\" was not created in this database",
- NameStr(slot->data.name))));
+ errmsg("replication slot \"%s\" was not created in this database",
+ NameStr(slot->data.name))));
if (IsTransactionState() &&
GetTopTransactionIdIfAny() != InvalidTransactionId)
@@ -252,9 +256,9 @@ CreateInitDecodingContext(char *plugin,
*/
if (!RecoveryInProgress())
{
- XLogRecPtr flushptr;
+ XLogRecPtr flushptr;
- /* start at current insert position*/
+ /* start at current insert position */
slot->data.restart_lsn = GetXLogInsertRecPtr();
/* make sure we have enough information to start */
@@ -307,8 +311,8 @@ CreateInitDecodingContext(char *plugin,
LWLockRelease(ProcArrayLock);
/*
- * tell the snapshot builder to only assemble snapshot once reaching
- * the a running_xact's record with the respective xmin.
+ * tell the snapshot builder to only assemble a snapshot once reaching a
+ * running_xact's record with the respective xmin.
*/
xmin_horizon = slot->data.catalog_xmin;
@@ -316,7 +320,7 @@ CreateInitDecodingContext(char *plugin,
ReplicationSlotSave();
ctx = StartupDecodingContext(NIL, InvalidXLogRecPtr, xmin_horizon,
- read_page, prepare_write, do_write);
+ read_page, prepare_write, do_write);
/* call output plugin initialization callback */
old_context = MemoryContextSwitchTo(ctx->context);
@@ -352,7 +356,7 @@ CreateDecodingContext(XLogRecPtr start_lsn,
{
LogicalDecodingContext *ctx;
ReplicationSlot *slot;
- MemoryContext old_context;
+ MemoryContext old_context;
/* shorter lines... */
slot = MyReplicationSlot;
@@ -370,8 +374,8 @@ CreateDecodingContext(XLogRecPtr start_lsn,
if (slot->data.database != MyDatabaseId)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- (errmsg("replication slot \"%s\" was not created in this database",
- NameStr(slot->data.name)))));
+ (errmsg("replication slot \"%s\" was not created in this database",
+ NameStr(slot->data.name)))));
if (start_lsn == InvalidXLogRecPtr)
{
@@ -385,14 +389,14 @@ CreateDecodingContext(XLogRecPtr start_lsn,
* pretty common for a client to acknowledge a LSN it doesn't have to
* do anything for, and thus didn't store persistently, because the
* xlog records didn't result in anything relevant for logical
- * decoding. Clients have to be able to do that to support
- * synchronous replication.
+ * decoding. Clients have to be able to do that to support synchronous
+ * replication.
*/
start_lsn = slot->data.confirmed_flush;
elog(DEBUG1, "cannot stream from %X/%X, minimum is %X/%X, forwarding",
- (uint32)(start_lsn >> 32), (uint32)start_lsn,
- (uint32)(slot->data.confirmed_flush >> 32),
- (uint32)slot->data.confirmed_flush);
+ (uint32) (start_lsn >> 32), (uint32) start_lsn,
+ (uint32) (slot->data.confirmed_flush >> 32),
+ (uint32) slot->data.confirmed_flush);
}
ctx = StartupDecodingContext(output_plugin_options,
@@ -409,10 +413,10 @@ CreateDecodingContext(XLogRecPtr start_lsn,
(errmsg("starting logical decoding for slot %s",
NameStr(slot->data.name)),
errdetail("streaming transactions committing after %X/%X, reading WAL from %X/%X",
- (uint32)(slot->data.confirmed_flush >> 32),
- (uint32)slot->data.confirmed_flush,
- (uint32)(slot->data.restart_lsn >> 32),
- (uint32)slot->data.restart_lsn)));
+ (uint32) (slot->data.confirmed_flush >> 32),
+ (uint32) slot->data.confirmed_flush,
+ (uint32) (slot->data.restart_lsn >> 32),
+ (uint32) slot->data.restart_lsn)));
return ctx;
}
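The %X/%X convention used by the messages in this file splits a 64-bit LSN into two 32-bit halves; a self-contained illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t lsn = 0x00000001A2B3C4D5ULL;   /* hypothetical LSN */

        /* prints "1/A2B3C4D5" */
        printf("%X/%X\n", (unsigned) (lsn >> 32), (unsigned) lsn);
        return 0;
    }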
@@ -438,8 +442,8 @@ DecodingContextFindStartpoint(LogicalDecodingContext *ctx)
startptr = ctx->slot->data.restart_lsn;
elog(DEBUG1, "searching for logical decoding starting point, starting at %X/%X",
- (uint32)(ctx->slot->data.restart_lsn >> 32),
- (uint32)ctx->slot->data.restart_lsn);
+ (uint32) (ctx->slot->data.restart_lsn >> 32),
+ (uint32) ctx->slot->data.restart_lsn);
/* Wait for a consistent starting point */
for (;;)
@@ -543,14 +547,15 @@ static void
output_plugin_error_callback(void *arg)
{
LogicalErrorCallbackState *state = (LogicalErrorCallbackState *) arg;
+
/* not all callbacks have an associated LSN */
if (state->report_location != InvalidXLogRecPtr)
errcontext("slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X",
NameStr(state->ctx->slot->data.name),
NameStr(state->ctx->slot->data.plugin),
state->callback_name,
- (uint32)(state->report_location >> 32),
- (uint32)state->report_location);
+ (uint32) (state->report_location >> 32),
+ (uint32) state->report_location);
else
errcontext("slot \"%s\", output plugin \"%s\", in the %s callback",
NameStr(state->ctx->slot->data.name),
@@ -643,7 +648,7 @@ begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn)
static void
commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
- XLogRecPtr commit_lsn)
+ XLogRecPtr commit_lsn)
{
LogicalDecodingContext *ctx = cache->private_data;
LogicalErrorCallbackState state;
@@ -652,7 +657,7 @@ commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
/* Push callback + info on the error context stack */
state.ctx = ctx;
state.callback_name = "commit";
- state.report_location = txn->final_lsn; /* beginning of commit record */
+ state.report_location = txn->final_lsn; /* beginning of commit record */
errcallback.callback = output_plugin_error_callback;
errcallback.arg = (void *) &state;
errcallback.previous = error_context_stack;
@@ -672,7 +677,7 @@ commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
static void
change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
- Relation relation, ReorderBufferChange *change)
+ Relation relation, ReorderBufferChange *change)
{
LogicalDecodingContext *ctx = cache->private_data;
LogicalErrorCallbackState state;
@@ -690,6 +695,7 @@ change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
/* set output state */
ctx->accept_writes = true;
ctx->write_xid = txn->xid;
+
/*
* report this change's lsn so replies from clients can give an up2date
* answer. This won't ever be enough (and shouldn't be!) to confirm
@@ -715,7 +721,7 @@ change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
void
LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin)
{
- bool updated_xmin = false;
+ bool updated_xmin = false;
ReplicationSlot *slot;
slot = MyReplicationSlot;
@@ -725,16 +731,17 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin)
SpinLockAcquire(&slot->mutex);
/*
- * don't overwrite if we already have a newer xmin. This can
- * happen if we restart decoding in a slot.
+ * don't overwrite if we already have a newer xmin. This can happen if we
+ * restart decoding in a slot.
*/
if (TransactionIdPrecedesOrEquals(xmin, slot->data.catalog_xmin))
{
}
+
/*
- * If the client has already confirmed up to this lsn, we directly
- * can mark this as accepted. This can happen if we restart
- * decoding in a slot.
+ * If the client has already confirmed up to this lsn, we can directly
+ * mark this as accepted. This can happen if we restart decoding in a
+ * slot.
*/
else if (current_lsn <= slot->data.confirmed_flush)
{
@@ -744,6 +751,7 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin)
/* our candidate can directly be used */
updated_xmin = true;
}
+
/*
* Only increase if the previous values have been applied, otherwise we
* might never end up updating if the receiver acks too slowly.
@@ -770,7 +778,7 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin)
void
LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart_lsn)
{
- bool updated_lsn = false;
+ bool updated_lsn = false;
ReplicationSlot *slot;
slot = MyReplicationSlot;
@@ -781,13 +789,14 @@ LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart
SpinLockAcquire(&slot->mutex);
- /* don't overwrite if have a newer restart lsn*/
+ /* don't overwrite if have a newer restart lsn */
if (restart_lsn <= slot->data.restart_lsn)
{
}
+
/*
- * We might have already flushed far enough to directly accept this lsn, in
- * this case there is no need to check for existing candidate LSNs
+ * We might have already flushed far enough to directly accept this lsn,
+ * in this case there is no need to check for existing candidate LSNs
*/
else if (current_lsn <= slot->data.confirmed_flush)
{
@@ -797,6 +806,7 @@ LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart
/* our candidate can directly be used */
updated_lsn = true;
}
+
/*
* Only increase if the previous values have been applied, otherwise we
* might never end up updating if the receiver acks too slowly. A missed
@@ -896,6 +906,7 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
ReplicationSlotSave();
elog(DEBUG1, "updated xmin: %u restart: %u", updated_xmin, updated_restart);
}
+
/*
* Now the new xmin is safely on disk, we can let the global value
* advance. We do not take ProcArrayLock or similar since we only
diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c
index 5fa1848001..2da6bb10b2 100644
--- a/src/backend/replication/logical/logicalfuncs.c
+++ b/src/backend/replication/logical/logicalfuncs.c
@@ -42,11 +42,12 @@
#include "storage/fd.h"
/* private data for writing out data */
-typedef struct DecodingOutputState {
+typedef struct DecodingOutputState
+{
Tuplestorestate *tupstore;
- TupleDesc tupdesc;
- bool binary_output;
- int64 returned_rows;
+ TupleDesc tupdesc;
+ bool binary_output;
+ int64 returned_rows;
} DecodingOutputState;
/*
@@ -91,7 +92,7 @@ LogicalOutputWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xi
/* ick, but cstring_to_text_with_len works for bytea perfectly fine */
values[2] = PointerGetDatum(
- cstring_to_text_with_len(ctx->out->data, ctx->out->len));
+ cstring_to_text_with_len(ctx->out->data, ctx->out->len));
tuplestore_putvalues(p->tupstore, p->tupdesc, values, nulls);
p->returned_rows++;
@@ -412,7 +413,7 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
InvalidateSystemCaches();
while ((startptr != InvalidXLogRecPtr && startptr < end_of_wal) ||
- (ctx->reader->EndRecPtr && ctx->reader->EndRecPtr < end_of_wal))
+ (ctx->reader->EndRecPtr && ctx->reader->EndRecPtr < end_of_wal))
{
XLogRecord *record;
char *errm = NULL;
@@ -474,7 +475,8 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
Datum
pg_logical_slot_get_changes(PG_FUNCTION_ARGS)
{
- Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, false);
+ Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, false);
+
return ret;
}
@@ -484,7 +486,8 @@ pg_logical_slot_get_changes(PG_FUNCTION_ARGS)
Datum
pg_logical_slot_peek_changes(PG_FUNCTION_ARGS)
{
- Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, false);
+ Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, false);
+
return ret;
}
@@ -494,7 +497,8 @@ pg_logical_slot_peek_changes(PG_FUNCTION_ARGS)
Datum
pg_logical_slot_get_binary_changes(PG_FUNCTION_ARGS)
{
- Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, true);
+ Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, true);
+
return ret;
}
@@ -504,6 +508,7 @@ pg_logical_slot_get_binary_changes(PG_FUNCTION_ARGS)
Datum
pg_logical_slot_peek_binary_changes(PG_FUNCTION_ARGS)
{
- Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, true);
+ Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, true);
+
return ret;
}
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index a2b2adb173..7f2bbca302 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -60,7 +60,7 @@
#include "replication/logical.h"
#include "replication/reorderbuffer.h"
#include "replication/slot.h"
-#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */
+#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/sinval.h"
@@ -582,7 +582,7 @@ ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create,
*/
void
ReorderBufferQueueChange(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
- ReorderBufferChange *change)
+ ReorderBufferChange *change)
{
ReorderBufferTXN *txn;
@@ -1047,8 +1047,8 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
}
/*
- * Cleanup the tuplecids we stored for decoding catalog snapshot
- * access. They are always stored in the toplevel transaction.
+ * Cleanup the tuplecids we stored for decoding catalog snapshot access.
+ * They are always stored in the toplevel transaction.
*/
dlist_foreach_modify(iter, &txn->tuplecids)
{
@@ -1204,9 +1204,9 @@ ReorderBufferCopySnap(ReorderBuffer *rb, Snapshot orig_snap,
snap->subxip[i++] = txn->xid;
/*
- * nsubxcnt isn't decreased when subtransactions abort, so count
- * manually. Since it's an upper boundary it is safe to use it for the
- * allocation above.
+ * nsubxcnt isn't decreased when subtransactions abort, so count manually.
+ * Since it's an upper boundary it is safe to use it for the allocation
+ * above.
*/
snap->subxcnt = 1;
@@ -1262,10 +1262,10 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
ReorderBufferIterTXNState *iterstate = NULL;
ReorderBufferChange *change;
- volatile CommandId command_id = FirstCommandId;
- volatile Snapshot snapshot_now = NULL;
- volatile bool txn_started = false;
- volatile bool subtxn_started = false;
+ volatile CommandId command_id = FirstCommandId;
+ volatile Snapshot snapshot_now = NULL;
+ volatile bool txn_started = false;
+ volatile bool subtxn_started = false;
txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr,
false);
@@ -1309,8 +1309,8 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
/*
* Decoding needs access to syscaches et al., which in turn use
- * heavyweight locks and such. Thus we need to have enough state around
- * to keep track of those. The easiest way is to simply use a
+ * heavyweight locks and such. Thus we need to have enough state
+ * around to keep track of those. The easiest way is to simply use a
* transaction internally. That also allows us to easily enforce that
* nothing writes to the database by checking for xid assignments.
*
@@ -1344,7 +1344,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
Assert(snapshot_now);
reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode,
- change->data.tp.relnode.relNode);
+ change->data.tp.relnode.relNode);
/*
* Catalog tuple without data, emitted while catalog was
@@ -1415,6 +1415,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
ReorderBufferCopySnap(rb, change->data.snapshot,
txn, command_id);
}
+
/*
* Restored from disk, need to be careful not to double
* free. We could introduce refcounting for that, but for
@@ -1447,7 +1448,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
{
/* we don't use the global one anymore */
snapshot_now = ReorderBufferCopySnap(rb, snapshot_now,
- txn, command_id);
+ txn, command_id);
}
snapshot_now->curcid = command_id;
@@ -1586,7 +1587,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
*/
dlist_foreach_modify(it, &rb->toplevel_by_lsn)
{
- ReorderBufferTXN * txn;
+ ReorderBufferTXN *txn;
txn = dlist_container(ReorderBufferTXN, node, it.cur);
@@ -1998,7 +1999,8 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
case REORDER_BUFFER_CHANGE_DELETE:
{
char *data;
- ReorderBufferTupleBuf *oldtup, *newtup;
+ ReorderBufferTupleBuf *oldtup,
+ *newtup;
Size oldlen = 0;
Size newlen = 0;
@@ -2007,12 +2009,12 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
if (oldtup)
oldlen = offsetof(ReorderBufferTupleBuf, data)
- + oldtup->tuple.t_len
+ +oldtup->tuple.t_len
- offsetof(HeapTupleHeaderData, t_bits);
if (newtup)
newlen = offsetof(ReorderBufferTupleBuf, data)
- + newtup->tuple.t_len
+ +newtup->tuple.t_len
- offsetof(HeapTupleHeaderData, t_bits);
sz += oldlen;
@@ -2188,7 +2190,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
else if (readBytes < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from reorderbuffer spill file: %m")));
+ errmsg("could not read from reorderbuffer spill file: %m")));
else if (readBytes != sizeof(ReorderBufferDiskChange))
ereport(ERROR,
(errcode_for_file_access(),
@@ -2199,7 +2201,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
ondisk = (ReorderBufferDiskChange *) rb->outbuf;
ReorderBufferSerializeReserve(rb,
- sizeof(ReorderBufferDiskChange) + ondisk->size);
+ sizeof(ReorderBufferDiskChange) + ondisk->size);
ondisk = (ReorderBufferDiskChange *) rb->outbuf;
readBytes = read(*fd, rb->outbuf + sizeof(ReorderBufferDiskChange),
@@ -2208,13 +2210,13 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
if (readBytes < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from reorderbuffer spill file: %m")));
+ errmsg("could not read from reorderbuffer spill file: %m")));
else if (readBytes != ondisk->size - sizeof(ReorderBufferDiskChange))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read from reorderbuffer spill file: read %d instead of %u bytes",
readBytes,
- (uint32) (ondisk->size - sizeof(ReorderBufferDiskChange)))));
+ (uint32) (ondisk->size - sizeof(ReorderBufferDiskChange)))));
/*
* ok, read a full change from disk, now restore it into proper
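The read loop above follows a common header-then-payload pattern; a hedged stand-alone sketch (the header struct and function are invented for illustration) distinguishing I/O error, clean EOF, and short reads:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    typedef struct { uint32_t size; } DiskChangeHdr;   /* invented header */

    static char *
    read_change(int fd)
    {
        DiskChangeHdr hdr;
        char *payload;
        ssize_t nread = read(fd, &hdr, sizeof(hdr));

        if (nread < 0)                      /* I/O error */
        {
            perror("read");
            exit(1);
        }
        if (nread == 0)                     /* clean EOF between records */
            return NULL;
        if (nread != (ssize_t) sizeof(hdr)) /* short read: truncated file */
        {
            fprintf(stderr, "unexpected short read\n");
            exit(1);
        }

        payload = malloc(hdr.size);
        if (read(fd, payload, hdr.size) != (ssize_t) hdr.size)
        {
            fprintf(stderr, "unexpected short read\n");
            exit(1);
        }
        return payload;
    }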
@@ -2364,7 +2366,7 @@ StartupReorderBuffer(void)
logical_dir = AllocateDir("pg_replslot");
while ((logical_de = ReadDir(logical_dir, "pg_replslot")) != NULL)
{
- struct stat statbuf;
+ struct stat statbuf;
char path[MAXPGPATH];
if (strcmp(logical_de->d_name, ".") == 0 ||
@@ -2620,7 +2622,7 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
cchange = dlist_container(ReorderBufferChange, node, it.cur);
ctup = cchange->data.tp.newtuple;
chunk = DatumGetPointer(
- fastgetattr(&ctup->tuple, 3, toast_desc, &isnull));
+ fastgetattr(&ctup->tuple, 3, toast_desc, &isnull));
Assert(!isnull);
Assert(!VARATT_IS_EXTERNAL(chunk));
@@ -2800,7 +2802,7 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname)
ReorderBufferTupleCidKey key;
ReorderBufferTupleCidEnt *ent;
ReorderBufferTupleCidEnt *new_ent;
- bool found;
+ bool found;
/* be careful about padding */
memset(&key, 0, sizeof(ReorderBufferTupleCidKey));
@@ -2813,7 +2815,7 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname)
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m",
path)));
- else if (readBytes == 0) /* EOF */
+ else if (readBytes == 0) /* EOF */
break;
else if (readBytes != sizeof(LogicalRewriteMappingData))
ereport(ERROR,
@@ -2884,8 +2886,8 @@ TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
static int
file_sort_by_lsn(const void *a_p, const void *b_p)
{
- RewriteMappingFile *a = *(RewriteMappingFile **)a_p;
- RewriteMappingFile *b = *(RewriteMappingFile **)b_p;
+ RewriteMappingFile *a = *(RewriteMappingFile **) a_p;
+ RewriteMappingFile *b = *(RewriteMappingFile **) b_p;
if (a->lsn < b->lsn)
return -1;
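Since qsort here sorts an array of pointers, the comparator receives pointers to pointers, exactly as file_sort_by_lsn does above; a self-contained analogue with an illustrative struct:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { unsigned long lsn; } MapFile;  /* illustrative */

    static int
    by_lsn(const void *a_p, const void *b_p)
    {
        const MapFile *a = *(MapFile * const *) a_p;
        const MapFile *b = *(MapFile * const *) b_p;

        return (a->lsn > b->lsn) - (a->lsn < b->lsn);
    }

    int main(void)
    {
        MapFile f1 = {30}, f2 = {10}, f3 = {20};
        MapFile *files[] = {&f1, &f2, &f3};

        qsort(files, 3, sizeof(MapFile *), by_lsn);
        printf("%lu %lu %lu\n", files[0]->lsn, files[1]->lsn, files[2]->lsn);
        return 0;
    }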
@@ -2912,19 +2914,20 @@ UpdateLogicalMappings(HTAB *tuplecid_data, Oid relid, Snapshot snapshot)
mapping_dir = AllocateDir("pg_llog/mappings");
while ((mapping_de = ReadDir(mapping_dir, "pg_llog/mappings")) != NULL)
{
- Oid f_dboid;
- Oid f_relid;
- TransactionId f_mapped_xid;
- TransactionId f_create_xid;
- XLogRecPtr f_lsn;
- uint32 f_hi, f_lo;
+ Oid f_dboid;
+ Oid f_relid;
+ TransactionId f_mapped_xid;
+ TransactionId f_create_xid;
+ XLogRecPtr f_lsn;
+ uint32 f_hi,
+ f_lo;
RewriteMappingFile *f;
if (strcmp(mapping_de->d_name, ".") == 0 ||
strcmp(mapping_de->d_name, "..") == 0)
continue;
- /* Ignore files that aren't ours*/
+ /* Ignore files that aren't ours */
if (strncmp(mapping_de->d_name, "map-", 4) != 0)
continue;
@@ -2971,11 +2974,12 @@ UpdateLogicalMappings(HTAB *tuplecid_data, Oid relid, Snapshot snapshot)
qsort(files_a, list_length(files), sizeof(RewriteMappingFile *),
file_sort_by_lsn);
- for(off = 0; off < list_length(files); off++)
+ for (off = 0; off < list_length(files); off++)
{
RewriteMappingFile *f = files_a[off];
+
elog(DEBUG1, "applying mapping: \"%s\" in %u", f->fname,
- snapshot->subxip[0]);
+ snapshot->subxip[0]);
ApplyLogicalMappingFile(tuplecid_data, relid, f->fname);
pfree(f);
}
@@ -2995,7 +2999,7 @@ ResolveCminCmaxDuringDecoding(HTAB *tuplecid_data,
ReorderBufferTupleCidEnt *ent;
ForkNumber forkno;
BlockNumber blockno;
- bool updated_mapping = false;
+ bool updated_mapping = false;
/* be careful about padding */
memset(&key, 0, sizeof(key));
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 36034dbec9..cb45f906fc 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -57,27 +57,27 @@
*
* The snapbuild machinery is starting up in several stages, as illustrated
* by the following graph:
- * +-------------------------+
- * +----|SNAPBUILD_START |-------------+
- * | +-------------------------+ |
- * | | |
- * | | |
- * | running_xacts with running xacts |
- * | | |
- * | | |
- * | v |
- * | +-------------------------+ v
- * | |SNAPBUILD_FULL_SNAPSHOT |------------>|
- * | +-------------------------+ |
- * running_xacts | saved snapshot
- * with zero xacts | at running_xacts's lsn
- * | | |
- * | all running toplevel TXNs finished |
- * | | |
- * | v |
- * | +-------------------------+ |
- * +--->|SNAPBUILD_CONSISTENT |<------------+
- * +-------------------------+
+ * +-------------------------+
+ * +----|SNAPBUILD_START |-------------+
+ * | +-------------------------+ |
+ * | | |
+ * | | |
+ * | running_xacts with running xacts |
+ * | | |
+ * | | |
+ * | v |
+ * | +-------------------------+ v
+ * | |SNAPBUILD_FULL_SNAPSHOT |------------>|
+ * | +-------------------------+ |
+ * running_xacts | saved snapshot
+ * with zero xacts | at running_xacts's lsn
+ * | | |
+ * | all running toplevel TXNs finished |
+ * | | |
+ * | v |
+ * | +-------------------------+ |
+ * +--->|SNAPBUILD_CONSISTENT |<------------+
+ * +-------------------------+
*
* Initially the machinery is in the START stage. When a xl_running_xacts
* record is read that is sufficiently new (above the safe xmin horizon),
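A minimal sketch of the state progression in the graph above, as a plain enum; the names mirror the diagram, and the transitions are simplified (restoring a serialized snapshot is not shown).

    typedef enum
    {
        SNAPBUILD_START,
        SNAPBUILD_FULL_SNAPSHOT,
        SNAPBUILD_CONSISTENT
    } SnapBuildStateSketch;

    /* A running_xacts record with zero xacts jumps straight to CONSISTENT;
     * otherwise we go to FULL_SNAPSHOT and wait for the initially running
     * toplevel transactions to finish. */
    static SnapBuildStateSketch
    advance(SnapBuildStateSketch s, int running_xacts)
    {
        if (s == SNAPBUILD_START)
            return running_xacts == 0 ? SNAPBUILD_CONSISTENT
                                      : SNAPBUILD_FULL_SNAPSHOT;
        if (s == SNAPBUILD_FULL_SNAPSHOT && running_xacts == 0)
            return SNAPBUILD_CONSISTENT;
        return s;
    }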
@@ -184,7 +184,7 @@ struct SnapBuild
* Information about initially running transactions
*
* When we start building a snapshot there already may be transactions in
- * progress. Those are stored in running.xip. We don't have enough
+ * progress. Those are stored in running.xip. We don't have enough
* information about those to decode their contents, so until they are
* finished (xcnt=0) we cannot switch to a CONSISTENT state.
*/
@@ -244,7 +244,7 @@ struct SnapBuild
* removes knowledge about the previously used resowner, so we save it here.
*/
ResourceOwner SavedResourceOwnerDuringExport = NULL;
-bool ExportInProgress = false;
+bool ExportInProgress = false;
/* transaction state manipulation functions */
static void SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid);
@@ -496,7 +496,7 @@ SnapBuildBuildSnapshot(SnapBuild *builder, TransactionId xid)
snapshot->copied = false;
snapshot->curcid = FirstCommandId;
snapshot->active_count = 0;
- snapshot->regd_count = 1; /* mark as registered so nobody frees it */
+ snapshot->regd_count = 1; /* mark as registered so nobody frees it */
return snapshot;
}
@@ -635,7 +635,7 @@ SnapBuildClearExportedSnapshot()
bool
SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn)
{
- bool is_old_tx;
+ bool is_old_tx;
/*
* We can't handle data in transactions if we haven't built a snapshot
@@ -692,10 +692,10 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid,
CommandId cid;
/*
- * we only log new_cid's if a catalog tuple was modified, so mark
- * the transaction as containing catalog modifications
+ * we only log new_cid's if a catalog tuple was modified, so mark the
+ * transaction as containing catalog modifications
*/
- ReorderBufferXidSetCatalogChanges(builder->reorder, xid,lsn);
+ ReorderBufferXidSetCatalogChanges(builder->reorder, xid, lsn);
ReorderBufferAddNewTupleCids(builder->reorder, xlrec->top_xid, lsn,
xlrec->target.node, xlrec->target.tid,
@@ -712,7 +712,7 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid,
cid = xlrec->cmin;
else
{
- cid = InvalidCommandId; /* silence compiler */
+ cid = InvalidCommandId; /* silence compiler */
elog(ERROR, "xl_heap_new_cid record without a valid CommandId");
}
@@ -818,7 +818,7 @@ SnapBuildAddCommittedTxn(SnapBuild *builder, TransactionId xid)
(uint32) builder->committed.xcnt_space);
builder->committed.xip = repalloc(builder->committed.xip,
- builder->committed.xcnt_space * sizeof(TransactionId));
+ builder->committed.xcnt_space * sizeof(TransactionId));
}
/*
@@ -900,10 +900,10 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
* so our incrementally built snapshot is now consistent.
*/
ereport(LOG,
- (errmsg("logical decoding found consistent point at %X/%X",
- (uint32)(lsn >> 32), (uint32)lsn),
- errdetail("xid %u finished, no running transactions anymore",
- xid)));
+ (errmsg("logical decoding found consistent point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("xid %u finished, no running transactions anymore",
+ xid)));
builder->state = SNAPBUILD_CONSISTENT;
}
}
@@ -1170,15 +1170,16 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
*/
if (txn != NULL && txn->restart_decoding_lsn != InvalidXLogRecPtr)
LogicalIncreaseRestartDecodingForSlot(lsn, txn->restart_decoding_lsn);
+
/*
* No in-progress transaction, can reuse the last serialized snapshot if
* we have one.
*/
else if (txn == NULL &&
- builder->reorder->current_restart_decoding_lsn != InvalidXLogRecPtr &&
+ builder->reorder->current_restart_decoding_lsn != InvalidXLogRecPtr &&
builder->last_serialized_snapshot != InvalidXLogRecPtr)
LogicalIncreaseRestartDecodingForSlot(lsn,
- builder->last_serialized_snapshot);
+ builder->last_serialized_snapshot);
}
@@ -1199,23 +1200,23 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
* the currently running transactions. There are several ways to do that:
*
* a) There were no running transactions when the xl_running_xacts record
- * was inserted, jump to CONSISTENT immediately. We might find such a
- * state we were waiting for b) and c).
+ * was inserted, jump to CONSISTENT immediately. We might find such a
+ * state we were waiting for b) and c).
*
* b) Wait for all toplevel transactions that were running to end. We
- * simply track the number of in-progress toplevel transactions and
- * lower it whenever one commits or aborts. When that number
- * (builder->running.xcnt) reaches zero, we can go from FULL_SNAPSHOT
- * to CONSISTENT.
+ * simply track the number of in-progress toplevel transactions and
+ * lower it whenever one commits or aborts. When that number
+ * (builder->running.xcnt) reaches zero, we can go from FULL_SNAPSHOT
+ * to CONSISTENT.
* NB: We need to search running.xip when seeing a transaction's end to
- * make sure it's a toplevel transaction and it's been one of the
- * intially running ones.
+ * make sure it's a toplevel transaction and it's been one of the
+ * initially running ones.
* Interestingly, in contrast to HS, this allows us not to care about
* subtransactions - and by extension suboverflowed xl_running_xacts -
* at all.
*
* c) This (in a previous run) or another decoding slot serialized a
- * snapshot to disk that we can use.
+ * snapshot to disk that we can use.
* ---
*/
@@ -1231,7 +1232,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
(errmsg("skipping snapshot at %X/%X while building logical decoding snapshot, xmin horizon too low",
(uint32) (lsn >> 32), (uint32) lsn),
errdetail("initial xmin horizon of %u vs the snapshot's %u",
- builder->initial_xmin_horizon, running->oldestRunningXid)));
+ builder->initial_xmin_horizon, running->oldestRunningXid)));
return true;
}
@@ -1263,7 +1264,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
ereport(LOG,
(errmsg("logical decoding found consistent point at %X/%X",
- (uint32)(lsn >> 32), (uint32)lsn),
+ (uint32) (lsn >> 32), (uint32) lsn),
errdetail("running xacts with xcnt == 0")));
return false;
@@ -1274,15 +1275,16 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
/* there won't be any state to cleanup */
return false;
}
+
/*
* b) first encounter of a useable xl_running_xacts record. If we had
- * found one earlier we would either track running transactions
- * (i.e. builder->running.xcnt != 0) or be consistent (this function
- * wouldn't get called).
+ * found one earlier we would either track running transactions (i.e.
+ * builder->running.xcnt != 0) or be consistent (this function wouldn't
+ * get called).
*/
else if (!builder->running.xcnt)
{
- int off;
+ int off;
/*
* We only care about toplevel xids as those are the ones we
@@ -1302,7 +1304,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
builder->running.xcnt_space = running->xcnt;
builder->running.xip =
MemoryContextAlloc(builder->context,
- builder->running.xcnt * sizeof(TransactionId));
+ builder->running.xcnt * sizeof(TransactionId));
memcpy(builder->running.xip, running->xids,
builder->running.xcnt * sizeof(TransactionId));
@@ -1320,9 +1322,9 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
builder->state = SNAPBUILD_FULL_SNAPSHOT;
ereport(LOG,
- (errmsg("logical decoding found initial starting point at %X/%X",
- (uint32)(lsn >> 32), (uint32)lsn),
- errdetail("%u xacts need to finish", (uint32) builder->running.xcnt)));
+ (errmsg("logical decoding found initial starting point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("%u xacts need to finish", (uint32) builder->running.xcnt)));
/*
* Iterate through all xids, wait for them to finish.
@@ -1331,7 +1333,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
* isolationtester to notice that we're currently waiting for
* something.
*/
- for(off = 0; off < builder->running.xcnt; off++)
+ for (off = 0; off < builder->running.xcnt; off++)
{
TransactionId xid = builder->running.xip[off];
@@ -1471,9 +1473,9 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn)
* but remember location, so we don't need to read old data again.
*
* To be sure it has been synced to disk after the rename() from the
- * tempfile filename to the real filename, we just repeat the
- * fsync. That ought to be cheap because in most scenarios it should
- * already be safely on disk.
+ * tempfile filename to the real filename, we just repeat the fsync.
+ * That ought to be cheap because in most scenarios it should already
+ * be safely on disk.
*/
fsync_fname(path, false);
fsync_fname("pg_llog/snapshots", true);
@@ -1504,7 +1506,7 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn)
if (unlink(tmppath) != 0 && errno != ENOENT)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not unlink file \"%s\": %m", path)));
+ errmsg("could not unlink file \"%s\": %m", path)));
needed_length = sizeof(SnapBuildOnDisk) +
sizeof(TransactionId) * builder->running.xcnt_space +
@@ -1518,7 +1520,7 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn)
INIT_CRC32(ondisk->checksum);
COMP_CRC32(ondisk->checksum,
((char *) ondisk) + SnapBuildOnDiskNotChecksummedSize,
- SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
+ SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
ondisk_c += sizeof(SnapBuildOnDisk);
memcpy(&ondisk->builder, builder, sizeof(SnapBuild));
@@ -1597,8 +1599,8 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn)
fsync_fname("pg_llog/snapshots", true);
/*
- * Now there's no way we can loose the dumped state anymore, remember
- * this as a serialization point.
+ * Now there's no way we can lose the dumped state anymore, remember this
+ * as a serialization point.
*/
builder->last_serialized_snapshot = lsn;
@@ -1673,7 +1675,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
INIT_CRC32(checksum);
COMP_CRC32(checksum,
((char *) &ondisk) + SnapBuildOnDiskNotChecksummedSize,
- SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
+ SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
/* read SnapBuild */
readBytes = read(fd, &ondisk.builder, sizeof(SnapBuild));
@@ -1781,7 +1783,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
ereport(LOG,
(errmsg("logical decoding found consistent point at %X/%X",
- (uint32)(lsn >> 32), (uint32)lsn),
+ (uint32) (lsn >> 32), (uint32) lsn),
errdetail("found initial snapshot in snapbuild file")));
return true;
@@ -1829,7 +1831,7 @@ CheckPointSnapBuild(void)
uint32 hi;
uint32 lo;
XLogRecPtr lsn;
- struct stat statbuf;
+ struct stat statbuf;
if (strcmp(snap_de->d_name, ".") == 0 ||
strcmp(snap_de->d_name, "..") == 0)
@@ -1846,8 +1848,8 @@ CheckPointSnapBuild(void)
/*
* temporary filenames from SnapBuildSerialize() include the LSN and
* everything but are postfixed by .$pid.tmp. We can just remove them
- * the same as other files because there can be none that are currently
- * being written that are older than cutoff.
+ * the same as other files because there can be none that are
+ * currently being written that are older than cutoff.
*
* We just log a message if a file doesn't fit the pattern; it's
* probably some editor's lock/state file or similar...
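The snapbuild.c hunks above rely on a write-tempfile, fsync, rename, fsync
sequence to make the serialized snapshot durable. Below is a minimal
standalone sketch of that pattern, assuming plain POSIX calls; fsync_path()
and durable_rename_sketch() are illustrative names, not the backend's
fsync_fname()/ereport() machinery, and error handling is reduced to a -1
return.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int
    fsync_path(const char *name, int flags)
    {
        int     fd = open(name, flags);

        if (fd < 0)
            return -1;
        if (fsync(fd) != 0)
        {
            close(fd);
            return -1;
        }
        return close(fd);
    }

    static int
    durable_rename_sketch(const char *tmppath, const char *path, const char *dir)
    {
        /* contents must reach disk before the rename makes them visible */
        if (fsync_path(tmppath, O_WRONLY) != 0)
            return -1;
        if (rename(tmppath, path) != 0)
            return -1;
        /*
         * Repeat the fsync under the final name, then fsync the containing
         * directory so the rename entry itself survives a crash; both ought
         * to be cheap because the data is normally already safely on disk.
         */
        if (fsync_path(path, O_WRONLY) != 0)
            return -1;
        return fsync_path(dir, O_RDONLY);
    }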
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 76e5573660..ee0c7c07a9 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -72,7 +72,7 @@ typedef struct ReplicationSlotOnDisk
sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize
#define SLOT_MAGIC 0x1051CA1 /* format identifier */
-#define SLOT_VERSION 1 /* version for new files */
+#define SLOT_VERSION 1 /* version for new files */
/* Control array for replication slot management */
ReplicationSlotCtlData *ReplicationSlotCtl = NULL;
@@ -81,7 +81,8 @@ ReplicationSlotCtlData *ReplicationSlotCtl = NULL;
ReplicationSlot *MyReplicationSlot = NULL;
/* GUCs */
-int max_replication_slots = 0; /* the maximum number of replication slots */
+int max_replication_slots = 0; /* the maximum number of replication
+ * slots */
static void ReplicationSlotDropAcquired(void);
@@ -180,8 +181,8 @@ ReplicationSlotValidateName(const char *name, int elevel)
{
ereport(elevel,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("replication slot name \"%s\" contains invalid character",
- name),
+ errmsg("replication slot name \"%s\" contains invalid character",
+ name),
errhint("Replication slot names may only contain letters, numbers and the underscore character.")));
return false;
}
@@ -194,7 +195,7 @@ ReplicationSlotValidateName(const char *name, int elevel)
*
* name: Name of the slot
* db_specific: logical decoding is db specific; if the slot is going to
- * be used for that pass true, otherwise false.
+ * be used for that, pass true, otherwise false.
*/
void
ReplicationSlotCreate(const char *name, bool db_specific,
@@ -208,18 +209,18 @@ ReplicationSlotCreate(const char *name, bool db_specific,
ReplicationSlotValidateName(name, ERROR);
/*
- * If some other backend ran this code currently with us, we'd likely
- * both allocate the same slot, and that would be bad. We'd also be
- * at risk of missing a name collision. Also, we don't want to try to
- * create a new slot while somebody's busy cleaning up an old one, because
- * we might both be monkeying with the same directory.
+ * If some other backend ran this code concurrently with us, we'd likely both
+ * allocate the same slot, and that would be bad. We'd also be at risk of
+ * missing a name collision. Also, we don't want to try to create a new
+ * slot while somebody's busy cleaning up an old one, because we might
+ * both be monkeying with the same directory.
*/
LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
/*
- * Check for name collision, and identify an allocatable slot. We need
- * to hold ReplicationSlotControlLock in shared mode for this, so that
- * nobody else can change the in_use flags while we're looking at them.
+ * Check for name collision, and identify an allocatable slot. We need to
+ * hold ReplicationSlotControlLock in shared mode for this, so that nobody
+ * else can change the in_use flags while we're looking at them.
*/
LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
for (i = 0; i < max_replication_slots; i++)
@@ -243,10 +244,10 @@ ReplicationSlotCreate(const char *name, bool db_specific,
errhint("Free one or increase max_replication_slots.")));
/*
- * Since this slot is not in use, nobody should be looking at any
- * part of it other than the in_use field unless they're trying to allocate
- * it. And since we hold ReplicationSlotAllocationLock, nobody except us
- * can be doing that. So it's safe to initialize the slot.
+ * Since this slot is not in use, nobody should be looking at any part of
+ * it other than the in_use field unless they're trying to allocate it.
+ * And since we hold ReplicationSlotAllocationLock, nobody except us can
+ * be doing that. So it's safe to initialize the slot.
*/
Assert(!slot->in_use);
Assert(!slot->active);
@@ -366,6 +367,7 @@ ReplicationSlotRelease(void)
{
/* Mark slot inactive. We're not freeing it, just disconnecting. */
volatile ReplicationSlot *vslot = slot;
+
SpinLockAcquire(&slot->mutex);
vslot->active = false;
SpinLockRelease(&slot->mutex);
@@ -444,7 +446,7 @@ ReplicationSlotDropAcquired(void)
else
{
volatile ReplicationSlot *vslot = slot;
- bool fail_softly = slot->data.persistency == RS_EPHEMERAL;
+ bool fail_softly = slot->data.persistency == RS_EPHEMERAL;
SpinLockAcquire(&slot->mutex);
vslot->active = false;
@@ -571,8 +573,8 @@ ReplicationSlotsComputeRequiredXmin(bool already_locked)
for (i = 0; i < max_replication_slots; i++)
{
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
- TransactionId effective_xmin;
- TransactionId effective_catalog_xmin;
+ TransactionId effective_xmin;
+ TransactionId effective_catalog_xmin;
if (!s->in_use)
continue;
@@ -612,7 +614,7 @@ void
ReplicationSlotsComputeRequiredLSN(void)
{
int i;
- XLogRecPtr min_required = InvalidXLogRecPtr;
+ XLogRecPtr min_required = InvalidXLogRecPtr;
Assert(ReplicationSlotCtl != NULL);
@@ -620,7 +622,7 @@ ReplicationSlotsComputeRequiredLSN(void)
for (i = 0; i < max_replication_slots; i++)
{
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
- XLogRecPtr restart_lsn;
+ XLogRecPtr restart_lsn;
if (!s->in_use)
continue;
@@ -669,7 +671,7 @@ ReplicationSlotsComputeLogicalRestartLSN(void)
for (i = 0; i < max_replication_slots; i++)
{
volatile ReplicationSlot *s;
- XLogRecPtr restart_lsn;
+ XLogRecPtr restart_lsn;
s = &ReplicationSlotCtl->replication_slots[i];
@@ -772,8 +774,8 @@ CheckSlotRequirements(void)
static bool
string_endswith(const char *str, const char *end)
{
- size_t slen = strlen(str);
- size_t elen = strlen(end);
+ size_t slen = strlen(str);
+ size_t elen = strlen(end);
/* can't be a postfix if longer */
if (elen > slen)
@@ -802,8 +804,8 @@ CheckPointReplicationSlots(void)
* Prevent any slot from being created/dropped while we're active. As we
* explicitly do *not* want to block iterating over replication_slots or
* acquiring a slot we cannot take the control lock - but that's OK,
- * because holding ReplicationSlotAllocationLock is strictly stronger,
- * and enough to guarantee that nobody can change the in_use bits on us.
+ * because holding ReplicationSlotAllocationLock is strictly stronger, and
+ * enough to guarantee that nobody can change the in_use bits on us.
*/
LWLockAcquire(ReplicationSlotAllocationLock, LW_SHARED);
@@ -839,7 +841,7 @@ StartupReplicationSlots(XLogRecPtr checkPointRedo)
replication_dir = AllocateDir("pg_replslot");
while ((replication_de = ReadDir(replication_dir, "pg_replslot")) != NULL)
{
- struct stat statbuf;
+ struct stat statbuf;
char path[MAXPGPATH];
if (strcmp(replication_de->d_name, ".") == 0 ||
@@ -892,7 +894,7 @@ CreateSlotOnDisk(ReplicationSlot *slot)
{
char tmppath[MAXPGPATH];
char path[MAXPGPATH];
- struct stat st;
+ struct stat st;
/*
* No need to take out the io_in_progress_lock, nobody else can see this
@@ -904,11 +906,10 @@ CreateSlotOnDisk(ReplicationSlot *slot)
sprintf(tmppath, "pg_replslot/%s.tmp", NameStr(slot->data.name));
/*
- * It's just barely possible that some previous effort to create or
- * drop a slot with this name left a temp directory lying around.
- * If that seems to be the case, try to remove it. If the rmtree()
- * fails, we'll error out at the mkdir() below, so we don't bother
- * checking success.
+ * It's just barely possible that some previous effort to create or drop a
+ * slot with this name left a temp directory lying around. If that seems
+ * to be the case, try to remove it. If the rmtree() fails, we'll error
+ * out at the mkdir() below, so we don't bother checking success.
*/
if (stat(tmppath, &st) == 0 && S_ISDIR(st.st_mode))
rmtree(tmppath, true);
@@ -922,7 +923,7 @@ CreateSlotOnDisk(ReplicationSlot *slot)
fsync_fname(tmppath, true);
/* Write the actual state file. */
- slot->dirty = true; /* signal that we really need to write */
+ slot->dirty = true; /* signal that we really need to write */
SaveSlotToPath(slot, tmppath, ERROR);
/* Rename the directory into place. */
@@ -1003,12 +1004,13 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
SpinLockRelease(&slot->mutex);
COMP_CRC32(cp.checksum,
- (char *)(&cp) + ReplicationSlotOnDiskConstantSize,
+ (char *) (&cp) + ReplicationSlotOnDiskConstantSize,
ReplicationSlotOnDiskDynamicSize);
if ((write(fd, &cp, sizeof(cp))) != sizeof(cp))
{
- int save_errno = errno;
+ int save_errno = errno;
+
CloseTransientFile(fd);
errno = save_errno;
ereport(elevel,
@@ -1021,7 +1023,8 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
/* fsync the temporary file */
if (pg_fsync(fd) != 0)
{
- int save_errno = errno;
+ int save_errno = errno;
+
CloseTransientFile(fd);
errno = save_errno;
ereport(elevel,
@@ -1150,19 +1153,19 @@ RestoreSlotFromDisk(const char *name)
if (cp.version != SLOT_VERSION)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("replication slot file \"%s\" has unsupported version %u",
- path, cp.version)));
+ errmsg("replication slot file \"%s\" has unsupported version %u",
+ path, cp.version)));
/* boundary check on length */
if (cp.length != ReplicationSlotOnDiskDynamicSize)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("replication slot file \"%s\" has corrupted length %u",
- path, cp.length)));
+ errmsg("replication slot file \"%s\" has corrupted length %u",
+ path, cp.length)));
/* Now that we know the size, read the entire file */
readBytes = read(fd,
- (char *)&cp + ReplicationSlotOnDiskConstantSize,
+ (char *) &cp + ReplicationSlotOnDiskConstantSize,
cp.length);
if (readBytes != cp.length)
{
@@ -1181,7 +1184,7 @@ RestoreSlotFromDisk(const char *name)
/* now verify the CRC32 */
INIT_CRC32(checksum);
COMP_CRC32(checksum,
- (char *)&cp + ReplicationSlotOnDiskConstantSize,
+ (char *) &cp + ReplicationSlotOnDiskConstantSize,
ReplicationSlotOnDiskDynamicSize);
if (!EQ_CRC32(checksum, cp.checksum))
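Both the snapbuild and slot.c code above checksum only the part of the
on-disk struct that follows a constant-size header, so the magic, version
and checksum fields themselves stay outside the CRC. Below is a minimal
sketch of that layout, assuming zlib's crc32() as a stand-in for the
INIT_CRC32/COMP_CRC32/EQ_CRC32 macros; StateOnDisk and its field sizes are
invented for illustration.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <zlib.h>

    typedef struct StateOnDisk
    {
        uint32_t    magic;          /* format identifier, never checksummed */
        uint32_t    version;        /* version for new files */
        uint32_t    checksum;       /* CRC of everything after the header */
        uint32_t    length;         /* size of the checksummed part */
        char        payload[64];    /* stand-in for the real state data */
    } StateOnDisk;

    #define STATE_HEADER_SIZE offsetof(StateOnDisk, payload)

    static uint32_t
    state_compute_crc(const StateOnDisk *s)
    {
        return (uint32_t) crc32(crc32(0L, Z_NULL, 0),
                                (const Bytef *) s + STATE_HEADER_SIZE,
                                s->length);
    }

    /* after reading the file back, verify the length first, then the CRC */
    static bool
    state_verify(const StateOnDisk *s)
    {
        return s->length == sizeof(s->payload) &&
               state_compute_crc(s) == s->checksum;
    }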
diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c
index c9416b03ee..dc94f504ee 100644
--- a/src/backend/replication/slotfuncs.c
+++ b/src/backend/replication/slotfuncs.c
@@ -53,7 +53,7 @@ pg_create_physical_replication_slot(PG_FUNCTION_ARGS)
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
- /* acquire replication slot, this will check for conflicting names*/
+ /* acquire replication slot, this will check for conflicting names */
ReplicationSlotCreate(NameStr(*name), false, RS_PERSISTENT);
values[0] = NameGetDatum(&MyReplicationSlot->data.name);
@@ -97,8 +97,7 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
Assert(!MyReplicationSlot);
/*
- * Acquire a logical decoding slot, this will check for conflicting
- * names.
+ * Acquire a logical decoding slot, this will check for conflicting names.
*/
ReplicationSlotCreate(NameStr(*name), true, RS_EPHEMERAL);
@@ -106,8 +105,8 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
* Create logical decoding context, to build the initial snapshot.
*/
ctx = CreateInitDecodingContext(
- NameStr(*plugin), NIL,
- logical_read_local_xlog_page, NULL, NULL);
+ NameStr(*plugin), NIL,
+ logical_read_local_xlog_page, NULL, NULL);
/* build initial snapshot, might take a while */
DecodingContextFindStartpoint(ctx);
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index f65021caa6..aa54bfba6c 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -117,8 +117,8 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
* set. See SyncRepUpdateSyncStandbysDefined.
*
* Also check that the standby hasn't already replied. Unlikely race
- * condition but we'll be fetching that cache line anyway so it's likely to
- * be a low cost check.
+ * condition but we'll be fetching that cache line anyway so it's likely
+ * to be a low cost check.
*/
if (!WalSndCtl->sync_standbys_defined ||
XactCommitLSN <= WalSndCtl->lsn[mode])
@@ -517,7 +517,7 @@ SyncRepGetStandbyPriority(void)
}
/*
- * Walk the specified queue from head. Set the state of any backends that
+ * Walk the specified queue from head. Set the state of any backends that
* need to be woken, remove them from the queue, and then wake them.
* Pass all = true to wake whole queue; otherwise, just wake up to
* the walsender's LSN.
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index b0de0ea253..c2d4ed3a96 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -258,7 +258,7 @@ WalReceiverMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walreceiver probably never has
+ * can signal any child processes too. (walreceiver probably never has
* any child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -786,7 +786,7 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -934,9 +934,9 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
if (lseek(recvFile, (off_t) startoff, SEEK_SET) < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not seek in log segment %s to offset %u: %m",
- XLogFileNameP(recvFileTLI, recvSegNo),
- startoff)));
+ errmsg("could not seek in log segment %s to offset %u: %m",
+ XLogFileNameP(recvFileTLI, recvSegNo),
+ startoff)));
recvOff = startoff;
}
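XLogWalRcvWrite() above seeks within the current WAL segment only when the
cached offset (recvOff) does not already match the write position, and
treats failures as fatal. Below is a hedged sketch of that
seek-only-when-needed write loop, assuming plain POSIX calls, an int return
instead of PANIC, and none of the EINTR handling production code would want.

    #include <sys/types.h>
    #include <unistd.h>

    static int
    write_at_offset(int fd, const char *buf, size_t nbytes,
                    off_t startoff, off_t *cached_off)
    {
        /* skip the lseek() when the file position is already right */
        if (*cached_off != startoff)
        {
            if (lseek(fd, startoff, SEEK_SET) < 0)
                return -1;
            *cached_off = startoff;
        }

        while (nbytes > 0)
        {
            ssize_t     written = write(fd, buf, nbytes);

            if (written <= 0)
                return -1;
            nbytes -= written;
            buf += written;
            *cached_off += written;
        }
        return 0;
    }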
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index acadec57f5..579216af34 100644
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -291,7 +291,7 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
* Returns the last+1 byte position that walreceiver has written.
*
* Optionally, returns the previous chunk start, that is the first byte
- * written in the most recent walreceiver flush cycle. Callers not
+ * written in the most recent walreceiver flush cycle. Callers not
* interested in that value may pass NULL for latestChunkStart. Same for
* receiveTLI.
*/
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 6e22c03bcf..5c11d681c3 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -82,7 +82,7 @@
#include "utils/timestamp.h"
/*
- * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
+ * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
*
* We don't have a good idea of what a good value would be; there's some
* overhead per message in both walsender and walreceiver, but on the other
@@ -165,7 +165,7 @@ static bool streamingDoneSending;
static bool streamingDoneReceiving;
/* Are we there yet? */
-static bool WalSndCaughtUp = false;
+static bool WalSndCaughtUp = false;
/* Flags set by signal handlers for later service in main loop */
static volatile sig_atomic_t got_SIGHUP = false;
@@ -180,7 +180,7 @@ static volatile sig_atomic_t walsender_ready_to_stop = false;
static volatile sig_atomic_t replication_active = false;
static LogicalDecodingContext *logical_decoding_ctx = NULL;
-static XLogRecPtr logical_startptr = InvalidXLogRecPtr;
+static XLogRecPtr logical_startptr = InvalidXLogRecPtr;
/* Signal handlers */
static void WalSndSigHupHandler(SIGNAL_ARGS);
@@ -188,7 +188,7 @@ static void WalSndXLogSendHandler(SIGNAL_ARGS);
static void WalSndLastCycleHandler(SIGNAL_ARGS);
/* Prototypes for private functions */
-typedef void (*WalSndSendDataCallback)(void);
+typedef void (*WalSndSendDataCallback) (void);
static void WalSndLoop(WalSndSendDataCallback send_data);
static void InitWalSenderSlot(void);
static void WalSndKill(int code, Datum arg);
@@ -301,8 +301,8 @@ IdentifySystem(void)
/*
* Reply with a result set with one row, four columns. First col is system
- * ID, second is timeline ID, third is current xlog location and the fourth
- * contains the database name if we are connected to one.
+ * ID, second is timeline ID, third is current xlog location and the
+ * fourth contains the database name if we are connected to one.
*/
snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
@@ -358,22 +358,22 @@ IdentifySystem(void)
pq_sendint(&buf, 0, 2); /* format code */
/* third field */
- pq_sendstring(&buf, "xlogpos"); /* col name */
- pq_sendint(&buf, 0, 4); /* table oid */
- pq_sendint(&buf, 0, 2); /* attnum */
- pq_sendint(&buf, TEXTOID, 4); /* type oid */
- pq_sendint(&buf, -1, 2); /* typlen */
- pq_sendint(&buf, 0, 4); /* typmod */
- pq_sendint(&buf, 0, 2); /* format code */
+ pq_sendstring(&buf, "xlogpos"); /* col name */
+ pq_sendint(&buf, 0, 4); /* table oid */
+ pq_sendint(&buf, 0, 2); /* attnum */
+ pq_sendint(&buf, TEXTOID, 4); /* type oid */
+ pq_sendint(&buf, -1, 2); /* typlen */
+ pq_sendint(&buf, 0, 4); /* typmod */
+ pq_sendint(&buf, 0, 2); /* format code */
/* fourth field */
- pq_sendstring(&buf, "dbname"); /* col name */
- pq_sendint(&buf, 0, 4); /* table oid */
- pq_sendint(&buf, 0, 2); /* attnum */
- pq_sendint(&buf, TEXTOID, 4); /* type oid */
- pq_sendint(&buf, -1, 2); /* typlen */
- pq_sendint(&buf, 0, 4); /* typmod */
- pq_sendint(&buf, 0, 2); /* format code */
+ pq_sendstring(&buf, "dbname"); /* col name */
+ pq_sendint(&buf, 0, 4); /* table oid */
+ pq_sendint(&buf, 0, 2); /* attnum */
+ pq_sendint(&buf, TEXTOID, 4); /* type oid */
+ pq_sendint(&buf, -1, 2); /* typlen */
+ pq_sendint(&buf, 0, 4); /* typmod */
+ pq_sendint(&buf, 0, 2); /* format code */
pq_endmessage(&buf);
/* Send a DataRow message */
@@ -388,12 +388,12 @@ IdentifySystem(void)
/* send NULL if not connected to a database */
if (dbname)
{
- pq_sendint(&buf, strlen(dbname), 4); /* col4 len */
+ pq_sendint(&buf, strlen(dbname), 4); /* col4 len */
pq_sendbytes(&buf, (char *) dbname, strlen(dbname));
}
else
{
- pq_sendint(&buf, -1, 4); /* col4 len, NULL */
+ pq_sendint(&buf, -1, 4); /* col4 len, NULL */
}
pq_endmessage(&buf);
@@ -731,11 +731,11 @@ StartReplication(StartReplicationCmd *cmd)
* set every time WAL is flushed.
*/
static int
-logical_read_xlog_page(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen,
- XLogRecPtr targetRecPtr, char* cur_page, TimeLineID *pageTLI)
+logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
+ XLogRecPtr targetRecPtr, char *cur_page, TimeLineID *pageTLI)
{
- XLogRecPtr flushptr;
- int count;
+ XLogRecPtr flushptr;
+ int count;
/* make sure we have enough WAL available */
flushptr = WalSndWaitForWal(targetPagePtr + reqLen);
@@ -764,7 +764,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
{
const char *slot_name;
const char *snapshot_name = NULL;
- char xpos[MAXFNAMELEN];
+ char xpos[MAXFNAMELEN];
StringInfoData buf;
Assert(!MyReplicationSlot);
@@ -792,9 +792,9 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
LogicalDecodingContext *ctx;
ctx = CreateInitDecodingContext(
- cmd->plugin, NIL,
- logical_read_xlog_page,
- WalSndPrepareWrite, WalSndWriteData);
+ cmd->plugin, NIL,
+ logical_read_xlog_page,
+ WalSndPrepareWrite, WalSndWriteData);
/* build initial snapshot, might take a while */
DecodingContextFindStartpoint(ctx);
@@ -838,7 +838,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
pq_sendint(&buf, 0, 2); /* format code */
/* third field: exported snapshot's name */
- pq_sendstring(&buf, "snapshot_name"); /* col name */
+ pq_sendstring(&buf, "snapshot_name"); /* col name */
pq_sendint(&buf, 0, 4); /* table oid */
pq_sendint(&buf, 0, 2); /* attnum */
pq_sendint(&buf, TEXTOID, 4); /* type oid */
@@ -847,7 +847,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
pq_sendint(&buf, 0, 2); /* format code */
/* fourth field: output plugin */
- pq_sendstring(&buf, "output_plugin"); /* col name */
+ pq_sendstring(&buf, "output_plugin"); /* col name */
pq_sendint(&buf, 0, 4); /* table oid */
pq_sendint(&buf, 0, 2); /* attnum */
pq_sendint(&buf, TEXTOID, 4); /* type oid */
@@ -862,26 +862,26 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
pq_sendint(&buf, 4, 2); /* # of columns */
/* slot_name */
- pq_sendint(&buf, strlen(slot_name), 4); /* col1 len */
+ pq_sendint(&buf, strlen(slot_name), 4); /* col1 len */
pq_sendbytes(&buf, slot_name, strlen(slot_name));
/* consistent wal location */
- pq_sendint(&buf, strlen(xpos), 4); /* col2 len */
+ pq_sendint(&buf, strlen(xpos), 4); /* col2 len */
pq_sendbytes(&buf, xpos, strlen(xpos));
/* snapshot name */
if (snapshot_name != NULL)
{
- pq_sendint(&buf, strlen(snapshot_name), 4); /* col3 len */
+ pq_sendint(&buf, strlen(snapshot_name), 4); /* col3 len */
pq_sendbytes(&buf, snapshot_name, strlen(snapshot_name));
}
else
- pq_sendint(&buf, -1, 4); /* col3 len, NULL */
+ pq_sendint(&buf, -1, 4); /* col3 len, NULL */
/* plugin */
if (cmd->plugin != NULL)
{
- pq_sendint(&buf, strlen(cmd->plugin), 4); /* col4 len */
+ pq_sendint(&buf, strlen(cmd->plugin), 4); /* col4 len */
pq_sendbytes(&buf, cmd->plugin, strlen(cmd->plugin));
}
else
@@ -951,9 +951,9 @@ StartLogicalReplication(StartReplicationCmd *cmd)
* to be shipped from that position.
*/
logical_decoding_ctx = CreateDecodingContext(
- cmd->startpoint, cmd->options,
- logical_read_xlog_page,
- WalSndPrepareWrite, WalSndWriteData);
+ cmd->startpoint, cmd->options,
+ logical_read_xlog_page,
+ WalSndPrepareWrite, WalSndWriteData);
/* Start reading WAL from the oldest required WAL. */
logical_startptr = MyReplicationSlot->data.restart_lsn;
@@ -1013,11 +1013,12 @@ WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xi
pq_sendbyte(ctx->out, 'w');
pq_sendint64(ctx->out, lsn); /* dataStart */
pq_sendint64(ctx->out, lsn); /* walEnd */
+
/*
* Fill out the sendtime later, just as it's done in XLogSendPhysical, but
* reserve space here.
*/
- pq_sendint64(ctx->out, 0); /* sendtime */
+ pq_sendint64(ctx->out, 0); /* sendtime */
}
/*
@@ -1035,9 +1036,9 @@ WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
pq_putmessage_noblock('d', ctx->out->data, ctx->out->len);
/*
- * Fill the send timestamp last, so that it is taken as late as
- * possible. This is somewhat ugly, but the protocol's set as it's already
- * used for several releases by streaming physical replication.
+ * Fill the send timestamp last, so that it is taken as late as possible.
+ * This is somewhat ugly, but the protocol's set as it's already used for
+ * several releases by streaming physical replication.
*/
resetStringInfo(&tmpbuf);
pq_sendint64(&tmpbuf, GetCurrentIntegerTimestamp());
@@ -1056,7 +1057,7 @@ WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
{
int wakeEvents;
long sleeptime;
- TimestampTz now;
+ TimestampTz now;
/*
* Emergency bailout if postmaster has died. This is to avoid the
@@ -1140,7 +1141,7 @@ WalSndWaitForWal(XLogRecPtr loc)
for (;;)
{
long sleeptime;
- TimestampTz now;
+ TimestampTz now;
/*
* Emergency bailout if postmaster has died. This is to avoid the
@@ -1297,6 +1298,7 @@ exec_replication_command(const char *cmd_string)
case T_StartReplicationCmd:
{
StartReplicationCmd *cmd = (StartReplicationCmd *) cmd_node;
+
if (cmd->kind == REPLICATION_KIND_PHYSICAL)
StartReplication(cmd);
else
@@ -1472,7 +1474,8 @@ ProcessStandbyMessage(void)
static void
PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
{
- bool changed = false;
+ bool changed = false;
+
/* use volatile pointer to prevent code rearrangement */
volatile ReplicationSlot *slot = MyReplicationSlot;
@@ -1492,9 +1495,9 @@ PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
}
/*
- * One could argue that the slot should be saved to disk now, but that'd be
- * energy wasted - the worst lost information can do here is give us wrong
- * information in a statistics view - we'll just potentially be more
+ * One could argue that the slot should be saved to disk now, but that'd
+ * be energy wasted - the worst lost information can do here is give us
+ * wrong information in a statistics view - we'll just potentially be more
* conservative in removing files.
*/
}
@@ -1561,15 +1564,16 @@ ProcessStandbyReplyMessage(void)
static void
PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin)
{
- bool changed = false;
+ bool changed = false;
volatile ReplicationSlot *slot = MyReplicationSlot;
SpinLockAcquire(&slot->mutex);
MyPgXact->xmin = InvalidTransactionId;
+
/*
- * For physical replication we don't need the interlock provided
- * by xmin and effective_xmin since the consequences of a missed increase
- * are limited to query cancellations, so set both at once.
+ * For physical replication we don't need the interlock provided by xmin
+ * and effective_xmin since the consequences of a missed increase are
+ * limited to query cancellations, so set both at once.
*/
if (!TransactionIdIsNormal(slot->data.xmin) ||
!TransactionIdIsNormal(feedbackXmin) ||
@@ -1655,7 +1659,7 @@ ProcessStandbyHSFeedbackMessage(void)
* perhaps far enough to make feedbackXmin wrap around. In that case the
* xmin we set here would be "in the future" and have no effect. No point
* in worrying about this since it's too late to save the desired data
- * anyway. Assuming that the standby sends us an increasing sequence of
+ * anyway. Assuming that the standby sends us an increasing sequence of
* xmins, this could only happen during the first reply cycle, else our
* own xmin would prevent nextXid from advancing so far.
*
@@ -1667,11 +1671,11 @@ ProcessStandbyHSFeedbackMessage(void)
*
* If we're using a replication slot we reserve the xmin via that,
* otherwise via the walsender's PGXACT entry.
-
+ *
* XXX: It might make sense to introduce ephemeral slots and always use
* the slot mechanism.
*/
- if (MyReplicationSlot != NULL) /* XXX: persistency configurable? */
+ if (MyReplicationSlot != NULL) /* XXX: persistency configurable? */
PhysicalReplicationSlotNewXmin(feedbackXmin);
else
MyPgXact->xmin = feedbackXmin;
@@ -1692,8 +1696,8 @@ WalSndComputeSleeptime(TimestampTz now)
if (wal_sender_timeout > 0)
{
TimestampTz wakeup_time;
- long sec_to_timeout;
- int microsec_to_timeout;
+ long sec_to_timeout;
+ int microsec_to_timeout;
/*
* At the latest stop sleeping once wal_sender_timeout has been
@@ -1703,13 +1707,13 @@ WalSndComputeSleeptime(TimestampTz now)
wal_sender_timeout);
/*
- * If no ping has been sent yet, wakeup when it's time to do
- * so. WalSndKeepaliveIfNecessary() wants to send a keepalive once
- * half of the timeout passed without a response.
+ * If no ping has been sent yet, wakeup when it's time to do so.
+ * WalSndKeepaliveIfNecessary() wants to send a keepalive once half of
+ * the timeout passed without a response.
*/
if (!waiting_for_ping_response)
wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
- wal_sender_timeout / 2);
+ wal_sender_timeout / 2);
/* Compute relative time until wakeup. */
TimestampDifference(now, wakeup_time,
@@ -1738,11 +1742,11 @@ WalSndCheckTimeOut(TimestampTz now)
{
/*
* Since typically expiration of replication timeout means
- * communication problem, we don't send the error message to
- * the standby.
+ * a communication problem, we don't send the error message to the
+ * standby.
*/
ereport(COMMERROR,
- (errmsg("terminating walsender process due to replication timeout")));
+ (errmsg("terminating walsender process due to replication timeout")));
WalSndShutdown();
}
@@ -1770,7 +1774,7 @@ WalSndLoop(WalSndSendDataCallback send_data)
*/
for (;;)
{
- TimestampTz now;
+ TimestampTz now;
/*
* Emergency bailout if postmaster has died. This is to avoid the
@@ -1839,10 +1843,10 @@ WalSndLoop(WalSndSendDataCallback send_data)
/*
* When SIGUSR2 arrives, we send any outstanding logs up to the
- * shutdown checkpoint record (i.e., the latest record), wait
- * for them to be replicated to the standby, and exit.
- * This may be a normal termination at shutdown, or a promotion,
- * the walsender is not sure which.
+ * shutdown checkpoint record (i.e., the latest record), wait for
+ * them to be replicated to the standby, and exit. This may be a
+ * normal termination at shutdown or a promotion; the walsender
+ * is not sure which.
*/
if (walsender_ready_to_stop)
WalSndDone(send_data);
@@ -2246,7 +2250,7 @@ XLogSendPhysical(void)
*
* Attempt to send all data that's already been written out and
* fsync'd to disk. We cannot go further than what's been written out
- * given the current implementation of XLogRead(). And in any case
+ * given the current implementation of XLogRead(). And in any case
* it's unsafe to send WAL that is not securely down to disk on the
* master: if the master subsequently crashes and restarts, slaves
* must not have applied any WAL that gets lost on the master.
@@ -2416,8 +2420,8 @@ XLogSendLogical(void)
else
{
/*
- * If the record we just wanted read is at or beyond the flushed point,
- * then we're caught up.
+ * If the record we just wanted read is at or beyond the flushed
+ * point, then we're caught up.
*/
if (logical_decoding_ctx->reader->EndRecPtr >= GetFlushRecPtr())
WalSndCaughtUp = true;
@@ -2452,10 +2456,10 @@ WalSndDone(WalSndSendDataCallback send_data)
send_data();
/*
- * Check a write location to see whether all the WAL have
- * successfully been replicated if this walsender is connecting
- * to a standby such as pg_receivexlog which always returns
- * an invalid flush location. Otherwise, check a flush location.
+ * Check a write location to see whether all the WAL has successfully
+ * been replicated if this walsender is connecting to a standby such as
+ * pg_receivexlog, which always returns an invalid flush location.
+ * Otherwise, check a flush location.
*/
replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ?
MyWalSnd->write : MyWalSnd->flush;
@@ -2562,8 +2566,8 @@ WalSndLastCycleHandler(SIGNAL_ARGS)
/*
* If replication has not yet started, die like with SIGTERM. If
* replication is active, only set a flag and wake up the main loop. It
- * will send any outstanding WAL, wait for it to be replicated to
- * the standby, and then exit gracefully.
+ * will send any outstanding WAL, wait for it to be replicated to the
+ * standby, and then exit gracefully.
*/
if (!replication_active)
kill(MyProcPid, SIGTERM);
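WalSndComputeSleeptime(), reindented above, wakes the walsender either when
wal_sender_timeout would expire or, while no ping is outstanding, at the
half-timeout point where WalSndKeepaliveIfNecessary() wants to send a
keepalive. Below is a simplified sketch of that arithmetic on plain
millisecond values rather than TimestampTz; the 10-second default cap is an
assumption, not taken from the hunks shown.

    static long
    compute_sleeptime_ms(long now, long last_reply, long timeout_ms,
                         int waiting_for_ping_response)
    {
        long        sleeptime = 10000;     /* assumed default cap: 10 s */

        if (timeout_ms > 0)
        {
            /* at the latest, stop sleeping once the timeout would expire */
            long        wakeup = last_reply + timeout_ms;

            /* if no ping has been sent yet, wake at the keepalive point */
            if (!waiting_for_ping_response)
                wakeup = last_reply + timeout_ms / 2;

            sleeptime = wakeup - now;
            if (sleeptime < 0)
                sleeptime = 0;
        }
        return sleeptime;
    }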
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 1fb6b69225..50ecf7e884 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -202,7 +202,7 @@ DefineRule(RuleStmt *stmt, const char *queryString)
transformRuleStmt(stmt, queryString, &actions, &whereClause);
/*
- * Find and lock the relation. Lock level should match
+ * Find and lock the relation. Lock level should match
* DefineQueryRewrite.
*/
relId = RangeVarGetRelid(stmt->relation, AccessExclusiveLock, false);
@@ -357,7 +357,7 @@ DefineQueryRewrite(char *rulename,
RelationGetDescr(event_relation),
true,
event_relation->rd_rel->relkind !=
- RELKIND_MATVIEW);
+ RELKIND_MATVIEW);
/*
* ... there must not be another ON SELECT rule already ...
@@ -409,7 +409,7 @@ DefineQueryRewrite(char *rulename,
*
* If so, check that the relation is empty because the storage for the
* relation is going to be deleted. Also insist that the rel not have
- * any triggers, indexes, or child tables. (Note: these tests are too
+ * any triggers, indexes, or child tables. (Note: these tests are too
* strict, because they will reject relations that once had such but
* don't anymore. But we don't really care, because this whole
* business of converting relations to views is just a kluge to allow
@@ -712,7 +712,7 @@ checkRuleResultList(List *targetList, TupleDesc resultDesc, bool isSelect,
* Note: for a view (ON SELECT rule), the checkAsUser field of the OLD
* RTE entry will be overridden when the view rule is expanded, and the
* checkAsUser field of the NEW entry is irrelevant because that entry's
- * requiredPerms bits will always be zero. However, for other types of rules
+ * requiredPerms bits will always be zero. However, for other types of rules
* it's important to set these fields to match the rule owner. So we just set
* them always.
*/
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index caed8caee6..e6c553068c 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -209,7 +209,7 @@ AcquireRewriteLocks(Query *parsetree,
/*
* The elements of an alias list have to refer to
* earlier RTEs of the same rtable, because that's the
- * order the planner builds things in. So we already
+ * order the planner builds things in. So we already
* processed the referenced RTE, and so it's safe to
* use get_rte_attribute_is_dropped on it. (This might
* not hold after rewriting or planning, but it's OK
@@ -371,7 +371,7 @@ rewriteRuleAction(Query *parsetree,
/*
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
- * action. Some of the entries may be unused after we finish rewriting,
+ * action. Some of the entries may be unused after we finish rewriting,
* but we leave them all in place for two reasons:
*
* We'd have a much harder job to adjust the query's varnos if we
@@ -437,7 +437,7 @@ rewriteRuleAction(Query *parsetree,
* that if the rule action refers to OLD, its jointree will add a
* reference to rt_index. If the rule action doesn't refer to OLD, but
* either the rule_qual or the user query quals do, then we need to keep
- * the original rtindex in the jointree to provide data for the quals. We
+ * the original rtindex in the jointree to provide data for the quals. We
* don't want the original rtindex to be joined twice, however, so avoid
* keeping it if the rule action mentions it.
*
@@ -459,7 +459,7 @@ rewriteRuleAction(Query *parsetree,
{
/*
* If sub_action is a setop, manipulating its jointree will do no
- * good at all, because the jointree is dummy. (Perhaps someday
+ * good at all, because the jointree is dummy. (Perhaps someday
* we could push the joining and quals down to the member
* statements of the setop?)
*/
@@ -668,7 +668,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* then junk fields (these in no particular order).
*
* We must do items 1,2,3 before firing rewrite rules, else rewritten
- * references to NEW.foo will produce wrong or incomplete results. Item 4
+ * references to NEW.foo will produce wrong or incomplete results. Item 4
* is not needed for rewriting, but will be needed by the planner, and we
* can do it essentially for free while handling the other items.
*
@@ -876,7 +876,7 @@ process_matched_tle(TargetEntry *src_tle,
}
/*----------
- * Multiple assignments to same attribute. Allow only if all are
+ * Multiple assignments to same attribute. Allow only if all are
* FieldStore or ArrayRef assignment operations. This is a bit
* tricky because what we may actually be looking at is a nest of
* such nodes; consider
@@ -894,7 +894,7 @@ process_matched_tle(TargetEntry *src_tle,
* assignments appear to occur left-to-right.
*
* For FieldStore, instead of nesting we can generate a single
- * FieldStore with multiple target fields. We must nest when
+ * FieldStore with multiple target fields. We must nest when
* ArrayRefs are involved though.
*----------
*/
@@ -1186,7 +1186,7 @@ rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation, List *attrnos)
* rewriteTargetListUD - rewrite UPDATE/DELETE targetlist as needed
*
* This function adds a "junk" TLE that is needed to allow the executor to
- * find the original row for the update or delete. When the target relation
+ * find the original row for the update or delete. When the target relation
* is a regular table, the junk TLE emits the ctid attribute of the original
* row. When the target relation is a view, there is no ctid, so we instead
* emit a whole-row Var that will contain the "old" values of the view row.
@@ -1375,9 +1375,9 @@ ApplyRetrieveRule(Query *parsetree,
* fine as the result relation.
*
* For UPDATE/DELETE, we need to expand the view so as to have source
- * data for the operation. But we also need an unmodified RTE to
+ * data for the operation. But we also need an unmodified RTE to
* serve as the target. So, copy the RTE and add the copy to the
- * rangetable. Note that the copy does not get added to the jointree.
+ * rangetable. Note that the copy does not get added to the jointree.
* Also note that there's a hack in fireRIRrules to avoid calling this
* function again when it arrives at the copied RTE.
*/
@@ -1549,7 +1549,7 @@ markQueryForLocking(Query *qry, Node *jtnode,
* in the given tree.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * SubLink nodes in-place. It is caller's responsibility to ensure that
+ * SubLink nodes in-place. It is caller's responsibility to ensure that
* no unwanted side-effects occur!
*
* This is unlike most of the other routines that recurse into subselects,
@@ -1745,7 +1745,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
* not just "NOT x" which the planner is much smarter about, else we will
* do the wrong thing when the qual evaluates to NULL.)
*
- * The rule_qual may contain references to OLD or NEW. OLD references are
+ * The rule_qual may contain references to OLD or NEW. OLD references are
* replaced by references to the specified rt_index (the relation that the
* rule applies to). NEW references are only possible for INSERT and UPDATE
* queries on the relation itself, and so they should be replaced by copies
@@ -1818,7 +1818,7 @@ CopyAndAddInvertedQual(Query *parsetree,
* rows that the qualified action doesn't act on. (If there are multiple
* qualified INSTEAD rules, we AND all the negated quals onto a single
* modified original query.) We won't execute the original, unmodified
- * query if we find either qualified or unqualified INSTEAD rules. If
+ * query if we find either qualified or unqualified INSTEAD rules. If
* we find both, the modified original query is discarded too.
*/
static List *
@@ -2174,8 +2174,8 @@ view_cols_are_auto_updatable(Query *viewquery,
ListCell *cell;
/*
- * The caller should have verified that this view is auto-updatable and
- * so there should be a single base relation.
+ * The caller should have verified that this view is auto-updatable and so
+ * there should be a single base relation.
*/
Assert(list_length(viewquery->jointree->fromlist) == 1);
rtr = (RangeTblRef *) linitial(viewquery->jointree->fromlist);
@@ -2212,7 +2212,7 @@ view_cols_are_auto_updatable(Query *viewquery,
}
}
- return NULL; /* all the required view columns are updatable */
+ return NULL; /* all the required view columns are updatable */
}
@@ -2227,7 +2227,7 @@ view_cols_are_auto_updatable(Query *viewquery,
* updatability.
*
* This is used for the information_schema views, which have separate concepts
- * of "updatable" and "trigger updatable". A relation is "updatable" if it
+ * of "updatable" and "trigger updatable". A relation is "updatable" if it
* can be updated without the need for triggers (either because it has a
* suitable RULE, or because it is simple enough to be automatically updated).
* A relation is "trigger updatable" if it has a suitable INSTEAD OF trigger.
@@ -2239,7 +2239,7 @@ view_cols_are_auto_updatable(Query *viewquery,
* to have trigger updatability included in the result.
*
* The return value is a bitmask of rule event numbers indicating which of
- * the INSERT, UPDATE and DELETE operations are supported. (We do it this way
+ * the INSERT, UPDATE and DELETE operations are supported. (We do it this way
* so that we can test for UPDATE plus DELETE support in a single call.)
*/
int
@@ -2354,9 +2354,9 @@ relation_is_updatable(Oid reloid,
/*
* Determine which of the view's columns are updatable. If there
- * are none within the set of columns we are looking at, then
- * the view doesn't support INSERT/UPDATE, but it may still
- * support DELETE.
+ * are none within the set of columns we are looking at, then the
+ * view doesn't support INSERT/UPDATE, but it may still support
+ * DELETE.
*/
view_cols_are_auto_updatable(viewquery, NULL,
&updatable_cols, NULL);
@@ -2365,9 +2365,9 @@ relation_is_updatable(Oid reloid,
updatable_cols = bms_int_members(updatable_cols, include_cols);
if (bms_is_empty(updatable_cols))
- auto_events = (1 << CMD_DELETE); /* May support DELETE */
+ auto_events = (1 << CMD_DELETE); /* May support DELETE */
else
- auto_events = ALL_EVENTS; /* May support all events */
+ auto_events = ALL_EVENTS; /* May support all events */
/*
* The base relation must also support these update commands.
@@ -2476,7 +2476,7 @@ adjust_view_column_set(Bitmapset *cols, List *targetlist)
* the view's base relation becomes the target relation.
*
* Note that the base relation here may itself be a view, which may or may not
- * have INSTEAD OF triggers or rules to handle the update. That is handled by
+ * have INSTEAD OF triggers or rules to handle the update. That is handled by
* the recursion in RewriteQuery.
*/
static Query *
@@ -2573,18 +2573,18 @@ rewriteTargetView(Query *parsetree, Relation view)
case CMD_INSERT:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot insert into column \"%s\" of view \"%s\"",
- non_updatable_col,
- RelationGetRelationName(view)),
- errdetail_internal("%s", _(auto_update_detail))));
+ errmsg("cannot insert into column \"%s\" of view \"%s\"",
+ non_updatable_col,
+ RelationGetRelationName(view)),
+ errdetail_internal("%s", _(auto_update_detail))));
break;
case CMD_UPDATE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot update column \"%s\" of view \"%s\"",
- non_updatable_col,
- RelationGetRelationName(view)),
- errdetail_internal("%s", _(auto_update_detail))));
+ errmsg("cannot update column \"%s\" of view \"%s\"",
+ non_updatable_col,
+ RelationGetRelationName(view)),
+ errdetail_internal("%s", _(auto_update_detail))));
break;
default:
elog(ERROR, "unrecognized CmdType: %d",
@@ -2688,7 +2688,7 @@ rewriteTargetView(Query *parsetree, Relation view)
* that does not correspond to what happens in ordinary SELECT usage of a
* view: all referenced columns must have read permission, even if
* optimization finds that some of them can be discarded during query
- * transformation. The flattening we're doing here is an optional
+ * transformation. The flattening we're doing here is an optional
* optimization, too. (If you are unpersuaded and want to change this,
* note that applying adjust_view_column_set to view_rte->selectedCols is
* clearly *not* the right answer, since that neglects base-rel columns
@@ -2703,8 +2703,8 @@ rewriteTargetView(Query *parsetree, Relation view)
/*
* Move any security barrier quals from the view RTE onto the new target
- * RTE. Any such quals should now apply to the new target RTE and will not
- * reference the original view RTE in the rewritten query.
+ * RTE. Any such quals should now apply to the new target RTE and will
+ * not reference the original view RTE in the rewritten query.
*/
new_rte->securityQuals = view_rte->securityQuals;
view_rte->securityQuals = NIL;
@@ -2790,8 +2790,8 @@ rewriteTargetView(Query *parsetree, Relation view)
* we did with the view targetlist).
*
* Note that there is special-case handling for the quals of a security
- * barrier view, since they need to be kept separate from any user-supplied
- * quals, so these quals are kept on the new target RTE.
+ * barrier view, since they need to be kept separate from any
+ * user-supplied quals, so these quals are kept on the new target RTE.
*
* For INSERT, the view's quals can be ignored in the main query.
*/
@@ -2836,13 +2836,14 @@ rewriteTargetView(Query *parsetree, Relation view)
* If the parent view has a cascaded check option, treat this view as
* if it also had a cascaded check option.
*
- * New WithCheckOptions are added to the start of the list, so if there
- * is a cascaded check option, it will be the first item in the list.
+ * New WithCheckOptions are added to the start of the list, so if
+ * there is a cascaded check option, it will be the first item in the
+ * list.
*/
if (parsetree->withCheckOptions != NIL)
{
WithCheckOption *parent_wco =
- (WithCheckOption *) linitial(parsetree->withCheckOptions);
+ (WithCheckOption *) linitial(parsetree->withCheckOptions);
if (parent_wco->cascaded)
{
@@ -3089,7 +3090,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
/*
* At this point product_queries contains any DO ALSO rule
- * actions. Add the rewritten query before or after those. This
+ * actions. Add the rewritten query before or after those. This
* must match the handling the original query would have gotten
* below, if we allowed it to be included again.
*/
@@ -3309,7 +3310,7 @@ QueryRewrite(Query *parsetree)
*
* If the original query is still in the list, it sets the command tag.
* Otherwise, the last INSTEAD query of the same kind as the original is
- * allowed to set the tag. (Note these rules can leave us with no query
+ * allowed to set the tag. (Note these rules can leave us with no query
* setting the tag. The tcop code has to cope with this by setting up a
* default tag based on the original un-rewritten query.)
*
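relation_is_updatable(), per the comments above, reports support for
INSERT, UPDATE and DELETE as a bitmask of rule event numbers so that a
caller can test several events in a single call. A tiny sketch of that
convention follows; the CMD_* values here are placeholders, not the real
CmdType enum.

    #define CMD_INSERT  1               /* placeholder event numbers */
    #define CMD_UPDATE  2
    #define CMD_DELETE  3
    #define ALL_EVENTS  ((1 << CMD_INSERT) | (1 << CMD_UPDATE) | (1 << CMD_DELETE))

    static int
    supports_update_and_delete(int events)
    {
        int         required = (1 << CMD_UPDATE) | (1 << CMD_DELETE);

        /* both bits must be set, hence the mask-and-compare */
        return (events & required) == required;
    }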
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 1829c76bad..bcf3bd9243 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -58,7 +58,7 @@ static Relids adjust_relid_set(Relids relids, int oldrelid, int newrelid);
* specified query level.
*
* The objective of this routine is to detect whether there are aggregates
- * belonging to the given query level. Aggregates belonging to subqueries
+ * belonging to the given query level. Aggregates belonging to subqueries
* or outer queries do NOT cause a true result. We must recurse into
* subqueries to detect outer-reference aggregates that logically belong to
* the specified query level.
@@ -113,7 +113,7 @@ contain_aggs_of_level_walker(Node *node,
* Find the parse location of any aggregate of the specified query level.
*
* Returns -1 if no such agg is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -208,7 +208,7 @@ contain_windowfuncs_walker(Node *node, void *context)
* Find the parse location of any windowfunc of the current query level.
*
* Returns -1 if no such windowfunc is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -287,11 +287,11 @@ checkExprHasSubLink_walker(Node *node, void *context)
*
* Find all Var nodes in the given tree with varlevelsup == sublevels_up,
* and increment their varno fields (rangetable indexes) by 'offset'.
- * The varnoold fields are adjusted similarly. Also, adjust other nodes
+ * The varnoold fields are adjusted similarly. Also, adjust other nodes
* that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -449,11 +449,11 @@ offset_relid_set(Relids relids, int offset)
*
* Find all Var nodes in the given tree belonging to a specific relation
* (identified by sublevels_up and rt_index), and change their varno fields
- * to 'new_index'. The varnoold fields are changed too. Also, adjust other
+ * to 'new_index'. The varnoold fields are changed too. Also, adjust other
* nodes that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -646,7 +646,7 @@ adjust_relid_set(Relids relids, int oldrelid, int newrelid)
* Likewise for other nodes containing levelsup fields, such as Aggref.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * Var nodes in-place. The given expression tree should have been copied
+ * Var nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -1157,7 +1157,7 @@ replace_rte_variables_mutator(Node *node,
* If the expression tree contains a whole-row Var for the target RTE,
* the Var is not changed but *found_whole_row is returned as TRUE.
* For most callers this is an error condition, but we leave it to the caller
- * to report the error so that useful context can be provided. (In some
+ * to report the error so that useful context can be provided. (In some
* usages it would be appropriate to modify the Var's vartype and insert a
* ConvertRowtypeExpr node to map back to the original vartype. We might
* someday extend this function's API to support that. For now, the only
diff --git a/src/backend/rewrite/rewriteSupport.c b/src/backend/rewrite/rewriteSupport.c
index c107587da4..eb54d5ce6d 100644
--- a/src/backend/rewrite/rewriteSupport.c
+++ b/src/backend/rewrite/rewriteSupport.c
@@ -122,7 +122,7 @@ get_rewrite_oid(Oid relid, const char *rulename, bool missing_ok)
* Find rule oid, given only a rule name but no rel OID.
*
* If there's more than one, it's an error. If there aren't any, that's an
- * error, too. In general, this should be avoided - it is provided to support
+ * error, too. In general, this should be avoided - it is provided to support
* syntax that is compatible with pre-7.3 versions of PG, where rule names
* were unique across the entire database.
*/
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index e187242666..e03394c08b 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -44,7 +44,7 @@ int32 *PrivateRefCount;
*
* IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
* It must be set when an IO is initiated and cleared at
- * the end of the IO. It is there to make sure that one
+ * the end of the IO. It is there to make sure that one
* process doesn't start to use a buffer while another is
* faulting it in. see WaitIO and related routines.
*
@@ -54,7 +54,7 @@ int32 *PrivateRefCount;
*
* PrivateRefCount -- Each buffer also has a private refcount that keeps
* track of the number of times the buffer is pinned in the current
- * process. This is used for two purposes: first, if we pin a
+ * process. This is used for two purposes: first, if we pin a
* a buffer more than once, we only need to change the shared refcount
* once, thus only lock the shared state once; second, when a transaction
* aborts, it should only unpin the buffers exactly the number of times it
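Below is a minimal sketch of the two-level pin counting that the buf_init.c
comment above describes: the shared refcount is touched only on the 0 <-> 1
transitions of the per-backend count. shared_pin() and shared_unpin() are
hypothetical stand-ins for the spinlock-protected shared-state updates.

    #define NBUFFERS 16384              /* stand-in for the real NBuffers */

    static void shared_pin(int buf_id);     /* hypothetical helper */
    static void shared_unpin(int buf_id);   /* hypothetical helper */

    static int  PrivateRefCountSketch[NBUFFERS];

    static void
    pin_buffer(int buf_id)
    {
        if (PrivateRefCountSketch[buf_id]++ == 0)
            shared_pin(buf_id);         /* first pin in this process */
    }

    static void
    unpin_buffer(int buf_id)
    {
        if (--PrivateRefCountSketch[buf_id] == 0)
            shared_unpin(buf_id);       /* last pin in this process */
    }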
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index bdbfea4c72..7a38f2f150 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -3,7 +3,7 @@
* buf_table.c
* routines for mapping BufferTags to buffer indexes.
*
- * Note: the routines in this file do no locking of their own. The caller
+ * Note: the routines in this file do no locking of their own. The caller
* must hold a suitable lock on the appropriate BufMappingLock, as specified
* in the comments. We can't do the locking inside these functions because
* in most cases the caller needs to adjust the buffer header contents
@@ -112,7 +112,7 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
* Insert a hashtable entry for given tag and buffer ID,
* unless an entry already exists for that tag
*
- * Returns -1 on successful insertion. If a conflicting entry exists
+ * Returns -1 on successful insertion. If a conflicting entry exists
* already, returns the buffer ID in that entry.
*
* Caller must hold exclusive lock on BufMappingLock for tag's partition
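The BufTableInsert() contract above returns -1 on a successful insert and
otherwise the buffer ID already mapped to the tag. Below is a sketch of
that contract, assuming a tiny linear-scan array in place of the shared
dynahash table; as in the real code, all locking is left to the caller.

    typedef struct SketchEntry
    {
        unsigned    tag;                /* stand-in for BufferTag */
        int         buf_id;
    } SketchEntry;

    static SketchEntry sketch_table[128];
    static int  sketch_nentries = 0;

    static int
    buf_table_insert_sketch(unsigned tag, int buf_id)
    {
        int         i;

        for (i = 0; i < sketch_nentries; i++)
            if (sketch_table[i].tag == tag)
                return sketch_table[i].buf_id;  /* conflicting entry */

        if (sketch_nentries >= (int) (sizeof(sketch_table) / sizeof(sketch_table[0])))
            return -2;                  /* sketch only: table full */

        sketch_table[sketch_nentries].tag = tag;
        sketch_table[sketch_nentries].buf_id = buf_id;
        sketch_nentries++;
        return -1;                      /* successful insertion */
    }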
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 246f31bfe1..c070278944 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -116,7 +116,7 @@ static int rnode_comparator(const void *p1, const void *p2);
* PrefetchBuffer -- initiate asynchronous read of a block of a relation
*
* This is named by analogy to ReadBuffer but doesn't actually allocate a
- * buffer. Instead it tries to ensure that a future ReadBuffer for the given
+ * buffer. Instead it tries to ensure that a future ReadBuffer for the given
* block will not be delayed by the I/O. Prefetching is optional.
* No-op if prefetching isn't compiled in.
*/
@@ -206,7 +206,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* Assume when this function is called, that reln has been opened already.
*
* In RBM_NORMAL mode, the page is read from disk, and the page header is
- * validated. An error is thrown if the page header is not valid. (But
+ * validated. An error is thrown if the page header is not valid. (But
* note that an all-zero page is considered "valid"; see PageIsVerified().)
*
* RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
@@ -214,7 +214,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* for non-critical data, where the caller is prepared to repair errors.
*
* In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
- * with zeros instead of reading it from disk. Useful when the caller is
+ * with zeros instead of reading it from disk. Useful when the caller is
* going to fill the page from scratch, since this saves I/O and avoids
* unnecessary failure if the page-on-disk has corrupt page headers.
* Caution: do not use this mode to read a page that is beyond the relation's
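For instance, a caller prepared to repair damage might read with RBM_ZERO_ON_ERROR and reinitialize any page whose header was invalid. A sketch assuming the usual ReadBufferExtended() signature, with locking and MarkBufferDirty() omitted for brevity:

static void
sketch_read_tolerant(Relation rel, BlockNumber blkno)
{
    Buffer      buf;
    Page        page;

    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
                             RBM_ZERO_ON_ERROR, NULL);
    page = BufferGetPage(buf);
    if (PageIsNew(page))        /* header was bad, or page never written */
        PageInit(page, BufferGetPageSize(buf), 0);
    ReleaseBuffer(buf);
}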
@@ -371,7 +371,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This can happen because mdread doesn't complain about reads beyond
* EOF (when zero_damaged_pages is ON) and so a previous attempt to
* read a block beyond EOF could have left a "valid" zero-filled
- * buffer. Unfortunately, we have also seen this case occurring
+ * buffer. Unfortunately, we have also seen this case occurring
* because of buggy Linux kernels that sometimes return an
* lseek(SEEK_END) result that doesn't account for a recent write. In
* that situation, the pre-existing buffer would contain valid data
@@ -597,7 +597,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/*
* Didn't find it in the buffer pool. We'll have to initialize a new
- * buffer. Remember to unlock the mapping lock while doing the work.
+ * buffer. Remember to unlock the mapping lock while doing the work.
*/
LWLockRelease(newPartitionLock);
@@ -607,7 +607,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
bool lock_held;
/*
- * Select a victim buffer. The buffer is returned with its header
+ * Select a victim buffer. The buffer is returned with its header
* spinlock still held! Also (in most cases) the BufFreelistLock is
* still held, since it would be bad to hold the spinlock while
* possibly waking up other processes.
@@ -656,7 +656,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If using a nondefault strategy, and writing the buffer
* would require a WAL flush, let the strategy decide whether
* to go ahead and write/reuse the buffer or to choose another
- * victim. We need lock to inspect the page LSN, so this
+ * victim. We need lock to inspect the page LSN, so this
* can't be done inside StrategyGetBuffer.
*/
if (strategy != NULL)
@@ -786,7 +786,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
{
/*
* We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
+ * in the page, or (b) a previous read attempt failed. We
* have to wait for any active read attempt to finish, and
* then set up our own read attempt if the page is still not
* BM_VALID. StartBufferIO does it all.
@@ -879,7 +879,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This is used only in contexts such as dropping a relation. We assume
* that no other backend could possibly be interested in using the page,
* so the only reason the buffer might be pinned is if someone else is
- * trying to write it out. We have to let them finish before we can
+ * trying to write it out. We have to let them finish before we can
* reclaim the buffer.
*
* The buffer could get reclaimed by someone else while we are waiting
@@ -978,7 +978,7 @@ retry:
*
* Marks buffer contents as dirty (actual write happens later).
*
- * Buffer must be pinned and exclusive-locked. (If caller does not hold
+ * Buffer must be pinned and exclusive-locked. (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
@@ -1027,7 +1027,7 @@ MarkBufferDirty(Buffer buffer)
*
* Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
* compared to calling the two routines separately. Now it's mainly just
- * a convenience function. However, if the passed buffer is valid and
+ * a convenience function. However, if the passed buffer is valid and
* already contains the desired block, we just return it as-is; and that
* does save considerable work compared to a full release and reacquire.
*
@@ -1079,7 +1079,7 @@ ReleaseAndReadBuffer(Buffer buffer,
* when we first pin it; for other strategies we just make sure the usage_count
* isn't zero. (The idea of the latter is that we don't want synchronized
* heap scans to inflate the count, but we need it to not be zero to discourage
- * other backends from stealing buffers from our ring. As long as we cycle
+ * other backends from stealing buffers from our ring. As long as we cycle
* through the ring faster than the global clock-sweep cycles, buffers in
* our ring won't be chosen as victims for replacement by other backends.)
*
@@ -1087,7 +1087,7 @@ ReleaseAndReadBuffer(Buffer buffer,
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
@@ -1241,7 +1241,7 @@ BufferSync(int flags)
* have the flag set.
*
* Note that if we fail to write some buffer, we may leave buffers with
- * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
+ * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
* certainly need to be written for the next checkpoint attempt, too.
*/
num_to_write = 0;
@@ -1344,7 +1344,7 @@ BufferSync(int flags)
* This is called periodically by the background writer process.
*
* Returns true if it's appropriate for the bgwriter process to go into
- * low-power hibernation mode. (This happens if the strategy clock sweep
+ * low-power hibernation mode. (This happens if the strategy clock sweep
* has been "lapped" and no buffer allocations have occurred recently,
* or if the bgwriter has been effectively disabled by setting
* bgwriter_lru_maxpages to 0.)
@@ -2110,7 +2110,7 @@ BufferGetLSNAtomic(Buffer buffer)
* specified relation fork that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* Currently, this is called only from smgr.c when the underlying file
@@ -2119,7 +2119,7 @@ BufferGetLSNAtomic(Buffer buffer)
* be deleted momentarily anyway, and there is no point in writing it.
* It is the responsibility of higher-level code to ensure that the
* deletion or truncation does not lose any data that could be needed
- * later. It is also the responsibility of higher-level code to ensure
+ * later. It is also the responsibility of higher-level code to ensure
* that no other process could be trying to load more pages of the
* relation into buffers.
*
@@ -2281,9 +2281,9 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
*
* This function removes all the buffers in the buffer cache for a
* particular database. Dirty pages are simply dropped, without
- * bothering to write them out first. This is used when we destroy a
+ * bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
- * tree no longer exists. Implementation is pretty similar to
+ * tree no longer exists. Implementation is pretty similar to
* DropRelFileNodeBuffers(), which is for destroying just one relation.
* --------------------------------------------------------------------
*/
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 42afac6925..4befab0e1a 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -36,7 +36,7 @@ typedef struct
*/
/*
- * Statistics. These counters should be wide enough that they can't
+ * Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
uint32 completePasses; /* Complete cycles of the clock sweep */
@@ -135,7 +135,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
/*
* We count buffer allocation requests so that the bgwriter can estimate
- * the rate of buffer consumption. Note that buffers recycled by a
+ * the rate of buffer consumption. Note that buffers recycled by a
* strategy object are intentionally not counted here.
*/
StrategyControl->numBufferAllocs++;
@@ -266,7 +266,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
*
* In addition, we return the completed-pass count (which is effectively
* the higher-order bits of nextVictimBuffer) and the count of recent buffer
- * allocs if non-NULL pointers are passed. The alloc count is reset after
+ * allocs if non-NULL pointers are passed. The alloc count is reset after
* being read.
*/
int
@@ -291,7 +291,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
* StrategyNotifyBgWriter -- set or clear allocation notification latch
*
* If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will
- * set that latch. Pass NULL to clear the pending notification before it
+ * set that latch. Pass NULL to clear the pending notification before it
* happens. This feature is used by the bgwriter process to wake itself up
* from hibernation, and is not meant for anybody else to use.
*/
@@ -484,7 +484,7 @@ GetBufferFromRing(BufferAccessStrategy strategy)
/*
* If the slot hasn't been filled yet, tell the caller to allocate a new
- * buffer with the normal allocation strategy. The caller then fills this
+ * buffer with the normal allocation strategy. The caller then fills this
* slot by calling AddBufferToRing with the new buffer.
*/
bufnum = strategy->buffers[strategy->current];
@@ -537,7 +537,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf)
*
* When a nondefault strategy is used, the buffer manager calls this function
* when it turns out that the buffer selected by StrategyGetBuffer needs to
- * be written out and doing so would require flushing WAL too. This gives us
+ * be written out and doing so would require flushing WAL too. This gives us
* a chance to choose a different victim.
*
* Returns true if buffer manager should ask for a new victim, and false
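Seen from the bufmgr side, the handshake is roughly the fragment below. This is a sketch, not the verbatim code: in the real BufferAlloc() the LSN is read under the buffer header spinlock before the WAL-flush test.

    if (strategy != NULL)
    {
        XLogRecPtr  lsn = BufferGetLSN(buf);    /* really read with the
                                                 * header spinlock held */

        if (XLogNeedsFlush(lsn) && StrategyRejectBuffer(strategy, buf))
        {
            /* Strategy vetoed this dirty buffer: unpin it, pick another. */
        }
    }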
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 62adc1ce6b..3135c5cf15 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -94,7 +94,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
* Find or create a local buffer for the given page of the given relation.
*
* API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to do any locking since this is all local. Also, IO_IN_PROGRESS
+ * to do any locking since this is all local. Also, IO_IN_PROGRESS
* does not get set. Lastly, we support only default access strategy
* (hence, usage_count is always advanced).
*/
@@ -292,7 +292,7 @@ MarkLocalBufferDirty(Buffer buffer)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* See DropRelFileNodeBuffers in bufmgr.c for more notes.
@@ -459,7 +459,7 @@ GetLocalBufferStorage(void)
/*
* We allocate local buffers in a context of their own, so that the
* space eaten for them is easily recognizable in MemoryContextStats
- * output. Create the context on first use.
+ * output. Create the context on first use.
*/
if (LocalBufferContext == NULL)
LocalBufferContext =
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index e62d5d916e..0f007c8212 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -29,7 +29,7 @@
* that was current at that time.
*
* BufFile also supports temporary files that exceed the OS file size limit
- * (by opening multiple fd.c temporary files). This is an essential feature
+ * (by opening multiple fd.c temporary files). This is an essential feature
* for sorts and hashjoins on large amounts of data.
*-------------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@ struct BufFile
bool dirty; /* does buffer need to be written? */
/*
- * resowner is the ResourceOwner to use for underlying temp files. (We
+ * resowner is the ResourceOwner to use for underlying temp files. (We
* don't need to remember the memory context we're using explicitly,
* because after creation we only repalloc our arrays larger.)
*/
@@ -519,7 +519,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)
{
/*
* Seek is to a point within existing buffer; we can just adjust
- * pos-within-buffer, without flushing buffer. Note this is OK
+ * pos-within-buffer, without flushing buffer. Note this is OK
* whether reading or writing, but buffer remains dirty if we were
* writing.
*/
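The fast path that comment describes amounts to a bounds check against the buffered window. A self-contained sketch of the idea, with hypothetical field names rather than the BufFile fields verbatim:

/* Sketch: a seek that stays inside the current buffer needs no I/O. */
typedef struct
{
    long        buf_file_offset;    /* file offset corresponding to buffer[0] */
    int         nbytes;             /* number of bytes currently buffered */
    int         pos;                /* read/write position within buffer */
} SketchBufFile;

static int
sketch_seek(SketchBufFile *f, long new_offset)
{
    long        delta = new_offset - f->buf_file_offset;

    if (delta >= 0 && delta <= f->nbytes)
    {
        f->pos = (int) delta;   /* stay in buffer; dirty flag unchanged */
        return 0;
    }
    return -1;                  /* caller must flush and reload the buffer */
}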
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 0560bf9d72..1f69c9e03c 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -83,7 +83,7 @@
* and other code that tries to open files without consulting fd.c. This
* is the number left free. (While we can be pretty sure we won't get
* EMFILE, there's never any guarantee that we won't get ENFILE due to
- * other processes chewing up FDs. So it's a bad idea to try to open files
+ * other processes chewing up FDs. So it's a bad idea to try to open files
* without consulting fd.c. Nonetheless we cannot control all code.)
*
* Because this is just a fixed setting, we are effectively assuming that
@@ -168,8 +168,8 @@ typedef struct vfd
} Vfd;
/*
- * Virtual File Descriptor array pointer and size. This grows as
- * needed. 'File' values are indexes into this array.
+ * Virtual File Descriptor array pointer and size. This grows as
+ * needed. 'File' values are indexes into this array.
* Note that VfdCache[0] is not a usable VFD, just a list header.
*/
static Vfd *VfdCache;
@@ -189,7 +189,7 @@ static bool have_xact_temporary_files = false;
/*
* Tracks the total size of all temporary files. Note: when temp_file_limit
* is being enforced, this cannot overflow since the limit cannot be more
- * than INT_MAX kilobytes. When not enforcing, it could theoretically
+ * than INT_MAX kilobytes. When not enforcing, it could theoretically
* overflow, but we don't care.
*/
static uint64 temporary_files_size = 0;
@@ -252,7 +252,7 @@ static int nextTempTableSpace = 0;
*
* The Least Recently Used ring is a doubly linked list that begins and
* ends on element zero. Element zero is special -- it doesn't represent
- * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
+ * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
* anchor that shows us the beginning/end of the ring.
* Only VFD elements that are currently really open (have an FD assigned) are
* in the Lru ring. Elements that are "virtually" open can be recognized
@@ -473,7 +473,7 @@ InitFileAccess(void)
* We stop counting if usable_fds reaches max_to_probe. Note: a small
* value of max_to_probe might result in an underestimate of already_open;
* we must fill in any "gaps" in the set of used FDs before the calculation
- * of already_open will give the right answer. In practice, max_to_probe
+ * of already_open will give the right answer. In practice, max_to_probe
* of a couple of dozen should be enough to ensure good results.
*
* We assume stdin (FD 0) is available for dup'ing
@@ -550,7 +550,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups. We
+ * Return results. usable_fds is just the number of successful dups. We
* assume that the system limit is highestfd+1 (remember 0 is a legal FD
* number) and so already_open is highestfd+1 - usable_fds.
*/
@@ -1045,7 +1045,7 @@ OpenTemporaryFile(bool interXact)
/*
* If not, or if tablespace is bad, create in database's default
- * tablespace. MyDatabaseTableSpace should normally be set before we get
+ * tablespace. MyDatabaseTableSpace should normally be set before we get
* here, but just in case it isn't, fall back to pg_default tablespace.
*/
if (file <= 0)
@@ -1339,7 +1339,7 @@ FileWrite(File file, char *buffer, int amount)
/*
* If enforcing temp_file_limit and it's a temp file, check to see if the
- * write would overrun temp_file_limit, and throw error if so. Note: it's
+ * write would overrun temp_file_limit, and throw error if so. Note: it's
* really a modularity violation to throw error here; we should set errno
* and return -1. However, there's no way to report a suitable error
* message if we do that. All current callers would just throw error
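The check itself reduces to the sketch below; temp_file_limit is in kilobytes, is_temp_file stands in for the FD_TEMPORARY state flag, and growth stands in for the bytes the write would add past the file's current size (the real code derives it from the seek position):

    if (temp_file_limit >= 0 && is_temp_file)
    {
        uint64      newTotal = temporary_files_size + growth;

        if (newTotal > (uint64) temp_file_limit * (uint64) 1024)
            ereport(ERROR,
                    (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                     errmsg("temporary file size exceeds temp_file_limit (%dkB)",
                            temp_file_limit)));
    }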
@@ -1618,7 +1618,7 @@ reserveAllocatedDesc(void)
/*
* Routines that want to use stdio (ie, FILE*) should use AllocateFile
* rather than plain fopen(). This lets fd.c deal with freeing FDs if
- * necessary to open the file. When done, call FreeFile rather than fclose.
+ * necessary to open the file. When done, call FreeFile rather than fclose.
*
* Note that files that will be open for any significant length of time
* should NOT be handled this way, since they cannot share kernel file
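Typical usage, per the comment (the path is illustrative):

    FILE       *fp;

    fp = AllocateFile("some_config_file", "r");
    if (fp == NULL)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\": %m", "some_config_file")));
    /* ... use ordinary stdio calls such as fgets()/fread() on fp ... */
    FreeFile(fp);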
@@ -1923,7 +1923,7 @@ TryAgain:
* Read a directory opened with AllocateDir, ereport'ing any error.
*
* This is easier to use than raw readdir() since it takes care of some
- * otherwise rather tedious and error-prone manipulation of errno. Also,
+ * otherwise rather tedious and error-prone manipulation of errno. Also,
* if you are happy with a generic error message for AllocateDir failure,
* you can just do
*
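In the source, the comment continues with the canonical pattern, roughly:

    DIR        *dir;
    struct dirent *dent;

    dir = AllocateDir(path);    /* if this fails, ReadDir below ereports */
    while ((dent = ReadDir(dir, path)) != NULL)
    {
        /* ... process dent->d_name ... */
    }
    FreeDir(dir);

Note that AllocateDir failure is deliberately tolerated here: ReadDir raises the error on the caller's behalf when handed a NULL directory pointer.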
@@ -2058,7 +2058,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces)
numTempTableSpaces = numSpaces;
/*
- * Select a random starting point in the list. This is to minimize
+ * Select a random starting point in the list. This is to minimize
* conflicts between backends that are most likely sharing the same list
* of temp tablespaces. Note that if we create multiple temp files in the
* same transaction, we'll advance circularly through the list --- this
@@ -2087,7 +2087,7 @@ TempTablespacesAreSet(void)
/*
* GetNextTempTableSpace
*
- * Select the next temp tablespace to use. A result of InvalidOid means
+ * Select the next temp tablespace to use. A result of InvalidOid means
* to use the current database's default tablespace.
*/
Oid
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index cdf444111f..8eee0ce80e 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -48,7 +48,7 @@
* Range Category
* 0 - 31 0
* 32 - 63 1
- * ... ... ...
+ * ... ... ...
* 8096 - 8127 253
* 8128 - 8163 254
* 8164 - 8192 255
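To first order the mapping is avail divided by the 32-byte step, saturating at 255; the exact top-end boundaries follow the table above (the real function is fsm_space_avail_to_cat()). A sketch:

#define SKETCH_CAT_STEP 32      /* BLCKSZ / 256 categories, with 8K pages */

static unsigned char
sketch_avail_to_cat(unsigned avail)
{
    unsigned    cat = avail / SKETCH_CAT_STEP;

    return (unsigned char) (cat > 255 ? 255 : cat);
}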
@@ -123,7 +123,7 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page and then try again (see
- * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
+ * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*/
BlockNumber
diff --git a/src/backend/storage/freespace/fsmpage.c b/src/backend/storage/freespace/fsmpage.c
index 64dc2fd726..6b003cbf87 100644
--- a/src/backend/storage/freespace/fsmpage.c
+++ b/src/backend/storage/freespace/fsmpage.c
@@ -185,13 +185,13 @@ restart:
/*----------
* Start the search from the target slot. At every step, move one
- * node to the right, then climb up to the parent. Stop when we reach
+ * node to the right, then climb up to the parent. Stop when we reach
* a node with enough free space (as we must, since the root has enough
* space).
*
* The idea is to gradually expand our "search triangle", that is, all
* nodes covered by the current node, and to be sure we search to the
- * right from the start point. At the first step, only the target slot
+ * right from the start point. At the first step, only the target slot
* is examined. When we move up from a left child to its parent, we are
* adding the right-hand subtree of that parent to the search triangle.
* When we move right then up from a right child, we are dropping the
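The walk itself is simple once the page's binary tree is viewed as a heap-style array (root at index 0, children of node i at 2i+1 and 2i+2). A sketch of the "move right, then climb" step, with wraparound and bounds handling omitted; the loop terminates because, per the comment, the root is known to have enough space:

static int
sketch_search_upward(const unsigned char *node, int target_slot,
                     unsigned char needed)
{
    int         i = target_slot;

    while (node[i] < needed)
    {
        i++;                    /* one node to the right... */
        i = (i - 1) / 2;        /* ...then up to the parent */
    }
    return i;                   /* subtree under i holds a fit to the right */
}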
diff --git a/src/backend/storage/ipc/dsm.c b/src/backend/storage/ipc/dsm.c
index 6c410f77d9..733fa5f7bd 100644
--- a/src/backend/storage/ipc/dsm.c
+++ b/src/backend/storage/ipc/dsm.c
@@ -59,29 +59,29 @@
/* Backend-local tracking for on-detach callbacks. */
typedef struct dsm_segment_detach_callback
{
- on_dsm_detach_callback function;
- Datum arg;
- slist_node node;
+ on_dsm_detach_callback function;
+ Datum arg;
+ slist_node node;
} dsm_segment_detach_callback;
/* Backend-local state for a dynamic shared memory segment. */
struct dsm_segment
{
- dlist_node node; /* List link in dsm_segment_list. */
- ResourceOwner resowner; /* Resource owner. */
- dsm_handle handle; /* Segment name. */
- uint32 control_slot; /* Slot in control segment. */
- void *impl_private; /* Implementation-specific private data. */
- void *mapped_address; /* Mapping address, or NULL if unmapped. */
- Size mapped_size; /* Size of our mapping. */
- slist_head on_detach; /* On-detach callbacks. */
+ dlist_node node; /* List link in dsm_segment_list. */
+ ResourceOwner resowner; /* Resource owner. */
+ dsm_handle handle; /* Segment name. */
+ uint32 control_slot; /* Slot in control segment. */
+ void *impl_private; /* Implementation-specific private data. */
+ void *mapped_address; /* Mapping address, or NULL if unmapped. */
+ Size mapped_size; /* Size of our mapping. */
+ slist_head on_detach; /* On-detach callbacks. */
};
/* Shared-memory state for a dynamic shared memory segment. */
typedef struct dsm_control_item
{
dsm_handle handle;
- uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */
+ uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */
} dsm_control_item;
/* Layout of the dynamic shared memory control segment. */
@@ -90,7 +90,7 @@ typedef struct dsm_control_header
uint32 magic;
uint32 nitems;
uint32 maxitems;
- dsm_control_item item[FLEXIBLE_ARRAY_MEMBER];
+ dsm_control_item item[FLEXIBLE_ARRAY_MEMBER];
} dsm_control_header;
static void dsm_cleanup_for_mmap(void);
@@ -132,7 +132,7 @@ static dlist_head dsm_segment_list = DLIST_STATIC_INIT(dsm_segment_list);
static dsm_handle dsm_control_handle;
static dsm_control_header *dsm_control;
static Size dsm_control_mapped_size = 0;
-static void *dsm_control_impl_private = NULL;
+static void *dsm_control_impl_private = NULL;
/*
* Start up the dynamic shared memory system.
@@ -166,14 +166,14 @@ dsm_postmaster_startup(PGShmemHeader *shim)
maxitems = PG_DYNSHMEM_FIXED_SLOTS
+ PG_DYNSHMEM_SLOTS_PER_BACKEND * MaxBackends;
elog(DEBUG2, "dynamic shared memory system will support %u segments",
- maxitems);
+ maxitems);
segsize = dsm_control_bytes_needed(maxitems);
/*
- * Loop until we find an unused identifier for the new control segment.
- * We sometimes use 0 as a sentinel value indicating that no control
- * segment is known to exist, so avoid using that value for a real
- * control segment.
+ * Loop until we find an unused identifier for the new control segment. We
+ * sometimes use 0 as a sentinel value indicating that no control segment
+ * is known to exist, so avoid using that value for a real control
+ * segment.
*/
for (;;)
{
@@ -224,17 +224,17 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
/*
* Try to attach the segment. If this fails, it probably just means that
- * the operating system has been rebooted and the segment no longer exists,
- * or an unrelated process has used the same shm ID. So just fall out
- * quietly.
+ * the operating system has been rebooted and the segment no longer
+ * exists, or an unrelated process has used the same shm ID. So just fall
+ * out quietly.
*/
if (!dsm_impl_op(DSM_OP_ATTACH, old_control_handle, 0, &impl_private,
&mapped_address, &mapped_size, DEBUG1))
return;
/*
- * We've managed to reattach it, but the contents might not be sane.
- * If they aren't, we disregard the segment after all.
+ * We've managed to reattach it, but the contents might not be sane. If
+ * they aren't, we disregard the segment after all.
*/
old_control = (dsm_control_header *) mapped_address;
if (!dsm_control_segment_sane(old_control, mapped_size))
@@ -245,14 +245,14 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
}
/*
- * OK, the control segment looks basically valid, so we can use
- * it to get a list of segments that need to be removed.
+ * OK, the control segment looks basically valid, so we can use it to
+ * get a list of segments that need to be removed.
*/
nitems = old_control->nitems;
for (i = 0; i < nitems; ++i)
{
- dsm_handle handle;
- uint32 refcnt;
+ dsm_handle handle;
+ uint32 refcnt;
/* If the reference count is 0, the slot is actually unused. */
refcnt = old_control->item[i].refcnt;
@@ -262,7 +262,7 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
/* Log debugging information. */
handle = old_control->item[i].handle;
elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u (reference count %u)",
- handle, refcnt);
+ handle, refcnt);
/* Destroy the referenced segment. */
dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
@@ -290,7 +290,7 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
static void
dsm_cleanup_for_mmap(void)
{
- DIR *dir;
+ DIR *dir;
struct dirent *dent;
/* Open the directory; can't use AllocateDir in postmaster. */
@@ -298,15 +298,16 @@ dsm_cleanup_for_mmap(void)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open directory \"%s\": %m",
- PG_DYNSHMEM_DIR)));
+ PG_DYNSHMEM_DIR)));
/* Scan for something with a name of the correct format. */
while ((dent = ReadDir(dir, PG_DYNSHMEM_DIR)) != NULL)
{
if (strncmp(dent->d_name, PG_DYNSHMEM_MMAP_FILE_PREFIX,
- strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0)
+ strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0)
{
- char buf[MAXPGPATH];
+ char buf[MAXPGPATH];
+
snprintf(buf, MAXPGPATH, PG_DYNSHMEM_DIR "/%s", dent->d_name);
elog(DEBUG2, "removing file \"%s\"", buf);
@@ -314,7 +315,7 @@ dsm_cleanup_for_mmap(void)
/* We found a matching file; so remove it. */
if (unlink(buf) != 0)
{
- int save_errno;
+ int save_errno;
save_errno = errno;
closedir(dir);
@@ -352,8 +353,8 @@ dsm_postmaster_shutdown(int code, Datum arg)
* If some other backend exited uncleanly, it might have corrupted the
* control segment while it was dying. In that case, we warn and ignore
* the contents of the control segment. This may end up leaving behind
- * stray shared memory segments, but there's not much we can do about
- * that if the metadata is gone.
+ * stray shared memory segments, but there's not much we can do about that
+ * if the metadata is gone.
*/
nitems = dsm_control->nitems;
if (!dsm_control_segment_sane(dsm_control, dsm_control_mapped_size))
@@ -375,7 +376,7 @@ dsm_postmaster_shutdown(int code, Datum arg)
/* Log debugging information. */
handle = dsm_control->item[i].handle;
elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u",
- handle);
+ handle);
/* Destroy the segment. */
dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
@@ -427,7 +428,7 @@ dsm_backend_startup(void)
&dsm_control_mapped_size, WARNING);
ereport(FATAL,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("dynamic shared memory control segment is not valid")));
+ errmsg("dynamic shared memory control segment is not valid")));
}
}
#endif
@@ -455,9 +456,9 @@ dsm_set_control_handle(dsm_handle h)
dsm_segment *
dsm_create(Size size)
{
- dsm_segment *seg = dsm_create_descriptor();
- uint32 i;
- uint32 nitems;
+ dsm_segment *seg = dsm_create_descriptor();
+ uint32 i;
+ uint32 nitems;
/* Unsafe in postmaster (and pointless in a stand-alone backend). */
Assert(IsUnderPostmaster);
@@ -524,10 +525,10 @@ dsm_create(Size size)
dsm_segment *
dsm_attach(dsm_handle h)
{
- dsm_segment *seg;
- dlist_iter iter;
- uint32 i;
- uint32 nitems;
+ dsm_segment *seg;
+ dlist_iter iter;
+ uint32 i;
+ uint32 nitems;
/* Unsafe in postmaster (and pointless in a stand-alone backend). */
Assert(IsUnderPostmaster);
@@ -537,13 +538,13 @@ dsm_attach(dsm_handle h)
/*
* Since this is just a debugging cross-check, we could leave it out
- * altogether, or include it only in assert-enabled builds. But since
- * the list of attached segments should normally be very short, let's
- * include it always for right now.
+ * altogether, or include it only in assert-enabled builds. But since the
+ * list of attached segments should normally be very short, let's include
+ * it always for right now.
*
- * If you're hitting this error, you probably want to attempt to
- * find an existing mapping via dsm_find_mapping() before calling
- * dsm_attach() to create a new one.
+ * If you're hitting this error, you probably want to attempt to find an
+ * existing mapping via dsm_find_mapping() before calling dsm_attach() to
+ * create a new one.
*/
dlist_foreach(iter, &dsm_segment_list)
{
@@ -584,10 +585,10 @@ dsm_attach(dsm_handle h)
LWLockRelease(DynamicSharedMemoryControlLock);
/*
- * If we didn't find the handle we're looking for in the control
- * segment, it probably means that everyone else who had it mapped,
- * including the original creator, died before we got to this point.
- * It's up to the caller to decide what to do about that.
+ * If we didn't find the handle we're looking for in the control segment,
+ * it probably means that everyone else who had it mapped, including the
+ * original creator, died before we got to this point. It's up to the
+ * caller to decide what to do about that.
*/
if (seg->control_slot == INVALID_CONTROL_SLOT)
{
@@ -612,7 +613,7 @@ dsm_backend_shutdown(void)
{
while (!dlist_is_empty(&dsm_segment_list))
{
- dsm_segment *seg;
+ dsm_segment *seg;
seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
dsm_detach(seg);
@@ -628,11 +629,11 @@ dsm_backend_shutdown(void)
void
dsm_detach_all(void)
{
- void *control_address = dsm_control;
+ void *control_address = dsm_control;
while (!dlist_is_empty(&dsm_segment_list))
{
- dsm_segment *seg;
+ dsm_segment *seg;
seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
dsm_detach(seg);
@@ -697,7 +698,7 @@ dsm_detach(dsm_segment *seg)
{
slist_node *node;
dsm_segment_detach_callback *cb;
- on_dsm_detach_callback function;
+ on_dsm_detach_callback function;
Datum arg;
node = slist_pop_head_node(&seg->on_detach);
@@ -710,13 +711,12 @@ dsm_detach(dsm_segment *seg)
}
/*
- * Try to remove the mapping, if one exists. Normally, there will be,
- * but maybe not, if we failed partway through a create or attach
- * operation. We remove the mapping before decrementing the reference
- * count so that the process that sees a zero reference count can be
- * certain that no remaining mappings exist. Even if this fails, we
- * pretend that it works, because retrying is likely to fail in the
- * same way.
+ * Try to remove the mapping, if one exists. Normally, there will be, but
+ * maybe not, if we failed partway through a create or attach operation.
+ * We remove the mapping before decrementing the reference count so that
+ * the process that sees a zero reference count can be certain that no
+ * remaining mappings exist. Even if this fails, we pretend that it
+ * works, because retrying is likely to fail in the same way.
*/
if (seg->mapped_address != NULL)
{
@@ -730,8 +730,8 @@ dsm_detach(dsm_segment *seg)
/* Reduce reference count, if we previously increased it. */
if (seg->control_slot != INVALID_CONTROL_SLOT)
{
- uint32 refcnt;
- uint32 control_slot = seg->control_slot;
+ uint32 refcnt;
+ uint32 control_slot = seg->control_slot;
LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
Assert(dsm_control->item[control_slot].handle == seg->handle);
@@ -744,15 +744,15 @@ dsm_detach(dsm_segment *seg)
if (refcnt == 1)
{
/*
- * If we fail to destroy the segment here, or are killed before
- * we finish doing so, the reference count will remain at 1, which
+ * If we fail to destroy the segment here, or are killed before we
+ * finish doing so, the reference count will remain at 1, which
* will mean that nobody else can attach to the segment. At
* postmaster shutdown time, or when a new postmaster is started
* after a hard kill, another attempt will be made to remove the
* segment.
*
- * The main case we're worried about here is being killed by
- * a signal before we can finish removing the segment. In that
+ * The main case we're worried about here is being killed by a
+ * signal before we can finish removing the segment. In that
* case, it's important to be sure that the segment still gets
* removed. If we actually fail to remove the segment for some
* other reason, the postmaster may not have any better luck than
@@ -827,8 +827,8 @@ dsm_keep_segment(dsm_segment *seg)
dsm_segment *
dsm_find_mapping(dsm_handle h)
{
- dlist_iter iter;
- dsm_segment *seg;
+ dlist_iter iter;
+ dsm_segment *seg;
dlist_foreach(iter, &dsm_segment_list)
{
@@ -899,7 +899,7 @@ void
cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function,
Datum arg)
{
- slist_mutable_iter iter;
+ slist_mutable_iter iter;
slist_foreach_modify(iter, &seg->on_detach)
{
@@ -921,7 +921,7 @@ cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function,
void
reset_on_dsm_detach(void)
{
- dlist_iter iter;
+ dlist_iter iter;
dlist_foreach(iter, &dsm_segment_list)
{
@@ -952,7 +952,7 @@ reset_on_dsm_detach(void)
static dsm_segment *
dsm_create_descriptor(void)
{
- dsm_segment *seg;
+ dsm_segment *seg;
ResourceOwnerEnlargeDSMs(CurrentResourceOwner);
@@ -1005,5 +1005,5 @@ static uint64
dsm_control_bytes_needed(uint32 nitems)
{
return offsetof(dsm_control_header, item)
- + sizeof(dsm_control_item) * (uint64) nitems;
+ +sizeof(dsm_control_item) * (uint64) nitems;
}
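The offsetof-based sizing above is the standard idiom for structs ending in a flexible array member, since sizeof the struct may include trailing padding that would be double-counted. The same pattern in miniature, self-contained:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct
{
    uint32_t    nitems;
    uint32_t    item[];         /* flexible array member */
} sketch_header;

static sketch_header *
sketch_alloc(uint32_t nitems)
{
    /* offsetof, not sizeof: don't count padding before item[] twice. */
    sketch_header *h = malloc(offsetof(sketch_header, item)
                              + sizeof(uint32_t) * (size_t) nitems);

    if (h)
        h->nitems = nitems;
    return h;
}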
diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c
index fa253f0af5..74dace999e 100644
--- a/src/backend/storage/ipc/dsm_impl.c
+++ b/src/backend/storage/ipc/dsm_impl.c
@@ -76,40 +76,40 @@ static bool dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
#endif
#ifdef USE_DSM_SYSV
static bool dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
- void **impl_private, void **mapped_address,
- Size *mapped_size, int elevel);
+ void **impl_private, void **mapped_address,
+ Size *mapped_size, int elevel);
#endif
#ifdef USE_DSM_WINDOWS
static bool dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
- void **impl_private, void **mapped_address,
- Size *mapped_size, int elevel);
+ void **impl_private, void **mapped_address,
+ Size *mapped_size, int elevel);
#endif
#ifdef USE_DSM_MMAP
static bool dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
void **impl_private, void **mapped_address,
Size *mapped_size, int elevel);
#endif
-static int errcode_for_dynamic_shared_memory(void);
+static int errcode_for_dynamic_shared_memory(void);
const struct config_enum_entry dynamic_shared_memory_options[] = {
#ifdef USE_DSM_POSIX
- { "posix", DSM_IMPL_POSIX, false},
+ {"posix", DSM_IMPL_POSIX, false},
#endif
#ifdef USE_DSM_SYSV
- { "sysv", DSM_IMPL_SYSV, false},
+ {"sysv", DSM_IMPL_SYSV, false},
#endif
#ifdef USE_DSM_WINDOWS
- { "windows", DSM_IMPL_WINDOWS, false},
+ {"windows", DSM_IMPL_WINDOWS, false},
#endif
#ifdef USE_DSM_MMAP
- { "mmap", DSM_IMPL_MMAP, false},
+ {"mmap", DSM_IMPL_MMAP, false},
#endif
- { "none", DSM_IMPL_NONE, false},
+ {"none", DSM_IMPL_NONE, false},
{NULL, 0, false}
};
/* Implementation selector. */
-int dynamic_shared_memory_type;
+int dynamic_shared_memory_type;
/* Size of buffer to be used for zero-filling. */
#define ZBUFFER_SIZE 8192
@@ -137,20 +137,20 @@ int dynamic_shared_memory_type;
* segment.
*
* Arguments:
- * op: The operation to be performed.
- * handle: The handle of an existing object, or for DSM_OP_CREATE,
- * a new handle the caller wants created.
- * request_size: For DSM_OP_CREATE, the requested size. For DSM_OP_RESIZE,
- * the new size. Otherwise, 0.
- * impl_private: Private, implementation-specific data. Will be a pointer
- * to NULL for the first operation on a shared memory segment within this
- * backend; thereafter, it will point to the value to which it was set
- * on the previous call.
- * mapped_address: Pointer to start of current mapping; pointer to NULL
- * if none. Updated with new mapping address.
- * mapped_size: Pointer to size of current mapping; pointer to 0 if none.
- * Updated with new mapped size.
- * elevel: Level at which to log errors.
+ * op: The operation to be performed.
+ * handle: The handle of an existing object, or for DSM_OP_CREATE,
+ * a new handle the caller wants created.
+ * request_size: For DSM_OP_CREATE, the requested size. For DSM_OP_RESIZE,
+ * the new size. Otherwise, 0.
+ * impl_private: Private, implementation-specific data. Will be a pointer
+ * to NULL for the first operation on a shared memory segment within this
+ * backend; thereafter, it will point to the value to which it was set
+ * on the previous call.
+ * mapped_address: Pointer to start of current mapping; pointer to NULL
+ * if none. Updated with new mapping address.
+ * mapped_size: Pointer to size of current mapping; pointer to 0 if none.
+ * Updated with new mapped size.
+ * elevel: Level at which to log errors.
*
* Return value: true on success, false on failure. When false is returned,
* a message should first be logged at the specified elevel, except in the
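A hedged sketch of a create call under this contract; my_handle and request_size are assumed inputs, and the DEBUG1 elevel means failure comes back as false rather than an error:

    void       *impl_private = NULL;
    void       *mapped_address = NULL;
    Size        mapped_size = 0;

    if (!dsm_impl_op(DSM_OP_CREATE, my_handle, request_size,
                     &impl_private, &mapped_address, &mapped_size, DEBUG1))
    {
        /* e.g. name collision on create: pick another handle and retry */
    }
    else
    {
        /* mapped_address and mapped_size now describe the new mapping */
    }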
@@ -165,7 +165,7 @@ dsm_impl_op(dsm_op op, dsm_handle handle, Size request_size,
{
Assert(op == DSM_OP_CREATE || op == DSM_OP_RESIZE || request_size == 0);
Assert((op != DSM_OP_CREATE && op != DSM_OP_ATTACH) ||
- (*mapped_address == NULL && *mapped_size == 0));
+ (*mapped_address == NULL && *mapped_size == 0));
switch (dynamic_shared_memory_type)
{
@@ -243,10 +243,10 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
void **impl_private, void **mapped_address, Size *mapped_size,
int elevel)
{
- char name[64];
- int flags;
- int fd;
- char *address;
+ char name[64];
+ int flags;
+ int fd;
+ char *address;
snprintf(name, 64, "/PostgreSQL.%u", handle);
@@ -258,8 +258,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not unmap shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not unmap shared memory segment \"%s\": %m",
+ name)));
return false;
}
*mapped_address = NULL;
@@ -268,8 +268,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not remove shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not remove shared memory segment \"%s\": %m",
+ name)));
return false;
}
return true;
@@ -290,7 +290,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not open shared memory segment \"%s\": %m",
- name)));
+ name)));
return false;
}
@@ -304,7 +304,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
if (fstat(fd, &st) != 0)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -314,14 +314,14 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not stat shared memory segment \"%s\": %m",
- name)));
+ name)));
return false;
}
request_size = st.st_size;
}
else if (*mapped_size != request_size && ftruncate(fd, request_size))
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -332,8 +332,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not resize shared memory segment %s to %zu bytes: %m",
- name, request_size)));
+ errmsg("could not resize shared memory segment %s to %zu bytes: %m",
+ name, request_size)));
return false;
}
@@ -347,7 +347,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
return true;
if (munmap(*mapped_address, *mapped_size) != 0)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -358,8 +358,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not unmap shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not unmap shared memory segment \"%s\": %m",
+ name)));
return false;
}
*mapped_address = NULL;
@@ -367,11 +367,11 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
}
/* Map it. */
- address = mmap(NULL, request_size, PROT_READ|PROT_WRITE,
- MAP_SHARED|MAP_HASSEMAPHORE, fd, 0);
+ address = mmap(NULL, request_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_HASSEMAPHORE, fd, 0);
if (address == MAP_FAILED)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
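Stripped of the careful error recovery shown in these hunks, the POSIX path boils down to shm_open + ftruncate + mmap. A self-contained sketch in plain POSIX, not the dsm_impl code itself (names passed to shm_open must begin with '/', as the "/PostgreSQL.%u" format above does):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
sketch_posix_create(const char *name, size_t size)
{
    int         fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
    void       *addr;

    if (fd < 0)
        return NULL;            /* EEXIST: caller picks a new name */
    if (ftruncate(fd, (off_t) size) != 0 ||
        (addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0)) == MAP_FAILED)
    {
        close(fd);
        shm_unlink(name);
        return NULL;
    }
    close(fd);                  /* the mapping survives the close */
    return addr;
}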
@@ -409,11 +409,11 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
void **impl_private, void **mapped_address, Size *mapped_size,
int elevel)
{
- key_t key;
- int ident;
- char *address;
- char name[64];
- int *ident_cache;
+ key_t key;
+ int ident;
+ char *address;
+ char name[64];
+ int *ident_cache;
/* Resize is not supported for System V shared memory. */
if (op == DSM_OP_RESIZE)
@@ -427,38 +427,38 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
return true;
/*
- * POSIX shared memory and mmap-based shared memory identify segments
- * with names. To avoid needless error message variation, we use the
- * handle as the name.
+ * POSIX shared memory and mmap-based shared memory identify segments with
+ * names. To avoid needless error message variation, we use the handle as
+ * the name.
*/
snprintf(name, 64, "%u", handle);
/*
- * The System V shared memory namespace is very restricted; names are
- * of type key_t, which is expected to be some sort of integer data type,
- * but not necessarily the same one as dsm_handle. Since we use
- * dsm_handle to identify shared memory segments across processes, this
- * might seem like a problem, but it's really not. If dsm_handle is
- * bigger than key_t, the cast below might truncate away some bits from
- * the user-provided handle, but it'll truncate exactly the same bits
- * away in exactly the same fashion every time we use that handle, which
- * is all that really matters. Conversely, if dsm_handle is smaller than
- * key_t, we won't use the full range of available key space, but that's
- * no big deal either.
+ * The System V shared memory namespace is very restricted; names are of
+ * type key_t, which is expected to be some sort of integer data type, but
+ * not necessarily the same one as dsm_handle. Since we use dsm_handle to
+ * identify shared memory segments across processes, this might seem like
+ * a problem, but it's really not. If dsm_handle is bigger than key_t,
+ * the cast below might truncate away some bits from the user-provided
+ * handle, but it'll truncate exactly the same bits away in exactly
+ * the same fashion every time we use that handle, which is all that
+ * really matters. Conversely, if dsm_handle is smaller than key_t, we
+ * won't use the full range of available key space, but that's no big deal
+ * either.
*
- * We do make sure that the key isn't negative, because that might not
- * be portable.
+ * We do make sure that the key isn't negative, because that might not be
+ * portable.
*/
key = (key_t) handle;
- if (key < 1) /* avoid compiler warning if type is unsigned */
+ if (key < 1) /* avoid compiler warning if type is unsigned */
key = -key;
/*
* There's one special key, IPC_PRIVATE, which can't be used. If we end
- * up with that value by chance during a create operation, just pretend
- * it already exists, so that caller will retry. If we run into it
- * anywhere else, the caller has passed a handle that doesn't correspond
- * to anything we ever created, which should not happen.
+ * up with that value by chance during a create operation, just pretend it
+ * already exists, so that the caller will retry. If we run into it anywhere
+ * else, the caller has passed a handle that doesn't correspond to
+ * anything we ever created, which should not happen.
*/
if (key == IPC_PRIVATE)
{
@@ -469,9 +469,9 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
}
/*
- * Before we can do anything with a shared memory segment, we have to
- * map the shared memory key to a shared memory identifier using shmget().
- * To avoid repeated lookups, we store the key using impl_private.
+ * Before we can do anything with a shared memory segment, we have to map
+ * the shared memory key to a shared memory identifier using shmget(). To
+ * avoid repeated lookups, we store the key using impl_private.
*/
if (*impl_private != NULL)
{
@@ -480,8 +480,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
}
else
{
- int flags = IPCProtection;
- size_t segsize;
+ int flags = IPCProtection;
+ size_t segsize;
/*
* Allocate the memory BEFORE acquiring the resource, so that we don't
@@ -506,7 +506,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
{
if (errno != EEXIST)
{
- int save_errno = errno;
+ int save_errno = errno;
+
pfree(ident_cache);
errno = save_errno;
ereport(elevel,
@@ -529,8 +530,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not unmap shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not unmap shared memory segment \"%s\": %m",
+ name)));
return false;
}
*mapped_address = NULL;
@@ -539,8 +540,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not remove shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not remove shared memory segment \"%s\": %m",
+ name)));
return false;
}
return true;
@@ -553,7 +554,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
if (shmctl(ident, IPC_STAT, &shm) != 0)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -564,7 +565,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not stat shared memory segment \"%s\": %m",
- name)));
+ name)));
return false;
}
request_size = shm.shm_segsz;
@@ -574,7 +575,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
address = shmat(ident, NULL, PG_SHMAT_FLAGS);
if (address == (void *) -1)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -614,9 +615,9 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
void **impl_private, void **mapped_address,
Size *mapped_size, int elevel)
{
- char *address;
+ char *address;
HANDLE hmap;
- char name[64];
+ char name[64];
MEMORY_BASIC_INFORMATION info;
/* Resize is not supported for Windows shared memory. */
@@ -631,12 +632,12 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
return true;
/*
- * Storing the shared memory segment in the Global\ namespace, can
- * allow any process running in any session to access that file
- * mapping object provided that the caller has the required access rights.
- * But to avoid issues faced in main shared memory, we are using the naming
- * convention similar to main shared memory. We can change here once
- * issue mentioned in GetSharedMemName is resolved.
+ * Storing the shared memory segment in the Global\ namespace can allow
+ * any process running in any session to access that file mapping object,
+ * provided that the caller has the required access rights. But to avoid
+ * issues faced in main shared memory, we use a naming convention similar
+ * to that of the main shared memory. We can change this once the issue
+ * mentioned in GetSharedMemName is resolved.
*/
snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle);
@@ -652,8 +653,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
_dosmaperr(GetLastError());
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not unmap shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not unmap shared memory segment \"%s\": %m",
+ name)));
return false;
}
if (*impl_private != NULL
@@ -662,8 +663,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
_dosmaperr(GetLastError());
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not remove shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not remove shared memory segment \"%s\": %m",
+ name)));
return false;
}
@@ -688,9 +689,9 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
size_low = (DWORD) request_size;
hmap = CreateFileMapping(INVALID_HANDLE_VALUE, /* Use the pagefile */
- NULL, /* Default security attrs */
- PAGE_READWRITE, /* Memory is read/write */
- size_high, /* Upper 32 bits of size */
+ NULL, /* Default security attrs */
+ PAGE_READWRITE, /* Memory is read/write */
+ size_high, /* Upper 32 bits of size */
size_low, /* Lower 32 bits of size */
name);
if (!hmap)
@@ -698,8 +699,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
_dosmaperr(GetLastError());
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not create shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not create shared memory segment \"%s\": %m",
+ name)));
return false;
}
_dosmaperr(GetLastError());
@@ -718,8 +719,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
else
{
hmap = OpenFileMapping(FILE_MAP_WRITE | FILE_MAP_READ,
- FALSE, /* do not inherit the name */
- name); /* name of mapping object */
+ FALSE, /* do not inherit the name */
+ name); /* name of mapping object */
if (!hmap)
{
_dosmaperr(GetLastError());
@@ -736,7 +737,7 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
0, 0, 0);
if (!address)
{
- int save_errno;
+ int save_errno;
_dosmaperr(GetLastError());
/* Back out what's already been done. */
@@ -752,14 +753,14 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
}
/*
- * VirtualQuery gives size in page_size units, which is 4K for Windows.
- * We need size only when we are attaching, but it's better to get the
- * size when creating new segment to keep size consistent both for
+ * VirtualQuery gives size in page_size units, which is 4K for Windows. We
+ * need size only when we are attaching, but it's better to get the size
+ * when creating new segment to keep size consistent both for
* DSM_OP_CREATE and DSM_OP_ATTACH.
*/
if (VirtualQuery(address, &info, sizeof(info)) == 0)
{
- int save_errno;
+ int save_errno;
_dosmaperr(GetLastError());
/* Back out what's already been done. */
@@ -770,8 +771,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not stat shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not stat shared memory segment \"%s\": %m",
+ name)));
return false;
}
@@ -799,13 +800,13 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
void **impl_private, void **mapped_address, Size *mapped_size,
int elevel)
{
- char name[64];
- int flags;
- int fd;
- char *address;
+ char name[64];
+ int flags;
+ int fd;
+ char *address;
snprintf(name, 64, PG_DYNSHMEM_DIR "/" PG_DYNSHMEM_MMAP_FILE_PREFIX "%u",
- handle);
+ handle);
/* Handle teardown cases. */
if (op == DSM_OP_DETACH || op == DSM_OP_DESTROY)
@@ -815,8 +816,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not unmap shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not unmap shared memory segment \"%s\": %m",
+ name)));
return false;
}
*mapped_address = NULL;
@@ -825,8 +826,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
{
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not remove shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not remove shared memory segment \"%s\": %m",
+ name)));
return false;
}
return true;
@@ -840,7 +841,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not open shared memory segment \"%s\": %m",
- name)));
+ name)));
return false;
}
@@ -854,7 +855,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
if (fstat(fd, &st) != 0)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -864,14 +865,14 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not stat shared memory segment \"%s\": %m",
- name)));
+ name)));
return false;
}
request_size = st.st_size;
}
else if (*mapped_size > request_size && ftruncate(fd, request_size))
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -882,8 +883,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not resize shared memory segment %s to %zu bytes: %m",
- name, request_size)));
+ errmsg("could not resize shared memory segment %s to %zu bytes: %m",
+ name, request_size)));
return false;
}
else if (*mapped_size < request_size)
@@ -891,23 +892,23 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
/*
* Allocate a buffer full of zeros.
*
- * Note: palloc zbuffer, instead of just using a local char array,
- * to ensure it is reasonably well-aligned; this may save a few
- * cycles transferring data to the kernel.
+ * Note: palloc zbuffer, instead of just using a local char array, to
+ * ensure it is reasonably well-aligned; this may save a few cycles
+ * transferring data to the kernel.
*/
- char *zbuffer = (char *) palloc0(ZBUFFER_SIZE);
- uint32 remaining = request_size;
- bool success = true;
+ char *zbuffer = (char *) palloc0(ZBUFFER_SIZE);
+ uint32 remaining = request_size;
+ bool success = true;
/*
- * Zero-fill the file. We have to do this the hard way to ensure
- * that all the file space has really been allocated, so that we
- * don't later seg fault when accessing the memory mapping. This
- * is pretty pessimal.
+ * Zero-fill the file. We have to do this the hard way to ensure that
+ * all the file space has really been allocated, so that we don't
+ * later seg fault when accessing the memory mapping. This is pretty
+ * pessimal.
*/
while (success && remaining > 0)
{
- Size goal = remaining;
+ Size goal = remaining;
if (goal > ZBUFFER_SIZE)
goal = ZBUFFER_SIZE;
@@ -919,7 +920,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
if (!success)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -931,7 +932,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
errmsg("could not resize shared memory segment %s to %zu bytes: %m",
- name, request_size)));
+ name, request_size)));
return false;
}
}
@@ -946,7 +947,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
return true;
if (munmap(*mapped_address, *mapped_size) != 0)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -957,8 +958,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not unmap shared memory segment \"%s\": %m",
- name)));
+ errmsg("could not unmap shared memory segment \"%s\": %m",
+ name)));
return false;
}
*mapped_address = NULL;
@@ -966,11 +967,11 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
}
/* Map it. */
- address = mmap(NULL, request_size, PROT_READ|PROT_WRITE,
- MAP_SHARED|MAP_HASSEMAPHORE, fd, 0);
+ address = mmap(NULL, request_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_HASSEMAPHORE, fd, 0);
if (address == MAP_FAILED)
{
- int save_errno;
+ int save_errno;
/* Back out what's already been done. */
save_errno = errno;
@@ -1009,24 +1010,24 @@ dsm_impl_keep_segment(dsm_handle handle, void *impl_private)
{
#ifdef USE_DSM_WINDOWS
case DSM_IMPL_WINDOWS:
- {
- HANDLE hmap;
-
- if (!DuplicateHandle(GetCurrentProcess(), impl_private,
- PostmasterHandle, &hmap, 0, FALSE,
- DUPLICATE_SAME_ACCESS))
{
- char name[64];
-
- snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle);
- _dosmaperr(GetLastError());
- ereport(ERROR,
- (errcode_for_dynamic_shared_memory(),
- errmsg("could not duplicate handle for \"%s\": %m",
- name)));
+ HANDLE hmap;
+
+ if (!DuplicateHandle(GetCurrentProcess(), impl_private,
+ PostmasterHandle, &hmap, 0, FALSE,
+ DUPLICATE_SAME_ACCESS))
+ {
+ char name[64];
+
+ snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle);
+ _dosmaperr(GetLastError());
+ ereport(ERROR,
+ (errcode_for_dynamic_shared_memory(),
+ errmsg("could not duplicate handle for \"%s\": %m",
+ name)));
+ }
+ break;
}
- break;
- }
#endif
default:
break;
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 5dea0ed8dd..bd7cbeae98 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
@@ -90,7 +90,7 @@ static int on_proc_exit_index,
* -cim 2/6/90
*
* Unfortunately, we can't really guarantee that add-on code
- * obeys the rule of not calling exit() directly. So, while
+ * obeys the rule of not calling exit() directly. So, while
* this is the preferred way out of the system, we also register
* an atexit callback that will make sure cleanup happens.
* ----------------------------------------------------------------
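The belt-and-suspenders arrangement described in that comment can be sketched in a few lines of plain C (illustrative names, not the real postmaster code):

#include <stdlib.h>

static int	proc_exit_inprogress = 0;

static void
do_cleanup(int code)
{
	(void) code;				/* release locks, flush state, etc. */
}

static void
my_proc_exit(int code)			/* the preferred way out */
{
	proc_exit_inprogress = 1;
	do_cleanup(code);
	exit(code);
}

static void
my_atexit_callback(void)		/* safety net for direct exit() calls */
{
	if (!proc_exit_inprogress)
		do_cleanup(-1);			/* exit code is unknown here */
}

int
main(void)
{
	atexit(my_atexit_callback);
	my_proc_exit(0);
}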
@@ -109,7 +109,7 @@ proc_exit(int code)
* fixed file name, each backend will overwrite earlier profiles. To
* fix that, we create a separate subdirectory for each backend
* (./gprof/pid) and 'cd' to that subdirectory before we exit() - that
- * forces mcleanup() to write each profile into its own directory. We
+ * forces mcleanup() to write each profile into its own directory. We
* end up with something like: $PGDATA/gprof/8829/gmon.out
* $PGDATA/gprof/8845/gmon.out ...
*
@@ -219,16 +219,16 @@ shmem_exit(int code)
/*
* Call before_shmem_exit callbacks.
*
- * These should be things that need most of the system to still be
- * up and working, such as cleanup of temp relations, which requires
- * catalog access; or things that need to be completed because later
- * cleanup steps depend on them, such as releasing lwlocks.
+ * These should be things that need most of the system to still be up and
+ * working, such as cleanup of temp relations, which requires catalog
+ * access; or things that need to be completed because later cleanup steps
+ * depend on them, such as releasing lwlocks.
*/
elog(DEBUG3, "shmem_exit(%d): %d before_shmem_exit callbacks to make",
code, before_shmem_exit_index);
while (--before_shmem_exit_index >= 0)
(*before_shmem_exit_list[before_shmem_exit_index].function) (code,
- before_shmem_exit_list[before_shmem_exit_index].arg);
+ before_shmem_exit_list[before_shmem_exit_index].arg);
before_shmem_exit_index = 0;
/*
@@ -241,9 +241,9 @@ shmem_exit(int code)
* callback before invoking it, so that we don't get stuck in an infinite
* loop if one of those callbacks itself throws an ERROR or FATAL.
*
- * Note that explicitly calling this function here is quite different
- * from registering it as an on_shmem_exit callback for precisely this
- * reason: if one dynamic shared memory callback errors out, the remaining
+ * Note that explicitly calling this function here is quite different from
+ * registering it as an on_shmem_exit callback for precisely this reason:
+ * if one dynamic shared memory callback errors out, the remaining
* callbacks will still be invoked. Thus, hard-coding this call puts it
* on equal footing with callbacks for the main shared memory segment.
*/
@@ -261,7 +261,7 @@ shmem_exit(int code)
code, on_shmem_exit_index);
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
- on_shmem_exit_list[on_shmem_exit_index].arg);
+ on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_index = 0;
}
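A compressed sketch of the LIFO callback machinery these hunks touch (hypothetical names, fixed-size list): callbacks registered last run first, so resources acquired late are torn down early, and the index is decremented before each call so a callback that errors out and re-enters the loop cannot run twice:

typedef void (*exit_cb) (int code, void *arg);

#define MAX_EXIT_CBS 20

static struct
{
	exit_cb		func;
	void	   *arg;
}			cb_list[MAX_EXIT_CBS];
static int	cb_index = 0;

static void
register_exit_cb(exit_cb func, void *arg)
{
	if (cb_index < MAX_EXIT_CBS)
	{
		cb_list[cb_index].func = func;
		cb_list[cb_index].arg = arg;
		cb_index++;
	}
}

static void
run_exit_cbs(int code)
{
	while (--cb_index >= 0)		/* pop before calling, as above */
		cb_list[cb_index].func(code, cb_list[cb_index].arg);
	cb_index = 0;
}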
@@ -287,7 +287,7 @@ atexit_callback(void)
* on_proc_exit
*
* this function adds a callback function to the list of
- * functions invoked by proc_exit(). -cim 2/6/90
+ * functions invoked by proc_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
@@ -380,7 +380,7 @@ cancel_before_shmem_exit(pg_on_exit_callback function, Datum arg)
{
if (before_shmem_exit_index > 0 &&
before_shmem_exit_list[before_shmem_exit_index - 1].function
- == function &&
+ == function &&
before_shmem_exit_list[before_shmem_exit_index - 1].arg == arg)
--before_shmem_exit_index;
}
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 4290d2dc81..1d04c5508a 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -55,7 +55,7 @@ static bool addin_request_allowed = true;
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -85,7 +85,7 @@ RequestAddinShmemSpace(Size size)
* This is a bit code-wasteful and could be cleaned up.)
*
* If "makePrivate" is true then we only need private memory, not shared
- * memory. This is true for a standalone backend, false for a postmaster.
+ * memory. This is true for a standalone backend, false for a postmaster.
*/
void
CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index 7347234d6a..83b8d17048 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -26,9 +26,9 @@
/*
* The postmaster is signaled by its children by sending SIGUSR1. The
- * specific reason is communicated via flags in shared memory. We keep
+ * specific reason is communicated via flags in shared memory. We keep
* a boolean flag for each possible "reason", so that different reasons
- * can be signaled by different backends at the same time. (However,
+ * can be signaled by different backends at the same time. (However,
* if the same reason is signaled more than once simultaneously, the
* postmaster will observe it only once.)
*
@@ -42,7 +42,7 @@
* have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is
* available for assignment. An ASSIGNED slot is associated with a postmaster
* child process, but either the process has not touched shared memory yet,
- * or it has successfully cleaned up after itself. A ACTIVE slot means the
+ * or it has successfully cleaned up after itself. An ACTIVE slot means the
* process is actively using shared memory. The slots are assigned to
* child processes at random, and postmaster.c is responsible for tracking
* which one goes with which PID.
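In outline, the scheme that header comment describes looks roughly like this (illustrative struct and names; the real flags, their memory-ordering caveats, and the slot machinery live in pmsignal.c):

#include <signal.h>
#include <sys/types.h>

#define NUM_REASONS 8

typedef struct
{
	volatile sig_atomic_t flags[NUM_REASONS];
} PMSignalShmem;

static PMSignalShmem *pmsignal; /* points into shared memory */

static void
send_pmsignal(int reason, pid_t postmaster_pid)
{
	pmsignal->flags[reason] = 1;	/* record why... */
	kill(postmaster_pid, SIGUSR1);	/* ...then wake the postmaster */
}

static int
check_pmsignal(int reason)		/* postmaster side */
{
	if (pmsignal->flags[reason])
	{
		pmsignal->flags[reason] = 0;	/* coalesced: observed only once */
		return 1;
	}
	return 0;
}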
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index ac32d5cb62..cdd92d99a2 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -19,11 +19,11 @@
*
* During hot standby, we also keep a list of XIDs representing transactions
* that are known to be running in the master (or more precisely, were running
- * as of the current point in the WAL stream). This list is kept in the
+ * as of the current point in the WAL stream). This list is kept in the
* KnownAssignedXids array, and is updated by watching the sequence of
* arriving XIDs. This is necessary because if we leave those XIDs out of
* snapshots taken for standby queries, then they will appear to be already
- * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
+ * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
* array represents standby processes, which by definition are not running
* transactions that have XIDs.
*
@@ -276,7 +276,7 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is a
+ * Ooops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
@@ -686,7 +686,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
ExtendSUBTRANS(latestObservedXid);
TransactionIdAdvance(latestObservedXid);
}
- TransactionIdRetreat(latestObservedXid); /* = running->nextXid - 1 */
+ TransactionIdRetreat(latestObservedXid); /* = running->nextXid - 1 */
/* ----------
* Now we've got the running xids we need to set the global values that
@@ -733,7 +733,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
* ShmemVariableCache->nextXid must be beyond any observed xid.
*
* We don't expect anyone else to modify nextXid, hence we don't need to
- * hold a lock while examining it. We still acquire the lock to modify
+ * hold a lock while examining it. We still acquire the lock to modify
* it, though.
*/
nextXid = latestObservedXid;
@@ -1485,7 +1485,7 @@ GetSnapshotData(Snapshot snapshot)
* do that much work while holding the ProcArrayLock.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once.
+ * remove any. Hence it's important to fetch nxids just once.
* Should be safe to use memcpy, though. (We needn't worry about
* missing any xids added concurrently, because they must postdate
* xmax.)
@@ -2153,7 +2153,7 @@ BackendPidGetProc(int pid)
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*
- * Beware that not every xact has an XID assigned. However, as long as you
+ * Beware that not every xact has an XID assigned. However, as long as you
* only call this using an XID found on disk, you're safe.
*/
int
@@ -2217,7 +2217,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
- * other backend could have set its xmin *before* we look. We know however
+ * other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@@ -2723,7 +2723,7 @@ ProcArrayGetReplicationSlotXmin(TransactionId *xmin,
* XidCacheRemoveRunningXids
*
* Remove a bunch of TransactionIds from the list of known-running
- * subtransactions for my backend. Both the specified xid and those in
+ * subtransactions for my backend. Both the specified xid and those in
* the xids[] array (of length nxids) are removed from the subxids cache.
* latestXid must be the latest XID among the group.
*/
@@ -2829,7 +2829,7 @@ DisplayXidCache(void)
* treated as running by standby transactions, even though they are not in
* the standby server's PGXACT array.
*
- * We record all XIDs that we know have been assigned. That includes all the
+ * We record all XIDs that we know have been assigned. That includes all the
* XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have
* been assigned. We can deduce the existence of unobserved XIDs because we
* know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids
@@ -2838,7 +2838,7 @@ DisplayXidCache(void)
*
* During hot standby we do not fret too much about the distinction between
* top-level XIDs and subtransaction XIDs. We store both together in the
- * KnownAssignedXids list. In backends, this is copied into snapshots in
+ * KnownAssignedXids list. In backends, this is copied into snapshots in
* GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot()
* doesn't care about the distinction either. Subtransaction XIDs are
* effectively treated as top-level XIDs and in the typical case pg_subtrans
@@ -3053,14 +3053,14 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid)
* must hold shared ProcArrayLock to examine the array. To remove XIDs from
* the array, the startup process must hold ProcArrayLock exclusively, for
* the usual transactional reasons (compare commit/abort of a transaction
- * during normal running). Compressing unused entries out of the array
+ * during normal running). Compressing unused entries out of the array
* likewise requires exclusive lock. To add XIDs to the array, we just insert
* them into slots to the right of the head pointer and then advance the head
* pointer. This wouldn't require any lock at all, except that on machines
* with weak memory ordering we need to be careful that other processors
* see the array element changes before they see the head pointer change.
* We handle this by using a spinlock to protect reads and writes of the
- * head/tail pointers. (We could dispense with the spinlock if we were to
+ * head/tail pointers. (We could dispense with the spinlock if we were to
* create suitable memory access barrier primitives and use those instead.)
* The spinlock must be taken to read or write the head/tail pointers unless
* the caller holds ProcArrayLock exclusively.
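The publish-under-a-lock idiom described above reduces, for a single writer, to something like the following sketch (a pthread mutex stands in for the spinlock; names are illustrative):

#include <pthread.h>

#define CAPACITY 1024

static int	xids[CAPACITY];
static int	head;				/* next free slot */
static pthread_mutex_t ptr_lock = PTHREAD_MUTEX_INITIALIZER;

static void
append_xid(int xid)
{
	xids[head] = xid;			/* fill the slot first... */
	pthread_mutex_lock(&ptr_lock);
	head++;						/* ...then publish it; the lock doubles as
								 * the memory barrier */
	pthread_mutex_unlock(&ptr_lock);
}

static int
read_head(void)
{
	int			h;

	pthread_mutex_lock(&ptr_lock);
	h = head;
	pthread_mutex_unlock(&ptr_lock);
	return h;					/* entries [0, h) are now safe to read */
}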
@@ -3157,7 +3157,7 @@ KnownAssignedXidsCompress(bool force)
* If exclusive_lock is true then caller already holds ProcArrayLock in
* exclusive mode, so we need no extra locking here. Else caller holds no
* lock, so we need to be sure we maintain sufficient interlocks against
- * concurrent readers. (Only the startup process ever calls this, so no need
+ * concurrent readers. (Only the startup process ever calls this, so no need
* to worry about concurrent writers.)
*/
static void
@@ -3203,7 +3203,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids);
/*
- * Verify that insertions occur in TransactionId sequence. Note that even
+ * Verify that insertions occur in TransactionId sequence. Note that even
* if the last existing element is marked invalid, it must still have a
* correctly sequenced XID value.
*/
@@ -3306,7 +3306,7 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove)
}
/*
- * Standard binary search. Note we can ignore the KnownAssignedXidsValid
+ * Standard binary search. Note we can ignore the KnownAssignedXidsValid
* array here, since even invalid entries will contain sorted XIDs.
*/
first = tail;
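The invariant called out in that comment is what lets the search stay a textbook binary search; a standalone sketch with illustrative types (not procarray.c itself):

static int
search_xid(const int *keys, const char *valid,
		   int tail, int head, int xid)
{
	int			first = tail;
	int			last = head - 1;

	while (first <= last)
	{
		int			mid = first + (last - first) / 2;

		if (keys[mid] == xid)
			return valid[mid] ? mid : -1;	/* present but maybe stale */
		else if (keys[mid] < xid)
			first = mid + 1;
		else
			last = mid - 1;
	}
	return -1;
}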
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index 6526b2688a..cd9a287efe 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -64,7 +64,7 @@ typedef struct
* Spurious wakeups must be expected. Make sure that the flag is cleared
* in the error path.
*/
-bool set_latch_on_sigusr1;
+bool set_latch_on_sigusr1;
static ProcSignalSlot *ProcSignalSlots = NULL;
static volatile ProcSignalSlot *MyProcSignalSlot = NULL;
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index 4f7dd9c4ef..6f9c3a3b6c 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -142,7 +142,7 @@ static shm_mq_result shm_mq_send_bytes(shm_mq_handle *mq, Size nbytes,
void *data, bool nowait, Size *bytes_written);
static shm_mq_result shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed,
bool nowait, Size *nbytesp, void **datap);
-static bool shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
+static bool shm_mq_wait_internal(volatile shm_mq *mq, PGPROC *volatile * ptr,
BackgroundWorkerHandle *handle);
static uint64 shm_mq_get_bytes_read(volatile shm_mq *mq, bool *detached);
static void shm_mq_inc_bytes_read(volatile shm_mq *mq, Size n);
@@ -152,8 +152,8 @@ static shm_mq_result shm_mq_notify_receiver(volatile shm_mq *mq);
static void shm_mq_detach_callback(dsm_segment *seg, Datum arg);
/* Minimum queue size is enough for header and at least one chunk of data. */
-const Size shm_mq_minimum_size =
- MAXALIGN(offsetof(shm_mq, mq_ring)) + MAXIMUM_ALIGNOF;
+const Size shm_mq_minimum_size =
+MAXALIGN(offsetof(shm_mq, mq_ring)) + MAXIMUM_ALIGNOF;
#define MQH_INITIAL_BUFSIZE 8192
@@ -193,7 +193,7 @@ void
shm_mq_set_receiver(shm_mq *mq, PGPROC *proc)
{
volatile shm_mq *vmq = mq;
- PGPROC *sender;
+ PGPROC *sender;
SpinLockAcquire(&mq->mq_mutex);
Assert(vmq->mq_receiver == NULL);
@@ -212,7 +212,7 @@ void
shm_mq_set_sender(shm_mq *mq, PGPROC *proc)
{
volatile shm_mq *vmq = mq;
- PGPROC *receiver;
+ PGPROC *receiver;
SpinLockAcquire(&mq->mq_mutex);
Assert(vmq->mq_sender == NULL);
@@ -231,7 +231,7 @@ PGPROC *
shm_mq_get_receiver(shm_mq *mq)
{
volatile shm_mq *vmq = mq;
- PGPROC *receiver;
+ PGPROC *receiver;
SpinLockAcquire(&mq->mq_mutex);
receiver = vmq->mq_receiver;
@@ -247,7 +247,7 @@ PGPROC *
shm_mq_get_sender(shm_mq *mq)
{
volatile shm_mq *vmq = mq;
- PGPROC *sender;
+ PGPROC *sender;
SpinLockAcquire(&mq->mq_mutex);
sender = vmq->mq_sender;
@@ -280,7 +280,7 @@ shm_mq_get_sender(shm_mq *mq)
shm_mq_handle *
shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
{
- shm_mq_handle *mqh = palloc(sizeof(shm_mq_handle));
+ shm_mq_handle *mqh = palloc(sizeof(shm_mq_handle));
Assert(mq->mq_receiver == MyProc || mq->mq_sender == MyProc);
mqh->mqh_queue = mq;
@@ -317,9 +317,9 @@ shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
shm_mq_result
shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait)
{
- shm_mq_result res;
- shm_mq *mq = mqh->mqh_queue;
- Size bytes_written;
+ shm_mq_result res;
+ shm_mq *mq = mqh->mqh_queue;
+ Size bytes_written;
Assert(mq->mq_sender == MyProc);
@@ -328,7 +328,7 @@ shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait)
{
Assert(mqh->mqh_partial_bytes < sizeof(Size));
res = shm_mq_send_bytes(mqh, sizeof(Size) - mqh->mqh_partial_bytes,
- ((char *) &nbytes) + mqh->mqh_partial_bytes,
+ ((char *) &nbytes) +mqh->mqh_partial_bytes,
nowait, &bytes_written);
mqh->mqh_partial_bytes += bytes_written;
if (res != SHM_MQ_SUCCESS)
@@ -390,11 +390,11 @@ shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait)
shm_mq_result
shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
{
- shm_mq *mq = mqh->mqh_queue;
- shm_mq_result res;
- Size rb = 0;
- Size nbytes;
- void *rawdata;
+ shm_mq *mq = mqh->mqh_queue;
+ shm_mq_result res;
+ Size rb = 0;
+ Size nbytes;
+ void *rawdata;
Assert(mq->mq_receiver == MyProc);
@@ -439,18 +439,19 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
*/
if (mqh->mqh_partial_bytes == 0 && rb >= sizeof(Size))
{
- Size needed;
+ Size needed;
- nbytes = * (Size *) rawdata;
+ nbytes = *(Size *) rawdata;
/* If we've already got the whole message, we're done. */
needed = MAXALIGN(sizeof(Size)) + MAXALIGN(nbytes);
if (rb >= needed)
{
/*
- * Technically, we could consume the message length information
- * at this point, but the extra write to shared memory wouldn't
- * be free and in most cases we would reap no benefit.
+ * Technically, we could consume the message length
+ * information at this point, but the extra write to shared
+ * memory wouldn't be free and in most cases we would reap no
+ * benefit.
*/
mqh->mqh_consume_pending = needed;
*nbytesp = nbytes;
@@ -469,7 +470,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
}
else
{
- Size lengthbytes;
+ Size lengthbytes;
/* Can't be split unless bigger than required alignment. */
Assert(sizeof(Size) > MAXIMUM_ALIGNOF);
@@ -498,7 +499,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
if (mqh->mqh_partial_bytes >= sizeof(Size))
{
Assert(mqh->mqh_partial_bytes == sizeof(Size));
- mqh->mqh_expected_bytes = * (Size *) mqh->mqh_buffer;
+ mqh->mqh_expected_bytes = *(Size *) mqh->mqh_buffer;
mqh->mqh_length_word_complete = true;
mqh->mqh_partial_bytes = 0;
}
@@ -527,12 +528,12 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
/*
* The message has wrapped the buffer. We'll need to copy it in order
- * to return it to the client in one chunk. First, make sure we have a
- * large enough buffer available.
+ * to return it to the client in one chunk. First, make sure we have
+ * a large enough buffer available.
*/
if (mqh->mqh_buflen < nbytes)
{
- Size newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE);
+ Size newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE);
while (newbuflen < nbytes)
newbuflen *= 2;
@@ -551,7 +552,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
/* Loop until we've copied the entire message. */
for (;;)
{
- Size still_needed;
+ Size still_needed;
/* Copy as much as we can. */
Assert(mqh->mqh_partial_bytes + rb <= nbytes);
@@ -559,10 +560,10 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
mqh->mqh_partial_bytes += rb;
/*
- * Update count of bytes read, with alignment padding. Note
- * that this will never actually insert any padding except at the
- * end of a message, because the buffer size is a multiple of
- * MAXIMUM_ALIGNOF, and each read and write is as well.
+ * Update count of bytes read, with alignment padding. Note that this
+ * will never actually insert any padding except at the end of a
+ * message, because the buffer size is a multiple of MAXIMUM_ALIGNOF,
+ * and each read and write is as well.
*/
Assert(mqh->mqh_partial_bytes == nbytes || rb == MAXALIGN(rb));
shm_mq_inc_bytes_read(mq, MAXALIGN(rb));
@@ -601,7 +602,7 @@ shm_mq_result
shm_mq_wait_for_attach(shm_mq_handle *mqh)
{
shm_mq *mq = mqh->mqh_queue;
- PGPROC **victim;
+ PGPROC **victim;
if (shm_mq_get_receiver(mq) == MyProc)
victim = &mq->mq_sender;
@@ -663,8 +664,8 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
while (sent < nbytes)
{
- bool detached;
- uint64 rb;
+ bool detached;
+ uint64 rb;
/* Compute number of ring buffer bytes used and available. */
rb = shm_mq_get_bytes_read(mq, &detached);
@@ -679,7 +680,7 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
if (available == 0)
{
- shm_mq_result res;
+ shm_mq_result res;
/*
* The queue is full, so if the receiver isn't yet known to be
@@ -717,11 +718,11 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
}
/*
- * Wait for our latch to be set. It might already be set for
- * some unrelated reason, but that'll just result in one extra
- * trip through the loop. It's worth it to avoid resetting the
- * latch at top of loop, because setting an already-set latch is
- * much cheaper than setting one that has been reset.
+ * Wait for our latch to be set. It might already be set for some
+ * unrelated reason, but that'll just result in one extra trip
+ * through the loop. It's worth it to avoid resetting the latch
+ * at top of loop, because setting an already-set latch is much
+ * cheaper than setting one that has been reset.
*/
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
@@ -733,8 +734,8 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
}
else
{
- Size offset = mq->mq_bytes_written % (uint64) ringsize;
- Size sendnow = Min(available, ringsize - offset);
+ Size offset = mq->mq_bytes_written % (uint64) ringsize;
+ Size sendnow = Min(available, ringsize - offset);
/* Write as much data as we can via a single memcpy(). */
memcpy(&mq->mq_ring[mq->mq_ring_offset + offset],
@@ -751,9 +752,9 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
shm_mq_inc_bytes_written(mq, MAXALIGN(sendnow));
/*
- * For efficiency, we don't set the reader's latch here. We'll
- * do that only when the buffer fills up or after writing an
- * entire message.
+ * For efficiency, we don't set the reader's latch here. We'll do
+ * that only when the buffer fills up or after writing an entire
+ * message.
*/
}
}
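The wraparound arithmetic in this hunk is the heart of the ring buffer; a minimal sketch, assuming the caller has already verified that space is available (illustrative names):

#include <string.h>

static void
ring_write(char *ring, size_t ringsize, size_t *bytes_written,
		   const char *data, size_t nbytes)
{
	while (nbytes > 0)
	{
		size_t		offset = *bytes_written % ringsize;
		size_t		sendnow = nbytes < ringsize - offset ?
							  nbytes : ringsize - offset;

		/* A single memcpy() may only run to the end of the ring, so a
		 * message that wraps takes two trips through this loop. */
		memcpy(ring + offset, data, sendnow);
		*bytes_written += sendnow;
		data += sendnow;
		nbytes -= sendnow;
	}
}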
@@ -801,10 +802,10 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait,
/*
* Fall out before waiting if the queue has been detached.
*
- * Note that we don't check for this until *after* considering
- * whether the data already available is enough, since the
- * receiver can finish receiving a message stored in the buffer
- * even after the sender has detached.
+ * Note that we don't check for this until *after* considering whether
+ * the data already available is enough, since the receiver can finish
+ * receiving a message stored in the buffer even after the sender has
+ * detached.
*/
if (detached)
return SHM_MQ_DETACHED;
@@ -814,11 +815,11 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait,
return SHM_MQ_WOULD_BLOCK;
/*
- * Wait for our latch to be set. It might already be set for
- * some unrelated reason, but that'll just result in one extra
- * trip through the loop. It's worth it to avoid resetting the
- * latch at top of loop, because setting an already-set latch is
- * much cheaper than setting one that has been reset.
+ * Wait for our latch to be set. It might already be set for some
+ * unrelated reason, but that'll just result in one extra trip through
+ * the loop. It's worth it to avoid resetting the latch at top of
+ * loop, because setting an already-set latch is much cheaper than
+ * setting one that has been reset.
*/
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
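The latch protocol that comment describes, in outline (this fragment uses the real PostgreSQL latch API, but the condition test is a placeholder):

for (;;)
{
	if (queue_has_room())		/* placeholder for the real recheck */
		break;
	WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
	CHECK_FOR_INTERRUPTS();
	ResetLatch(&MyProc->procLatch);
	/* loop around and recheck; a latch set for some unrelated reason
	 * just costs one spare trip */
}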
@@ -842,11 +843,11 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait,
* non-NULL when our counterpart attaches to the queue.
*/
static bool
-shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
+shm_mq_wait_internal(volatile shm_mq *mq, PGPROC *volatile * ptr,
BackgroundWorkerHandle *handle)
{
- bool save_set_latch_on_sigusr1;
- bool result = false;
+ bool save_set_latch_on_sigusr1;
+ bool result = false;
save_set_latch_on_sigusr1 = set_latch_on_sigusr1;
if (handle != NULL)
@@ -856,9 +857,9 @@ shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
{
for (;;)
{
- BgwHandleStatus status;
- pid_t pid;
- bool detached;
+ BgwHandleStatus status;
+ pid_t pid;
+ bool detached;
/* Acquire the lock just long enough to check the pointer. */
SpinLockAcquire(&mq->mq_mutex);
@@ -913,7 +914,7 @@ shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
static uint64
shm_mq_get_bytes_read(volatile shm_mq *mq, bool *detached)
{
- uint64 v;
+ uint64 v;
SpinLockAcquire(&mq->mq_mutex);
v = mq->mq_bytes_read;
@@ -948,7 +949,7 @@ shm_mq_inc_bytes_read(volatile shm_mq *mq, Size n)
static uint64
shm_mq_get_bytes_written(volatile shm_mq *mq, bool *detached)
{
- uint64 v;
+ uint64 v;
SpinLockAcquire(&mq->mq_mutex);
v = mq->mq_bytes_written;
@@ -975,8 +976,8 @@ shm_mq_inc_bytes_written(volatile shm_mq *mq, Size n)
static shm_mq_result
shm_mq_notify_receiver(volatile shm_mq *mq)
{
- PGPROC *receiver;
- bool detached;
+ PGPROC *receiver;
+ bool detached;
SpinLockAcquire(&mq->mq_mutex);
detached = mq->mq_detached;
diff --git a/src/backend/storage/ipc/shm_toc.c b/src/backend/storage/ipc/shm_toc.c
index e4e007b97a..820b12e12c 100644
--- a/src/backend/storage/ipc/shm_toc.c
+++ b/src/backend/storage/ipc/shm_toc.c
@@ -19,17 +19,17 @@
typedef struct shm_toc_entry
{
- uint64 key; /* Arbitrary identifier */
- uint64 offset; /* Bytes offset */
+ uint64 key; /* Arbitrary identifier */
+ uint64 offset; /* Bytes offset */
} shm_toc_entry;
struct shm_toc
{
- uint64 toc_magic; /* Magic number for this TOC */
- slock_t toc_mutex; /* Spinlock for mutual exclusion */
- Size toc_total_bytes; /* Bytes managed by this TOC */
+ uint64 toc_magic; /* Magic number for this TOC */
+ slock_t toc_mutex; /* Spinlock for mutual exclusion */
+ Size toc_total_bytes; /* Bytes managed by this TOC */
Size toc_allocated_bytes; /* Bytes allocated of those managed */
- Size toc_nentry; /* Number of entries in TOC */
+ Size toc_nentry; /* Number of entries in TOC */
shm_toc_entry toc_entry[FLEXIBLE_ARRAY_MEMBER];
};
@@ -39,7 +39,7 @@ struct shm_toc
shm_toc *
shm_toc_create(uint64 magic, void *address, Size nbytes)
{
- shm_toc *toc = (shm_toc *) address;
+ shm_toc *toc = (shm_toc *) address;
Assert(nbytes > offsetof(shm_toc, toc_entry));
toc->toc_magic = magic;
@@ -58,7 +58,7 @@ shm_toc_create(uint64 magic, void *address, Size nbytes)
extern shm_toc *
shm_toc_attach(uint64 magic, void *address)
{
- shm_toc *toc = (shm_toc *) address;
+ shm_toc *toc = (shm_toc *) address;
if (toc->toc_magic != magic)
return NULL;
@@ -96,7 +96,7 @@ shm_toc_allocate(shm_toc *toc, Size nbytes)
total_bytes = vtoc->toc_total_bytes;
allocated_bytes = vtoc->toc_allocated_bytes;
nentry = vtoc->toc_nentry;
- toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry)
+ toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry)
+ allocated_bytes;
/* Check for memory exhaustion and overflow. */
@@ -132,7 +132,7 @@ shm_toc_freespace(shm_toc *toc)
nentry = vtoc->toc_nentry;
SpinLockRelease(&toc->toc_mutex);
- toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry);
+ toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry);
Assert(allocated_bytes + BUFFERALIGN(toc_bytes) <= total_bytes);
return total_bytes - (allocated_bytes + BUFFERALIGN(toc_bytes));
}
@@ -176,7 +176,7 @@ shm_toc_insert(shm_toc *toc, uint64 key, void *address)
total_bytes = vtoc->toc_total_bytes;
allocated_bytes = vtoc->toc_allocated_bytes;
nentry = vtoc->toc_nentry;
- toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry)
+ toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry)
+ allocated_bytes;
/* Check for memory exhaustion and overflow. */
@@ -241,6 +241,6 @@ Size
shm_toc_estimate(shm_toc_estimator *e)
{
return add_size(offsetof(shm_toc, toc_entry),
- add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)),
- e->space_for_chunks));
+ add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)),
+ e->space_for_chunks));
}
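For concreteness, a worked version of that estimate (sizes are illustrative and platform-dependent; the real code additionally guards the arithmetic with add_size()/mul_size() overflow checks):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
	uint64_t	key;
	uint64_t	offset;
} toc_entry;

typedef struct
{
	uint64_t	magic;
	size_t		total_bytes;
	size_t		allocated_bytes;
	size_t		nentry;
	toc_entry	entry[];
} toc;

int
main(void)
{
	size_t		number_of_keys = 4;
	size_t		space_for_chunks = 4 * 8192;
	size_t		need = offsetof(toc, entry)
					 + number_of_keys * sizeof(toc_entry)
					 + space_for_chunks;

	/* e.g. 32 + 64 + 32768 = 32864 bytes on a typical LP64 build */
	printf("reserve %zu bytes\n", need);
	return 0;
}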
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 1d27a89bdd..2ea2216a65 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -26,7 +26,7 @@
* for a module and should never be allocated after the shared memory
* initialization phase. Hash tables have a fixed maximum size, but
* their actual size can vary dynamically. When entries are added
- * to the table, more space is allocated. Queues link data structures
+ * to the table, more space is allocated. Queues link data structures
* that have been allocated either within fixed-size structures or as hash
* buckets. Each shared data structure has a string name to identify
* it (assigned in the module that declares it).
@@ -40,7 +40,7 @@
* The shmem index has two purposes: first, it gives us
* a simple model of how the world looks when a backend process
* initializes. If something is present in the shmem index,
- * it is initialized. If it is not, it is uninitialized. Second,
+ * it is initialized. If it is not, it is uninitialized. Second,
* the shmem index allows us to allocate shared memory on demand
* instead of trying to preallocate structures and hard-wire the
* sizes and locations in header files. If you are using a lot
@@ -55,8 +55,8 @@
* pointers using the method described in (b) above.
*
* (d) memory allocation model: shared memory can never be
- * freed, once allocated. Each hash table has its own free list,
- * so hash buckets can be reused when an item is deleted. However,
+ * freed, once allocated. Each hash table has its own free list,
+ * so hash buckets can be reused when an item is deleted. However,
* if one hash table grows very large and then shrinks, its space
* cannot be redistributed to other tables. We could build a simple
* hash bucket garbage collector if need be. Right now, it seems
@@ -232,7 +232,7 @@ InitShmemIndex(void)
*
* Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
* hashtable to exist already, we have a bit of a circularity problem in
- * initializing the ShmemIndex itself. The special "ShmemIndex" hash
+ * initializing the ShmemIndex itself. The special "ShmemIndex" hash
* table name will tell ShmemInitStruct to fake it.
*/
info.keysize = SHMEM_INDEX_KEYSIZE;
@@ -309,7 +309,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* ShmemInitStruct -- Create/attach to a structure in shared memory.
*
* This is called during initialization to find or allocate
- * a data structure in shared memory. If no other process
+ * a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* structure is returned.
@@ -318,7 +318,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
- * cases. Now, it always throws error instead, so callers need not check
+ * cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
@@ -350,7 +350,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
- * index has been initialized. This should be OK because no other
+ * index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);
diff --git a/src/backend/storage/ipc/shmqueue.c b/src/backend/storage/ipc/shmqueue.c
index 872c63f0ec..708ccea208 100644
--- a/src/backend/storage/ipc/shmqueue.c
+++ b/src/backend/storage/ipc/shmqueue.c
@@ -14,7 +14,7 @@
*
* Package for managing doubly-linked lists in shared memory.
* The only tricky thing is that SHM_QUEUE will usually be a field
- * in a larger record. SHMQueueNext has to return a pointer
+ * in a larger record. SHMQueueNext has to return a pointer
* to the record itself instead of a pointer to the SHMQueue field
* of the record. It takes an extra parameter and does some extra
* pointer arithmetic to do this correctly.
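The "extra pointer arithmetic" is the classic container-of trick; a standalone sketch with illustrative types:

#include <stddef.h>

typedef struct SHM_QUEUE
{
	struct SHM_QUEUE *prev;
	struct SHM_QUEUE *next;
} SHM_QUEUE;

typedef struct
{
	int			payload;
	SHM_QUEUE	links;			/* the queue field embedded in a record */
} MyRecord;

static MyRecord *
record_from_link(SHM_QUEUE *elem)
{
	/* step back from the field to the start of the containing record */
	return (MyRecord *) ((char *) elem - offsetof(MyRecord, links));
}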
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 264f700207..d7d040628c 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -29,7 +29,7 @@ uint64 SharedInvalidMessageCounter;
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
* it catch up before the sinval queue overflows and forces it to go
- * through a cache reset exercise. This is done by sending
+ * through a cache reset exercise. This is done by sending
* PROCSIG_CATCHUP_INTERRUPT to any backend that gets too far behind.
*
* State for catchup events consists of two flags: one saying whether
@@ -68,7 +68,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
- * inval messages have been processed before it exits. This is the reason
+ * inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@@ -137,7 +137,7 @@ ReceiveSharedInvalidMessages(
* We are now caught up. If we received a catchup signal, reset that
* flag, and call SICleanupQueue(). This is not so much because we need
* to flush dead messages right now, as that we want to pass on the
- * catchup signal to the next slowest backend. "Daisy chaining" the
+ * catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for what
* should be just a background maintenance activity.
*/
@@ -157,7 +157,7 @@ ReceiveSharedInvalidMessages(
*
* If we are idle (catchupInterruptEnabled is set), we can safely
* invoke ProcessCatchupEvent directly. Otherwise, just set a flag
- * to do it later. (Note that it's quite possible for normal processing
+ * to do it later. (Note that it's quite possible for normal processing
* of the current transaction to cause ReceiveSharedInvalidMessages()
* to be run later on; in that case the flag will get cleared again,
* since there's no longer any reason to do anything.)
@@ -233,7 +233,7 @@ HandleCatchupInterrupt(void)
* EnableCatchupInterrupt
*
* This is called by the PostgresMain main loop just before waiting
- * for a frontend command. We process any pending catchup events,
+ * for a frontend command. We process any pending catchup events,
* and enable the signal handler to process future events directly.
*
* NOTE: the signal handler starts out disabled, and stays so until
@@ -278,7 +278,7 @@ EnableCatchupInterrupt(void)
* DisableCatchupInterrupt
*
* This is called by the PostgresMain main loop just after receiving
- * a frontend command. Signal handler execution of catchup events
+ * a frontend command. Signal handler execution of catchup events
* is disabled until the next EnableCatchupInterrupt call.
*
* The PROCSIG_NOTIFY_INTERRUPT signal handler also needs to call this,
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index e6805d96b1..0328660b83 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -46,7 +46,7 @@
* In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES
* entries. We translate MsgNum values into circular-buffer indexes by
* computing MsgNum % MAXNUMMESSAGES (this should be fast as long as
- * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
+ * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
* doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space
* in the buffer. If the buffer does overflow, we recover by setting the
* "reset" flag for each backend that has fallen too far behind. A backend
@@ -59,7 +59,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
- * needed. This avoids undue contention from multiple backends all trying
+ * needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@@ -90,7 +90,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
- * to write maxMsgNum. (The exact rule is that you need the spinlock to
+ * to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@@ -442,7 +442,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;
/*
- * N can be arbitrarily large. We divide the work into groups of no more
+ * N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
@@ -465,7 +465,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
- * fullness threshold. We have to loop and recheck the buffer state
+ * fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
@@ -533,11 +533,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
- * in shared mode. Note that this is not exactly the normal (read-only)
+ * in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
- * NB: this can also run in parallel with SIInsertDataEntries. It is not
+ * NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@@ -557,10 +557,10 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
/*
* Before starting to take locks, do a quick, unlocked test to see whether
- * there can possibly be anything to read. On a multiprocessor system,
+ * there can possibly be anything to read. On a multiprocessor system,
* it's possible that this load could migrate backwards and occur before
* we actually enter this function, so we might miss a sinval message that
- * was just added by some other processor. But they can't migrate
+ * was just added by some other processor. But they can't migrate
* backwards over a preceding lock acquisition, so it should be OK. If we
* haven't acquired a lock preventing against further relevant
* invalidations, any such occurrence is not much different than if the
@@ -651,7 +651,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
- * free message slots at exit. Caller must recheck and perhaps retry.
+ * free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
@@ -672,7 +672,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
/*
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
* furthest-back backend that needs signaling (if any), and reset any
- * backends that are too far back. Note that because we ignore sendOnly
+ * backends that are too far back. Note that because we ignore sendOnly
* backends here it is possible for them to keep sending messages without
* a problem even when they are the only active backend.
*/
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index aa8bea5538..d0abe4117f 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -130,7 +130,7 @@ GetStandbyLimitTime(void)
/*
* The cutoff time is the last WAL data receipt time plus the appropriate
- * delay variable. Delay of -1 means wait forever.
+ * delay variable. Delay of -1 means wait forever.
*/
GetXLogReceiptTime(&rtime, &fromStream);
if (fromStream)
@@ -475,7 +475,7 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason)
* determine whether an actual deadlock condition is present: the lock we
* need to wait for might be unrelated to any held by the Startup process.
* Sooner or later, this mechanism should get ripped out in favor of somehow
- * accounting for buffer locks in DeadLockCheck(). However, errors here
+ * accounting for buffer locks in DeadLockCheck(). However, errors here
* seem to be very low-probability in practice, so for now it's not worth
* the trouble.
*/
@@ -867,7 +867,7 @@ standby_redo(XLogRecPtr lsn, XLogRecord *record)
XLogRecPtr
LogStandbySnapshot(void)
{
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
RunningTransactions running;
xl_standby_lock *locks;
int nlocks;
@@ -889,8 +889,8 @@ LogStandbySnapshot(void)
running = GetRunningTransactionData();
/*
- * GetRunningTransactionData() acquired ProcArrayLock, we must release
- * it. For Hot Standby this can be done before inserting the WAL record
+ * GetRunningTransactionData() acquired ProcArrayLock, we must release it.
+ * For Hot Standby this can be done before inserting the WAL record
* because ProcArrayApplyRecoveryInfo() rechecks the commit status using
* the clog. For logical decoding, though, the lock can't be released
+ * early because the clog might be "in the future" from the POV of the
@@ -977,9 +977,9 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
/*
* Ensure running_xacts information is synced to disk not too far in the
* future. We don't want to stall anything though (i.e. use XLogFlush()),
- * so we let the wal writer do it during normal
- * operation. XLogSetAsyncXactLSN() conveniently will mark the LSN as
- * to-be-synced and nudge the WALWriter into action if sleeping. Check
+ * so we let the wal writer do it during normal operation.
+ * XLogSetAsyncXactLSN() conveniently will mark the LSN as to-be-synced
+ * and nudge the WALWriter into action if sleeping. Check
* XLogBackgroundFlush() for details why a record might not be flushed
* without it.
*/
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 69c7bdb2a0..57ec1c2a6f 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -266,10 +266,10 @@ inv_open(Oid lobjId, int flags, MemoryContext mcxt)
errmsg("large object %u does not exist", lobjId)));
/*
- * We must register the snapshot in TopTransaction's resowner, because
- * it must stay alive until the LO is closed rather than until the
- * current portal shuts down. Do this after checking that the LO exists,
- * to avoid leaking the snapshot if an error is thrown.
+ * We must register the snapshot in TopTransaction's resowner, because it
+ * must stay alive until the LO is closed rather than until the current
+ * portal shuts down. Do this after checking that the LO exists, to avoid
+ * leaking the snapshot if an error is thrown.
*/
if (snapshot)
snapshot = RegisterSnapshotOnOwner(snapshot,
@@ -809,7 +809,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len)
/*
* If we found the page of the truncation point we need to truncate the
- * data in it. Otherwise if we're in a hole, we need to create a page to
+ * data in it. Otherwise if we're in a hole, we need to create a page to
* mark the end of data.
*/
if (olddata != NULL && olddata->pageno == pageno)
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 736fcd0619..298c577640 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -51,7 +51,7 @@ typedef struct
} WAIT_ORDER;
/*
- * Information saved about each edge in a detected deadlock cycle. This
+ * Information saved about each edge in a detected deadlock cycle. This
* is used to print a diagnostic message upon failure.
*
* Note: because we want to examine this info after releasing the lock
@@ -119,7 +119,7 @@ static PGPROC *blocking_autovacuum_proc = NULL;
* InitDeadLockChecking -- initialize deadlock checker during backend startup
*
* This does per-backend initialization of the deadlock checker; primarily,
- * allocation of working memory for DeadLockCheck. We do this per-backend
+ * allocation of working memory for DeadLockCheck. We do this per-backend
* since there's no percentage in making the kernel do copy-on-write
* inheritance of workspace from the postmaster. We want to allocate the
* space at startup because (a) the deadlock checker might be invoked when
@@ -291,10 +291,10 @@ GetBlockingAutoVacuumPgproc(void)
* DeadLockCheckRecurse -- recursively search for valid orderings
*
* curConstraints[] holds the current set of constraints being considered
- * by an outer level of recursion. Add to this each possible solution
+ * by an outer level of recursion. Add to this each possible solution
* constraint for any cycle detected at this level.
*
- * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
+ * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
* state is attainable, in which case waitOrders[] shows the required
* rearrangements of lock wait queues (if any).
*/
@@ -429,7 +429,7 @@ TestConfiguration(PGPROC *startProc)
*
* Since we need to be able to check hypothetical configurations that would
* exist after wait queue rearrangement, the routine pays attention to the
- * table of hypothetical queue orders in waitOrders[]. These orders will
+ * table of hypothetical queue orders in waitOrders[]. These orders will
* be believed in preference to the actual ordering seen in the locktable.
*/
static bool
@@ -506,7 +506,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are "hard"
+ * Scan for procs that already hold conflicting locks. These are "hard"
* edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
@@ -705,7 +705,7 @@ ExpandConstraints(EDGE *constraints,
nWaitOrders = 0;
/*
- * Scan constraint list backwards. This is because the last-added
+ * Scan constraint list backwards. This is because the last-added
* constraint is the only one that could fail, and so we want to test it
* for inconsistency first.
*/
@@ -759,7 +759,7 @@ ExpandConstraints(EDGE *constraints,
* The initial queue ordering is taken directly from the lock's wait queue.
* The output is an array of PGPROC pointers, of length equal to the lock's
* wait queue length (the caller is responsible for providing this space).
- * The partial order is specified by an array of EDGE structs. Each EDGE
+ * The partial order is specified by an array of EDGE structs. Each EDGE
* is one that we need to reverse, therefore the "waiter" must appear before
* the "blocker" in the output array. The EDGE array may well contain
* edges associated with other locks; these should be ignored.
@@ -829,7 +829,7 @@ TopoSort(LOCK *lock,
afterConstraints[k] = i + 1;
}
/*--------------------
- * Now scan the topoProcs array backwards. At each step, output the
+ * Now scan the topoProcs array backwards. At each step, output the
* last proc that has no remaining before-constraints, and decrease
* the beforeConstraints count of each of the procs it was constrained
* against.
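That counting scheme is a backwards Kahn-style topological sort; a compact sketch under simplifying assumptions (fixed fan-out, illustrative names):

static int
topo_sort(int n, int before_count[], int after[][8], int nafter[], int out[])
{
	int			emitted = 0;

	while (emitted < n)
	{
		int			i,
					found = -1;

		for (i = n - 1; i >= 0; i--)	/* scan backwards, as above */
			if (before_count[i] == 0)
			{
				found = i;
				break;
			}
		if (found < 0)
			return 0;			/* constraints form a cycle */
		out[emitted++] = found;
		before_count[found] = -1;		/* never emit it again */
		for (i = 0; i < nafter[found]; i++)
			before_count[after[found][i]]--;	/* relax successors */
	}
	return 1;
}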
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index d692aad6cb..6cc4d269ea 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -35,7 +35,7 @@ typedef struct XactLockTableWaitInfo
{
XLTW_Oper oper;
Relation rel;
- ItemPointer ctid;
+ ItemPointer ctid;
} XactLockTableWaitInfo;
static void XactLockTableWaitErrorCb(void *arg);
@@ -80,7 +80,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
/*
* LockRelationOid
*
- * Lock a relation given only its OID. This should generally be used
+ * Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
@@ -268,7 +268,7 @@ LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
/*
* LockRelationIdForSession
*
- * This routine grabs a session-level lock on the target relation. The
+ * This routine grabs a session-level lock on the target relation. The
* session lock persists across transaction boundaries. It will be removed
* when UnlockRelationIdForSession() is called, or if an ereport(ERROR) occurs,
* or if the backend exits.
@@ -471,7 +471,7 @@ XactLockTableInsert(TransactionId xid)
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans IDs.)
+ * released implicitly at transaction end. But we do use it for subtrans IDs.)
*/
void
XactLockTableDelete(TransactionId xid)
@@ -494,7 +494,7 @@ XactLockTableDelete(TransactionId xid)
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
@@ -663,7 +663,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode)
/*
* Note: GetLockConflicts() never reports our own xid, hence we need not
- * check for that. Also, prepared xacts are not reported, which is fine
+ * check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything anymore.
*/
@@ -690,7 +690,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode)
void
WaitForLockers(LOCKTAG heaplocktag, LOCKMODE lockmode)
{
- List *l;
+ List *l;
l = list_make1(&heaplocktag);
WaitForLockersMultiple(l, lockmode);
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 682506374f..cd468bcc99 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -187,7 +187,7 @@ static int FastPathLocalUseCount = 0;
/*
* The fast-path lock mechanism is concerned only with relation locks on
- * unshared relations by backends bound to a database. The fast-path
+ * unshared relations by backends bound to a database. The fast-path
* mechanism exists mostly to accelerate acquisition and release of locks
* that rarely conflict. Because ShareUpdateExclusiveLock is
* self-conflicting, it can't use the fast-path mechanism; but it also does
@@ -914,7 +914,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* If lock requested conflicts with locks requested by waiters, must join
- * wait queue. Otherwise, check for conflict with already-held locks.
+ * wait queue. Otherwise, check for conflict with already-held locks.
* (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
@@ -995,7 +995,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* NOTE: do not do any material change of state between here and
- * return. All required changes in locktable state must have been
+ * return. All required changes in locktable state must have been
* done when the lock was granted to us --- see notes in WaitOnLock.
*/
@@ -1032,7 +1032,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
{
/*
* Decode the locktag back to the original values, to avoid sending
- * lots of empty bytes with every message. See lock.h to check how a
+ * lots of empty bytes with every message. See lock.h to check how a
* locktag is defined for LOCKTAG_RELATION
*/
LogAccessExclusiveLock(locktag->locktag_field1,
@@ -1289,7 +1289,7 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock. We have
+ * Rats. Something conflicts. But it could still be my own lock. We have
* to construct a conflict mask that does not reflect our own locks, but
* only lock types held by other processes.
*/
@@ -1381,7 +1381,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
/*
* We need only run ProcLockWakeup if the released lock conflicts with at
- * least one of the lock types requested by waiter(s). Otherwise whatever
+ * least one of the lock types requested by waiter(s). Otherwise whatever
* conflict made them wait must still exist. NOTE: before MVCC, we could
* skip wakeup if lock->granted[lockmode] was still positive. But that's
* not true anymore, because the remaining granted locks might belong to
@@ -1401,7 +1401,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
@@ -1823,7 +1823,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
}
/*
- * Decrease the total local count. If we're still holding the lock, we're
+ * Decrease the total local count. If we're still holding the lock, we're
* done.
*/
locallock->nLocks--;
@@ -1955,7 +1955,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
#endif
/*
- * Get rid of our fast-path VXID lock, if appropriate. Note that this is
+ * Get rid of our fast-path VXID lock, if appropriate. Note that this is
* the only way that the lock we hold on our own VXID can ever get
* released: it is always and only released when a toplevel transaction
* ends.
@@ -2042,7 +2042,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
* fast-path data structures, we must acquire it before attempting
* to release the lock via the fast-path. We will continue to
* hold the LWLock until we're done scanning the locallock table,
- * unless we hit a transferred fast-path lock. (XXX is this
+ * unless we hit a transferred fast-path lock. (XXX is this
* really such a good idea? There could be a lot of entries ...)
*/
if (!have_fast_path_lwlock)
@@ -2061,7 +2061,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Our lock, originally taken via the fast path, has been
- * transferred to the main lock table. That's going to require
+ * transferred to the main lock table. That's going to require
* some extra work, so release our fast-path lock before starting.
*/
LWLockRelease(MyProc->backendLock);
@@ -2070,7 +2070,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Now dump the lock. We haven't got a pointer to the LOCK or
* PROCLOCK in this case, so we have to handle this a bit
- * differently than a normal lock release. Unfortunately, this
+ * differently than a normal lock release. Unfortunately, this
* requires an extra LWLock acquire-and-release cycle on the
* partitionLock, but hopefully it shouldn't happen often.
*/
@@ -2505,9 +2505,9 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
* acquiring proc->backendLock. In particular, it's certainly safe to
* assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
- * LWLock acquisition) since setting proc->databaseId. However, it's
+ * LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory
- * fencing operation since the other backend set proc->databaseId. So
+ * fencing operation since the other backend set proc->databaseId. So
* for now, we test it after acquiring the LWLock just to be safe.
*/
if (proc->databaseId != locktag->locktag_field1)
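The memory-ordering argument here can be sketched with C11 atomics standing in for LWLocks; FakeProc and the spin loop are hypothetical simplifications:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct
{
    atomic_int lock;            /* stand-in for proc->backendLock */
    int        databaseId;
} FakeProc;

/*
 * Acquiring the lock acts as an acquire fence: once we hold it, our
 * read of databaseId is ordered after the other backend's earlier
 * writes, so the re-test under the lock is trustworthy even where an
 * unlocked pre-test would not be.
 */
static bool
matches_after_fence(FakeProc *proc, int target_db)
{
    bool match;
    int  expected = 0;

    while (!atomic_compare_exchange_weak(&proc->lock, &expected, 1))
        expected = 0;           /* spin until we "acquire the LWLock" */
    match = (proc->databaseId == target_db);
    atomic_store(&proc->lock, 0);       /* release */
    return match;
}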
@@ -3021,7 +3021,7 @@ AtPrepare_Locks(void)
continue;
/*
- * If we have both session- and transaction-level locks, fail. This
+ * If we have both session- and transaction-level locks, fail. This
* should never happen with regular locks, since we only take those at
* session level in some special operations like VACUUM. It's
* possible to hit this with advisory locks, though.
@@ -3030,7 +3030,7 @@ AtPrepare_Locks(void)
* the transactional hold to the prepared xact. However, that would
* require two PROCLOCK objects, and we cannot be sure that another
* PROCLOCK will be available when it comes time for PostPrepare_Locks
- * to do the deed. So for now, we error out while we can still do so
+ * to do the deed. So for now, we error out while we can still do so
* safely.
*/
if (haveSessionLock)
@@ -3219,7 +3219,7 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. Instead
+ * the proclock would then be in the wrong hash chain. Instead
* use hash_update_hash_key. (We used to create a new hash entry,
* but that risks out-of-memory failure if other processes are
* busy making proclocks too.) We must unlink the proclock from
@@ -3319,7 +3319,7 @@ GetLockStatusData(void)
/*
* First, we iterate through the per-backend fast-path arrays, locking
- * them one at a time. This might produce an inconsistent picture of the
+ * them one at a time. This might produce an inconsistent picture of the
* system state, but taking all of those LWLocks at the same time seems
* impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
* matter too much, because none of these locks can be involved in lock
@@ -3398,7 +3398,7 @@ GetLockStatusData(void)
* will be self-consistent.
*
* Since this is a read-only operation, we take shared instead of
- * exclusive lock. There's not a whole lot of point to this, because all
+ * exclusive lock. There's not a whole lot of point to this, because all
* the normal operations require exclusive lock, but it doesn't hurt
* anything either. It will at least allow two backends to do
* GetLockStatusData in parallel.
@@ -3917,7 +3917,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info,
* as MyProc->lxid, you might wonder if we really need both. The
* difference is that MyProc->lxid is set and cleared unlocked, and
* examined by procarray.c, while fpLocalTransactionId is protected by
- * backendLock and is used only by the locking subsystem. Doing it this
+ * backendLock and is used only by the locking subsystem. Doing it this
* way makes it easier to verify that there are no funny race conditions.
*
* We don't bother recording this lock in the local lock table, since it's
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index df8f9bfd89..d23ac62bf8 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -6,7 +6,7 @@
* Lightweight locks are intended primarily to provide mutual exclusion of
* access to shared-memory data structures. Therefore, they offer both
* exclusive and shared lock modes (to support read/write and read-only
- * access to a shared object). There are few other frammishes. User-level
+ * access to a shared object). There are few other frammishes. User-level
* locking should be done with the full lock manager --- which depends on
* LWLocks to protect its shared state.
*
@@ -54,7 +54,7 @@ extern slock_t *ShmemLock;
* to the current backend.
*/
static LWLockTranche **LWLockTrancheArray = NULL;
-static int LWLockTranchesAllocated = 0;
+static int LWLockTranchesAllocated = 0;
#define T_NAME(lock) \
(LWLockTrancheArray[(lock)->tranche]->name)
@@ -91,18 +91,18 @@ static bool LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr,
#ifdef LWLOCK_STATS
typedef struct lwlock_stats_key
{
- int tranche;
- int instance;
-} lwlock_stats_key;
+ int tranche;
+ int instance;
+} lwlock_stats_key;
typedef struct lwlock_stats
{
- lwlock_stats_key key;
- int sh_acquire_count;
- int ex_acquire_count;
- int block_count;
- int spin_delay_count;
-} lwlock_stats;
+ lwlock_stats_key key;
+ int sh_acquire_count;
+ int ex_acquire_count;
+ int block_count;
+ int spin_delay_count;
+} lwlock_stats;
static int counts_for_pid = 0;
static HTAB *lwlock_stats_htab;
@@ -173,7 +173,7 @@ print_lwlock_stats(int code, Datum arg)
while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
{
fprintf(stderr,
- "PID %d lwlock %s %d: shacq %u exacq %u blk %u spindelay %u\n",
+ "PID %d lwlock %s %d: shacq %u exacq %u blk %u spindelay %u\n",
MyProcPid, LWLockTrancheArray[lwstats->key.tranche]->name,
lwstats->key.instance, lwstats->sh_acquire_count,
lwstats->ex_acquire_count, lwstats->block_count,
@@ -186,9 +186,9 @@ print_lwlock_stats(int code, Datum arg)
static lwlock_stats *
get_lwlock_stats_entry(LWLock *lock)
{
- lwlock_stats_key key;
+ lwlock_stats_key key;
lwlock_stats *lwstats;
- bool found;
+ bool found;
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
@@ -270,7 +270,7 @@ NumLWLocks(void)
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -339,12 +339,12 @@ CreateLWLocks(void)
* before the first LWLock. LWLockCounter[0] is the allocation
* counter for lwlocks, LWLockCounter[1] is the maximum number that
* can be allocated from the main array, and LWLockCounter[2] is the
- * allocation counter for tranches.
+ * allocation counter for tranches.
*/
LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int));
LWLockCounter[0] = NUM_FIXED_LWLOCKS;
LWLockCounter[1] = numLocks;
- LWLockCounter[2] = 1; /* 0 is the main array */
+ LWLockCounter[2] = 1; /* 0 is the main array */
}
if (LWLockTrancheArray == NULL)
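A standalone sketch of the counter placement this comment describes, with malloc standing in for shared memory; alignment and error handling are omitted and the signature is hypothetical:

#include <stdlib.h>

/*
 * Carve three int counters out of the space immediately before the
 * first lock, and recover them later by pointer arithmetic.
 */
static void *
create_lock_array(size_t nlocks, size_t lock_size, int **counters)
{
    char *space = malloc(3 * sizeof(int) + nlocks * lock_size);
    char *array = space + 3 * sizeof(int);      /* locks start here */

    *counters = (int *) (array - 3 * sizeof(int));
    (*counters)[0] = 0;                 /* allocation counter for locks */
    (*counters)[1] = (int) nlocks;      /* max from the main array */
    (*counters)[2] = 1;                 /* tranche counter; 0 = main */
    return array;
}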
@@ -352,7 +352,7 @@ CreateLWLocks(void)
LWLockTranchesAllocated = 16;
LWLockTrancheArray = (LWLockTranche **)
MemoryContextAlloc(TopMemoryContext,
- LWLockTranchesAllocated * sizeof(LWLockTranche *));
+ LWLockTranchesAllocated * sizeof(LWLockTranche *));
}
MainLWLockTranche.name = "main";
@@ -422,7 +422,7 @@ LWLockRegisterTranche(int tranche_id, LWLockTranche *tranche)
if (tranche_id >= LWLockTranchesAllocated)
{
- int i = LWLockTranchesAllocated;
+ int i = LWLockTranchesAllocated;
while (i <= tranche_id)
i *= 2;
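The doubling loop above is the usual geometric-growth pattern; a sketch with realloc standing in for MemoryContextAlloc-plus-copy (hypothetical signature):

#include <stdlib.h>
#include <string.h>

/*
 * Double the capacity until tranche_id fits, then zero the new tail so
 * unregistered slots read as NULL.
 */
static void **
grow_tranche_array(void **arr, int *allocated, int tranche_id)
{
    int newalloc = *allocated;

    while (newalloc <= tranche_id)
        newalloc *= 2;
    arr = realloc(arr, newalloc * sizeof(void *));
    memset(arr + *allocated, 0,
           (newalloc - *allocated) * sizeof(void *));
    *allocated = newalloc;
    return arr;
}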
@@ -534,7 +534,7 @@ LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val)
* in the presence of contention. The efficiency of being able to do that
* outweighs the inefficiency of sometimes wasting a process dispatch
* cycle because the lock is not free when a released waiter finally gets
- * to run. See pgsql-hackers archives for 29-Dec-01.
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
@@ -731,7 +731,7 @@ LWLockConditionalAcquire(LWLock *l, LWLockMode mode)
/*
* LWLockAcquireOrWait - Acquire lock, or wait until it's free
*
- * The semantics of this function are a bit funky. If the lock is currently
+ * The semantics of this function are a bit funky. If the lock is currently
* free, it is acquired in the given mode, and the function returns true. If
* the lock isn't immediately free, the function waits until it is released
* and returns false, but does not acquire the lock.
@@ -920,8 +920,8 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval)
return true;
/*
- * Lock out cancel/die interrupts while we sleep on the lock. There is
- * no cleanup mechanism to remove us from the wait queue if we got
+ * Lock out cancel/die interrupts while we sleep on the lock. There is no
+ * cleanup mechanism to remove us from the wait queue if we got
* interrupted.
*/
HOLD_INTERRUPTS();
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 593d80f9d2..7c8d53e6a5 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -32,11 +32,11 @@
* examining the MVCC data.)
*
* (1) Besides tuples actually read, they must cover ranges of tuples
- * which would have been read based on the predicate. This will
+ * which would have been read based on the predicate. This will
* require modelling the predicates through locks against database
* objects such as pages, index ranges, or entire tables.
*
- * (2) They must be kept in RAM for quick access. Because of this, it
+ * (2) They must be kept in RAM for quick access. Because of this, it
* isn't possible to always maintain tuple-level granularity -- when
* the space allocated to store these approaches exhaustion, a
* request for a lock may need to scan for situations where a single
@@ -49,7 +49,7 @@
*
* (4) While they are associated with a transaction, they must survive
* a successful COMMIT of that transaction, and remain until all
- * overlapping transactions complete. This even means that they
+ * overlapping transactions complete. This even means that they
* must survive termination of the transaction's process. If a
* top level transaction is rolled back, however, it is immediately
* flagged so that it can be ignored, and its SIREAD locks can be
@@ -90,7 +90,7 @@
* may yet matter because they overlap still-active transactions.
*
* SerializablePredicateLockListLock
- * - Protects the linked list of locks held by a transaction. Note
+ * - Protects the linked list of locks held by a transaction. Note
* that the locks themselves are also covered by the partition
* locks of their respective lock targets; this lock only affects
* the linked list connecting the locks related to a transaction.
@@ -101,11 +101,11 @@
* - It is relatively infrequent that another process needs to
* modify the list for a transaction, but it does happen for such
* things as index page splits for pages with predicate locks and
- * freeing of predicate locked pages by a vacuum process. When
+ * freeing of predicate locked pages by a vacuum process. When
* removing a lock in such cases, the lock itself contains the
* pointers needed to remove it from the list. When adding a
* lock in such cases, the lock can be added using the anchor in
- * the transaction structure. Neither requires walking the list.
+ * the transaction structure. Neither requires walking the list.
* - Cleaning up the list for a terminated transaction is sometimes
* not done on a retail basis, in which case no lock is required.
* - Due to the above, a process accessing its active transaction's
@@ -355,7 +355,7 @@ int max_predicate_locks_per_xact; /* set by guc.c */
/*
* This provides a list of objects in order to track transactions
- * participating in predicate locking. Entries in the list are fixed size,
+ * participating in predicate locking. Entries in the list are fixed size,
* and reside in shared memory. The memory address of an entry must remain
* fixed during its lifetime. The list will be protected from concurrent
* update externally; no provision is made in this code to manage that. The
@@ -547,7 +547,7 @@ SerializationNeededForWrite(Relation relation)
/*
* These functions are a simple implementation of a list for this specific
- * type of struct. If there is ever a generalized shared memory list, we
+ * type of struct. If there is ever a generalized shared memory list, we
* should probably switch to that.
*/
static SERIALIZABLEXACT *
@@ -767,7 +767,7 @@ OldSerXidPagePrecedesLogically(int p, int q)
int diff;
/*
- * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
+ * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
* be in the range 0..OLDSERXID_MAX_PAGE.
*/
Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE);
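A self-contained sketch of this wraparound comparison, with a hypothetical page-space size standing in for OLDSERXID_MAX_PAGE + 1:

#include <stdbool.h>

#define PAGE_SPACE 8192     /* hypothetical; plays OLDSERXID_MAX_PAGE + 1 */

/*
 * Wrap the raw difference into [-PAGE_SPACE/2, PAGE_SPACE/2) so that
 * "precedes" stays well defined on a circular page space: for example,
 * page 8190 logically precedes page 2 after wraparound.
 */
static bool
page_precedes_logically(int p, int q)
{
    int diff = p - q;

    if (diff >= PAGE_SPACE / 2)
        diff -= PAGE_SPACE;
    else if (diff < -(PAGE_SPACE / 2))
        diff += PAGE_SPACE;
    return diff < 0;
}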
@@ -929,7 +929,7 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
}
/*
- * Get the minimum commitSeqNo for any conflict out for the given xid. For
+ * Get the minimum commitSeqNo for any conflict out for the given xid. For
* a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
* will be returned.
*/
@@ -982,7 +982,7 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
/*
* When no sxacts are active, nothing overlaps, set the xid values to
* invalid to show that there are no valid entries. Don't clear headPage,
- * though. A new xmin might still land on that page, and we don't want to
+ * though. A new xmin might still land on that page, and we don't want to
* repeatedly zero out the same page.
*/
if (!TransactionIdIsValid(xid))
@@ -1467,7 +1467,7 @@ SummarizeOldestCommittedSxact(void)
/*
* Grab the first sxact off the finished list -- this will be the earliest
- * commit. Remove it from the list.
+ * commit. Remove it from the list.
*/
sxact = (SERIALIZABLEXACT *)
SHMQueueNext(FinishedSerializableTransactions,
@@ -1620,7 +1620,7 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
/*
* We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
* import snapshots, since there's no way to wait for a safe snapshot when
- * we're using the snap we're told to. (XXX instead of throwing an error,
+ * we're using the snap we're told to. (XXX instead of throwing an error,
* we could just ignore the XactDeferrable flag?)
*/
if (XactReadOnly && XactDeferrable)
@@ -1669,7 +1669,7 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
* release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
* this means we have to create the sxact first, which is a bit annoying
* (in particular, an elog(ERROR) in procarray.c would cause us to leak
- * the sxact). Consider refactoring to avoid this.
+ * the sxact). Consider refactoring to avoid this.
*/
#ifdef TEST_OLDSERXID
SummarizeOldestCommittedSxact();
@@ -2051,7 +2051,7 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
/*
* Delete child target locks owned by this process.
* This implementation is assuming that the usage of each target tag field
- * is uniform. No need to make this hard if we don't have to.
+ * is uniform. No need to make this hard if we don't have to.
*
* We aren't acquiring lightweight locks for the predicate lock or lock
* target structures associated with this transaction unless we're going
@@ -2092,7 +2092,7 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
if (TargetTagIsCoveredBy(oldtargettag, *newtargettag))
{
uint32 oldtargettaghash;
- LWLock *partitionLock;
+ LWLock *partitionLock;
PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY;
oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
@@ -2497,7 +2497,7 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
}
/*
- * Do quick-but-not-definitive test for a relation lock first. This will
+ * Do quick-but-not-definitive test for a relation lock first. This will
* never cause a return when the relation is *not* locked, but will
* occasionally let the check continue when there really *is* a relation
* level lock.
@@ -2809,7 +2809,7 @@ exit:
* transaction which is not serializable.
*
* NOTE: This is currently only called with transfer set to true, but that may
- * change. If we decide to clean up the locks from a table on commit of a
+ * change. If we decide to clean up the locks from a table on commit of a
* transaction which executed DROP TABLE, the false condition will be useful.
*/
static void
@@ -2890,7 +2890,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
continue; /* already the right lock */
/*
- * If we made it here, we have work to do. We make sure the heap
+ * If we made it here, we have work to do. We make sure the heap
* relation lock exists, then we walk the list of predicate locks for
* the old target we found, moving all locks to the heap relation lock
* -- unless they already hold that.
@@ -3338,7 +3338,7 @@ ReleasePredicateLocks(bool isCommit)
}
/*
- * Release all outConflicts to committed transactions. If we're rolling
+ * Release all outConflicts to committed transactions. If we're rolling
* back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to
* previously committed transactions.
*/
@@ -3657,7 +3657,7 @@ ClearOldPredicateLocks(void)
* matter -- but keep the transaction entry itself and any outConflicts.
*
* When the summarize flag is set, we've run short of room for sxact data
- * and must summarize to the SLRU. Predicate locks are transferred to a
+ * and must summarize to the SLRU. Predicate locks are transferred to a
* dummy "old" transaction, with duplicate locks on a single target
* collapsing to a single lock with the "latest" commitSeqNo from among
* the conflicting locks.
@@ -3850,7 +3850,7 @@ XidIsConcurrent(TransactionId xid)
/*
* CheckForSerializableConflictOut
* We are reading a tuple which has been modified. If it is visible to
- * us but has been deleted, that indicates a rw-conflict out. If it's
+ * us but has been deleted, that indicates a rw-conflict out. If it's
* not visible and was created by a concurrent (overlapping)
* serializable transaction, that is also a rw-conflict out,
*
@@ -3937,7 +3937,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
/*
- * Find top level xid. Bail out if xid is too early to be a conflict, or
+ * Find top level xid. Bail out if xid is too early to be a conflict, or
* if it's our own xid.
*/
if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
@@ -4002,7 +4002,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
/*
* We have a conflict out to a transaction which has a conflict out to a
- * summarized transaction. That summarized transaction must have
+ * summarized transaction. That summarized transaction must have
* committed first, and we can't tell when it committed in relation to our
* snapshot acquisition, so something needs to be canceled.
*/
@@ -4036,7 +4036,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
&& (!SxactHasConflictOut(sxact)
|| MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
{
- /* Read-only transaction will appear to run first. No conflict. */
+ /* Read-only transaction will appear to run first. No conflict. */
LWLockRelease(SerializableXactHashLock);
return;
}
@@ -4282,8 +4282,8 @@ CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
relation->rd_node.dbNode,
relation->rd_id,
- ItemPointerGetBlockNumber(&(tuple->t_self)),
- ItemPointerGetOffsetNumber(&(tuple->t_self)));
+ ItemPointerGetBlockNumber(&(tuple->t_self)),
+ ItemPointerGetOffsetNumber(&(tuple->t_self)));
CheckTargetForConflictsIn(&targettag);
}
@@ -4627,7 +4627,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
*
* If a dangerous structure is found, the pivot (the near conflict) is
* marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we flail without ever making progress. This transaction is
* committing writes, so letting it commit ensures progress. If we
* canceled the far conflict, it might immediately fail again on retry.
*/
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 5cd8fcec45..266b0daa94 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -229,10 +229,10 @@ InitProcGlobal(void)
/*
* Newly created PGPROCs for normal backends, autovacuum and bgworkers
- * must be queued up on the appropriate free list. Because there can
+ * must be queued up on the appropriate free list. Because there can
* only ever be a small, fixed number of auxiliary processes, no free
* list is used in that case; InitAuxiliaryProcess() instead uses a
- * linear search. PGPROCs for prepared transactions are added to a
+ * linear search. PGPROCs for prepared transactions are added to a
* free list by TwoPhaseShmemInit().
*/
if (i < MaxConnections)
@@ -291,7 +291,7 @@ InitProcess(void)
elog(ERROR, "you already exist");
/*
- * Initialize process-local latch support. This could fail if the kernel
+ * Initialize process-local latch support. This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
@@ -400,7 +400,7 @@ InitProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -450,7 +450,7 @@ InitProcessPhase2(void)
*
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
- * to the ProcArray or the sinval messaging mechanism, either. They also
+ * to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*
@@ -476,7 +476,7 @@ InitAuxiliaryProcess(void)
elog(ERROR, "you already exist");
/*
- * Initialize process-local latch support. This could fail if the kernel
+ * Initialize process-local latch support. This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
@@ -557,7 +557,7 @@ InitAuxiliaryProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -715,7 +715,7 @@ LockErrorCleanup(void)
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
- * semaphore is reset to zero. This prevented a leftover wakeup signal
+ * semaphore is reset to zero. This prevented a leftover wakeup signal
* from remaining in the semaphore if someone else had granted us the lock
* we wanted before we were able to remove ourselves from the wait-list.
* However, now that ProcSleep loops until waitStatus changes, a leftover
@@ -851,7 +851,7 @@ ProcKill(int code, Datum arg)
/*
* AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
- * processes (bgwriter, etc). The PGPROC and sema are not released, only
+ * processes (bgwriter, etc). The PGPROC and sema are not released, only
* marked as not-in-use.
*/
static void
@@ -977,7 +977,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
*
* Special case: if I find I should go in front of some waiter, check to
* see if I conflict with already-held locks or the requests before that
- * waiter. If not, then just grant myself the requested lock immediately.
+ * waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
* point.
@@ -996,7 +996,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean up
+ * Yes, so we have a deadlock. Easiest way to clean up
* correctly is to call RemoveFromWaitQueue(), but we
* can't do that until we are *on* the wait queue. So, set
* a flag to check below, and break out of loop. Also,
@@ -1117,8 +1117,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
- * implementation. While this is normally good, there are cases where a
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. While this is normally good, there are cases where a
* saved wakeup might be leftover from a previous operation (for example,
* we aborted ProcWaitForSignal just before someone did ProcSendSignal).
* So, loop to wait again if the waitStatus shows we haven't been granted
@@ -1138,7 +1138,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* waitStatus could change from STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
+ * asynchronously. Read it just once per loop to prevent surprising
* behavior (such as missing log messages).
*/
myWaitStatus = MyProc->waitStatus;
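A sketch of the loop shape these two comments imply, with POSIX semaphores and hypothetical names standing in for PGSemaphoreLock and MyProc->waitStatus:

#include <semaphore.h>

typedef enum { STATUS_WAITING, STATUS_OK, STATUS_ERROR } FakeWaitStatus;

/*
 * A stale "saved" wakeup can make sem_wait return although no one has
 * granted us anything, so waking up proves nothing by itself: read the
 * status once per iteration and sleep again until it actually moves
 * off STATUS_WAITING.
 */
static FakeWaitStatus
wait_for_grant(sem_t *sem, volatile FakeWaitStatus *wait_status)
{
    FakeWaitStatus s;

    do
    {
        sem_wait(sem);       /* may return on a leftover wakeup */
        s = *wait_status;    /* one read per loop, as noted above */
    } while (s == STATUS_WAITING);
    return s;
}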
@@ -1623,10 +1623,10 @@ check_done:
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
- * before we actually reach the waiting state. Also as with locks,
+ * before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
- * if not. This copes with possible "leftover" wakeups.
+ * if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index b3987494c1..efe1b43fa7 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -79,7 +79,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
*
* We time out and declare error after NUM_DELAYS delays (thus, exactly
* that many tries). With the given settings, this will usually take 2 or
- * so minutes. It seems better to fix the total number of tries (and thus
+ * so minutes. It seems better to fix the total number of tries (and thus
* the probability of unintended failure) than to fix the total time
* spent.
*/
@@ -137,7 +137,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
* Note: spins_per_delay is local within our current process. We want to
* average these observations across multiple backends, since it's
* relatively rare for this function to even get entered, and so a single
- * backend might not live long enough to converge on a good value. That
+ * backend might not live long enough to converge on a good value. That
* is handled by the two routines below.
*/
if (cur_delay == 0)
@@ -177,7 +177,7 @@ update_spins_per_delay(int shared_spins_per_delay)
/*
* We use an exponential moving average with a relatively slow adaptation
* rate, so that noise in any one backend's result won't affect the shared
- * value too much. As long as both inputs are within the allowed range,
+ * value too much. As long as both inputs are within the allowed range,
* the result must be too, so we need not worry about clamping the result.
*
* We deliberately truncate rather than rounding; this is so that single
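A sketch of such a slowly adapting exponential moving average; the 15/16 weighting here is illustrative:

/*
 * Truncating division keeps the result in range whenever both inputs
 * are, so no clamping is needed, and a lone outlier from one backend
 * shifts the shared value only slightly.
 */
static int
ema_update(int shared_value, int local_sample)
{
    return (shared_value * 15 + local_sample) / 16;
}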
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 9499db115a..9b71744cc6 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -5,7 +5,7 @@
*
*
* For machines that have test-and-set (TAS) instructions, s_lock.h/.c
- * define the spinlock implementation. This file contains only a stub
+ * define the spinlock implementation. This file contains only a stub
* implementation for spinlocks using PGSemaphores. Unless semaphores
* are implemented in a way that doesn't involve a kernel call, this
* is too slow to be very useful :-(
@@ -74,7 +74,7 @@ SpinlockSemas(void)
extern void
SpinlockSemaInit(PGSemaphore spinsemas)
{
- int i;
+ int i;
for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
PGSemaphoreCreate(&spinsemas[i]);
@@ -88,7 +88,7 @@ SpinlockSemaInit(PGSemaphore spinsemas)
void
s_init_lock_sema(volatile slock_t *lock)
{
- static int counter = 0;
+ static int counter = 0;
*lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
}
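A standalone sketch of that round-robin assignment (NUM_SEMAS is a hypothetical stand-in for the pool size):

#define NUM_SEMAS 128

/*
 * Each spinlock gets one of a small fixed pool of semaphores, assigned
 * round-robin, so unrelated spinlocks may share a semaphore. That is
 * safe (if slow), because spinlocks are only held momentarily.
 */
static int
assign_sema_slot(void)
{
    static int counter = 0;

    return (++counter) % NUM_SEMAS;
}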
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 7729fcacf0..6351a9bea4 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -63,7 +63,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* PageIsVerified
* Check that the page header and checksum (if any) appear valid.
*
- * This is called when a page has just been read in from disk. The idea is
+ * This is called when a page has just been read in from disk. The idea is
* to cheaply detect trashed pages before we go nuts following bogus item
* pointers, testing invalid transaction identifiers, etc.
*
@@ -155,7 +155,7 @@ PageIsVerified(Page page, BlockNumber blkno)
/*
* PageAddItem
*
- * Add an item to a page. Return value is offset at which it was
+ * Add an item to a page. Return value is offset at which it was
* inserted, or InvalidOffsetNumber if there's not room to insert.
*
* If overwrite is true, we just store the item at the specified
@@ -769,7 +769,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
* PageIndexMultiDelete
*
* This routine handles the case of deleting multiple tuples from an
- * index page at once. It is considerably faster than a loop around
+ * index page at once. It is considerably faster than a loop around
* PageIndexTupleDelete ... however, the caller *must* supply the array
* of item numbers to be deleted in item number order!
*/
@@ -780,7 +780,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
Offset pd_lower = phdr->pd_lower;
Offset pd_upper = phdr->pd_upper;
Offset pd_special = phdr->pd_special;
- itemIdSortData itemidbase[MaxIndexTuplesPerPage];
+ itemIdSortData itemidbase[MaxIndexTuplesPerPage];
itemIdSort itemidptr;
ItemId lp;
int nline,
@@ -903,7 +903,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
* If checksums are disabled, or if the page is not initialized, just return
* the input. Otherwise, we must make a copy of the page before calculating
* the checksum, to prevent concurrent modifications (e.g. setting hint bits)
- * from making the final checksum invalid. It doesn't matter if we include or
+ * from making the final checksum invalid. It doesn't matter if we include or
* exclude hints during the copy, as long as we write a valid page and
* associated checksum.
*
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 921861b0bd..3c1c81a729 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -86,7 +86,7 @@
* not needed because of an mdtruncate() operation. The reason for leaving
* them present at size zero, rather than unlinking them, is that other
* backends and/or the checkpointer might be holding open file references to
- * such segments. If the relation expands again after mdtruncate(), such
+ * such segments. If the relation expands again after mdtruncate(), such
* that a deactivated segment becomes active again, it is important that
* such file references still be valid --- else data might get written
* out to an unlinked old copy of a segment file that will eventually
@@ -123,7 +123,7 @@ static MemoryContext MdCxt; /* context for all md.c allocations */
* we keep track of pending fsync operations: we need to remember all relation
* segments that have been written since the last checkpoint, so that we can
* fsync them down to disk before completing the next checkpoint. This hash
- * table remembers the pending operations. We use a hash table mostly as
+ * table remembers the pending operations. We use a hash table mostly as
* a convenient way of merging duplicate requests.
*
* We use a similar mechanism to remember no-longer-needed files that can
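A sketch of the request-merging idea in that comment, with a flat array standing in for the hash table and hypothetical field names:

#include <stdbool.h>

typedef struct
{
    int  db_id;          /* which file needs fsync before checkpoint */
    int  rel_id;
    bool canceled;       /* file unlinked since the request arrived */
} PendingFsync;

static PendingFsync pending[1024];
static int npending = 0;

/*
 * Merge duplicate requests: a second request for the same file before
 * the next checkpoint collapses into the existing entry. Capacity and
 * error handling are omitted.
 */
static void
remember_fsync_request(int db_id, int rel_id)
{
    int i;

    for (i = 0; i < npending; i++)
        if (pending[i].db_id == db_id && pending[i].rel_id == rel_id)
            return;
    pending[npending].db_id = db_id;
    pending[npending].rel_id = rel_id;
    pending[npending].canceled = false;
    npending++;
}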
@@ -291,7 +291,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* During bootstrap, there are cases where a system relation will be
* accessed (by internal backend processes) before the bootstrap
* script nominally creates it. Therefore, allow the file to exist
- * already, even if isRedo is not set. (See also mdopen)
+ * already, even if isRedo is not set. (See also mdopen)
*/
if (isRedo || IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
@@ -336,7 +336,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* if the contents of the file were repopulated by subsequent WAL entries.
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as for instance CLUSTER and CREATE INDEX do),
- * the contents of the file would be lost forever. By leaving the empty file
+ * the contents of the file would be lost forever. By leaving the empty file
* until after the next checkpoint, we prevent reassignment of the relfilenode
* number until it's safe, because relfilenode assignment skips over any
* existing file.
@@ -349,7 +349,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
- * relfilenode number from being recycled. Also, we do not carefully
+ * relfilenode number from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
@@ -366,7 +366,7 @@ mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
/*
* We have to clean out any pending fsync requests for the doomed
- * relation, else the next mdsync() will fail. There can't be any such
+ * relation, else the next mdsync() will fail. There can't be any such
* requests for a temp relation, though. We can send just one request
* even when deleting multiple forks, since the fsync queuing code accepts
* the "InvalidForkNumber = all forks" convention.
@@ -503,7 +503,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
/*
* Note: because caller usually obtained blocknum by calling mdnblocks,
* which did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
+ * optimized away by fd.c. It's not redundant, however, if there is a
* partial page at the end of the file. In that case we want to try to
* overwrite the partial page with a full page. It's also not redundant
* if bufmgr.c had to dump another buffer of the same file to make room
@@ -803,9 +803,9 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
* exactly RELSEG_SIZE long, and it's useless to recheck that each time.
*
* NOTE: this assumption could only be wrong if another backend has
- * truncated the relation. We rely on higher code levels to handle that
+ * truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
- * relcache flush. (Since the checkpointer doesn't participate in
+ * relcache flush. (Since the checkpointer doesn't participate in
* relcache flush, it could have segment chain entries for inactive
* segments; that's OK because the checkpointer never needs to compute
* relation size.)
@@ -999,7 +999,7 @@ mdsync(void)
/*
* If we are in the checkpointer, the sync had better include all fsync
- * requests that were queued by backends up to this point. The tightest
+ * requests that were queued by backends up to this point. The tightest
* race condition that could occur is that a buffer that must be written
* and fsync'd for the checkpoint could have been dumped by a backend just
* before it was visited by BufferSync(). We know the backend will have
@@ -1115,7 +1115,7 @@ mdsync(void)
* that have been deleted (unlinked) by the time we get to
* them. Rather than just hoping an ENOENT (or EACCES on
* Windows) error can be ignored, what we do on error is
- * absorb pending requests and then retry. Since mdunlink()
+ * absorb pending requests and then retry. Since mdunlink()
* queues a "cancel" message before actually unlinking, the
* fsync request is guaranteed to be marked canceled after the
* absorb if it really was this case. DROP DATABASE likewise
@@ -1219,7 +1219,7 @@ mdsync(void)
/*
* We've finished everything that was requested before we started to
- * scan the entry. If no new requests have been inserted meanwhile,
+ * scan the entry. If no new requests have been inserted meanwhile,
* remove the entry. Otherwise, update its cycle counter, as all the
* requests now in it must have arrived during this cycle.
*/
@@ -1324,7 +1324,7 @@ mdpostckpt(void)
/*
* As in mdsync, we don't want to stop absorbing fsync requests for a
- * long time when there are many deletions to be done. We can safely
+ * long time when there are many deletions to be done. We can safely
* call AbsorbFsyncRequests() at this point in the loop (note it might
* try to delete list entries).
*/
@@ -1449,7 +1449,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* We can't just delete the entry since mdsync could have an
* active hashtable scan. Instead we delete the bitmapsets; this
- * is safe because of the way mdsync is coded. We also set the
+ * is safe because of the way mdsync is coded. We also set the
* "canceled" flags so that mdsync can tell that a cancel arrived
* for the fork(s).
*/
@@ -1551,7 +1551,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* NB: it's intentional that we don't change cycle_ctr if the entry
- * already exists. The cycle_ctr must represent the oldest fsync
+ * already exists. The cycle_ctr must represent the oldest fsync
* request that could be in the entry.
*/
@@ -1720,7 +1720,7 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
{
/*
* Normally we will create new segments only if authorized by the
- * caller (i.e., we are doing mdextend()). But when doing WAL
+ * caller (i.e., we are doing mdextend()). But when doing WAL
* recovery, create segments anyway; this allows cases such as
* replaying WAL data that has a write into a high-numbered
* segment of a relation that was later deleted. We want to go
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index fcbdc4117a..d16f559298 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -494,7 +494,7 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
}
/*
- * Get rid of any remaining buffers for the relations. bufmgr will just
+ * Get rid of any remaining buffers for the relations. bufmgr will just
* drop them without bothering to write the contents.
*/
DropRelFileNodesAllBuffers(rnodes, nrels);
@@ -679,7 +679,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
- * smgr_targblock variables pointing past the new rel end. (The inval
+ * smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index b211349694..9f50c5add5 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -44,8 +44,8 @@
* each fastpath call as a separate transaction command, and so the
* cached data could never actually have been reused. If it had worked
* as intended, it would have had problems anyway with dangling references
- * in the FmgrInfo struct. So, forget about caching and just repeat the
- * syscache fetches on each usage. They're not *that* expensive.
+ * in the FmgrInfo struct. So, forget about caching and just repeat the
+ * syscache fetches on each usage. They're not *that* expensive.
*/
struct fp_info
{
@@ -205,7 +205,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
/*
* Since the validity of this structure is determined by whether the
- * funcid is OK, we clear the funcid here. It must not be set to the
+ * funcid is OK, we clear the funcid here. It must not be set to the
* correct value until we are about to return with a good struct fp_info,
* since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any
* time. [No longer really an issue since we don't save the struct
@@ -257,7 +257,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
* RETURNS:
* 0 if successful completion, EOF if frontend connection lost.
*
- * Note: All ordinary errors result in ereport(ERROR,...). However,
+ * Note: All ordinary errors result in ereport(ERROR,...). However,
* if we lose the frontend connection there is no one to ereport to,
* and no use in proceeding...
*
@@ -526,7 +526,7 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
/*
* Since stringinfo.c keeps a trailing null in place even for
- * binary data, the contents of abuf are a valid C string. We
+ * binary data, the contents of abuf are a valid C string. We
* have to do encoding conversion before calling the typinput
* routine, though.
*/
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index be961017d6..89d2d4a7b8 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -443,7 +443,7 @@ SocketBackend(StringInfo inBuf)
default:
/*
- * Otherwise we got garbage from the frontend. We treat this as
+ * Otherwise we got garbage from the frontend. We treat this as
* fatal because we have probably lost message boundary sync, and
* there's no good way to recover.
*/
@@ -851,7 +851,7 @@ exec_simple_query(const char *query_string)
ResetUsage();
/*
- * Start up a transaction command. All queries generated by the
+ * Start up a transaction command. All queries generated by the
* query_string will be in this same command block, *unless* we find a
* BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
* one of those, else bad things will happen in xact.c. (Note that this
@@ -860,7 +860,7 @@ exec_simple_query(const char *query_string)
start_xact_command();
/*
- * Zap any pre-existing unnamed statement. (While not strictly necessary,
+ * Zap any pre-existing unnamed statement. (While not strictly necessary,
* it seems best to define simple-Query mode as if it used the unnamed
* statement and portal; this ensures we recover any storage used by prior
* unnamed operations.)
@@ -919,7 +919,7 @@ exec_simple_query(const char *query_string)
/*
* Get the command name for use in status display (it also becomes the
- * default completion tag, down inside PortalRun). Set ps_status and
+ * default completion tag, down inside PortalRun). Set ps_status and
* do any special start-of-SQL-command processing needed by the
* destination.
*/
@@ -1007,7 +1007,7 @@ exec_simple_query(const char *query_string)
/*
* Select the appropriate output format: text unless we are doing a
- * FETCH from a binary cursor. (Pretty grotty to have to do this here
+ * FETCH from a binary cursor. (Pretty grotty to have to do this here
* --- but it avoids grottiness in other places. Ah, the joys of
* backward compatibility...)
*/
@@ -1308,7 +1308,7 @@ exec_parse_message(const char *query_string, /* string to execute */
}
else
{
- /* Empty input string. This is legal. */
+ /* Empty input string. This is legal. */
raw_parse_tree = NULL;
commandTag = NULL;
psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
@@ -1358,7 +1358,7 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* We do NOT close the open transaction command here; that only happens
- * when the client sends Sync. Instead, do CommandCounterIncrement just
+ * when the client sends Sync. Instead, do CommandCounterIncrement just
* in case something happened during parse/plan.
*/
CommandCounterIncrement();
@@ -1500,7 +1500,7 @@ exec_bind_message(StringInfo input_message)
* If we are in aborted transaction state, the only portals we can
* actually run are those containing COMMIT or ROLLBACK commands. We
* disallow binding anything else to avoid problems with infrastructure
- * that expects to run inside a valid transaction. We also disallow
+ * that expects to run inside a valid transaction. We also disallow
* binding any parameters, since we can't risk calling user-defined I/O
* functions.
*/
@@ -1589,7 +1589,7 @@ exec_bind_message(StringInfo input_message)
/*
* Rather than copying data around, we just set up a phony
* StringInfo pointing to the correct portion of the message
- * buffer. We assume we can scribble on the message buffer so
+ * buffer. We assume we can scribble on the message buffer so
* as to maintain the convention that StringInfos have a
* trailing null. This is grotty but is a big win when
* dealing with very large parameter strings.
@@ -1939,7 +1939,7 @@ exec_execute_message(const char *portal_name, long max_rows)
if (is_xact_command)
{
/*
- * If this was a transaction control statement, commit it. We
+ * If this was a transaction control statement, commit it. We
* will start a new xact command for the next command (if any).
*/
finish_xact_command();
@@ -2345,7 +2345,7 @@ exec_describe_portal_message(const char *portal_name)
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
- * Hence, refuse to Describe portals that return data. (We shouldn't just
+ * Hence, refuse to Describe portals that return data. (We shouldn't just
* refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.)
@@ -2562,7 +2562,7 @@ quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -3291,7 +3291,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx,
#endif
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* postmaster/postmaster.c (the option sets should not conflict) and with
* the common help() function in main/main.c.
*/
@@ -3594,7 +3594,7 @@ PostgresMain(int argc, char *argv[],
* we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in the
- * postmaster. If such a signal arrives before we are able to change the
+ * postmaster. If such a signal arrives before we are able to change the
* handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
* handler in the postmaster to reserve the signal. (Of course, this isn't
* an issue for signals that are locally generated, such as SIGALRM and
@@ -3800,7 +3800,7 @@ PostgresMain(int argc, char *argv[],
/*
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
- * AbortTransaction() instead. The only stuff done directly here
+ * AbortTransaction() instead. The only stuff done directly here
* should be stuff that is guaranteed to apply *only* for outer-level
* error recovery, such as adjusting the FE/BE protocol status.
*/
@@ -3923,7 +3923,7 @@ PostgresMain(int argc, char *argv[],
* collector, and to update the PS stats display. We avoid doing
* those every time through the message loop because it'd slow down
* processing of batched messages, and because we don't want to report
- * uncommitted updates (that confuses autovacuum). The notification
+ * uncommitted updates (that confuses autovacuum). The notification
* processor wants a call too, if we are not in a transaction block.
*/
if (send_ready_for_query)
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index f85bd031c1..fa561a4861 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -558,7 +558,7 @@ PortalStart(Portal portal, ParamListInfo params,
/*
* We don't start the executor until we are told to run the
- * portal. We do need to set up the result tupdesc.
+ * portal. We do need to set up the result tupdesc.
*/
{
PlannedStmt *pstmt;
@@ -908,7 +908,7 @@ PortalRunSelect(Portal portal,
Assert(queryDesc || portal->holdStore);
/*
- * Force the queryDesc destination to the right thing. This supports
+ * Force the queryDesc destination to the right thing. This supports
* MOVE, for example, which will pass in dest = DestNone. This is okay to
* change as long as we do it on every fetch. (The Executor must not
* assume that dest never changes.)
@@ -1156,12 +1156,12 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
elog(DEBUG3, "ProcessUtility");
/*
- * Set snapshot if utility stmt needs one. Most reliable way to do this
+ * Set snapshot if utility stmt needs one. Most reliable way to do this
* seems to be to enumerate those that do not need one; this is a short
* list. Transaction control, LOCK, and SET must *not* set a snapshot
* since they need to be executable at the start of a transaction-snapshot
* mode transaction without freezing a snapshot. By extension we allow
- * SHOW not to set a snapshot. The other stmts listed are just efficiency
+ * SHOW not to set a snapshot. The other stmts listed are just efficiency
* hacks. Beware of listing anything that can modify the database --- if,
* say, it has to update an index with expressions that invoke
* user-defined functions, then it had better have a snapshot.
@@ -1196,7 +1196,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
/*
* Some utility commands may pop the ActiveSnapshot stack from under us,
- * so we only pop the stack if we actually see a snapshot set. Note that
+ * so we only pop the stack if we actually see a snapshot set. Note that
* the set of utility commands that do this must be the same set
* disallowed to run inside a transaction; otherwise, we could be popping
* a snapshot that belongs to some other operation.
@@ -1518,7 +1518,7 @@ DoPortalRunFetch(Portal portal,
* Definition: Rewind to start, advance count-1 rows, return
* next row (if any). In practice, if the goal is less than
* halfway back to the start, it's better to scan from where
- * we are. In any case, we arrange to fetch the target row
+ * we are. In any case, we arrange to fetch the target row
* going forwards.
*/
if (portal->posOverflow || portal->portalPos == LONG_MAX ||
@@ -1625,7 +1625,7 @@ DoPortalRunFetch(Portal portal,
* If we are sitting on a row, back up one so we can re-fetch it.
* If we are not sitting on a row, we still have to start up and
* shut down the executor so that the destination is initialized
- * and shut down correctly; so keep going. To PortalRunSelect,
+ * and shut down correctly; so keep going. To PortalRunSelect,
* count == 0 means we will retrieve no row.
*/
if (on_row)
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 1846570a3e..3423898c11 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -232,7 +232,7 @@ PreventCommandIfReadOnly(const char *cmdname)
* PreventCommandDuringRecovery: throw error if RecoveryInProgress
*
* The majority of operations that are unsafe in a Hot Standby slave
- * will be rejected by XactReadOnly tests. However there are a few
+ * will be rejected by XactReadOnly tests. However there are a few
* commands that are allowed in "read-only" xacts but cannot be allowed
* in Hot Standby mode. Those commands should call this function.
*/
@@ -965,7 +965,7 @@ ProcessUtilitySlow(Node *parsetree,
LOCKMODE lockmode;
/*
- * Figure out lock mode, and acquire lock. This also does
+ * Figure out lock mode, and acquire lock. This also does
* basic permissions checks, so that we won't wait for a
* lock on (for example) a relation on which we have no
* permissions.
diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c
index f9490c835d..1b866f31ed 100644
--- a/src/backend/tsearch/ts_locale.c
+++ b/src/backend/tsearch/ts_locale.c
@@ -90,7 +90,7 @@ t_isprint(const char *ptr)
/*
- * Set up to read a file using tsearch_readline(). This facility is
+ * Set up to read a file using tsearch_readline(). This facility is
* better than just reading the file directly because it provides error
* context pointing to the specific line where a problem is detected.
*
@@ -168,7 +168,7 @@ tsearch_readline_callback(void *arg)
/*
* We can't include the text of the config line for errors that occur
- * during t_readline() itself. This is only partly a consequence of our
+ * during t_readline() itself. This is only partly a consequence of our
* arms-length use of that routine: the major cause of such errors is
* encoding violations, and we daren't try to print error messages
* containing badly-encoded data.
diff --git a/src/backend/tsearch/ts_selfuncs.c b/src/backend/tsearch/ts_selfuncs.c
index 273f13068b..25337e87ab 100644
--- a/src/backend/tsearch/ts_selfuncs.c
+++ b/src/backend/tsearch/ts_selfuncs.c
@@ -319,7 +319,7 @@ tsquery_opr_selec(QueryItem *item, char *operand,
* exclusive. We treat occurrences as independent events.
*
* This is only a good plan if we have a pretty fair number of
- * MCELEMs available; we set the threshold at 100. If no stats or
+ * MCELEMs available; we set the threshold at 100. If no stats or
* insufficient stats, arbitrarily use DEFAULT_TS_MATCH_SEL*4.
*/
if (lookup == NULL || length < 100)
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index c4691fa0b0..fe208704a1 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -114,13 +114,13 @@ ts_typanalyze(PG_FUNCTION_ARGS)
* language's frequency table, where K is the target number of entries in
* the MCELEM array plus an arbitrary constant, meant to reflect the fact
* that the most common words in any language would usually be stopwords
- * so we will not actually see them in the input. We assume that the
+ * so we will not actually see them in the input. We assume that the
* distribution of word frequencies (including the stopwords) follows Zipf's
* law with an exponent of 1.
*
* Assuming Zipfian distribution, the frequency of the K'th word is equal
* to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of
- * words in the language. Putting W as one million, we get roughly 0.07/K.
+ * words in the language. Putting W as one million, we get roughly 0.07/K.
* Assuming top 10 words are stopwords gives s = 0.07/(K + 10). We set
* epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and
* maximum expected hashtable size of about 1000 * (K + 10).
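Plugging a hypothetical target of K = 1000 MCELEM entries into the formulas above makes the arithmetic concrete:

#include <stdio.h>

int
main(void)
{
    const int    K = 1000;                /* hypothetical MCELEM target */
    const double s = 0.07 / (K + 10);     /* ~ 6.93e-5 */
    const double epsilon = s / 10.0;      /* ~ 6.93e-6 */
    const double w = 1.0 / epsilon;       /* bucket width ~ 144286 */
    const double max_tab = 1000.0 * (K + 10);   /* ~ 1.01e6 entries */

    printf("s=%g epsilon=%g w=%g max=%g\n", s, epsilon, w, max_tab);
    return 0;
}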
@@ -161,7 +161,7 @@ compute_tsvector_stats(VacAttrStats *stats,
TrackItem *item;
/*
- * We want statistics_target * 10 lexemes in the MCELEM array. This
+ * We want statistics_target * 10 lexemes in the MCELEM array. This
* multiplier is pretty arbitrary, but is meant to reflect the fact that
* the number of individual lexeme values tracked in pg_statistic ought to
* be more than the number of values for a simple scalar column.
@@ -232,7 +232,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* We loop through the lexemes in the tsvector and add them to our
- * tracking hashtable. Note: the hashtable entries will point into
+ * tracking hashtable. Note: the hashtable entries will point into
* the (detoasted) tsvector value, therefore we cannot free that
* storage until we're done.
*/
@@ -299,7 +299,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -332,7 +332,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* If we obtained more lexemes than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -363,7 +363,7 @@ compute_tsvector_stats(VacAttrStats *stats,
* they get sorted on frequencies. The rationale is that we
* usually search through most common elements looking for a
* specific value, so we can grab its frequency. When values are
- * presorted we can employ binary search for that. See
+ * presorted we can employ binary search for that. See
* ts_selfuncs.c for a real usage scenario.
*/
qsort(sort_table, num_mcelem, sizeof(TrackItem *),
diff --git a/src/backend/tsearch/ts_utils.c b/src/backend/tsearch/ts_utils.c
index 8a0e0767cb..2e8f4f168f 100644
--- a/src/backend/tsearch/ts_utils.c
+++ b/src/backend/tsearch/ts_utils.c
@@ -23,8 +23,8 @@
/*
* Given the base name and extension of a tsearch config file, return
- * its full path name. The base name is assumed to be user-supplied,
- * and is checked to prevent pathname attacks. The extension is assumed
+ * its full path name. The base name is assumed to be user-supplied,
+ * and is checked to prevent pathname attacks. The extension is assumed
* to be safe.
*
* The result is a palloc'd string.
@@ -37,7 +37,7 @@ get_tsearch_config_filename(const char *basename,
char *result;
/*
- * We limit the basename to contain a-z, 0-9, and underscores. This may
+ * We limit the basename to contain a-z, 0-9, and underscores. This may
* be overly restrictive, but we don't want to allow access to anything
* outside the tsearch_data directory, so for instance '/' *must* be
* rejected, and on some platforms '\' and ':' are risky as well. Allowing
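/*
 * Illustrative sketch (not part of the patch): the whitelist test the
 * comment above describes.  Restricting basenames to [a-z0-9_] means
 * '/', '\' and ':' can never reach the filesystem layer, so the path
 * stays inside tsearch_data.  is_safe_basename is a hypothetical helper.
 */
#include <stdbool.h>

static bool
is_safe_basename(const char *basename)
{
	if (*basename == '\0')
		return false;			/* empty names are not acceptable either */
	for (const char *p = basename; *p; p++)
	{
		if (!((*p >= 'a' && *p <= 'z') ||
			  (*p >= '0' && *p <= '9') ||
			  *p == '_'))
			return false;		/* reject anything off the whitelist */
	}
	return true;
}

/* is_safe_basename("english_stem") -> true; is_safe_basename("../x") -> false */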
@@ -61,7 +61,7 @@ get_tsearch_config_filename(const char *basename,
/*
* Reads a stop-word file. Each word is run through 'wordop'
- * function, if given. wordop may either modify the input in-place,
+ * function, if given. wordop may either modify the input in-place,
* or palloc a new version.
*/
void
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
index 6728212141..d53f2e75c2 100644
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -330,7 +330,7 @@ TParserInit(char *str, int len)
/*
* Use of %.*s here is a bit risky since it can misbehave if the data is
- * not in what libc thinks is the prevailing encoding. However, since
+ * not in what libc thinks is the prevailing encoding. However, since
* this is just a debugging aid, we choose to live with that.
*/
fprintf(stderr, "parsing \"%.*s\"\n", len, str);
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index dfac1243a4..38cd5b89c9 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -123,7 +123,7 @@ static Oid get_role_oid_or_public(const char *rolname);
/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
- * 's', ignoring any leading white space. If it finds a double quote
+ * 's', ignoring any leading white space. If it finds a double quote
* it returns the word inside the quotes.
*
* RETURNS:
@@ -229,7 +229,7 @@ putid(char *p, const char *s)
*
* RETURNS:
* the string position in 's' immediately following the ACL
- * specification. Also:
+ * specification. Also:
* - loads the structure pointed to by 'aip' with the appropriate
* UID/GID, id type identifier and mode type values.
*/
@@ -837,7 +837,7 @@ acldefault(GrantObjectType objtype, Oid ownerId)
/*
- * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
+ * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
* ACL_OBJECT_* values, but it's only used in the information schema, not
* documented for general use.
*/
@@ -1006,7 +1006,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can only
+ * Remove abandoned privileges (cascading revoke). Currently we can only
* handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
@@ -1072,7 +1072,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* If the old ACL contained any references to the new owner, then we may
- * now have generated an ACL containing duplicate entries. Find them and
+ * now have generated an ACL containing duplicate entries. Find them and
* merge them so that there are not duplicates. (This is relatively
* expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
* be the normal case.)
@@ -1083,7 +1083,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
* remove privilege-free entries, should there be any in the input.) dst
* is the next output slot, targ is the currently considered input slot
* (always >= dst), and src scans entries to the right of targ looking for
- * duplicates. Once an entry has been emitted to dst it is known
+ * duplicates. Once an entry has been emitted to dst it is known
* duplicate-free and need not be considered anymore.
*/
if (newpresent)
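/*
 * Illustrative sketch (not part of the patch): the dst/targ/src pointer
 * roles described above, applied to a toy array where "duplicate" means
 * an equal key and "merge" means OR-ing flag bits.  Assumes keys are
 * non-negative so -1 can mark consumed slots; aclnewowner() plays the
 * same O(N^2) game over AclItems.
 */
static int
merge_duplicates(int *keys, unsigned *flags, int n)
{
	int			dst = 0;

	for (int targ = 0; targ < n; targ++)
	{
		if (keys[targ] < 0)
			continue;			/* already merged into an earlier slot */
		for (int src = targ + 1; src < n; src++)
		{
			if (keys[src] == keys[targ])
			{
				flags[targ] |= flags[src];	/* merge duplicate rights */
				keys[src] = -1; /* mark it consumed */
			}
		}
		keys[dst] = keys[targ];	/* emitted: now known duplicate-free */
		flags[dst] = flags[targ];
		dst++;
	}
	return dst;					/* number of surviving entries */
}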
@@ -2468,7 +2468,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
* existence of the pg_class row before risking calling pg_class_aclcheck.
* Note: it might seem there's a race condition against concurrent DROP,
* but really it's safe because there will be no syscache flush between
- * here and there. So if we see the row in the syscache, so will
+ * here and there. So if we see the row in the syscache, so will
* pg_class_aclcheck.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid)))
@@ -4904,7 +4904,7 @@ is_member_of_role_nosuper(Oid member, Oid role)
/*
- * Is member an admin of role? That is, is member the role itself (subject to
+ * Is member an admin of role? That is, is member the role itself (subject to
* restrictions below), a member (directly or indirectly) WITH ADMIN OPTION,
* or a superuser?
*/
@@ -4919,6 +4919,7 @@ is_admin_of_role(Oid member, Oid role)
return true;
if (member == role)
+
/*
* A role can admin itself when it matches the session user and we're
* outside any security-restricted operation, SECURITY DEFINER or
@@ -5015,14 +5016,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index 20eb358a62..170a28a067 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -524,7 +524,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
/*
* Estimate selectivity of "column @> const" and "column && const" based on
- * most common element statistics. This estimation assumes element
+ * most common element statistics. This estimation assumes element
* occurrences are independent.
*
* mcelem (of length nmcelem) and numbers (of length nnumbers) are from
@@ -689,7 +689,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
* In the "column @> const" and "column && const" cases, we usually have a
* "const" with low number of elements (otherwise we have selectivity close
* to 0 or 1 respectively). That's why the effect of dependence related
- * to distinct element count distribution is negligible there. In the
+ * to distinct element count distribution is negligible there. In the
* "column <@ const" case, number of elements is usually high (otherwise we
* have selectivity close to 0). That's why we should do a correction with
* the array distinct element count distribution here.
@@ -848,7 +848,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*
* The presence of many distinct rare elements materially decreases
* selectivity. Use the Poisson distribution to estimate the probability
- * of a column value having zero occurrences of such elements. See above
+ * of a column value having zero occurrences of such elements. See above
* for the definition of "rest".
*/
mult *= exp(-rest);
@@ -856,7 +856,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*----------
* Using the distinct element count histogram requires
* O(unique_nitems * (nmcelem + unique_nitems))
- * operations. Beyond a certain computational cost threshold, it's
+ * operations. Beyond a certain computational cost threshold, it's
* reasonable to sacrifice accuracy for decreased planning time. We limit
* the number of operations to EFFORT * nmcelem; since nmcelem is limited
* by the column's statistics target, the work done is user-controllable.
@@ -868,7 +868,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
* elements to start with, we'd have to remove any discarded elements'
* frequencies from "mult", but since this is only an approximation
* anyway, we don't bother with that. Therefore it's sufficient to qsort
- * elem_selec[] and take the largest elements. (They will no longer match
+ * elem_selec[] and take the largest elements. (They will no longer match
* up with the elements of array_data[], but we don't care.)
*----------
*/
@@ -878,7 +878,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems))
{
/*
- * Use the quadratic formula to solve for largest allowable N. We
+ * Use the quadratic formula to solve for largest allowable N. We
* have A = 1, B = nmcelem, C = - EFFORT * nmcelem.
*/
double b = (double) nmcelem;
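/*
 * Illustrative sketch (not part of the patch): the quadratic being solved
 * above.  Requiring N * (nmcelem + N) <= EFFORT * nmcelem gives
 * N^2 + B*N + C <= 0 with A = 1, B = nmcelem, C = -EFFORT * nmcelem, and
 * the largest allowable N is the positive root.  "effort" stands in for
 * the EFFORT constant here.
 */
#include <math.h>

static int
largest_allowable_n(int nmcelem, double effort)
{
	double		b = (double) nmcelem;
	double		c = -effort * nmcelem;

	return (int) ((-b + sqrt(b * b - 4.0 * c)) / 2.0);
}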
@@ -953,7 +953,7 @@ calc_hist(const float4 *hist, int nhist, int n)
/*
* frac is a probability contribution for each interval between histogram
- * values. We have nhist - 1 intervals, so contribution of each one will
+ * values. We have nhist - 1 intervals, so contribution of each one will
* be 1 / (nhist - 1).
*/
frac = 1.0f / ((float) (nhist - 1));
@@ -1020,8 +1020,8 @@ calc_hist(const float4 *hist, int nhist, int n)
* "rest" is the sum of the probabilities of all low-probability events not
* included in p.
*
- * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
- * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
+ * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
+ * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
* For any constant j, each increment of i increases the probability iff the
* event occurs. So, by the law of total probability:
* M[i,j] = M[i - 1, j] * (1 - p[i]) + M[i - 1, j - 1] * p[i]
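/*
 * Illustrative sketch (not part of the patch): the recurrence quoted
 * above, evaluated with one row updated right to left so that m[j - 1]
 * still holds the previous row's value.  Afterwards m[j] is the
 * probability that exactly j of the n independent events occurred;
 * calc_distr() in this file does the same with extra bookkeeping.
 */
static void
exact_count_distribution(const double *p, int n, double *m, int mlen)
{
	for (int j = 0; j < mlen; j++)
		m[j] = 0.0;
	m[0] = 1.0;					/* M[0,0] = 1 */

	for (int i = 0; i < n; i++)
	{
		int			top = (i + 1 < mlen) ? i + 1 : mlen - 1;

		/* walk right to left so m[j - 1] is still the previous row */
		for (int j = top; j > 0; j--)
			m[j] = m[j] * (1.0 - p[i]) + m[j - 1] * p[i];
		m[0] *= (1.0 - p[i]);
	}
}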
@@ -1143,7 +1143,7 @@ floor_log2(uint32 n)
/*
* find_next_mcelem binary-searches a most common elements array, starting
- * from *index, for the first member >= value. It saves the position of the
+ * from *index, for the first member >= value. It saves the position of the
* match into *index and returns true if it's an exact match. (Note: we
* assume the mcelem elements are distinct so there can't be more than one
* exact match.)
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index 70aba1b5d8..4d7e9c311f 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS)
 * compute_array_stats() -- compute statistics for an array column
*
* This function computes statistics useful for determining selectivity of
- * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
+ * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
* compute_stats hook after sample rows have been collected.
*
* We also invoke the standard compute_stats function, which will compute
* "scalar" statistics relevant to the btree-style array comparison operators.
* However, exact duplicates of an entire array may be rare despite many
- * arrays sharing individual elements. This especially afflicts long arrays,
+ * arrays sharing individual elements. This especially afflicts long arrays,
* which are also liable to lack all scalar statistics due to the low
* WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
* we find the most common array elements and compute a histogram of distinct
@@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS)
* In the absence of a principled basis for other particular values, we
* follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
* But we leave out the correction for stopwords, which do not apply to
- * arrays. These parameters give bucket width w = K/0.007 and maximum
+ * arrays. These parameters give bucket width w = K/0.007 and maximum
* expected hashtable size of about 1000 * K.
*
* Elements may repeat within an array. Since duplicates do not change the
@@ -463,7 +463,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -498,7 +498,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* If we obtained more elements than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -532,7 +532,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* We sorted statistics on the element value, but we want to be
* able to find the minimal and maximal frequencies without going
- * through all the values. We also want the frequency of null
+ * through all the values. We also want the frequency of null
* elements. Store these three values at the end of mcelem_freqs.
*/
mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
@@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* (compare the histogram-making loop in compute_scalar_stats()).
* But instead of that we have the sorted_count_items[] array,
* which holds unique DEC values with their frequencies (that is,
- * a run-length-compressed version of the full array). So we
+ * a run-length-compressed version of the full array). So we
* control advancing through sorted_count_items[] with the
* variable "frac", which is defined as (x - y) * (num_hist - 1),
* where x is the index in the notional DECs array corresponding
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index c62e3fb176..831466dec9 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
@@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
int lbs[1];
/*
- * Test for null before Asserting we are in right context. This is to
+ * Test for null before Asserting we are in right context. This is to
* avoid possible Assert failure in 8.4beta installations, where it is
* possible for users to create NULL constants of type internal.
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 91df184242..f8e94ec365 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -694,7 +694,7 @@ ReadArrayStr(char *arrayStr,
/*
* We have to remove " and \ characters to create a clean item value to
- * pass to the datatype input routine. We overwrite each item value
+ * pass to the datatype input routine. We overwrite each item value
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
@@ -894,7 +894,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*/
static void
@@ -1747,6 +1747,7 @@ Datum
array_cardinality(PG_FUNCTION_ARGS)
{
ArrayType *v = PG_GETARG_ARRAYTYPE_P(0);
+
PG_RETURN_INT32(ArrayGetNItems(ARR_NDIM(v), ARR_DIMS(v)));
}
@@ -2002,7 +2003,7 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
* copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
@@ -2634,7 +2635,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -2649,9 +2650,9 @@ array_set_slice(ArrayType *array,
* first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
* or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
+ * * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -3505,7 +3506,7 @@ array_cmp(FunctionCallInfo fcinfo)
/*
* If arrays contain same data (up to end of shorter one), apply
- * additional rules to sort by dimensionality. The relative significance
+ * additional rules to sort by dimensionality. The relative significance
* of the different bits of information is historical; mainly we just care
* that we don't say "equal" for arrays of different dimensionality.
*/
@@ -3767,7 +3768,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation,
/*
* We assume that the comparison operator is strict, so a NULL can't
- * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
* array_eq, should we act like that?
*/
if (isnull1)
@@ -4258,7 +4259,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -4455,7 +4456,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -5123,7 +5124,7 @@ array_unnest(PG_FUNCTION_ARGS)
* Get the array value and detoast if needed. We can't do this
* earlier because if we have to detoast, we want the detoasted copy
* to be in multi_call_memory_ctx, so it will go away when we're done
- * and not before. (If no detoast happens, we assume the originally
+ * and not before. (If no detoast happens, we assume the originally
* passed array will stick around till then.)
*/
arr = PG_GETARG_ARRAYTYPE_P(0);
@@ -5199,7 +5200,7 @@ array_unnest(PG_FUNCTION_ARGS)
*
* Find all array entries matching (not distinct from) search/search_isnull,
* and delete them if remove is true, else replace them with
- * replace/replace_isnull. Comparisons are done using the specified
+ * replace/replace_isnull. Comparisons are done using the specified
* collation. fcinfo is passed only for caching purposes.
*/
static ArrayType *
@@ -5271,7 +5272,7 @@ array_replace_internal(ArrayType *array,
typalign = typentry->typalign;
/*
- * Detoast values if they are toasted. The replacement value must be
+ * Detoast values if they are toasted. The replacement value must be
* detoasted for insertion into the result array, while detoasting the
* search value only once saves cycles.
*/
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index 5b1afa0d8f..477ccadfb8 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 015875875b..6aba20de85 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -382,79 +382,79 @@ cash_out(PG_FUNCTION_ARGS)
case 0:
if (cs_precedes)
result = psprintf("(%s%s%s)",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("(%s%s%s)",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol);
break;
case 1:
default:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol);
break;
case 2:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
break;
case 3:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol);
break;
case 4:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
break;
}
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 99191e1d90..e0d974eea5 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 06cc0cda0f..073104d4ba 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1358,7 +1358,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
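/*
 * Illustrative sketch (not part of the patch): the two halfway-rounding
 * behaviors the comment contrasts.  Under the usual round-to-nearest-even
 * mode rint(2.5) yields 2, while the add-half-and-truncate idiom used by
 * the integer code rounds the same value away from zero.
 */
#include <math.h>
#include <stdio.h>

static void
show_halfway_rounding(void)
{
	printf("rint(2.5) = %g\n", rint(2.5));	/* 2: round to nearest even */
	printf("floor(2.5 + 0.5) = %g\n", floor(2.5 + 0.5));	/* 3: away from zero */
}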
@@ -1706,7 +1706,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
 * Therefore, we can just ignore the months field. It is not really clear
+ * Therefore, we can just ignore the months field. It is not real clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -2695,7 +2695,7 @@ timetz_zone(PG_FUNCTION_ARGS)
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index d200437e62..7632d1177e 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -351,7 +351,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
@@ -819,10 +819,11 @@ DecodeDateTime(char **field, int *ftype, int nf,
switch (ftype[i])
{
case DTK_DATE:
+
/*
- * Integral julian day with attached time zone?
- * All other forms with JD will be separated into
- * distinct fields, so we handle just this case here.
+ * Integral julian day with attached time zone? All other
+ * forms with JD will be separated into distinct fields, so we
+ * handle just this case here.
*/
if (ptype == DTK_JULIAN)
{
@@ -849,6 +850,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
ptype = 0;
break;
}
+
/*
* Already have a date? Then this might be a time zone name
* with embedded punctuation (e.g. "America/New_York") or a
@@ -1158,17 +1160,18 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (dterr < 0)
return dterr;
}
+
/*
* Is this a YMD or HMS specification, or a year number?
* YMD and HMS are required to be six digits or more, so
 * if it is 5 digits, it is a year. If it is six or
 * more digits, we assume it is YMD or HMS unless no date
- * and no time values have been specified. This forces
- * 6+ digit years to be at the end of the string, or to use
+ * and no time values have been specified. This forces 6+
+ * digit years to be at the end of the string, or to use
* the ISO date specification.
*/
else if (flen >= 6 && (!(fmask & DTK_DATE_M) ||
- !(fmask & DTK_TIME_M)))
+ !(fmask & DTK_TIME_M)))
{
dterr = DecodeNumberField(flen, field[i], fmask,
&tmask, tm,
@@ -2490,7 +2493,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
/*
* Nothing so far; make a decision about what we think the input
- * is. There used to be lots of heuristics here, but the
+ * is. There used to be lots of heuristics here, but the
* consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
@@ -2523,9 +2526,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
{
/*
* We are at the first numeric field of a date that included a
- * textual month name. We want to support the variants
+ * textual month name. We want to support the variants
* MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
* either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
@@ -2654,6 +2657,7 @@ DecodeNumberField(int len, char *str, int fmask,
if (len >= 6)
{
*tmask = DTK_DATE_M;
+
/*
* Start from end and consider first 2 as Day, next 2 as Month,
* and the rest as Year.
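/*
 * Illustrative sketch (not part of the patch): carving DD, MM and the
 * year off the end of a packed digit string, as the comment above
 * describes.  split_ymd_from_end is a hypothetical helper; the real
 * DecodeNumberField() splits the buffer in place.
 */
#include <stdio.h>
#include <string.h>

static void
split_ymd_from_end(const char *digits)
{
	int			len = (int) strlen(digits);

	if (len < 6)
		return;					/* not a packed YMD field */
	printf("year=%.*s month=%.2s day=%.2s\n",
		   len - 4, digits, digits + len - 4, digits + len - 2);
}

/* split_ymd_from_end("20140506") prints year=2014 month=05 day=06 */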
@@ -2890,7 +2894,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * Check for signed hh:mm or hh:mm:ss. If so, process exactly
+ * Check for signed hh:mm or hh:mm:ss. If so, process exactly
* like DTK_TIME case above, plus handling the sign.
*/
if (strchr(field[i] + 1, ':') != NULL &&
@@ -2978,8 +2982,8 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
type = DTK_MONTH;
if (*field[i] == '-')
val2 = -val2;
- if (((double)val * MONTHS_PER_YEAR + val2) > INT_MAX ||
- ((double)val * MONTHS_PER_YEAR + val2) < INT_MIN)
+ if (((double) val * MONTHS_PER_YEAR + val2) > INT_MAX ||
+ ((double) val * MONTHS_PER_YEAR + val2) < INT_MIN)
return DTERR_FIELD_OVERFLOW;
val = val * MONTHS_PER_YEAR + val2;
fval = 0;
@@ -3327,7 +3331,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -4130,7 +4134,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, const datetkn *base, int nel)
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 4b5d65c5ff..a79d5d587c 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
/*
* just compare the two datums. NOTE: just comparing "len" bytes will
* not do the work, because we do not know how these bytes are aligned
- * inside the "Datum". We assume instead that any given datatype is
+ * inside the "Datum". We assume instead that any given datatype is
* consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 68ab0e1906..8c663379ae 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -358,6 +358,7 @@ calculate_toast_table_size(Oid toastrelid)
foreach(lc, indexlist)
{
Relation toastIdxRel;
+
toastIdxRel = relation_open(lfirst_oid(lc),
AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
@@ -689,7 +690,7 @@ pg_size_pretty_numeric(PG_FUNCTION_ARGS)
* This is expected to be used in queries like
* SELECT pg_relation_filenode(oid) FROM pg_class;
* That leads to a couple of choices. We work from the pg_class row alone
- * rather than actually opening each relation, for efficiency. We don't
+ * rather than actually opening each relation, for efficiency. We don't
* fail if we can't find the relation --- some rows might be visible in
* the query's MVCC snapshot even though the relations have been dropped.
* (Note: we could avoid using the catcache, but there's little point
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 515481805a..bbca5d68ba 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -12,11 +12,11 @@
* The overhead required for constraint checking can be high, since examining
* the catalogs to discover the constraints for a given domain is not cheap.
* We have three mechanisms for minimizing this cost:
- * 1. In a nest of domains, we flatten the checking of all the levels
+ * 1. In a nest of domains, we flatten the checking of all the levels
* into just one operation.
- * 2. We cache the list of constraint items in the FmgrInfo struct
+ * 2. We cache the list of constraint items in the FmgrInfo struct
* passed by the caller.
- * 3. If there are CHECK constraints, we cache a standalone ExprContext
+ * 3. If there are CHECK constraints, we cache a standalone ExprContext
* to evaluate them in.
*
*
@@ -311,7 +311,7 @@ domain_recv(PG_FUNCTION_ARGS)
/*
* domain_check - check that a datum satisfies the constraints of a
- * domain. extra and mcxt can be passed if they are available from,
+ * domain. extra and mcxt can be passed if they are available from,
* say, a FmgrInfo structure, or they can be NULL, in which case the
* setup is repeated for each call.
*/
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 774267ed5d..41b3eaa213 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -276,7 +276,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -475,7 +475,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -2054,7 +2054,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index 5b75d34dcb..e1763a3764 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -44,14 +44,14 @@ static char *printTypmod(const char *typname, int32 typmod, Oid typmodout);
* double quoted if it contains funny characters or matches a keyword.
*
* If typemod is NULL then we are formatting a type name in a context where
- * no typemod is available, eg a function argument or result type. This
+ * no typemod is available, eg a function argument or result type. This
* yields a slightly different result from specifying typemod = -1 in some
* cases. Given typemod = -1 we feel compelled to produce an output that
* the parser will interpret as having typemod -1, so that pg_dump will
- * produce CREATE TABLE commands that recreate the original state. But
+ * produce CREATE TABLE commands that recreate the original state. But
* given NULL typemod, we assume that the parser's interpretation of
* typemod doesn't matter, and so we are willing to output a slightly
- * "prettier" representation of the same type. For example, type = bpchar
+ * "prettier" representation of the same type. For example, type = bpchar
* and typemod = NULL gets you "character", whereas typemod = -1 gets you
* "bpchar" --- the former will be interpreted as character(1) by the
* parser, which does not yield typemod -1.
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 2099ad0c30..15bcefd002 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1823,7 +1823,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
/*
* Note: we assume that toupper_l()/tolower_l() will not be so broken
- * as to need guard tests. When using the default collation, we apply
+ * as to need guard tests. When using the default collation, we apply
* the traditional Postgres behavior that forces ASCII-style treatment
* of I/i, but in non-default collations you get exactly what the
* collation says.
@@ -3629,7 +3629,7 @@ do_to_timestamp(text *date_txt, text *fmt,
{
/*
* The month and day field have not been set, so we use the
- * day-of-year field to populate them. Depending on the date mode,
+ * day-of-year field to populate them. Depending on the date mode,
* this field may be interpreted as a Gregorian day-of-year, or an ISO
* week date day-of-year.
*/
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 72cb4e991f..54391fd7ab 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -32,7 +32,10 @@
* Internal routines
*/
-enum path_delim { PATH_NONE, PATH_OPEN, PATH_CLOSED };
+enum path_delim
+{
+ PATH_NONE, PATH_OPEN, PATH_CLOSED
+};
static int point_inside(Point *p, int npts, Point *plist);
static int lseg_crossing(double x, double y, double px, double py);
@@ -1024,7 +1027,7 @@ line_out(PG_FUNCTION_ARGS)
Datum
line_recv(PG_FUNCTION_ARGS)
{
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
LINE *line;
line = (LINE *) palloc(sizeof(LINE));
@@ -1386,7 +1389,7 @@ path_in(PG_FUNCTION_ARGS)
}
base_size = sizeof(path->p[0]) * npts;
- size = offsetof(PATH, p[0]) + base_size;
+ size = offsetof(PATH, p[0]) +base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(path->p[0]) || size <= base_size)
@@ -3448,7 +3451,7 @@ poly_in(PG_FUNCTION_ARGS)
errmsg("invalid input syntax for type polygon: \"%s\"", str)));
base_size = sizeof(poly->p[0]) * npts;
- size = offsetof(POLYGON, p[0]) + base_size;
+ size = offsetof(POLYGON, p[0]) +base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(poly->p[0]) || size <= base_size)
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index 99ca8edbd0..4a2156d466 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
@@ -34,7 +34,7 @@
* In general, GiST needs to search multiple subtrees in order to guarantee
* that all occurrences of the same key have been found. Because of this,
* the estimated cost for scanning the index ought to be higher than the
- * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
+ * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
* ought to be adjusted accordingly --- but until we can generate somewhat
* realistic numbers here, it hardly matters...
*/
diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c
index 5f2a3d361d..d33534ec17 100644
--- a/src/backend/utils/adt/inet_cidr_ntop.c
+++ b/src/backend/utils/adt/inet_cidr_ntop.c
@@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
else
{
- /* Copy src to private buffer. Zero host part. */
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 669355e454..b8f56e5c2e 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
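/*
 * Illustrative sketch (not part of the patch): the sign-based addition
 * overflow test this file uses throughout.  SAMESIGN is modeled on the
 * macro in int.c; the wrapper is hypothetical.  Note the backend builds
 * with -fwrapv, which makes the wrapped addition itself well-defined.
 */
#include <stdbool.h>
#include <stdint.h>

#define SAMESIGN(a,b)	(((a) < 0) == ((b) < 0))

static bool
add_overflows(int32_t arg1, int32_t arg2)
{
	int32_t		result = arg1 + arg2;

	/* different signs can never overflow; same signs must keep the sign */
	return SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1);
}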
@@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
* overflow and thus incorrectly match).
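/*
 * Illustrative sketch (not part of the patch): the companion test for
 * multiplication.  Dividing the (possibly wrapped) product back out must
 * reproduce arg1; the two exceptions called out above are handled first.
 * mul_overflows is a hypothetical wrapper, again relying on -fwrapv.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
mul_overflows(int32_t arg1, int32_t arg2)
{
	int32_t		result = arg1 * arg2;

	if (arg2 == 0)
		return false;			/* 0 * anything cannot overflow */
	if (arg1 == INT32_MIN && arg2 == -1)
		return true;			/* the division below would itself overflow */
	return result / arg2 != arg1;
}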
@@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index e78eb2a202..96146e0fda 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
if (strncmp(ptr, "9223372036854775808", 19) == 0)
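/*
 * Illustrative sketch (not part of the patch): why INT64_MIN gets the
 * explicit strncmp above.  Its magnitude, 9223372036854775808, exceeds
 * +INT64_MAX, so "accumulate digits, then negate" cannot build it;
 * matching the digit string directly sidesteps that.  Assumes ptr points
 * just past a '-' sign, as at this spot in scanint8().
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
parse_int64_min(const char *ptr, int64_t *result)
{
	if (strncmp(ptr, "9223372036854775808", 19) == 0 &&
		!isdigit((unsigned char) ptr[19]))	/* reject longer digit runs */
	{
		*result = INT64_MIN;
		return true;
	}
	return false;
}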
@@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
* will overflow and thus incorrectly match).
@@ -764,7 +764,7 @@ int8dec(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc/int8dec but are used for
- * aggregates that count only non-null values. Since the functions are
+ * aggregates that count only non-null values. Since the functions are
* declared strict, the null checks happen before we ever get here, and all we
* need do is increment the state value. We could actually make these pg_proc
* entries point right at int8inc/int8dec, but then the opr_sanity regression
@@ -824,7 +824,7 @@ int84pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -845,8 +845,8 @@ int84mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -866,7 +866,7 @@ int84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -933,7 +933,7 @@ int48pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -954,8 +954,8 @@ int48mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -975,7 +975,7 @@ int48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -1021,7 +1021,7 @@ int82pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1042,8 +1042,8 @@ int82mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1063,7 +1063,7 @@ int82mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -1130,7 +1130,7 @@ int28pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1151,8 +1151,8 @@ int28mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1172,7 +1172,7 @@ int28mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index c34a1bb50b..16f4eccc06 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -598,10 +598,10 @@ json_lex(JsonLexContext *lex)
/*
* We're not dealing with a string, number, legal
- * punctuation mark, or end of string. The only legal
+ * punctuation mark, or end of string. The only legal
* tokens we might find here are true, false, and null,
* but for error reporting purposes we scan until we see a
- * non-alphanumeric character. That way, we can report
+ * non-alphanumeric character. That way, we can report
* the whole word as an unexpected token, rather than just
* some unintuitive prefix thereof.
*/
@@ -897,12 +897,12 @@ json_lex_string(JsonLexContext *lex)
* begin with a '0'.
*
* (3) An optional decimal part, consisting of a period ('.') followed by
- * one or more digits. (Note: While this part can be omitted
+ * one or more digits. (Note: While this part can be omitted
* completely, it's not OK to have only the decimal point without
* any digits afterwards.)
*
* (4) An optional exponent part, consisting of 'e' or 'E', optionally
- * followed by '+' or '-', followed by one or more digits. (Note:
+ * followed by '+' or '-', followed by one or more digits. (Note:
* As with the decimal part, if 'e' or 'E' is present, it must be
* followed by at least one digit.)
*
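/*
 * Illustrative sketch (not part of the patch): a standalone check for the
 * JSON number grammar described above.  json_number_valid is a
 * hypothetical helper; json_lex_number() applies the same rules against
 * the lexer's buffer.
 */
#include <ctype.h>
#include <stdbool.h>

static bool
json_number_valid(const char *s)
{
	if (*s == '-')
		s++;					/* optional leading minus */
	if (*s == '0')
		s++;					/* a lone zero, or ... */
	else if (isdigit((unsigned char) *s))
		while (isdigit((unsigned char) *s))
			s++;				/* ... digits with no leading zero */
	else
		return false;
	if (*s == '.')
	{							/* (3) decimal part requires a digit */
		s++;
		if (!isdigit((unsigned char) *s))
			return false;
		while (isdigit((unsigned char) *s))
			s++;
	}
	if (*s == 'e' || *s == 'E')
	{							/* (4) exponent part requires a digit */
		s++;
		if (*s == '+' || *s == '-')
			s++;
		if (!isdigit((unsigned char) *s))
			return false;
		while (isdigit((unsigned char) *s))
			s++;
	}
	return *s == '\0';			/* reject trailing garbage */
}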
@@ -980,7 +980,7 @@ json_lex_number(JsonLexContext *lex, char *s, bool *num_err)
}
/*
- * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
+ * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
* here should be considered part of the token for error-reporting
* purposes.
*/
@@ -1805,7 +1805,7 @@ json_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 781ab66ef2..cf5d6f2326 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -22,7 +22,7 @@ typedef struct JsonbInState
{
JsonbParseState *parseState;
JsonbValue *res;
-} JsonbInState;
+} JsonbInState;
static inline Datum jsonb_from_cstring(char *json, int len);
static size_t checkStringLen(size_t len);
@@ -31,9 +31,9 @@ static void jsonb_in_object_end(void *pstate);
static void jsonb_in_array_start(void *pstate);
static void jsonb_in_array_end(void *pstate);
static void jsonb_in_object_field_start(void *pstate, char *fname, bool isnull);
-static void jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal);
+static void jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal);
static void jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype);
-char *JsonbToCString(StringInfo out, char *in, int estimated_len);
+char *JsonbToCString(StringInfo out, char *in, int estimated_len);
/*
* jsonb type input function
@@ -245,7 +245,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull)
JsonbInState *_state = (JsonbInState *) pstate;
JsonbValue v;
- Assert (fname != NULL);
+ Assert(fname != NULL);
v.type = jbvString;
v.val.string.len = checkStringLen(strlen(fname));
v.val.string.val = pnstrdup(fname, v.val.string.len);
@@ -255,7 +255,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull)
}
static void
-jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal)
+jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal)
{
switch (scalarVal->type)
{
@@ -267,8 +267,8 @@ jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal)
break;
case jbvNumeric:
appendStringInfoString(out,
- DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(scalarVal->val.numeric))));
+ DatumGetCString(DirectFunctionCall1(numeric_out,
+ PointerGetDatum(scalarVal->val.numeric))));
break;
case jbvBool:
if (scalarVal->val.boolean)
@@ -296,21 +296,23 @@ jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype)
{
case JSON_TOKEN_STRING:
- Assert (token != NULL);
+ Assert(token != NULL);
v.type = jbvString;
v.val.string.len = checkStringLen(strlen(token));
v.val.string.val = pnstrdup(token, v.val.string.len);
v.estSize += v.val.string.len;
break;
case JSON_TOKEN_NUMBER:
+
/*
- * No need to check size of numeric values, because maximum numeric
- * size is well below the JsonbValue restriction
+ * No need to check size of numeric values, because maximum
+ * numeric size is well below the JsonbValue restriction
*/
- Assert (token != NULL);
+ Assert(token != NULL);
v.type = jbvNumeric;
v.val.numeric = DatumGetNumeric(DirectFunctionCall3(numeric_in, CStringGetDatum(token), 0, -1));
- v.estSize += VARSIZE_ANY(v.val.numeric) + sizeof(JEntry) /* alignment */ ;
+
+			v.estSize += VARSIZE_ANY(v.val.numeric) + sizeof(JEntry);	/* alignment */
break;
case JSON_TOKEN_TRUE:
v.type = jbvBool;
diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c
index 62546ebaf2..9f8c178ab1 100644
--- a/src/backend/utils/adt/jsonb_gin.c
+++ b/src/backend/utils/adt/jsonb_gin.c
@@ -22,12 +22,12 @@
typedef struct PathHashStack
{
- uint32 hash;
+ uint32 hash;
struct PathHashStack *parent;
-} PathHashStack;
+} PathHashStack;
static text *make_text_key(const char *str, int len, char flag);
-static text *make_scalar_key(const JsonbValue * scalarVal, char flag);
+static text *make_scalar_key(const JsonbValue *scalarVal, char flag);
/*
*
@@ -97,14 +97,14 @@ gin_extract_jsonb(PG_FUNCTION_ARGS)
* JsonbExistsStrategyNumber. Our definition of existence does not
* allow for checking the existence of a non-jbvString element (just
* like the definition of the underlying operator), because the
- * operator takes a text rhs argument (which is taken as a proxy for an
- * equivalent Jsonb string).
+ * operator takes a text rhs argument (which is taken as a proxy for
+ * an equivalent Jsonb string).
*
* The way existence is represented does not preclude an alternative
* existence operator, that takes as its rhs value an arbitrarily
- * internally-typed Jsonb. The only reason that isn't the case here is
- * that the existence operator is only really intended to determine if
- * an object has a certain key (object pair keys are of course
+ * internally-typed Jsonb. The only reason that isn't the case here
+ * is that the existence operator is only really intended to determine
+ * if an object has a certain key (object pair keys are of course
* invariably strings), which is extended to jsonb arrays. You could
* think of the default Jsonb definition of existence as being
* equivalent to a definition where all types of scalar array elements
@@ -116,11 +116,11 @@ gin_extract_jsonb(PG_FUNCTION_ARGS)
* JsonbExistsStrategyNumber, since we know that keys are strings for
* both objects and arrays, and don't have to further account for type
* mismatch. Not having to set the reset flag makes it less than
- * tempting to tighten up the definition of existence to preclude array
- * elements entirely, which would arguably be a simpler alternative.
- * In any case the infrastructure used to implement the existence
- * operator could trivially support this hypothetical, slightly
- * distinct definition of existence.
+ * tempting to tighten up the definition of existence to preclude
+ * array elements entirely, which would arguably be a simpler
+ * alternative. In any case the infrastructure used to implement the
+ * existence operator could trivially support this hypothetical,
+ * slightly distinct definition of existence.
*/
switch (r)
{
@@ -290,8 +290,10 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
{
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
GinTernaryValue res = GIN_TRUE;
@@ -299,7 +301,7 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
if (strategy == JsonbContainsStrategyNumber)
{
- bool has_maybe = false;
+ bool has_maybe = false;
/*
* All extracted keys must be present. Combination of GIN_MAYBE and
@@ -323,8 +325,9 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
/*
* Index doesn't have information about correspondence of Jsonb keys
* and values (as distinct from GIN keys, which a key/value pair is
- * stored as), so invariably we recheck. This is also reflected in how
- * GIN_MAYBE is given in response to there being no GIN_MAYBE input.
+ * stored as), so invariably we recheck. This is also reflected in
+ * how GIN_MAYBE is given in response to there being no GIN_MAYBE
+ * input.
*/
if (!has_maybe && res == GIN_TRUE)
res = GIN_MAYBE;
@@ -379,8 +382,10 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res = true;
@@ -390,13 +395,13 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS)
elog(ERROR, "unrecognized strategy number: %d", strategy);
/*
- * jsonb_hash_ops index doesn't have information about correspondence
- * of Jsonb keys and values (as distinct from GIN keys, which a
- * key/value pair is stored as), so invariably we recheck. Besides,
- * there are some special rules around the containment of raw scalar
- * arrays and regular arrays that are not represented here. However,
- * if all of the keys are not present, that's sufficient reason to
- * return false and finish immediately.
+ * jsonb_hash_ops index doesn't have information about correspondence of
+ * Jsonb keys and values (as distinct from GIN keys, which a key/value
+ * pair is stored as), so invariably we recheck. Besides, there are some
+ * special rules around the containment of raw scalar arrays and regular
+ * arrays that are not represented here. However, if all of the keys are
+ * not present, that's sufficient reason to return false and finish
+ * immediately.
*/
*recheck = true;
for (i = 0; i < nkeys; i++)
@@ -416,12 +421,14 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS)
{
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
GinTernaryValue res = GIN_TRUE;
- int32 i;
- bool has_maybe = false;
+ int32 i;
+ bool has_maybe = false;
if (strategy != JsonbContainsStrategyNumber)
elog(ERROR, "unrecognized strategy number: %d", strategy);
@@ -447,10 +454,10 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS)
/*
* jsonb_hash_ops index doesn't have information about correspondence of
- * Jsonb keys and values (as distinct from GIN keys, which for this opclass
- * are a hash of a pair, or a hash of just an element), so invariably we
- * recheck. This is also reflected in how GIN_MAYBE is given in response
- * to there being no GIN_MAYBE input.
+ * Jsonb keys and values (as distinct from GIN keys, which for this
+ * opclass are a hash of a pair, or a hash of just an element), so
+ * invariably we recheck. This is also reflected in how GIN_MAYBE is
+ * given in response to there being no GIN_MAYBE input.
*/
if (!has_maybe && res == GIN_TRUE)
res = GIN_MAYBE;
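The combine logic above reduces to a small ternary fold: any definite miss wins, and even a clean all-TRUE match is demoted to MAYBE because the index alone cannot prove containment. A sketch with stand-in names:

#include <stdbool.h>

typedef enum { TV_FALSE, TV_TRUE, TV_MAYBE } TernaryValue;

static TernaryValue
combine_contains(const TernaryValue *check, int nkeys)
{
	TernaryValue res = TV_TRUE;
	bool		has_maybe = false;
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		if (check[i] == TV_FALSE)
			return TV_FALSE;	/* a required key is definitely absent */
		if (check[i] == TV_MAYBE)
		{
			res = TV_MAYBE;
			has_maybe = true;
		}
	}

	/* No MAYBE inputs: an index match still is not proof, so recheck */
	if (!has_maybe && res == TV_TRUE)
		res = TV_MAYBE;
	return res;
}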
@@ -488,7 +495,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
while ((r = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
{
- PathHashStack *tmp;
+ PathHashStack *tmp;
if (i >= total)
{
@@ -513,10 +520,10 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
/*
* We pass forward hashes from previous container nesting
* levels so that nested arrays with an outermost nested
- * object will have element hashes mixed with the outermost
- * key. It's also somewhat useful to have nested objects
- * innermost values have hashes that are a function of not
- * just their own key, but outer keys too.
+ * object will have element hashes mixed with the
+ * outermost key.  It's also somewhat useful for nested
+ * objects' innermost values to have hashes that are a
+ * function of not just their own keys, but outer keys too.
*/
stack->hash = tmp->hash;
}
@@ -526,7 +533,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
 * At the least-nested level, initialize with a stable
 * container type proxy value
*/
- stack->hash = (r == WJB_BEGIN_ARRAY)? JB_FARRAY:JB_FOBJECT;
+ stack->hash = (r == WJB_BEGIN_ARRAY) ? JB_FARRAY : JB_FOBJECT;
}
stack->parent = tmp;
break;
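A stripped-down model of that PathHashStack propagation, with an FNV-1a stand-in for hash_any(): each pushed level is seeded with its parent's hash, so an inner key's hash is a function of every outer key on the path.

#include <stdint.h>
#include <string.h>

typedef struct PathStack
{
	uint32_t	hash;
	struct PathStack *parent;
} PathStack;

static uint32_t
hash_bytes(const char *s, size_t len)	/* stand-in for hash_any() */
{
	uint32_t	h = 2166136261u;

	while (len-- > 0)
		h = (h ^ (uint8_t) *s++) * 16777619u;
	return h;
}

/* Entering a container: inherit the parent's accumulated path hash,
 * or start from a container-type proxy value at the outermost level. */
static void
push_level(PathStack *child, PathStack *parent, uint32_t type_proxy)
{
	child->hash = parent ? parent->hash : type_proxy;
	child->parent = parent;
}

/* Mix a key into the current level: rotate left, then fold the key in. */
static void
mix_key(PathStack *level, const char *key)
{
	level->hash = (level->hash << 1) | (level->hash >> 31);
	level->hash ^= hash_bytes(key, strlen(key));
}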
@@ -607,7 +614,7 @@ make_text_key(const char *str, int len, char flag)
* Create a textual representation of a jsonbValue for GIN storage.
*/
static text *
-make_scalar_key(const JsonbValue * scalarVal, char flag)
+make_scalar_key(const JsonbValue *scalarVal, char flag)
{
text *item;
char *cstr;
@@ -621,6 +628,7 @@ make_scalar_key(const JsonbValue * scalarVal, char flag)
item = make_text_key(scalarVal->val.boolean ? "t" : "f", 1, flag);
break;
case jbvNumeric:
+
/*
 * A normalized textual representation, free of trailing
 * zeroes, is required.
diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c
index cfddccbbbb..38bd567673 100644
--- a/src/backend/utils/adt/jsonb_op.c
+++ b/src/backend/utils/adt/jsonb_op.c
@@ -69,7 +69,7 @@ jsonb_exists_any(PG_FUNCTION_ARGS)
if (findJsonbValueFromSuperHeader(VARDATA(jb),
JB_FOBJECT | JB_FARRAY,
plowbound,
- arrKey->val.array.elems + i) != NULL)
+ arrKey->val.array.elems + i) != NULL)
PG_RETURN_BOOL(true);
}
@@ -103,7 +103,7 @@ jsonb_exists_all(PG_FUNCTION_ARGS)
if (findJsonbValueFromSuperHeader(VARDATA(jb),
JB_FOBJECT | JB_FARRAY,
plowbound,
- arrKey->val.array.elems + i) == NULL)
+ arrKey->val.array.elems + i) == NULL)
PG_RETURN_BOOL(false);
}
@@ -116,7 +116,8 @@ jsonb_contains(PG_FUNCTION_ARGS)
Jsonb *val = PG_GETARG_JSONB(0);
Jsonb *tmpl = PG_GETARG_JSONB(1);
- JsonbIterator *it1, *it2;
+ JsonbIterator *it1,
+ *it2;
if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) ||
JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl))
@@ -135,7 +136,8 @@ jsonb_contained(PG_FUNCTION_ARGS)
Jsonb *tmpl = PG_GETARG_JSONB(0);
Jsonb *val = PG_GETARG_JSONB(1);
- JsonbIterator *it1, *it2;
+ JsonbIterator *it1,
+ *it2;
if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) ||
JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl))
@@ -209,7 +211,6 @@ jsonb_le(PG_FUNCTION_ARGS)
Datum
jsonb_ge(PG_FUNCTION_ARGS)
{
-
Jsonb *jba = PG_GETARG_JSONB(0);
Jsonb *jbb = PG_GETARG_JSONB(1);
bool res;
@@ -270,7 +271,7 @@ jsonb_hash(PG_FUNCTION_ARGS)
{
switch (r)
{
- /* Rotation is left to JsonbHashScalarValue() */
+ /* Rotation is left to JsonbHashScalarValue() */
case WJB_BEGIN_ARRAY:
hash ^= JB_FARRAY;
break;
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 1ac145b1cd..1caaa4a9cc 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -45,10 +45,10 @@
*/
typedef struct convertLevel
{
- uint32 i; /* Iterates once per element, or once per pair */
- uint32 *header; /* Pointer to current container header */
- JEntry *meta; /* This level's metadata */
- char *begin; /* Pointer into convertState.buffer */
+ uint32 i; /* Iterates once per element, or once per pair */
+ uint32 *header; /* Pointer to current container header */
+ JEntry *meta; /* This level's metadata */
+ char *begin; /* Pointer into convertState.buffer */
} convertLevel;
/*
@@ -57,41 +57,41 @@ typedef struct convertLevel
typedef struct convertState
{
/* Preallocated buffer in which to form varlena/Jsonb value */
- Jsonb *buffer;
+ Jsonb *buffer;
/* Pointer into buffer */
- char *ptr;
+ char *ptr;
 /* State for each nesting level */
- convertLevel *allState, /* Overall state array */
- *contPtr; /* Cur container pointer (in allState) */
+ convertLevel *allState, /* Overall state array */
+ *contPtr; /* Cur container pointer (in allState) */
/* Current size of buffer containing allState array */
- Size levelSz;
-
-} convertState;
-
-static int compareJsonbScalarValue(JsonbValue * a, JsonbValue * b);
-static int lexicalCompareJsonbStringValue(const void *a, const void *b);
-static Size convertJsonb(JsonbValue * val, Jsonb* buffer);
-static inline short addPaddingInt(convertState * cstate);
-static void walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
- uint32 nestlevel);
-static void putJsonbValueConversion(convertState * cstate, JsonbValue * val,
- uint32 flags, uint32 level);
-static void putScalarConversion(convertState * cstate, JsonbValue * scalarVal,
- uint32 level, uint32 i);
-static void iteratorFromContainerBuf(JsonbIterator * it, char *buffer);
-static bool formIterIsContainer(JsonbIterator ** it, JsonbValue * val,
- JEntry * ent, bool skipNested);
-static JsonbIterator *freeAndGetParent(JsonbIterator * it);
-static JsonbParseState *pushState(JsonbParseState ** pstate);
-static void appendKey(JsonbParseState * pstate, JsonbValue * scalarVal);
-static void appendValue(JsonbParseState * pstate, JsonbValue * scalarVal);
-static void appendElement(JsonbParseState * pstate, JsonbValue * scalarVal);
-static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg);
-static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
-static void uniqueifyJsonbObject(JsonbValue * object);
-static void uniqueifyJsonbArray(JsonbValue * array);
+ Size levelSz;
+
+} convertState;
+
+static int compareJsonbScalarValue(JsonbValue *a, JsonbValue *b);
+static int lexicalCompareJsonbStringValue(const void *a, const void *b);
+static Size convertJsonb(JsonbValue *val, Jsonb *buffer);
+static inline short addPaddingInt(convertState *cstate);
+static void walkJsonbValueConversion(JsonbValue *val, convertState *cstate,
+ uint32 nestlevel);
+static void putJsonbValueConversion(convertState *cstate, JsonbValue *val,
+ uint32 flags, uint32 level);
+static void putScalarConversion(convertState *cstate, JsonbValue *scalarVal,
+ uint32 level, uint32 i);
+static void iteratorFromContainerBuf(JsonbIterator *it, char *buffer);
+static bool formIterIsContainer(JsonbIterator **it, JsonbValue *val,
+ JEntry *ent, bool skipNested);
+static JsonbIterator *freeAndGetParent(JsonbIterator *it);
+static JsonbParseState *pushState(JsonbParseState **pstate);
+static void appendKey(JsonbParseState *pstate, JsonbValue *scalarVal);
+static void appendValue(JsonbParseState *pstate, JsonbValue *scalarVal);
+static void appendElement(JsonbParseState *pstate, JsonbValue *scalarVal);
+static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg);
+static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
+static void uniqueifyJsonbObject(JsonbValue *object);
+static void uniqueifyJsonbArray(JsonbValue *array);
/*
* Turn an in-memory JsonbValue into a Jsonb for on-disk storage.
@@ -107,7 +107,7 @@ static void uniqueifyJsonbArray(JsonbValue * array);
* inconvenient to deal with a great amount of other state.
*/
Jsonb *
-JsonbValueToJsonb(JsonbValue * val)
+JsonbValueToJsonb(JsonbValue *val)
{
Jsonb *out;
Size sz;
@@ -164,7 +164,7 @@ int
compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
{
JsonbIterator *ita,
- *itb;
+ *itb;
int res = 0;
ita = JsonbIteratorInit(a);
@@ -182,9 +182,9 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
/*
* To a limited extent we'll redundantly iterate over an array/object
- * while re-performing the same test without any reasonable expectation
- * of the same container types having differing lengths (as when we
- * process a WJB_BEGIN_OBJECT, and later the corresponding
+ * while re-performing the same test without any reasonable
+ * expectation of the same container types having differing lengths
+ * (as when we process a WJB_BEGIN_OBJECT, and later the corresponding
* WJB_END_OBJECT), but no matter.
*/
if (ra == rb)
@@ -208,9 +208,10 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
res = compareJsonbScalarValue(&va, &vb);
break;
case jbvArray:
+
/*
- * This could be a "raw scalar" pseudo array. That's a
- * special case here though, since we still want the
+ * This could be a "raw scalar" pseudo array. That's
+ * a special case here though, since we still want the
* general type-based comparisons to apply, and as far
* as we're concerned a pseudo array is just a scalar.
*/
@@ -258,12 +259,14 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
while (ita != NULL)
{
JsonbIterator *i = ita->parent;
+
pfree(ita);
ita = i;
}
while (itb != NULL)
{
JsonbIterator *i = itb->parent;
+
pfree(itb);
itb = i;
}
@@ -313,12 +316,12 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
*/
JsonbValue *
findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
- uint32 *lowbound, JsonbValue * key)
+ uint32 *lowbound, JsonbValue *key)
{
- uint32 superheader = *(uint32 *) sheader;
- JEntry *array = (JEntry *) (sheader + sizeof(uint32));
- int count = (superheader & JB_CMASK);
- JsonbValue *result = palloc(sizeof(JsonbValue));
+ uint32 superheader = *(uint32 *) sheader;
+ JEntry *array = (JEntry *) (sheader + sizeof(uint32));
+ int count = (superheader & JB_CMASK);
+ JsonbValue *result = palloc(sizeof(JsonbValue));
Assert((flags & ~(JB_FARRAY | JB_FOBJECT)) == 0);
@@ -347,6 +350,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e)));
+
result->estSize = 2 * sizeof(JEntry) +
VARSIZE_ANY(result->val.numeric);
}
@@ -381,8 +385,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
JsonbValue candidate;
/*
- * Note how we compensate for the fact that we're iterating through
- * pairs (not entries) throughout.
+ * Note how we compensate for the fact that we're iterating
+ * through pairs (not entries) throughout.
*/
stopMiddle = stopLow + (count - stopLow) / 2;
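The doubling is easier to see in isolation: keys live at even entry indexes and values at the following odd ones, while the binary search itself counts in pairs. A hypothetical sketch over an array of C strings:

/* Return the value-slot index for key, or -1 if absent.  entries[] holds
 * npairs sorted key/value pairs: key i at 2*i, its value at 2*i + 1. */
static int
find_pair(const char *const *entries, int npairs, const char *key,
		  int (*cmp) (const char *, const char *))
{
	int			stopLow = 0;
	int			stopHigh = npairs;

	while (stopLow < stopHigh)
	{
		int			stopMiddle = stopLow + (stopHigh - stopLow) / 2;
		int			c = cmp(key, entries[stopMiddle * 2]);	/* probe key slot */

		if (c == 0)
			return stopMiddle * 2 + 1;
		else if (c > 0)
			stopLow = stopMiddle + 1;
		else
			stopHigh = stopMiddle;
	}
	return -1;
}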
@@ -419,6 +423,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*v)));
+
result->estSize = 2 * sizeof(JEntry) +
VARSIZE_ANY(result->val.numeric);
}
@@ -431,8 +436,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
else
{
/*
- * See header comments to understand why this never happens
- * with arrays
+ * See header comments to understand why this never
+ * happens with arrays
*/
result->type = jbvBinary;
result->val.binary.data = data + INTALIGN(JBE_OFF(*v));
@@ -508,6 +513,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i)
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e)));
+
result->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(result->val.numeric);
}
else if (JBE_ISBOOL(*e))
@@ -541,7 +547,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i)
* "raw scalar" pseudo array to append that.
*/
JsonbValue *
-pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
+pushJsonbValue(JsonbParseState **pstate, int seq, JsonbValue *scalarVal)
{
JsonbValue *result = NULL;
@@ -555,7 +561,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->contVal.estSize = 3 * sizeof(JEntry);
(*pstate)->contVal.val.array.nElems = 0;
(*pstate)->contVal.val.array.rawScalar = (scalarVal &&
- scalarVal->val.array.rawScalar);
+ scalarVal->val.array.rawScalar);
if (scalarVal && scalarVal->val.array.nElems > 0)
{
/* Assume that this array is still really a scalar */
@@ -567,7 +573,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->size = 4;
}
(*pstate)->contVal.val.array.elems = palloc(sizeof(JsonbValue) *
- (*pstate)->size);
+ (*pstate)->size);
break;
case WJB_BEGIN_OBJECT:
Assert(!scalarVal);
@@ -578,7 +584,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->contVal.val.object.nPairs = 0;
(*pstate)->size = 4;
(*pstate)->contVal.val.object.pairs = palloc(sizeof(JsonbPair) *
- (*pstate)->size);
+ (*pstate)->size);
break;
case WJB_KEY:
Assert(scalarVal->type == jbvString);
@@ -674,9 +680,9 @@ JsonbIteratorInit(JsonbSuperHeader sheader)
* garbage.
*/
int
-JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
+JsonbIteratorNext(JsonbIterator **it, JsonbValue *val, bool skipNested)
{
- JsonbIterState state;
+ JsonbIterState state;
/* Guard against stack overflow due to overly complex Jsonb */
check_stack_depth();
@@ -694,9 +700,10 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/* Set v to array on first array call */
val->type = jbvArray;
val->val.array.nElems = (*it)->nElems;
+
/*
- * v->val.array.elems is not actually set, because we aren't doing a
- * full conversion
+ * v->val.array.elems is not actually set, because we aren't doing
+ * a full conversion
*/
val->val.array.rawScalar = (*it)->isScalar;
(*it)->i = 0;
@@ -709,8 +716,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
if ((*it)->i >= (*it)->nElems)
{
/*
- * All elements within array already processed. Report this to
- * caller, and give it back original parent iterator (which
+ * All elements within array already processed. Report this
+ * to caller, and give it back original parent iterator (which
* independently tracks iteration progress at its level of
* nesting).
*/
@@ -741,6 +748,7 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/* Set v to object on first object call */
val->type = jbvObject;
val->val.object.nPairs = (*it)->nElems;
+
/*
* v->val.object.pairs is not actually set, because we aren't
* doing a full conversion
@@ -756,9 +764,9 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
{
/*
* All pairs within object already processed. Report this to
- * caller, and give it back original containing iterator (which
- * independently tracks iteration progress at its level of
- * nesting).
+ * caller, and give it back original containing iterator
+ * (which independently tracks iteration progress at its level
+ * of nesting).
*/
*it = freeAndGetParent(*it);
return WJB_END_OBJECT;
@@ -787,8 +795,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/*
* Value may be a container, in which case we recurse with new,
- * child iterator. If it is, don't bother !skipNested callers with
- * dealing with the jbvBinary representation.
+ * child iterator. If it is, don't bother !skipNested callers
+ * with dealing with the jbvBinary representation.
*/
if (formIterIsContainer(it, val, &(*it)->meta[((*it)->i++) * 2 + 1],
skipNested))
@@ -815,17 +823,18 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
* We determine if mContained is contained within val.
*/
bool
-JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
+JsonbDeepContains(JsonbIterator **val, JsonbIterator **mContained)
{
uint32 rval,
rcont;
JsonbValue vval,
vcontained;
+
/*
* Guard against stack overflow due to overly complex Jsonb.
*
- * Functions called here independently take this precaution, but that might
- * not be sufficient since this is also a recursive function.
+ * Functions called here independently take this precaution, but that
+ * might not be sufficient since this is also a recursive function.
*/
check_stack_depth();
@@ -898,7 +907,8 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
else
{
/* Nested container value (object or array) */
- JsonbIterator *nestval, *nestContained;
+ JsonbIterator *nestval,
+ *nestContained;
Assert(lhsVal->type == jbvBinary);
Assert(vcontained.type == jbvBinary);
@@ -922,8 +932,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
* In other words, the mapping of container nodes in the rhs
* "vcontained" Jsonb to internal nodes on the lhs is
* injective, and parent-child edges on the rhs must be mapped
- * to parent-child edges on the lhs to satisfy the condition of
- * containment (plus of course the mapped nodes must be equal).
+ * to parent-child edges on the lhs to satisfy the condition
+ * of containment (plus of course the mapped nodes must be
+ * equal).
*/
if (!JsonbDeepContains(&nestval, &nestContained))
return false;
@@ -942,10 +953,10 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
* arrays.
*
* A raw scalar may contain another raw scalar, and an array may
- * contain a raw scalar, but a raw scalar may not contain an array. We
- * don't do something like this for the object case, since objects can
- * only contain pairs, never raw scalars (a pair is represented by an
- * rhs object argument with a single contained pair).
+ * contain a raw scalar, but a raw scalar may not contain an array.
+ * We don't do something like this for the object case, since objects
+ * can only contain pairs, never raw scalars (a pair is represented by
+ * an rhs object argument with a single contained pair).
*/
if (vval.val.array.rawScalar && !vcontained.val.array.rawScalar)
return false;
@@ -956,8 +967,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
rcont = JsonbIteratorNext(mContained, &vcontained, true);
/*
- * When we get through caller's rhs "is it contained within?" array
- * without failing to find one of its values, it's contained.
+ * When we get through caller's rhs "is it contained within?"
+ * array without failing to find one of its values, it's
+ * contained.
*/
if (rcont == WJB_END_ARRAY)
return true;
@@ -989,7 +1001,7 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
for (i = 0; i < nLhsElems; i++)
{
- /* Store all lhs elements in temp array*/
+ /* Store all lhs elements in temp array */
rcont = JsonbIteratorNext(val, &vval, true);
Assert(rcont == WJB_ELEM);
@@ -1009,8 +1021,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
for (i = 0; i < nLhsElems; i++)
{
/* Nested container value (object or array) */
- JsonbIterator *nestval, *nestContained;
- bool contains;
+ JsonbIterator *nestval,
+ *nestContained;
+ bool contains;
nestval = JsonbIteratorInit(lhsConts[i].val.binary.data);
nestContained = JsonbIteratorInit(vcontained.val.binary.data);
@@ -1069,9 +1082,9 @@ arrayToJsonbSortedArray(ArrayType *array)
/*
* A text array uses at least eight bytes per element, so any overflow in
* "key_count * sizeof(JsonbPair)" is small enough for palloc() to catch.
- * However, credible improvements to the array format could invalidate that
- * assumption. Therefore, use an explicit check rather than relying on
- * palloc() to complain.
+ * However, credible improvements to the array format could invalidate
+ * that assumption. Therefore, use an explicit check rather than relying
+ * on palloc() to complain.
*/
if (elem_count > JSONB_MAX_PAIRS)
ereport(ERROR,
@@ -1108,9 +1121,9 @@ arrayToJsonbSortedArray(ArrayType *array)
* flags.
*/
void
-JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
+JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash)
{
- int tmp;
+ int tmp;
/*
* Combine hash values of successive keys, values and elements by rotating
@@ -1131,11 +1144,11 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
case jbvNumeric:
/* Must be unaffected by trailing zeroes */
tmp = DatumGetInt32(DirectFunctionCall1(hash_numeric,
- NumericGetDatum(scalarVal->val.numeric)));
+ NumericGetDatum(scalarVal->val.numeric)));
*hash ^= tmp;
return;
case jbvBool:
- *hash ^= scalarVal->val.boolean? 0x02:0x04;
+ *hash ^= scalarVal->val.boolean ? 0x02 : 0x04;
return;
default:
elog(ERROR, "invalid jsonb scalar type");
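Per this comment and the caller's note that "Rotation is left to JsonbHashScalarValue()", order-sensitivity comes from rotating the running hash before each xor; in miniature:

#include <stdint.h>

/* Fold one component hash into the running hash, order-sensitively. */
static void
combine_hash(uint32_t *hash, uint32_t component)
{
	*hash = (*hash << 1) | (*hash >> 31);	/* rotate left one bit */
	*hash ^= component;
}

Plain xor alone is commutative, so without the rotation {"a": "b"} and {"b": "a"} would hash identically.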
@@ -1150,7 +1163,7 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
* within a single jsonb.
*/
static int
-compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar)
+compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
{
if (aScalar->type == bScalar->type)
{
@@ -1162,8 +1175,8 @@ compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar)
return lengthCompareJsonbStringValue(aScalar, bScalar, NULL);
case jbvNumeric:
return DatumGetInt32(DirectFunctionCall2(numeric_cmp,
- PointerGetDatum(aScalar->val.numeric),
- PointerGetDatum(bScalar->val.numeric)));
+ PointerGetDatum(aScalar->val.numeric),
+ PointerGetDatum(bScalar->val.numeric)));
case jbvBool:
if (aScalar->val.boolean != bScalar->val.boolean)
return (aScalar->val.boolean > bScalar->val.boolean) ? 1 : -1;
@@ -1201,10 +1214,10 @@ lexicalCompareJsonbStringValue(const void *a, const void *b)
* sufficiently large to fit the value
*/
static Size
-convertJsonb(JsonbValue * val, Jsonb *buffer)
+convertJsonb(JsonbValue *val, Jsonb *buffer)
{
- convertState state;
- Size len;
+ convertState state;
+ Size len;
/* Should not already have binary representation */
Assert(val->type != jbvBinary);
@@ -1232,7 +1245,7 @@ convertJsonb(JsonbValue * val, Jsonb *buffer)
* token (in a manner similar to generic iteration).
*/
static void
-walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
+walkJsonbValueConversion(JsonbValue *val, convertState *cstate,
uint32 nestlevel)
{
int i;
@@ -1290,9 +1303,11 @@ walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
* access to conversion buffer.
*/
static inline
-short addPaddingInt(convertState * cstate)
+short
+addPaddingInt(convertState *cstate)
{
- short padlen, p;
+ short padlen,
+ p;
padlen = INTALIGN(cstate->ptr - VARDATA(cstate->buffer)) -
(cstate->ptr - VARDATA(cstate->buffer));
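INTALIGN() rounds an offset up to the next multiple of 4; modeled without the PostgreSQL macros, the padding step amounts to:

#include <stddef.h>

/* Advance *ptr to the next 4-byte boundary relative to base,
 * zero-filling the gap; returns the number of pad bytes written. */
static short
add_padding_int(char **ptr, const char *base)
{
	size_t		off = (size_t) (*ptr - base);
	short		padlen = (short) (((off + 3) & ~(size_t) 3) - off);
	short		p;

	for (p = 0; p < padlen; p++)
		*(*ptr)++ = '\0';
	return padlen;
}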
@@ -1320,14 +1335,14 @@ short addPaddingInt(convertState * cstate)
* and the end (i.e. there is one call per sequential processing WJB_* token).
*/
static void
-putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
+putJsonbValueConversion(convertState *cstate, JsonbValue *val, uint32 flags,
uint32 level)
{
if (level == cstate->levelSz)
{
cstate->levelSz *= 2;
cstate->allState = repalloc(cstate->allState,
- sizeof(convertLevel) * cstate->levelSz);
+ sizeof(convertLevel) * cstate->levelSz);
}
cstate->contPtr = cstate->allState + level;
@@ -1385,9 +1400,9 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
}
else if (flags & (WJB_END_ARRAY | WJB_END_OBJECT))
{
- convertLevel *prevPtr; /* Prev container pointer */
- uint32 len,
- i;
+ convertLevel *prevPtr; /* Prev container pointer */
+ uint32 len,
+ i;
Assert(((flags & WJB_END_ARRAY) && val->type == jbvArray) ||
((flags & WJB_END_OBJECT) && val->type == jbvObject));
@@ -1443,10 +1458,10 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
* metadata peculiar to each scalar type.
*/
static void
-putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level,
+putScalarConversion(convertState *cstate, JsonbValue *scalarVal, uint32 level,
uint32 i)
{
- int numlen;
+ int numlen;
short padlen;
cstate->contPtr = cstate->allState + level;
@@ -1509,7 +1524,7 @@ putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level,
* container type.
*/
static void
-iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
+iteratorFromContainerBuf(JsonbIterator *it, JsonbSuperHeader sheader)
{
uint32 superheader = *(uint32 *) sheader;
@@ -1531,6 +1546,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
Assert(!it->isScalar || it->nElems == 1);
break;
case JB_FOBJECT:
+
/*
* Offset reflects that nElems indicates JsonbPairs in an object.
* Each key and each value contain Jentry metadata just the same.
@@ -1562,7 +1578,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
* anywhere).
*/
static bool
-formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
+formIterIsContainer(JsonbIterator **it, JsonbValue *val, JEntry *ent,
bool skipNested)
{
if (JBE_ISNULL(*ent))
@@ -1585,6 +1601,7 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
{
val->type = jbvNumeric;
val->val.numeric = (Numeric) ((*it)->dataProper + INTALIGN(JBE_OFF(*ent)));
+
val->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(val->val.numeric);
return false;
@@ -1609,8 +1626,8 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
else
{
/*
- * Must be container type, so setup caller's iterator to point to that,
- * and return indication of that.
+	 * Must be a container type, so set up the caller's iterator to
+	 * point to that, and return an indication of that.
*
* Get child iterator.
*/
@@ -1627,11 +1644,11 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
}
/*
- * JsonbIteratorNext() worker: Return parent, while freeing memory for current
+ * JsonbIteratorNext() worker: Return parent, while freeing memory for current
* iterator
*/
static JsonbIterator *
-freeAndGetParent(JsonbIterator * it)
+freeAndGetParent(JsonbIterator *it)
{
JsonbIterator *v = it->parent;
@@ -1643,7 +1660,7 @@ freeAndGetParent(JsonbIterator * it)
* pushJsonbValue() worker: Iteration-like forming of Jsonb
*/
static JsonbParseState *
-pushState(JsonbParseState ** pstate)
+pushState(JsonbParseState **pstate)
{
JsonbParseState *ns = palloc(sizeof(JsonbParseState));
@@ -1655,7 +1672,7 @@ pushState(JsonbParseState ** pstate)
* pushJsonbValue() worker: Append a pair key to state when generating a Jsonb
*/
static void
-appendKey(JsonbParseState * pstate, JsonbValue * string)
+appendKey(JsonbParseState *pstate, JsonbValue *string)
{
JsonbValue *object = &pstate->contVal;
@@ -1672,7 +1689,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string)
{
pstate->size *= 2;
object->val.object.pairs = repalloc(object->val.object.pairs,
- sizeof(JsonbPair) * pstate->size);
+ sizeof(JsonbPair) * pstate->size);
}
object->val.object.pairs[object->val.object.nPairs].key = *string;
@@ -1686,7 +1703,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string)
* Jsonb
*/
static void
-appendValue(JsonbParseState * pstate, JsonbValue * scalarVal)
+appendValue(JsonbParseState *pstate, JsonbValue *scalarVal)
{
JsonbValue *object = &pstate->contVal;
@@ -1700,7 +1717,7 @@ appendValue(JsonbParseState * pstate, JsonbValue * scalarVal)
* pushJsonbValue() worker: Append an element to state when generating a Jsonb
*/
static void
-appendElement(JsonbParseState * pstate, JsonbValue * scalarVal)
+appendElement(JsonbParseState *pstate, JsonbValue *scalarVal)
{
JsonbValue *array = &pstate->contVal;
@@ -1716,7 +1733,7 @@ appendElement(JsonbParseState * pstate, JsonbValue * scalarVal)
{
pstate->size *= 2;
array->val.array.elems = repalloc(array->val.array.elems,
- sizeof(JsonbValue) * pstate->size);
+ sizeof(JsonbValue) * pstate->size);
}
array->val.array.elems[array->val.array.nElems++] = *scalarVal;
@@ -1797,7 +1814,7 @@ lengthCompareJsonbPair(const void *a, const void *b, void *binequal)
* Sort and unique-ify pairs in JsonbValue object
*/
static void
-uniqueifyJsonbObject(JsonbValue * object)
+uniqueifyJsonbObject(JsonbValue *object)
{
bool hasNonUniq = false;
@@ -1838,15 +1855,15 @@ uniqueifyJsonbObject(JsonbValue * object)
* Sorting uses internal ordering.
*/
static void
-uniqueifyJsonbArray(JsonbValue * array)
+uniqueifyJsonbArray(JsonbValue *array)
{
- bool hasNonUniq = false;
+ bool hasNonUniq = false;
Assert(array->type == jbvArray);
/*
- * Actually sort values, determining if any were equal on the basis of full
- * binary equality (rather than just having the same string length).
+ * Actually sort values, determining if any were equal on the basis of
+ * full binary equality (rather than just having the same string length).
*/
if (array->val.array.nElems > 1)
qsort_arg(array->val.array.elems, array->val.array.nElems,
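The sort-then-unique pass is the classic one; a generic analogue over ints (the real code uses qsort_arg() so the comparator can raise a binary-equality flag threaded through as the extra argument):

#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
	int			ia = *(const int *) a;
	int			ib = *(const int *) b;

	return (ia > ib) - (ia < ib);
}

/* Sort elems[], drop adjacent duplicates, return the new count. */
static int
uniqueify(int *elems, int n)
{
	int			out = 1;
	int			i;

	if (n <= 1)
		return n;
	qsort(elems, n, sizeof(int), cmp_int);
	for (i = 1; i < n; i++)
	{
		if (elems[i] != elems[out - 1])
			elems[out++] = elems[i];
	}
	return out;
}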
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 2423b737c9..6b1ce9b3a9 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -104,11 +104,12 @@ static void populate_recordset_array_element_start(void *state, bool isnull);
/* worker function for populate_recordset and to_recordset */
static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo,
bool have_record_arg);
+
/* Worker that takes care of common setup for us */
static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader,
- uint32 flags,
- char *key,
- uint32 keylen);
+ uint32 flags,
+ char *key,
+ uint32 keylen);
/* search type classification for json_get* functions */
typedef enum
@@ -235,8 +236,8 @@ typedef struct PopulateRecordsetState
} PopulateRecordsetState;
/* Turn a jsonb object into a record */
-static void make_row_from_rec_and_jsonb(Jsonb * element,
- PopulateRecordsetState *state);
+static void make_row_from_rec_and_jsonb(Jsonb *element,
+ PopulateRecordsetState *state);
/*
* SQL function json_object_keys
@@ -791,7 +792,7 @@ get_path_all(FunctionCallInfo fcinfo, bool as_text)
result = get_worker(json, NULL, -1, tpath, ipath, npath, as_text);
if (result != NULL)
- PG_RETURN_TEXT_P(result);
+ PG_RETURN_TEXT_P(result);
else
/* null is NULL, regardless */
PG_RETURN_NULL();
@@ -1178,7 +1179,7 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
jbvp = findJsonbValueFromSuperHeaderLen(superHeader,
JB_FOBJECT,
VARDATA_ANY(pathtext[i]),
- VARSIZE_ANY_EXHDR(pathtext[i]));
+ VARSIZE_ANY_EXHDR(pathtext[i]));
}
else if (have_array)
{
@@ -1209,8 +1210,8 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
if (jbvp->type == jbvBinary)
{
- JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data);
- int r;
+ JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data);
+ int r;
r = JsonbIteratorNext(&it, &tv, true);
superHeader = (JsonbSuperHeader) jbvp->val.binary.data;
@@ -1932,7 +1933,7 @@ elements_array_element_end(void *state, bool isnull)
text *val;
HeapTuple tuple;
Datum values[1];
- bool nulls[1] = {false};
+ bool nulls[1] = {false};
/* skip over nested objects */
if (_state->lex->lex_level != 1)
@@ -2035,7 +2036,7 @@ json_to_record(PG_FUNCTION_ARGS)
static inline Datum
populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
{
- int json_arg_num = have_record_arg ? 1 : 0;
+ int json_arg_num = have_record_arg ? 1 : 0;
Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
text *json;
Jsonb *jb = NULL;
@@ -2060,7 +2061,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
if (have_record_arg)
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!type_is_rowtype(argtype))
ereport(ERROR,
@@ -2275,7 +2276,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
else if (v->type == jbvNumeric)
s = DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v->val.numeric)));
+ PointerGetDatum(v->val.numeric)));
else if (!use_json_as_text)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2476,7 +2477,7 @@ json_to_recordset(PG_FUNCTION_ARGS)
}
static void
-make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
+make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState *state)
{
Datum *values;
bool *nulls;
@@ -2575,7 +2576,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
else if (v->type == jbvNumeric)
s = DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v->val.numeric)));
+ PointerGetDatum(v->val.numeric)));
else if (!state->use_json_as_text)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2603,7 +2604,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
static inline Datum
populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg)
{
- int json_arg_num = have_record_arg ? 1 : 0;
+ int json_arg_num = have_record_arg ? 1 : 0;
Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
bool use_json_as_text;
ReturnSetInfo *rsi;
@@ -2620,7 +2621,7 @@ populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg)
if (have_record_arg)
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!type_is_rowtype(argtype))
ereport(ERROR,
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 3d5f3d538b..bcd9e2182d 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -76,12 +76,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*/
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 241f738d60..4eeb6314fa 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -96,7 +96,7 @@ pg_signal_backend(int pid, int sig)
/*
* BackendPidGetProc returns NULL if the pid isn't valid; but by the time
* we reach kill(), a process for which we get a valid proc here might
- * have terminated on its own. There's no way to acquire a lock on an
+ * have terminated on its own. There's no way to acquire a lock on an
* arbitrary process to prevent that. But since so far all the callers of
* this mechanism involve some request for ending the process anyway, that
* it might end on its own first is not a problem.
@@ -120,7 +120,7 @@ pg_signal_backend(int pid, int sig)
* recycled for a new process, before reaching here? Then we'd be trying
* to kill the wrong thing. Seems near impossible when sequential pid
* assignment and wraparound is used. Perhaps it could happen on a system
- * where pid re-use is randomized. That race condition possibility seems
+ * where pid re-use is randomized. That race condition possibility seems
* too unlikely to worry about.
*/
@@ -140,7 +140,7 @@ pg_signal_backend(int pid, int sig)
}
/*
- * Signal to cancel a backend process. This is allowed if you are superuser or
+ * Signal to cancel a backend process. This is allowed if you are superuser or
* have the same role as the process being canceled.
*/
Datum
@@ -254,7 +254,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
fctx->location = psprintf("base");
else
fctx->location = psprintf("pg_tblspc/%u/%s", tablespaceOid,
- TABLESPACE_VERSION_DIRECTORY);
+ TABLESPACE_VERSION_DIRECTORY);
fctx->dirdesc = AllocateDir(fctx->location);
@@ -326,7 +326,7 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
- * zero means "the database's default tablespace". So, rather than
+ * zero means "the database's default tablespace". So, rather than
* throwing an error for zero, we choose to assume that's what is meant.
*/
if (tablespaceOid == InvalidOid)
@@ -384,7 +384,7 @@ pg_sleep(PG_FUNCTION_ARGS)
* loop.
*
* By computing the intended stop time initially, we avoid accumulation of
- * extra delay across multiple sleeps. This also ensures we won't delay
+ * extra delay across multiple sleeps. This also ensures we won't delay
* less than the specified time when WaitLatch is terminated early by a
* non-query-cancelling signal such as SIGHUP.
*/
@@ -547,7 +547,7 @@ pg_relation_is_updatable(PG_FUNCTION_ARGS)
* pg_column_is_updatable - determine whether a column is updatable
*
* This function encapsulates the decision about just what
- * information_schema.columns.is_updatable actually means. It's not clear
+ * information_schema.columns.is_updatable actually means. It's not clear
* whether deletability of the column's relation should be required, so
* we want that decision in C code where we could change it without initdb.
*/
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 74d24aa065..a6d30851df 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -118,26 +118,24 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn)
if (tzp != NULL)
{
- *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
+ *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
+ /*
+ * XXX FreeBSD man pages indicate that this should work - tgl 97/04/23
+ */
+ if (tzn != NULL)
+ {
/*
- * XXX FreeBSD man pages indicate that this should work - tgl
- * 97/04/23
+ * Copy no more than MAXTZLEN bytes of timezone to tzn, in case it
+ * contains an error message, which doesn't fit in the buffer
*/
- if (tzn != NULL)
- {
- /*
- * Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in the
- * buffer
- */
- StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
- if (strlen(tm->tm_zone) > MAXTZLEN)
- ereport(WARNING,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid time zone name: \"%s\"",
- tm->tm_zone)));
- }
+ StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
+ if (strlen(tm->tm_zone) > MAXTZLEN)
+ ereport(WARNING,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid time zone name: \"%s\"",
+ tm->tm_zone)));
+ }
}
else
tm->tm_isdst = -1;
@@ -175,7 +173,7 @@ tm2abstime(struct pg_tm * tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
@@ -1140,7 +1138,7 @@ tintervalsame(PG_FUNCTION_ARGS)
* 1. The interval length computations overflow at 2^31 seconds, causing
* intervals longer than that to sort oddly compared to those shorter.
* 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are
- * just ordinary integers. Since this code doesn't handle them specially,
+ * just ordinary integers. Since this code doesn't handle them specially,
* it's possible for [a b] to be considered longer than [c infinity] for
* finite abstimes a, b, c. In combination with the previous point, the
* interval [-infinity infinity] is treated as being shorter than many finite
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 8bdf5778d8..69c7ac182f 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -39,7 +39,7 @@ network_in(char *src, bool is_cidr)
dst = (inet *) palloc0(sizeof(inet));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
* will have a : somewhere in them (several, in fact) so if there is one
* present, assume it's V6, otherwise assume it's V4.
*/
@@ -144,7 +144,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -1401,7 +1401,7 @@ inetmi(PG_FUNCTION_ARGS)
/*
* We form the difference using the traditional complement, increment,
* and add rule, with the increment part being handled by starting the
- * carry off at 1. If you don't think integer arithmetic is done in
+ * carry off at 1. If you don't think integer arithmetic is done in
* two's complement, too bad.
*/
int nb = ip_addrsize(ip);
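Spelled out over byte strings, the complement-increment-and-add rule looks like this (a sketch; the real code additionally folds the low-order bytes into an int64 result):

#include <stdint.h>

/* diff = a - b over equal-length big-endian byte arrays, formed as
 * a + ~b + 1, with the "+ 1" supplied by seeding the carry. */
static void
byte_sub(const uint8_t *a, const uint8_t *b, uint8_t *diff, int nbytes)
{
	int			carry = 1;
	int			i;

	for (i = nbytes - 1; i >= 0; i--)
	{
		int			v = a[i] + (~b[i] & 0xFF) + carry;

		diff[i] = (uint8_t) (v & 0xFF);
		carry = v >> 8;
	}
}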
@@ -1423,7 +1423,7 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes to
+ * Input wider than int64: check for overflow. All bytes to
* the left of what will fit should be 0 or 0xFF, depending on
* sign of the now-complete result.
*/
@@ -1454,9 +1454,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
diff --git a/src/backend/utils/adt/network_gist.c b/src/backend/utils/adt/network_gist.c
index 0a826ae90a..69b9d10474 100644
--- a/src/backend/utils/adt/network_gist.c
+++ b/src/backend/utils/adt/network_gist.c
@@ -7,7 +7,7 @@
* "union" of a set of INET/CIDR values. It works like this:
* 1. If the values are not all of the same IP address family, the "union"
* is a dummy value with family number zero, minbits zero, commonbits zero,
- * address all zeroes. Otherwise:
+ * address all zeroes. Otherwise:
* 2. The union has the common IP address family number.
* 3. The union's minbits value is the smallest netmask length ("ip_bits")
* of all the input values.
@@ -202,8 +202,8 @@ inet_gist_consistent(PG_FUNCTION_ARGS)
*
* Compare available common prefix bits to the query, but not beyond
* either the query's netmask or the minimum netmask among the represented
- * values. If these bits don't match the query, we have our answer (and
- * may or may not need to descend, depending on the operator). If they do
+ * values. If these bits don't match the query, we have our answer (and
+ * may or may not need to descend, depending on the operator). If they do
* match, and we are not at a leaf, we descend in all cases.
*
* Note this is the final check for operators that only consider the
@@ -682,7 +682,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS)
{
/*
* If there's more than 2 families, all but maxfamily go into the
- * left union. This could only happen if the inputs include some
+ * left union. This could only happen if the inputs include some
* IPv4, some IPv6, and some already-multiple-family unions.
*/
tmp = DatumGetInetKeyP(ent[i].key);
@@ -741,7 +741,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS)
}
/*
- * Compute the union value for each side from scratch. In most cases we
+ * Compute the union value for each side from scratch. In most cases we
* could approximate the union values with what we already know, but this
* ensures that each side has minbits and commonbits set as high as
* possible.
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index bf4f29d14d..19d0bdcbb9 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -50,7 +50,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -95,11 +95,11 @@ typedef int16 NumericDigit;
* If the high bits of the first word of a NumericChoice (n_header, or
* n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the
* numeric follows the NumericShort format; if they are NUMERIC_POS or
- * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
+ * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
* it is a NaN. We currently always store a NaN using just two bytes (i.e.
* only n_header), but previous releases used only the NumericLong format,
* so we might find 4-byte NaNs on disk if a database has been migrated using
- * pg_upgrade. In either case, when the high bits indicate a NaN, the
+ * pg_upgrade. In either case, when the high bits indicate a NaN, the
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
@@ -207,19 +207,19 @@ struct NumericData
: ((n)->choice.n_long.n_weight))
/* ----------
- * NumericVar is the format we use for arithmetic. The digit-array part
+ * NumericVar is the format we use for arithmetic. The digit-array part
* is the same as the NumericData storage format, but the header is more
* complex.
*
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
- * NumericVar. digits points at the first digit in actual use (the one
- * with the specified weight). We normally leave an unused digit or two
+ * NumericVar. digits points at the first digit in actual use (the one
+ * with the specified weight). We normally leave an unused digit or two
* (preset to zeroes) between buf and digits, so that there is room to store
* a carry out of the top digit without reallocating space. We just need to
* decrement digits (and increment weight) to make room for the carry digit.
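A worked example of the weight convention, assuming NBASE = 10000: the value 1234567.89 needs three NBASE digits and has weight 1, i.e. weight + 1 = 2 digits before the decimal point.

#include <stdint.h>

/* 1234567.89 == 123 * 10000^1 + 4567 * 10000^0 + 8900 * 10000^-1 */
static const int16_t example_digits[] = {123, 4567, 8900};
static const int example_weight = 1;	/* exponent of NBASE for digits[0] */
static const int example_ndigits = 3;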
@@ -596,7 +596,7 @@ numeric_maximum_size(int32 typmod)
* In most cases, the size of a numeric will be smaller than the value
* computed below, because the varlena header will typically get toasted
* down to a single byte before being stored on disk, and it may also be
- * possible to use a short numeric header. But our job here is to compute
+ * possible to use a short numeric header. But our job here is to compute
* the worst case.
*/
return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit));
@@ -636,7 +636,8 @@ numeric_normalize(Numeric num)
{
NumericVar x;
char *str;
- int orig, last;
+ int orig,
+ last;
/*
* Handle NaN
@@ -754,7 +755,7 @@ numeric_send(PG_FUNCTION_ARGS)
*
* Flatten calls to numeric's length coercion function that solely represent
* increases in allowable precision. Scale changes mutate every datum, so
- * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
+ * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
* unconstrained numeric, so a change from an unconstrained numeric to any
* constrained numeric is also unoptimizable.
*/
@@ -784,7 +785,7 @@ numeric_transform(PG_FUNCTION_ARGS)
* If new_typmod < VARHDRSZ, the destination is unconstrained; that's
* always OK. If old_typmod >= VARHDRSZ, the source is constrained,
* and we're OK if the scale is unchanged and the precision is not
- * decreasing. See further notes in function header comment.
+ * decreasing. See further notes in function header comment.
*/
if (new_typmod < (int32) VARHDRSZ ||
(old_typmod >= (int32) VARHDRSZ &&
@@ -996,7 +997,7 @@ numeric_uminus(PG_FUNCTION_ARGS)
/*
* The packed format is known to be totally zero digit trimmed always. So
- * we can identify a ZERO by the fact that there are no digits at all. Do
+ * we can identify a ZERO by the fact that there are no digits at all. Do
* nothing to a zero.
*/
if (NUMERIC_NDIGITS(num) != 0)
@@ -1972,7 +1973,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2023,7 +2024,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2517,7 +2518,7 @@ typedef struct NumericAggState
NumericVar sumX; /* sum of processed numbers */
NumericVar sumX2; /* sum of squares of processed numbers */
int maxScale; /* maximum scale seen so far */
- int64 maxScaleCount; /* number of values seen with maximum scale */
+ int64 maxScaleCount; /* number of values seen with maximum scale */
int64 NaNcount; /* count of NaN values (not included in N!) */
} NumericAggState;
@@ -2652,8 +2653,8 @@ do_numeric_discard(NumericAggState *state, Numeric newval)
if (state->maxScaleCount > 1 || state->maxScale == 0)
{
/*
- * Some remaining inputs have same dscale, or dscale hasn't
- * gotten above zero anyway
+ * Some remaining inputs have same dscale, or dscale hasn't gotten
+ * above zero anyway
*/
state->maxScaleCount--;
}
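A simplified standalone model of that discard decision (not the backend code): removing the only input carrying the maximum dscale leaves the new maximum unknown, so the inverse transition must report failure and force a rebuild:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { int maxScale; long maxScaleCount; } ScaleState;

    static bool discard_input(ScaleState *s, int input_dscale)
    {
        if (input_dscale == s->maxScale)
        {
            if (s->maxScaleCount > 1 || s->maxScale == 0)
                s->maxScaleCount--;   /* other inputs still carry this dscale */
            else
                return false;         /* signal: re-aggregate from scratch */
        }
        return true;
    }

    int main(void)
    {
        ScaleState s = {2, 1};
        printf("%d\n", discard_input(&s, 2));   /* 0: last max-scale input */
        return 0;
    }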
@@ -2767,9 +2768,9 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
/*
* Integer data types all use Numeric accumulators to share code and
- * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
+ * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
* is overkill for the N and sum(X) values, but definitely not overkill
- * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
+ * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
* for stddev/variance --- there are faster special-purpose accumulator
* routines for SUM and AVG of these datatypes.
*/
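The sum(X*X) concern is easy to quantify: a single squared int8 value can already exceed the int64 range, as this small check shows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t x = INT64_C(4000000000);   /* a plausible int8 input */

        /* x*x = 1.6e19 exceeds INT64_MAX (~9.22e18), so even one row can
         * overflow an int64 sum-of-squares accumulator */
        printf("overflows: %d\n", x > INT64_MAX / x);   /* 1 */
        return 0;
    }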
@@ -2965,7 +2966,7 @@ numeric_avg(PG_FUNCTION_ARGS)
if (state == NULL || (state->N + state->NaNcount) == 0)
PG_RETURN_NULL();
- if (state->NaNcount > 0) /* there was at least one NaN input */
+ if (state->NaNcount > 0) /* there was at least one NaN input */
PG_RETURN_NUMERIC(make_result(&const_nan));
N_datum = DirectFunctionCall1(int8_numeric, Int64GetDatum(state->N));
@@ -2985,7 +2986,7 @@ numeric_sum(PG_FUNCTION_ARGS)
if (state == NULL || (state->N + state->NaNcount) == 0)
PG_RETURN_NULL();
- if (state->NaNcount > 0) /* there was at least one NaN input */
+ if (state->NaNcount > 0) /* there was at least one NaN input */
PG_RETURN_NUMERIC(make_result(&const_nan));
PG_RETURN_NUMERIC(make_result(&(state->sumX)));
@@ -3167,7 +3168,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
* the initial condition of the transition data value needs to be NULL. This
* means we can't rely on ExecAgg to automatically insert the first non-null
* data value into the transition data: it doesn't know how to do the type
- * conversion. The upshot is that these routines have to be marked non-strict
+ * conversion. The upshot is that these routines have to be marked non-strict
* and handle substitution of the first non-null input themselves.
*
* Note: these functions are used only in plain aggregation mode.
@@ -3653,7 +3654,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest)
/*
* We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * correct decimal weight. Then convert to NBASE representation.
*/
switch (*cp)
{
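A toy version of the decimal-to-NBASE step for an integer input, assuming NBASE = 10000 (DEC_DIGITS = 4); alignment of fractional digits is omitted for brevity:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *dec = "1234567";        /* decimal digits, no sign/point */
        int  len  = (int) strlen(dec);
        int  lead = len % 4 ? len % 4 : 4;  /* digits in the first group */
        int  pos  = 0;

        while (pos < len)
        {
            int group = 0;
            int take  = pos == 0 ? lead : 4;

            for (int i = 0; i < take; i++)
                group = group * 10 + (dec[pos++] - '0');
            printf("%d ", group);           /* prints: 123 4567 */
        }
        printf("\n");
        return 0;
    }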
@@ -4261,7 +4262,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*/
static bool
numericvar_to_int8(NumericVar *var, int64 *result)
@@ -4732,7 +4733,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -4776,7 +4777,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
* would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
* or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
@@ -5019,7 +5020,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*
* We need the first divisor digit to be >= NBASE/2. If it isn't,
* make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
+ * factor "d". (The reason for allocating dividend[0] above is to
* leave room for possible carry here.)
*/
if (divisor[1] < HALF_NBASE)
@@ -5063,7 +5064,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
+ * no need to adjust the working dividend. It's worth testing
* here to fall out ASAP when processing trailing zeroes in a
* dividend.
*/
@@ -5081,7 +5082,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Adjust quotient digit if it's too large. Knuth proves that
* after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
+ * just one too large. (Note: it's OK to use dividend[j+2] here
* because we know the divisor length is at least 2.)
*/
while (divisor2 * qhat >
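A standalone sketch of that quotient-digit estimate and correction, following Knuth's Algorithm D; the variable names mirror the comment but are illustrative, not the patch's code:

    #include <stdio.h>

    #define NBASE 10000

    /* Estimate one quotient digit from the top dividend digits, then
     * correct it downward; with the divisor normalized so that
     * divisor1 >= NBASE/2, the estimate is correct or one too large. */
    static int estimate_qhat(long next2digits, int divisor1, int divisor2,
                             int dividend_j2)
    {
        long qhat = next2digits / divisor1;

        while ((long) divisor2 * qhat >
               (next2digits - qhat * divisor1) * NBASE + dividend_j2)
            qhat--;
        return (int) qhat;
    }

    int main(void)
    {
        /* top dividend digits 4999 9999, divisor digits 5000 0001 ... */
        printf("%d\n", estimate_qhat(4999L * NBASE + 9999, 5000, 1, 0));   /* 9999 */
        return 0;
    }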
@@ -5256,7 +5257,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result,
* dividend's digits (plus appended zeroes to reach the desired precision
* including guard digits). Each step of the main loop computes an
* (approximate) quotient digit and stores it into div[], removing one
- * position of dividend space. A final pass of carry propagation takes
+ * position of dividend space. A final pass of carry propagation takes
* care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
@@ -6106,7 +6107,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
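The bit-pattern walk the comment describes is ordinary exponentiation by squaring; a plain integer sketch without the extra-precision rounding:

    #include <stdio.h>

    /* Square the base each round, multiplying into the result whenever
     * the current low bit of exp is set. */
    static long ipow(long base, unsigned exp)
    {
        long result = 1;

        while (exp > 0)
        {
            if (exp & 1)
                result *= base;
            base *= base;
            exp >>= 1;
        }
        return result;
    }

    int main(void)
    {
        printf("%ld\n", ipow(3, 13));   /* 1594323 */
        return 0;
    }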
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 8945ef43f0..2badb558f0 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -318,7 +318,7 @@ oidparse(Node *node)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid OID
+ * constants by the lexer. Accept these if they are valid OID
* strings.
*/
return oidin_subr(strVal(node), NULL);
diff --git a/src/backend/utils/adt/orderedsetaggs.c b/src/backend/utils/adt/orderedsetaggs.c
index 99577a549e..efb0411c22 100644
--- a/src/backend/utils/adt/orderedsetaggs.c
+++ b/src/backend/utils/adt/orderedsetaggs.c
@@ -462,7 +462,7 @@ percentile_disc_final(PG_FUNCTION_ARGS)
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned is allocated inside its sortcontext. We could use
+ * to be returned is allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -580,7 +580,7 @@ percentile_cont_final_common(FunctionCallInfo fcinfo,
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned may be allocated inside its sortcontext. We could use
+ * to be returned may be allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -1086,7 +1086,7 @@ mode_final(PG_FUNCTION_ARGS)
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned is allocated inside its sortcontext. We could use
+ * to be returned is allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -1331,7 +1331,7 @@ hypothetical_dense_rank_final(PG_FUNCTION_ARGS)
/*
* We alternate fetching into tupslot and extraslot so that we have the
- * previous row available for comparisons. This is accomplished by
+ * previous row available for comparisons. This is accomplished by
* swapping the slot pointer variables after each row.
*/
extraslot = MakeSingleTupleTableSlot(osastate->qstate->tupdesc);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 0c8474df54..94bb5a47bb 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -20,12 +20,12 @@
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also
* settable at run-time. However, we don't actually set those locale
- * categories permanently. This would have bizarre effects like no
+ * categories permanently. This would have bizarre effects like no
* longer accepting standard floating-point literals in some locales.
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent.
*
* !!! NOW HEAR THIS !!!
@@ -39,7 +39,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*
* FYI, The Open Group locale standard is defined here:
@@ -243,7 +243,7 @@ pg_perm_setlocale(int category, const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a palloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the server environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -286,7 +286,7 @@ check_locale(int category, const char *locale, char **canonname)
*
* For most locale categories, the assign hook doesn't actually set the locale
* permanently, just reset flags so that the next use will cache the
- * appropriate values. (See explanation at the top of this file.)
+ * appropriate values. (See explanation at the top of this file.)
*
* Note: we accept value = "" as selecting the postmaster's environment
* value, whatever it was (so long as the environment setting is legal).
@@ -463,6 +463,7 @@ PGLC_localeconv(void)
save_lc_numeric = pstrdup(save_lc_numeric);
#ifdef WIN32
+
/*
* Ideally, monetary and numeric local symbols could be returned in any
* server encoding. Unfortunately, the WIN32 API does not allow
@@ -644,6 +645,7 @@ cache_locale_time(void)
save_lc_time = pstrdup(save_lc_time);
#ifdef WIN32
+
/*
* On WIN32, there is no way to get locale-specific time values in a
* specified locale, like we do for monetary/numeric. We can only get
@@ -729,13 +731,13 @@ cache_locale_time(void)
* Convert a Windows setlocale() argument to a Unix-style one.
*
* Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
* following that style will elicit localized interface strings.
*
* Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
* (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
* case-insensitive. setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
* setlocale() and _create_locale() select a "locale identifier"[1] and store
* it in an undocumented _locale_t field. From that LCID, we can retrieve the
* ISO 639 language and the ISO 3166 country. Character encoding does not
@@ -746,12 +748,12 @@ cache_locale_time(void)
* Studio 2012, setlocale() accepts locale names in addition to the strings it
* accepted historically. It does not standardize them; setlocale("Th-tH")
* returns "Th-tH". setlocale(category, "") still returns a traditional
- * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
* content to carry locale names instead of locale identifiers.
*
* MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
* IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages. In
+ * Unix-style values of the lc_messages GUC can elicit localized messages. In
* particular, every lc_messages setting that initdb can select automatically
* will yield only C-locale messages. XXX This could be fixed by running the
* fully-qualified locale name through a lookup table.
@@ -795,7 +797,7 @@ IsoLocaleName(const char *winlocname)
* need not standardize letter case here. So long as we do not ship
* message catalogs for which it would matter, we also need not
* translate the script/variant portion, e.g. uz-Cyrl-UZ to
- * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
+ * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
*
* Note that the locale name can be less-specific than the value we
* would derive under earlier Visual Studio releases. For example,
@@ -850,7 +852,7 @@ IsoLocaleName(const char *winlocname)
* could fail if the locale is C, so str_tolower() shouldn't call it
* in that case.
*
- * Note that we currently lack any way to flush the cache. Since we don't
+ * Note that we currently lack any way to flush the cache. Since we don't
* support ALTER COLLATION, this is OK. The worst case is that someone
* drops a collation, and a useless cache entry hangs around in existing
* backends.
@@ -1044,7 +1046,7 @@ report_newlocale_failure(const char *localename)
/*
- * Create a locale_t from a collation OID. Results are cached for the
+ * Create a locale_t from a collation OID. Results are cached for the
* lifetime of the backend. Thus, do not free the result with freelocale().
*
* As a special optimization, the default/database collation returns 0.
@@ -1170,6 +1172,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
return 0;
#ifdef WIN32
+
/*
* On Windows, the "Unicode" locales assume UTF16 not UTF8 encoding, and
* for some reason mbstowcs and wcstombs won't do this for us, so we use
@@ -1226,7 +1229,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
* This has almost the API of mbstowcs_l(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
diff --git a/src/backend/utils/adt/pg_lsn.c b/src/backend/utils/adt/pg_lsn.c
index e2b528a243..d1448aee7b 100644
--- a/src/backend/utils/adt/pg_lsn.c
+++ b/src/backend/utils/adt/pg_lsn.c
@@ -29,8 +29,10 @@ Datum
pg_lsn_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
- int len1, len2;
- uint32 id, off;
+ int len1,
+ len2;
+ uint32 id,
+ off;
XLogRecPtr result;
/* Sanity check input format. */
@@ -38,12 +40,12 @@ pg_lsn_in(PG_FUNCTION_ARGS)
if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || str[len1] != '/')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
+ errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
len2 = strspn(str + len1 + 1, "0123456789abcdefABCDEF");
if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || str[len1 + 1 + len2] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
+ errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
/* Decode result. */
id = (uint32) strtoul(str, NULL, 16);
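A self-contained sketch of the "XXXXXXXX/XXXXXXXX" parse above: two hex fields become the high and low halves of a 64-bit LSN (the ereport-style validation is elided here):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        const char *str  = "16/B374D848";
        size_t      len1 = strspn(str, "0123456789abcdefABCDEF");
        uint32_t    id   = (uint32_t) strtoul(str, NULL, 16);
        uint32_t    off  = (uint32_t) strtoul(str + len1 + 1, NULL, 16);
        uint64_t    lsn  = ((uint64_t) id << 32) | off;

        /* round-trip back to the display form */
        printf("%X/%X\n", id, (uint32_t) lsn);   /* 16/B374D848 */
        return 0;
    }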
@@ -59,7 +61,8 @@ pg_lsn_out(PG_FUNCTION_ARGS)
XLogRecPtr lsn = PG_GETARG_LSN(0);
char buf[MAXPG_LSNLEN + 1];
char *result;
- uint32 id, off;
+ uint32 id,
+ off;
/* Decode ID and offset */
id = (uint32) (lsn >> 32);
@@ -83,7 +86,7 @@ pg_lsn_recv(PG_FUNCTION_ARGS)
Datum
pg_lsn_send(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn = PG_GETARG_LSN(0);
+ XLogRecPtr lsn = PG_GETARG_LSN(0);
StringInfoData buf;
pq_begintypsend(&buf);
@@ -99,8 +102,8 @@ pg_lsn_send(PG_FUNCTION_ARGS)
Datum
pg_lsn_eq(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 == lsn2);
}
@@ -108,8 +111,8 @@ pg_lsn_eq(PG_FUNCTION_ARGS)
Datum
pg_lsn_ne(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 != lsn2);
}
@@ -117,8 +120,8 @@ pg_lsn_ne(PG_FUNCTION_ARGS)
Datum
pg_lsn_lt(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 < lsn2);
}
@@ -126,8 +129,8 @@ pg_lsn_lt(PG_FUNCTION_ARGS)
Datum
pg_lsn_gt(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 > lsn2);
}
@@ -135,8 +138,8 @@ pg_lsn_gt(PG_FUNCTION_ARGS)
Datum
pg_lsn_le(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 <= lsn2);
}
@@ -144,8 +147,8 @@ pg_lsn_le(PG_FUNCTION_ARGS)
Datum
pg_lsn_ge(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 >= lsn2);
}
@@ -158,8 +161,8 @@ pg_lsn_ge(PG_FUNCTION_ARGS)
Datum
pg_lsn_mi(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
char buf[256];
Datum result;
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 30f1c0ab1f..fe088901f0 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -576,9 +576,9 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* Experiments suggest that these hash sizes work pretty well. A large
- * hash table minimizes collision, but has a higher startup cost. For
- * a small input, the startup cost dominates. The table size must be
- * a power of two.
+ * hash table minimizes collision, but has a higher startup cost. For a
+ * small input, the startup cost dominates. The table size must be a power
+ * of two.
*/
if (slen < 128)
hashsz = 512;
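A sketch of the size-tiered table choice: small inputs get a small, cheap-to-initialize table, larger inputs a bigger one, and keeping the size a power of two lets (hash & mask) replace a modulo. The tier boundaries here are illustrative:

    #include <stdio.h>

    static int choose_hashsz(int slen)
    {
        if (slen < 128)  return 512;
        if (slen < 256)  return 1024;
        if (slen < 512)  return 2048;
        if (slen < 1024) return 4096;
        return 8192;
    }

    int main(void)
    {
        int hashsz = choose_hashsz(300);
        int mask   = hashsz - 1;        /* valid because hashsz is 2^k */

        printf("hashsz=%d mask=0x%x\n", hashsz, mask);   /* 2048, 0x7ff */
        return 0;
    }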
@@ -615,7 +615,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
+ * anything compressible at all, fail. This lets us fall out
* reasonably quickly when looking at incompressible input (such as
* pre-compressed data).
*/
@@ -639,7 +639,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend, mask);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
found_match = true;
}
@@ -653,7 +653,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend, mask);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
}
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index bf3084fce6..44ccd37e99 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1797,5 +1797,5 @@ pg_stat_get_archiver(PG_FUNCTION_ARGS)
/* Returns the record as Datum */
PG_RETURN_DATUM(HeapTupleGetDatum(
- heap_form_tuple(tupdesc, values, nulls)));
+ heap_form_tuple(tupdesc, values, nulls)));
}
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index a553c1abf1..475ce13abf 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -6,7 +6,7 @@
* A pseudo-type isn't really a type and never has any operations, but
* we do need to supply input and output functions to satisfy the links
* in the pseudo-type's entry in pg_type. In most cases the functions
- * just throw an error if invoked. (XXX the error messages here cover
+ * just throw an error if invoked. (XXX the error messages here cover
* the most common case, but might be confusing in some contexts. Can
* we do better?)
*
@@ -139,7 +139,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
@@ -216,7 +216,7 @@ anyrange_out(PG_FUNCTION_ARGS)
* void_in - input routine for pseudo-type VOID.
*
* We allow this so that PL functions can return VOID without any special
- * hack in the PL handler. Whatever value the PL thinks it's returning
+ * hack in the PL handler. Whatever value the PL thinks it's returning
* will just be ignored.
*/
Datum
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 38b51035ae..bc8a480ed3 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -1441,7 +1441,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS)
*
* This is for use by range-related functions that follow the convention
* of using the fn_extra field as a pointer to the type cache entry for
- * the range type. Functions that need to cache more information than
+ * the range type. Functions that need to cache more information than
* that must fend for themselves.
*/
TypeCacheEntry *
@@ -1465,7 +1465,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid)
* range_serialize: construct a range value from bounds and empty-flag
*
* This does not force canonicalization of the range value. In most cases,
- * external callers should only be canonicalization functions. Note that
+ * external callers should only be canonicalization functions. Note that
* we perform some datatype-independent canonicalization checks anyway.
*/
RangeType *
@@ -1802,7 +1802,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2)
* Compare two range boundary point values, returning <0, 0, or >0 according
* to whether b1 is less than, equal to, or greater than b2.
*
- * This is similar to but simpler than range_cmp_bounds(). We just compare
+ * This is similar to but simpler than range_cmp_bounds(). We just compare
* the values held in b1 and b2, ignoring inclusive/exclusive flags. The
* lower/upper flags only matter for infinities, where they tell us if the
* infinity is plus or minus.
@@ -2283,7 +2283,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val)
/*
* datum_compute_size() and datum_write() are used to insert the bound
- * values into a range object. They are modeled after heaptuple.c's
+ * values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
* heaptuple.c's ATT_IS_PACKABLE macro.
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index 13c87ea4a3..2bd28f5038 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -300,7 +300,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else if (orig_lower.infinite && orig_upper.infinite)
{
/*
- * Original range requires broadening. (-inf; +inf) is most far
+ * Original range requires broadening. (-inf; +inf) is most far
* from normal range in this case.
*/
*penalty = 2 * CONTAIN_EMPTY_PENALTY;
@@ -497,7 +497,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
/*
* The GiST PickSplit method for ranges
*
- * Primarily, we try to segregate ranges of different classes. If splitting
+ * Primarily, we try to segregate ranges of different classes. If splitting
* ranges of the same class, use the appropriate split method for that class.
*/
Datum
@@ -668,7 +668,7 @@ range_gist_same(PG_FUNCTION_ARGS)
/*
* range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check
- * that for ourselves. More generally, if the entries have been properly
+ * that for ourselves. More generally, if the entries have been properly
* normalized, then unequal flags bytes must mean unequal ranges ... so
* let's just test all the flag bits at once.
*/
@@ -816,7 +816,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
/*
* Empty ranges are contained by anything, so if key is or
- * contains any empty ranges, we must descend into it. Otherwise,
+ * contains any empty ranges, we must descend into it. Otherwise,
* descend only if key overlaps the query.
*/
if (RangeIsOrContainsEmpty(key))
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 7c5b0d53bc..caf45ef85f 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
char errMsg[100];
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front, our
* search strategy can just be to scan from the front.
*/
@@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
/*
* Here and in other places in this file, do CHECK_FOR_INTERRUPTS
- * before reporting a regex error. This is so that if the regex
+ * before reporting a regex error. This is so that if the regex
* library aborts and returns REG_CANCEL, we don't print an error
* message that implies the regex was invalid.
*/
@@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 6210f45a19..c0314ee532 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -85,7 +85,7 @@ regprocin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_proc for a unique match. This is needed for
+ * just search pg_proc for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -165,8 +165,8 @@ to_regproc(PG_FUNCTION_ARGS)
FuncCandidateList clist;
/*
- * Parse the name into components and see if it matches any pg_proc entries
- * in the current search path.
+ * Parse the name into components and see if it matches any pg_proc
+ * entries in the current search path.
*/
names = stringToQualifiedNameList(pro_name);
clist = FuncnameGetCandidates(names, -1, NIL, false, false, true);
@@ -295,7 +295,7 @@ regprocedurein(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -339,7 +339,7 @@ to_regprocedure(PG_FUNCTION_ARGS)
/*
* Parse the name and arguments, look up potential matches in the current
* namespace search list, and scan to see which one exactly matches the
- * given argument types. (There will not be more than one match.)
+ * given argument types. (There will not be more than one match.)
*/
parseNameAndArgTypes(pro_name, false, &names, &nargs, argtypes);
@@ -376,7 +376,7 @@ format_procedure_qualified(Oid procedure_oid)
* Routine to produce regprocedure names; see format_procedure above.
*
* force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility. Otherwise the name is only
+ * qualified regardless of search_path visibility. Otherwise the name is only
* qualified if the function is not in path.
*/
static char *
@@ -510,7 +510,7 @@ regoperin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_operator for a unique match. This is needed for
+ * just search pg_operator for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -724,7 +724,7 @@ regoperatorin(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -770,7 +770,7 @@ to_regoperator(PG_FUNCTION_ARGS)
/*
* Parse the name and arguments, look up potential matches in the current
* namespace search list, and scan to see which one exactly matches the
- * given argument types. (There will not be more than one match.)
+ * given argument types. (There will not be more than one match.)
*/
parseNameAndArgTypes(opr_name_or_oid, true, &names, &nargs, argtypes);
if (nargs == 1)
@@ -1006,8 +1006,8 @@ to_regclass(PG_FUNCTION_ARGS)
List *names;
/*
- * Parse the name into components and see if it matches any pg_class entries
- * in the current search path.
+ * Parse the name into components and see if it matches any pg_class
+ * entries in the current search path.
*/
names = stringToQualifiedNameList(class_name);
@@ -1045,7 +1045,7 @@ regclassout(PG_FUNCTION_ARGS)
/*
* In bootstrap mode, skip the fancy namespace stuff and just return
- * the class name. (This path is only needed for debugging output
+ * the class name. (This path is only needed for debugging output
* anyway.)
*/
if (IsBootstrapProcessingMode())
@@ -1560,7 +1560,7 @@ stringToQualifiedNameList(const char *string)
/*
* Given a C string, parse it into a qualified function or operator name
- * followed by a parenthesized list of type names. Reduce the
+ * followed by a parenthesized list of type names. Reduce the
* type names to an array of OIDs (returned into *nargs and *argtypes;
* the argtypes array should be of size FUNC_MAX_ARGS). The function or
* operator name is returned to *names as a List of Strings.
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 1e1e616fa4..d30847b34e 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -698,7 +698,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -922,7 +922,7 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE would remove such rows, while SET NULL is certain
* to result in rows that satisfy the FK constraint.)
*/
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE must change the FK key values, while SET NULL is
* certain to result in rows that satisfy the FK constraint.)
*/
@@ -2397,7 +2397,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
* Temporarily increase work_mem so that the check query can be executed
* more efficiently. It seems okay to do this because the query is simple
* enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
+ * have many large foreign-key validations happening concurrently. So
* this seems to meet the criteria for being considered a "maintenance"
* operation, and accordingly we use maintenance_work_mem.
*
@@ -2451,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
/*
* The columns to look at in the result tuple are 1..N, not whatever
- * they are in the fk_rel. Hack up riinfo so that the subroutines
+ * they are in the fk_rel. Hack up riinfo so that the subroutines
* called here will behave properly.
*
* In addition to this, we have to pass the correct tupdesc to
@@ -3180,7 +3180,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't passed
+ * Determine which relation to complain about. If tupdesc wasn't passed
* by caller, assume the violator tuple came from there.
*/
onfk = (queryno == RI_PLAN_CHECK_LOOKUPPK);
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 521c3daea7..9543d01d49 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -279,7 +279,7 @@ record_in(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -623,7 +623,7 @@ record_recv(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -861,7 +861,7 @@ record_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1097,7 +1097,7 @@ record_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1356,7 +1356,7 @@ record_image_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1390,11 +1390,12 @@ record_image_cmp(FunctionCallInfo fcinfo)
format_type_be(tupdesc2->attrs[i2]->atttypid),
j + 1)));
- /*
- * The same type should have the same length (or both should be variable).
- */
- Assert(tupdesc1->attrs[i1]->attlen ==
- tupdesc2->attrs[i2]->attlen);
+ /*
+ * The same type should have the same length (or both should be
+ * variable).
+ */
+ Assert(tupdesc1->attrs[i1]->attlen ==
+ tupdesc2->attrs[i2]->attlen);
/*
* We consider two NULLs equal; NULL > not-NULL.
@@ -1421,8 +1422,8 @@ record_image_cmp(FunctionCallInfo fcinfo)
{
Size len1,
len2;
- struct varlena *arg1val;
- struct varlena *arg2val;
+ struct varlena *arg1val;
+ struct varlena *arg2val;
len1 = toast_raw_datum_size(values1[i1]);
len2 = toast_raw_datum_size(values2[i2]);
@@ -1632,7 +1633,7 @@ record_image_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1690,8 +1691,8 @@ record_image_eq(PG_FUNCTION_ARGS)
result = false;
else
{
- struct varlena *arg1val;
- struct varlena *arg2val;
+ struct varlena *arg1val;
+ struct varlena *arg2val;
arg1val = PG_DETOAST_DATUM_PACKED(values1[i1]);
arg2val = PG_DETOAST_DATUM_PACKED(values2[i2]);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 36d9953108..a30d8febf8 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -155,11 +155,11 @@ typedef struct
*
* Selecting aliases is unreasonably complicated because of the need to dump
* rules/views whose underlying tables may have had columns added, deleted, or
- * renamed since the query was parsed. We must nonetheless print the rule/view
+ * renamed since the query was parsed. We must nonetheless print the rule/view
* in a form that can be reloaded and will produce the same results as before.
*
* For each RTE used in the query, we must assign column aliases that are
- * unique within that RTE. SQL does not require this of the original query,
+ * unique within that RTE. SQL does not require this of the original query,
* but due to factors such as *-expansion we need to be able to uniquely
* reference every column in a decompiled query. As long as we qualify all
* column references, per-RTE uniqueness is sufficient for that.
@@ -214,8 +214,8 @@ typedef struct
/*
* new_colnames is an array containing column aliases to use for columns
* that would exist if the query was re-parsed against the current
- * definitions of its base tables. This is what to print as the column
- * alias list for the RTE. This array does not include dropped columns,
+ * definitions of its base tables. This is what to print as the column
+ * alias list for the RTE. This array does not include dropped columns,
* but it will include columns added since original parsing. Indexes in
* it therefore have little to do with current varattno values. As above,
* entries are unique unless this is for an unnamed JOIN RTE. (In such an
@@ -1077,7 +1077,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should never be
+ * Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -1304,9 +1304,9 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
HeapTuple tup;
Form_pg_constraint conForm;
StringInfoData buf;
- SysScanDesc scandesc;
+ SysScanDesc scandesc;
ScanKeyData scankey[1];
- Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
+ Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
Relation relation = heap_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scankey[0],
@@ -1315,15 +1315,15 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
ObjectIdGetDatum(constraintId));
scandesc = systable_beginscan(relation,
- ConstraintOidIndexId,
- true,
- snapshot,
- 1,
- scankey);
+ ConstraintOidIndexId,
+ true,
+ snapshot,
+ 1,
+ scankey);
/*
- * We later use the tuple with SysCacheGetAttr() as if we
- * had obtained it via SearchSysCache, which works fine.
+ * We later use the tuple with SysCacheGetAttr() as if we had obtained it
+ * via SearchSysCache, which works fine.
*/
tup = systable_getnext(scandesc);
@@ -1806,7 +1806,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
SysScanDesc scan;
HeapTuple tup;
- /* Look up table name. Can't lock it - we might not have privileges. */
+ /* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
@@ -2406,8 +2406,10 @@ pg_get_function_arg_default(PG_FUNCTION_ARGS)
proc = (Form_pg_proc) GETSTRUCT(proctup);
- /* Calculate index into proargdefaults: proargdefaults corresponds to the
- * last N input arguments, where N = pronargdefaults. */
+ /*
+ * Calculate index into proargdefaults: proargdefaults corresponds to the
+ * last N input arguments, where N = pronargdefaults.
+ */
nth_default = nth_inputarg - 1 - (proc->pronargs - proc->pronargdefaults);
if (nth_default < 0 || nth_default >= list_length(argdefaults))
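A quick worked instance of the index arithmetic above, with hypothetical catalog values: proargdefaults holds defaults for the last N input arguments only, so the requested argument number must be shifted down accordingly:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical function: 5 input args, the last 2 have defaults */
        int pronargs        = 5;
        int pronargdefaults = 2;
        int nth_inputarg    = 4;    /* ask for argument 4's default */

        int nth_default = nth_inputarg - 1 - (pronargs - pronargdefaults);

        printf("nth_default = %d\n", nth_default);   /* 0: first stored default */
        return 0;
    }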
@@ -2444,7 +2446,7 @@ deparse_expression(Node *expr, List *dpcontext,
* tree (ie, not the raw output of gram.y).
*
* dpcontext is a list of deparse_namespace nodes representing the context
- * for interpreting Vars in the node tree. It can be NIL if no Vars are
+ * for interpreting Vars in the node tree. It can be NIL if no Vars are
* expected.
*
* forceprefix is TRUE to force all Vars to be prefixed with their table names.
@@ -2484,7 +2486,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
- * varlevelsup 0). This is sufficient for many uses of deparse_expression.
+ * varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
@@ -2555,7 +2557,7 @@ deparse_context_for_planstate(Node *planstate, List *ancestors,
dpns->ctes = NIL;
/*
- * Set up column name aliases. We will get rather bogus results for join
+ * Set up column name aliases. We will get rather bogus results for join
* RTEs, but that doesn't matter because plan trees don't contain any join
* alias Vars.
*/
@@ -3113,7 +3115,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* Scan the columns, select a unique alias for each one, and store it in
* colinfo->colnames and colinfo->new_colnames. The former array has NULL
- * entries for dropped columns, the latter omits them. Also mark
+ * entries for dropped columns, the latter omits them. Also mark
* new_colnames entries as to whether they are new since parse time; this
* is the case for entries beyond the length of rte->eref->colnames.
*/
@@ -3168,7 +3170,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* For a relation RTE, we need only print the alias column names if any
- * are different from the underlying "real" names. For a function RTE,
+ * are different from the underlying "real" names. For a function RTE,
* always emit a complete column alias list; this is to protect against
* possible instability of the default column names (eg, from altering
* parameter names). For other RTE types, print if we changed anything OR
@@ -3631,7 +3633,7 @@ identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
/*
* If there's a USING clause, deconstruct the join quals to identify the
- * merged columns. This is a tad painful but if we cannot rely on the
+ * merged columns. This is a tad painful but if we cannot rely on the
* column names, there is no other representation of which columns were
* joined by USING. (Unless the join type is FULL, we can't tell from the
* joinaliasvars list which columns are merged.) Note: we assume that the
@@ -3765,7 +3767,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* We special-case Append and MergeAppend to pretend that the first child
* plan is the OUTER referent; we have to interpret OUTER Vars in their
* tlists according to one of the children, and the first one is the most
- * natural choice. Likewise special-case ModifyTable to pretend that the
+ * natural choice. Likewise special-case ModifyTable to pretend that the
* first child plan is the OUTER referent; this is to support RETURNING
* lists containing references to non-target relations.
*/
@@ -4167,8 +4169,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the passed
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
@@ -4641,7 +4643,7 @@ get_target_list(List *targetList, deparse_context *context,
}
/*
- * Figure out what the result column should be called. In the context
+ * Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
@@ -4863,7 +4865,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno,
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
- * dump it without any decoration. Otherwise, just dump the expression
+ * dump it without any decoration. Otherwise, just dump the expression
* normally.
*/
if (force_colno)
@@ -5558,8 +5560,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
- * print the name of that var instead. When it's not a simple reference,
- * we have to just print the unqualified join column name. (This can only
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
* happen with "dangerous" merged columns in a JOIN USING; we took pains
* previously to make the unqualified column name unique in such cases.)
*
@@ -5587,7 +5589,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* Unnamed join has no refname. (Note: since it's unnamed, there is
* no way the user could have referenced it to create a whole-row Var
- * for it. So we don't have to cover that case below.)
+ * for it. So we don't have to cover that case below.)
*/
Assert(refname == NULL);
}
@@ -5628,7 +5630,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
- * Get the name of a field of an expression of composite type. The
+ * Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
@@ -5638,7 +5640,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
- * expression and attempt to infer the field name from it. We ereport if we
+ * expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
@@ -6003,7 +6005,7 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
@@ -6021,7 +6023,7 @@ get_name_for_var_field(Var *var, int fieldno,
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
+ * appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
@@ -6153,7 +6155,7 @@ get_parameter(Param *param, deparse_context *context)
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
- * the parameter was computed. Note that failing to find a referent isn't
+ * the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
@@ -6631,10 +6633,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If there's a refassgnexpr, we want to print the node in the
- * format "array[subscripts] := refassgnexpr". This is not
+ * format "array[subscripts] := refassgnexpr". This is not
* legal SQL, so decompilation of INSERT or UPDATE statements
* should always use processIndirection as part of the
- * statement-level syntax. We should only see this when
+ * statement-level syntax. We should only see this when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement.
*/
@@ -6793,7 +6795,7 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* We cannot see an already-planned subplan in rule deparsing,
- * only while EXPLAINing a query plan. We don't try to
+ * only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
@@ -6866,14 +6868,14 @@ get_rule_expr(Node *node, deparse_context *context,
* There is no good way to represent a FieldStore as real SQL,
* so decompilation of INSERT or UPDATE statements should
* always use processIndirection as part of the
- * statement-level syntax. We should only get here when
+ * statement-level syntax. We should only get here when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement. The plan case is even harder than
* ordinary rules would be, because the planner tries to
* collapse multiple assignments to the same field or subfield
* into one FieldStore; so we can see a list of target fields
* not just one, and the arguments could be FieldStores
- * themselves. We don't bother to try to print the target
+ * themselves. We don't bother to try to print the target
* field names; we just print the source arguments, with a
* ROW() around them if there's more than one. This isn't
* terribly complete, but it's probably good enough for
@@ -7668,7 +7670,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
{
/*
* Ordered-set aggregates do not use "*" syntax. Also, we needn't
- * worry about inserting VARIADIC. So we can just dump the direct
+ * worry about inserting VARIADIC. So we can just dump the direct
* args as-is.
*/
Assert(!aggref->aggvariadic);
@@ -7810,7 +7812,7 @@ get_coercion_expr(Node *arg, deparse_context *context,
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
- * right above it. Avoid generating redundant output. However, beware of
+ * right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*/
@@ -7892,7 +7894,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype)
/*
* These types are printed without quotes unless they contain
* values that aren't accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends might accept NaN,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
* In reality we only need to defend against infinity and NaN,
@@ -8416,7 +8418,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always print alias. This covers possible
+ * For a function RTE, always print alias. This covers possible
* renaming of the function and/or instability of the
* FigureColname rules for things that aren't simple functions.
* Note we'd need to force it anyway for the columndef list case.
@@ -8672,7 +8674,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
- /* Okay, we need the opclass name. Do we need to qualify it? */
+ /* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
@@ -8967,13 +8969,13 @@ generate_relation_name(Oid relid, List *namespaces)
* generate_function_name
* Compute the name to display for a function specified by OID,
* given that it is being called with the specified actual arg names and
- * types. (Those matter because of ambiguous-function resolution rules.)
+ * types. (Those matter because of ambiguous-function resolution rules.)
*
* If we're dealing with a potentially variadic function (in practice, this
* means a FuncExpr or Aggref, not some other way of calling a function), then
* has_variadic must specify whether variadic arguments have been merged,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output. For non-FuncExpr cases, has_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, has_variadic should be FALSE and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 1ffc0160b7..e932ccf0da 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -72,7 +72,7 @@
* float8 oprjoin (internal, oid, internal, int2, internal);
*
* (Before Postgres 8.4, join estimators had only the first four of these
- * parameters. That signature is still allowed, but deprecated.) The
+ * parameters. That signature is still allowed, but deprecated.) The
* relationship between jointype and sjinfo is explained in the comments for
* clause_selectivity() --- the short version is that jointype is usually
* best ignored in favor of examining sjinfo.
@@ -209,7 +209,7 @@ static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ.
*/
Datum
@@ -273,7 +273,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -296,7 +296,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
- * that seeing whether it returns TRUE is an appropriate test. If you
+ * that seeing whether it returns TRUE is an appropriate test. If you
* don't like this, maybe you shouldn't be using eqsel for your
* operator...)
*/
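The estimate this comment describes reduces to a small computation: take the matched MCV's frequency if the constant is a known common value, else spread the leftover non-null mass across the remaining distinct values. A minimal standalone sketch with invented names (not quoted from the patch; the real var_eq_const also clamps the no-match case so it never exceeds the least common MCV frequency):

    #include <stddef.h>

    double
    mcv_eq_selectivity(const double *mcv_freq, size_t n_mcvs, long match_idx,
                       double nullfrac, double ndistinct)
    {
        double sumcommon = 0.0;

        for (size_t i = 0; i < n_mcvs; i++)
            sumcommon += mcv_freq[i];

        if (match_idx >= 0)
            return mcv_freq[match_idx];     /* constant equals a common value */

        /* spread the remaining non-null mass over the remaining values */
        if (ndistinct > (double) n_mcvs)
            return (1.0 - sumcommon - nullfrac) / (ndistinct - (double) n_mcvs);
        return 0.0;
    }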
@@ -408,7 +408,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -432,7 +432,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* result averaged over all possible values whether common or
* uncommon. (Essentially, we are assuming that the not-yet-known
* comparison value is equally likely to be any of the possible
- * values, regardless of their frequency in the table. Is that a good
+ * values, regardless of their frequency in the table. Is that a good
* idea?)
*/
selec = 1.0 - stats->stanullfrac;
@@ -655,7 +655,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
* essentially using the histogram just as a representative sample. However,
* small histograms are unlikely to be all that representative, so the caller
* should be prepared to fall back on some other estimation approach when the
- * histogram is missing or very small. It may also be prudent to combine this
+ * histogram is missing or very small. It may also be prudent to combine this
* approach with another one when the histogram is small.
*
* If the actual histogram size is not at least min_hist_size, we won't bother
@@ -673,7 +673,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
@@ -786,7 +786,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
*
* If the binary search accesses the first or last histogram
* entry, we try to replace that endpoint with the true column min
- * or max as found by get_actual_variable_range(). This
+ * or max as found by get_actual_variable_range(). This
* ameliorates misestimates when the min or max is moving as a
* result of changes since the last ANALYZE. Note that this could
* result in effectively including MCVs into the histogram that
@@ -890,7 +890,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* Watch out for the possibility that we got a NaN or
- * Infinity from the division. This can happen
+ * Infinity from the division. This can happen
* despite the previous checks, if for example "low"
* is -Infinity.
*/
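Concretely, the guard amounts to replacing a non-finite or out-of-range interpolation fraction with a neutral mid-bin value. A sketch under assumed names:

    #include <math.h>

    double
    safe_binfrac(double val, double low, double high)
    {
        double binfrac = (val - low) / (high - low);

        /* NaN or Infinity can still arise, e.g. when low is -Infinity */
        if (isnan(binfrac) || binfrac < 0.0 || binfrac > 1.0)
            binfrac = 0.5;              /* assume the middle of the bin */
        return binfrac;
    }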
@@ -905,7 +905,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
* Ideally we'd produce an error here, on the grounds that
* the given operator shouldn't have scalarXXsel
* registered as its selectivity func unless we can deal
- * with its operand types. But currently, all manner of
+ * with its operand types. But currently, all manner of
* stuff is invoking scalarXXsel, so give a default
* estimate until that can be fixed.
*/
@@ -931,7 +931,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* The histogram boundaries are only approximate to begin with,
- * and may well be out of date anyway. Therefore, don't believe
+ * and may well be out of date anyway. Therefore, don't believe
* extremely small or large selectivity estimates --- unless we
* got actual current endpoint values from the table.
*/
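A sketch of that clamp; the 0.0001/0.9999 cutoffs are illustrative of the idea rather than quoted from the patch, and have_true_endpoints stands for whether get_actual_variable_range() supplied real endpoints:

    double
    clamp_hist_selec(double sel, int have_true_endpoints)
    {
        if (have_true_endpoints)
        {
            /* trust the estimate; just keep it a valid probability */
            if (sel < 0.0) return 0.0;
            if (sel > 1.0) return 1.0;
            return sel;
        }
        if (sel < 0.0001) return 0.0001;   /* don't believe "never matches" */
        if (sel > 0.9999) return 0.9999;   /* ...or "always matches" */
        return sel;
    }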
@@ -1128,7 +1128,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the correct
+ * positive-match operator and work with that. Set result to the correct
* default estimate, too.
*/
if (negate)
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* Pull out any fixed prefix implied by the pattern, and estimate the
- * fractional selectivity of the remainder of the pattern. Unlike many of
+ * fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
* collation for this step. This is not because we expect the collation
* to make a big difference in the selectivity estimate (it seldom would),
@@ -1332,7 +1332,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -1838,7 +1838,7 @@ scalararraysel(PlannerInfo *root,
/*
* For generic operators, we assume the probability of success is
- * independent for each array element. But for "= ANY" or "<> ALL",
+ * independent for each array element. But for "= ANY" or "<> ALL",
* if the array elements are distinct (which'd typically be the case)
* then the probabilities are disjoint, and we should just sum them.
*
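The two combination rules being contrasted can be written down directly. A sketch with invented names, treating the clause as an OR over the per-element selectivities:

    #include <stddef.h>

    double
    combine_any_selectivity(const double *elem_sel, size_t n, int assume_disjoint)
    {
        double s = assume_disjoint ? 0.0 : 1.0;

        for (size_t i = 0; i < n; i++)
        {
            if (assume_disjoint)
                s += elem_sel[i];           /* disjoint events: just sum */
            else
                s *= (1.0 - elem_sel[i]);   /* independent misses multiply */
        }
        if (!assume_disjoint)
            s = 1.0 - s;                    /* P(at least one element matches) */
        return s > 1.0 ? 1.0 : s;           /* sums can exceed 1: clamp */
    }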
@@ -2253,9 +2253,9 @@ eqjoinsel_inner(Oid operator,
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2287,7 +2287,7 @@ eqjoinsel_inner(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2452,7 +2452,7 @@ eqjoinsel_semi(Oid operator,
/*
* We clamp nd2 to be not more than what we estimate the inner relation's
- * size to be. This is intuitively somewhat reasonable since obviously
+ * size to be. This is intuitively somewhat reasonable since obviously
* there can't be more than that many distinct values coming from the
* inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
* likewise) is that this is the only pathway by which restriction clauses
@@ -2497,9 +2497,9 @@ eqjoinsel_semi(Oid operator,
if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2530,7 +2530,7 @@ eqjoinsel_semi(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2567,7 +2567,7 @@ eqjoinsel_semi(Oid operator,
/*
* Now we need to estimate the fraction of relation 1 that has at
- * least one join partner. We know for certain that the matched MCVs
+ * least one join partner. We know for certain that the matched MCVs
* do, so that gives us a lower bound, but we're really in the dark
* about everything else. Our crude approach is: if nd1 <= nd2 then
* assume all non-null rel1 rows have join partners, else assume for
@@ -3165,11 +3165,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* case (all possible cross-product terms actually appear as groups) since
* very often the grouped-by Vars are highly correlated. Our current approach
* is as follows:
- * 1. Expressions yielding boolean are assumed to contribute two groups,
+ * 1. Expressions yielding boolean are assumed to contribute two groups,
* independently of their content, and are ignored in the subsequent
- * steps. This is mainly because tests like "col IS NULL" break the
+ * steps. This is mainly because tests like "col IS NULL" break the
* heuristic used in step 2 especially badly.
- * 2. Reduce the given expressions to a list of unique Vars used. For
+ * 2. Reduce the given expressions to a list of unique Vars used. For
* example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
* It is clearly correct not to count the same Var more than once.
* It is also reasonable to treat f(x) the same as x: f() cannot
@@ -3179,14 +3179,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* As a special case, if a GROUP BY expression can be matched to an
* expressional index for which we have statistics, then we treat the
* whole expression as though it were just a Var.
- * 3. If the list contains Vars of different relations that are known equal
+ * 3. If the list contains Vars of different relations that are known equal
* due to equivalence classes, then drop all but one of the Vars from each
* known-equal set, keeping the one with smallest estimated # of values
* (since the extra values of the others can't appear in joined rows).
* Note the reason we only consider Vars of different relations is that
* if we considered ones of the same rel, we'd be double-counting the
* restriction selectivity of the equality in the next step.
- * 4. For Vars within a single source rel, we multiply together the numbers
+ * 4. For Vars within a single source rel, we multiply together the numbers
* of values, clamp to the number of rows in the rel (divided by 10 if
* more than one Var), and then multiply by the selectivity of the
* restriction clauses for that rel. When there's more than one Var,
@@ -3197,7 +3197,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* by the restriction selectivity is effectively assuming that the
* restriction clauses are independent of the grouping, which is a crummy
* assumption, but it's hard to do better.
- * 5. If there are Vars from multiple rels, we repeat step 4 for each such
+ * 5. If there are Vars from multiple rels, we repeat step 4 for each such
* rel, and multiply the results together.
* Note that rels not containing grouped Vars are ignored completely, as are
* join clauses. Such rels cannot increase the number of groups, and we
@@ -3228,7 +3228,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
return 1.0;
/*
- * Count groups derived from boolean grouping expressions. For other
+ * Count groups derived from boolean grouping expressions. For other
* expressions, find the unique Vars used, treating an expression as a Var
* if we can find stats for it. For each one, record the statistical
* estimate of number of distinct values (total in its table, without
@@ -3317,7 +3317,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove these
+ * varinfos, plus all other Vars in the same relation. We remove these
* Vars from the newvarinfos list for the next iteration. This is the
* easiest way to group Vars of same rel together.
*/
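Step 4 of the numbered scheme above, taken in isolation, looks roughly like the following sketch (invented names, clamping details simplified):

    #include <stddef.h>

    double
    groups_for_rel(const double *ndistinct, size_t nvars,
                   double rel_rows, double restrict_sel)
    {
        double clamp = rel_rows;
        double g = 1.0;

        if (nvars > 1)
            clamp /= 10.0;                  /* damp the cross-product estimate */
        for (size_t i = 0; i < nvars; i++)
            g *= ndistinct[i];
        if (g > clamp)
            g = clamp;
        return g * restrict_sel;            /* assume restrictions thin groups */
    }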
@@ -3418,11 +3418,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* distribution, so this will have to do for now.
*
* We are passed the number of buckets the executor will use for the given
- * input relation. If the data were perfectly distributed, with the same
+ * input relation. If the data were perfectly distributed, with the same
* number of tuples going into each available bucket, then the bucketsize
* fraction would be 1/nbuckets. But this happy state of affairs will occur
* only if (a) there are at least nbuckets distinct data values, and (b)
- * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
* be nonuniformly occupied. If the other relation in the join has a key
* distribution similar to this one's, then the most-loaded buckets are
* exactly those that will be probed most often. Therefore, the "average"
@@ -3595,7 +3595,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -3950,6 +3950,7 @@ convert_string_datum(Datum value, Oid typid)
xfrmlen = strxfrm(NULL, val, 0);
#endif
#ifdef WIN32
+
/*
* On Windows, strxfrm returns INT_MAX when an error occurs. Instead
* of trying to allocate this much memory (and fail), just return the
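Outside the backend, that defensive pattern looks like the sketch below: probe the transformed length first and treat the INT_MAX error return as "no transform available" (POSIX strdup used as the fallback; allocation-failure handling reduced to returning NULL):

    #include <limits.h>
    #include <stdlib.h>
    #include <string.h>

    char *
    xfrm_or_copy(const char *val)
    {
        size_t xfrmlen = strxfrm(NULL, val, 0);

        /* Windows signals failure with INT_MAX; don't try to allocate that */
        if (xfrmlen == (size_t) INT_MAX)
            return strdup(val);

        char *buf = malloc(xfrmlen + 1);
        if (buf != NULL)
            strxfrm(buf, val, xfrmlen + 1);
        return buf;
    }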
@@ -4178,7 +4179,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
* relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
@@ -4323,7 +4324,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of that
+ * membership. Note that when varRelid isn't zero, only vars of that
* relation are considered "real" vars.
*/
varnos = pull_varnos(basenode);
@@ -4372,13 +4373,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to match
+ * We have an expression in vars of a single relation. Try to match
* it to expressional index columns, in hopes of finding some
* statistics.
*
* XXX it's conceivable that there are multiple matches with different
* index opfamilies; if so, we need to pick one that matches the
- * operator we are estimating for. FIXME later.
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -4580,7 +4581,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
*
* This is probably a harsher restriction than necessary; it's
* certainly OK for the selectivity estimator (which is a C function,
- * and therefore omnipotent anyway) to look at the statistics. But
+ * and therefore omnipotent anyway) to look at the statistics. But
* many selectivity estimators will happily *invoke the operator
* function* to try to work out a good estimate - and that's not OK.
* So for now, don't dig down for stats.
@@ -4633,7 +4634,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
*isdefault = false;
/*
- * Determine the stadistinct value to use. There are cases where we can
+ * Determine the stadistinct value to use. There are cases where we can
* get an estimate even without a pg_statistic entry, or can get a better
* value than is in pg_statistic.
*/
@@ -4757,7 +4758,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
/*
* XXX It's very tempting to try to use the actual column min and max, if
- * we can get them relatively-cheaply with an index probe. However, since
+ * we can get them relatively-cheaply with an index probe. However, since
* this function is called many times during join planning, that could
* have unpleasant effects on planning speed. Need more investigation
* before enabling this.
@@ -5008,7 +5009,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
* and it can be very expensive if a lot of uncommitted rows
* exist at the end of the index (because we'll laboriously
* fetch each one and reject it). What seems like a good
- * compromise is to use SnapshotDirty. That will accept
+ * compromise is to use SnapshotDirty. That will accept
* uncommitted rows, and thus avoid fetching multiple heap
* tuples in this scenario. On the other hand, it will reject
* known-dead rows, and thus not give a bogus answer when the
@@ -5147,7 +5148,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids)
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
@@ -5399,7 +5400,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
* together with info about MCVs and NULLs.
*
* We use the >= and < operators from the specified btree opfamily to do the
- * estimation. The given variable and Const must be of the associated
+ * estimation. The given variable and Const must be of the associated
* datatype.
*
* XXX Note: we make use of the upper bound to estimate operator selectivity
@@ -5458,7 +5459,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
@@ -5695,7 +5696,7 @@ byte_increment(unsigned char *ptr, int len)
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C collation.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -5952,7 +5953,7 @@ string_to_bytea_const(const char *str, size_t str_len)
* genericcostestimate is a general-purpose estimator that can be used for
* most index types. In some cases we use genericcostestimate as the base
* code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function. To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
* genericcostestimate return a number of intermediate values as well as
* its preliminary estimates of the output cost values. The GenericCosts
* struct includes all these values.
@@ -6072,7 +6073,7 @@ genericcostestimate(PlannerInfo *root,
*
* In practice access to upper index levels is often nearly free because
* those tend to stay in cache under load; moreover, the cost involved is
- * highly dependent on index type. We therefore ignore such costs here
+ * highly dependent on index type. We therefore ignore such costs here
* and leave it to the caller to add a suitable charge if needed.
*/
if (index->pages > 1 && index->tuples > 1)
@@ -6091,9 +6092,9 @@ genericcostestimate(PlannerInfo *root,
* The above calculations are all per-index-scan. However, if we are in a
* nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
* the potential for cache effects to reduce the number of disk page
- * fetches needed. We want to estimate the average per-scan I/O cost in
+ * fetches needed. We want to estimate the average per-scan I/O cost in
* the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
@@ -6140,7 +6141,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases. Similarly add in costs for any index
* ORDER BY expressions.
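As a rough sketch, the per-scan CPU charge described here is the following (names invented; the real function also folds in setup costs for reducing runtime keys):

    double
    index_cpu_cost(double num_index_tuples, int num_qual_ops, double num_sa_scans,
                   double cpu_index_tuple_cost, double cpu_operator_cost)
    {
        double per_tuple = cpu_index_tuple_cost + num_qual_ops * cpu_operator_cost;

        /* numIndexTuples is per-scan, so scale by the number of SA scans */
        return num_index_tuples * num_sa_scans * per_tuple;
    }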
@@ -6187,16 +6188,16 @@ genericcostestimate(PlannerInfo *root,
* ANDing the index predicate with the explicitly given indexquals produces
* a more accurate idea of the index's selectivity. However, we need to be
* careful not to insert redundant clauses, because clauselist_selectivity()
- * is easily fooled into computing a too-low selectivity estimate. Our
+ * is easily fooled into computing a too-low selectivity estimate. Our
* approach is to add only the predicate clause(s) that cannot be proven to
- * be implied by the given indexquals. This successfully handles cases such
+ * be implied by the given indexquals. This successfully handles cases such
* as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
* There are many other cases where we won't detect redundancy, leading to a
* too-low selectivity estimate, which will bias the system in favor of using
- * partial indexes where possible. That is not necessarily bad though.
+ * partial indexes where possible. That is not necessarily bad though.
*
* Note that indexQuals contains RestrictInfo nodes while the indpred
- * does not, so the output list will be mixed. This is OK for both
+ * does not, so the output list will be mixed. This is OK for both
* predicate_implied_by() and clauselist_selectivity(), but might be
* problematic if the result were passed to other things.
*/
@@ -6255,7 +6256,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexquals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6594,7 +6595,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
* because the hash AM makes sure that's always one page.
*
* Likewise, we could consider charging some CPU for each index tuple in
- * the bucket, if we knew how many there were. But the per-tuple cost is
+ * the bucket, if we knew how many there were. But the per-tuple cost is
* just a hash value comparison, not a general datatype-dependent
* comparison, so any such charge ought to be quite a bit less than
* cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6652,7 +6653,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6714,7 +6715,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
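A sketch of the descent charge that this comment (and its btree and gist counterparts) describes, with variable names assumed rather than quoted:

    #include <math.h>

    double
    descent_cost(double ntuples, double num_sa_scans, double cpu_operator_cost)
    {
        if (ntuples <= 1.0)
            return 0.0;                 /* avoid log(0) */
        /* natural log, per the "log(N) not log2(N)" remark above */
        return ceil(log(ntuples)) * cpu_operator_cost * num_sa_scans;
    }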
@@ -6791,7 +6792,7 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
/*
* Get the operator's strategy number and declared input data types within
- * the index opfamily. (We don't need the latter, but we use
+ * the index opfamily. (We don't need the latter, but we use
* get_op_opfamily_properties because it will throw error if it fails to
* find a matching pg_amop entry.)
*/
@@ -6937,7 +6938,7 @@ gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, OpExpr *clause,
* each of which involves one value from the RHS array, plus all the
* non-array quals (if any). To model this, we average the counts across
* the RHS elements, and add the averages to the counts in *counts (which
- * correspond to per-indexscan costs). We also multiply counts->arrayScans
+ * correspond to per-indexscan costs). We also multiply counts->arrayScans
* by N, causing gincostestimate to scale up its estimates accordingly.
*/
static bool
@@ -7107,7 +7108,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* nPendingPages can be trusted, but the other fields are as of the last
- * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
+ * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
* growth since then. If the fields are zero (implying no VACUUM at all,
* and an index created pre-9.1), assume all pages are entry pages.
*/
@@ -7252,7 +7253,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* Add an estimate of entry pages read by partial match algorithm. It's a
- * scan over leaf pages in entry tree. We haven't any useful stats here,
+ * scan over leaf pages in entry tree. We haven't any useful stats here,
* so estimate it as proportion.
*/
entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries);
@@ -7294,17 +7295,17 @@ gincostestimate(PG_FUNCTION_ARGS)
*
* We assume every entry to have the same number of items, and that there
* is no overlap between them. (XXX: tsvector and array opclasses collect
- * statistics on the frequency of individual keys; it would be nice to
- * use those here.)
+ * statistics on the frequency of individual keys; it would be nice to use
+ * those here.)
*/
dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
/*
- * If there is a lot of overlap among the entries, in particular if one
- * of the entries is very frequent, the above calculation can grossly
- * under-estimate. As a simple cross-check, calculate a lower bound
- * based on the overall selectivity of the quals. At a minimum, we must
- * read one item pointer for each matching entry.
+ * If there is a lot of overlap among the entries, in particular if one of
+ * the entries is very frequent, the above calculation can grossly
+ * under-estimate. As a simple cross-check, calculate a lower bound based
+ * on the overall selectivity of the quals. At a minimum, we must read
+ * one item pointer for each matching entry.
*
* The width of each item pointer varies, based on the level of
* compression. We don't have statistics on that, but an average of
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index efc1e9b992..11007c6d89 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -41,7 +41,7 @@
#error -ffast-math is known to break this code
#endif
-#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
+#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
#ifndef INT64_MAX
#define INT64_MAX INT64CONST(0x7FFFFFFFFFFFFFFF)
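SAMESIGN is the usual sign-comparison trick for detecting int64 overflow after the fact. A sketch of the pattern its callers in this file follow (mirroring the historical style, which tolerates that the signed addition itself is undefined on overflow in ISO C):

    #include <stdint.h>

    #define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))

    int
    add_int64_checked(int64_t a, int64_t b, int64_t *result)
    {
        int64_t r = a + b;  /* strictly, signed overflow here is UB in ISO C */

        /* overflow is only possible when both inputs share a sign, and it
         * always flips the apparent sign of the result */
        if (SAMESIGN(a, b) && !SAMESIGN(r, a))
            return -1;
        *result = r;
        return 0;
    }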
@@ -391,7 +391,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
@@ -488,7 +488,7 @@ timestamptz_in(PG_FUNCTION_ARGS)
* if it's acceptable. Otherwise, an error is thrown.
*/
static int
-parse_sane_timezone(struct pg_tm *tm, text *zone)
+parse_sane_timezone(struct pg_tm * tm, text *zone)
{
char tzname[TZ_STRLEN_MAX + 1];
int rt;
@@ -497,7 +497,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
text_to_cstring_buffer(zone, tzname, sizeof(tzname));
/*
- * Look up the requested timezone. First we try to interpret it as a
+ * Look up the requested timezone. First we try to interpret it as a
* numeric timezone specification; if DecodeTimezone decides it doesn't
* like the format, we look in the date token table (to handle cases like
* "EST"), and if that also fails, we look in the timezone database (to
@@ -507,7 +507,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
* offset abbreviations.)
*
* Note pg_tzset happily parses numeric input that DecodeTimezone would
- * reject. To avoid having it accept input that would otherwise be seen
+ * reject. To avoid having it accept input that would otherwise be seen
* as invalid, it's enough to disallow having a digit in the first
* position of our input string.
*/
@@ -528,7 +528,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
if (rt == DTERR_TZDISP_OVERFLOW)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("numeric time zone \"%s\" out of range", tzname)));
+ errmsg("numeric time zone \"%s\" out of range", tzname)));
else if (rt != DTERR_BAD_FORMAT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -997,7 +997,7 @@ interval_send(PG_FUNCTION_ARGS)
/*
* The interval typmod stores a "range" in its high 16 bits and a "precision"
- * in its low 16 bits. Both contribute to defining the resolution of the
+ * in its low 16 bits. Both contribute to defining the resolution of the
* type. Range addresses resolution granules larger than one second, and
* precision specifies resolution below one second. This representation can
* express all SQL standard resolutions, but we implement them all in terms of
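A sketch of that 16/16 packing (masks assumed here; the backend accesses these bits through its INTERVAL_RANGE and INTERVAL_PRECISION macros):

    #include <stdint.h>

    static inline int32_t
    interval_typmod_range(int32_t typmod)
    {
        return (typmod >> 16) & 0x7FFF;     /* high 16 bits: range */
    }

    static inline int32_t
    interval_typmod_precision(int32_t typmod)
    {
        return typmod & 0xFFFF;             /* low 16 bits: precision */
    }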
@@ -1205,7 +1205,7 @@ interval_transform(PG_FUNCTION_ARGS)
/*
* Temporally-smaller fields occupy higher positions in the range
- * bitmap. Since only the temporally-smallest bit matters for length
+ * bitmap. Since only the temporally-smallest bit matters for length
* coercion purposes, we compare the last-set bits in the ranges.
* Precision, which is to say, sub-second precision, only affects
* ranges that include SECOND.
@@ -1294,7 +1294,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
* that fields to the right of the last one specified are zeroed out,
* but those to the left of it remain valid. Thus for example there
* is no operational difference between INTERVAL YEAR TO MONTH and
- * INTERVAL MONTH. In some cases we could meaningfully enforce that
+ * INTERVAL MONTH. In some cases we could meaningfully enforce that
* higher-order fields are zero; for example INTERVAL DAY could reject
* nonzero "month" field. However that seems a bit pointless when we
* can't do it consistently. (We cannot enforce a range limit on the
@@ -1304,9 +1304,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
*
* Note: before PG 8.4 we interpreted a limited set of fields as
* actually causing a "modulo" operation on a given value, potentially
- * losing high-order as well as low-order information. But there is
+ * losing high-order as well as low-order information. But there is
* no support for such behavior in the standard, and it seems fairly
- * undesirable on data consistency grounds anyway. Now we only
+ * undesirable on data consistency grounds anyway. Now we only
* perform truncation or rounding of low-order fields.
*/
if (range == INTERVAL_FULL_RANGE)
@@ -1426,7 +1426,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
+ * values. On most platforms, rint() will implement
* round-to-nearest-even, but the integer code always rounds up
* (away from zero). Is it worth trying to be consistent?
*/
@@ -1470,7 +1470,7 @@ make_interval(PG_FUNCTION_ARGS)
Interval *result;
/*
- * Reject out-of-range inputs. We really ought to check the integer
+ * Reject out-of-range inputs. We really ought to check the integer
* inputs as well, but it's not entirely clear what limits to apply.
*/
if (isinf(secs) || isnan(secs))
@@ -1718,7 +1718,7 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
@@ -1862,7 +1862,7 @@ recalc_t:
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
* coding avoids hardwiring any assumptions about the width of pg_time_t,
* so it should behave sanely on machines without int64.
*/
@@ -2010,7 +2010,7 @@ recalc:
int
tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span)
{
- double total_months = (double)tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
+ double total_months = (double) tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
if (total_months > INT_MAX || total_months < INT_MIN)
return -1;
@@ -4888,7 +4888,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
@@ -5061,7 +5061,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c
index df47105d0b..bdef47f093 100644
--- a/src/backend/utils/adt/tsginidx.c
+++ b/src/backend/utils/adt/tsginidx.c
@@ -204,9 +204,12 @@ checkcondition_gin(void *checkval, QueryOperand *val)
*/
static GinTernaryValue
TS_execute_ternary(QueryItem *curitem, void *checkval,
- GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val))
+ GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val))
{
- GinTernaryValue val1, val2, result;
+ GinTernaryValue val1,
+ val2,
+ result;
+
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
@@ -223,7 +226,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval,
case OP_AND:
val1 = TS_execute_ternary(curitem + curitem->qoperator.left,
- checkval, chkcond);
+ checkval, chkcond);
if (val1 == GIN_FALSE)
return GIN_FALSE;
val2 = TS_execute_ternary(curitem + 1, checkval, chkcond);
@@ -236,7 +239,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval,
case OP_OR:
val1 = TS_execute_ternary(curitem + curitem->qoperator.left,
- checkval, chkcond);
+ checkval, chkcond);
if (val1 == GIN_TRUE)
return GIN_TRUE;
val2 = TS_execute_ternary(curitem + 1, checkval, chkcond);
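The ternary combination the AND and OR arms implement, minus the short-circuit returns, is just the following (types and names invented for the sketch):

    typedef enum { T_FALSE, T_MAYBE, T_TRUE } Ternary;

    static Ternary
    ternary_and(Ternary a, Ternary b)
    {
        if (a == T_FALSE || b == T_FALSE)
            return T_FALSE;                 /* FALSE dominates... */
        if (a == T_MAYBE || b == T_MAYBE)
            return T_MAYBE;                 /* ...then MAYBE... */
        return T_TRUE;                      /* ...TRUE only if both are TRUE */
    }

OR is the dual, with TRUE dominating, which is why the recursion above can return as soon as val1 alone settles the answer.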
@@ -339,7 +342,7 @@ gin_tsquery_triconsistent(PG_FUNCTION_ARGS)
* Formerly, gin_extract_tsvector had only two arguments. Now it has three,
* but we still need a pg_proc entry with two args to support reloading
* pre-9.1 contrib/tsearch2 opclass declarations. This compatibility
- * function should go away eventually. (Note: you might say "hey, but the
+ * function should go away eventually. (Note: you might say "hey, but the
* code above is only *using* two args, so let's just declare it that way".
* If you try that you'll find the opr_sanity regression test complains.)
*/
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 502ca44e04..72b9f99dbc 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -257,7 +257,7 @@ bpcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
@@ -584,7 +584,7 @@ varchar_transform(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index aab4897f61..f8d9fec34e 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -591,7 +591,7 @@ textlen(PG_FUNCTION_ARGS)
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
@@ -763,7 +763,7 @@ text_substr_no_len(PG_FUNCTION_ARGS)
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*
@@ -1113,7 +1113,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* searched (t1) and the "needle" is the pattern being sought (t2).
*
* If the needle is empty or bigger than the haystack then there is no
- * point in wasting cycles initializing the table. We also choose not to
+ * point in wasting cycles initializing the table. We also choose not to
* use B-M-H for needles of length 1, since the skip table can't possibly
* save anything in that case.
*/
@@ -1129,7 +1129,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* declaration of TextPositionState allows up to 256 elements, but for
* short search problems we don't really want to have to initialize so
* many elements --- it would take too long in comparison to the
- * actual search time. So we choose a useful skip table size based on
+ * actual search time. So we choose a useful skip table size based on
* the haystack length minus the needle length. The closer the needle
* length is to the haystack length the less useful skipping becomes.
*
@@ -1161,7 +1161,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
state->skiptable[i] = len2;
/*
- * Now examine the needle. For each character except the last one,
+ * Now examine the needle. For each character except the last one,
* set the corresponding table element to the appropriate skip
* distance. Note that when two characters share the same skip table
* entry, the one later in the needle must determine the skip
@@ -1249,11 +1249,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[(unsigned char) *hptr & skiptablemask];
@@ -1305,11 +1305,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[*hptr & skiptablemask];
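For reference, the classic Boyer-Moore-Horspool loop these two hunks describe, reduced to a standalone byte-string version; the backend additionally sizes the skip table by haystack length and masks characters into it, as the comments above explain:

    #include <stddef.h>
    #include <string.h>

    const char *
    bmh_search(const char *hay, size_t n, const char *needle, size_t m)
    {
        size_t skip[256];

        if (m == 0 || m > n)
            return NULL;
        for (size_t i = 0; i < 256; i++)
            skip[i] = m;                    /* default: whole needle length */
        for (size_t i = 0; i + 1 < m; i++)
            skip[(unsigned char) needle[i]] = m - 1 - i;

        for (size_t pos = 0; pos + m <= n; )
        {
            if (memcmp(hay + pos, needle, m) == 0)
                return hay + pos;           /* match found */
            /* advance by the skip distance of the window's last byte */
            pos += skip[(unsigned char) hay[pos + m - 1]];
        }
        return NULL;
    }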
@@ -1344,7 +1344,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
/*
* Unfortunately, there is no strncoll(), so in the non-C locale case we
- * have to do some memory copying. This turns out to be significantly
+ * have to do some memory copying. This turns out to be significantly
* slower, so we optimize the case where LC_COLLATE is C. We also try to
* optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
@@ -2334,7 +2334,7 @@ textToQualifiedNameList(text *textval)
* SplitIdentifierString --- parse a string containing identifiers
*
* This is the guts of textToQualifiedNameList, and is exported for use in
- * other situations such as parsing GUC variables. In the GUC case, it's
+ * other situations such as parsing GUC variables. In the GUC case, it's
* important to avoid memory leaks, so the API is designed to minimize the
* amount of stuff that needs to be allocated and freed.
*
@@ -2342,7 +2342,7 @@ textToQualifiedNameList(text *textval)
* rawstring: the input string; must be overwritable! On return, it's
* been modified to contain the separated identifiers.
* separator: the separator punctuation expected between identifiers
- * (typically '.' or ','). Whitespace may also appear around
+ * (typically '.' or ','). Whitespace may also appear around
* identifiers.
* Outputs:
* namelist: filled with a palloc'd list of pointers to identifiers within
@@ -2411,7 +2411,7 @@ SplitIdentifierString(char *rawstring, char separator,
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the string
- * length. This is not a problem given the current implementation
+ * length. This is not a problem given the current implementation
* of downcase_truncate_identifier, but we'll probably have to do
* something about this someday.
*/
@@ -2468,7 +2468,7 @@ SplitIdentifierString(char *rawstring, char separator,
* Inputs:
* rawstring: the input string; must be modifiable!
* separator: the separator punctuation expected between directories
- * (typically ',' or ';'). Whitespace may also appear around
+ * (typically ',' or ';'). Whitespace may also appear around
* directories.
* Outputs:
* namelist: filled with a palloc'd list of directory names.
@@ -2875,7 +2875,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2958,7 +2958,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Note so and eo
+ * Copy the text that is back reference of regexp. Note so and eo
* are counted in characters not bytes.
*/
char *chunk_start;
@@ -3836,7 +3836,7 @@ concat_internal(const char *sepstr, int argidx,
/*
* Non-null argument had better be an array. We assume that any call
* context that could let get_fn_expr_variadic return true will have
- * checked that a VARIADIC-labeled parameter actually is an array. So
+ * checked that a VARIADIC-labeled parameter actually is an array. So
* it should be okay to just Assert that it's an array rather than
* doing a full-fledged error check.
*/
@@ -4237,7 +4237,7 @@ text_format(PG_FUNCTION_ARGS)
/*
* Get the appropriate typOutput function, reusing previous one if
- * same type as previous argument. That's particularly useful in the
+ * same type as previous argument. That's particularly useful in the
* variadic-array case, but often saves work even for ordinary calls.
*/
if (typid != prev_type)
@@ -4329,12 +4329,12 @@ text_format_parse_digits(const char **ptr, const char *end_ptr, int *value)
*
* Inputs are start_ptr (the position after '%') and end_ptr (string end + 1).
* Output parameters:
- * argpos: argument position for value to be printed. -1 means unspecified.
- * widthpos: argument position for width. Zero means the argument position
+ * argpos: argument position for value to be printed. -1 means unspecified.
+ * widthpos: argument position for width. Zero means the argument position
* was unspecified (ie, take the next arg) and -1 means no width
* argument (width was omitted or specified as a constant).
* flags: bitmask of flags.
- * width: directly-specified width value. Zero means the width was omitted
+ * width: directly-specified width value. Zero means the width was omitted
* (note it's not necessary to distinguish this case from an explicit
* zero width value).
*
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 765469c623..422be69bd6 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -286,7 +286,7 @@ xml_out(PG_FUNCTION_ARGS)
xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is because we
+ * xml_out removes the encoding property in all cases. This is because we
* cannot control from here whether the datum will be converted to a
* different client encoding, so we'd do more harm than good by including
* it.
@@ -454,7 +454,7 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
@@ -589,7 +589,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
/*
* We first evaluate all the arguments, then start up libxml and create
- * the result. This avoids issues if one of the arguments involves a call
+ * the result. This avoids issues if one of the arguments involves a call
* to some other function or subsystem that wants to use libxml on its own
* terms.
*/
@@ -926,7 +926,7 @@ pg_xml_init_library(void)
* pg_xml_init --- set up for use of libxml and register an error handler
*
* This should be called by each function that is about to use libxml
- * facilities and requires error handling. It initializes libxml with
+ * facilities and requires error handling. It initializes libxml with
* pg_xml_init_library() and establishes our libxml error handler.
*
* strictness determines which errors are reported and which are ignored.
@@ -972,7 +972,7 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Verify that xmlSetStructuredErrorFunc set the context variable we
- * expected it to. If not, the error context pointer we just saved is not
+ * expected it to. If not, the error context pointer we just saved is not
* the correct thing to restore, and since that leaves us without a way to
* restore the context in pg_xml_done, we must fail.
*
@@ -1129,7 +1129,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
int utf8len;
/*
- * Only initialize libxml. We don't need error handling here, but we do
+ * Only initialize libxml. We don't need error handling here, but we do
* need to make sure libxml is initialized before calling any of its
* functions. Note that this is safe (and a no-op) if caller has already
* done pg_xml_init().
@@ -1272,7 +1272,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1496,7 +1496,7 @@ xml_pstrdup(const char *string)
/*
* xmlPgEntityLoader --- entity loader callback function
*
- * Silently prevent any external entity URL from being loaded. We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
* to throw an error, so instead make the entity appear to expand to an empty
* string.
*
@@ -1665,8 +1665,8 @@ xml_errorHandler(void *data, xmlErrorPtr error)
chopStringInfoNewlines(errorBuf);
/*
- * Legacy error handling mode. err_occurred is never set, we just add the
- * message to err_buf. This mode exists because the xml2 contrib module
+ * Legacy error handling mode. err_occurred is never set, we just add the
+ * message to err_buf. This mode exists because the xml2 contrib module
* uses our error-handling infrastructure, but we don't want to change its
* behaviour since it's deprecated anyway. This is also why we don't
* distinguish between notices, warnings and errors here --- the old-style
@@ -1887,7 +1887,7 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
static char *
unicode_to_sqlchar(pg_wchar c)
{
- char utf8string[8]; /* need room for trailing zero */
+ char utf8string[8]; /* need room for trailing zero */
char *result;
memset(utf8string, 0, sizeof(utf8string));
@@ -1939,8 +1939,8 @@ map_xml_name_to_sql_identifier(char *name)
*
* When xml_escape_strings is true, then certain characters in string
* values are replaced by entity references (&lt; etc.), as specified
- * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
- * wanted. The false case is mainly useful when the resulting value
+ * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
+ * wanted. The false case is mainly useful when the resulting value
* is used with xmlTextWriterWriteAttribute() to write out an
* attribute, because that function does the escaping itself.
*/
@@ -2221,13 +2221,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -2236,7 +2236,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -2400,12 +2400,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -2937,7 +2937,7 @@ map_multipart_sql_identifier_to_xml_name(char *a, char *b, char *c, char *d)
if (a)
appendStringInfoString(&result,
- map_sql_identifier_to_xml_name(a, true, true));
+ map_sql_identifier_to_xml_name(a, true, true));
if (b)
appendStringInfo(&result, ".%s",
map_sql_identifier_to_xml_name(b, true, true));
@@ -3348,7 +3348,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* SQL/XML:2008 sections 9.5 and 9.6.
*
* (The distinction between 9.5 and 9.6 is basically that 9.6 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.5 doesn't appear to be required anywhere.)
*/
static const char *
@@ -3362,11 +3362,11 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
if (typeoid == XMLOID)
{
appendStringInfoString(&result,
- "<xsd:complexType mixed=\"true\">\n"
- " <xsd:sequence>\n"
- " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n"
- " </xsd:sequence>\n"
- "</xsd:complexType>\n");
+ "<xsd:complexType mixed=\"true\">\n"
+ " <xsd:sequence>\n"
+ " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n"
+ " </xsd:sequence>\n"
+ "</xsd:complexType>\n");
}
else
{
@@ -3440,12 +3440,12 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case FLOAT8OID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n");
break;
case BOOLOID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n");
break;
case TIMEOID:
@@ -3496,9 +3496,9 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case DATEOID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:date\">\n"
- " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n"
- " </xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:date\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n"
+ " </xsd:restriction>\n");
break;
default:
@@ -3524,7 +3524,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2008 section 9.10.
+ * SPI cursor. See also SQL/XML:2008 section 9.10.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
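Before moving on from xml.c: the identifier-mapping routines referenced throughout these hunks follow SQL/XML's convention of escaping any character not acceptable in an XML Name as a "_xHHHH_" hexadecimal sequence. A minimal, self-contained sketch of that convention (ASCII-only; the function name and the simplified character set are mine, not the backend's exact logic, which also handles full Unicode and "fully escaped" mode):

    #include <stdio.h>

    /*
     * Sketch: map an SQL identifier to an XML Name by escaping anything
     * outside a conservative ASCII subset as "_xHHHH_" (per SQL/XML).
     */
    static void
    print_xml_name(const char *ident)
    {
        for (const char *p = ident; *p; p++)
        {
            char c = *p;

            if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
                (c >= '0' && c <= '9') || c == '-' || c == '.')
                putchar(c);
            else
                printf("_x%04X_", (unsigned char) c);
        }
        putchar('\n');
    }

    int
    main(void)
    {
        print_xml_name("my table");     /* prints my_x0020_table */
        return 0;
    }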
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 516f40ba84..5fcf0dd7c7 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -46,7 +46,7 @@ typedef struct
* Flush all cache entries when pg_attribute is updated.
*
* When pg_attribute is updated, we must flush the cache entry at least
- * for that attribute. Currently, we just flush them all. Since attribute
+ * for that attribute. Currently, we just flush them all. Since attribute
* options are not currently used in performance-critical paths (such as
* query execution), this seems OK.
*/
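The flush-them-all approach described above is easy to sketch: a syscache invalidation callback walks the backend-local hash table and discards every entry. The shape below is an assumption based on this file's conventions (the entry and hash-table names mirror attoptcache.c; details may differ):

    /* A sketch of a "flush everything" invalidation callback. */
    static void
    InvalidateAllEntriesCallback(Datum arg, int cacheid, uint32 hashvalue)
    {
        HASH_SEQ_STATUS status;
        AttoptCacheEntry *entry;

        hash_seq_init(&status, AttoptCacheHash);
        while ((entry = (AttoptCacheEntry *) hash_seq_search(&status)) != NULL)
        {
            if (entry->opts)
                pfree(entry->opts);     /* free the parsed options */
            if (hash_search(AttoptCacheHash,
                            (void *) &entry->key,
                            HASH_REMOVE, NULL) == NULL)
                elog(ERROR, "hash table corrupted");
        }
    }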
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index d17b6b0ba5..954b435bff 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -836,9 +836,10 @@ RehashCatCache(CatCache *cp)
for (i = 0; i < cp->cc_nbuckets; i++)
{
dlist_mutable_iter iter;
+
dlist_foreach_modify(iter, &cp->cc_bucket[i])
{
- CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
+ CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
dlist_delete(iter.cur);
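The pattern in this hunk generalizes: allocate a bucket array twice as large, recompute each entry's bucket from its cached hash value, and relink in place; no entry is copied. A freestanding sketch of the same idea with a singly linked chain (names illustrative, not the backend's):

    #include <stdlib.h>

    typedef struct Entry
    {
        struct Entry *next;
        unsigned int  hash;         /* cached hash value */
    } Entry;

    /* Double the bucket array and relink every entry in place. */
    static Entry **
    rehash(Entry **buckets, int *nbuckets)
    {
        int     newn = *nbuckets * 2;
        Entry **newb = calloc(newn, sizeof(Entry *));

        if (newb == NULL)
            abort();                /* out of memory */
        for (int i = 0; i < *nbuckets; i++)
        {
            Entry *e = buckets[i];

            while (e != NULL)
            {
                Entry *next = e->next;
                int    idx = e->hash % newn;    /* new bucket index */

                e->next = newb[idx];
                newb[idx] = e;
                e = next;
            }
        }
        free(buckets);
        *nbuckets = newn;
        return newb;
    }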
@@ -856,7 +857,7 @@ RehashCatCache(CatCache *cp)
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*/
#ifdef CACHEDEBUG
@@ -1081,7 +1082,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
* if necessary (on the first access to a particular cache).
*
* The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
* ReleaseCatCache() when done with it.
*
* The search key values should be expressed as Datums of the key columns'
@@ -1214,8 +1215,8 @@ SearchCatCache(CatCache *cache,
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
@@ -1254,7 +1255,7 @@ SearchCatCache(CatCache *cache,
*
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
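Negative entries, mentioned here, are what make repeated failed lookups cheap: the cache remembers "this key has no tuple," so the next probe returns NULL without touching the catalog. A tiny sketch of the idea (illustrative names only; as the comment notes, bootstrap mode must skip creating such entries, since nothing would ever invalidate them):

    typedef struct CacheEntry
    {
        unsigned int hash;      /* hash of the search key */
        int          negative;  /* 1 = "known not to exist" marker */
        void        *tuple;     /* NULL for negative entries */
    } CacheEntry;

    static void *
    cached_lookup(CacheEntry *e, void *(*scan_catalog)(unsigned int key),
                  unsigned int key)
    {
        if (e->negative)
            return NULL;            /* remembered miss: no catalog access */
        if (e->tuple == NULL)
        {
            e->tuple = scan_catalog(key);
            if (e->tuple == NULL)
                e->negative = 1;    /* remember the miss for next time */
        }
        return e->tuple;
    }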
@@ -1584,7 +1585,7 @@ SearchCatCacheList(CatCache *cache,
/*
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1673,7 +1674,7 @@ ReleaseCatCacheList(CatCList *list)
/*
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
@@ -1724,8 +1725,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
CacheHdr->ch_ntup++;
/*
- * If the hash table has become too full, enlarge the buckets array.
- * Quite arbitrarily, we enlarge when fill factor > 2.
+ * If the hash table has become too full, enlarge the buckets array. Quite
+ * arbitrarily, we enlarge when fill factor > 2.
*/
if (cache->cc_ntup > cache->cc_nbuckets * 2)
RehashCatCache(cache);
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 115bcac5d2..59714697c6 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -29,23 +29,23 @@
*
* If we successfully complete the transaction, we have to broadcast all
* these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
* to record the transaction commit before sending SI messages, otherwise
* the other backends won't see our updated tuples as good.
*
* When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
* to the pending lists of the parent transaction.
*
* In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
* two events, delete + insert, for simplicity. (If the update doesn't
* change the tuple hash value, catcache.c optimizes this into one event.)
*
* We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. We do, however,
+ * on tuples in relations that have associated catcaches. We do, however,
* have to register every operation on every tuple that *could* be in a
- * catcache, whether or not it currently is in our cache. Also, if the
+ * catcache, whether or not it currently is in our cache. Also, if the
* tuple is in a relation that has multiple catcaches, we need to register
* an invalidation message for each such catcache. catcache.c's
* PrepareToInvalidateCacheTuple() routine provides the knowledge of which
@@ -113,7 +113,7 @@
/*
* To minimize palloc traffic, we keep pending requests in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). All request types can be stored as SharedInvalidationMessage
+ * array). All request types can be stored as SharedInvalidationMessage
* records. The ordering of requests within a list is never significant.
*/
typedef struct InvalidationChunk
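The chunk structure implementing this "expansible array" is worth seeing in full. The sketch below follows the shape described above, with chunks chained together and each new chunk larger than the last; the initial size and growth policy are illustrative:

    typedef struct InvalidationChunkSketch
    {
        struct InvalidationChunkSketch *next;   /* list link */
        int         nitems;                     /* items currently stored */
        int         maxitems;                   /* capacity of this chunk */
        SharedInvalidationMessage msgs[1];      /* variable-length array */
    } InvalidationChunkSketch;

    /* Append a message, starting a new, larger chunk when the head is full. */
    static void
    AddMessage(InvalidationChunkSketch **listHdr,
               const SharedInvalidationMessage *msg)
    {
        InvalidationChunkSketch *chunk = *listHdr;

        if (chunk == NULL || chunk->nitems >= chunk->maxitems)
        {
            int newmax = chunk ? chunk->maxitems * 2 : 32;
            InvalidationChunkSketch *newchunk = (InvalidationChunkSketch *)
                palloc(offsetof(InvalidationChunkSketch, msgs) +
                       newmax * sizeof(SharedInvalidationMessage));

            newchunk->next = chunk;
            newchunk->nitems = 0;
            newchunk->maxitems = newmax;
            *listHdr = newchunk;
            chunk = newchunk;
        }
        chunk->msgs[chunk->nitems++] = *msg;
    }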
@@ -650,7 +650,7 @@ AcceptInvalidationMessages(void)
*
* If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
* slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression tests that way. It's useful to try
+ * trying to run the entire regression tests that way. It's useful to try
* a few simple tests, to make sure that cache reload isn't subject to
* internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
@@ -863,12 +863,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
* If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
* to the shared invalidation message queue. Note that these will be read
* not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
* we can skip immediate local processing of anything that's still in
* CurrentCmdInvalidMsgs, and just send that list out too.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
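In code form, the commit/abort asymmetry described above comes down to which list is processed locally and whether anything is broadcast. A condensed sketch, using this file's function names (the real AtEOXact_Inval also handles relcache-init-file bookkeeping omitted here):

    static void
    AtEOXact_Inval_sketch(bool isCommit)
    {
        if (isCommit)
        {
            /*
             * Broadcast both lists; our own backend will re-read them at
             * the next transaction start via AcceptInvalidationMessages().
             */
            AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
                                       &transInvalInfo->CurrentCmdInvalidMsgs);
            ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
                                             SendSharedInvalidMessages);
        }
        else
        {
            /*
             * Aborting: flush our own caches only; other backends never saw
             * our tuples, so nothing needs to be sent.
             */
            ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
                                        LocalExecuteInvalidationMessage);
        }
        transInvalInfo = NULL;
    }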
@@ -927,11 +927,11 @@ AtEOXact_Inval(bool isCommit)
* parent's PriorCmdInvalidMsgs list.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
* We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
* touched the caches yet.
*
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
* here, since CurTransactionContext is about to be emptied anyway
* (if aborting). Beware of the possibility of aborting the same nesting
* level twice, though.
@@ -987,7 +987,7 @@ AtEOSubXact_Inval(bool isCommit)
* in a transaction.
*
* Here, we send no messages to the shared queue, since we don't know yet if
- * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
+ * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
* list, so as to flush our caches of any entries we have outdated in the
* current command. We then move the current-cmd list over to become part
* of the prior-cmds list.
@@ -1094,7 +1094,7 @@ CacheInvalidateHeapTuple(Relation relation,
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
* relations can't have schema changes after bootstrap, so we should
* never come here for a shared rel anyway.)
*/
@@ -1106,7 +1106,7 @@ CacheInvalidateHeapTuple(Relation relation,
/*
* When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
* of the index, but in practice it doesn't matter since indexes of
* shared catalogs can't have such updates.
*/
@@ -1214,7 +1214,7 @@ CacheInvalidateRelcacheByRelid(Oid relid)
*
* Sending this type of invalidation msg forces other backends to close open
* smgr entries for the rel. This should be done to flush dangling open-file
- * references when the physical rel is being dropped or truncated. Because
+ * references when the physical rel is being dropped or truncated. Because
* these are nontransactional (i.e., not-rollback-able) operations, we just
* send the inval message immediately without any queuing.
*
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index a4ce7163ea..4b5ef99531 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -186,13 +186,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
* (This indicates that the operator is not a valid ordering operator.)
*
* Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
* or NULLS LAST, as well as inefficient planning due to failure to match up
* pathkeys that should be the same. So we want a determinate result here.
* Because of the way the syscache search works, we'll use the interpretation
* associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
* to build reverse-sort opfamilies, it doesn't seem worth expending any
* additional effort on ensuring consistency.
*/
@@ -403,7 +403,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
*
* The planner currently uses simple equal() tests to compare the lists
* returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
* searches are handled, in normal operation the result will be sorted by OID
* so everything works fine. If running with system index usage disabled,
* the result ordering is unspecified and hence the planner might fail to
@@ -1212,7 +1212,7 @@ op_mergejoinable(Oid opno, Oid inputtype)
*
* In some cases (currently only array_eq), hashjoinability depends on the
* specific input data type the operator is invoked for, so that must be
- * passed as well. We currently assume that only one input's type is needed
+ * passed as well. We currently assume that only one input's type is needed
* to check this --- by convention, pass the left input's data type.
*/
bool
@@ -1880,7 +1880,7 @@ get_typbyval(Oid typid)
* A two-fer: given the type OID, return both typlen and typbyval.
*
* Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
* instead of two. Also, this routine raises an error instead of
* returning a bogus value when given a bad type OID.
*/
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index d492cbb55e..d03d3b3cdf 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -11,7 +11,7 @@
* The logic for choosing generic or custom plans is in choose_custom_plan,
* which see for comments.
*
- * Cache invalidation is driven off sinval events. Any CachedPlanSource
+ * Cache invalidation is driven off sinval events. Any CachedPlanSource
* that matches the event is marked invalid, as is its generic CachedPlan
* if it has one. When (and if) the next demand for a cached plan occurs,
* parse analysis and rewrite is repeated to build a new valid query tree,
@@ -27,7 +27,7 @@
* caller to notice changes and cope with them.
*
* Currently, we track exactly the dependencies of plans on relations and
- * user-defined functions. On relcache invalidation events or pg_proc
+ * user-defined functions. On relcache invalidation events or pg_proc
* syscache invalidation events, we invalidate just those plans that depend
* on the particular object being modified. (Note: this scheme assumes
* that any table modification that requires replanning will generate a
@@ -123,7 +123,7 @@ InitPlanCache(void)
* CreateCachedPlan: initially create a plan cache entry.
*
* Creation of a cached plan is divided into two steps, CreateCachedPlan and
- * CompleteCachedPlan. CreateCachedPlan should be called after running the
+ * CompleteCachedPlan. CreateCachedPlan should be called after running the
* query through raw_parser, but before doing parse analysis and rewrite;
* CompleteCachedPlan is called after that. The reason for this arrangement
* is that it can save one round of copying of the raw parse tree, since
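As a usage sketch, the two steps bracket parse analysis roughly like this. Signatures are abridged from sources of this vintage and vary by version; the surrounding variables are assumed to exist in the caller:

    CachedPlanSource *psrc;
    List             *querytree_list;

    /* Step 1: right after raw_parser(), before analysis/rewrite. */
    psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);

    /* ... parse analysis and rewrite happen here ... */
    querytree_list = pg_analyze_and_rewrite(raw_parse_tree, query_string,
                                            paramTypes, numParams);

    /* Step 2: hand the analyzed trees to the plancache. */
    CompleteCachedPlan(psrc, querytree_list,
                       NULL,        /* querytree_context: copy the trees */
                       paramTypes, numParams,
                       NULL, NULL,  /* no parser setup hook */
                       0,           /* cursor options */
                       true);       /* fixed result */

    SaveCachedPlan(psrc);           /* optional: make it long-lived */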
@@ -217,7 +217,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* in that context.
*
* A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified. There is also no support for
+ * preserve the raw parse tree unmodified. There is also no support for
* invalidation, so plan use must be completed in the current transaction,
* and DDL that might invalidate the querytree_list must be avoided as well.
*
@@ -274,13 +274,13 @@ CreateOneShotCachedPlan(Node *raw_parse_tree,
* CompleteCachedPlan: second step of creating a plan cache entry.
*
* Pass in the analyzed-and-rewritten form of the query, as well as the
- * required subsidiary data about parameters and such. All passed values will
+ * required subsidiary data about parameters and such. All passed values will
* be copied into the CachedPlanSource's memory, except as specified below.
* After this is called, GetCachedPlan can be called to obtain a plan, and
* optionally the CachedPlanSource can be saved using SaveCachedPlan.
*
* If querytree_context is not NULL, the querytree_list must be stored in that
- * context (but the other parameters need not be). The querytree_list is not
+ * context (but the other parameters need not be). The querytree_list is not
* copied, rather the given context is kept as the initial query_context of
* the CachedPlanSource. (It should have been created as a child of the
* caller's working memory context, but it will now be reparented to belong
@@ -374,7 +374,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This
+ * Also save the current search_path in the query_context. (This
* should not generate much extra cruft either, since almost certainly
* the path is already valid.) Again, we don't really need this for
* one-shot plans; and we *must* skip this for transaction control
@@ -421,7 +421,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
* This is guaranteed not to throw error, except for the caller-error case
* of trying to save a one-shot plan. Callers typically depend on that
* since this is called just before or just after adding a pointer to the
- * CachedPlanSource to some permanent data structure of their own. Up until
+ * CachedPlanSource to some permanent data structure of their own. Up until
* this is done, a CachedPlanSource is just transient data that will go away
* automatically on transaction abort.
*/
@@ -442,13 +442,13 @@ SaveCachedPlan(CachedPlanSource *plansource)
* plans from the CachedPlanSource. If there is a generic plan, moving it
* into CacheMemoryContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
- * long-lived. Best thing to do seems to be to discard the plan.
+ * long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
/*
* Reparent the source memory context under CacheMemoryContext so that it
- * will live indefinitely. The query_context follows along since it's
+ * will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
MemoryContextSetParent(plansource->context, CacheMemoryContext);
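Reparenting is the whole trick that makes saving cheap: no data is copied, the context simply changes owners. A sketch of the pattern (context name illustrative; allocation-size macros as in sources of this vintage):

    MemoryContext source_context;

    /* Created under the transaction context, so it dies on abort... */
    source_context = AllocSetContextCreate(CurrentMemoryContext,
                                           "CachedPlanSource sketch",
                                           ALLOCSET_SMALL_MINSIZE,
                                           ALLOCSET_SMALL_INITSIZE,
                                           ALLOCSET_DEFAULT_MAXSIZE);

    /* ...until we decide to keep it: now it lives until explicitly deleted. */
    MemoryContextSetParent(source_context, CacheMemoryContext);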
@@ -466,7 +466,7 @@ SaveCachedPlan(CachedPlanSource *plansource)
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: any referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
@@ -617,7 +617,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->search_path = NULL;
/*
- * Free the query_context. We don't really expect MemoryContextDelete to
+ * Free the query_context. We don't really expect MemoryContextDelete to
* fail, but just in case, make sure the CachedPlanSource is left in a
* reasonably sane state. (The generic plan won't get unlinked yet, but
* that's acceptable.)
@@ -675,7 +675,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
PopActiveSnapshot();
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*
* We assume the parameter types didn't change from the first time, so no
@@ -726,7 +726,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This should
+ * Also save the current search_path in the query_context. (This should
* not generate much extra cruft either, since almost certainly the path
* is already valid.)
*/
@@ -860,7 +860,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* we ought to be holding sufficient locks to prevent any invalidation.
* However, if we're building a custom plan after having built and
* rejected a generic plan, it's possible to reach here with is_valid
- * false due to an invalidation while making the generic plan. In theory
+ * false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
* sinval reset event or the CLOBBER_CACHE_ALWAYS debug code. But for
* safety, let's treat it as real and redo the RevalidateCachedQuery call.
@@ -1043,7 +1043,7 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
* on the number of relations in the finished plan's rangetable.
* Join planning effort actually scales much worse than linearly
* in the number of relations --- but only until the join collapse
- * limits kick in. Also, while inheritance child relations surely
+ * limits kick in. Also, while inheritance child relations surely
* add to planning effort, they don't make the join situation
* worse. So the actual shape of the planning cost curve versus
* number of relations isn't all that obvious. It will take
@@ -1153,7 +1153,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/*
* If we choose to plan again, we need to re-copy the query_list,
- * since the planner probably scribbled on it. We can force
+ * since the planner probably scribbled on it. We can force
* BuildCachedPlan to do that by passing NIL.
*/
qlist = NIL;
@@ -1203,7 +1203,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
@@ -1267,7 +1267,7 @@ CachedPlanSetParentContext(CachedPlanSource *plansource,
*
* This is a convenience routine that does the equivalent of
* CreateCachedPlan + CompleteCachedPlan, using the data stored in the
- * input CachedPlanSource. The result is therefore "unsaved" (regardless
+ * input CachedPlanSource. The result is therefore "unsaved" (regardless
* of the state of the source), and we don't copy any generic plan either.
* The result will be currently valid, or not, the same as the source.
*/
@@ -1420,7 +1420,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
{
/*
* Ignore utility statements, except those (such as EXPLAIN) that
- * contain a parsed-but-not-planned query. Note: it's okay to use
+ * contain a parsed-but-not-planned query. Note: it's okay to use
* ScanQueryForLocks, even though the query hasn't been through
* rule rewriting, because rewriting doesn't change the query
* representation.
@@ -1616,7 +1616,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of analyzed-and-rewritten Queries,
- * determine the result tupledesc it will produce. Returns NULL if the
+ * determine the result tupledesc it will produce. Returns NULL if the
* execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index c947bff4fc..5ff0d9e4fd 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -124,7 +124,7 @@ bool criticalSharedRelcachesBuilt = false;
/*
* This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
* to detect whether data about to be written by write_relcache_init_file()
* might already be obsolete.
*/
@@ -167,8 +167,8 @@ static bool eoxact_list_overflowed = false;
* we don't need to access individual items except at EOXact.
*/
static TupleDesc *EOXactTupleDescArray;
-static int NextEOXactTupleDescNum = 0;
-static int EOXactTupleDescArrayLen = 0;
+static int NextEOXactTupleDescNum = 0;
+static int EOXactTupleDescArrayLen = 0;
/*
* macros to manipulate the lookup hashtables
@@ -495,7 +495,7 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file).
*/
@@ -558,7 +558,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
+ * ("unknown"). Verify this if assert checking is on. They will be
* computed when and if needed during tuple access.
*/
#ifdef USE_ASSERT_CHECKING
@@ -572,7 +572,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special cases
+ * attribute: it must be zero. This eliminates the need for special cases
* for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
@@ -628,7 +628,7 @@ RelationBuildTupleDesc(Relation relation)
* each relcache entry that has associated rules. The context is used
* just for rule info, not for any other subsidiary data of the relcache
* entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
* to be easy to free explicitly, anyway.
*/
static void
@@ -736,9 +736,9 @@ RelationBuildRuleLock(Relation relation)
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
* The reason for doing this when the rule is loaded, rather than when
@@ -1014,25 +1014,24 @@ RelationInitPhysicalAddr(Relation relation)
if (relation->rd_rel->relfilenode)
{
/*
- * Even if we are using a decoding snapshot that doesn't represent
- * the current state of the catalog we need to make sure the
- * filenode points to the current file since the older file will
- * be gone (or truncated). The new file will still contain older
- * rows so lookups in them will work correctly. This wouldn't work
- * correctly if rewrites were allowed to change the schema in a
- * noncompatible way, but those are prevented both on catalog
- * tables and on user tables declared as additional catalog
- * tables.
+ * Even if we are using a decoding snapshot that doesn't represent the
+ * current state of the catalog we need to make sure the filenode
+ * points to the current file since the older file will be gone (or
+ * truncated). The new file will still contain older rows so lookups
+ * in them will work correctly. This wouldn't work correctly if
+ * rewrites were allowed to change the schema in a noncompatible way,
+ * but those are prevented both on catalog tables and on user tables
+ * declared as additional catalog tables.
*/
if (HistoricSnapshotActive()
&& RelationIsAccessibleInLogicalDecoding(relation)
&& IsTransactionState())
{
- HeapTuple phys_tuple;
- Form_pg_class physrel;
+ HeapTuple phys_tuple;
+ Form_pg_class physrel;
phys_tuple = ScanPgRelation(RelationGetRelid(relation),
- RelationGetRelid(relation) != ClassOidIndexId,
+ RelationGetRelid(relation) != ClassOidIndexId,
true);
if (!HeapTupleIsValid(phys_tuple))
elog(ERROR, "could not find pg_class entry for %u",
@@ -1113,7 +1112,7 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we need
+ * Make the private context to hold index access info. The reason we need
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*
@@ -1161,7 +1160,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* indcollation cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must extract
+ * because it comes after the variable-width indkey field. Must extract
* the datum the hard way...
*/
indcollDatum = fastgetattr(relation->rd_indextuple,
@@ -1186,7 +1185,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Fill the support procedure OID array, as well as the info about
- * opfamilies and opclass input types. (aminfo and supportinfo are left
+ * opfamilies and opclass input types. (aminfo and supportinfo are left
* as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(indclass, relation->rd_support,
@@ -1274,7 +1273,7 @@ IndexSupportInitialize(oidvector *indclass,
* Note there is no provision for flushing the cache. This is OK at the
* moment because there is no way to ALTER any interesting properties of an
* existing opclass --- all you can do is drop it, which will result in
- * a useless but harmless dead entry in the cache. To support altering
+ * a useless but harmless dead entry in the cache. To support altering
* opclass membership (not the same as opfamily membership!), we'd need to
* be able to flush this cache as well as the contents of relcache entries
* for indexes.
@@ -1383,7 +1382,7 @@ LookupOpclassInfo(Oid operatorClassOid,
heap_close(rel, AccessShareLock);
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
* the default ones (those with lefttype = righttype = opcintype).
*/
if (numSupport > 0)
@@ -1889,11 +1888,11 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc)
{
/*
* If we rebuilt a relcache entry during a transaction then it's
- * possible we did that because the TupDesc changed as the result
- * of an ALTER TABLE that ran at less than AccessExclusiveLock.
- * It's possible someone copied that TupDesc, in which case the
- * copy would point to free'd memory. So if we rebuild an entry
- * we keep the TupDesc around until end of transaction, to be safe.
+ * possible we did that because the TupDesc changed as the result of
+ * an ALTER TABLE that ran at less than AccessExclusiveLock. It's
+ * possible someone copied that TupDesc, in which case the copy would
+ * point to free'd memory. So if we rebuild an entry we keep the
+ * TupDesc around until end of transaction, to be safe.
*/
if (remember_tupdesc)
RememberToFreeTupleDescAtEOX(relation->rd_att);
@@ -1928,7 +1927,7 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc)
*
* NB: when rebuilding, we'd better hold some lock on the relation,
* else the catalog data we need to read could be changing under us.
- * Also, a rel to be rebuilt had better have refcnt > 0. This is because
+ * Also, a rel to be rebuilt had better have refcnt > 0. This is because
* an sinval reset could happen while we're accessing the catalogs, and
* the rel would get blown away underneath us by RelationCacheInvalidate
* if it has zero refcnt.
@@ -1951,7 +1950,7 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted, the
- * next smgr access should reopen the files automatically. This ensures
+ * next smgr access should reopen the files automatically. This ensures
* that the low-level file access state is updated after, say, a vacuum
* truncation.
*/
@@ -2047,7 +2046,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* over from the old entry). This is to avoid trouble in case an
* error causes us to lose control partway through. The old entry
* will still be marked !rd_isvalid, so we'll try to rebuild it again
- * on next access. Meanwhile it's not any less valid than it was
+ * on next access. Meanwhile it's not any less valid than it was
* before, so any code that might expect to continue accessing it
* isn't hurt by the rebuild failure. (Consider for example a
* subtransaction that ALTERs a table and then gets canceled partway
@@ -2237,7 +2236,7 @@ RelationCacheInvalidateEntry(Oid relationId)
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
* relation cache and re-read relation mapping data.
*
* This is currently used only to recover from SI message buffer overflow,
@@ -2250,7 +2249,7 @@ RelationCacheInvalidateEntry(Oid relationId)
* We do this in two phases: the first pass deletes deletable items, and
* the second one rebuilds the rebuildable items. This is essential for
* safety, because hash_seq_search only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
* occur while we are walking the table, resulting in recursive entry to
* this routine, we could crash because the inner invocation blows away
* the entry next to be visited by the outer scan. But this way is OK,
@@ -2385,7 +2384,8 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
{
if (EOXactTupleDescArray == NULL)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
+
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
@@ -2395,12 +2395,12 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
}
else if (NextEOXactTupleDescNum >= EOXactTupleDescArrayLen)
{
- int32 newlen = EOXactTupleDescArrayLen * 2;
+ int32 newlen = EOXactTupleDescArrayLen * 2;
Assert(EOXactTupleDescArrayLen > 0);
EOXactTupleDescArray = (TupleDesc *) repalloc(EOXactTupleDescArray,
- newlen * sizeof(TupleDesc));
+ newlen * sizeof(TupleDesc));
EOXactTupleDescArrayLen = newlen;
}
@@ -2437,7 +2437,7 @@ AtEOXact_RelationCache(bool isCommit)
* For simplicity, eoxact_list[] entries are not deleted till end of
* top-level transaction, even though we could remove them at
* subtransaction end in some cases, or remove relations from the list if
- * they are cleared for other reasons. Therefore we should expect the
+ * they are cleared for other reasons. Therefore we should expect the
* case that list entries are not found in the hashtable; if not, there's
* nothing to do for them.
*/
@@ -2498,7 +2498,7 @@ AtEOXact_cleanup(Relation relation, bool isCommit)
* transaction calls. (That seems bogus, but it's not worth fixing.)
*
* Note: ideally this check would be applied to every relcache entry, not
- * just those that have eoxact work to do. But it's not worth forcing a
+ * just those that have eoxact work to do. But it's not worth forcing a
* scan of the whole relcache just for this. (Moreover, doing so would
* mean that assert-enabled testing never tests the hash_search code path
* above, which seems a bad idea.)
@@ -2809,7 +2809,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
+ * places. For a mapped relation, we set relfilenode to zero and rely on
* RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
@@ -3052,7 +3052,7 @@ RelationCacheInitializePhase2(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * Try to load the shared relcache cache file. If unsuccessful, bootstrap
+ * Try to load the shared relcache cache file. If unsuccessful, bootstrap
* the cache with pre-made descriptors for the critical shared catalogs.
*/
if (!load_relcache_init_file(true))
@@ -3132,9 +3132,9 @@ RelationCacheInitializePhase3(void)
/*
* If we didn't get the critical system indexes loaded into relcache, do
- * so now. These are critical because the catcache and/or opclass cache
+ * so now. These are critical because the catcache and/or opclass cache
* depend on them for fetches done during relcache load. Thus, we have an
- * infinite-recursion problem. We can break the recursion by doing
+ * infinite-recursion problem. We can break the recursion by doing
* heapscans instead of indexscans at certain key spots. To avoid hobbling
* performance, we only want to do that until we have the critical indexes
* loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
@@ -3151,7 +3151,7 @@ RelationCacheInitializePhase3(void)
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
@@ -3182,7 +3182,7 @@ RelationCacheInitializePhase3(void)
*
* DatabaseNameIndexId isn't critical for relcache loading, but rather for
* initial lookup of MyDatabaseId, without which we'll never find any
- * non-shared catalogs at all. Autovacuum calls InitPostgres with a
+ * non-shared catalogs at all. Autovacuum calls InitPostgres with a
* database OID, so it instead depends on DatabaseOidIndexId. We also
* need to nail up some indexes on pg_authid and pg_auth_members for use
* during client authentication.
@@ -3617,7 +3617,7 @@ RelationGetIndexList(Relation relation)
/*
* We build the list we intend to return (in the caller's context) while
- * doing the scan. After successfully completing the scan, we copy that
+ * doing the scan. After successfully completing the scan, we copy that
* list into the relcache entry. This avoids cache-context memory leakage
* if we get some sort of error partway through.
*/
@@ -3655,7 +3655,7 @@ RelationGetIndexList(Relation relation)
/*
* indclass cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must
+ * because it comes after the variable-width indkey field. Must
* extract the datum the hard way...
*/
indclassDatum = heap_getattr(htup,
@@ -3970,16 +3970,16 @@ RelationGetIndexPredicate(Relation relation)
Bitmapset *
RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
{
- Bitmapset *indexattrs; /* indexed columns */
- Bitmapset *uindexattrs; /* columns in unique indexes */
- Bitmapset *idindexattrs; /* columns in the replica identity */
+ Bitmapset *indexattrs; /* indexed columns */
+ Bitmapset *uindexattrs; /* columns in unique indexes */
+ Bitmapset *idindexattrs; /* columns in the replica identity */
List *indexoidlist;
ListCell *l;
MemoryContext oldcxt;
/* Quick exit if we already computed the result. */
if (relation->rd_indexattr != NULL)
- switch(attrKind)
+ switch (attrKind)
{
case INDEX_ATTR_BITMAP_IDENTITY_KEY:
return bms_copy(relation->rd_idattr);
@@ -4023,8 +4023,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Relation indexDesc;
IndexInfo *indexInfo;
int i;
- bool isKey; /* candidate key */
- bool isIDKey; /* replica identity index */
+ bool isKey; /* candidate key */
+ bool isIDKey; /* replica identity index */
indexDesc = index_open(indexOid, AccessShareLock);
@@ -4052,7 +4052,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
if (isIDKey)
idindexattrs = bms_add_member(idindexattrs,
- attrnum - FirstLowInvalidHeapAttributeNumber);
+ attrnum - FirstLowInvalidHeapAttributeNumber);
if (isKey)
uindexattrs = bms_add_member(uindexattrs,
@@ -4079,7 +4079,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
MemoryContextSwitchTo(oldcxt);
/* We return our original working copy for caller to play with */
- switch(attrKind)
+ switch (attrKind)
{
case INDEX_ATTR_BITMAP_IDENTITY_KEY:
return idindexattrs;
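The offset arithmetic visible throughout this function is load-bearing: bitmapsets cannot hold negative members, and system attributes have negative attribute numbers, so every attnum is shifted by FirstLowInvalidHeapAttributeNumber on the way in and out. Sketch:

    Bitmapset  *indexattrs = NULL;
    AttrNumber  attrnum;        /* may be negative for system columns */

    /* Store: shift so the smallest system attnum maps to bit 0. */
    indexattrs = bms_add_member(indexattrs,
                                attrnum - FirstLowInvalidHeapAttributeNumber);

    /* Probe: apply the same shift when testing membership. */
    if (bms_is_member(attrnum - FirstLowInvalidHeapAttributeNumber, indexattrs))
    {
        /* this column is covered by some index */
    }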
@@ -4268,7 +4268,7 @@ errtablecol(Relation rel, int attnum)
* given directly rather than extracted from the relation's catalog data.
*
* Don't use this directly unless errtablecol() is inconvenient for some
- * reason. This might possibly be needed during intermediate states in ALTER
+ * reason. This might possibly be needed during intermediate states in ALTER
* TABLE, for instance.
*/
int
@@ -4688,7 +4688,7 @@ load_relcache_init_file(bool shared)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying to
+ * init file is broken, so do it the hard way. We don't bother trying to
* free the clutter we just allocated; it's not in the relcache so it
* won't hurt.
*/
@@ -4753,7 +4753,7 @@ write_relcache_init_file(bool shared)
}
/*
- * Write a magic number to serve as a file version identifier. We can
+ * Write a magic number to serve as a file version identifier. We can
* change the magic number whenever the relcache layout changes.
*/
magic = RELCACHE_INIT_FILEMAGIC;
@@ -4978,7 +4978,7 @@ RelationCacheInitFilePostInvalidate(void)
*
* We used to keep the init files across restarts, but that is unsafe in PITR
* scenarios, and even in simple crash-recovery cases there are windows for
- * the init files to become out-of-sync with the database. So now we just
+ * the init files to become out-of-sync with the database. So now we just
* remove them during startup and expect the first backend launch to rebuild
* them. Of course, this has to happen in each database of the cluster.
*/
diff --git a/src/backend/utils/cache/relfilenodemap.c b/src/backend/utils/cache/relfilenodemap.c
index 1b009be0fd..557ff6148d 100644
--- a/src/backend/utils/cache/relfilenodemap.c
+++ b/src/backend/utils/cache/relfilenodemap.c
@@ -43,7 +43,7 @@ typedef struct
typedef struct
{
- RelfilenodeMapKey key; /* lookup key - must be first */
+ RelfilenodeMapKey key; /* lookup key - must be first */
Oid relid; /* pg_class.oid */
} RelfilenodeMapEntry;
@@ -143,10 +143,10 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
{
RelfilenodeMapKey key;
RelfilenodeMapEntry *entry;
- bool found;
+ bool found;
SysScanDesc scandesc;
- Relation relation;
- HeapTuple ntp;
+ Relation relation;
+ HeapTuple ntp;
ScanKeyData skey[2];
Oid relid;
@@ -222,8 +222,9 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
- bool isnull;
- Oid check;
+ bool isnull;
+ Oid check;
+
check = fastgetattr(ntp, Anum_pg_class_reltablespace,
RelationGetDescr(relation),
&isnull);
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index ebbe4d3bbe..95a2689fd4 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -23,7 +23,7 @@
* mapped catalogs can only be relocated by operations such as VACUUM FULL
* and CLUSTER, which make no transactionally-significant changes: it must be
* safe for the new file to replace the old, even if the transaction itself
- * aborts. An important factor here is that the indexes and toast table of
+ * aborts. An important factor here is that the indexes and toast table of
* a mapped catalog must also be mapped, so that the rewrites/relocations of
* all these files commit in a single map file update rather than being tied
* to transaction commit.
@@ -57,13 +57,13 @@
/*
* The map file is critical data: we have no automatic method for recovering
* from loss or corruption of it. We use a CRC so that we can detect
- * corruption. To minimize the risk of failed updates, the map file should
+ * corruption. To minimize the risk of failed updates, the map file should
* be kept to no more than one standard-size disk sector (ie 512 bytes),
* and we use overwrite-in-place rather than playing renaming games.
* The struct layout below is designed to occupy exactly 512 bytes, which
* might make filesystem updates a bit more efficient.
*
- * Entries in the mappings[] array are in no particular order. We could
+ * Entries in the mappings[] array are in no particular order. We could
* speed searching by insisting on OID order, but it really shouldn't be
* worth the trouble given the intended size of the mapping sets.
*/
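The 512-byte layout works out as follows; this sketch reproduces the arithmetic (field names as in the sources; sizes assume 4-byte Oid and CRC):

    typedef struct RelMapping
    {
        Oid         mapoid;         /* OID of catalog */
        Oid         mapfilenode;    /* its relfilenode */
    } RelMapping;                   /* 8 bytes */

    #define MAX_MAPPINGS 62         /* chosen so the struct is 512 bytes */

    typedef struct RelMapFileSketch
    {
        int32       magic;                  /*  4 bytes */
        int32       num_mappings;           /*  4 bytes */
        RelMapping  mappings[MAX_MAPPINGS]; /* 62 * 8 = 496 bytes */
        int32       crc;                    /*  4 bytes */
        int32       pad;                    /*  4 bytes -> 512 total */
    } RelMapFileSketch;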
@@ -90,7 +90,7 @@ typedef struct RelMapFile
/*
* The currently known contents of the shared map file and our database's
- * local map file are stored here. These can be reloaded from disk
+ * local map file are stored here. These can be reloaded from disk
* immediately whenever we receive an update sinval message.
*/
static RelMapFile shared_map;
@@ -346,7 +346,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
* RelationMapRemoveMapping
*
* Remove a relation's entry in the map. This is only allowed for "active"
- * (but not committed) local mappings. We need it so we can back out the
+ * (but not committed) local mappings. We need it so we can back out the
* entry for the transient target file when doing VACUUM FULL/CLUSTER on
* a mapped relation.
*/
@@ -374,7 +374,7 @@ RelationMapRemoveMapping(Oid relationId)
* RelationMapInvalidate
*
* This routine is invoked for SI cache flush messages. We must re-read
- * the indicated map file. However, we might receive a SI message in a
+ * the indicated map file. However, we might receive a SI message in a
* process that hasn't yet, and might never, load the mapping files;
* for example the autovacuum launcher, which *must not* try to read
* a local map since it is attached to no particular database.
@@ -442,7 +442,7 @@ AtCCI_RelationMap(void)
*
* During commit, this must be called as late as possible before the actual
* transaction commit, so as to minimize the window where the transaction
- * could still roll back after committing map changes. Although nothing
+ * could still roll back after committing map changes. Although nothing
* critically bad happens in such a case, we still would prefer that it
* not happen, since we'd possibly be losing useful updates to the relations'
* pg_class row(s).
@@ -509,7 +509,7 @@ AtPrepare_RelationMap(void)
/*
* CheckPointRelationMap
*
- * This is called during a checkpoint. It must ensure that any relation map
+ * This is called during a checkpoint. It must ensure that any relation map
* updates that were WAL-logged before the start of the checkpoint are
* securely flushed to disk and will not need to be replayed later. This
* seems unlikely to be a performance-critical issue, so we use a simple
@@ -700,7 +700,7 @@ load_relmap_file(bool shared)
*
* Because this may be called during WAL replay when MyDatabaseId,
* DatabasePath, etc aren't valid, we require the caller to pass in suitable
- * values. The caller is also responsible for being sure no concurrent
+ * values. The caller is also responsible for being sure no concurrent
* map update could be happening.
*/
static void
@@ -820,7 +820,7 @@ write_relmap_file(bool shared, RelMapFile *newmap,
/*
* Make sure that the files listed in the map are not deleted if the outer
- * transaction aborts. This had better be within the critical section
+ * transaction aborts. This had better be within the critical section
* too: it's not likely to fail, but if it did, we'd arrive at transaction
* abort with the files still vulnerable. PANICing will leave things in a
* good state on-disk.
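Overwrite-in-place with a single small write is what keeps the update atomic at the sector level. A freestanding sketch of the write path (error handling collapsed; in the backend, a failure here is a PANIC for the reason given above):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Overwrite the 512-byte map file in place and force it to disk. */
    static void
    write_map(const char *path, const void *buf)
    {
        int fd = open(path, O_WRONLY | O_CREAT, 0600);

        if (fd < 0 ||
            write(fd, buf, 512) != 512 ||   /* one sector: all or nothing */
            fsync(fd) != 0 ||               /* durable before commit */
            close(fd) != 0)
        {
            perror(path);
            exit(1);    /* the backend would PANIC here instead */
        }
    }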
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 93b42bbc1a..24e8679e25 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -4,7 +4,7 @@
* Tablespace cache management.
*
* We cache the parsed version of spcoptions for each tablespace to avoid
- * needing to reparse on every lookup. Right now, there doesn't appear to
+ * needing to reparse on every lookup. Right now, there doesn't appear to
* be a measurable performance gain from doing this, but that might change
* in the future as we add more options.
*
@@ -128,7 +128,7 @@ get_tablespace(Oid spcid)
return spc;
/*
- * Not found in TableSpace cache. Check catcache. If we don't find a
+ * Not found in TableSpace cache. Check catcache. If we don't find a
* valid HeapTuple, it must mean someone has managed to request tablespace
* details for a non-existent tablespace. We'll just treat that case as
* if no options were specified.
@@ -158,7 +158,7 @@ get_tablespace(Oid spcid)
}
/*
- * Now create the cache entry. It's important to do this only after
+ * Now create the cache entry. It's important to do this only after
* reading the pg_tablespace entry, since doing so could cause a cache
* flush.
*/
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 79df5b6835..94d951ce05 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -803,16 +803,17 @@ static CatCache *SysCache[
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
-static Oid SysCacheRelationOid[lengthof(cacheinfo)];
-static int SysCacheRelationOidSize;
+static Oid SysCacheRelationOid[
+ lengthof(cacheinfo)];
+static int SysCacheRelationOidSize;
-static int oid_compare(const void *a, const void *b);
+static int oid_compare(const void *a, const void *b);
/*
* InitCatalogCache - initialize the caches
*
* Note that no database access is done here; we only allocate memory
- * and initialize the cache structure. Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
* to complete initialization of a cache happens upon first use
* of that cache.
*/
@@ -1063,7 +1064,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname)
* extract a specific attribute.
*
* This is equivalent to using heap_getattr() on a tuple fetched
- * from a non-cached relation. Usually, this is only used for attributes
+ * from a non-cached relation. Usually, this is only used for attributes
* that could be NULL or variable length; the fixed-size attributes in
* a system table are accessed just by mapping the tuple onto the C struct
* declarations from include/catalog/.
@@ -1176,12 +1177,12 @@ RelationInvalidatesSnapshotsOnly(Oid relid)
bool
RelationHasSysCache(Oid relid)
{
- int low = 0,
- high = SysCacheRelationOidSize - 1;
+ int low = 0,
+ high = SysCacheRelationOidSize - 1;
while (low <= high)
{
- int middle = low + (high - low) / 2;
+ int middle = low + (high - low) / 2;
if (SysCacheRelationOid[middle] == relid)
return true;
@@ -1201,8 +1202,8 @@ RelationHasSysCache(Oid relid)
static int
oid_compare(const void *a, const void *b)
{
- Oid oa = *((Oid *) a);
- Oid ob = *((Oid *) b);
+ Oid oa = *((Oid *) a);
+ Oid ob = *((Oid *) b);
if (oa == ob)
return 0;
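oid_compare exists to feed qsort() at initialization time; the sorted, de-duplicated array is what makes the binary search in RelationHasSysCache valid. A sketch of that setup step (abridged; loop variables assumed declared in the caller):

    /* Collect each cache's relation OID, then sort and de-duplicate. */
    for (cacheId = 0; cacheId < SysCacheSize; cacheId++)
        SysCacheRelationOid[SysCacheRelationOidSize++] =
            cacheinfo[cacheId].reloid;

    qsort(SysCacheRelationOid, SysCacheRelationOidSize,
          sizeof(Oid), oid_compare);

    /* Squeeze out duplicates (several caches can share one relation). */
    i = 0;
    for (j = 1; j < SysCacheRelationOidSize; j++)
        if (SysCacheRelationOid[j] != SysCacheRelationOid[i])
            SysCacheRelationOid[++i] = SysCacheRelationOid[j];
    SysCacheRelationOidSize = i + 1;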
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index ad370337fe..8c6c7fcd22 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -11,7 +11,7 @@
*
* Several seemingly-odd choices have been made to support use of the type
* cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array(). Because those routines are used as index
+ * record_cmp(), and hash_array(). Because those routines are used as index
* support operations, they cannot leak memory. To allow them to execute
* efficiently, all information that they would like to re-use across calls
* is kept in the type cache.
@@ -101,7 +101,7 @@ typedef struct TypeCacheEnumData
*
* Stored record types are remembered in a linear array of TupleDescs,
* which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs. The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
* uses just the first N columns' type OIDs, and so we may have multiple
* entries with the same hash key.
*/
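Hashing only a prefix of the column type OIDs keeps the key fixed-size while still spreading most record types well; a collision just means walking a short chain and comparing full TupleDescs. A sketch (the number of key columns and the mixing function are illustrative; the sources use a small constant):

    #define REC_HASH_KEYS 4     /* illustrative: hash the first N column types */

    static unsigned int
    record_type_hash(Oid *types, int natts)
    {
        unsigned int h = 0;
        int          n = (natts < REC_HASH_KEYS) ? natts : REC_HASH_KEYS;

        for (int i = 0; i < n; i++)
            h = h * 31 + (unsigned int) types[i];   /* simple mix */
        return h;
    }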
@@ -482,7 +482,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
* because the reference mustn't be entered in the current resource owner;
* it can outlive the current query.
*/
@@ -1074,7 +1074,7 @@ load_enum_cache_data(TypeCacheEntry *tcache)
/*
* Read all the information for members of the enum type. We collect the
* info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext. This minimizes the risk of
+ * permanent memory in CacheMemoryContext. This minimizes the risk of
* leaking memory from CacheMemoryContext in the event of an error partway
* through.
*/
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 977fc66418..0d92dcd036 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -5,7 +5,7 @@
*
* Because of the extremely high rate at which log messages can be generated,
* we need to be mindful of the performance cost of obtaining any information
- * that may be logged. Also, it's important to keep in mind that this code may
+ * that may be logged. Also, it's important to keep in mind that this code may
* get called from within an aborted transaction, in which case operations
* such as syscache lookups are unsafe.
*
@@ -15,23 +15,23 @@
* if we run out of memory, it's important to be able to report that fact.
* There are a number of considerations that go into this.
*
- * First, distinguish between re-entrant use and actual recursion. It
+ * First, distinguish between re-entrant use and actual recursion. It
* is possible for an error or warning message to be emitted while the
- * parameters for an error message are being computed. In this case
+ * parameters for an error message are being computed. In this case
* errstart has been called for the outer message, and some field values
- * may have already been saved, but we are not actually recursing. We handle
- * this by providing a (small) stack of ErrorData records. The inner message
+ * may have already been saved, but we are not actually recursing. We handle
+ * this by providing a (small) stack of ErrorData records. The inner message
* can be computed and sent without disturbing the state of the outer message.
* (If the inner message is actually an error, this isn't very interesting
* because control won't come back to the outer message generator ... but
* if the inner message is only debug or log data, this is critical.)
*
* Second, actual recursion will occur if an error is reported by one of
- * the elog.c routines or something they call. By far the most probable
+ * the elog.c routines or something they call. By far the most probable
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
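The "(small) stack of ErrorData records" looks roughly like this; the depth check mirrors what errstart does when the stack overflows (sketch; the stack size matches current sources, the helper name is mine):

    #define ERRORDATA_STACK_SIZE 5

    static ErrorData errordata[ERRORDATA_STACK_SIZE];
    static int       errordata_stack_depth = -1;  /* -1 = no error in progress */

    /* Claim a stack slot for a new message being collected. */
    static ErrorData *
    push_errordata(void)
    {
        if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
        {
            /* Too deep: almost certainly an error loop; give up hard. */
            errordata_stack_depth = -1;
            ereport(PANIC, (errmsg("ERRORDATA_STACK_SIZE exceeded")));
        }
        return &errordata[errordata_stack_depth];
    }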
@@ -116,7 +116,7 @@ char *Log_destination_string = NULL;
/*
* Max string length to send to syslog(). Note that this doesn't count the
* sequence-number prefix we add, and of course it doesn't count the prefix
- * added by syslog itself. Solaris and sysklogd truncate the final message
+ * added by syslog itself. Solaris and sysklogd truncate the final message
* at 1024 bytes, so this value leaves 124 bytes for those prefixes. (Most
* other syslog implementations seem to have limits of 2KB or so.)
*/
@@ -244,7 +244,7 @@ errstart(int elevel, const char *filename, int lineno,
{
/*
* If we are inside a critical section, all errors become PANIC
- * errors. See miscadmin.h.
+ * errors. See miscadmin.h.
*/
if (CritSectionCount > 0)
elevel = PANIC;
@@ -257,7 +257,7 @@ errstart(int elevel, const char *filename, int lineno,
*
* 2. ExitOnAnyError mode switch is set (initdb uses this).
*
- * 3. the error occurred after proc_exit has begun to run. (It's
+ * 3. the error occurred after proc_exit has begun to run. (It's
* proc_exit's responsibility to see that this doesn't turn into
* infinite recursion!)
*/
@@ -350,7 +350,7 @@ errstart(int elevel, const char *filename, int lineno,
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -424,8 +424,8 @@ errfinish(int dummy,...)
* may not be re-entrant.
*
* Note: other places that save-and-clear ImmediateInterruptOK also do
- * HOLD_INTERRUPTS(), but that should not be necessary here since we
- * don't call anything that could turn on ImmediateInterruptOK.
+ * HOLD_INTERRUPTS(), but that should not be necessary here since we don't
+ * call anything that could turn on ImmediateInterruptOK.
*/
save_ImmediateInterruptOK = ImmediateInterruptOK;
ImmediateInterruptOK = false;
@@ -458,7 +458,7 @@ errfinish(int dummy,...)
*
* Reset InterruptHoldoffCount in case we ereport'd from inside an
* interrupt holdoff section. (We assume here that no handler will
- * itself be inside a holdoff section. If necessary, such a handler
+ * itself be inside a holdoff section. If necessary, such a handler
* could save and restore InterruptHoldoffCount for itself, but this
* should make life easier for most.)
*
@@ -484,7 +484,7 @@ errfinish(int dummy,...)
* progress, so that we can report the message before dying. (Without
* this, pq_putmessage will refuse to send the message at all, which is
* what we want for NOTICE messages, but not for fatal exits.) This hack
- * is necessary because of poor design of old-style copy protocol. Note
+ * is necessary because of poor design of old-style copy protocol. Note
* we must do this even if client is fool enough to have set
* client_min_messages above FATAL, so don't look at output_to_client.
*/
@@ -606,7 +606,7 @@ errcode(int sqlerrcode)
/*
* errcode_for_file_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of disk file access.
*
* NOTE: the primary error message string should generally include %m
@@ -677,7 +677,7 @@ errcode_for_file_access(void)
/*
* errcode_for_socket_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of socket access.
*
* NOTE: the primary error message string should generally include %m
@@ -715,7 +715,7 @@ errcode_for_socket_access(void)
* This macro handles expansion of a format string and associated parameters;
* it's common code for errmsg(), errdetail(), etc. Must be called inside
* a routine that is declared like "const char *fmt, ..." and has an edata
- * pointer set up. The message is assigned to edata->targetfield, or
+ * pointer set up. The message is assigned to edata->targetfield, or
* appended to it if appendval is true. The message is subject to translation
* if translateit is true.
*
@@ -1296,7 +1296,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery. Note that the message is intentionally not localized,
* else failure to convert it to client encoding could cause further
@@ -1467,7 +1467,7 @@ EmitErrorReport(void)
/*
* CopyErrorData --- obtain a copy of the topmost error stack entry
*
- * This is only for use in error handler code. The data is copied into the
+ * This is only for use in error handler code. The data is copied into the
* current memory context, so callers should always switch away from
* ErrorContext first; otherwise it will be lost when FlushErrorState is done.
*/
@@ -1581,7 +1581,7 @@ FlushErrorState(void)
*
* A handler can do CopyErrorData/FlushErrorState to get out of the error
* subsystem, then do some processing, and finally ReThrowError to re-throw
- * the original error. This is slower than just PG_RE_THROW() but should
+ * the original error. This is slower than just PG_RE_THROW() but should
* be used if the "some processing" is likely to incur another error.
*/
void
@@ -1598,7 +1598,7 @@ ReThrowError(ErrorData *edata)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1713,8 +1713,8 @@ pg_re_throw(void)
char *
GetErrorContextStack(void)
{
- ErrorData *edata;
- ErrorContextCallback *econtext;
+ ErrorData *edata;
+ ErrorContextCallback *econtext;
/*
* Okay, crank up a stack entry to store the info in.
@@ -1724,7 +1724,7 @@ GetErrorContextStack(void)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1749,8 +1749,8 @@ GetErrorContextStack(void)
* into edata->context.
*
* Errors occurring in callback functions should go through the regular
- * error handling code which should handle any recursive errors, though
- * we double-check above, just in case.
+ * error handling code which should handle any recursive errors, though we
+ * double-check above, just in case.
*/
for (econtext = error_context_stack;
econtext != NULL;
@@ -1833,7 +1833,7 @@ set_syslog_parameters(const char *ident, int facility)
{
/*
* guc.c is likely to call us repeatedly with same parameters, so don't
- * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
@@ -2069,6 +2069,7 @@ write_console(const char *line, int len)
int rc;
#ifdef WIN32
+
/*
* Try to convert the message to UTF16 and write it with WriteConsoleW().
* Fall back on write() if anything fails.
@@ -2186,14 +2187,14 @@ setup_formatted_start_time(void)
static const char *
process_log_prefix_padding(const char *p, int *ppadding)
{
- int paddingsign = 1;
- int padding = 0;
+ int paddingsign = 1;
+ int padding = 0;
if (*p == '-')
{
p++;
- if (*p == '\0') /* Did the buf end in %- ? */
+ if (*p == '\0') /* Did the buf end in %- ? */
return NULL;
paddingsign = -1;
}
@@ -2268,9 +2269,9 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
* process_log_prefix_padding moves p past the padding number if it
* exists.
*
- * Note: Since only '-', '0' to '9' are valid formatting characters
- * we can do a quick check here to pre-check for formatting. If the
- * char is not formatting then we can skip a useless function call.
+ * Note: Since only '-' and '0' to '9' are valid formatting characters, we
+ * can do a quick pre-check here for formatting. If the char is not a
+ * formatting character then we can skip a useless function call.
*
* Further note: At least on some platforms, passing %*s rather than
* %s to appendStringInfo() is substantially slower, so many of the
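A sketch of that quick pre-check, with a helper name of our own choosing:

    #include <stdbool.h>

    /*
     * Only '-' and the digits '0'-'9' can begin a padding specifier, so
     * anything else can skip the call to process_log_prefix_padding().
     */
    static inline bool
    is_padding_start(char c)
    {
        return (c == '-') || (c >= '0' && c <= '9');
    }

When no padding was requested, the cases below likewise avoid the slower
%*s format and append the value directly.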
@@ -2337,9 +2338,10 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
case 'c':
if (padding != 0)
{
- char strfbuf[128];
+ char strfbuf[128];
+
snprintf(strfbuf, sizeof(strfbuf) - 1, "%lx.%x",
- (long) (MyStartTime), MyProcPid);
+ (long) (MyStartTime), MyProcPid);
appendStringInfo(buf, "%*s", padding, strfbuf);
}
else
@@ -2411,14 +2413,15 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
if (MyProcPort->remote_port && MyProcPort->remote_port[0] != '\0')
{
/*
- * This option is slightly special as the port number
- * may be appended onto the end. Here we need to build
- * 1 string which contains the remote_host and optionally
- * the remote_port (if set) so we can properly align the
- * string.
+ * This option is slightly special as the port
+ * number may be appended onto the end. Here we
+ * need to build 1 string which contains the
+ * remote_host and optionally the remote_port (if
+ * set) so we can properly align the string.
*/
- char *hostport;
+ char *hostport;
+
hostport = psprintf("%s(%s)", MyProcPort->remote_host, MyProcPort->remote_port);
appendStringInfo(buf, "%*s", padding, hostport);
pfree(hostport);
@@ -2433,7 +2436,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
if (MyProcPort->remote_port &&
MyProcPort->remote_port[0] != '\0')
appendStringInfo(buf, "(%s)",
- MyProcPort->remote_port);
+ MyProcPort->remote_port);
}
}
@@ -2465,9 +2468,10 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
{
if (padding != 0)
{
- char strfbuf[128];
+ char strfbuf[128];
+
snprintf(strfbuf, sizeof(strfbuf) - 1, "%d/%u",
- MyProc->backendId, MyProc->lxid);
+ MyProc->backendId, MyProc->lxid);
appendStringInfo(buf, "%*s", padding, strfbuf);
}
else
@@ -2898,6 +2902,7 @@ send_message_to_server_log(ErrorData *edata)
if (redirection_done && !am_syslogger)
write_pipe_chunks(buf.data, buf.len, LOG_DESTINATION_STDERR);
#ifdef WIN32
+
/*
* In a win32 service environment, there is no usable stderr. Capture
* anything going there and write it to the eventlog instead.
@@ -2951,7 +2956,7 @@ send_message_to_server_log(ErrorData *edata)
*
* Note: when there are multiple backends writing into the syslogger pipe,
* it's critical that each write go into the pipe indivisibly, and not
- * get interleaved with data from other processes. Fortunately, the POSIX
+ * get interleaved with data from other processes. Fortunately, the POSIX
* spec requires that writes to pipes be atomic so long as they are not
* more than PIPE_BUF bytes long. So we divide long messages into chunks
* that are no more than that length, and send one chunk per write() call.
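The chunking rule is mechanical; here is an illustrative reduction (the
real write_pipe_chunks also wraps each chunk in a pipe-protocol header):

    #include <limits.h>         /* PIPE_BUF */
    #include <unistd.h>

    static void
    write_chunked(int fd, const char *data, int len)
    {
        while (len > 0)
        {
            int     chunk = (len > PIPE_BUF) ? PIPE_BUF : len;

            /* each write() is at most PIPE_BUF bytes, hence atomic */
            if (write(fd, data, chunk) < 0)
                return;         /* error handling elided */
            data += chunk;
            len -= chunk;
        }
    }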
@@ -3271,7 +3276,7 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno. This
+ * Some strerror()s return an empty string for out-of-range errno. This
* is ANSI C spec compliant, but not exactly useful. Also, we may get
* back strings of question marks if libc cannot transcode the message to
* the codeset specified by LC_CTYPE. If we get nothing useful, first try
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index fa01ade544..042af7877b 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -131,7 +131,7 @@ load_external_function(char *filename, char *funcname,
/*
* This function loads a shlib file without looking up any particular
- * function in it. If the same shlib has previously been loaded,
+ * function in it. If the same shlib has previously been loaded,
* unload and reload it.
*
* When 'restricted' is true, only libraries in the presumed-secure
@@ -171,7 +171,7 @@ lookup_external_function(void *filehandle, char *funcname)
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded. Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
@@ -473,7 +473,7 @@ file_exists(const char *name)
* If name contains a slash, check if the file exists, if so return
* the name. Else (no slash) try to expand using search path (see
* find_in_dynamic_libpath below); if that works, return the fully
- * expanded file name. If the previous failed, append DLSUFFIX and
+ * expanded file name. If the previous failed, append DLSUFFIX and
* try again. If all fails, just return the original name.
*
* The result will always be freshly palloc'd.
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 1fe45da50c..a95be056da 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -96,7 +96,7 @@ static Datum fmgr_security_definer(PG_FUNCTION_ARGS);
/*
- * Lookup routines for builtin-function table. We can search by either Oid
+ * Lookup routines for builtin-function table. We can search by either Oid
* or name, but search by Oid is much faster.
*/
@@ -578,7 +578,7 @@ clear_external_function_hash(void *filehandle)
* Copy an FmgrInfo struct
*
* This is inherently somewhat bogus since we can't reliably duplicate
- * language-dependent subsidiary info. We cheat by zeroing fn_extra,
+ * language-dependent subsidiary info. We cheat by zeroing fn_extra,
* instead, meaning that subsidiary info will have to be recomputed.
*/
void
@@ -858,7 +858,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
/*
- * Support for security-definer and proconfig-using functions. We support
+ * Support for security-definer and proconfig-using functions. We support
* both of these features using the same call handler, because they are
* often used together and it would be inefficient (as well as notationally
* messy) to have two levels of call handler involved.
@@ -878,7 +878,7 @@ struct fmgr_security_definer_cache
* (All this info is cached for the duration of the current query.)
* To execute a call, we temporarily replace the flinfo with the cached
* and looked-up one, while keeping the outer fcinfo (which contains all
- * the actual arguments, etc.) intact. This is not re-entrant, but then
+ * the actual arguments, etc.) intact. This is not re-entrant, but then
* the fcinfo itself can't be used re-entrantly anyway.
*/
static Datum
@@ -958,7 +958,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* We don't need to restore GUC or userid settings on error, because the
- * ensuing xact or subxact abort will do that. The PG_TRY block is only
+ * ensuing xact or subxact abort will do that. The PG_TRY block is only
* needed to clean up the flinfo link.
*/
save_flinfo = fcinfo->flinfo;
@@ -1011,7 +1011,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* These are for invocation of a specifically named function with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. Also, the function cannot be one that needs to
+ * are allowed to be NULL. Also, the function cannot be one that needs to
* look at FmgrInfo, since there won't be any.
*/
Datum
@@ -1556,8 +1556,8 @@ FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
/*
* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially fmgr_info() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially fmgr_info() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the fmgr_info() once and then use FunctionCallN().
*/
Datum
@@ -1886,7 +1886,7 @@ OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
*
* One important difference from the bare function call is that we will
* push any active SPI context, allowing SPI-using I/O functions to be
- * called from other SPI functions without extra notation. This is a hack,
+ * called from other SPI functions without extra notation. This is a hack,
* but the alternative of expecting all SPI functions to do SPI_push/SPI_pop
* around I/O calls seems worse.
*/
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 45108306b2..f2824e0876 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -136,7 +136,7 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
* FuncCallContext is pointing to it), but in most usage patterns the
* tuples stored in it will be in the function's per-tuple context. So at
* the beginning of each call, the Slot will hold a dangling pointer to an
- * already-recycled tuple. We clear it out here.
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
* will always be NULL. This is just here for backwards compatibility in
@@ -192,13 +192,13 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
@@ -281,7 +281,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -448,7 +448,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 4ae1eb1ca3..2b99e4b521 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -5,19 +5,19 @@
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking. The simplest convention is
- * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create. This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition. We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
@@ -81,7 +81,7 @@
* Constants
*
* A hash table has a top-level "directory", each of whose entries points
- * to a "segment" of ssize bucket headers. The maximum number of hash
+ * to a "segment" of ssize bucket headers. The maximum number of hash
* buckets is thus dsize * ssize (but dsize may be expansible). Of course,
* the number of records in the table can be larger, but we don't want a
* whole lot of records per bucket or performance goes down.
@@ -89,7 +89,7 @@
* In a hash table allocated in shared memory, the directory cannot be
* expanded because it must stay at a fixed address. The directory size
* should be selected using hash_select_dirsize (and you'd better have
- * a good idea of the maximum number of entries!). For non-shared hash
+ * a good idea of the maximum number of entries!). For non-shared hash
* tables, the initial directory size can be left at the default.
*/
#define DEF_SEGSIZE 256
@@ -341,7 +341,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
* well.
*/
hashp->hctl = info->hctl;
@@ -790,7 +790,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
@@ -1042,7 +1042,7 @@ hash_update_hash_key(HTAB *hashp,
hashp->tabname);
/*
- * Lookup the existing element using its saved hash value. We need to do
+ * Lookup the existing element using its saved hash value. We need to do
* this to be able to unlink it from its hash chain, but as a side benefit
* we can verify the validity of the passed existingEntry pointer.
*/
@@ -1119,7 +1119,7 @@ hash_update_hash_key(HTAB *hashp,
/*
* If old and new hash values belong to the same bucket, we need not
* change any chain links, and indeed should not since this simplistic
- * update will corrupt the list if currBucket is the last element. (We
+ * update will corrupt the list if currBucket is the last element. (We
* cannot fall out earlier, however, since we need to scan the bucket to
* check for duplicate keys.)
*/
@@ -1405,7 +1405,7 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the hash
+ * Relocate records to the new bucket. NOTE: because of the way the hash
* masking is done in calc_bucket, only one old bucket can need to be
* split at this point. With a different way of reducing the hash value,
* that might not be true!
@@ -1554,7 +1554,7 @@ hash_corrupted(HTAB *hashp)
{
/*
* If the corruption is in a shared hashtable, we'd better force a
- * systemwide restart. Otherwise, just shut down this one backend.
+ * systemwide restart. Otherwise, just shut down this one backend.
*/
if (hashp->isshared)
elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
@@ -1599,7 +1599,7 @@ next_pow2_int(long num)
/************************* SEQ SCAN TRACKING ************************/
/*
- * We track active hash_seq_search scans here. The need for this mechanism
+ * We track active hash_seq_search scans here. The need for this mechanism
* comes from the fact that a scan will get confused if a bucket split occurs
* while it's in progress: it might visit entries twice, or even miss some
* entirely (if it's partway through the same bucket that splits). Hence
@@ -1619,7 +1619,7 @@ next_pow2_int(long num)
*
* This arrangement is reasonably robust if a transient hashtable is deleted
* without notifying us. The absolute worst case is we might inhibit splits
- * in another table created later at exactly the same address. We will give
+ * in another table created later at exactly the same address. We will give
* a warning at transaction end for reference leaks, so any bugs leading to
* lack of notification should be easy to catch.
*/
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 6115ce3f33..a703c67ead 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -59,7 +59,7 @@ static List *lock_files = NIL;
*
* NOTE: "ignoring system indexes" means we do not use the system indexes
* for lookups (either in hardwired catalog accesses or in planner-generated
- * plans). We do, however, still update the indexes when a catalog
+ * plans). We do, however, still update the indexes when a catalog
* modification is made.
* ----------------------------------------------------------------
*/
@@ -230,7 +230,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* Currently there are two valid bits in SecurityRestrictionContext:
*
* SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation
- * that is temporarily changing CurrentUserId via these functions. This is
+ * that is temporarily changing CurrentUserId via these functions. This is
* needed to indicate that the actual value of CurrentUserId is not in sync
* with guc.c's internal state, so SET ROLE has to be disallowed.
*
@@ -251,7 +251,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* ever throw any kind of error. This is because they are used by
* StartTransaction and AbortTransaction to save/restore the settings,
* and during the first transaction within a backend, the value to be saved
- * and perhaps restored is indeed invalid. We have to be able to get
+ * and perhaps restored is indeed invalid. We have to be able to get
* through AbortTransaction without asserting in case InitPostgres fails.
*/
void
@@ -291,7 +291,7 @@ InSecurityRestrictedOperation(void)
/*
* These are obsolete versions of Get/SetUserIdAndSecContext that are
* only provided for bug-compatibility with some rather dubious code in
- * pljava. We allow the userid to be set, but only when not inside a
+ * pljava. We allow the userid to be set, but only when not inside a
* security restriction context.
*/
void
@@ -394,7 +394,7 @@ InitializeSessionUserId(const char *rolename)
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -493,7 +493,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -561,7 +561,7 @@ GetUserNameFromId(Oid roleid)
* ($DATADIR/postmaster.pid) and Unix-socket-file lockfiles ($SOCKFILE.lock).
* Both kinds of files contain the same info initially, although we can add
* more information to a data-directory lockfile after it's created, using
- * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
+ * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
* of these lockfiles.
*
* On successful lockfile creation, a proc_exit callback to remove the
@@ -650,7 +650,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
my_gp_pid = 0;
/*
- * We need a loop here because of race conditions. But don't loop forever
+ * We need a loop here because of race conditions. But don't loop forever
* (for example, a non-writable $PGDATA directory might cause a failure
* that won't go away). 100 tries seems like plenty.
*/
@@ -659,7 +659,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
+ * Think not to make the file protection weaker than 0600. See
* comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
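Reduced to its skeleton, the loop looks roughly like this (the real code
also examines the old file's PID and any associated shared memory segment
before deciding the file is stale):

    #include <errno.h>
    #include <fcntl.h>

    static int
    create_lock_file_sketch(const char *filename)
    {
        int     ntries;

        for (ntries = 0; ntries < 100; ntries++)
        {
            /* O_EXCL makes creation atomic; 0600 keeps the file private */
            int     fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);

            if (fd >= 0)
                return fd;      /* lock acquired */
            if (errno != EEXIST)
                return -1;      /* hard failure, e.g. unwritable directory */
            /* ... examine the existing file; if stale, unlink and retry ... */
        }
        return -1;
    }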
@@ -727,7 +727,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
* implies that the existing process has a different userid than we
* do, which means it cannot be a competing postmaster. A postmaster
* cannot successfully attach to a data directory owned by a userid
- * other than its own. (This is now checked directly in
+ * other than its own. (This is now checked directly in
* checkDataDir(), but has been true for a long time because of the
* restriction that the data directory isn't group- or
* world-accessible.) Also, since we create the lockfiles mode 600,
@@ -765,9 +765,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be that
+ * No, the creating process did not exist. However, it could be that
* the postmaster crashed (or more likely was kill -9'd by a clueless
- * admin) but has left orphan backends behind. Check for this by
+ * admin) but has left orphan backends behind. Check for this by
* looking to see if there is an associated shmem segment that is
* still in use.
*
@@ -808,7 +808,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Looks like nobody's home. Unlink the file and try again to create
- * it. Need a loop because of possible race condition against other
+ * it. Need a loop because of possible race condition against other
* would-be creators.
*/
if (unlink(filename) < 0)
@@ -822,8 +822,8 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * Successfully created the file, now fill it. See comment in miscadmin.h
- * about the contents. Note that we write the same first five lines into
+ * Successfully created the file, now fill it. See comment in miscadmin.h
+ * about the contents. Note that we write the same first five lines into
* both datadir and socket lockfiles; although more stuff may get added to
* the datadir lockfile later.
*/
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 89a7c9e15c..ed936d7fad 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -80,7 +80,7 @@ static void process_settings(Oid databaseid, Oid roleid);
* GetDatabaseTuple -- fetch the pg_database row for a database
*
* This is used during backend startup when we don't yet have any access to
- * system catalogs in general. In the worst case, we can seqscan pg_database
+ * system catalogs in general. In the worst case, we can seqscan pg_database
* using nothing but the hard-wired descriptor that relcache.c creates for
* pg_database. In more typical cases, relcache.c was able to load
* descriptors for both pg_database and its indexes from the shared relcache
@@ -104,7 +104,7 @@ GetDatabaseTuple(const char *dbname)
CStringGetDatum(dbname));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -147,7 +147,7 @@ GetDatabaseTupleByOid(Oid dboid)
ObjectIdGetDatum(dboid));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -186,7 +186,7 @@ PerformAuthentication(Port *port)
* In EXEC_BACKEND case, we didn't inherit the contents of pg_hba.conf
* etcetera from the postmaster, and have to load them ourselves.
*
- * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
+ * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
if (!load_hba())
@@ -310,7 +310,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
name)));
/*
- * Check privilege to connect to the database. (The am_superuser test
+ * Check privilege to connect to the database. (The am_superuser test
* is redundant, but since we have the flag, might as well check it
* and save a few cycles.)
*/
@@ -326,7 +326,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -454,7 +454,7 @@ InitializeMaxBackends(void)
/* the extra unit accounts for the autovacuum launcher */
MaxBackends = MaxConnections + autovacuum_max_workers + 1 +
- + max_worker_processes;
+ +max_worker_processes;
/* internal error because the values were all checked previously */
if (MaxBackends > MAX_BACKENDS)
@@ -491,7 +491,7 @@ BaseInit(void)
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
- * OID, using the dboid parameter. In the latter case, the actual database
+ * OID, using the dboid parameter. In the latter case, the actual database
* name can be returned to the caller in out_dbname. If out_dbname isn't
* NULL, it must point to a buffer of size NAMEDATALEN.
*
@@ -912,7 +912,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
/*
* Now process any command-line switches and any additional GUC variable
- * settings passed in the startup packet. We couldn't do this before
+ * settings passed in the startup packet. We couldn't do this before
* because we didn't know if client is a superuser.
*/
if (MyProcPort != NULL)
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index ca96539055..6861572655 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -231,7 +231,7 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
* 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
- * 0x9d. [region_low, region_high] We
+ * 0x9d. [region_low, region_high] We
* should remember big5 has two different regions (above).
* There is a bias for the distance between these regions.
* 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index ad255ee693..665ac10f06 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -55,7 +55,7 @@
/*
* We maintain a simple linked list caching the fmgr lookup info for the
* currently selected conversion functions, as well as any that have been
- * selected previously in the current session. (We remember previous
+ * selected previously in the current session. (We remember previous
* settings because we must be able to restore a previous setting during
* transaction rollback, without doing any fresh catalog accesses.)
*
@@ -102,7 +102,7 @@ static int cliplen(const char *str, int len, int limit);
/*
- * Prepare for a future call to SetClientEncoding. Success should mean
+ * Prepare for a future call to SetClientEncoding. Success should mean
* that SetClientEncoding is guaranteed to succeed for this encoding request.
*
* (But note that success before backend_startup_complete does not guarantee
@@ -174,7 +174,7 @@ PrepareClientEncoding(int encoding)
/*
* We cannot yet remove any older entry for the same encoding pair,
- * since it could still be in use. SetClientEncoding will clean up.
+ * since it could still be in use. SetClientEncoding will clean up.
*/
return 0; /* success */
@@ -183,8 +183,8 @@ PrepareClientEncoding(int encoding)
{
/*
* If we're not in a live transaction, the only thing we can do is
- * restore a previous setting using the cache. This covers all
- * transaction-rollback cases. The only case it might not work for is
+ * restore a previous setting using the cache. This covers all
+ * transaction-rollback cases. The only case it might not work for is
* trying to change client_encoding on the fly by editing
* postgresql.conf and SIGHUP'ing. Which would probably be a stupid
* thing to do anyway.
@@ -564,7 +564,7 @@ pg_client_to_server(const char *s, int len)
* See the notes about string conversion functions at the top of this file.
*
* Unlike the other string conversion functions, this will apply validation
- * even if encoding == DatabaseEncoding->encoding. This is because this is
+ * even if encoding == DatabaseEncoding->encoding. This is because this is
* used to process data coming in from outside the database, and we never
* want to just assume validity.
*/
@@ -592,7 +592,7 @@ pg_any_to_server(const char *s, int len, int encoding)
* the selected client_encoding. If the client encoding is ASCII-safe
* then we just do a straight validation under that encoding. For an
* ASCII-unsafe encoding we have a problem: we dare not pass such data
- * to the parser but we have no way to convert it. We compromise by
+ * to the parser but we have no way to convert it. We compromise by
* rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(encoding))
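The compromise in the ASCII-unsafe case reduces to a high-bit scan; a
minimal sketch:

    #include <stdbool.h>

    static bool
    all_ascii(const char *s, int len)
    {
        int     i;

        for (i = 0; i < len; i++)
            if ((unsigned char) s[i] >= 0x80)
                return false;   /* non-ASCII: dare not pass to the parser */
        return true;
    }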
@@ -965,11 +965,11 @@ raw_pg_bind_textdomain_codeset(const char *domainname, int encoding)
* On most platforms, gettext defaults to the codeset implied by LC_CTYPE.
* When that matches the database encoding, we don't need to do anything. In
* CREATE DATABASE, we enforce or trust that the locale's codeset matches the
- * database encoding, except for the C locale. (On Windows, we also permit a
+ * database encoding, except for the C locale. (On Windows, we also permit a
* discrepancy under the UTF8 encoding.) For the C locale, explicitly bind
* gettext to the right codeset.
*
- * On Windows, gettext defaults to the Windows ANSI code page. This is a
+ * On Windows, gettext defaults to the Windows ANSI code page. This is a
* convenient departure for software that passes the strings to Windows ANSI
* APIs, but we don't do that. Compel gettext to use database encoding or,
* failing that, the LC_CTYPE encoding as it would on other platforms.
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 5f65f1bc89..0cc753e668 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1512,7 +1512,7 @@ pg_utf8_islegal(const unsigned char *source, int length)
*
* Not knowing anything about the properties of the encoding in use, we just
* keep incrementing the last byte until we get a validly-encoded result,
- * or we run out of values to try. We don't bother to try incrementing
+ * or we run out of values to try. We don't bother to try incrementing
* higher-order bytes, so there's no growth in runtime for wider characters.
* (If we did try to do that, we'd need to consider the likelihood that 255
* is not a valid final byte in the encoding.)
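As a sketch of that strategy, with 'verify' standing in for the encoding's
validity-check routine:

    #include <stdbool.h>

    typedef bool (*mb_verifier) (const unsigned char *s, int len);

    static bool
    charinc_sketch(unsigned char *charptr, int len, mb_verifier verify)
    {
        unsigned char *lastbyte = charptr + (len - 1);

        while (*lastbyte < 255)
        {
            (*lastbyte)++;
            if (verify(charptr, len))
                return true;    /* found a validly-encoded successor */
        }
        return false;           /* ran out of values to try */
    }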
@@ -1542,7 +1542,7 @@ pg_generic_charinc(unsigned char *charptr, int len)
* For a one-byte character less than 0x7F, we just increment the byte.
*
* For a multibyte character, every byte but the first must fall between 0x80
- * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
+ * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
* the last byte that's not already at its maximum value. If we can't find a
* byte that's less than the maximum allowable value, we simply fail. We also
* need some special-case logic to skip regions used for surrogate pair
@@ -1803,8 +1803,8 @@ int
pg_encoding_verifymb(int encoding, const char *mbstr, int len)
{
return (PG_VALID_ENCODING(encoding) ?
- ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
- ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
+ ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
}
/*
diff --git a/src/backend/utils/mb/wstrcmp.c b/src/backend/utils/mb/wstrcmp.c
index 64a9cf848e..dad3ae023a 100644
--- a/src/backend/utils/mb/wstrcmp.c
+++ b/src/backend/utils/mb/wstrcmp.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/mb/wstrncmp.c b/src/backend/utils/mb/wstrncmp.c
index 87c1f5afda..ea4823fc6f 100644
--- a/src/backend/utils/mb/wstrncmp.c
+++ b/src/backend/utils/mb/wstrncmp.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 15020c4508..0401cd4d2a 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -496,7 +496,7 @@ static int max_identifier_length;
static int block_size;
static int segment_size;
static int wal_block_size;
-static bool data_checksums;
+static bool data_checksums;
static int wal_segment_size;
static bool integer_datetimes;
static int effective_io_concurrency;
@@ -2148,7 +2148,7 @@ static struct config_int ConfigureNamesInt[] =
NULL
},
&max_replication_slots,
- 0, 0, MAX_BACKENDS /* XXX?*/,
+ 0, 0, MAX_BACKENDS /* XXX? */ ,
NULL, NULL, NULL
},
@@ -2543,7 +2543,11 @@ static struct config_int ConfigureNamesInt[] =
{"track_activity_query_size", PGC_POSTMASTER, RESOURCES_MEM,
gettext_noop("Sets the size reserved for pg_stat_activity.query, in bytes."),
NULL,
- /* There is no _bytes_ unit, so the user can't supply units for this. */
+
+ /*
+ * There is no _bytes_ unit, so the user can't supply units for
+ * this.
+ */
},
&pgstat_track_activity_query_size,
1024, 100, 102400,
@@ -3778,7 +3782,7 @@ get_guc_variables(void)
/*
- * Build the sorted array. This is split out so that it could be
+ * Build the sorted array. This is split out so that it could be
* re-executed after startup (eg, we could allow loadable modules to
* add vars, and then we'd need to re-sort).
*/
@@ -3935,7 +3939,7 @@ add_placeholder_variable(const char *name, int elevel)
/*
* The char* is allocated at the end of the struct since we have no
- * 'static' place to point to. Note that the current value, as well as
+ * 'static' place to point to. Note that the current value, as well as
* the boot and reset values, start out NULL.
*/
var->variable = (char **) (var + 1);
@@ -3977,7 +3981,7 @@ find_option(const char *name, bool create_placeholders, int elevel)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that the
+ * See if the name is an obsolete name for a variable. We assume that the
* set of supported old names is short enough that a brute-force search is
* the best way.
*/
@@ -4636,7 +4640,7 @@ NewGUCNestLevel(void)
/*
* Do GUC processing at transaction or subtransaction commit or abort, or
* when exiting a function that has proconfig settings, or when undoing a
- * transient assignment to some GUC variables. (The name is thus a bit of
+ * transient assignment to some GUC variables. (The name is thus a bit of
* a misnomer; perhaps it should be ExitGUCNestLevel or some such.)
* During abort, we discard all GUC settings that were applied at nesting
* levels >= nestLevel. nestLevel == 1 corresponds to the main transaction.
@@ -5357,7 +5361,7 @@ validate_conf_option(struct config_generic * record, const char *name,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a Boolean value",
+ errmsg("parameter \"%s\" requires a Boolean value",
name)));
return 0;
}
@@ -5387,8 +5391,8 @@ validate_conf_option(struct config_generic * record, const char *name,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value),
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value),
hintmsg ? errhint("%s", _(hintmsg)) : 0));
return 0;
}
@@ -5398,7 +5402,7 @@ validate_conf_option(struct config_generic * record, const char *name,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)",
- *((int *) newval), name, conf->min, conf->max)));
+ *((int *) newval), name, conf->min, conf->max)));
return 0;
}
@@ -5425,7 +5429,7 @@ validate_conf_option(struct config_generic * record, const char *name,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a numeric value",
+ errmsg("parameter \"%s\" requires a numeric value",
name)));
return 0;
}
@@ -5435,7 +5439,7 @@ validate_conf_option(struct config_generic * record, const char *name,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%g is outside the valid range for parameter \"%s\" (%g .. %g)",
- *((double *) newval), name, conf->min, conf->max)));
+ *((double *) newval), name, conf->min, conf->max)));
return 0;
}
@@ -5512,9 +5516,9 @@ validate_conf_option(struct config_generic * record, const char *name,
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value),
- hintmsg ? errhint("%s", _(hintmsg)) : 0));
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value),
+ hintmsg ? errhint("%s", _(hintmsg)) : 0));
if (hintmsg != NULL)
pfree(hintmsg);
@@ -5667,7 +5671,7 @@ set_config_option(const char *name, const char *value,
* If a PGC_BACKEND parameter is changed in the config file,
* we want to accept the new value in the postmaster (whence
* it will propagate to subsequently-started backends), but
- * ignore it in existing backends. This is a tad klugy, but
+ * ignore it in existing backends. This is a tad klugy, but
* necessary because we don't re-read the config file during
* backend start.
*
@@ -5724,7 +5728,7 @@ set_config_option(const char *name, const char *value,
* An exception might be made if the reset value is assumed to be "safe".
*
* Note: this flag is currently used for "session_authorization" and
- * "role". We need to prohibit changing these inside a local userid
+ * "role". We need to prohibit changing these inside a local userid
* context because when we exit it, GUC won't be notified, leaving things
* out of sync. (This could be fixed by forcing a new GUC nesting level,
* but that would change behavior in possibly-undesirable ways.) Also, we
@@ -6515,7 +6519,7 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier. For quote mode,
+ * Plain string literal or identifier. For quote mode,
* quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
@@ -6681,7 +6685,7 @@ replace_auto_config_value(ConfigVariable **head_p, ConfigVariable **tail_p,
* configuration file (PG_AUTOCONF_FILENAME) intact.
*/
void
-AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
+AlterSystemSetConfigFile(AlterSystemStmt *altersysstmt)
{
char *name;
char *value;
@@ -6698,7 +6702,7 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to execute ALTER SYSTEM command"))));
+ (errmsg("must be superuser to execute ALTER SYSTEM command"))));
/*
* Validate the name and arguments [value1, value2 ... ].
@@ -6724,7 +6728,7 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->context == PGC_INTERNAL) ||
(record->flags & GUC_DISALLOW_IN_FILE))
@@ -6737,7 +6741,7 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
ERROR, true, NULL,
&newextra))
ereport(ERROR,
- (errmsg("invalid value for parameter \"%s\": \"%s\"", name, value)));
+ (errmsg("invalid value for parameter \"%s\": \"%s\"", name, value)));
/*
@@ -6754,8 +6758,8 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
 * Only one backend at a time is allowed to operate on PG_AUTOCONF_FILENAME;
 * we ensure that by updating the contents of the file while holding
 * AutoFileLock. To ensure crash safety, first the contents are written to
- * a temporary file which is then renameed to PG_AUTOCONF_FILENAME. In case
- * there exists a temp file from previous crash, that can be reused.
+ * a temporary file which is then renamed to PG_AUTOCONF_FILENAME. In
+ * case a temp file exists from a previous crash, it can be reused.
*/
LWLockAcquire(AutoFileLock, LW_EXCLUSIVE);
@@ -6846,6 +6850,7 @@ ExecSetVariableStmt(VariableSetStmt *stmt, bool isTopLevel)
0);
break;
case VAR_SET_MULTI:
+
/*
* Special-case SQL syntaxes. The TRANSACTION and SESSION
* CHARACTERISTICS cases effectively set more than one variable
@@ -7131,7 +7136,7 @@ define_custom_variable(struct config_generic * variable)
* variable. Essentially, we need to duplicate all the active and stacked
* values, but with appropriate validation and datatype adjustment.
*
- * If an assignment fails, we report a WARNING and keep going. We don't
+ * If an assignment fails, we report a WARNING and keep going. We don't
* want to throw ERROR for bad values, because it'd bollix the add-on
* module that's presumably halfway through getting loaded. In such cases
* the default or previous state will become active instead.
@@ -7159,7 +7164,7 @@ define_custom_variable(struct config_generic * variable)
/*
* Free up as much as we conveniently can of the placeholder structure.
* (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
+ * leaked. Since this can only happen once per session per variable, it
* doesn't seem worth spending much code on.)
*/
set_string_field(pHolder, pHolder->variable, NULL);
@@ -7232,7 +7237,7 @@ reapply_stacked_values(struct config_generic * variable,
else
{
/*
- * We are at the end of the stack. If the active/previous value is
+ * We are at the end of the stack. If the active/previous value is
* different from the reset value, it must represent a previously
* committed session value. Apply it, and then drop the stack entry
* that set_config_option will have created under the impression that
@@ -8424,7 +8429,7 @@ ParseLongOption(const char *string, char **name, char **value)
/*
* Handle options fetched from pg_db_role_setting.setconfig,
- * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
+ * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
*
* The array parameter must be an array of TEXT (it must not be NULL).
*/
@@ -8705,7 +8710,7 @@ GUCArrayReset(ArrayType *array)
* Validate a proposed option setting for GUCArrayAdd/Delete/Reset.
*
* name is the option name. value is the proposed value for the Add case,
- * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
+ * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* not an error to have no permissions to set the option.
*
* Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not
@@ -8786,7 +8791,7 @@ validate_option_array_item(const char *name, const char *value,
* ERRCODE_INVALID_PARAMETER_VALUE SQLSTATE for check hook failures.
*
* Note that GUC_check_errmsg() etc are just macros that result in a direct
- * assignment to the associated variables. That is ugly, but forced by the
+ * assignment to the associated variables. That is ugly, but forced by the
* limitations of C's macro mechanisms.
*/
void
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 6294ca30ed..3aeceae67c 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -109,7 +109,7 @@ static char **save_argv;
* from being clobbered by subsequent ps_display actions.
*
* (The original argv[] will not be overwritten by this routine, but may be
- * overwritten during init_ps_display. Also, the physical location of the
+ * overwritten during init_ps_display. Also, the physical location of the
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
@@ -210,7 +210,7 @@ save_ps_display_args(int argc, char **argv)
/*
* Call this once during subprocess startup to set the identification
- * values. At this point, the original argv[] array may be overwritten.
+ * values. At this point, the original argv[] array may be overwritten.
*/
void
init_ps_display(const char *username, const char *dbname,
@@ -360,7 +360,7 @@ set_ps_display(const char *activity, bool force)
/*
* Returns what's currently in the ps display, in case someone needs
- * it. Note that only the activity part is returned. On some platforms
+ * it. Note that only the activity part is returned. On some platforms
* the string will not be null-terminated, so return the effective
* length into *displen.
*/
diff --git a/src/backend/utils/misc/rbtree.c b/src/backend/utils/misc/rbtree.c
index 556045dd4b..e3efd4c08b 100644
--- a/src/backend/utils/misc/rbtree.c
+++ b/src/backend/utils/misc/rbtree.c
@@ -13,7 +13,7 @@
*
* Red-black trees are a type of balanced binary tree wherein (1) any child of
* a red node is always black, and (2) every path from root to leaf traverses
- * an equal number of black nodes. From these properties, it follows that the
+ * an equal number of black nodes. From these properties, it follows that the
* longest path from root to leaf is only about twice as long as the shortest,
* so lookups are guaranteed to run in O(lg n) time.
*
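 * To see why: if every root-to-leaf path crosses b black nodes, the
 * shortest possible path has length b (all black), while the longest has
 * length at most 2b, since by property (1) red nodes can never be
 * adjacent. Hence the tree's height is at most about 2*lg(n+1).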
@@ -102,7 +102,7 @@ static RBNode sentinel = {InitialState, RBBLACK, RBNIL, RBNIL, NULL};
* valid data! freefunc can be NULL if caller doesn't require retail
* space reclamation.
*
- * The RBTree node is palloc'd in the caller's memory context. Note that
+ * The RBTree node is palloc'd in the caller's memory context. Note that
* all contents of the tree are actually allocated by the caller, not here.
*
* Since tree contents are managed by the caller, there is currently not
@@ -282,10 +282,10 @@ rb_rotate_right(RBTree *rb, RBNode *x)
/*
* Maintain Red-Black tree balance after inserting node x.
*
- * The newly inserted node is always initially marked red. That may lead to
+ * The newly inserted node is always initially marked red. That may lead to
* a situation where a red node has a red child, which is prohibited. We can
* always fix the problem by a series of color changes and/or "rotations",
- * which move the problem progressively higher up in the tree. If one of the
+ * which move the problem progressively higher up in the tree. If one of the
* two red nodes is the root, we can always fix the problem by changing the
* root from red to black.
*
@@ -296,7 +296,7 @@ static void
rb_insert_fixup(RBTree *rb, RBNode *x)
{
/*
- * x is always a red node. Initially, it is the newly inserted node. Each
+ * x is always a red node. Initially, it is the newly inserted node. Each
* iteration of this loop moves it higher up in the tree.
*/
while (x != rb->root && x->parent->color == RBRED)
@@ -481,7 +481,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
while (x != rb->root && x->color == RBBLACK)
{
/*
- * Left and right cases are symmetric. Any nodes that are children of
+ * Left and right cases are symmetric. Any nodes that are children of
* x have a black-height one less than the remainder of the nodes in
* the tree. We rotate and recolor nodes to move the problem up the
* tree: at some stage we'll either fix the problem, or reach the root
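
The two invariants quoted in the rbtree.c header can be checked mechanically. A self-contained sketch with a toy node type (not the real RBNode/RBTree API):

#include <stddef.h>

typedef enum { RED, BLACK } Color;

typedef struct Node
{
    Color       color;
    struct Node *left;
    struct Node *right;
} Node;

/*
 * Return the black-height of the subtree if both quoted invariants hold
 * (no red node has a red child; every root-to-NULL path crosses the same
 * number of black nodes), else -1.  NULL children act as black sentinels.
 */
static int
black_height(const Node *n)
{
    int         lh,
                rh;

    if (n == NULL)
        return 1;
    if (n->color == RED &&
        ((n->left && n->left->color == RED) ||
         (n->right && n->right->color == RED)))
        return -1;              /* red node with red child */
    lh = black_height(n->left);
    rh = black_height(n->right);
    if (lh < 0 || rh < 0 || lh != rh)
        return -1;              /* unequal black counts on some path */
    return lh + (n->color == BLACK ? 1 : 0);
}
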
diff --git a/src/backend/utils/misc/timeout.c b/src/backend/utils/misc/timeout.c
index f56a63b704..ec259a6f8e 100644
--- a/src/backend/utils/misc/timeout.c
+++ b/src/backend/utils/misc/timeout.c
@@ -57,7 +57,7 @@ static timeout_params *volatile active_timeouts[MAX_TIMEOUTS];
* Note that we don't bother to reset any pending timer interrupt when we
* disable the signal handler; it's not really worth the cycles to do so,
* since the probability of the interrupt actually occurring while we have
- * it disabled is low. See comments in schedule_alarm() about that.
+ * it disabled is low. See comments in schedule_alarm() about that.
*/
static volatile sig_atomic_t alarm_enabled = false;
@@ -69,7 +69,7 @@ static volatile sig_atomic_t alarm_enabled = false;
* Internal helper functions
*
* For all of these, it is caller's responsibility to protect them from
- * interruption by the signal handler. Generally, call disable_alarm()
+ * interruption by the signal handler. Generally, call disable_alarm()
* first to prevent interruption, then update state, and last call
* schedule_alarm(), which will re-enable the signal handler if needed.
*****************************************************************************/
@@ -144,7 +144,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
Assert(all_timeouts[id].timeout_handler != NULL);
/*
- * If this timeout was already active, momentarily disable it. We
+ * If this timeout was already active, momentarily disable it. We
* interpret the call as a directive to reschedule the timeout.
*/
i = find_active_timeout(id);
@@ -152,7 +152,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
remove_timeout_index(i);
/*
- * Find out the index where to insert the new timeout. We sort by
+ * Find out the index where to insert the new timeout. We sort by
* fin_time, and for equal fin_time by priority.
*/
for (i = 0; i < num_active_timeouts; i++)
@@ -214,18 +214,18 @@ schedule_alarm(TimestampTz now)
*
* Because we didn't bother to reset the timer in disable_alarm(),
* it's possible that a previously-set interrupt will fire between
- * enable_alarm() and setitimer(). This is safe, however. There are
+ * enable_alarm() and setitimer(). This is safe, however. There are
* two possible outcomes:
*
* 1. The signal handler finds nothing to do (because the nearest
* timeout event is still in the future). It will re-set the timer
- * and return. Then we'll overwrite the timer value with a new one.
+ * and return. Then we'll overwrite the timer value with a new one.
* This will mean that the timer fires a little later than we
* intended, but only by the amount of time it takes for the signal
* handler to do nothing useful, which shouldn't be much.
*
* 2. The signal handler executes and removes one or more timeout
- * events. When it returns, either the queue is now empty or the
+ * events. When it returns, either the queue is now empty or the
* frontmost event is later than the one we looked at above. So we'll
* overwrite the timer value with one that is too soon (plus or minus
* the signal handler's execution time), causing a useless interrupt
@@ -266,14 +266,14 @@ handle_sig_alarm(SIGNAL_ARGS)
* mainline is waiting for a lock). If SIGINT or similar arrives while
* this code is running, we'd lose control and perhaps leave our data
* structures in an inconsistent state. Disable immediate interrupts, and
- * just to be real sure, bump the holdoff counter as well. (The reason
+ * just to be real sure, bump the holdoff counter as well. (The reason
* for this belt-and-suspenders-too approach is to make sure that nothing
* bad happens if a timeout handler calls code that manipulates
* ImmediateInterruptOK.)
*
* Note: it's possible for a SIGINT to interrupt handle_sig_alarm before
* we manage to do this; the net effect would be as if the SIGALRM event
- * had been silently lost. Therefore error recovery must include some
+ * had been silently lost. Therefore error recovery must include some
* action that will allow any lost interrupt to be rescheduled. Disabling
* some or all timeouts is sufficient, or if that's not appropriate,
* reschedule_timeouts() can be called. Also, the signal blocking hazard
@@ -434,7 +434,7 @@ RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
*
* This can be used during error recovery in case query cancel resulted in loss
* of a SIGALRM event (due to longjmp'ing out of handle_sig_alarm before it
- * could do anything). But note it's not necessary if any of the public
+ * could do anything). But note it's not necessary if any of the public
* enable_ or disable_timeout functions are called in the same area, since
* those all do schedule_alarm() internally if needed.
*/
@@ -503,7 +503,7 @@ enable_timeout_at(TimeoutId id, TimestampTz fin_time)
* Enable multiple timeouts at once.
*
* This works like calling enable_timeout_after() and/or enable_timeout_at()
- * multiple times. Use this to reduce the number of GetCurrentTimestamp()
+ * multiple times. Use this to reduce the number of GetCurrentTimestamp()
* and setitimer() calls needed to establish multiple timeouts.
*/
void
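
A standalone sketch of the sorted insertion that enable_timeout()'s comments describe, ordering by fin_time with ties broken by priority (here: lower id wins). Types and names are illustrative; the caller is assumed to have disabled the alarm first, and num_active is assumed to be below the array limit:

#include <string.h>

typedef long long TimestampTz;  /* stand-in for the real 64-bit timestamp */

typedef struct
{
    int         id;             /* lower id == higher priority */
    TimestampTz fin_time;
} TimeoutEntry;

#define MAX_TIMEOUTS 16

static TimeoutEntry active_list[MAX_TIMEOUTS];
static int  num_active = 0;

/*
 * Insert an entry keeping the array sorted by fin_time, ties broken by
 * id, mirroring the search loop quoted from enable_timeout().
 */
static void
insert_timeout(TimeoutEntry entry)
{
    int         i;

    for (i = 0; i < num_active; i++)
    {
        if (entry.fin_time < active_list[i].fin_time ||
            (entry.fin_time == active_list[i].fin_time &&
             entry.id < active_list[i].id))
            break;
    }
    memmove(&active_list[i + 1], &active_list[i],
            (num_active - i) * sizeof(TimeoutEntry));
    active_list[i] = entry;
    num_active++;
}
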
diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c
index 24ccbe5685..6a5a7b39ab 100644
--- a/src/backend/utils/misc/tzparser.c
+++ b/src/backend/utils/misc/tzparser.c
@@ -4,7 +4,7 @@
* Functions for parsing timezone offset files
*
* Note: this code is invoked from the check_hook for the GUC variable
- * timezone_abbreviations. Therefore, it should report problems using
+ * timezone_abbreviations. Therefore, it should report problems using
* GUC_check_errmsg() and related functions, and try to avoid throwing
* elog(ERROR). This is not completely bulletproof at present --- in
* particular out-of-memory will throw an error. Could probably fix with
@@ -179,7 +179,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/*
* Search the array for a duplicate; as a useful side effect, the array is
- * maintained in sorted order. We use strcmp() to ensure we match the
+ * maintained in sorted order. We use strcmp() to ensure we match the
* sort order datetime.c expects.
*/
arrayptr = *base;
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 099200cb66..743455e4bc 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -38,7 +38,7 @@
* request, even if it was much larger than necessary. This led to more
* and more wasted space in allocated chunks over time. To fix, get rid
* of the midrange behavior: we now handle only "small" power-of-2-size
- * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
* the number of freelists to change the small/large boundary.
*
*
@@ -54,7 +54,7 @@
* Thus, if someone makes the common error of writing past what they've
* requested, the problem is likely to go unnoticed ... until the day when
* there *isn't* any wasted space, perhaps because of different memory
- * alignment on a new platform, or some other effect. To catch this sort
+ * alignment on a new platform, or some other effect. To catch this sort
* of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
@@ -189,7 +189,7 @@ typedef AllocSetContext *AllocSet;
/*
* AllocBlock
* An AllocBlock is the unit of memory that is obtained by aset.c
- * from malloc(). It contains one or more AllocChunks, which are
+ * from malloc(). It contains one or more AllocChunks, which are
* the units requested by palloc() and freed by pfree(). AllocChunks
* cannot be returned to malloc() individually, instead they are put
* on freelists by pfree() and re-used by the next palloc() that has
@@ -326,7 +326,7 @@ AllocSetFreeIndex(Size size)
/*
* At this point we need to obtain log2(tsize)+1, ie, the number of
- * not-all-zero bits at the right. We used to do this with a
+ * not-all-zero bits at the right. We used to do this with a
* shift-and-count loop, but this function is enough of a hotspot to
* justify micro-optimization effort. The best approach seems to be
* to use a lookup table. Note that this code assumes that
@@ -544,7 +544,7 @@ AllocSetInit(MemoryContext context)
* Actually, this routine has some discretion about what to do.
* It should mark all allocated chunks freed, but it need not necessarily
* give back all the resources the set owns. Our actual implementation is
- * that we hang onto any "keeper" block specified for the set. In this way,
+ * that we hang onto any "keeper" block specified for the set. In this way,
* we don't thrash malloc() when a context is repeatedly reset after small
* allocations, which is typical behavior for per-tuple contexts.
*/
@@ -794,7 +794,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* In most cases, we'll get back the index of the next larger
- * freelist than the one we need to put this chunk on. The
+ * freelist than the one we need to put this chunk on. The
* exception is when availchunk is exactly a power of 2.
*/
if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
@@ -953,7 +953,7 @@ AllocSetFree(MemoryContext context, void *pointer)
{
/*
* Big chunks are certain to have been allocated as single-chunk
- * blocks. Find the containing block and return it to malloc().
+ * blocks. Find the containing block and return it to malloc().
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -1080,7 +1080,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > set->allocChunkLimit)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block. Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
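
The AllocSetFreeIndex() hunk above mentions replacing a shift-and-count loop with a lookup table. A standalone rendering of that idea (the table is built at startup here instead of being a static initializer, and the constants are illustrative, not aset.c verbatim):

#include <stdio.h>
#include <stddef.h>

#define ALLOC_MINBITS 3         /* smallest chunk is 1 << 3 = 8 bytes */

/* log_table[i] = number of significant bits in the byte value i */
static unsigned char log_table[256];

static void
init_log_table(void)
{
    int         i;

    log_table[0] = 0;
    for (i = 1; i < 256; i++)
        log_table[i] = log_table[i / 2] + 1;
}

/*
 * Map a request size to a freelist index: 0 for sizes up to 8 bytes, then
 * one more per doubling.  Whole bytes are consumed by shifting; the final
 * partial byte is resolved with a single table lookup instead of a
 * shift-and-count loop.
 */
static int
free_index(size_t size)
{
    int         idx = 0;

    if (size > (1 << ALLOC_MINBITS))
    {
        size_t      tsize = (size - 1) >> ALLOC_MINBITS;

        while (tsize >= 256)
        {
            tsize >>= 8;
            idx += 8;
        }
        idx += log_table[tsize];
    }
    return idx;
}

int
main(void)
{
    init_log_table();
    /* 8 -> 0, 9..16 -> 1, 17..32 -> 2, 33..64 -> 3 ... */
    printf("%d %d %d %d\n",
           free_index(8), free_index(9), free_index(16), free_index(17));
    return 0;
}
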
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 132b04d7c9..e83e76dc0f 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -183,7 +183,7 @@ MemoryContextResetChildren(MemoryContext context)
*
* The type-specific delete routine removes all subsidiary storage
* for the context, but we have to delete the context node itself,
- * as well as recurse to get the children. We must also delink the
+ * as well as recurse to get the children. We must also delink the
* node from its parent, if it has one.
*/
void
@@ -487,22 +487,22 @@ MemoryContextContains(MemoryContext context, void *pointer)
* we want to be sure that we don't leave the context tree invalid
* in case of failure (such as insufficient memory to allocate the
* context node itself). The procedure goes like this:
- * 1. Context-type-specific routine first calls MemoryContextCreate(),
+ * 1. Context-type-specific routine first calls MemoryContextCreate(),
* passing the appropriate tag/size/methods values (the methods
* pointer will ordinarily point to statically allocated data).
* The parent and name parameters usually come from the caller.
- * 2. MemoryContextCreate() attempts to allocate the context node,
+ * 2. MemoryContextCreate() attempts to allocate the context node,
* plus space for the name. If this fails we can ereport() with no
* damage done.
- * 3. We fill in all of the type-independent MemoryContext fields.
- * 4. We call the type-specific init routine (using the methods pointer).
+ * 3. We fill in all of the type-independent MemoryContext fields.
+ * 4. We call the type-specific init routine (using the methods pointer).
* The init routine is required to make the node minimally valid
* with zero chance of failure --- it can't allocate more memory,
* for example.
- * 5. Now we have a minimally valid node that can behave correctly
+ * 5. Now we have a minimally valid node that can behave correctly
* when told to reset or delete itself. We link the node to its
* parent (if any), making the node part of the context tree.
- * 6. We return to the context-type-specific routine, which finishes
+ * 6. We return to the context-type-specific routine, which finishes
* up type-specific initialization. This routine can now do things
* that might fail (like allocate more memory), so long as it's
* sure the node is left in a state that delete will handle.
@@ -514,7 +514,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
*
* Normally, the context node and the name are allocated from
* TopMemoryContext (NOT from the parent context, since the node must
- * survive resets of its parent context!). However, this routine is itself
+ * survive resets of its parent context!). However, this routine is itself
* used to create TopMemoryContext! If we see that TopMemoryContext is NULL,
* we assume we are creating TopMemoryContext and use malloc() to allocate
* the node.
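
The numbered protocol above is all about ordering operations so that a failure can never leave a half-built node linked into the context tree. A toy sketch of the same discipline, with illustrative types and malloc() standing in for the real allocation:

#include <stdlib.h>
#include <string.h>

typedef struct Context
{
    struct Context *parent;
    struct Context *firstchild;
    struct Context *nextchild;  /* sibling link */
    char       *name;
} Context;

static Context *
create_context(Context *parent, const char *name)
{
    /* step 2: allocate node plus name; on failure nothing is damaged yet */
    Context    *node = malloc(sizeof(Context) + strlen(name) + 1);

    if (node == NULL)
        return NULL;            /* would ereport() in the real code */

    /* steps 3-4: make the node minimally valid; nothing here can fail */
    node->parent = NULL;
    node->firstchild = NULL;
    node->nextchild = NULL;
    node->name = (char *) (node + 1);
    strcpy(node->name, name);

    /* step 5: only now link into the tree, making the node reachable */
    if (parent != NULL)
    {
        node->parent = parent;
        node->nextchild = parent->firstchild;
        parent->firstchild = node;
    }

    /* step 6: caller may now do type-specific setup that is allowed to fail */
    return node;
}
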
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index f5e5ad4fe9..c1b13c360f 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -144,14 +144,14 @@ GetPortalByName(const char *name)
* Get the "primary" stmt within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such stmt. If multiple PlannedStmt structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one. Neither of these
* cases should occur in present usages of this function.
*
* Copes if given a list of Querys --- can't happen in a portal, but this
* code also supports plancache.c, which needs both cases.
*
* Note: the reason this is just handed a List is so that plancache.c
- * can share the code. For use with a portal, use PortalGetPrimaryStmt
+ * can share the code. For use with a portal, use PortalGetPrimaryStmt
* rather than calling this directly.
*/
Node *
@@ -277,7 +277,7 @@ CreateNewPortal(void)
* you can pass a constant string, perhaps "(query not available)".)
*
* commandTag shall be NULL if and only if the original query string
- * (before rewriting) was an empty string. Also, the passed commandTag must
+ * (before rewriting) was an empty string. Also, the passed commandTag must
* be a pointer to a constant string, since it is not copied.
*
* If cplan is provided, then it is a cached plan containing the stmts, and
@@ -480,14 +480,14 @@ PortalDrop(Portal portal, bool isTopCommit)
/*
* Allow portalcmds.c to clean up the state it knows about, in particular
- * shutting down the executor if still active. This step potentially runs
+ * shutting down the executor if still active. This step potentially runs
* user-defined code so failure has to be expected. It's the cleanup
* hook's responsibility to not try to do that more than once, in the case
* that failure occurs and then we come back to drop the portal again
* during transaction abort.
*
* Note: in most paths of control, this will have been done already in
- * MarkPortalDone or MarkPortalFailed. We're just making sure.
+ * MarkPortalDone or MarkPortalFailed. We're just making sure.
*/
if (PointerIsValid(portal->cleanup))
{
@@ -507,12 +507,12 @@ PortalDrop(Portal portal, bool isTopCommit)
PortalReleaseCachedPlan(portal);
/*
- * Release any resources still attached to the portal. There are several
+ * Release any resources still attached to the portal. There are several
* cases being covered here:
*
* Top transaction commit (indicated by isTopCommit): normally we should
* do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we have a
+ * releasing mechanism handle these resources too. However, if we have a
* FAILED portal (eg, a cursor that got an error), we'd better clean up
* its resources to avoid resource-leakage warning messages.
*
@@ -524,7 +524,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* cleaned up in transaction abort.
*
* Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become the
+ * is not FAILED then we do not release its locks. The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
@@ -611,7 +611,7 @@ PortalHashTableDeleteAll(void)
* Holdable cursors created in this transaction need to be converted to
* materialized form, since we are going to close down the executor and
* release locks. Non-holdable portals created in this transaction are
- * simply removed. Portals remaining from prior transactions should be
+ * simply removed. Portals remaining from prior transactions should be
* left untouched.
*
* Returns TRUE if any portals changed state (possibly causing user-defined
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 0da061a4dc..0955bccb3e 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -177,7 +177,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
* but don't delete the owner objects themselves.
*
* Note that this executes just one phase of release, and so typically
- * must be called three times. We do it this way because (a) we want to
+ * must be called three times. We do it this way because (a) we want to
* do all the recursion separately for each phase, thereby preserving
* the needed order of operations; and (b) xact.c may have other operations
* to do between the phases.
@@ -251,7 +251,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
*
* During a commit, there shouldn't be any remaining pins --- that
* would indicate failure to clean up the executor correctly --- so
- * issue warnings. In the abort case, just clean up quietly.
+ * issue warnings. In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -439,7 +439,7 @@ ResourceOwnerDelete(ResourceOwner owner)
/*
* We delink the owner from its parent before deleting it, so that if
* there's an error we won't have deleted/busted owners still attached to
- * the owner tree. Better a leak than a crash.
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -633,7 +633,7 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course, but
+ * recently pinned buffer. This isn't always the case of course, but
* it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
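
A standalone sketch of the back-to-front scan this comment justifies: pins are usually released in roughly LIFO order, so searching from the end tends to find the target immediately and the gap-closing shift stays short, avoiding O(N^2) bulk release. Hypothetical array bookkeeping, not the real ResourceOwner structures:

#include <stdbool.h>

typedef int Buffer;

static bool
forget_buffer(Buffer *buffers, int *nbuffers, Buffer buffer)
{
    int         i;

    for (i = *nbuffers - 1; i >= 0; i--)
    {
        if (buffers[i] == buffer)
        {
            /* close the gap by moving the trailing entries down one slot */
            while (i < *nbuffers - 1)
            {
                buffers[i] = buffers[i + 1];
                i++;
            }
            (*nbuffers)--;
            return true;
        }
    }
    return false;               /* not found; the real code raises an error */
}
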
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index d27e0bf1d8..106b917f72 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -7,14 +7,14 @@
* tuplesort.c). Merging is an ideal algorithm for tape devices, but if
* we implement it on disk by creating a separate file for each "tape",
* there is an annoying problem: the peak space usage is at least twice
- * the volume of actual data to be sorted. (This must be so because each
+ * the volume of actual data to be sorted. (This must be so because each
* datum will appear in both the input and output tapes of the final
- * merge pass. For seven-tape polyphase merge, which is otherwise a
+ * merge pass. For seven-tape polyphase merge, which is otherwise a
* pretty good algorithm, peak usage is more like 4x actual data volume.)
*
* We can work around this problem by recognizing that any one tape
* dataset (with the possible exception of the final output) is written
- * and read exactly once in a perfectly sequential manner. Therefore,
+ * and read exactly once in a perfectly sequential manner. Therefore,
* a datum once read will not be required again, and we can recycle its
* space for use by the new tape dataset(s) being generated. In this way,
* the total space usage is essentially just the actual data volume, plus
@@ -55,7 +55,7 @@
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted. This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
@@ -117,7 +117,7 @@ typedef struct LogicalTape
/*
* The total data volume in the logical tape is numFullBlocks * BLCKSZ +
- * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
* only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
@@ -157,7 +157,7 @@ struct LogicalTapeSet
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block. We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
@@ -218,7 +218,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
/*
* Read a block-sized buffer from the specified block of the underlying file.
*
- * No need for an error return convention; we ereport() on any error. This
+ * No need for an error return convention; we ereport() on any error. This
* module should never attempt to read a block it doesn't know is there.
*/
static void
@@ -353,7 +353,7 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
/*
* Reset a logical tape's indirect-block hierarchy after a write pass
- * to prepare for reading. We dump out partly-filled blocks except
+ * to prepare for reading. We dump out partly-filled blocks except
* at the top of the hierarchy, and we rewind each level to the start.
* This call returns the first data block number, or -1L if the tape
* is empty.
@@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes)
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to. This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
@@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes. Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
@@ -732,7 +732,7 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * Completion of a read phase. Rewind and prepare for write.
+ * Completion of a read phase. Rewind and prepare for write.
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
@@ -826,7 +826,7 @@ LogicalTapeRead(LogicalTapeSet *lts, int tapenum,
*
* This *must* be called just at the end of a write pass, before the
* tape is rewound (after rewind is too late!). It performs a rewind
- * and switch to read mode "for free". An immediately following rewind-
+ * and switch to read mode "for free". An immediately following rewind-
* for-read call is OK but not necessary.
*/
void
@@ -862,7 +862,7 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
}
/*
- * Backspace the tape a given number of bytes. (We also support a more
+ * Backspace the tape a given number of bytes. (We also support a more
* general seek interface, see below.)
*
* *Only* a frozen-for-read tape can be backed up; we don't support
@@ -966,7 +966,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation would
+ * OK, advance or back up to the target block. This implementation would
* be pretty inefficient for long seeks, but we really aren't expecting
* that (a seek over one tuple is typical).
*/
@@ -999,7 +999,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
* Obtain current position in a form suitable for a later LogicalTapeSeek.
*
* NOTE: it'd be OK to do this during write phase with intention of using
- * the position for a seek after freezing. Not clear if anyone needs that.
+ * the position for a seek after freezing. Not clear if anyone needs that.
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
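
A standalone sketch of the free-block policy the logtape.c comments describe: released blocks accumulate unsorted, the array is lazily re-sorted into decreasing order when a block is next demanded, and the lowest-numbered free block is popped from the end. Names are illustrative and allocation-failure handling is omitted:

#include <stdlib.h>

static long *freeBlocks;
static int  nFreeBlocks = 0;
static int  freeBlocksLen = 0;
static int  blocksSorted = 1;   /* true while the array is in order */
static long nBlocksAllocated = 0;

static int
cmp_desc(const void *a, const void *b)
{
    long        la = *(const long *) a;
    long        lb = *(const long *) b;

    return (la < lb) - (la > lb);   /* sort high-to-low */
}

static long
get_free_block(void)
{
    if (nFreeBlocks > 0)
    {
        if (!blocksSorted)
        {
            qsort(freeBlocks, nFreeBlocks, sizeof(long), cmp_desc);
            blocksSorted = 1;
        }
        return freeBlocks[--nFreeBlocks];   /* last entry == lowest block */
    }
    return nBlocksAllocated++;  /* no free block: extend the underlying file */
}

static void
release_block(long blocknum)
{
    if (nFreeBlocks == freeBlocksLen)
    {
        freeBlocksLen = freeBlocksLen ? freeBlocksLen * 2 : 32;
        freeBlocks = realloc(freeBlocks, freeBlocksLen * sizeof(long));
    }
    freeBlocks[nFreeBlocks++] = blocknum;
    blocksSorted = 0;           /* defer sorting until next demand */
}
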
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 8b520c160c..aa0f6d8e04 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -40,7 +40,7 @@
* into sorted runs in temporary tapes, emitting just enough tuples at each
* step to get back within the workMem limit. Whenever the run number at
* the top of the heap changes, we begin a new run with a new output tape
- * (selected per Algorithm D). After the end of the input is reached,
+ * (selected per Algorithm D). After the end of the input is reached,
* we dump out remaining tuples in memory into a final run (or two),
* then merge the runs using Algorithm D.
*
@@ -57,17 +57,17 @@
* access at all, defeating the read-ahead methods used by most Unix kernels.
* Worse, the output tape gets written into a very random sequence of blocks
* of the temp file, ensuring that things will be even worse when it comes
- * time to read that tape. A straightforward merge pass thus ends up doing a
+ * time to read that tape. A straightforward merge pass thus ends up doing a
* lot of waiting for disk seeks. We can improve matters by prereading from
* each source tape sequentially, loading about workMem/M bytes from each tape
* in turn. Then we run the merge algorithm, writing but not reading until
- * one of the preloaded tuple series runs out. Then we switch back to preread
+ * one of the preloaded tuple series runs out. Then we switch back to preread
* mode, fill memory again, and repeat. This approach helps to localize both
* read and write accesses.
*
* When the caller requests random access to the sort result, we form
* the final sorted run on a logical tape which is then "frozen", so
- * that we can access it randomly. When the caller does not need random
+ * that we can access it randomly. When the caller does not need random
* access, we return from tuplesort_performsort() as soon as we are down
* to one run per logical tape. The final merge is then performed
* on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
@@ -77,7 +77,7 @@
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives. In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
@@ -134,28 +134,28 @@ bool optimize_bounded_sort = true;
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons. We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries. tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
@@ -238,7 +238,7 @@ struct Tuplesortstate
void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() the out-of-line data (not the SortTuple struct!), and increase
@@ -264,7 +264,7 @@ struct Tuplesortstate
void (*reversedirection) (Tuplesortstate *state);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
@@ -412,7 +412,7 @@ struct Tuplesortstate
* If state->randomAccess is true, then the stored representation of the
* tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* randomAccess is not true, the write/read routines may omit the extra
* length word.
*
@@ -422,7 +422,7 @@ struct Tuplesortstate
* the back length word (if present).
*
* The write/read routines can make use of the tuple description data
- * stored in the Tuplesortstate record, if needed. They are also expected
+ * stored in the Tuplesortstate record, if needed. They are also expected
* to adjust state->availMem by the amount of memory space (not tape space!)
* released or consumed. There is no error return from either writetup
* or readtup; they should ereport() on failure.
@@ -519,7 +519,7 @@ static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
*
* After calling tuplesort_begin, the caller should call tuplesort_putXXX
* zero or more times, then call tuplesort_performsort when all the tuples
- * have been supplied. After performsort, retrieve the tuples in sorted
+ * have been supplied. After performsort, retrieve the tuples in sorted
* order by calling tuplesort_getXXX until it returns false/NULL. (If random
* access was requested, rescan, markpos, and restorepos can also be called.)
* Call tuplesort_end to terminate the operation and release memory/disk space.
@@ -859,7 +859,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
*
* Advise tuplesort that at most the first N result tuples are required.
*
- * Must be called before inserting any tuples. (Actually, we could allow it
+ * Must be called before inserting any tuples. (Actually, we could allow it
* as long as the sort hasn't spilled to disk, but there seems no need for
* delayed calls at the moment.)
*
@@ -1008,7 +1008,7 @@ grow_memtuples(Tuplesortstate *state)
* strategy and instead increase as much as we safely can.
*
* To stay within allowedMem, we can't increase memtupsize by more
- * than availMem / sizeof(SortTuple) elements. In practice, we want
+ * than availMem / sizeof(SortTuple) elements. In practice, we want
* to increase it by considerably less, because we need to leave some
* space for the tuples to which the new array slots will refer. We
* assume the new tuples will be about the same size as the tuples
@@ -1062,9 +1062,9 @@ grow_memtuples(Tuplesortstate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
@@ -1200,7 +1200,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the array
+ * Save the tuple into the unsorted array. First, grow the array
* as needed. Note that we try to grow the array when there is
* still one free slot remaining --- if we fail, there'll still be
* room to store the incoming tuple, and then we'll switch to
@@ -1221,7 +1221,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
* enough tuples to meet the bound.
*
* Note that once we enter TSS_BOUNDED state we will always try to
- * complete the sort that way. In the worst case, if later input
+ * complete the sort that way. In the worst case, if later input
* tuples are larger than earlier ones, this might cause us to
* exceed workMem significantly.
*/
@@ -1359,7 +1359,7 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
- * in memory, using a heap to eliminate excess tuples. Now we
+ * in memory, using a heap to eliminate excess tuples. Now we
* have to transform the heap to a properly-sorted array.
*/
sort_bounded_heap(state);
@@ -1373,7 +1373,7 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining in
+ * Finish tape-based sort. First, flush all tuples remaining in
* memory out to tape; then merge until we have a single remaining
* run (or, if !randomAccess, one run per tape). Note that
* mergeruns sets the correct state->status.
@@ -1434,7 +1434,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1640,7 +1640,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
HeapTuple
@@ -1659,7 +1659,7 @@ tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
/*
* Fetch the next index tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
IndexTuple
@@ -1729,7 +1729,7 @@ tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
/*
* We don't actually support backwards skip yet, because no callers need
- * it. The API is designed to allow for that later, though.
+ * it. The API is designed to allow for that later, though.
*/
Assert(forward);
Assert(ntuples >= 0);
@@ -1747,7 +1747,7 @@ tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1800,7 +1800,7 @@ tuplesort_merge_order(int64 allowedMem)
/*
* We need one tape for each merge input, plus another one for the output,
- * and each of these tapes needs buffer space. In addition we want
+ * and each of these tapes needs buffer space. In addition we want
* MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
* count).
*
@@ -1854,11 +1854,12 @@ inittapes(Tuplesortstate *state)
* don't decrease it to the point that we have no room for tuples. (That
* case is only likely to occur if sorting pass-by-value Datums; in all
* other scenarios the memtuples[] array is unlikely to occupy more than
- * half of allowedMem. In the pass-by-value case it's not important to
+ * half of allowedMem. In the pass-by-value case it's not important to
* account for tuple space, so we don't care if LACKMEM becomes
* inaccurate.)
*/
- tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
+ tapeSpace = (int64) maxTapes *TAPE_BUFFER_OVERHEAD;
+
if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
USEMEM(state, tapeSpace);
@@ -1978,7 +1979,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge. (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
@@ -2084,7 +2085,7 @@ mergeruns(Tuplesortstate *state)
* the loop without performing the last iteration of step D6, we have not
* rearranged the tape unit assignment, and therefore the result is on
* TAPE[T]. We need to do it this way so that we can freeze the final
- * output tape while rewinding it. The last iteration of step D6 would be
+ * output tape while rewinding it. The last iteration of step D6 would be
* a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[state->tapeRange];
@@ -2168,7 +2169,7 @@ mergeonerun(Tuplesortstate *state)
* beginmerge - initialize for a merge pass
*
* We decrease the counts of real and dummy runs for each tape, and mark
- * which tapes contain active input runs in mergeactive[]. Then, load
+ * which tapes contain active input runs in mergeactive[]. Then, load
* as many tuples as we can from each active input tape, and finally
* fill the merge heap with the first tuple from each active tape.
*/
@@ -2261,7 +2262,7 @@ beginmerge(Tuplesortstate *state)
* This routine exists to improve sequentiality of reads during a merge pass,
* as explained in the header comments of this file. Load tuples from each
* active source tape until the tape's run is exhausted or it has used up
- * its fair share of available memory. In any case, we guarantee that there
+ * its fair share of available memory. In any case, we guarantee that there
* is at least one preread tuple available from each unexhausted input tape.
*
* We invoke this routine at the start of a merge pass for initial load,
@@ -2524,7 +2525,7 @@ tuplesort_get_stats(Tuplesortstate *state,
* accurately once we have begun to return tuples to the caller (since we
* don't account for pfree's the caller is expected to do), so we cannot
* rely on availMem in a disk sort. This does not seem worth the overhead
- * to fix. Is it worth creating an API for the memory context code to
+ * to fix. Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
@@ -2562,7 +2563,7 @@ tuplesort_get_stats(Tuplesortstate *state,
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples. If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
@@ -2667,7 +2668,7 @@ sort_bounded_heap(Tuplesortstate *state)
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant. Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
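
The heap-insertion comment above corresponds to a classic sift-up. A minimal sketch on a min-heap of ints (the real code compares SortTuples rather than ints, and the caller likewise guarantees room):

/*
 * Place the new element at the end and bubble it up while it beats its
 * parent; parents are moved down instead of swapped, per Knuth 5.2.3H.
 */
static void
heap_insert(int *heap, int *n, int value)
{
    int         j = (*n)++;

    while (j > 0)
    {
        int         i = (j - 1) / 2;    /* parent of slot j */

        if (value >= heap[i])
            break;
        heap[j] = heap[i];      /* move parent down one level */
        j = i;
    }
    heap[j] = value;
}
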
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index a5a56be91b..8b968a8b62 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
@@ -17,7 +17,7 @@
* space limit specified by the caller.
*
* The (approximate) amount of memory allowed to the tuplestore is specified
- * in kilobytes by the caller. We absorb tuples and simply store them in an
+ * in kilobytes by the caller. We absorb tuples and simply store them in an
* in-memory array as long as we haven't exceeded maxKBytes. If we do exceed
* maxKBytes, we dump all the tuples into a temp file and then read from that
* when needed.
@@ -29,7 +29,7 @@
* When the caller requests backward-scan capability, we write the temp file
* in a format that allows either forward or backward scan. Otherwise, only
* forward scan is allowed. A request for backward scan must be made before
- * putting any tuples into the tuplestore. Rewind is normally allowed but
+ * putting any tuples into the tuplestore. Rewind is normally allowed but
* can be turned off via tuplestore_set_eflags; turning off rewind for all
* read pointers enables truncation of the tuplestore at the oldest read point
* for minimal memory usage. (The caller must explicitly call tuplestore_trim
@@ -64,7 +64,7 @@
/*
- * Possible states of a Tuplestore object. These denote the states that
+ * Possible states of a Tuplestore object. These denote the states that
* persist between calls of Tuplestore routines.
*/
typedef enum
@@ -83,7 +83,7 @@ typedef enum
*
* Special case: if eof_reached is true, then the pointer's read position is
* implicitly equal to the write position, and current/file/offset aren't
- * maintained. This way we need not update all the read pointers each time
+ * maintained. This way we need not update all the read pointers each time
* we write.
*/
typedef struct
@@ -128,7 +128,7 @@ struct Tuplestorestate
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
@@ -197,7 +197,7 @@ struct Tuplestorestate
* If state->backward is true, then the stored representation of
* the tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* state->backward is not set, the write/read routines may omit the extra
* length word.
*
@@ -295,7 +295,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
* tuple store are allowed.
*
* interXact: if true, the files used for on-disk storage persist beyond the
- * end of the current transaction. NOTE: It's the caller's responsibility to
+ * end of the current transaction. NOTE: It's the caller's responsibility to
* create such a tuplestore in a memory context and resource owner that will
* also survive transaction boundaries, and to ensure the tuplestore is closed
* when it's no longer wanted.
@@ -334,7 +334,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
* any data into the tuplestore.
*
* eflags is a bitmask following the meanings used for executor node
- * startup flags (see executor.h). tuplestore pays attention to these bits:
+ * startup flags (see executor.h). tuplestore pays attention to these bits:
* EXEC_FLAG_REWIND need rewind to start
* EXEC_FLAG_BACKWARD need backward fetch
* If tuplestore_set_eflags is not called, REWIND is allowed, and BACKWARD
@@ -633,9 +633,9 @@ grow_memtuples(Tuplestorestate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
@@ -704,7 +704,7 @@ tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple)
MemoryContext oldcxt = MemoryContextSwitchTo(state->context);
/*
- * Copy the tuple. (Must do this even in WRITEFILE case. Note that
+ * Copy the tuple. (Must do this even in WRITEFILE case. Note that
* COPYTUP includes USEMEM, so we needn't do that here.)
*/
tuple = COPYTUP(state, tuple);
@@ -861,7 +861,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple)
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If should_free is set, the
+ * Returns NULL if no more tuples. If should_free is set, the
* caller must pfree the returned tuple when done with it.
*
* Backward scan is only allowed if randomAccess was set true or
@@ -1090,7 +1090,7 @@ tuplestore_advance(Tuplestorestate *state, bool forward)
/*
* Advance over N tuples in either forward or back direction,
- * without returning any data. N<=0 is a no-op.
+ * without returning any data. N<=0 is a no-op.
* Returns TRUE if successful, FALSE if ran out of tuples.
*/
bool
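
A standalone sketch of the spill behavior described in tuplestore.c's header: absorb tuples in memory until the budget runs out, then dump everything to a temp file and keep writing there. tmpfile() and a fixed array stand in for the real temp-file and growable-array machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    char       *mem[1024];      /* in-memory tuple array (fixed for the sketch) */
    int         count;
    long        availMem;       /* bytes left in the memory budget */
    FILE       *file;           /* non-NULL once we have spilled to disk */
} TupStore;

static void
put_tuple(TupStore *ts, const char *tup)
{
    long        len = (long) strlen(tup) + 1;

    if (ts->file == NULL && len > ts->availMem)
    {
        int         i;

        /* budget exceeded: dump every absorbed tuple to a temp file */
        ts->file = tmpfile();
        for (i = 0; i < ts->count; i++)
        {
            fwrite(ts->mem[i], strlen(ts->mem[i]) + 1, 1, ts->file);
            free(ts->mem[i]);
        }
        ts->count = 0;
    }

    if (ts->file != NULL)
        fwrite(tup, (size_t) len, 1, ts->file);     /* on-disk state */
    else
    {
        ts->mem[ts->count++] = strdup(tup);         /* in-memory state */
        ts->availMem -= len;
    }
}
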
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index a067806989..52b612b15a 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -15,7 +15,7 @@
* this module.
*
* To allow reusing existing combo cids, we also keep a hash table that
- * maps cmin,cmax pairs to combo cids. This keeps the data structure size
+ * maps cmin,cmax pairs to combo cids. This keeps the data structure size
* reasonable in most cases, since the number of unique pairs used by any
* one transaction is likely to be small.
*
@@ -148,8 +148,8 @@ HeapTupleHeaderAdjustCmax(HeapTupleHeader tup,
/*
* If we're marking a tuple deleted that was inserted by (any
* subtransaction of) our transaction, we need to use a combo command id.
- * Test for HeapTupleHeaderXminCommitted() first, because it's cheaper than a
- * TransactionIdIsCurrentTransactionId call.
+ * Test for HeapTupleHeaderXminCommitted() first, because it's cheaper
+ * than a TransactionIdIsCurrentTransactionId call.
*/
if (!HeapTupleHeaderXminCommitted(tup) &&
TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tup)))
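
A toy sketch of the mapping the combocid.c header describes: a combo id is just an index into an array of (cmin, cmax) pairs, and an existing pair is reused so the structure stays small. A linear scan stands in for the reverse-lookup hash table, and array growth/overflow handling is omitted:

#include <stdint.h>

typedef uint32_t CommandId;

typedef struct
{
    CommandId   cmin;
    CommandId   cmax;
} ComboCidKey;

static ComboCidKey comboCids[1024]; /* combo id -> (cmin, cmax) */
static int  usedComboCids = 0;

static CommandId
get_combo_cid(CommandId cmin, CommandId cmax)
{
    int         i;

    for (i = 0; i < usedComboCids; i++)
        if (comboCids[i].cmin == cmin && comboCids[i].cmax == cmax)
            return (CommandId) i;   /* reuse the existing combo id */

    comboCids[usedComboCids].cmin = cmin;
    comboCids[usedComboCids].cmax = cmax;
    return (CommandId) usedComboCids++;
}
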
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 9802fa7ded..2834753d73 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -11,7 +11,7 @@
* regd_count and count it in RegisteredSnapshots, but this reference is not
* tracked by a resource owner. We used to use the TopTransactionResourceOwner
* to track this snapshot reference, but that introduces logical circularity
- * and thus makes it impossible to clean up in a sane fashion. It's better to
+ * and thus makes it impossible to clean up in a sane fashion. It's better to
* handle this reference as an internally-tracked registration, so that this
* module is entirely lower-level than ResourceOwners.
*
@@ -24,9 +24,9 @@
* transaction).
*
* These arrangements let us reset MyPgXact->xmin when there are no snapshots
- * referenced by this transaction. (One possible improvement would be to be
+ * referenced by this transaction. (One possible improvement would be to be
* able to advance Xmin when the snapshot with the earliest Xmin is no longer
- * referenced. That's a bit harder though, it requires more locking, and
+ * referenced. That's a bit harder though, it requires more locking, and
* anyway it should be rather uncommon to keep temporary snapshots referenced
* for too long.)
*
@@ -63,7 +63,7 @@
* CurrentSnapshot points to the only snapshot taken in transaction-snapshot
* mode, and to the latest one taken in a read-committed transaction.
* SecondarySnapshot is a snapshot that's always up-to-date as of the current
- * instant, even in transaction-snapshot mode. It should only be used for
+ * instant, even in transaction-snapshot mode. It should only be used for
* special-purpose code (say, RI checking.) CatalogSnapshot points to an
* MVCC snapshot intended to be used for catalog scans; we must refresh it
* whenever a system catalog change occurs.
@@ -135,7 +135,7 @@ static int RegisteredSnapshots = 0;
bool FirstSnapshotSet = false;
/*
- * Remember the serializable transaction snapshot, if any. We cannot trust
+ * Remember the serializable transaction snapshot, if any. We cannot trust
* FirstSnapshotSet in combination with IsolationUsesXactSnapshot(), because
* GUC may be reset before us, changing the value of IsolationUsesXactSnapshot.
*/
@@ -169,9 +169,9 @@ Snapshot
GetTransactionSnapshot(void)
{
/*
- * Return historic snapshot if doing logical decoding. We'll never
- * need a non-historic transaction snapshot in this (sub-)transaction, so
- * there's no need to be careful to set one up for later calls to
+ * Return historic snapshot if doing logical decoding. We'll never need a
+ * non-historic transaction snapshot in this (sub-)transaction, so there's
+ * no need to be careful to set one up for later calls to
* GetTransactionSnapshot().
*/
if (HistoricSnapshotActive())
@@ -238,8 +238,7 @@ GetLatestSnapshot(void)
{
/*
* So far there are no cases requiring support for GetLatestSnapshot()
- * during logical decoding, but it wouldn't be hard to add if
- * required.
+ * during logical decoding, but it wouldn't be hard to add if required.
*/
Assert(!HistoricSnapshotActive());
@@ -283,12 +282,11 @@ Snapshot
GetNonHistoricCatalogSnapshot(Oid relid)
{
/*
- * If the caller is trying to scan a relation that has no syscache,
- * no catcache invalidations will be sent when it is updated. For a
- * a few key relations, snapshot invalidations are sent instead. If
- * we're trying to scan a relation for which neither catcache nor
- * snapshot invalidations are sent, we must refresh the snapshot every
- * time.
+ * If the caller is trying to scan a relation that has no syscache, no
+ * catcache invalidations will be sent when it is updated. For a few
+ * key relations, snapshot invalidations are sent instead. If we're
+ * trying to scan a relation for which neither catcache nor snapshot
+ * invalidations are sent, we must refresh the snapshot every time.
*/
if (!CatalogSnapshotStale && !RelationInvalidatesSnapshotsOnly(relid) &&
!RelationHasSysCache(relid))
@@ -403,7 +401,7 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid)
/*
* In transaction-snapshot mode, the first snapshot must live until end of
- * xact, so we must make a copy of it. Furthermore, if we're running in
+ * xact, so we must make a copy of it. Furthermore, if we're running in
* serializable mode, predicate.c needs to do its own processing.
*/
if (IsolationUsesXactSnapshot())
@@ -499,7 +497,7 @@ FreeSnapshot(Snapshot snapshot)
*
* If the passed snapshot is a statically-allocated one, or it is possibly
* subject to a future command counter update, create a new long-lived copy
- * with active refcount=1. Otherwise, only increment the refcount.
+ * with active refcount=1. Otherwise, only increment the refcount.
*/
void
PushActiveSnapshot(Snapshot snap)
@@ -868,7 +866,7 @@ ExportSnapshot(Snapshot snapshot)
* However, we haven't got enough information to do that, since we don't
* know if we're at top level or not. For example, we could be inside a
* plpgsql function that is going to fire off other transactions via
- * dblink. Rather than disallow perfectly legitimate usages, don't make a
+ * dblink. Rather than disallow perfectly legitimate usages, don't make a
* check.
*
* Also note that we don't make any restriction on the transaction's
@@ -1081,7 +1079,7 @@ parseXidFromText(const char *prefix, char **s, const char *filename)
/*
* ImportSnapshot
- * Import a previously exported snapshot. The argument should be a
+ * Import a previously exported snapshot. The argument should be a
* filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
* This is called by "SET TRANSACTION SNAPSHOT 'foo'".
*/
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 89f5ff85e9..75cd53e1fd 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -20,7 +20,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
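
A self-contained sketch of the check ordering this header mandates, with toy stand-ins for the real transam.c predicates (here a single xid is "in progress" and everything older has committed):

#include <stdbool.h>

typedef unsigned int TransactionId;

static TransactionId runningXid = 100;

static bool
xid_is_in_progress(TransactionId xid)
{
    return xid == runningXid;
}

static bool
xid_did_commit(TransactionId xid)
{
    return xid < runningXid;
}

/*
 * Ask "is it still running?" before "did it commit?".  During a real
 * commit both predicates can momentarily be true; testing commit first
 * would treat the transaction as committed while concurrent snapshots
 * still consider it in progress, the inconsistency the comment warns of.
 */
static bool
xact_committed_and_visible(TransactionId xid)
{
    if (xid_is_in_progress(xid))
        return false;
    return xid_did_commit(xid);
}
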
@@ -89,13 +89,13 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
* just refrain from setting the hint bit until some future re-examination
* of the tuple.
*
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted. (Some
* code in heapam.c relies on that!)
*
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
* we can always set the hint bits, since pre-9.0 VACUUM FULL always used
* synchronous commits and didn't move tuples that weren't previously
- * hinted. (This is not known by this subroutine, but is applied by its
+ * hinted. (This is not known by this subroutine, but is applied by its
* callers.) Note: old-style VACUUM FULL is gone, but we have to keep this
* module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
* support in-place update from pre-9.0 databases.
@@ -165,6 +165,7 @@ bool
HeapTupleSatisfiesSelf(HeapTuple htup, Snapshot snapshot, Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -343,7 +344,7 @@ HeapTupleSatisfiesAny(HeapTuple htup, Snapshot snapshot, Buffer buffer)
* This is a simplified version that only checks for VACUUM moving conditions.
* It's appropriate for TOAST usage because TOAST really doesn't want to do
* its own time qual checks; if you can see the main table row that contains
- * a TOAST reference, you should be able to see the TOASTed value. However,
+ * a TOAST reference, you should be able to see the TOASTed value. However,
* vacuuming a TOAST table is independent of the main table, and in case such
* a vacuum fails partway through, we'd better do this much checking.
*
@@ -355,6 +356,7 @@ HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -440,6 +442,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -497,7 +500,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleHeaderGetRawXmax(tuple);
@@ -600,7 +603,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
{
/*
* If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK is
- * set, it cannot possibly be running. Otherwise need to check.
+ * set, it cannot possibly be running. Otherwise need to check.
*/
if ((tuple->t_infomask & (HEAP_XMAX_EXCL_LOCK |
HEAP_XMAX_KEYSHR_LOCK)) &&
@@ -712,6 +715,7 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -908,6 +912,7 @@ HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -1100,7 +1105,7 @@ HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot,
* we mainly want to know is if a tuple is potentially visible to *any*
* running transaction. If so, it can't be removed yet by VACUUM.
*
- * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
+ * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
* deleted by XIDs >= OldestXmin are deemed "recently dead"; they might
* still be visible to some open transaction, so we can't remove them,
* even if we see that the deleting transaction has committed.
@@ -1110,6 +1115,7 @@ HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -1192,7 +1198,7 @@ HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now what
+ * Okay, the inserter committed, so it was good at some point. Now what
* about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1332,7 +1338,7 @@ HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
 * in lieu of HeapTupleSatisfiesVacuum when the tuple has just been
* tested by HeapTupleSatisfiesMVCC and, therefore, any hint bits that
 * can be set should already be set. We assume that if no hint bits are
- * set for either xmin or xmax, the transaction is still running. This is
+ * set for either xmin or xmax, the transaction is still running. This is
* therefore faster than HeapTupleSatisfiesVacuum, because we don't
* consult CLOG (and also because we don't need to give an exact answer,
* just whether or not the tuple is surely dead).
@@ -1341,6 +1347,7 @@ bool
HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -1397,7 +1404,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays. Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
@@ -1574,7 +1581,7 @@ TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
*/
bool
HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
- Buffer buffer)
+ Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
@@ -1598,8 +1605,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
/*
* another transaction might have (tried to) delete this tuple or
- * cmin/cmax was stored in a combocid. So we need to lookup the
- * actual values externally.
+ * cmin/cmax was stored in a combocid. So we need to lookup the actual
+ * values externally.
*/
resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
htup, buffer,
@@ -1611,7 +1618,7 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
Assert(cmin != InvalidCommandId);
if (cmin >= snapshot->curcid)
- return false; /* inserted after scan started */
+ return false; /* inserted after scan started */
/* fall through */
}
/* committed before our xmin horizon. Do a normal visibility check. */
@@ -1632,13 +1639,14 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
return false;
}
/* check if it's a committed transaction in [xmin, xmax) */
- else if(TransactionIdInArray(xmin, snapshot->xip, snapshot->xcnt))
+ else if (TransactionIdInArray(xmin, snapshot->xip, snapshot->xcnt))
{
/* fall through */
}
+
/*
- * none of the above, i.e. between [xmin, xmax) but hasn't
- * committed. I.e. invisible.
+ * none of the above, i.e. between [xmin, xmax) but hasn't committed. I.e.
+ * invisible.
*/
else
{
@@ -1653,9 +1661,10 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
/* locked tuples are always visible */
else if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
+
/*
- * We can see multis here if we're looking at user tables or if
- * somebody SELECT ... FOR SHARE/UPDATE a system table.
+ * We can see multis here if we're looking at user tables or if somebody
+ * SELECT ... FOR SHARE/UPDATE a system table.
*/
else if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
@@ -1665,9 +1674,9 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
/* check if it's one of our txids, toplevel is also in there */
if (TransactionIdInArray(xmax, snapshot->subxip, snapshot->subxcnt))
{
- bool resolved;
- CommandId cmin;
- CommandId cmax = HeapTupleHeaderGetRawCommandId(tuple);
+ bool resolved;
+ CommandId cmin;
+ CommandId cmax = HeapTupleHeaderGetRawCommandId(tuple);
/* Lookup actual cmin/cmax values */
resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
@@ -1680,9 +1689,9 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
Assert(cmax != InvalidCommandId);
if (cmax >= snapshot->curcid)
- return true; /* deleted after scan started */
+ return true; /* deleted after scan started */
else
- return false; /* deleted before scan started */
+ return false; /* deleted before scan started */
}
/* below xmin horizon, normal transaction state is valid */
else if (TransactionIdPrecedes(xmax, snapshot->xmin))
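XidInMVCCSnapshot's quick range check, whose comment is rewrapped earlier in this tqual.c diff, can be summarized in a standalone sketch. This deliberately ignores XID wraparound (the real code compares with TransactionIdPrecedes and friends) and subtransaction-to-parent mapping; all type and function names here are invented:

/* Simplified sketch of the xmin/xmax range check described above. */
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t XidSketch;

typedef struct
{
	XidSketch	xmin;		/* all XIDs < xmin are finished */
	XidSketch	xmax;		/* all XIDs >= xmax count as in progress */
	XidSketch  *xip;		/* XIDs in progress at snapshot time */
	int			xcnt;
} SnapshotSketch;

static bool
xid_in_snapshot_sketch(XidSketch xid, const SnapshotSketch *snap)
{
	int			i;

	if (xid < snap->xmin)
		return false;		/* surely finished before the snapshot */
	if (xid >= snap->xmax)
		return true;		/* started after the snapshot was taken */
	for (i = 0; i < snap->xcnt; i++)
	{
		if (snap->xip[i] == xid)
			return true;	/* listed as in progress */
	}
	return false;
}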
diff --git a/src/bin/initdb/findtimezone.c b/src/bin/initdb/findtimezone.c
index bc1f2c2c97..f0255d85af 100644
--- a/src/bin/initdb/findtimezone.c
+++ b/src/bin/initdb/findtimezone.c
@@ -52,7 +52,7 @@ pg_TZDIR(void)
* This is simpler than the backend function of the same name because
* we assume that the input string has the correct case already, so there
* is no need for case-folding. (This is obviously true if we got the file
- * name from the filesystem to start with. The only other place it can come
+ * name from the filesystem to start with. The only other place it can come
* from is the environment variable TZ, and there seems no need to allow
* case variation in that; other programs aren't likely to.)
*
@@ -471,7 +471,7 @@ identify_system_timezone(void)
return resultbuf;
/*
- * Did not find the timezone. Fallback to use a GMT zone. Note that the
+ * Did not find the timezone. Fallback to use a GMT zone. Note that the
* Olson timezone database names the GMT-offset zones in POSIX style: plus
* is west of Greenwich. It's unfortunate that this is opposite of SQL
* conventions. Should we therefore change the names? Probably not...
@@ -490,7 +490,7 @@ identify_system_timezone(void)
* Recursively scan the timezone database looking for the best match to
* the system timezone behavior.
*
- * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
+ * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
* pathname of a directory containing TZ files. We internally modify it
* to hold pathnames of sub-directories and files, but must restore it
* to its original contents before exit.
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 83b7f6e24d..2a51916106 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -18,7 +18,7 @@
* to produce a new database.
*
* For largely-historical reasons, the template1 database is the one built
- * by the basic bootstrap process. After it is complete, template0 and
+ * by the basic bootstrap process. After it is complete, template0 and
* the default database, postgres, are made just by copying template1.
*
* To create template1, we run the postgres (backend) program in bootstrap
@@ -581,7 +581,7 @@ walkdir(char *path, void (*action) (char *fname, bool isdir))
/*
* It's important to fsync the destination directory itself as individual
* file fsyncs don't guarantee that the directory entry for the file is
- * synced. Recent versions of ext4 have made the window much wider but
+ * synced. Recent versions of ext4 have made the window much wider but
* it's been an issue for ext3 and other filesystems in the past.
*/
(*action) (path, true);
@@ -614,7 +614,7 @@ pre_sync_fname(char *fname, bool isdir)
}
/*
- * Prefer sync_file_range, else use posix_fadvise. We ignore any error
+ * Prefer sync_file_range, else use posix_fadvise. We ignore any error
* here since this operation is only a hint anyway.
*/
#if defined(HAVE_SYNC_FILE_RANGE)
@@ -772,7 +772,7 @@ exit_nicely(void)
static char *
get_id(void)
{
- const char *username;
+ const char *username;
#ifndef WIN32
if (geteuid() == 0) /* 0 is root's uid */
@@ -1057,13 +1057,13 @@ static char *
choose_dsm_implementation(void)
{
#ifdef HAVE_SHM_OPEN
- int ntries = 10;
+ int ntries = 10;
while (ntries > 0)
{
- uint32 handle;
- char name[64];
- int fd;
+ uint32 handle;
+ char name[64];
+ int fd;
handle = random();
snprintf(name, 64, "/PostgreSQL.%u", handle);
@@ -1976,7 +1976,7 @@ setup_collation(void)
*/
if (normalize_locale_name(alias, localebuf))
{
- char *quoted_alias = escape_quotes(alias);
+ char *quoted_alias = escape_quotes(alias);
PG_CMD_PRINTF3("INSERT INTO tmp_pg_collation VALUES (E'%s', E'%s', %d);\n",
quoted_alias, quoted_locale, enc);
@@ -1992,7 +1992,7 @@ setup_collation(void)
* When copying collations to the final location, eliminate aliases that
* conflict with an existing locale name for the same encoding. For
* example, "br_FR.iso88591" is normalized to "br_FR", both for encoding
- * LATIN1. But the unnormalized locale "br_FR" already exists for LATIN1.
+ * LATIN1. But the unnormalized locale "br_FR" already exists for LATIN1.
* Prefer the alias that matches the OS locale name, else the first locale
* name by sort order (arbitrary choice to be deterministic).
*
@@ -2099,7 +2099,7 @@ setup_dictionary(void)
/*
* Set up privileges
*
- * We mark most system catalogs as world-readable. We don't currently have
+ * We mark most system catalogs as world-readable. We don't currently have
* to touch functions, languages, or databases, because their default
* permissions are OK.
*
@@ -2532,7 +2532,7 @@ locale_date_order(const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a malloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -2886,6 +2886,7 @@ void
get_restricted_token(void)
{
#ifdef WIN32
+
/*
* Before we execute another program, make sure that we are running with a
* restricted token. If not, re-execute ourselves with one.
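The walkdir/pre_sync_fname hunks above describe fsyncing the directory entry itself and issuing advisory pre-sync calls whose errors are ignored. A minimal Linux-oriented sketch of the pre-sync hint, with an invented function name:

/* Advisory pre-sync hint; errors deliberately ignored, it's only a hint. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static void
pre_sync_hint(const char *fname)
{
	int			fd = open(fname, O_RDONLY);

	if (fd < 0)
		return;				/* best effort only */
#if defined(SYNC_FILE_RANGE_WRITE)
	(void) sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
#elif defined(POSIX_FADV_DONTNEED)
	(void) posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
#endif
	close(fd);
}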
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index ad6d74cc0d..a7bea76f52 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -9,29 +9,36 @@ program_help_ok('initdb');
program_version_ok('initdb');
program_options_handling_ok('initdb');
-command_ok(['initdb', "$tempdir/data"], 'basic initdb');
-command_fails(['initdb', "$tempdir/data"], 'existing data directory');
-command_ok(['initdb', '-N', "$tempdir/data2"], 'nosync');
-command_ok(['initdb', '-S', "$tempdir/data2"], 'sync only');
-command_fails(['initdb', '-S', "$tempdir/data3"], 'sync missing data directory');
+command_ok([ 'initdb', "$tempdir/data" ], 'basic initdb');
+command_fails([ 'initdb', "$tempdir/data" ], 'existing data directory');
+command_ok([ 'initdb', '-N', "$tempdir/data2" ], 'nosync');
+command_ok([ 'initdb', '-S', "$tempdir/data2" ], 'sync only');
+command_fails([ 'initdb', '-S', "$tempdir/data3" ],
+ 'sync missing data directory');
mkdir "$tempdir/data4" or BAIL_OUT($!);
-command_ok(['initdb', "$tempdir/data4"], 'existing empty data directory');
+command_ok([ 'initdb', "$tempdir/data4" ], 'existing empty data directory');
system_or_bail "rm -rf $tempdir/*";
-command_ok(['initdb', "$tempdir/data", '-X', "$tempdir/pgxlog"], 'separate xlog directory');
+command_ok([ 'initdb', "$tempdir/data", '-X', "$tempdir/pgxlog" ],
+ 'separate xlog directory');
system_or_bail "rm -rf $tempdir/*";
-command_fails(['initdb', "$tempdir/data", '-X', 'pgxlog'], 'relative xlog directory not allowed');
+command_fails(
+ [ 'initdb', "$tempdir/data", '-X', 'pgxlog' ],
+ 'relative xlog directory not allowed');
system_or_bail "rm -rf $tempdir/*";
mkdir "$tempdir/pgxlog";
-command_ok(['initdb', "$tempdir/data", '-X', "$tempdir/pgxlog"], 'existing empty xlog directory');
+command_ok([ 'initdb', "$tempdir/data", '-X', "$tempdir/pgxlog" ],
+ 'existing empty xlog directory');
system_or_bail "rm -rf $tempdir/*";
mkdir "$tempdir/pgxlog";
mkdir "$tempdir/pgxlog/lost+found";
-command_fails(['initdb', "$tempdir/data", '-X', "$tempdir/pgxlog"], 'existing nonempty xlog directory');
+command_fails([ 'initdb', "$tempdir/data", '-X', "$tempdir/pgxlog" ],
+ 'existing nonempty xlog directory');
system_or_bail "rm -rf $tempdir/*";
-command_ok(['initdb', "$tempdir/data", '-T', 'german'], 'select default dictionary');
+command_ok([ 'initdb', "$tempdir/data", '-T', 'german' ],
+ 'select default dictionary');
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 1a468fa1b7..154c52d190 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -40,8 +40,8 @@
typedef struct TablespaceListCell
{
struct TablespaceListCell *next;
- char old_dir[MAXPGPATH];
- char new_dir[MAXPGPATH];
+ char old_dir[MAXPGPATH];
+ char new_dir[MAXPGPATH];
} TablespaceListCell;
typedef struct TablespaceList
@@ -54,15 +54,15 @@ typedef struct TablespaceList
static char *basedir = NULL;
static TablespaceList tablespace_dirs = {NULL, NULL};
static char *xlog_dir = "";
-static char format = 'p'; /* p(lain)/t(ar) */
+static char format = 'p'; /* p(lain)/t(ar) */
static char *label = "pg_basebackup base backup";
-static bool showprogress = false;
-static int verbose = 0;
+static bool showprogress = false;
+static int verbose = 0;
static int compresslevel = 0;
-static bool includewal = false;
-static bool streamwal = false;
-static bool fastcheckpoint = false;
-static bool writerecoveryconf = false;
+static bool includewal = false;
+static bool streamwal = false;
+static bool fastcheckpoint = false;
+static bool writerecoveryconf = false;
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static pg_time_t last_progress_report = 0;
static int32 maxrate = 0; /* no limit by default */
@@ -113,18 +113,20 @@ static void update_tablespace_symlink(Oid oid, const char *old_dir);
static void tablespace_list_append(const char *arg);
-static void disconnect_and_exit(int code)
+static void
+disconnect_and_exit(int code)
{
if (conn != NULL)
PQfinish(conn);
#ifndef WIN32
+
/*
- * On windows, our background thread dies along with the process.
- * But on Unix, if we have started a subprocess, we want to kill
- * it off so it doesn't remain running trying to stream data.
+ * On windows, our background thread dies along with the process. But on
+ * Unix, if we have started a subprocess, we want to kill it off so it
+ * doesn't remain running trying to stream data.
*/
- if (bgchild> 0)
+ if (bgchild > 0)
kill(bgchild, SIGTERM);
#endif
@@ -140,21 +142,21 @@ static void
tablespace_list_append(const char *arg)
{
TablespaceListCell *cell = (TablespaceListCell *) pg_malloc0(sizeof(TablespaceListCell));
- char *dst;
- char *dst_ptr;
- const char *arg_ptr;
+ char *dst;
+ char *dst_ptr;
+ const char *arg_ptr;
dst_ptr = dst = cell->old_dir;
for (arg_ptr = arg; *arg_ptr; arg_ptr++)
{
if (dst_ptr - dst >= MAXPGPATH)
{
- fprintf(stderr, _("%s: directory name too long\n"), progname);
+ fprintf(stderr, _("%s: directory name too long\n"), progname);
exit(1);
}
if (*arg_ptr == '\\' && *(arg_ptr + 1) == '=')
- ; /* skip backslash escaping = */
+ ; /* skip backslash escaping = */
else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\'))
{
if (*cell->new_dir)
@@ -177,10 +179,12 @@ tablespace_list_append(const char *arg)
exit(1);
}
- /* This check isn't absolutely necessary. But all tablespaces are created
+ /*
+ * This check isn't absolutely necessary. But all tablespaces are created
* with absolute directories, so specifying a non-absolute path here would
* just never match, possibly confusing users. It's also good to be
- * consistent with the new_dir check. */
+ * consistent with the new_dir check.
+ */
if (!is_absolute_path(cell->old_dir))
{
fprintf(stderr, _("%s: old directory not absolute in tablespace mapping: %s\n"),
@@ -232,7 +236,7 @@ usage(void)
printf(_(" -R, --write-recovery-conf\n"
" write recovery.conf after backup\n"));
printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
- " relocate tablespace in OLDDIR to NEWDIR\n"));
+ " relocate tablespace in OLDDIR to NEWDIR\n"));
printf(_(" -x, --xlog include required WAL files in backup (fetch mode)\n"));
printf(_(" -X, --xlog-method=fetch|stream\n"
" include required WAL files with specified method\n"));
@@ -296,7 +300,7 @@ reached_end_position(XLogRecPtr segendpos, uint32 timeline,
lo;
MemSet(xlogend, 0, sizeof(xlogend));
- r = read(bgpipe[0], xlogend, sizeof(xlogend)-1);
+ r = read(bgpipe[0], xlogend, sizeof(xlogend) - 1);
if (r < 0)
{
fprintf(stderr, _("%s: could not read from ready pipe: %s\n"),
@@ -536,7 +540,7 @@ progress_report(int tablespacenum, const char *filename, bool force)
now = time(NULL);
if (now == last_progress_report && !force)
- return; /* Max once per second */
+ return; /* Max once per second */
last_progress_report = now;
percent = totalsize ? (int) ((totaldone / 1024) * 100 / totalsize) : 0;
@@ -614,7 +618,7 @@ parse_max_rate(char *src)
{
double result;
char *after_num;
- char *suffix = NULL;
+ char *suffix = NULL;
errno = 0;
result = strtod(src, &after_num);
@@ -644,8 +648,8 @@ parse_max_rate(char *src)
}
/*
- * Evaluate suffix, after skipping over possible whitespace.
- * Lack of suffix means kilobytes.
+ * Evaluate suffix, after skipping over possible whitespace. Lack of
+ * suffix means kilobytes.
*/
while (*after_num != '\0' && isspace((unsigned char) *after_num))
after_num++;
@@ -681,8 +685,8 @@ parse_max_rate(char *src)
if ((uint64) result != (uint64) ((uint32) result))
{
fprintf(stderr,
- _("%s: transfer rate \"%s\" exceeds integer range\n"),
- progname, src);
+ _("%s: transfer rate \"%s\" exceeds integer range\n"),
+ progname, src);
exit(1);
}
@@ -1114,7 +1118,7 @@ update_tablespace_symlink(Oid oid, const char *old_dir)
if (strcmp(old_dir, new_dir) != 0)
{
- char *linkloc = psprintf("%s/pg_tblspc/%d", basedir, oid);
+ char *linkloc = psprintf("%s/pg_tblspc/%d", basedir, oid);
if (unlink(linkloc) != 0 && errno != ENOENT)
{
@@ -1742,7 +1746,8 @@ BaseBackup(void)
*/
if (format == 'p' && !PQgetisnull(res, i, 1))
{
- char *path = (char *) get_tablespace_mapping(PQgetvalue(res, i, 1));
+ char *path = (char *) get_tablespace_mapping(PQgetvalue(res, i, 1));
+
verify_dir_is_empty_or_create(path);
}
}
@@ -1791,7 +1796,8 @@ BaseBackup(void)
{
for (i = 0; i < PQntuples(res); i++)
{
- Oid tblspc_oid = atooid(PQgetvalue(res, i, 0));
+ Oid tblspc_oid = atooid(PQgetvalue(res, i, 0));
+
if (tblspc_oid)
update_tablespace_symlink(tblspc_oid, PQgetvalue(res, i, 1));
}
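tablespace_list_append, reindented above, splits an OLDDIR=NEWDIR argument on its single unescaped '=' and treats a backslash-escaped '\=' as a literal character. A self-contained sketch of just that parsing rule, with invented names and simplified buffer handling:

/* Split "OLDDIR=NEWDIR", honoring the '\=' escape; illustrative only. */
#include <stdbool.h>
#include <stddef.h>

static bool
split_mapping(const char *arg, char *old_dir, char *new_dir, size_t len)
{
	char	   *dst = old_dir;
	size_t		used = 0;
	bool		seen_eq = false;

	old_dir[0] = new_dir[0] = '\0';
	for (const char *p = arg; *p; p++)
	{
		if (*p == '\\' && p[1] == '=')
			continue;			/* drop the backslash escaping '=' */
		if (*p == '=' && (p == arg || p[-1] != '\\'))
		{
			if (seen_eq)
				return false;	/* more than one unescaped '=' */
			dst[used] = '\0';
			dst = new_dir;
			used = 0;
			seen_eq = true;
			continue;
		}
		if (used + 1 >= len)
			return false;		/* directory name too long */
		dst[used++] = *p;
	}
	dst[used] = '\0';
	return seen_eq && old_dir[0] != '\0' && new_dir[0] != '\0';
}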
diff --git a/src/bin/pg_basebackup/pg_receivexlog.c b/src/bin/pg_basebackup/pg_receivexlog.c
index 406610a862..9640838906 100644
--- a/src/bin/pg_basebackup/pg_receivexlog.c
+++ b/src/bin/pg_basebackup/pg_receivexlog.c
@@ -228,9 +228,9 @@ FindStreamingStart(uint32 *tli)
XLogRecPtr high_ptr;
/*
- * Move the starting pointer to the start of the next segment, if
- * the highest one we saw was completed. Otherwise start streaming
- * from the beginning of the .partial segment.
+ * Move the starting pointer to the start of the next segment, if the
+ * highest one we saw was completed. Otherwise start streaming from
+ * the beginning of the .partial segment.
*/
if (!high_ispartial)
high_segno++;
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index fe902cf969..194d10faa5 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -31,27 +31,27 @@
#define RECONNECT_SLEEP_TIME 5
/* Global Options */
-static char *outfile = NULL;
-static int verbose = 0;
-static int noloop = 0;
-static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
-static int fsync_interval = 10 * 1000; /* 10 sec = default */
+static char *outfile = NULL;
+static int verbose = 0;
+static int noloop = 0;
+static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
+static int fsync_interval = 10 * 1000; /* 10 sec = default */
static XLogRecPtr startpos = InvalidXLogRecPtr;
-static bool do_create_slot = false;
-static bool do_start_slot = false;
-static bool do_drop_slot = false;
+static bool do_create_slot = false;
+static bool do_start_slot = false;
+static bool do_drop_slot = false;
/* filled pairwise with option, value. value may be NULL */
-static char **options;
-static size_t noptions = 0;
+static char **options;
+static size_t noptions = 0;
static const char *plugin = "test_decoding";
/* Global State */
-static int outfd = -1;
+static int outfd = -1;
static volatile sig_atomic_t time_to_abort = false;
static volatile sig_atomic_t output_reopen = false;
-static int64 output_last_fsync = -1;
-static bool output_unsynced = false;
+static int64 output_last_fsync = -1;
+static bool output_unsynced = false;
static XLogRecPtr output_written_lsn = InvalidXLogRecPtr;
static XLogRecPtr output_fsync_lsn = InvalidXLogRecPtr;
@@ -111,8 +111,8 @@ sendFeedback(PGconn *conn, int64 now, bool force, bool replyRequested)
/*
* we normally don't want to send superfluous feedbacks, but if it's
- * because of a timeout we need to, otherwise wal_sender_timeout will
- * kill us.
+ * because of a timeout we need to, otherwise wal_sender_timeout will kill
+ * us.
*/
if (!force &&
last_written_lsn == output_written_lsn &&
@@ -121,21 +121,21 @@ sendFeedback(PGconn *conn, int64 now, bool force, bool replyRequested)
if (verbose)
fprintf(stderr,
- _("%s: confirming write up to %X/%X, flush to %X/%X (slot %s)\n"),
+ _("%s: confirming write up to %X/%X, flush to %X/%X (slot %s)\n"),
progname,
- (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
+ (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
(uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn,
replication_slot);
replybuf[len] = 'r';
len += 1;
- fe_sendint64(output_written_lsn, &replybuf[len]); /* write */
+ fe_sendint64(output_written_lsn, &replybuf[len]); /* write */
len += 8;
fe_sendint64(output_fsync_lsn, &replybuf[len]); /* flush */
len += 8;
- fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* apply */
+ fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* apply */
len += 8;
- fe_sendint64(now, &replybuf[len]); /* sendTime */
+ fe_sendint64(now, &replybuf[len]); /* sendTime */
len += 8;
replybuf[len] = replyRequested ? 1 : 0; /* replyRequested */
len += 1;
@@ -227,7 +227,7 @@ StreamLog(void)
/* Initiate the replication stream at specified location */
appendPQExpBuffer(query, "START_REPLICATION SLOT \"%s\" LOGICAL %X/%X",
- replication_slot, (uint32) (startpos >> 32), (uint32) startpos);
+ replication_slot, (uint32) (startpos >> 32), (uint32) startpos);
/* print options if there are any */
if (noptions)
@@ -549,7 +549,7 @@ StreamLog(void)
if (outfd != -1 && strcmp(outfile, "-") != 0)
{
- int64 t = feGetCurrentTimestamp();
+ int64 t = feGetCurrentTimestamp();
/* no need to jump to error on failure here, we're finishing anyway */
OutputFsync(t);
@@ -693,8 +693,8 @@ main(int argc, char **argv)
/* replication options */
case 'o':
{
- char *data = pg_strdup(optarg);
- char *val = strchr(data, '=');
+ char *data = pg_strdup(optarg);
+ char *val = strchr(data, '=');
if (val != NULL)
{
@@ -704,7 +704,7 @@ main(int argc, char **argv)
}
noptions += 1;
- options = pg_realloc(options, sizeof(char*) * noptions * 2);
+ options = pg_realloc(options, sizeof(char *) * noptions * 2);
options[(noptions - 1) * 2] = data;
options[(noptions - 1) * 2 + 1] = val;
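sendFeedback above, like its counterpart in receivelog.c further down, builds the standby-status-update message: one 'r' byte, three big-endian int64 positions (write, flush, apply), an int64 send timestamp, and a reply-requested flag. A sketch of that 34-byte layout, with pack_int64 standing in for fe_sendint64:

/* Standby status update ('r') layout; pack_int64 mimics fe_sendint64. */
#include <stdint.h>

static void
pack_int64(int64_t v, char *buf)
{
	int			i;

	for (i = 0; i < 8; i++)
		buf[i] = (char) (((uint64_t) v) >> (56 - 8 * i));
}

static int
build_feedback(char *replybuf, int64_t written, int64_t flushed,
			   int64_t now, int reply_requested)
{
	int			len = 0;

	replybuf[len++] = 'r';
	pack_int64(written, &replybuf[len]);	/* write position */
	len += 8;
	pack_int64(flushed, &replybuf[len]);	/* flush position */
	len += 8;
	pack_int64(0, &replybuf[len]);	/* apply: InvalidXLogRecPtr */
	len += 8;
	pack_int64(now, &replybuf[len]);	/* send timestamp */
	len += 8;
	replybuf[len++] = reply_requested ? 1 : 0;
	return len;					/* 34 bytes total */
}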
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 2bf0448464..33d2911a0e 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -328,9 +328,9 @@ sendFeedback(PGconn *conn, XLogRecPtr blockpos, int64 now, bool replyRequested)
else
fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* flush */
len += 8;
- fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* apply */
+ fe_sendint64(InvalidXLogRecPtr, &replybuf[len]); /* apply */
len += 8;
- fe_sendint64(now, &replybuf[len]); /* sendTime */
+ fe_sendint64(now, &replybuf[len]); /* sendTime */
len += 8;
replybuf[len] = replyRequested ? 1 : 0; /* replyRequested */
len += 1;
@@ -437,8 +437,8 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
* reporting the flush position makes one eligible as a synchronous
* replica. People shouldn't include generic names in
* synchronous_standby_names, but we've protected them against it so
- * far, so let's continue to do so in the situations when possible.
- * If they've got a slot, though, we need to report the flush position,
+ * far, so let's continue to do so in the situations when possible. If
+ * they've got a slot, though, we need to report the flush position,
* so that the master can remove WAL.
*/
reportFlushPosition = true;
@@ -766,7 +766,7 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
now = feGetCurrentTimestamp();
if (still_sending && standby_message_timeout > 0 &&
feTimestampDifferenceExceeds(last_status, now,
- standby_message_timeout))
+ standby_message_timeout))
{
/* Time to send feedback! */
if (!sendFeedback(conn, blockpos, now, false))
diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c
index e440dc4e24..1100260c05 100644
--- a/src/bin/pg_basebackup/streamutil.c
+++ b/src/bin/pg_basebackup/streamutil.c
@@ -255,7 +255,7 @@ feGetCurrentTimestamp(void)
*/
void
feTimestampDifference(int64 start_time, int64 stop_time,
- long *secs, int *microsecs)
+ long *secs, int *microsecs)
{
int64 diff = stop_time - start_time;
@@ -277,8 +277,8 @@ feTimestampDifference(int64 start_time, int64 stop_time,
*/
bool
feTimestampDifferenceExceeds(int64 start_time,
- int64 stop_time,
- int msec)
+ int64 stop_time,
+ int msec)
{
int64 diff = stop_time - start_time;
diff --git a/src/bin/pg_basebackup/streamutil.h b/src/bin/pg_basebackup/streamutil.h
index d0f3799d1e..c36a37bf15 100644
--- a/src/bin/pg_basebackup/streamutil.h
+++ b/src/bin/pg_basebackup/streamutil.h
@@ -16,9 +16,9 @@ extern PGconn *GetConnection(void);
extern int64 feGetCurrentTimestamp(void);
extern void feTimestampDifference(int64 start_time, int64 stop_time,
- long *secs, int *microsecs);
+ long *secs, int *microsecs);
extern bool feTimestampDifferenceExceeds(int64 start_time, int64 stop_time,
- int msec);
+ int msec);
extern void fe_sendint64(int64 i, char *buf);
extern int64 fe_recvint64(char *buf);
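The timestamp helpers declared above operate on int64 microsecond timestamps. Plausible implementations, sketched here rather than quoted from streamutil.c, look like this:

/* Sketches of the declared helpers, assuming microsecond timestamps. */
#include <stdbool.h>
#include <stdint.h>

static void
timestamp_difference(int64_t start_time, int64_t stop_time,
					 long *secs, int *microsecs)
{
	int64_t		diff = stop_time - start_time;

	if (diff <= 0)
	{
		*secs = 0;
		*microsecs = 0;
	}
	else
	{
		*secs = (long) (diff / 1000000);
		*microsecs = (int) (diff % 1000000);
	}
}

static bool
timestamp_difference_exceeds(int64_t start_time, int64_t stop_time, int msec)
{
	int64_t		diff = stop_time - start_time;

	return (diff >= msec * (int64_t) 1000);
}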
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 38f4692955..597fb60a52 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -11,8 +11,11 @@ program_options_handling_ok('pg_basebackup');
my $tempdir = tempdir;
start_test_server $tempdir;
-command_fails(['pg_basebackup'], 'pg_basebackup needs target directory specified');
-command_fails(['pg_basebackup', '-D', "$tempdir/backup"], 'pg_basebackup fails because of hba');
+command_fails(['pg_basebackup'],
+ 'pg_basebackup needs target directory specified');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup" ],
+ 'pg_basebackup fails because of hba');
open HBA, ">>$tempdir/pgdata/pg_hba.conf";
print HBA "local replication all trust\n";
@@ -21,7 +24,9 @@ print HBA "host replication all ::1/128 trust\n";
close HBA;
system_or_bail 'pg_ctl', '-s', '-D', "$tempdir/pgdata", 'reload';
-command_fails(['pg_basebackup', '-D', "$tempdir/backup"], 'pg_basebackup fails because of WAL configuration');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup" ],
+ 'pg_basebackup fails because of WAL configuration');
open CONF, ">>$tempdir/pgdata/postgresql.conf";
print CONF "max_wal_senders = 10\n";
@@ -29,62 +34,79 @@ print CONF "wal_level = archive\n";
close CONF;
restart_test_server;
-command_ok(['pg_basebackup', '-D', "$tempdir/backup"], 'pg_basebackup runs');
+command_ok([ 'pg_basebackup', '-D', "$tempdir/backup" ],
+ 'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
-command_ok(['pg_basebackup', '-D', "$tempdir/backup2", '--xlogdir', "$tempdir/xlog2"], 'separate xlog directory');
+command_ok(
+ [ 'pg_basebackup', '-D', "$tempdir/backup2", '--xlogdir',
+ "$tempdir/xlog2" ],
+ 'separate xlog directory');
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
-ok(-d "$tempdir/xlog2/", 'xlog directory was created');
+ok(-d "$tempdir/xlog2/", 'xlog directory was created');
-command_ok(['pg_basebackup', '-D', "$tempdir/tarbackup", '-Ft'], 'tar format');
+command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup", '-Ft' ],
+ 'tar format');
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
mkdir "$tempdir/tblspc1";
psql 'postgres', "CREATE TABLESPACE tblspc1 LOCATION '$tempdir/tblspc1';";
psql 'postgres', "CREATE TABLE test1 (a int) TABLESPACE tblspc1;";
-command_ok(['pg_basebackup', '-D', "$tempdir/tarbackup2", '-Ft'], 'tar format with tablespaces');
+command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup2", '-Ft' ],
+ 'tar format with tablespaces');
ok(-f "$tempdir/tarbackup2/base.tar", 'backup tar was created');
my @tblspc_tars = glob "$tempdir/tarbackup2/[0-9]*.tar";
is(scalar(@tblspc_tars), 1, 'one tablespace tar was created');
-command_fails(['pg_basebackup', '-D', "$tempdir/backup1", '-Fp'],
- 'plain format with tablespaces fails without tablespace mapping');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup1", '-Fp' ],
+ 'plain format with tablespaces fails without tablespace mapping');
-command_ok(['pg_basebackup', '-D', "$tempdir/backup1", '-Fp',
- "-T$tempdir/tblspc1=$tempdir/tbackup/tblspc1"],
- 'plain format with tablespaces succeeds with tablespace mapping');
+command_ok(
+ [ 'pg_basebackup', '-D',
+ "$tempdir/backup1", '-Fp',
+ "-T$tempdir/tblspc1=$tempdir/tbackup/tblspc1" ],
+ 'plain format with tablespaces succeeds with tablespace mapping');
ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');
opendir(my $dh, "$tempdir/pgdata/pg_tblspc") or die;
-ok((grep { -l "$tempdir/backup1/pg_tblspc/$_" and readlink "$tempdir/backup1/pg_tblspc/$_" eq "$tempdir/tbackup/tblspc1" } readdir($dh)),
- "tablespace symlink was updated");
+ok( ( grep
+ {
+ -l "$tempdir/backup1/pg_tblspc/$_"
+ and readlink "$tempdir/backup1/pg_tblspc/$_" eq
+ "$tempdir/tbackup/tblspc1"
+ } readdir($dh)),
+ "tablespace symlink was updated");
closedir $dh;
mkdir "$tempdir/tbl=spc2";
psql 'postgres', "DROP TABLE test1;";
psql 'postgres', "DROP TABLESPACE tblspc1;";
psql 'postgres', "CREATE TABLESPACE tblspc2 LOCATION '$tempdir/tbl=spc2';";
-command_ok(['pg_basebackup', '-D', "$tempdir/backup3", '-Fp',
- "-T$tempdir/tbl\\=spc2=$tempdir/tbackup/tbl\\=spc2"],
- 'mapping tablespace with = sign in path');
+command_ok(
+ [ 'pg_basebackup', '-D',
+ "$tempdir/backup3", '-Fp',
+ "-T$tempdir/tbl\\=spc2=$tempdir/tbackup/tbl\\=spc2" ],
+ 'mapping tablespace with = sign in path');
ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
psql 'postgres', "DROP TABLESPACE tblspc2;";
-command_fails(['pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
- "-T=/foo"],
- '-T with empty old directory fails');
-command_fails(['pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
- "-T/foo="],
- '-T with empty new directory fails');
-command_fails(['pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
- "-T/foo=/bar=/baz"],
- '-T with multiple = fails');
-command_fails(['pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
- "-Tfoo=/bar"],
- '-T with old directory not absolute fails');
-command_fails(['pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
- "-T/foo=bar"],
- '-T with new directory not absolute fails');
-command_fails(['pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
- "-Tfoo"],
- '-T with invalid format fails');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp', "-T=/foo" ],
+ '-T with empty old directory fails');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=" ],
+ '-T with empty new directory fails');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
+ "-T/foo=/bar=/baz" ],
+ '-T with multiple = fails');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo=/bar" ],
+ '-T with old directory not absolute fails');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=bar" ],
+ '-T with new directory not absolute fails');
+command_fails(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo" ],
+ '-T with invalid format fails');
diff --git a/src/bin/pg_config/t/001_pg_config.pl b/src/bin/pg_config/t/001_pg_config.pl
index 200f394fd7..c911798dbc 100644
--- a/src/bin/pg_config/t/001_pg_config.pl
+++ b/src/bin/pg_config/t/001_pg_config.pl
@@ -6,7 +6,11 @@ use Test::More tests => 7;
program_help_ok('pg_config');
program_version_ok('pg_config');
program_options_handling_ok('pg_config');
-command_like(['pg_config', '--bindir'], qr/bin/, 'pg_config single option'); # XXX might be wrong
-command_like(['pg_config', '--bindir', '--libdir'], qr/bin.*\n.*lib/, 'pg_config two options');
-command_like(['pg_config', '--libdir', '--bindir'], qr/lib.*\n.*bin/, 'pg_config two options different order');
-command_like(['pg_config'], qr/.*\n.*\n.*/, 'pg_config without options prints many lines');
+command_like([ 'pg_config', '--bindir' ], qr/bin/, 'pg_config single option')
+ ; # XXX might be wrong
+command_like([ 'pg_config', '--bindir', '--libdir' ],
+ qr/bin.*\n.*lib/, 'pg_config two options');
+command_like([ 'pg_config', '--libdir', '--bindir' ],
+ qr/lib.*\n.*bin/, 'pg_config two options different order');
+command_like(['pg_config'], qr/.*\n.*\n.*/,
+ 'pg_config without options prints many lines');
diff --git a/src/bin/pg_controldata/pg_controldata.c b/src/bin/pg_controldata/pg_controldata.c
index ea96fe1461..24e211668d 100644
--- a/src/bin/pg_controldata/pg_controldata.c
+++ b/src/bin/pg_controldata/pg_controldata.c
@@ -12,7 +12,7 @@
/*
* We have to use postgres.h not postgres_fe.h here, because there's so much
* backend-only stuff in the XLOG include files we need. But we need a
- * frontend-ish environment otherwise. Hence this ugly hack.
+ * frontend-ish environment otherwise. Hence this ugly hack.
*/
#define FRONTEND 1
diff --git a/src/bin/pg_controldata/t/001_pg_controldata.pl b/src/bin/pg_controldata/t/001_pg_controldata.pl
index ca89d87535..84c2fb25f3 100644
--- a/src/bin/pg_controldata/t/001_pg_controldata.pl
+++ b/src/bin/pg_controldata/t/001_pg_controldata.pl
@@ -9,6 +9,8 @@ program_help_ok('pg_controldata');
program_version_ok('pg_controldata');
program_options_handling_ok('pg_controldata');
command_fails(['pg_controldata'], 'pg_controldata without arguments fails');
-command_fails(['pg_controldata', 'nonexistent'], 'pg_controldata with nonexistent directory fails');
+command_fails([ 'pg_controldata', 'nonexistent' ],
+ 'pg_controldata with nonexistent directory fails');
system_or_bail "initdb -D $tempdir/data -A trust >/dev/null";
-command_like(['pg_controldata', "$tempdir/data"], qr/checkpoint/, 'pg_controldata produces output');
+command_like([ 'pg_controldata', "$tempdir/data" ],
+ qr/checkpoint/, 'pg_controldata produces output');
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 473d653406..d45a5262af 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -257,14 +257,16 @@ get_pgpid(bool is_status_request)
{
if (errno == ENOENT)
printf(_("%s: directory \"%s\" does not exist\n"), progname,
- pg_data);
+ pg_data);
else
printf(_("%s: cannot access directory \"%s\"\n"), progname,
- pg_data);
+ pg_data);
+
/*
- * The Linux Standard Base Core Specification 3.1 says this should return
- * '4, program or service status is unknown'
- * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+ * The Linux Standard Base Core Specification 3.1 says this should
+ * return '4, program or service status is unknown'
+ * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-g
+ * eneric/iniscrptact.html
*/
exit(is_status_request ? 4 : 1);
}
@@ -272,7 +274,7 @@ get_pgpid(bool is_status_request)
if (stat(version_file, &statbuf) != 0 && errno == ENOENT)
{
printf(_("%s: directory \"%s\" is not a database cluster directory\n"),
- progname, pg_data);
+ progname, pg_data);
exit(is_status_request ? 4 : 1);
}
@@ -401,8 +403,8 @@ readfile(const char *path)
void
free_readfile(char **optlines)
{
- char *curr_line = NULL;
- int i = 0;
+ char *curr_line = NULL;
+ int i = 0;
if (!optlines)
return;
@@ -506,7 +508,7 @@ test_postmaster_connection(bool do_checkpoint)
* 6 9.1+ server, shared memory not created
* 7 9.1+ server, shared memory created
*
- * This code does not support pre-9.1 servers. On Unix machines
+ * This code does not support pre-9.1 servers. On Unix machines
* we could consider extracting the port number from the shmem
* key, but that (a) is not robust, and (b) doesn't help with
* finding out the socket directory. And it wouldn't work anyway
@@ -539,7 +541,7 @@ test_postmaster_connection(bool do_checkpoint)
time_t pmstart;
/*
- * Make sanity checks. If it's for a standalone backend
+ * Make sanity checks. If it's for a standalone backend
* (negative PID), or the recorded start time is before
* pg_ctl started, then either we are looking at the wrong
* data directory, or this is a pre-existing pidfile that
@@ -660,7 +662,7 @@ test_postmaster_connection(bool do_checkpoint)
/*
* If we've been able to identify the child postmaster's PID, check
- * the process is still alive. This covers cases where the postmaster
+ * the process is still alive. This covers cases where the postmaster
* successfully created the pidfile but then crashed without removing
* it.
*/
@@ -755,7 +757,7 @@ read_post_opts(void)
{
*arg1 = '\0'; /* terminate so we get only program
* name */
- post_opts = pg_strdup(arg1 + 1); /* point past whitespace */
+ post_opts = pg_strdup(arg1 + 1); /* point past whitespace */
}
if (exec_path == NULL)
exec_path = pg_strdup(optline);
@@ -1162,8 +1164,8 @@ do_promote(void)
}
/*
- * For 9.3 onwards, "fast" promotion is performed. Promotion
- * with a full checkpoint is still possible by writing a file called
+ * For 9.3 onwards, "fast" promotion is performed. Promotion with a full
+ * checkpoint is still possible by writing a file called
* "fallback_promote" instead of "promote"
*/
snprintf(promote_file, MAXPGPATH, "%s/promote", pg_data);
@@ -1211,7 +1213,7 @@ postmaster_is_alive(pid_t pid)
* postmaster we are after.
*
* Don't believe that our own PID or parent shell's PID is the postmaster,
- * either. (Windows hasn't got getppid(), though.)
+ * either. (Windows hasn't got getppid(), though.)
*/
if (pid == getpid())
return false;
@@ -1273,7 +1275,8 @@ do_status(void)
/*
* The Linux Standard Base Core Specification 3.1 says this should return
* '3, program is not running'
- * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+ * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-gener
+ * ic/iniscrptact.html
*/
exit(3);
}
@@ -1298,20 +1301,23 @@ static bool
IsWindowsXPOrGreater(void)
{
OSVERSIONINFO osv;
+
osv.dwOSVersionInfoSize = sizeof(osv);
- /* Windows XP = Version 5.1 */
- return (!GetVersionEx(&osv) || /* could not get version */
+ /* Windows XP = Version 5.1 */
+ return (!GetVersionEx(&osv) || /* could not get version */
osv.dwMajorVersion > 5 || (osv.dwMajorVersion == 5 && osv.dwMinorVersion >= 1));
}
-static bool IsWindows7OrGreater(void)
+static bool
+IsWindows7OrGreater(void)
{
OSVERSIONINFO osv;
+
osv.dwOSVersionInfoSize = sizeof(osv);
- /* Windows 7 = Version 6.0 */
- return (!GetVersionEx(&osv) || /* could not get version */
+ /* Windows 7 = Version 6.0 */
+ return (!GetVersionEx(&osv) || /* could not get version */
osv.dwMajorVersion > 6 || (osv.dwMajorVersion == 6 && osv.dwMinorVersion >= 0));
}
#endif
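Both LSB citations in the pg_ctl hunks above refer to the init-script status codes; for reference, an illustrative enum (the names are invented, the numeric values come from the LSB 3.1 spec):

/* Illustrative names; values from the LSB 3.1 init-script actions. */
enum lsb_status_code
{
	LSB_STATUS_RUNNING = 0,		/* program is running or service is OK */
	LSB_STATUS_DEAD_PIDFILE = 1,	/* program is dead and pid file exists */
	LSB_STATUS_DEAD_LOCKFILE = 2,	/* program is dead and lock file exists */
	LSB_STATUS_NOT_RUNNING = 3, /* program is not running */
	LSB_STATUS_UNKNOWN = 4		/* program or service status is unknown */
};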
diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index 49c1b1a66a..f90dabffe5 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -9,17 +9,23 @@ program_help_ok('pg_ctl');
program_version_ok('pg_ctl');
program_options_handling_ok('pg_ctl');
-command_ok(['pg_ctl', 'initdb', '-D', "$tempdir/data"], 'pg_ctl initdb');
+command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data" ], 'pg_ctl initdb');
open CONF, ">>$tempdir/data/postgresql.conf";
print CONF "listen_addresses = ''\n";
print CONF "unix_socket_directories = '$tempdir'\n";
close CONF;
-command_ok(['pg_ctl', 'start', '-D', "$tempdir/data", '-w'], 'pg_ctl start -w');
-command_ok(['pg_ctl', 'start', '-D', "$tempdir/data", '-w'], 'second pg_ctl start succeeds');
-command_ok(['pg_ctl', 'stop', '-D', "$tempdir/data", '-w', '-m', 'fast'], 'pg_ctl stop -w');
-command_fails(['pg_ctl', 'stop', '-D', "$tempdir/data", '-w', '-m', 'fast'], 'second pg_ctl stop fails');
+command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data", '-w' ],
+ 'pg_ctl start -w');
+command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data", '-w' ],
+ 'second pg_ctl start succeeds');
+command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data", '-w', '-m', 'fast' ],
+ 'pg_ctl stop -w');
+command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data", '-w', '-m', 'fast' ],
+ 'second pg_ctl stop fails');
-command_ok(['pg_ctl', 'restart', '-D', "$tempdir/data", '-w', '-m', 'fast'], 'pg_ctl restart with server not running');
-command_ok(['pg_ctl', 'restart', '-D', "$tempdir/data", '-w', '-m', 'fast'], 'pg_ctl restart with server running');
+command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-w', '-m', 'fast' ],
+ 'pg_ctl restart with server not running');
+command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-w', '-m', 'fast' ],
+ 'pg_ctl restart with server running');
system_or_bail 'pg_ctl', '-s', 'stop', '-D', "$tempdir/data", '-m', 'fast';
diff --git a/src/bin/pg_ctl/t/002_status.pl b/src/bin/pg_ctl/t/002_status.pl
index e41c22fd5b..bd39747652 100644
--- a/src/bin/pg_ctl/t/002_status.pl
+++ b/src/bin/pg_ctl/t/002_status.pl
@@ -11,9 +11,12 @@ print CONF "listen_addresses = ''\n";
print CONF "unix_socket_directories = '$tempdir'\n";
close CONF;
-command_exit_is(['pg_ctl', 'status', '-D', "$tempdir/data"], 3, 'pg_ctl status with server not running');
+command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/data" ],
+ 3, 'pg_ctl status with server not running');
-system_or_bail 'pg_ctl', '-s', '-l', "$tempdir/logfile", '-D', "$tempdir/data", '-w', 'start';
-command_exit_is(['pg_ctl', 'status', '-D', "$tempdir/data"], 0, 'pg_ctl status with server running');
+system_or_bail 'pg_ctl', '-s', '-l', "$tempdir/logfile", '-D',
+ "$tempdir/data", '-w', 'start';
+command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/data" ],
+ 0, 'pg_ctl status with server running');
system_or_bail 'pg_ctl', '-s', 'stop', '-D', "$tempdir/data", '-m', 'fast';
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index 4d35ae54cc..94e9147b13 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -37,7 +37,7 @@ static int numCatalogIds = 0;
/*
* These variables are static to avoid the notational cruft of having to pass
- * them into findTableByOid() and friends. For each of these arrays, we
+ * them into findTableByOid() and friends. For each of these arrays, we
* build a sorted-by-OID index array immediately after it's built, and then
* we use binary search in findTableByOid() and friends. (qsort'ing the base
* arrays themselves would be simpler, but it doesn't work because pg_dump.c
@@ -487,7 +487,7 @@ findObjectByDumpId(DumpId dumpId)
*
* We use binary search in a sorted list that is built on first call.
* If AssignDumpId() and findObjectByCatalogId() calls were freely intermixed,
- * the code would work, but possibly be very slow. In the current usage
+ * the code would work, but possibly be very slow. In the current usage
* pattern that does not happen, indeed we build the list at most twice.
*/
DumpableObject *
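The comments above describe building a sorted-by-OID index array once and then binary-searching it in findTableByOid() and friends. A minimal sketch of that lookup over an array of pointers, with invented types and names:

/* Binary search over a pointer array sorted by OID; illustrative only. */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t OidSketch;

typedef struct
{
	OidSketch	oid;
	/* ... remainder of the dumpable object ... */
} ObjSketch;

static ObjSketch *
find_by_oid(ObjSketch **index, size_t n, OidSketch oid)
{
	size_t		lo = 0;
	size_t		hi = n;

	while (lo < hi)
	{
		size_t		mid = lo + (hi - lo) / 2;

		if (index[mid]->oid < oid)
			lo = mid + 1;
		else
			hi = mid;
	}
	if (lo < n && index[lo]->oid == oid)
		return index[lo];
	return NULL;				/* not found */
}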
diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c
index 06056b1c34..2e2a447ae7 100644
--- a/src/bin/pg_dump/compress_io.c
+++ b/src/bin/pg_dump/compress_io.c
@@ -561,7 +561,7 @@ cfopen(const char *path, const char *mode, int compression)
int
cfread(void *ptr, int size, cfp *fp)
{
- int ret;
+ int ret;
if (size == 0)
return 0;
@@ -598,7 +598,7 @@ cfwrite(const void *ptr, int size, cfp *fp)
int
cfgetc(cfp *fp)
{
- int ret;
+ int ret;
#ifdef HAVE_LIBZ
if (fp->compressedfp)
@@ -608,10 +608,10 @@ cfgetc(cfp *fp)
{
if (!gzeof(fp->compressedfp))
exit_horribly(modulename,
- "could not read from input file: %s\n", strerror(errno));
+ "could not read from input file: %s\n", strerror(errno));
else
exit_horribly(modulename,
- "could not read from input file: end of file\n");
+ "could not read from input file: end of file\n");
}
}
else
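cfread and cfgetc above dispatch between a zlib stream and a plain stdio stream behind one cfp handle. A reduced sketch of that wrapper pattern, assuming zlib is available; the real code also reports errors via exit_horribly:

/* Compressed-or-plain read wrapper; struct and names are invented. */
#include <stdio.h>
#include <zlib.h>

typedef struct
{
	FILE	   *uncompressedfp;	/* used when compression is off */
	gzFile		compressedfp;	/* used when compression is on */
} cfp_sketch;

static int
cf_read(void *ptr, int size, cfp_sketch *fp)
{
	if (size == 0)
		return 0;
	if (fp->compressedfp)
		return gzread(fp->compressedfp, ptr, size);
	return (int) fread(ptr, 1, (size_t) size, fp->uncompressedfp);
}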
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index 6161d88832..259c472337 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -184,7 +184,7 @@ fmtQualifiedId(int remoteVersion, const char *schema, const char *id)
* standard_conforming_strings settings.
*
* This is essentially equivalent to libpq's PQescapeStringInternal,
- * except for the output buffer structure. We need it in situations
+ * except for the output buffer structure. We need it in situations
* where we do not have a PGconn available. Where we do,
* appendStringLiteralConn is a better choice.
*/
@@ -359,7 +359,7 @@ appendByteaLiteral(PQExpBuffer buf, const unsigned char *str, size_t length,
/*
* This implementation is hard-wired to produce hex-format output. We do
* not know the server version the output will be loaded into, so making
- * an intelligent format choice is impossible. It might be better to
+ * an intelligent format choice is impossible. It might be better to
* always use the old escaped format.
*/
if (!enlargePQExpBuffer(buf, 2 * length + 5))
@@ -393,7 +393,7 @@ appendByteaLiteral(PQExpBuffer buf, const unsigned char *str, size_t length,
* into individual items.
*
* On success, returns true and sets *itemarray and *nitems to describe
- * an array of individual strings. On parse failure, returns false;
+ * an array of individual strings. On parse failure, returns false;
* *itemarray may exist or be NULL.
*
* NOTE: free'ing itemarray is sufficient to deallocate the working storage.
@@ -533,7 +533,7 @@ buildACLCommands(const char *name, const char *subname,
/*
* At the end, these two will be pasted together to form the result. But
* the owner privileges need to go before the other ones to keep the
- * dependencies valid. In recent versions this is normally the case, but
+ * dependencies valid. In recent versions this is normally the case, but
* in old versions they come after the PUBLIC privileges and that results
* in problems if we need to run REVOKE on the owner privileges.
*/
@@ -706,7 +706,7 @@ buildDefaultACLCommands(const char *type, const char *nspname,
/*
* We incorporate the target role directly into the command, rather than
- * playing around with SET ROLE or anything like that. This is so that a
+ * playing around with SET ROLE or anything like that. This is so that a
* permissions error leads to nothing happening, rather than changing
* default privileges for the wrong user.
*/
@@ -734,7 +734,7 @@ buildDefaultACLCommands(const char *type, const char *nspname,
*
* The returned grantee string will be the dequoted username or groupname
* (preceded with "group " in the latter case). The returned grantor is
- * the dequoted grantor name or empty. Privilege characters are decoded
+ * the dequoted grantor name or empty. Privilege characters are decoded
* and split between privileges with grant option (privswgo) and without
* (privs).
*
@@ -973,7 +973,7 @@ AddAcl(PQExpBuffer aclbuf, const char *keyword, const char *subname)
* namevar: name of query variable to match against an object-name pattern.
* altnamevar: NULL, or name of an alternative variable to match against name.
* visibilityrule: clause to use if we want to restrict to visible objects
- * (for example, "pg_catalog.pg_table_is_visible(p.oid)"). Can be NULL.
+ * (for example, "pg_catalog.pg_table_is_visible(p.oid)"). Can be NULL.
*
* Formatting note: the text already present in buf should end with a newline.
* The appended text, if any, will end with one too.
@@ -1020,7 +1020,7 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
* last alternatives which is not what we want.
*
* Note: the result of this pass is the actual regexp pattern(s) we want
- * to execute. Quoting/escaping into SQL literal format will be done
+ * to execute. Quoting/escaping into SQL literal format will be done
* below using appendStringLiteralConn().
*/
appendPQExpBufferStr(&namebuf, "^(");
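appendByteaLiteral, per the comment above, is hard-wired to hex output. A standalone sketch of emitting a bytea value in that format, writing to stdout instead of a PQExpBuffer:

/* Emit a bytea value in hex format: '\x<digits>'::bytea. */
#include <stdio.h>

static void
print_bytea_hex(const unsigned char *str, size_t length)
{
	static const char hextbl[] = "0123456789abcdef";
	size_t		i;

	fputs("'\\x", stdout);
	for (i = 0; i < length; i++)
	{
		putchar(hextbl[(str[i] >> 4) & 0xF]);
		putchar(hextbl[str[i] & 0xF]);
	}
	fputs("'::bytea", stdout);
}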
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index afe941f80b..caedbb8b4a 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -166,7 +166,7 @@ GetMyPSlot(ParallelState *pstate)
}
/*
- * Fail and die, with a message to stderr. Parameters as for write_msg.
+ * Fail and die, with a message to stderr. Parameters as for write_msg.
*
* This is defined in parallel.c, because in parallel mode, things are more
* complicated. If the worker process does exit_horribly(), we forward its
@@ -673,7 +673,7 @@ ParallelBackupEnd(ArchiveHandle *AH, ParallelState *pstate)
* AH->MasterStartParallelItemPtr, a routine of the output format. This
* function's arguments are the parents archive handle AH (containing the full
* catalog information), the TocEntry that the worker should work on and a
- * T_Action act indicating whether this is a backup or a restore item. The
+ * T_Action act indicating whether this is a backup or a restore item. The
* function then converts the TocEntry assignment into a string that is then
* sent over to the worker process. In the simplest case that would be
* something like "DUMP 1234", with 1234 being the TocEntry id.
@@ -840,8 +840,8 @@ lockTableNoWait(ArchiveHandle *AH, TocEntry *te)
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
exit_horribly(modulename,
"could not obtain lock on relation \"%s\"\n"
- "This usually means that someone requested an ACCESS EXCLUSIVE lock "
- "on the table after the pg_dump parent process had gotten the "
+ "This usually means that someone requested an ACCESS EXCLUSIVE lock "
+ "on the table after the pg_dump parent process had gotten the "
"initial ACCESS SHARE lock on the table.\n", qualId);
PQclear(res);
@@ -923,7 +923,7 @@ WaitForCommands(ArchiveHandle *AH, int pipefd[2])
}
else
exit_horribly(modulename,
- "unrecognized command on communication channel: %s\n",
+ "unrecognized command on communication channel: %s\n",
command);
/* command was pg_malloc'd and we are responsible for free()ing it. */
@@ -1251,7 +1251,7 @@ sendMessageToWorker(ParallelState *pstate, int worker, const char *str)
if (!aborting)
#endif
exit_horribly(modulename,
- "could not write to the communication channel: %s\n",
+ "could not write to the communication channel: %s\n",
strerror(errno));
}
}
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 08ace67db4..25780cfc1a 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -208,7 +208,7 @@ extern RestoreOptions *NewRestoreOptions(void);
extern void SortTocFromFile(Archive *AHX, RestoreOptions *ropt);
/* Convenience functions used only when writing DATA */
-extern void archputs(const char *s, Archive *AH);
+extern void archputs(const char *s, Archive *AH);
extern int
archprintf(Archive *AH, const char *fmt,...)
/* This extension allows gcc to check the format string */
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 2fa21193f5..e782438a86 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -423,7 +423,7 @@ RestoreArchive(Archive *AHX)
{
if (!ropt->if_exists)
{
- /* No --if-exists? Then just use the original */
+ /* No --if-exists? Then just use the original */
ahprintf(AH, "%s", te->dropStmt);
}
else
@@ -432,7 +432,7 @@ RestoreArchive(Archive *AHX)
char *mark;
char *dropStmt = pg_strdup(te->dropStmt);
char *dropStmtPtr = dropStmt;
- PQExpBuffer ftStmt = createPQExpBuffer();
+ PQExpBuffer ftStmt = createPQExpBuffer();
/*
* Need to inject IF EXISTS clause after ALTER TABLE
@@ -449,12 +449,12 @@ RestoreArchive(Archive *AHX)
* ALTER TABLE..ALTER COLUMN..DROP DEFAULT does not
* support the IF EXISTS clause, and therefore we
* simply emit the original command for such objects.
- * For other objects, we need to extract the first part
- * of the DROP which includes the object type. Most of
- * the time this matches te->desc, so search for that;
- * however for the different kinds of CONSTRAINTs, we
- * know to search for hardcoded "DROP CONSTRAINT"
- * instead.
+ * For other objects, we need to extract the first
+ * part of the DROP which includes the object type.
+ * Most of the time this matches te->desc, so search
+ * for that; however for the different kinds of
+ * CONSTRAINTs, we know to search for hardcoded "DROP
+ * CONSTRAINT" instead.
*/
if (strcmp(te->desc, "DEFAULT") == 0)
appendPQExpBuffer(ftStmt, "%s", dropStmt);
@@ -712,8 +712,8 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
/*
* In parallel restore, if we created the table earlier in
* the run then we wrap the COPY in a transaction and
- * precede it with a TRUNCATE. If archiving is not on
- * this prevents WAL-logging the COPY. This obtains a
+ * precede it with a TRUNCATE. If archiving is not on
+ * this prevents WAL-logging the COPY. This obtains a
* speedup similar to that from using single_txn mode in
* non-parallel restores.
*/
@@ -1492,8 +1492,8 @@ dump_lo_buf(ArchiveHandle *AH)
void
ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
{
- int bytes_written = 0;
-
+ int bytes_written = 0;
+
if (AH->writingBlob)
{
size_t remaining = size * nmemb;
@@ -1518,6 +1518,7 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
bytes_written = GZWRITE(ptr, size, nmemb, AH->OF);
else if (AH->CustomOutPtr)
bytes_written = AH->CustomOutPtr (AH, ptr, size * nmemb);
+
else
{
/*
@@ -1525,7 +1526,7 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
* connected then send it to the DB.
*/
if (RestoringToDB(AH))
- bytes_written = ExecuteSqlCommandBuf(AH, (const char *) ptr, size * nmemb);
+ bytes_written = ExecuteSqlCommandBuf(AH, (const char *) ptr, size * nmemb);
else
bytes_written = fwrite(ptr, size, nmemb, AH->OF) * size;
}
@@ -1623,7 +1624,7 @@ _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
* items.
*
* The arrays are indexed by dump ID (so entry zero is unused). Note that the
- * array entries run only up to maxDumpId. We might see dependency dump IDs
+ * array entries run only up to maxDumpId. We might see dependency dump IDs
* beyond that (if the dump was partial); so always check the array bound
* before trying to touch an array entry.
*/
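
As the comment notes, the by-dump-ID arrays are only sized up to maxDumpId, so
every lookup must be bounds-checked. A compilable sketch of that discipline
(names are stand-ins for the real tocsByDumpId/tableDataId arrays):

#include <stddef.h>

typedef int DumpId;

/*
 * Sketch only: return the entry for a dump ID, or NULL when the ID lies
 * beyond the array built from this (possibly partial) dump.
 */
static void *
lookup_by_dump_id(void **byDumpId, DumpId maxDumpId, DumpId id)
{
    if (id < 1 || id > maxDumpId)
        return NULL;            /* dependency outside the dump: skip it */
    return byDumpId[id];        /* entry zero is unused by convention */
}
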
@@ -1647,7 +1648,7 @@ buildTocEntryArrays(ArchiveHandle *AH)
/*
* tableDataId provides the TABLE DATA item's dump ID for each TABLE
- * TOC entry that has a DATA item. We compute this by reversing the
+ * TOC entry that has a DATA item. We compute this by reversing the
* TABLE DATA item's dependency, knowing that a TABLE DATA item has
* just one dependency and it is the TABLE item.
*/
@@ -1838,8 +1839,8 @@ WriteStr(ArchiveHandle *AH, const char *c)
if (c)
{
- int len = strlen(c);
-
+ int len = strlen(c);
+
res = WriteInt(AH, len);
(*AH->WriteBufPtr) (AH, c, len);
res += len;
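
For context, WriteStr emits a length-prefixed string: an integer length
followed by the raw bytes. A simplified stand-alone version of that framing
(native-endian length for brevity; the archive's real WriteInt is portable and
a negative length marking NULL is an assumption here, not the exact format):

#include <stdio.h>
#include <string.h>

/*
 * Simplified sketch of length-prefixed output: write the length, then the
 * bytes; a negative length stands for a NULL string.  Not the archive's
 * exact on-disk encoding.
 */
static size_t
write_str(FILE *out, const char *s)
{
    int len = s ? (int) strlen(s) : -1;

    fwrite(&len, sizeof(len), 1, out);
    if (len > 0)
        fwrite(s, 1, (size_t) len, out);
    return sizeof(len) + (len > 0 ? (size_t) len : 0);
}
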
@@ -1958,7 +1959,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
if (strncmp(sig, "PGDMP", 5) == 0)
{
- int byteread;
+ int byteread;
/*
* Finish reading (most of) a custom-format header.
@@ -2709,7 +2710,7 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user)
appendPQExpBufferStr(cmd, "SET SESSION AUTHORIZATION ");
/*
- * SQL requires a string literal here. Might as well be correct.
+ * SQL requires a string literal here. Might as well be correct.
*/
if (user && *user)
appendStringLiteralAHX(cmd, user, AH);
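
The comment's point, that SQL requires a string literal here, means the user
name must be quoted and escaped rather than interpolated bare. A minimal
sketch of the quoting rule (the real appendStringLiteral machinery also
handles backslashes and client encoding; this is only the core idea):

#include <stdio.h>
#include <string.h>

/*
 * Sketch: emit src as a SQL string literal into dst, doubling any
 * embedded single quotes.  dst is assumed large enough.
 */
static void
append_string_literal(char *dst, const char *src)
{
    char *p = dst;

    *p++ = '\'';
    for (; *src; src++)
    {
        if (*src == '\'')
            *p++ = '\'';        /* '' escapes a quote inside a literal */
        *p++ = *src;
    }
    *p++ = '\'';
    *p = '\0';
}

int
main(void)
{
    char buf[64];

    append_string_literal(buf, "o'brien");
    printf("SET SESSION AUTHORIZATION %s;\n", buf);  /* ...'o''brien'; */
    return 0;
}
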
@@ -2840,7 +2841,7 @@ _becomeUser(ArchiveHandle *AH, const char *user)
}
/*
- * Become the owner of the given TOC entry object. If
+ * Become the owner of the given TOC entry object. If
* changes in ownership are not allowed, this doesn't do anything.
*/
static void
@@ -2995,7 +2996,7 @@ _getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH)
strcmp(type, "FOREIGN TABLE") == 0 ||
strcmp(type, "TEXT SEARCH DICTIONARY") == 0 ||
strcmp(type, "TEXT SEARCH CONFIGURATION") == 0 ||
- /* non-schema-specified objects */
+ /* non-schema-specified objects */
strcmp(type, "DATABASE") == 0 ||
strcmp(type, "PROCEDURAL LANGUAGE") == 0 ||
strcmp(type, "SCHEMA") == 0 ||
@@ -3310,7 +3311,7 @@ ReadHead(ArchiveHandle *AH)
/*
* If we haven't already read the header, do so.
*
- * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
+ * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
* way to unify the cases?
*/
if (!AH->readHeader)
@@ -3419,7 +3420,7 @@ checkSeek(FILE *fp)
return false;
/*
- * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
+ * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
* this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
* successful no-op even on files that are otherwise unseekable.
*/
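
A stand-alone version of the probe this comment describes, under the stated
assumption that only an explicit fseeko(SEEK_SET) is a trustworthy test:

#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>

/*
 * Sketch of checkSeek()'s strategy: remember the current offset, then
 * require that an explicit SEEK_SET back to it succeeds.  A SEEK_CUR
 * no-op "succeeds" on some unseekable files, so it proves nothing.
 */
static bool
is_seekable(FILE *fp)
{
    off_t pos = ftello(fp);

    if (pos < 0)
        return false;
    return fseeko(fp, pos, SEEK_SET) == 0;
}
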
@@ -3459,7 +3460,7 @@ dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
*
* Work is done in three phases.
* First we process all SECTION_PRE_DATA tocEntries, in a single connection,
- * just as for a standard restore. Second we process the remaining non-ACL
+ * just as for a standard restore. Second we process the remaining non-ACL
* steps in parallel worker children (threads on Windows, processes on Unix),
* each of which connects separately to the database. Finally we process all
* the ACL entries in a single connection (that happens back in
@@ -3481,7 +3482,7 @@ restore_toc_entries_prefork(ArchiveHandle *AH)
* Do all the early stuff in a single connection in the parent. There's no
* great point in running it in parallel, in fact it will actually run
* faster in a single connection because we avoid all the connection and
- * setup overhead. Also, pre-9.2 pg_dump versions were not very good
+ * setup overhead. Also, pre-9.2 pg_dump versions were not very good
* about showing all the dependencies of SECTION_PRE_DATA items, so we do
* not risk trying to process them out-of-order.
*
@@ -3527,7 +3528,7 @@ restore_toc_entries_prefork(ArchiveHandle *AH)
}
/*
- * Now close parent connection in prep for parallel steps. We do this
+ * Now close parent connection in prep for parallel steps. We do this
* mainly to ensure that we don't exceed the specified number of parallel
* connections.
*/
@@ -3572,7 +3573,7 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
/*
* Initialize the lists of ready items, the list for pending items has
- * already been initialized in the caller. After this setup, the pending
+ * already been initialized in the caller. After this setup, the pending
* list is everything that needs to be done but is blocked by one or more
* dependencies, while the ready list contains items that have no
* remaining dependencies. Note: we don't yet filter out entries that
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 92ec1d89c6..c163f29baf 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -132,7 +132,7 @@ struct ParallelState;
exit_horribly(modulename, "could not write to output file: %s\n", \
strerror(errno)); \
} while (0)
-
+
typedef enum T_Action
{
ACT_DUMP,
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index 5f3a910089..06cd0a7864 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -610,7 +610,7 @@ static int
_WriteByte(ArchiveHandle *AH, const int i)
{
lclContext *ctx = (lclContext *) AH->formatData;
- int res;
+ int res;
if ((res = fputc(i, AH->FH)) == EOF)
WRITE_ERROR_EXIT;
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 980d68fdab..762a9a8781 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -60,7 +60,7 @@ _check_database_version(ArchiveHandle *AH)
/*
* Reconnect to the server. If dbname is not NULL, use that database,
* else the one associated with the archive handle. If username is
- * not NULL, use that user name, else the one from the handle. If
+ * not NULL, use that user name, else the one from the handle. If
* both the database and the user match the existing connection already,
* nothing will be done.
*
@@ -101,7 +101,7 @@ ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username)
*
* Note: it's not really all that sensible to use a single-entry password
* cache if the username keeps changing. In current usage, however, the
- * username never does change, so one savedPassword is sufficient. We do
+ * username never does change, so one savedPassword is sufficient. We do
* update the cache on the off chance that the password has changed since the
* start of the run.
*/
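
A toy model of the single-entry cache this comment describes (globals and
names are illustrative; the archiver keeps this state in its handle):

#include <stdlib.h>
#include <string.h>

static char *savedUser;
static char *savedPassword;

/* Remember the most recent (user, password) pair, replacing any older one. */
static void
save_password(const char *user, const char *password)
{
    free(savedUser);
    free(savedPassword);
    savedUser = strdup(user);
    savedPassword = strdup(password);
}

/* Return the cached password only if the username has not changed. */
static const char *
lookup_password(const char *user)
{
    if (savedUser && strcmp(savedUser, user) == 0)
        return savedPassword;   /* hit: one entry suffices */
    return NULL;                /* miss: caller must prompt again */
}
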
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 2cd9b7e9f8..39e29d8022 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -357,7 +357,7 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
/* Are we aborting? */
checkAborting(AH);
-
+
if (dLen > 0 && cfwrite(data, dLen, ctx->dataFH) != dLen)
WRITE_ERROR_EXIT;
@@ -408,7 +408,7 @@ _PrintFileData(ArchiveHandle *AH, char *filename, RestoreOptions *ropt)
ahwrite(buf, 1, cnt, AH);
free(buf);
- if (cfclose(cfp) != 0)
+ if (cfclose(cfp) != 0)
exit_horribly(modulename, "could not close data file: %s\n",
strerror(errno));
}
@@ -543,8 +543,8 @@ _ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
lclContext *ctx = (lclContext *) AH->formatData;
/*
- * If there was an I/O error, we already exited in cfread(),
- * so here we exit on short reads.
+ * If there was an I/O error, we already exited in cfread(), so here we
+ * exit on short reads.
*/
if (cfread(buf, len, ctx->dataFH) != len)
exit_horribly(modulename,
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index f505393057..0cb72650b0 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -560,7 +560,7 @@ _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh)
res = GZREAD(&((char *) buf)[used], 1, len, th->zFH);
if (res != len && !GZEOF(fh))
exit_horribly(modulename,
- "could not read from input file: %s\n", strerror(errno));
+ "could not read from input file: %s\n", strerror(errno));
}
else
{
@@ -821,7 +821,7 @@ _ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
/* We already would have exited for errors on reads, must be EOF */
exit_horribly(modulename,
"could not read from input file: end of file\n");
-
+
ctx->filePos += len;
return;
}
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index c2bb6161b2..e52591606f 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -86,10 +86,10 @@ bool g_verbose; /* User wants verbose narration of our
* activities. */
/* various user-settable parameters */
-static bool schemaOnly;
-static bool dataOnly;
+static bool schemaOnly;
+static bool dataOnly;
static int dumpSections; /* bitmask of chosen sections */
-static bool aclsSkip;
+static bool aclsSkip;
static const char *lockWaitTimeout;
/* subquery used to convert user ID (eg, datdba) to user name */
@@ -227,7 +227,7 @@ static void makeTableDataInfo(TableInfo *tbinfo, bool oids);
static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
- bool is_agg);
+ bool is_agg);
static char *format_function_arguments_old(Archive *fout,
FuncInfo *finfo, int nallargs,
char **allargtypes,
@@ -1267,7 +1267,7 @@ selectDumpableTable(TableInfo *tbinfo)
* Mark a type as to be dumped or not
*
* If it's a table's rowtype or an autogenerated array type, we also apply a
- * special type code to facilitate sorting into the desired order. (We don't
+ * special type code to facilitate sorting into the desired order. (We don't
* want to consider those to be ordinary types because that would bring tables
* up into the datatype part of the dump order.) We still set the object's
* dump flag; that's not going to cause the dummy type to be dumped, but we
@@ -1669,7 +1669,7 @@ dumpTableData_insert(Archive *fout, void *dcontext)
/*
* These types are printed without quotes unless
* they contain values that aren't accepted by the
- * scanner unquoted (e.g., 'NaN'). Note that
+ * scanner unquoted (e.g., 'NaN'). Note that
* strtod() and friends might accept NaN, so we
* can't use that to test.
*
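
The rule this comment reflows, quote numeric output only when it contains
characters the scanner would reject bare, can be stated as a tiny predicate.
A sketch under that assumption (a character-class test is used precisely
because, as the comment says, strtod() would wrongly accept NaN):

#include <ctype.h>
#include <stdbool.h>

/*
 * Sketch: true if val must be quoted, i.e. it contains anything beyond
 * the characters acceptable in an unquoted numeric literal.
 */
static bool
needs_quotes(const char *val)
{
    for (; *val; val++)
    {
        if (!isdigit((unsigned char) *val) &&
            *val != '.' && *val != '+' && *val != '-' &&
            *val != 'e' && *val != 'E')
            return true;        /* e.g. "NaN" or "Infinity" */
    }
    return false;
}
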
@@ -1930,31 +1930,31 @@ buildMatViewRefreshDependencies(Archive *fout)
query = createPQExpBuffer();
appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
- "( "
+ "( "
"SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
- "FROM pg_depend d1 "
- "JOIN pg_class c1 ON c1.oid = d1.objid "
- "AND c1.relkind = 'm' "
- "JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
+ "FROM pg_depend d1 "
+ "JOIN pg_class c1 ON c1.oid = d1.objid "
+ "AND c1.relkind = 'm' "
+ "JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
"JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
- "AND d2.objid = r1.oid "
- "AND d2.refobjid <> d1.objid "
- "JOIN pg_class c2 ON c2.oid = d2.refobjid "
- "AND c2.relkind IN ('m','v') "
- "WHERE d1.classid = 'pg_class'::regclass "
- "UNION "
- "SELECT w.objid, d3.refobjid, c3.relkind "
- "FROM w "
- "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
+ "AND d2.objid = r1.oid "
+ "AND d2.refobjid <> d1.objid "
+ "JOIN pg_class c2 ON c2.oid = d2.refobjid "
+ "AND c2.relkind IN ('m','v') "
+ "WHERE d1.classid = 'pg_class'::regclass "
+ "UNION "
+ "SELECT w.objid, d3.refobjid, c3.relkind "
+ "FROM w "
+ "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
"JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
- "AND d3.objid = r3.oid "
- "AND d3.refobjid <> w.refobjid "
- "JOIN pg_class c3 ON c3.oid = d3.refobjid "
- "AND c3.relkind IN ('m','v') "
- ") "
+ "AND d3.objid = r3.oid "
+ "AND d3.refobjid <> w.refobjid "
+ "JOIN pg_class c3 ON c3.oid = d3.refobjid "
+ "AND c3.relkind IN ('m','v') "
+ ") "
"SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
- "FROM w "
- "WHERE refrelkind = 'm'");
+ "FROM w "
+ "WHERE refrelkind = 'm'");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -2065,7 +2065,7 @@ getTableDataFKConstraints(void)
/*
* guessConstraintInheritance:
* In pre-8.4 databases, we can't tell for certain which constraints
- * are inherited. We assume a CHECK constraint is inherited if its name
+ * are inherited. We assume a CHECK constraint is inherited if its name
* matches the name of any constraint in the parent. Originally this code
* tried to compare the expression texts, but that can fail for various
* reasons --- for example, if the parent and child tables are in different
@@ -2830,7 +2830,7 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
appendPQExpBufferStr(upgrade_buffer,
- "\n-- For binary upgrade, must preserve pg_class oids\n");
+ "\n-- For binary upgrade, must preserve pg_class oids\n");
if (!is_index)
{
@@ -3020,7 +3020,7 @@ getNamespaces(Archive *fout, int *numNamespaces)
* getNamespaces
*
* NB: for pre-7.3 source database, we use object OID to guess whether it's
- * a system object or not. In 7.3 and later there is no guessing, and we
+ * a system object or not. In 7.3 and later there is no guessing, and we
* don't use objoid at all.
*/
static NamespaceInfo *
@@ -3179,7 +3179,7 @@ getTypes(Archive *fout, int *numTypes)
* auto-generated array type by checking the element type's typarray.
* (Before that the test is capable of generating false positives.) We
* still check for name beginning with '_', though, so as to avoid the
- * cost of the subselect probe for all standard types. This would have to
+ * cost of the subselect probe for all standard types. This would have to
* be revisited if the backend ever allows renaming of array types.
*/
@@ -3324,8 +3324,8 @@ getTypes(Archive *fout, int *numTypes)
/*
* If it's a base type, make a DumpableObject representing a shell
- * definition of the type. We will need to dump that ahead of the I/O
- * functions for the type. Similarly, range types need a shell
+ * definition of the type. We will need to dump that ahead of the I/O
+ * functions for the type. Similarly, range types need a shell
* definition in case they have a canonicalize function.
*
* Note: the shell type doesn't have a catId. You might think it
@@ -4078,7 +4078,7 @@ getFuncs(Archive *fout, int *numFuncs)
* Find all user-defined functions. Normally we can exclude functions in
* pg_catalog, which is worth doing since there are several thousand of
* 'em. However, there are some extensions that create functions in
- * pg_catalog. In normal dumps we can still ignore those --- but in
+ * pg_catalog. In normal dumps we can still ignore those --- but in
* binary-upgrade mode, we must dump the member objects of the extension,
* so be sure to fetch any such functions.
*
@@ -4106,12 +4106,12 @@ getFuncs(Archive *fout, int *numFuncs)
username_subquery);
if (fout->remoteVersion >= 90200)
appendPQExpBufferStr(query,
- "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
+ "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
"WHERE classid = 'pg_proc'::regclass AND "
"objid = p.oid AND deptype = 'i')");
if (binary_upgrade && fout->remoteVersion >= 90100)
appendPQExpBufferStr(query,
- "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
+ "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
"classid = 'pg_proc'::regclass AND "
"objid = p.oid AND "
"refclassid = 'pg_extension'::regclass AND "
@@ -4312,9 +4312,9 @@ getTables(Archive *fout, int *numTables)
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
- "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
+ "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
"CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
- "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
+ "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
"array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
"FROM pg_class c "
"LEFT JOIN pg_depend d ON "
@@ -4351,9 +4351,9 @@ getTables(Archive *fout, int *numTables)
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
- "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
+ "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
"CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
- "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
+ "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
"array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
"FROM pg_class c "
"LEFT JOIN pg_depend d ON "
@@ -4796,7 +4796,7 @@ getTables(Archive *fout, int *numTables)
selectDumpableTable(&tblinfo[i]);
tblinfo[i].interesting = tblinfo[i].dobj.dump;
- tblinfo[i].postponed_def = false; /* might get set during sort */
+ tblinfo[i].postponed_def = false; /* might get set during sort */
/*
* Read-lock target tables to make sure they aren't DROPPED or altered
@@ -5240,7 +5240,7 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
/*
* In pre-7.4 releases, indkeys may contain more entries than
* indnkeys says (since indnkeys will be 1 for a functional
- * index). We don't actually care about this case since we don't
+ * index). We don't actually care about this case since we don't
* examine indkeys except for indexes associated with PRIMARY and
* UNIQUE constraints, which are never functional indexes. But we
* have to allocate enough space to keep parseOidArray from
@@ -5489,7 +5489,7 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
/*
* Make the domain depend on the constraint, ensuring it won't be
- * output till any constraint dependencies are OK. If the constraint
+ * output till any constraint dependencies are OK. If the constraint
* has not been validated, it's going to be dumped after the domain
* anyway, so this doesn't matter.
*/
@@ -6152,7 +6152,7 @@ getCasts(Archive *fout, int *numCasts)
"FROM pg_type t1, pg_type t2, pg_proc p "
"WHERE p.pronargs = 1 AND "
"p.proargtypes[0] = t1.oid AND "
- "p.prorettype = t2.oid AND p.proname = t2.typname "
+ "p.prorettype = t2.oid AND p.proname = t2.typname "
"ORDER BY 3,4");
}
@@ -6325,7 +6325,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
else if (fout->remoteVersion >= 90100)
{
/*
- * attcollation is new in 9.1. Since we only want to dump COLLATE
+ * attcollation is new in 9.1. Since we only want to dump COLLATE
* clauses for attributes whose collation is different from their
* type's default, we use a CASE here to suppress uninteresting
* attcollations cheaply.
@@ -6580,7 +6580,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* Defaults on a VIEW must always be dumped as separate ALTER
- * TABLE commands. Defaults on regular tables are dumped as
+ * TABLE commands. Defaults on regular tables are dumped as
* part of the CREATE TABLE if possible, which it won't be if
* the column is not going to be emitted explicitly.
*/
@@ -6777,7 +6777,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* If the constraint is inherited, this will be detected later
- * (in pre-8.4 databases). We also detect later if the
+ * (in pre-8.4 databases). We also detect later if the
* constraint must be split out from the table definition.
*/
}
@@ -7619,7 +7619,7 @@ findComments(Archive *fout, Oid classoid, Oid objoid,
/*
* Pre-7.2, pg_description does not contain classoid, so collectComments
- * just stores a zero. If there's a collision on object OID, well, you
+ * just stores a zero. If there's a collision on object OID, well, you
* get duplicate comments.
*/
if (fout->remoteVersion < 70200)
@@ -7996,7 +7996,7 @@ dumpExtension(Archive *fout, ExtensionInfo *extinfo)
/*
* We unconditionally create the extension, so we must drop it if it
- * exists. This could happen if the user deleted 'plpgsql' and then
+ * exists. This could happen if the user deleted 'plpgsql' and then
* readded it, causing its oid to be greater than FirstNormalObjectId.
* The FirstNormalObjectId test was kept to avoid repeatedly dropping
* and recreating extensions like 'plpgsql'.
@@ -8004,7 +8004,7 @@ dumpExtension(Archive *fout, ExtensionInfo *extinfo)
appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
appendPQExpBufferStr(q,
- "SELECT binary_upgrade.create_empty_extension(");
+ "SELECT binary_upgrade.create_empty_extension(");
appendStringLiteralAH(q, extinfo->dobj.name, fout);
appendPQExpBufferStr(q, ", ");
appendStringLiteralAH(q, extinfo->namespace, fout);
@@ -8015,7 +8015,7 @@ dumpExtension(Archive *fout, ExtensionInfo *extinfo)
/*
* Note that we're pushing extconfig (an OID array) back into
- * pg_extension exactly as-is. This is OK because pg_class OIDs are
+ * pg_extension exactly as-is. This is OK because pg_class OIDs are
* preserved in binary upgrade.
*/
if (strlen(extinfo->extconfig) > 2)
@@ -8944,7 +8944,7 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
if (fout->remoteVersion >= 90100)
{
/*
- * attcollation is new in 9.1. Since we only want to dump COLLATE
+ * attcollation is new in 9.1. Since we only want to dump COLLATE
* clauses for attributes whose collation is different from their
* type's default, we use a CASE here to suppress uninteresting
* attcollations cheaply. atttypid will be 0 for dropped columns;
@@ -9258,7 +9258,7 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
/*
* Note the lack of a DROP command for the shell type; any required DROP
- * is driven off the base type entry, instead. This interacts with
+ * is driven off the base type entry, instead. This interacts with
* _printTocEntry()'s use of the presence of a DROP command to decide
* whether an entry needs an ALTER OWNER command. We don't want to alter
* the shell type's owner immediately on creation; that should happen only
@@ -9295,7 +9295,7 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
*
* For some backwards compatibility with the older behavior, we forcibly
* dump a PL if its handler function (and validator if any) are in a
- * dumpable namespace. That case is not checked here.
+ * dumpable namespace. That case is not checked here.
*
* Also, if the PL belongs to an extension, we do not use this heuristic.
* That case isn't checked here either.
@@ -9619,7 +9619,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
PQExpBuffer asPart;
PGresult *res;
char *funcsig; /* identity signature */
- char *funcfullsig = NULL; /* full signature */
+ char *funcfullsig = NULL; /* full signature */
char *funcsig_tag;
char *proretset;
char *prosrc;
@@ -9982,7 +9982,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
/*
* COST and ROWS are emitted only if present and not default, so as not to
- * break backwards-compatibility of the dump without need. Keep this code
+ * break backwards-compatibility of the dump without need. Keep this code
* in sync with the defaults in functioncmds.c.
*/
if (strcmp(procost, "0") != 0)
@@ -10779,7 +10779,7 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
* XXX RECHECK is gone as of 8.4, but we'll still print it if dumping an
* older server's opclass in which it is used. This is to avoid
* hard-to-detect breakage if a newer pg_dump is used to dump from an
- * older server and then reload into that old version. This can go away
+ * older server and then reload into that old version. This can go away
* once 8.3 is so old as to not be of interest to anyone.
*/
resetPQExpBuffer(query);
@@ -11057,7 +11057,7 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
* XXX RECHECK is gone as of 8.4, but we'll still print it if dumping an
* older server's opclass in which it is used. This is to avoid
* hard-to-detect breakage if a newer pg_dump is used to dump from an
- * older server and then reload into that old version. This can go away
+ * older server and then reload into that old version. This can go away
* once 8.3 is so old as to not be of interest to anyone.
*/
if (fout->remoteVersion >= 90100)
@@ -11606,8 +11606,8 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
"aggtransspace, agginitval, "
"aggmtransspace, aggminitval, "
"true AS convertok, "
- "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
- "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs "
+ "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
+ "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs "
"FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
"WHERE a.aggfnoid = p.oid "
"AND p.oid = '%u'::pg_catalog.oid",
@@ -11625,8 +11625,8 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
"0 AS aggtransspace, agginitval, "
"0 AS aggmtransspace, NULL AS aggminitval, "
"true AS convertok, "
- "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
- "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs "
+ "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
+ "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs "
"FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
"WHERE a.aggfnoid = p.oid "
"AND p.oid = '%u'::pg_catalog.oid",
@@ -11644,7 +11644,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
"0 AS aggtransspace, agginitval, "
"0 AS aggmtransspace, NULL AS aggminitval, "
"true AS convertok "
- "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
+ "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
"WHERE a.aggfnoid = p.oid "
"AND p.oid = '%u'::pg_catalog.oid",
agginfo->aggfn.dobj.catId.oid);
@@ -12478,7 +12478,7 @@ dumpUserMappings(Archive *fout,
* to fail if run by a non-superuser. Note that the view will show
* umoptions as null if the user hasn't got privileges for the associated
* server; this means that pg_dump will dump such a mapping, but with no
- * OPTIONS clause. A possible alternative is to skip such mappings
+ * OPTIONS clause. A possible alternative is to skip such mappings
* altogether, but it's not clear that that's an improvement.
*/
selectSourceSchema(fout, "pg_catalog");
@@ -12620,7 +12620,7 @@ dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo)
* 'type' must be one of
* TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
* FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT.
- * 'name' is the formatted name of the object. Must be quoted etc. already.
+ * 'name' is the formatted name of the object. Must be quoted etc. already.
* 'subname' is the formatted name of the sub-object, if any. Must be quoted.
* 'tag' is the tag for the archive entry (typ. unquoted name of object).
* 'nspname' is the namespace the object is in (NULL if none).
@@ -12991,7 +12991,7 @@ dumpTable(Archive *fout, TableInfo *tbinfo)
tbinfo->relacl);
/*
- * Handle column ACLs, if any. Note: we pull these with a separate
+ * Handle column ACLs, if any. Note: we pull these with a separate
* query rather than trying to fetch them during getTableAttrs, so
* that we won't miss ACLs on system columns.
*/
@@ -13427,7 +13427,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
* physical column order, including dropped columns, as in the
* original. Therefore, we create dropped columns above and drop them
* here, also updating their attlen/attalign values so that the
- * dropped column can be skipped properly. (We do not bother with
+ * dropped column can be skipped properly. (We do not bother with
* restoring the original attbyval setting.) Also, inheritance
* relationships are set up by doing ALTER INHERIT rather than using
* an INHERITS clause --- the latter would possibly mess up the column
@@ -13436,7 +13436,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
* Analogously, we set up typed tables using ALTER TABLE / OF here.
*/
if (binary_upgrade && (tbinfo->relkind == RELKIND_RELATION ||
- tbinfo->relkind == RELKIND_FOREIGN_TABLE) )
+ tbinfo->relkind == RELKIND_FOREIGN_TABLE))
{
for (j = 0; j < tbinfo->numatts; j++)
{
@@ -13469,8 +13469,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
Assert(tbinfo->relkind != RELKIND_FOREIGN_TABLE);
appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate inherited column.\n");
appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_attribute\n"
- "SET attislocal = false\n"
- "WHERE attname = ");
+ "SET attislocal = false\n"
+ "WHERE attname = ");
appendStringLiteralAH(q, tbinfo->attnames[j], fout);
appendPQExpBufferStr(q, "\n AND attrelid = ");
appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
@@ -13492,8 +13492,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
fmtId(constr->dobj.name));
appendPQExpBuffer(q, "%s;\n", constr->condef);
appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_constraint\n"
- "SET conislocal = false\n"
- "WHERE contype = 'c' AND conname = ");
+ "SET conislocal = false\n"
+ "WHERE contype = 'c' AND conname = ");
appendStringLiteralAH(q, constr->dobj.name, fout);
appendPQExpBufferStr(q, "\n AND conrelid = ");
appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
@@ -13555,8 +13555,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
{
appendPQExpBufferStr(q, "\n-- For binary upgrade, mark materialized view as populated\n");
appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_class\n"
- "SET relispopulated = 't'\n"
- "WHERE oid = ");
+ "SET relispopulated = 't'\n"
+ "WHERE oid = ");
appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
}
@@ -13839,10 +13839,10 @@ dumpIndex(Archive *fout, IndxInfo *indxinfo)
/*
* If there's an associated constraint, don't dump the index per se, but
- * do dump any comment for it. (This is safe because dependency ordering
- * will have ensured the constraint is emitted first.) Note that the
- * emitted comment has to be shown as depending on the constraint, not
- * the index, in such cases.
+ * do dump any comment for it. (This is safe because dependency ordering
+ * will have ensured the constraint is emitted first.) Note that the
+ * emitted comment has to be shown as depending on the constraint, not the
+ * index, in such cases.
*/
if (!is_constraint)
{
@@ -14188,7 +14188,7 @@ findLastBuiltinOid_V71(Archive *fout, const char *dbname)
* find the last built in oid
*
* For 7.0, we do this by assuming that the last thing that initdb does is to
- * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
+ * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
* initdb won't be changing anymore, it'll do.
*/
static Oid
@@ -15176,7 +15176,7 @@ getDependencies(Archive *fout)
/*
* Ordinarily, table rowtypes have implicit dependencies on their
- * tables. However, for a composite type the implicit dependency goes
+ * tables. However, for a composite type the implicit dependency goes
* the other way in pg_depend; which is the right thing for DROP but
* it doesn't produce the dependency ordering we need. So in that one
* case, we reverse the direction of the dependency.
@@ -15313,7 +15313,7 @@ addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
* chains linking through objects that don't appear explicitly in the dump.
* For example, a view will depend on its _RETURN rule while the _RETURN rule
* will depend on other objects --- but the rule will not appear as a separate
- * object in the dump. We need to adjust the view's dependencies to include
+ * object in the dump. We need to adjust the view's dependencies to include
* whatever the rule depends on that is included in the dump.
*
* Just to make things more complicated, there are also "special" dependencies
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index e01015eb0a..daf98bcb54 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -65,10 +65,10 @@ typedef struct SimpleOidList
*
* NOTE: the structures described here live for the entire pg_dump run;
* and in most cases we make a struct for every object we can find in the
- * catalogs, not only those we are actually going to dump. Hence, it's
+ * catalogs, not only those we are actually going to dump. Hence, it's
* best to store a minimal amount of per-object info in these structs,
* and retrieve additional per-object info when and if we dump a specific
- * object. In particular, try to avoid retrieving expensive-to-compute
+ * object. In particular, try to avoid retrieving expensive-to-compute
* information until it's known to be needed. We do, however, have to
* store enough info to determine whether an object should be dumped and
* what order to dump in.
@@ -366,12 +366,12 @@ typedef struct _evttriggerInfo
} EventTriggerInfo;
/*
- * struct ConstraintInfo is used for all constraint types. However we
+ * struct ConstraintInfo is used for all constraint types. However we
* use a different objType for foreign key constraints, to make it easier
* to sort them the way we want.
*
* Note: condeferrable and condeferred are currently only valid for
- * unique/primary-key constraints. Otherwise that info is in condef.
+ * unique/primary-key constraints. Otherwise that info is in condef.
*/
typedef struct _constraintInfo
{
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 359093214a..1b505a0fca 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -23,7 +23,7 @@ static const char *modulename = gettext_noop("sorter");
/*
* Sort priority for object types when dumping a pre-7.3 database.
* Objects are sorted by priority levels, and within an equal priority level
- * by OID. (This is a relatively crude hack to provide semi-reasonable
+ * by OID. (This is a relatively crude hack to provide semi-reasonable
* behavior for old databases without full dependency info.) Note: collations,
* extensions, text search, foreign-data, materialized view, event trigger,
* and default ACL objects can't really happen here, so the rather bogus
@@ -387,11 +387,11 @@ sortDumpableObjects(DumpableObject **objs, int numObjs,
* TopoSort -- topological sort of a dump list
*
* Generate a re-ordering of the dump list that satisfies all the dependency
- * constraints shown in the dump list. (Each such constraint is a fact of a
+ * constraints shown in the dump list. (Each such constraint is a fact of a
* partial ordering.) Minimize rearrangement of the list not needed to
* achieve the partial ordering.
*
- * The input is the list of numObjs objects in objs[]. This list is not
+ * The input is the list of numObjs objects in objs[]. This list is not
* modified.
*
* Returns TRUE if able to build an ordering that satisfies all the
@@ -434,7 +434,7 @@ TopoSort(DumpableObject **objs,
* linked list of items-ready-to-output as Knuth does, we maintain a heap
* of their item numbers, which we can use as a priority queue. This
* turns the algorithm from O(N) to O(N log N) because each insertion or
- * removal of a heap item takes O(log N) time. However, that's still
+ * removal of a heap item takes O(log N) time. However, that's still
* plenty fast enough for this application.
*/
@@ -492,9 +492,9 @@ TopoSort(DumpableObject **objs,
}
/*--------------------
- * Now emit objects, working backwards in the output list. At each step,
+ * Now emit objects, working backwards in the output list. At each step,
* we use the priority heap to select the last item that has no remaining
- * before-constraints. We remove that item from the heap, output it to
+ * before-constraints. We remove that item from the heap, output it to
* ordering[], and decrease the beforeConstraints count of each of the
* items it was constrained against. Whenever an item's beforeConstraints
* count is thereby decreased to zero, we insert it into the priority heap
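
The priority heap mentioned here is an ordinary binary max-heap of item
numbers. A textbook sketch of the removal step (compare removeHeapElement()
in this file; this version is illustrative, not the committed code):

/*
 * Pop the largest item number from a binary max-heap stored in
 * heap[0..heapLength-1]; the caller shrinks heapLength by one.
 */
static int
heap_pop_max(int *heap, int heapLength)
{
    int result = heap[0];
    int val = heap[--heapLength];   /* element to sift down from the root */
    int i = 0;

    for (;;)
    {
        int j = 2 * i + 1;          /* left child */

        if (j >= heapLength)
            break;
        if (j + 1 < heapLength && heap[j + 1] > heap[j])
            j++;                    /* pick the larger child */
        if (val >= heap[j])
            break;
        heap[i] = heap[j];          /* promote child, keep descending */
        i = j;
    }
    heap[i] = val;
    return result;
}
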
@@ -622,7 +622,7 @@ removeHeapElement(int *heap, int heapLength)
* before trying TopoSort again. We can safely repair loops that are
* disjoint (have no members in common); if we find overlapping loops
* then we repair only the first one found, because the action taken to
- * repair the first might have repaired the other as well. (If not,
+ * repair the first might have repaired the other as well. (If not,
* we'll fix it on the next go-round.)
*
* objs[] lists the objects TopoSort couldn't sort
@@ -672,7 +672,7 @@ findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs)
{
/*
* There's no loop starting at this object, but mark it processed
- * anyway. This is not necessary for correctness, but saves later
+ * anyway. This is not necessary for correctness, but saves later
* invocations of findLoop() from uselessly chasing references to
* such an object.
*/
@@ -714,7 +714,7 @@ findLoop(DumpableObject *obj,
int i;
/*
- * Reject if obj is already processed. This test prevents us from finding
+ * Reject if obj is already processed. This test prevents us from finding
* loops that overlap previously-processed loops.
*/
if (processed[obj->dumpId])
@@ -772,7 +772,7 @@ findLoop(DumpableObject *obj,
* A user-defined datatype will have a dependency loop with each of its
* I/O functions (since those have the datatype as input or output).
* Similarly, a range type will have a loop with its canonicalize function,
- * if any. Break the loop by making the function depend on the associated
+ * if any. Break the loop by making the function depend on the associated
* shell type, instead.
*/
static void
@@ -846,7 +846,7 @@ repairViewRuleMultiLoop(DumpableObject *viewobj,
/*
* If a matview is involved in a multi-object loop, we can't currently fix
- * that by splitting off the rule. As a stopgap, we try to fix it by
+ * that by splitting off the rule. As a stopgap, we try to fix it by
* dropping the constraint that the matview be dumped in the pre-data section.
* This is sufficient to handle cases where a matview depends on some unique
* index, as can happen if it has a GROUP BY for example.
@@ -1179,7 +1179,7 @@ repairDependencyLoop(DumpableObject **loop,
/*
* If all the objects are TABLE_DATA items, what we must have is a
* circular set of foreign key constraints (or a single self-referential
- * table). Print an appropriate complaint and break the loop arbitrarily.
+ * table). Print an appropriate complaint and break the loop arbitrarily.
*/
for (i = 0; i < nLoop; i++)
{
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 208e49bbcd..0cc4329b1a 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -475,9 +475,9 @@ main(int argc, char *argv[])
if (!data_only)
{
/*
- * If asked to --clean, do that first. We can avoid detailed
+ * If asked to --clean, do that first. We can avoid detailed
* dependency analysis because databases never depend on each other,
- * and tablespaces never depend on each other. Roles could have
+ * and tablespaces never depend on each other. Roles could have
* grants to each other, but DROP ROLE will clean those up silently.
*/
if (output_clean)
@@ -772,7 +772,7 @@ dumpRoles(PGconn *conn)
* will acquire the right properties even if it already exists (ie, it
* won't hurt for the CREATE to fail). This is particularly important
* for the role we are connected as, since even with --clean we will
- * have failed to drop it. binary_upgrade cannot generate any errors,
+ * have failed to drop it. binary_upgrade cannot generate any errors,
* so we assume the current role is already created.
*/
if (!binary_upgrade ||
@@ -1202,7 +1202,7 @@ dumpCreateDB(PGconn *conn)
* commands for just those databases with values different from defaults.
*
* We consider template0's encoding and locale (or, pre-7.1, template1's)
- * to define the installation default. Pre-8.4 installations do not have
+ * to define the installation default. Pre-8.4 installations do not have
* per-database locale settings; for them, every database must necessarily
* be using the installation default, so there's no need to do anything
* (which is good, since in very old versions there is no good way to find
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index b5d480e7b8..8671c0a4a3 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -31,7 +31,7 @@
/*
* We have to use postgres.h not postgres_fe.h here, because there's so much
* backend-only stuff in the XLOG include files we need. But we need a
- * frontend-ish environment otherwise. Hence this ugly hack.
+ * frontend-ish environment otherwise. Hence this ugly hack.
*/
#define FRONTEND 1
@@ -802,7 +802,7 @@ FindEndOfXLOG(void)
/*
* Initialize the max() computation using the last checkpoint address from
- * old pg_control. Note that for the moment we are working with segment
+ * old pg_control. Note that for the moment we are working with segment
* numbering according to the old xlog seg size.
*/
segs_per_xlogid = (UINT64CONST(0x0000000100000000) / ControlFile.xlog_seg_size);
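
A worked instance of this segment arithmetic, assuming the historical 16MB
WAL segment size:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* assumption: 16MB segments, the long-time default */
    uint64_t xlog_seg_size = UINT64_C(16) * 1024 * 1024;
    uint64_t segs_per_xlogid = UINT64_C(0x0000000100000000) / xlog_seg_size;

    /* 2^32 WAL byte addresses per xlogid / 16MB = 256 segments */
    printf("%llu\n", (unsigned long long) segs_per_xlogid);
    return 0;
}
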
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index dabcd680ff..e49cf445b3 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -1048,7 +1048,7 @@ exec_command(const char *cmd,
{
/* list all variables */
- int i;
+ int i;
static const char *const my_list[] = {
"border", "columns", "expanded", "fieldsep",
"footer", "format", "linestyle", "null",
@@ -1930,17 +1930,17 @@ editFile(const char *fname, int lineno)
#ifndef WIN32
if (lineno > 0)
sys = psprintf("exec %s %s%d '%s'",
- editorName, editor_lineno_arg, lineno, fname);
+ editorName, editor_lineno_arg, lineno, fname);
else
sys = psprintf("exec %s '%s'",
- editorName, fname);
+ editorName, fname);
#else
if (lineno > 0)
sys = psprintf("\"%s\" %s%d \"%s\"",
- editorName, editor_lineno_arg, lineno, fname);
+ editorName, editor_lineno_arg, lineno, fname);
else
sys = psprintf("\"%s\" \"%s\"",
- editorName, fname);
+ editorName, fname);
#endif
result = system(sys);
if (result == -1)
@@ -2463,7 +2463,7 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
printf(_("Border style (%s) unset.\n"), param);
else
printf(_("Border style (%s) is %d.\n"), param,
- popt->topt.border);
+ popt->topt.border);
}
/* show the target width for the wrapped format */
@@ -2473,7 +2473,7 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
printf(_("Target width (%s) unset.\n"), param);
else
printf(_("Target width (%s) is %d.\n"), param,
- popt->topt.columns);
+ popt->topt.columns);
}
/* show expanded/vertical mode */
@@ -2494,7 +2494,7 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
printf(_("Field separator (%s) is zero byte.\n"), param);
else
printf(_("Field separator (%s) is \"%s\".\n"), param,
- popt->topt.fieldSep.separator);
+ popt->topt.fieldSep.separator);
}
else if (strcmp(param, "fieldsep_zero") == 0)
@@ -2518,21 +2518,21 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
printf(_("Output format (%s) is aligned.\n"), param);
else
printf(_("Output format (%s) is %s.\n"), param,
- _align2string(popt->topt.format));
+ _align2string(popt->topt.format));
}
/* show table line style */
else if (strcmp(param, "linestyle") == 0)
{
printf(_("Line style (%s) is %s.\n"), param,
- get_line_style(&popt->topt)->name);
+ get_line_style(&popt->topt)->name);
}
/* show null display */
else if (strcmp(param, "null") == 0)
{
printf(_("Null display (%s) is \"%s\".\n"), param,
- popt->nullPrint ? popt->nullPrint : "");
+ popt->nullPrint ? popt->nullPrint : "");
}
/* show locale-aware numeric output */
@@ -2564,7 +2564,7 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
printf(_("Record separator (%s) is <newline>.\n"), param);
else
printf(_("Record separator (%s) is \"%s\".\n"), param,
- popt->topt.recordSep.separator);
+ popt->topt.recordSep.separator);
}
else if (strcmp(param, "recordsep_zero") == 0)
@@ -2577,7 +2577,7 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
{
if (popt->topt.tableAttr)
printf(_("Table attribute (%s) is \"%s\".\n"), param,
- popt->topt.tableAttr);
+ popt->topt.tableAttr);
else
printf(_("Table attributes (%s) unset.\n"), param);
}
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 6968adfd42..60169a2a7d 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -270,7 +270,7 @@ ConnectionUp(void)
* see if it can be restored.
*
* Returns true if either the connection was still there, or it could be
- * restored successfully; false otherwise. If, however, there was no
+ * restored successfully; false otherwise. If, however, there was no
* connection and the session is non-interactive, this will exit the program
* with a code of EXIT_BADCONN.
*/
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index d706206cc9..c759abfc85 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -339,7 +339,7 @@ do_copy(const char *args)
if (!options->program)
{
struct stat st;
- int result;
+ int result;
/* make sure the specified file is not a directory */
if ((result = fstat(fileno(copystream), &st)) < 0)
@@ -628,7 +628,8 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
/*
* This code erroneously assumes '\.' on a line alone
* inside a quoted CSV string terminates the \copy.
- * http://www.postgresql.org/message-id/E1TdNVQ-0001ju-GO@wrigleys.postgresql.org
+ * http://www.postgresql.org/message-id/E1TdNVQ-0001ju-GO@w
+ * rigleys.postgresql.org
*/
if (strcmp(buf, "\\.\n") == 0 ||
strcmp(buf, "\\.\r\n") == 0)
@@ -677,7 +678,7 @@ copyin_cleanup:
* COPY FROM STDIN commands. We keep trying PQputCopyEnd() in the hope
* it'll work eventually. (What's actually likely to happen is that in
* attempting to flush the data, libpq will eventually realize that the
- * connection is lost. But that's fine; it will get us out of COPY_IN
+ * connection is lost. But that's fine; it will get us out of COPY_IN
* state, which is what we need.)
*/
while (*res = PQgetResult(conn), PQresultStatus(*res) == PGRES_COPY_IN)
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index ffdeda8d57..951b7ee3cd 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -106,7 +106,7 @@ describeAggregates(const char *pattern, bool verbose, bool showSystem)
if (!showSystem && !pattern)
appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
processSQLNamePattern(pset.db, &buf, pattern, true, false,
"n.nspname", "p.proname", NULL,
@@ -449,7 +449,7 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool
if (!showSystem && !pattern)
appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
appendPQExpBufferStr(&buf, "ORDER BY 1, 2, 4;");
@@ -542,7 +542,7 @@ describeTypes(const char *pattern, bool verbose, bool showSystem)
*/
appendPQExpBufferStr(&buf, "WHERE (t.typrelid = 0 ");
appendPQExpBufferStr(&buf, "OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c "
- "WHERE c.oid = t.typrelid))\n");
+ "WHERE c.oid = t.typrelid))\n");
/*
* do not include array types (before 8.3 we have to use the assumption
@@ -555,7 +555,7 @@ describeTypes(const char *pattern, bool verbose, bool showSystem)
if (!showSystem && !pattern)
appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
/* Match name pattern against either internal or external name */
processSQLNamePattern(pset.db, &buf, pattern, true, false,
@@ -777,7 +777,7 @@ permissionsList(const char *pattern)
appendPQExpBufferStr(&buf, "\nFROM pg_catalog.pg_class c\n"
" LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
- "WHERE c.relkind IN ('r', 'v', 'm', 'S', 'f')\n");
+ "WHERE c.relkind IN ('r', 'v', 'm', 'S', 'f')\n");
/*
* Unless a schema pattern is specified, we suppress system and temp
@@ -958,7 +958,7 @@ objectDescription(const char *pattern, bool showSystem)
if (!showSystem && !pattern)
appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
processSQLNamePattern(pset.db, &buf, pattern, true, false,
"n.nspname", "o.opcname", NULL,
@@ -987,7 +987,7 @@ objectDescription(const char *pattern, bool showSystem)
if (!showSystem && !pattern)
appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
processSQLNamePattern(pset.db, &buf, pattern, true, false,
"n.nspname", "opf.opfname", NULL,
@@ -1363,8 +1363,8 @@ describeOneTableDetails(const char *schemaname,
appendPQExpBufferStr(&buf, ",\n NULL AS indexdef");
if (tableinfo.relkind == 'f' && pset.sversion >= 90200)
appendPQExpBufferStr(&buf, ",\n CASE WHEN attfdwoptions IS NULL THEN '' ELSE "
- " '(' || array_to_string(ARRAY(SELECT quote_ident(option_name) || ' ' || quote_literal(option_value) FROM "
- " pg_options_to_table(attfdwoptions)), ', ') || ')' END AS attfdwoptions");
+ " '(' || array_to_string(ARRAY(SELECT quote_ident(option_name) || ' ' || quote_literal(option_value) FROM "
+ " pg_options_to_table(attfdwoptions)), ', ') || ')' END AS attfdwoptions");
else
appendPQExpBufferStr(&buf, ",\n NULL AS attfdwoptions");
if (verbose)
@@ -1612,13 +1612,13 @@ describeOneTableDetails(const char *schemaname,
if (pset.sversion >= 90000)
appendPQExpBufferStr(&buf,
" (NOT i.indimmediate) AND "
- "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
+ "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"WHERE conrelid = i.indrelid AND "
"conindid = i.indexrelid AND "
"contype IN ('p','u','x') AND "
"condeferrable) AS condeferrable,\n"
" (NOT i.indimmediate) AND "
- "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
+ "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"WHERE conrelid = i.indrelid AND "
"conindid = i.indexrelid AND "
"contype IN ('p','u','x') AND "
@@ -1755,11 +1755,11 @@ describeOneTableDetails(const char *schemaname,
if (pset.sversion >= 90000)
appendPQExpBufferStr(&buf,
"pg_catalog.pg_get_constraintdef(con.oid, true), "
- "contype, condeferrable, condeferred");
+ "contype, condeferrable, condeferred");
else
appendPQExpBufferStr(&buf,
- "null AS constraintdef, null AS contype, "
- "false AS condeferrable, false AS condeferred");
+ "null AS constraintdef, null AS contype, "
+ "false AS condeferrable, false AS condeferred");
if (pset.sversion >= 90400)
appendPQExpBufferStr(&buf, ", i.indisreplident");
else
@@ -2188,7 +2188,7 @@ describeOneTableDetails(const char *schemaname,
printfPQExpBuffer(&buf, _("Disabled triggers:"));
break;
case 2:
- printfPQExpBuffer(&buf, _("Disabled internal triggers:"));
+ printfPQExpBuffer(&buf, _("Disabled internal triggers:"));
break;
case 3:
printfPQExpBuffer(&buf, _("Triggers firing always:"));
@@ -2346,10 +2346,11 @@ describeOneTableDetails(const char *schemaname,
}
if (verbose && (tableinfo.relkind == 'r' || tableinfo.relkind == 'm') &&
- /*
- * No need to display default values; we already display a
- * REPLICA IDENTITY marker on indexes.
- */
+
+ /*
+ * No need to display default values; we already display a REPLICA
+ * IDENTITY marker on indexes.
+ */
tableinfo.relreplident != 'i' &&
((strcmp(schemaname, "pg_catalog") != 0 && tableinfo.relreplident != 'd') ||
(strcmp(schemaname, "pg_catalog") == 0 && tableinfo.relreplident != 'n')))
@@ -2817,7 +2818,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
if (showForeign)
appendPQExpBufferStr(&buf, "'f',");
- appendPQExpBufferStr(&buf, "''"); /* dummy */
+ appendPQExpBufferStr(&buf, "''"); /* dummy */
appendPQExpBufferStr(&buf, ")\n");
if (!showSystem && !pattern)
@@ -2997,7 +2998,7 @@ listDomains(const char *pattern, bool verbose, bool showSystem)
if (verbose)
appendPQExpBufferStr(&buf,
" LEFT JOIN pg_catalog.pg_description d "
- "ON d.classoid = t.tableoid AND d.objoid = t.oid "
+ "ON d.classoid = t.tableoid AND d.objoid = t.oid "
"AND d.objsubid = 0\n");
appendPQExpBufferStr(&buf, "WHERE t.typtype = 'd'\n");
@@ -3039,7 +3040,7 @@ listConversions(const char *pattern, bool verbose, bool showSystem)
PGresult *res;
printQueryOpt myopt = pset.popt;
static const bool translate_columns[] =
- {false, false, false, false, true, false};
+ {false, false, false, false, true, false};
initPQExpBuffer(&buf);
@@ -3078,7 +3079,7 @@ listConversions(const char *pattern, bool verbose, bool showSystem)
if (!showSystem && !pattern)
appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
processSQLNamePattern(pset.db, &buf, pattern, true, false,
"n.nspname", "c.conname", NULL,
@@ -3129,7 +3130,7 @@ listEventTriggers(const char *pattern, bool verbose)
" when 'D' then '%s' end as \"%s\",\n"
" e.evtfoid::pg_catalog.regproc as \"%s\", "
"pg_catalog.array_to_string(array(select x"
- " from pg_catalog.unnest(evttags) as t(x)), ', ') as \"%s\"",
+ " from pg_catalog.unnest(evttags) as t(x)), ', ') as \"%s\"",
gettext_noop("Name"),
gettext_noop("Event"),
gettext_noop("Owner"),
@@ -3307,11 +3308,11 @@ listCollations(const char *pattern, bool verbose, bool showSystem)
appendPQExpBufferStr(&buf,
"\nFROM pg_catalog.pg_collation c, pg_catalog.pg_namespace n\n"
- "WHERE n.oid = c.collnamespace\n");
+ "WHERE n.oid = c.collnamespace\n");
if (!showSystem && !pattern)
appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
/*
* Hide collations that aren't usable in the current database's encoding.
@@ -4106,7 +4107,7 @@ listForeignServers(const char *pattern, bool verbose)
if (verbose)
appendPQExpBufferStr(&buf,
"LEFT JOIN pg_description d\n "
- "ON d.classoid = s.tableoid AND d.objoid = s.oid "
+ "ON d.classoid = s.tableoid AND d.objoid = s.oid "
"AND d.objsubid = 0\n");
processSQLNamePattern(pset.db, &buf, pattern, false, false,
@@ -4426,7 +4427,7 @@ listOneExtensionContents(const char *extname, const char *oid)
* printACLColumn
*
* Helper function for consistently formatting ACL (privilege) columns.
- * The proper targetlist entry is appended to buf. Note lack of any
+ * The proper targetlist entry is appended to buf. Note lack of any
* whitespace or comma decoration.
*/
static void
diff --git a/src/bin/psql/input.c b/src/bin/psql/input.c
index 8351080c01..aa32a3f5c1 100644
--- a/src/bin/psql/input.c
+++ b/src/bin/psql/input.c
@@ -38,7 +38,7 @@ static int history_lines_added;
* Preserve newlines in saved queries by mapping '\n' to NL_IN_HISTORY
*
* It is assumed NL_IN_HISTORY will never be entered by the user
- * nor appear inside a multi-byte string. 0x00 is not properly
+ * nor appear inside a multi-byte string. 0x00 is not properly
* handled by the readline routines so it can not be used
* for this purpose.
*/
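
A self-contained sketch of the mapping described above (0x01 as the sentinel
matches psql's choice for NL_IN_HISTORY, but treat that as an assumption here):

#define NL_IN_HISTORY 0x01      /* a byte the user can never type */

/* Replace newlines with the sentinel before handing a line to readline. */
static void
encode_history(char *line)
{
    for (; *line; line++)
        if (*line == '\n')
            *line = NL_IN_HISTORY;
}

/* Restore newlines when reading saved history back in. */
static void
decode_history(char *line)
{
    for (; *line; line++)
        if (*line == NL_IN_HISTORY)
            *line = '\n';
}
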
@@ -157,7 +157,7 @@ pg_send_history(PQExpBuffer history_buf)
*
* Caller *must* have set up sigint_interrupt_jmp before calling.
*
- * Note: we re-use a static PQExpBuffer for each call. This is to avoid
+ * Note: we re-use a static PQExpBuffer for each call. This is to avoid
* leaking memory if interrupted by SIGINT.
*/
char *
@@ -393,7 +393,7 @@ saveHistory(char *fname, int max_lines, bool appendFlag, bool encodeFlag)
/* truncate what we have ... */
if (max_lines >= 0)
stifle_history(max_lines);
- /* ... and overwrite file. Tough luck for concurrent sessions. */
+ /* ... and overwrite file. Tough luck for concurrent sessions. */
errno = 0;
(void) write_history(fname);
if (errno == 0)
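A minimal, standalone sketch of the newline-mapping idea described in the hunk above. The 0x01 sentinel is this sketch's assumption (psql defines its own NL_IN_HISTORY value for the purpose); the point is only that the mapping is applied before a line is stored and reversed when it is read back:

    /* Toy demonstration, not psql's actual history code. */
    #include <stdio.h>

    #define NL_IN_HISTORY 0x01      /* assumed sentinel; never typed by users */

    static void
    encode_newlines(char *s)
    {
        for (; *s; s++)
            if (*s == '\n')
                *s = NL_IN_HISTORY; /* make the query a single history line */
    }

    static void
    decode_newlines(char *s)
    {
        for (; *s; s++)
            if (*s == NL_IN_HISTORY)
                *s = '\n';          /* restore the original layout */
    }

    int
    main(void)
    {
        char    query[] = "SELECT 1\nFROM t;";

        encode_newlines(query);
        decode_newlines(query);
        puts(query);
        return 0;
    }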
diff --git a/src/bin/psql/large_obj.c b/src/bin/psql/large_obj.c
index e8ab4bdb4e..a26ace8b32 100644
--- a/src/bin/psql/large_obj.c
+++ b/src/bin/psql/large_obj.c
@@ -47,7 +47,7 @@ print_lo_result(const char *fmt,...)
/*
- * Prepare to do a large-object operation. We *must* be inside a transaction
+ * Prepare to do a large-object operation. We *must* be inside a transaction
* block for all these operations, so start one if needed.
*
* Returns TRUE if okay, FALSE if failed. *own_transaction is set to indicate
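The transaction-block requirement above can be met with the usual "begin one if we are idle" pattern. A hedged sketch using only documented libpq calls (error reporting abbreviated; this is not large_obj.c's actual code):

    #include <stdbool.h>
    #include <libpq-fe.h>

    /* Start a transaction block if not already in one; link with -lpq. */
    static bool
    ensure_transaction(PGconn *conn, bool *own_transaction)
    {
        PGresult   *res;

        *own_transaction = (PQtransactionStatus(conn) == PQTRANS_IDLE);
        if (!*own_transaction)
            return true;            /* caller was already inside a block */

        res = PQexec(conn, "BEGIN");
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
        {
            PQclear(res);
            return false;
        }
        PQclear(res);
        return true;
    }

The *own_transaction flag tells the caller whether it must later issue the matching COMMIT or ROLLBACK itself.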
diff --git a/src/bin/psql/mainloop.c b/src/bin/psql/mainloop.c
index d2b813dd6a..c3aff208bf 100644
--- a/src/bin/psql/mainloop.c
+++ b/src/bin/psql/mainloop.c
@@ -277,7 +277,7 @@ MainLoop(FILE *source)
* If we added a newline to query_buf, and nothing else has
* been inserted in query_buf by the lexer, then strip off the
* newline again. This avoids any change to query_buf when a
- * line contains only a backslash command. Also, in this
+ * line contains only a backslash command. Also, in this
* situation we force out any previous lines as a separate
* history entry; we don't want SQL and backslash commands
* intermixed in history if at all possible.
@@ -419,7 +419,7 @@ MainLoop(FILE *source)
* psqlscan.c is #include'd here instead of being compiled on its own.
* This is because we need postgres_fe.h to be read before any system
* include files, else things tend to break on platforms that have
- * multiple infrastructures for stdio.h and so on. flex is absolutely
+ * multiple infrastructures for stdio.h and so on. flex is absolutely
* uncooperative about that, so we can't compile psqlscan.c on its own.
*/
#include "psqlscan.c"
diff --git a/src/bin/psql/mbprint.c b/src/bin/psql/mbprint.c
index ef3346ff05..470de90735 100644
--- a/src/bin/psql/mbprint.c
+++ b/src/bin/psql/mbprint.c
@@ -20,7 +20,7 @@
* To avoid version-skew problems, this file must not use declarations
* from pg_wchar.h: the encoding IDs we are dealing with are determined
* by the libpq.so we are linked with, and that might not match the
- * numbers we see at compile time. (If this file were inside libpq,
+ * numbers we see at compile time. (If this file were inside libpq,
* the problem would go away...)
*
* Hence, we have our own definition of pg_wchar, and we get the values
diff --git a/src/bin/psql/print.c b/src/bin/psql/print.c
index 08fe907d8d..62850d8de5 100644
--- a/src/bin/psql/print.c
+++ b/src/bin/psql/print.c
@@ -253,7 +253,7 @@ print_separator(struct separator sep, FILE *fout)
/*
* Return the list of explicitly-requested footers or, when applicable, the
- * default "(xx rows)" footer. Always omit the default footer when given
+ * default "(xx rows)" footer. Always omit the default footer when given
* non-default footers, "\pset footer off", or a specific instruction to that
* effect from a calling backslash command. Vertical formats number each row,
* making the default footer redundant; they do not call this function.
@@ -689,7 +689,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
* Optional optimized word wrap. Shrink columns with a high max/avg
* ratio. Slightly bias against wider columns. (Increases chance a
* narrow column will fit in its cell.) If available columns is
- * positive... and greater than the width of the unshrinkable column
+ * positive... and greater than the width of the unshrinkable column
* headers
*/
if (output_columns > 0 && output_columns >= total_header_width)
@@ -1257,17 +1257,20 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
if (cont->opt->format == PRINT_WRAPPED)
{
- /* Calculate the available width to wrap the columns to after
+ /*
+ * Calculate the available width to wrap the columns to after
* subtracting the maximum header width and separators. At a minimum
- * enough to print "[ RECORD N ]" */
- unsigned int width, swidth;
+ * enough to print "[ RECORD N ]"
+ */
+ unsigned int width,
+ swidth;
if (opt_border == 0)
- swidth = 1; /* "header data" */
+ swidth = 1; /* "header data" */
else if (opt_border == 1)
- swidth = 3; /* "header | data" */
+ swidth = 3; /* "header | data" */
else
- swidth = 7; /* "| header | data |" */
+ swidth = 7; /* "| header | data |" */
/* Wrap to maximum width */
width = dwidth + swidth + hwidth;
@@ -1280,13 +1283,14 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
/* Wrap to minimum width */
if (!opt_tuples_only)
{
- int delta = 1 + log10(cont->nrows) - width;
+ int delta = 1 + log10(cont->nrows) - width;
+
if (opt_border == 0)
- delta += 6; /* "* RECORD " */
+ delta += 6; /* "* RECORD " */
else if (opt_border == 1)
- delta += 10; /* "-[ RECORD ]" */
+ delta += 10; /* "-[ RECORD ]" */
else
- delta += 15; /* "+-[ RECORD ]-+" */
+ delta += 15; /* "+-[ RECORD ]-+" */
if (delta > 0)
dwidth += delta;
@@ -1333,8 +1337,10 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
pg_wcsformat((const unsigned char *) *ptr, strlen(*ptr), encoding,
dlineptr, dheight);
- /* Loop through header and data in parallel dealing with newlines and
- * wrapped lines until they're both exhausted */
+ /*
+ * Loop through header and data in parallel dealing with newlines and
+ * wrapped lines until they're both exhausted
+ */
dline = hline = 0;
dcomplete = hcomplete = 0;
offset = 0;
@@ -1348,8 +1354,10 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
/* Header (never wrapped so just need to deal with newlines) */
if (!hcomplete)
{
- int swidth, twidth = hwidth + 1;
- fputs(hline? format->header_nl_left: " ", fout);
+ int swidth,
+ twidth = hwidth + 1;
+
+ fputs(hline ? format->header_nl_left : " ", fout);
strlen_max_width(hlineptr[hline].ptr, &twidth,
encoding);
fprintf(fout, "%-s", hlineptr[hline].ptr);
@@ -1393,16 +1401,16 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
/* Data */
if (!dcomplete)
{
- int target_width,
- bytes_to_output,
- swidth;
+ int target_width,
+ bytes_to_output,
+ swidth;
- fputs(!dcomplete && !offset? " ": format->wrap_left, fout);
+ fputs(!dcomplete && !offset ? " " : format->wrap_left, fout);
target_width = dwidth;
bytes_to_output = strlen_max_width(dlineptr[dline].ptr + offset,
&target_width, encoding);
- fputnbytes(fout, (char *)(dlineptr[dline].ptr + offset),
+ fputnbytes(fout, (char *) (dlineptr[dline].ptr + offset),
bytes_to_output);
chars_to_output -= target_width;
@@ -1440,8 +1448,10 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
}
else
{
- /* data exhausted (this can occur if header is longer than the
- * data due to newlines in the header) */
+ /*
+ * data exhausted (this can occur if header is longer than the
+ * data due to newlines in the header)
+ */
if (opt_border < 2)
fputs("\n", fout);
else
@@ -2510,7 +2520,7 @@ printTableAddCell(printTableContent *const content, char *cell,
* strdup'd, so there is no need to keep the original footer string around.
*
* Footers are never translated by the function. If you want the footer
- * translated you must do so yourself, before calling printTableAddFooter. The
+ * translated you must do so yourself, before calling printTableAddFooter. The
* reason this works differently to headers and cells is that footers tend to
* be made up of individually translated components, rather than being
* translated as a whole.
@@ -2846,7 +2856,7 @@ get_line_style(const printTableOpt *opt)
/*
* Compute the byte distance to the end of the string or *target_width
- * display character positions, whichever comes first. Update *target_width
+ * display character positions, whichever comes first. Update *target_width
* to be the number of display character positions actually filled.
*/
static int
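The border-dependent constants in the wrapped-format hunks above can be checked with a little arithmetic. A standalone sketch of the same computation (sample widths are made up; compile with -lm):

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        int     opt_border = 1;
        int     hwidth = 8, dwidth = 5;     /* assumed header/data widths */
        int     nrows = 1234;
        int     swidth, width, delta;

        if (opt_border == 0)
            swidth = 1;                     /* "header data" */
        else if (opt_border == 1)
            swidth = 3;                     /* "header | data" */
        else
            swidth = 7;                     /* "| header | data |" */

        width = dwidth + swidth + hwidth;

        /* widen the data column until "[ RECORD N ]" fits */
        delta = 1 + (int) log10((double) nrows) - width;
        if (opt_border == 0)
            delta += 6;                     /* "* RECORD " */
        else if (opt_border == 1)
            delta += 10;                    /* "-[ RECORD ]" */
        else
            delta += 15;                    /* "+-[ RECORD ]-+" */
        if (delta > 0)
            dwidth += delta;

        printf("swidth=%d dwidth=%d\n", swidth, dwidth);
        return 0;
    }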
diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h
index eecffb1073..0a60e6817b 100644
--- a/src/bin/psql/settings.h
+++ b/src/bin/psql/settings.h
@@ -96,7 +96,7 @@ typedef struct _psqlSettings
/*
* The remaining fields are set by assign hooks associated with entries in
- * "vars". They should not be set directly except by those hook
+ * "vars". They should not be set directly except by those hook
* functions.
*/
bool autocommit;
diff --git a/src/bin/psql/stringutils.c b/src/bin/psql/stringutils.c
index 6049ab56c3..9ea3e19215 100644
--- a/src/bin/psql/stringutils.c
+++ b/src/bin/psql/stringutils.c
@@ -74,7 +74,7 @@ strtokx(const char *s,
/*
* We may need extra space to insert delimiter nulls for adjacent
- * tokens. 2X the space is a gross overestimate, but it's unlikely
+ * tokens. 2X the space is a gross overestimate, but it's unlikely
* that this code will be used on huge strings anyway.
*/
storage = pg_malloc(2 * strlen(s) + 1);
@@ -104,7 +104,7 @@ strtokx(const char *s,
{
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. We can just overwrite the next character if it
+ * returned token. We can just overwrite the next character if it
* happens to be in the whitespace set ... otherwise move over the
* rest of the string to make room. (This is why we allocated extra
* space above).
@@ -158,7 +158,7 @@ strtokx(const char *s,
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. See notes above.
+ * returned token. See notes above.
*/
if (*p != '\0')
{
@@ -181,7 +181,7 @@ strtokx(const char *s,
}
/*
- * Otherwise no quoting character. Scan till next whitespace, delimiter
+ * Otherwise no quoting character. Scan till next whitespace, delimiter
* or quote. NB: at this point, *start is known not to be '\0',
* whitespace, delim, or quote, so we will consume at least one character.
*/
@@ -207,7 +207,7 @@ strtokx(const char *s,
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. See notes above.
+ * returned token. See notes above.
*/
if (*p != '\0')
{
@@ -274,7 +274,7 @@ strip_quotes(char *source, char quote, char escape, int encoding)
/*
* quote_if_needed
*
- * Opposite of strip_quotes(). If "source" denotes itself literally without
+ * Opposite of strip_quotes(). If "source" denotes itself literally without
* quoting or escaping, returns NULL. Otherwise, returns a malloc'd copy with
* quoting and escaping applied:
*
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 6d26ffcd7d..3bb727f05c 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -86,7 +86,7 @@ typedef struct SchemaQuery
/*
* Selection condition --- only rows meeting this condition are candidates
- * to display. If catname mentions multiple tables, include the necessary
+ * to display. If catname mentions multiple tables, include the necessary
* join condition here. For example, "c.relkind = 'r'". Write NULL (not
* an empty string) if not needed.
*/
@@ -481,7 +481,7 @@ static const SchemaQuery Query_for_list_of_matviews = {
* restricted to names matching a partially entered name. In these queries,
* the first %s will be replaced by the text entered so far (suitably escaped
* to become a SQL literal string). %d will be replaced by the length of the
- * string (in unescaped form). A second and third %s, if present, will be
+ * string (in unescaped form). A second and third %s, if present, will be
* replaced by a suitably-escaped version of the string provided in
* completion_info_charp. A fourth and fifth %s are similarly replaced by
* completion_info_charp2.
@@ -942,7 +942,7 @@ psql_completion(const char *text, int start, int end)
{"AGGREGATE", "COLLATION", "CONVERSION", "DATABASE", "DEFAULT PRIVILEGES", "DOMAIN",
"EVENT TRIGGER", "EXTENSION", "FOREIGN DATA WRAPPER", "FOREIGN TABLE", "FUNCTION",
"GROUP", "INDEX", "LANGUAGE", "LARGE OBJECT", "MATERIALIZED VIEW", "OPERATOR",
- "ROLE", "RULE", "SCHEMA", "SERVER", "SEQUENCE", "SYSTEM SET", "TABLE",
+ "ROLE", "RULE", "SCHEMA", "SERVER", "SEQUENCE", "SYSTEM SET", "TABLE",
"TABLESPACE", "TEXT SEARCH", "TRIGGER", "TYPE",
"USER", "USER MAPPING FOR", "VIEW", NULL};
@@ -1023,7 +1023,7 @@ psql_completion(const char *text, int start, int end)
pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
{
static const char *const list_ALTER_EVENT_TRIGGER[] =
- {"DISABLE", "ENABLE", "OWNER TO", "RENAME TO", NULL};
+ {"DISABLE", "ENABLE", "OWNER TO", "RENAME TO", NULL};
COMPLETE_WITH_LIST(list_ALTER_EVENT_TRIGGER);
}
@@ -1035,7 +1035,7 @@ psql_completion(const char *text, int start, int end)
pg_strcasecmp(prev_wd, "ENABLE") == 0)
{
static const char *const list_ALTER_EVENT_TRIGGER_ENABLE[] =
- {"REPLICA", "ALWAYS", NULL};
+ {"REPLICA", "ALWAYS", NULL};
COMPLETE_WITH_LIST(list_ALTER_EVENT_TRIGGER_ENABLE);
}
@@ -1385,7 +1385,7 @@ psql_completion(const char *text, int start, int end)
static const char *const list_ALTER2[] =
{"ADD", "ALTER", "CLUSTER ON", "DISABLE", "DROP", "ENABLE", "INHERIT",
"NO INHERIT", "RENAME", "RESET", "OWNER TO", "SET",
- "VALIDATE CONSTRAINT", "REPLICA IDENTITY", NULL};
+ "VALIDATE CONSTRAINT", "REPLICA IDENTITY", NULL};
COMPLETE_WITH_LIST(list_ALTER2);
}
@@ -3451,9 +3451,9 @@ psql_completion(const char *text, int start, int end)
{
static const char *const my_list[] =
{"border", "columns", "expanded", "fieldsep", "fieldsep_zero",
- "footer", "format", "linestyle", "null", "numericlocale",
- "pager", "recordsep", "recordsep_zero", "tableattr", "title",
- "tuples_only", NULL};
+ "footer", "format", "linestyle", "null", "numericlocale",
+ "pager", "recordsep", "recordsep_zero", "tableattr", "title",
+ "tuples_only", NULL};
COMPLETE_WITH_LIST_CS(my_list);
}
@@ -3721,7 +3721,7 @@ _complete_from_query(int is_schema_query, const char *text, int state)
/*
* When fetching relation names, suppress system catalogs unless
- * the input-so-far begins with "pg_". This is a compromise
+ * the input-so-far begins with "pg_". This is a compromise
* between not offering system catalogs for completion at all, and
* having them swamp the result when the input is just "p".
*/
@@ -4094,7 +4094,7 @@ exec_query(const char *query)
/*
- * Return the nwords word(s) before point. Words are returned right to left,
+ * Return the nwords word(s) before point. Words are returned right to left,
* that is, previous_words[0] gets the last word before point.
* If we run out of words, remaining array elements are set to empty strings.
* Each array element is filled with a malloc'd string.
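The %s/%d template convention described above can be illustrated with printf-style substitution. This is only an illustration: the real completion code performs its own scan-and-substitute with proper escaping, and the query text here is invented:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *tmpl =
            "SELECT c.relname FROM pg_catalog.pg_class c "
            "WHERE substring(c.relname,1,%d)='%s'";
        const char *e_text = "pg_sta";  /* input so far, assumed escaped */
        char    query[256];

        snprintf(query, sizeof(query), tmpl, (int) strlen(e_text), e_text);
        puts(query);
        return 0;
    }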
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index 33d79fe9b0..1f871d7fa0 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -15,7 +15,7 @@
* Check whether a variable's name is allowed.
*
* We allow any non-ASCII character, as well as ASCII letters, digits, and
- * underscore. Keep this in sync with the definition of variable_char in
+ * underscore. Keep this in sync with the definition of variable_char in
* psqlscan.l.
*/
static bool
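A runnable sketch of the name rule the comment above states: any high-bit-set (non-ASCII) byte is accepted, plus ASCII letters, digits, and underscore. It mirrors the comment, not necessarily psql's exact code:

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    valid_variable_name_sketch(const char *name)
    {
        const unsigned char *ptr = (const unsigned char *) name;

        if (*ptr == '\0')
            return false;               /* reject empty names */
        for (; *ptr; ptr++)
        {
            if (*ptr >= 0x80 ||         /* any non-ASCII byte */
                (*ptr >= 'A' && *ptr <= 'Z') ||
                (*ptr >= 'a' && *ptr <= 'z') ||
                (*ptr >= '0' && *ptr <= '9') ||
                *ptr == '_')
                continue;
            return false;
        }
        return true;
    }

    int
    main(void)
    {
        printf("%d %d\n", valid_variable_name_sketch("foo_1"),
               valid_variable_name_sketch("no good"));  /* prints: 1 0 */
        return 0;
    }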
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index 7a7e8d9fdc..311fed5090 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -244,7 +244,7 @@ executeMaintenanceCommand(PGconn *conn, const char *query, bool echo)
}
/*
- * Check yes/no answer in a localized way. 1=yes, 0=no, -1=neither.
+ * Check yes/no answer in a localized way. 1=yes, 0=no, -1=neither.
*/
/* translator: abbreviation for "yes" */
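A sketch of the 1/0/-1 contract stated above. In the real scripts the "y"/"n" literals pass through gettext (hence the translator markers); plain strcmp stands in here:

    #include <stdio.h>
    #include <string.h>

    static int
    check_yesno(const char *resp)
    {
        if (strcmp(resp, "y") == 0 || strcmp(resp, "yes") == 0)
            return 1;
        if (strcmp(resp, "n") == 0 || strcmp(resp, "no") == 0)
            return 0;
        return -1;                      /* neither */
    }

    int
    main(void)
    {
        printf("%d %d %d\n", check_yesno("y"), check_yesno("no"),
               check_yesno("maybe"));   /* prints: 1 0 -1 */
        return 0;
    }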
diff --git a/src/bin/scripts/createuser.c b/src/bin/scripts/createuser.c
index 24c4beb243..2d49bc2f1e 100644
--- a/src/bin/scripts/createuser.c
+++ b/src/bin/scripts/createuser.c
@@ -310,6 +310,7 @@ main(int argc, char *argv[])
if (roles.head != NULL)
{
SimpleStringListCell *cell;
+
appendPQExpBufferStr(&sql, " IN ROLE ");
for (cell = roles.head; cell; cell = cell->next)
diff --git a/src/bin/scripts/pg_isready.c b/src/bin/scripts/pg_isready.c
index e1fbc54496..7707bf13ea 100644
--- a/src/bin/scripts/pg_isready.c
+++ b/src/bin/scripts/pg_isready.c
@@ -41,7 +41,7 @@ main(int argc, char **argv)
bool quiet = false;
- PGPing rv;
+ PGPing rv;
PQconninfoOption *opts = NULL;
PQconninfoOption *defs = NULL;
PQconninfoOption *opt;
diff --git a/src/bin/scripts/t/010_clusterdb.pl b/src/bin/scripts/t/010_clusterdb.pl
index 371b2dd2d3..fe22cdbb4e 100644
--- a/src/bin/scripts/t/010_clusterdb.pl
+++ b/src/bin/scripts/t/010_clusterdb.pl
@@ -10,9 +10,17 @@ program_options_handling_ok('clusterdb');
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['clusterdb', 'postgres'], qr/statement: CLUSTER;/, 'SQL CLUSTER run');
+issues_sql_like(
+ [ 'clusterdb', 'postgres' ],
+ qr/statement: CLUSTER;/,
+ 'SQL CLUSTER run');
-command_fails(['clusterdb', '-t', 'nonexistent', 'postgres'], 'fails with nonexistent table');
+command_fails([ 'clusterdb', '-t', 'nonexistent', 'postgres' ],
+ 'fails with nonexistent table');
-psql 'postgres', 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x';
-issues_sql_like(['clusterdb', 'postgres', '-t', 'test1'], qr/statement: CLUSTER test1;/, 'cluster specific table');
+psql 'postgres',
+'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x';
+issues_sql_like(
+ [ 'clusterdb', 'postgres', '-t', 'test1' ],
+ qr/statement: CLUSTER test1;/,
+ 'cluster specific table');
diff --git a/src/bin/scripts/t/011_clusterdb_all.pl b/src/bin/scripts/t/011_clusterdb_all.pl
index 304c4befa5..eb2016497e 100644
--- a/src/bin/scripts/t/011_clusterdb_all.pl
+++ b/src/bin/scripts/t/011_clusterdb_all.pl
@@ -6,4 +6,7 @@ use Test::More tests => 1;
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['clusterdb', '-a'], qr/statement: CLUSTER.*statement: CLUSTER/s, 'cluster all databases');
+issues_sql_like(
+ [ 'clusterdb', '-a' ],
+ qr/statement: CLUSTER.*statement: CLUSTER/s,
+ 'cluster all databases');
diff --git a/src/bin/scripts/t/020_createdb.pl b/src/bin/scripts/t/020_createdb.pl
index 8b82a2bd96..a8e8f3b4d1 100644
--- a/src/bin/scripts/t/020_createdb.pl
+++ b/src/bin/scripts/t/020_createdb.pl
@@ -10,7 +10,13 @@ program_options_handling_ok('createdb');
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['createdb', 'foobar1'], qr/statement: CREATE DATABASE foobar1/, 'SQL CREATE DATABASE run');
-issues_sql_like(['createdb', 'foobar2', '-l', 'C', '-E', 'LATIN1', '-T', 'template0'], qr/statement: CREATE DATABASE foobar2 ENCODING 'LATIN1'/, 'create database with encoding');
+issues_sql_like(
+ [ 'createdb', 'foobar1' ],
+ qr/statement: CREATE DATABASE foobar1/,
+ 'SQL CREATE DATABASE run');
+issues_sql_like(
+ [ 'createdb', 'foobar2', '-l', 'C', '-E', 'LATIN1', '-T', 'template0' ],
+ qr/statement: CREATE DATABASE foobar2 ENCODING 'LATIN1'/,
+ 'create database with encoding');
-command_fails(['createdb', 'foobar1'], 'fails if database already exists');
+command_fails([ 'createdb', 'foobar1' ], 'fails if database already exists');
diff --git a/src/bin/scripts/t/030_createlang.pl b/src/bin/scripts/t/030_createlang.pl
index 9a87f4c89c..292021611b 100644
--- a/src/bin/scripts/t/030_createlang.pl
+++ b/src/bin/scripts/t/030_createlang.pl
@@ -10,9 +10,15 @@ program_options_handling_ok('createlang');
my $tempdir = tempdir;
start_test_server $tempdir;
-command_fails(['createlang', 'plpgsql', 'postgres'], 'fails if language already exists');
+command_fails(
+ [ 'createlang', 'plpgsql', 'postgres' ],
+ 'fails if language already exists');
psql 'postgres', 'DROP EXTENSION plpgsql';
-issues_sql_like(['createlang', 'plpgsql', 'postgres'], qr/statement: CREATE EXTENSION "plpgsql"/, 'SQL CREATE EXTENSION run');
+issues_sql_like(
+ [ 'createlang', 'plpgsql', 'postgres' ],
+ qr/statement: CREATE EXTENSION "plpgsql"/,
+ 'SQL CREATE EXTENSION run');
-command_like(['createlang', '--list', 'postgres'], qr/plpgsql/, 'list output');
+command_like([ 'createlang', '--list', 'postgres' ],
+ qr/plpgsql/, 'list output');
diff --git a/src/bin/scripts/t/040_createuser.pl b/src/bin/scripts/t/040_createuser.pl
index 922873ab37..8837c2b6e9 100644
--- a/src/bin/scripts/t/040_createuser.pl
+++ b/src/bin/scripts/t/040_createuser.pl
@@ -10,17 +10,21 @@ program_options_handling_ok('createuser');
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['createuser', 'user1'],
- qr/statement: CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN;/,
- 'SQL CREATE USER run');
-issues_sql_like(['createuser', '-L', 'role1'],
- qr/statement: CREATE ROLE role1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN;/,
- 'create a non-login role');
-issues_sql_like(['createuser', '-r', 'user2'],
- qr/statement: CREATE ROLE user2 NOSUPERUSER NOCREATEDB CREATEROLE INHERIT LOGIN;/,
- 'create a CREATEROLE user');
-issues_sql_like(['createuser', '-s', 'user3'],
- qr/statement: CREATE ROLE user3 SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN;/,
- 'create a superuser');
+issues_sql_like(
+ [ 'createuser', 'user1' ],
+qr/statement: CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN;/,
+ 'SQL CREATE USER run');
+issues_sql_like(
+ [ 'createuser', '-L', 'role1' ],
+qr/statement: CREATE ROLE role1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN;/,
+ 'create a non-login role');
+issues_sql_like(
+ [ 'createuser', '-r', 'user2' ],
+qr/statement: CREATE ROLE user2 NOSUPERUSER NOCREATEDB CREATEROLE INHERIT LOGIN;/,
+ 'create a CREATEROLE user');
+issues_sql_like(
+ [ 'createuser', '-s', 'user3' ],
+qr/statement: CREATE ROLE user3 SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN;/,
+ 'create a superuser');
-command_fails(['createuser', 'user1'], 'fails if role already exists');
+command_fails([ 'createuser', 'user1' ], 'fails if role already exists');
diff --git a/src/bin/scripts/t/050_dropdb.pl b/src/bin/scripts/t/050_dropdb.pl
index 3662dd01c1..04a8789d88 100644
--- a/src/bin/scripts/t/050_dropdb.pl
+++ b/src/bin/scripts/t/050_dropdb.pl
@@ -11,6 +11,9 @@ my $tempdir = tempdir;
start_test_server $tempdir;
psql 'postgres', 'CREATE DATABASE foobar1';
-issues_sql_like(['dropdb', 'foobar1'], qr/statement: DROP DATABASE foobar1/, 'SQL DROP DATABASE run');
+issues_sql_like(
+ [ 'dropdb', 'foobar1' ],
+ qr/statement: DROP DATABASE foobar1/,
+ 'SQL DROP DATABASE run');
-command_fails(['dropdb', 'nonexistent'], 'fails with nonexistent database');
+command_fails([ 'dropdb', 'nonexistent' ], 'fails with nonexistent database');
diff --git a/src/bin/scripts/t/060_droplang.pl b/src/bin/scripts/t/060_droplang.pl
index 47cb48f117..09fb2f3b07 100644
--- a/src/bin/scripts/t/060_droplang.pl
+++ b/src/bin/scripts/t/060_droplang.pl
@@ -10,6 +10,11 @@ program_options_handling_ok('droplang');
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['droplang', 'plpgsql', 'postgres'], qr/statement: DROP EXTENSION "plpgsql"/, 'SQL DROP EXTENSION run');
+issues_sql_like(
+ [ 'droplang', 'plpgsql', 'postgres' ],
+ qr/statement: DROP EXTENSION "plpgsql"/,
+ 'SQL DROP EXTENSION run');
-command_fails(['droplang', 'nonexistent', 'postgres'], 'fails with nonexistent language');
+command_fails(
+ [ 'droplang', 'nonexistent', 'postgres' ],
+ 'fails with nonexistent language');
diff --git a/src/bin/scripts/t/070_dropuser.pl b/src/bin/scripts/t/070_dropuser.pl
index 495636ae84..9e0587d68f 100644
--- a/src/bin/scripts/t/070_dropuser.pl
+++ b/src/bin/scripts/t/070_dropuser.pl
@@ -11,6 +11,9 @@ my $tempdir = tempdir;
start_test_server $tempdir;
psql 'postgres', 'CREATE ROLE foobar1';
-issues_sql_like(['dropuser', 'foobar1'], qr/statement: DROP ROLE foobar1/, 'SQL DROP ROLE run');
+issues_sql_like(
+ [ 'dropuser', 'foobar1' ],
+ qr/statement: DROP ROLE foobar1/,
+ 'SQL DROP ROLE run');
-command_fails(['dropuser', 'nonexistent'], 'fails with nonexistent user');
+command_fails([ 'dropuser', 'nonexistent' ], 'fails with nonexistent user');
diff --git a/src/bin/scripts/t/090_reindexdb.pl b/src/bin/scripts/t/090_reindexdb.pl
index 18756e86aa..24b927ce22 100644
--- a/src/bin/scripts/t/090_reindexdb.pl
+++ b/src/bin/scripts/t/090_reindexdb.pl
@@ -12,10 +12,23 @@ start_test_server $tempdir;
$ENV{PGOPTIONS} = '--client-min-messages=WARNING';
-issues_sql_like(['reindexdb', 'postgres'], qr/statement: REINDEX DATABASE postgres;/, 'SQL REINDEX run');
+issues_sql_like(
+ [ 'reindexdb', 'postgres' ],
+ qr/statement: REINDEX DATABASE postgres;/,
+ 'SQL REINDEX run');
-psql 'postgres', 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a);';
-issues_sql_like(['reindexdb', 'postgres', '-t', 'test1'], qr/statement: REINDEX TABLE test1;/, 'reindex specific table');
-issues_sql_like(['reindexdb', 'postgres', '-i', 'test1x'], qr/statement: REINDEX INDEX test1x;/, 'reindex specific index');
+psql 'postgres',
+ 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a);';
+issues_sql_like(
+ [ 'reindexdb', 'postgres', '-t', 'test1' ],
+ qr/statement: REINDEX TABLE test1;/,
+ 'reindex specific table');
+issues_sql_like(
+ [ 'reindexdb', 'postgres', '-i', 'test1x' ],
+ qr/statement: REINDEX INDEX test1x;/,
+ 'reindex specific index');
-issues_sql_like(['reindexdb', 'postgres', '-s'], qr/statement: REINDEX SYSTEM postgres;/, 'reindex system tables');
+issues_sql_like(
+ [ 'reindexdb', 'postgres', '-s' ],
+ qr/statement: REINDEX SYSTEM postgres;/,
+ 'reindex system tables');
diff --git a/src/bin/scripts/t/091_reindexdb_all.pl b/src/bin/scripts/t/091_reindexdb_all.pl
index eee8ba8ed9..6c5c59e749 100644
--- a/src/bin/scripts/t/091_reindexdb_all.pl
+++ b/src/bin/scripts/t/091_reindexdb_all.pl
@@ -8,4 +8,7 @@ start_test_server $tempdir;
$ENV{PGOPTIONS} = '--client-min-messages=WARNING';
-issues_sql_like(['reindexdb', '-a'], qr/statement: REINDEX.*statement: REINDEX/s, 'reindex all databases');
+issues_sql_like(
+ [ 'reindexdb', '-a' ],
+ qr/statement: REINDEX.*statement: REINDEX/s,
+ 'reindex all databases');
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index 39f1cec8a1..78a40fa791 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -10,8 +10,23 @@ program_options_handling_ok('vacuumdb');
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['vacuumdb', 'postgres'], qr/statement: VACUUM;/, 'SQL VACUUM run');
-issues_sql_like(['vacuumdb', '-f', 'postgres'], qr/statement: VACUUM \(FULL\);/, 'vacuumdb -f');
-issues_sql_like(['vacuumdb', '-F', 'postgres'], qr/statement: VACUUM \(FREEZE\);/, 'vacuumdb -F');
-issues_sql_like(['vacuumdb', '-z', 'postgres'], qr/statement: VACUUM \(ANALYZE\);/, 'vacuumdb -z');
-issues_sql_like(['vacuumdb', '-Z', 'postgres'], qr/statement: ANALYZE;/, 'vacuumdb -z');
+issues_sql_like(
+ [ 'vacuumdb', 'postgres' ],
+ qr/statement: VACUUM;/,
+ 'SQL VACUUM run');
+issues_sql_like(
+ [ 'vacuumdb', '-f', 'postgres' ],
+ qr/statement: VACUUM \(FULL\);/,
+ 'vacuumdb -f');
+issues_sql_like(
+ [ 'vacuumdb', '-F', 'postgres' ],
+ qr/statement: VACUUM \(FREEZE\);/,
+ 'vacuumdb -F');
+issues_sql_like(
+ [ 'vacuumdb', '-z', 'postgres' ],
+ qr/statement: VACUUM \(ANALYZE\);/,
+ 'vacuumdb -z');
+issues_sql_like(
+ [ 'vacuumdb', '-Z', 'postgres' ],
+ qr/statement: ANALYZE;/,
+ 'vacuumdb -Z');
diff --git a/src/bin/scripts/t/101_vacuumdb_all.pl b/src/bin/scripts/t/101_vacuumdb_all.pl
index b5779bcf12..f2120e0bff 100644
--- a/src/bin/scripts/t/101_vacuumdb_all.pl
+++ b/src/bin/scripts/t/101_vacuumdb_all.pl
@@ -6,4 +6,7 @@ use Test::More tests => 1;
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['vacuumdb', '-a'], qr/statement: VACUUM.*statement: VACUUM/s, 'vacuum all databases');
+issues_sql_like(
+ [ 'vacuumdb', '-a' ],
+ qr/statement: VACUUM.*statement: VACUUM/s,
+ 'vacuum all databases');
diff --git a/src/bin/scripts/t/102_vacuumdb_stages.pl b/src/bin/scripts/t/102_vacuumdb_stages.pl
index 743743edba..4b032d3aba 100644
--- a/src/bin/scripts/t/102_vacuumdb_stages.pl
+++ b/src/bin/scripts/t/102_vacuumdb_stages.pl
@@ -6,11 +6,12 @@ use Test::More tests => 1;
my $tempdir = tempdir;
start_test_server $tempdir;
-issues_sql_like(['vacuumdb', '--analyze-in-stages', 'postgres'],
- qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
+issues_sql_like(
+ [ 'vacuumdb', '--analyze-in-stages', 'postgres' ],
+qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
.*statement:\ ANALYZE.*
.*statement:\ SET\ default_statistics_target=10;\ RESET\ vacuum_cost_delay;
.*statement:\ ANALYZE.*
.*statement:\ RESET\ default_statistics_target;
.*statement:\ ANALYZE/sx,
- 'analyze three times');
+ 'analyze three times');
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index 1e288ef978..0cfe5b0bc9 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -16,7 +16,7 @@
static void vacuum_one_database(const char *dbname, bool full, bool verbose,
- bool and_analyze, bool analyze_only, bool analyze_in_stages, bool freeze,
+ bool and_analyze, bool analyze_only, bool analyze_in_stages, bool freeze,
const char *table, const char *host, const char *port,
const char *username, enum trivalue prompt_password,
const char *progname, bool echo);
@@ -254,7 +254,7 @@ run_vacuum_command(PGconn *conn, const char *sql, bool echo, const char *dbname,
static void
vacuum_one_database(const char *dbname, bool full, bool verbose, bool and_analyze,
- bool analyze_only, bool analyze_in_stages, bool freeze, const char *table,
+ bool analyze_only, bool analyze_in_stages, bool freeze, const char *table,
const char *host, const char *port,
const char *username, enum trivalue prompt_password,
const char *progname, bool echo)
@@ -334,7 +334,7 @@ vacuum_one_database(const char *dbname, bool full, bool verbose, bool and_analyz
gettext_noop("Generating medium optimizer statistics (10 targets)"),
gettext_noop("Generating default (full) optimizer statistics")
};
- int i;
+ int i;
for (i = 0; i < 3; i++)
{
@@ -353,7 +353,7 @@ vacuum_one_database(const char *dbname, bool full, bool verbose, bool and_analyz
static void
vacuum_all_databases(bool full, bool verbose, bool and_analyze, bool analyze_only,
- bool analyze_in_stages, bool freeze, const char *maintenance_db,
+ bool analyze_in_stages, bool freeze, const char *maintenance_db,
const char *host, const char *port,
const char *username, enum trivalue prompt_password,
const char *progname, bool echo, bool quiet)
@@ -406,7 +406,7 @@ help(const char *progname)
printf(_(" -z, --analyze update optimizer statistics\n"));
printf(_(" -Z, --analyze-only only update optimizer statistics\n"));
printf(_(" --analyze-in-stages only update optimizer statistics, in multiple\n"
- " stages for faster results\n"));
+ " stages for faster results\n"));
printf(_(" -?, --help show this help, then exit\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
diff --git a/src/common/psprintf.c b/src/common/psprintf.c
index b01e195360..e1320a6db0 100644
--- a/src/common/psprintf.c
+++ b/src/common/psprintf.c
@@ -34,7 +34,7 @@
* psprintf
*
* Format text data under the control of fmt (an sprintf-style format string)
- * and return it in an allocated-on-demand buffer. The buffer is allocated
+ * and return it in an allocated-on-demand buffer. The buffer is allocated
* with palloc in the backend, or malloc in frontend builds. Caller is
* responsible to free the buffer when no longer needed, if appropriate.
*
@@ -54,7 +54,7 @@ psprintf(const char *fmt,...)
size_t newlen;
/*
- * Allocate result buffer. Note that in frontend this maps to malloc
+ * Allocate result buffer. Note that in frontend this maps to malloc
* with exit-on-error.
*/
result = (char *) palloc(len);
@@ -152,13 +152,14 @@ pvsnprintf(char *buf, size_t len, const char *fmt, va_list args)
{
/*
* This appears to be a C99-compliant vsnprintf, so believe its
- * estimate of the required space. (If it's wrong, the logic will
+ * estimate of the required space. (If it's wrong, the logic will
* still work, but we may loop multiple times.) Note that the space
* needed should be only nprinted+1 bytes, but we'd better allocate
* one more than that so that the test above will succeed next time.
*
* In the corner case where the required space just barely overflows,
- * fall through so that we'll error out below (possibly after looping).
+ * fall through so that we'll error out below (possibly after
+ * looping).
*/
if ((size_t) nprinted <= MaxAllocSize - 2)
return nprinted + 2;
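The grow-and-retry loop these comments describe is easy to show with plain malloc in place of palloc. A minimal sketch that trusts a C99 vsnprintf's return value as the size estimate:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *
    xpsprintf(const char *fmt, ...)
    {
        size_t  len = 128;              /* initial guess */

        for (;;)
        {
            char       *buf = malloc(len);
            va_list     args;
            int         nprinted;

            if (buf == NULL)
                return NULL;
            va_start(args, fmt);
            nprinted = vsnprintf(buf, len, fmt, args);
            va_end(args);

            if (nprinted >= 0 && (size_t) nprinted < len)
                return buf;             /* it fit */

            free(buf);
            if (nprinted < 0)
                return NULL;            /* format error */
            len = (size_t) nprinted + 1;    /* exact size next time */
        }
    }

    int
    main(void)
    {
        char   *s = xpsprintf("%s-%d", "abc", 42);

        puts(s);                        /* prints: abc-42 */
        free(s);
        return 0;
    }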
diff --git a/src/common/relpath.c b/src/common/relpath.c
index fc8b2732ea..5b9ee213f3 100644
--- a/src/common/relpath.c
+++ b/src/common/relpath.c
@@ -68,7 +68,7 @@ forkname_to_number(const char *forkName)
* forkname_chars
* We use this to figure out whether a filename could be a relation
* fork (as opposed to an oddly named stray file that somehow ended
- * up in the database directory). If the passed string begins with
+ * up in the database directory). If the passed string begins with
* a fork name (other than the main fork name), we return its length,
* and set *fork (if not NULL) to the fork number. If not, we return 0.
*
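A runnable sketch of the prefix test described above: report the length of a leading fork name, or 0. The list holds the documented non-main fork names; the real forkname_chars also reports the fork number through an out parameter:

    #include <stdio.h>
    #include <string.h>

    static int
    forkname_chars_sketch(const char *str)
    {
        static const char *const forks[] = {"fsm", "vm", "init"};
        size_t  i;

        for (i = 0; i < sizeof(forks) / sizeof(forks[0]); i++)
        {
            size_t  len = strlen(forks[i]);

            if (strncmp(str, forks[i], len) == 0)
                return (int) len;
        }
        return 0;                       /* not a fork-name prefix */
    }

    int
    main(void)
    {
        printf("%d %d\n", forkname_chars_sketch("vm"),
               forkname_chars_sketch("12345"));     /* prints: 2 0 */
        return 0;
    }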
diff --git a/src/include/access/attnum.h b/src/include/access/attnum.h
index 50320d27f3..ae7be34e81 100644
--- a/src/include/access/attnum.h
+++ b/src/include/access/attnum.h
@@ -16,7 +16,7 @@
/*
- * user defined attribute numbers start at 1. -ay 2/95
+ * user defined attribute numbers start at 1. -ay 2/95
*/
typedef int16 AttrNumber;
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index a51f4c45fc..d99158fb39 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -91,7 +91,7 @@ typedef struct SysScanDescData *SysScanDesc;
* blocking to see if a conflicting transaction commits.
*
* For deferrable unique constraints, UNIQUE_CHECK_PARTIAL is specified at
- * insertion time. The index AM should test if the tuple is unique, but
+ * insertion time. The index AM should test if the tuple is unique, but
* should not throw error, block, or prevent the insertion if the tuple
* appears not to be unique. We'll recheck later when it is time for the
* constraint to be enforced. The AM must return true if the tuple is
@@ -100,7 +100,7 @@ typedef struct SysScanDescData *SysScanDesc;
*
* When it is time to recheck the deferred constraint, a pseudo-insertion
* call is made with UNIQUE_CHECK_EXISTING. The tuple is already in the
- * index in this case, so it should not be inserted again. Rather, just
+ * index in this case, so it should not be inserted again. Rather, just
* check for conflicting live tuples (possibly blocking).
*/
typedef enum IndexUniqueCheck
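A hedged sketch of the insert-time control flow these comments imply. The enum names match the PostgreSQL ones; the duplicate probe is a stub, and the blocking/error behavior of UNIQUE_CHECK_YES is elided:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum IndexUniqueCheck
    {
        UNIQUE_CHECK_NO,            /* no constraint checking */
        UNIQUE_CHECK_YES,           /* error if not unique */
        UNIQUE_CHECK_PARTIAL,       /* check, but just report */
        UNIQUE_CHECK_EXISTING       /* recheck an already-inserted tuple */
    } IndexUniqueCheck;

    static bool
    has_live_duplicate(void)
    {
        return false;               /* stand-in for a real index probe */
    }

    static bool
    am_insert_sketch(IndexUniqueCheck checkUnique)
    {
        bool        is_unique = true;

        if (checkUnique == UNIQUE_CHECK_PARTIAL)
            is_unique = !has_live_duplicate();  /* report, never error */

        if (checkUnique == UNIQUE_CHECK_EXISTING)
            is_unique = !has_live_duplicate();  /* tuple already inserted */
        else
        {
            /* physical insertion of the tuple would happen here */
        }

        return is_unique;
    }

    int
    main(void)
    {
        printf("%d\n", (int) am_insert_sketch(UNIQUE_CHECK_PARTIAL));
        return 0;
    }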
diff --git a/src/include/access/gin.h b/src/include/access/gin.h
index f1894274fb..4cda0ecbab 100644
--- a/src/include/access/gin.h
+++ b/src/include/access/gin.h
@@ -55,10 +55,10 @@ typedef struct GinStatsData
*/
typedef char GinTernaryValue;
-#define GIN_FALSE 0 /* item is not present / does not match */
-#define GIN_TRUE 1 /* item is present / matches */
-#define GIN_MAYBE 2 /* don't know if item is present / don't know if
- * matches */
+#define GIN_FALSE 0 /* item is not present / does not match */
+#define GIN_TRUE 1 /* item is present / matches */
+#define GIN_MAYBE 2 /* don't know if item is present / don't know
+ * if matches */
#define DatumGetGinTernaryValue(X) ((GinTernaryValue)(X))
#define GinTernaryValueGetDatum(X) ((Datum)(X))
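A small sketch of how two GinTernaryValue results combine under AND, which is the useful property of the three-valued logic above: a definite miss dominates, and uncertainty propagates. The helper name is invented:

    #include <stdio.h>

    typedef char GinTernaryValue;

    #define GIN_FALSE 0
    #define GIN_TRUE  1
    #define GIN_MAYBE 2

    static GinTernaryValue
    ternary_and(GinTernaryValue a, GinTernaryValue b)
    {
        if (a == GIN_FALSE || b == GIN_FALSE)
            return GIN_FALSE;           /* any definite miss kills the AND */
        if (a == GIN_MAYBE || b == GIN_MAYBE)
            return GIN_MAYBE;           /* uncertainty propagates */
        return GIN_TRUE;
    }

    int
    main(void)
    {
        printf("%d\n", ternary_and(GIN_TRUE, GIN_MAYBE));  /* prints: 2 */
        return 0;
    }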
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index 3aa4276c0b..3baa9f5e1a 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -32,8 +32,9 @@
typedef struct GinPageOpaqueData
{
BlockNumber rightlink; /* next page if any */
- OffsetNumber maxoff; /* number of PostingItems on GIN_DATA & ~GIN_LEAF page.
- * On GIN_LIST page, number of heap tuples. */
+ OffsetNumber maxoff; /* number of PostingItems on GIN_DATA &
+ * ~GIN_LEAF page. On GIN_LIST page, number of
+ * heap tuples. */
uint16 flags; /* see bit definitions below */
} GinPageOpaqueData;
@@ -45,7 +46,8 @@ typedef GinPageOpaqueData *GinPageOpaque;
#define GIN_META (1 << 3)
#define GIN_LIST (1 << 4)
#define GIN_LIST_FULLROW (1 << 5) /* makes sense only on GIN_LIST page */
-#define GIN_INCOMPLETE_SPLIT (1 << 6) /* page was split, but parent not updated */
+#define GIN_INCOMPLETE_SPLIT (1 << 6) /* page was split, but parent not
+ * updated */
#define GIN_COMPRESSED (1 << 7)
/* Page numbers of fixed-location pages */
@@ -119,8 +121,8 @@ typedef struct GinMetaPageData
#define GinPageSetList(page) ( GinPageGetOpaque(page)->flags |= GIN_LIST )
#define GinPageHasFullRow(page) ( GinPageGetOpaque(page)->flags & GIN_LIST_FULLROW )
#define GinPageSetFullRow(page) ( GinPageGetOpaque(page)->flags |= GIN_LIST_FULLROW )
-#define GinPageIsCompressed(page) ( GinPageGetOpaque(page)->flags & GIN_COMPRESSED )
-#define GinPageSetCompressed(page) ( GinPageGetOpaque(page)->flags |= GIN_COMPRESSED )
+#define GinPageIsCompressed(page) ( GinPageGetOpaque(page)->flags & GIN_COMPRESSED )
+#define GinPageSetCompressed(page) ( GinPageGetOpaque(page)->flags |= GIN_COMPRESSED )
#define GinPageIsDeleted(page) ( GinPageGetOpaque(page)->flags & GIN_DELETED)
#define GinPageSetDeleted(page) ( GinPageGetOpaque(page)->flags |= GIN_DELETED)
@@ -371,9 +373,9 @@ typedef struct GinState
*/
typedef struct
{
- ItemPointerData first; /* first item in this posting list (unpacked) */
- uint16 nbytes; /* number of bytes that follow */
- unsigned char bytes[1]; /* varbyte encoded items (variable length) */
+ ItemPointerData first; /* first item in this posting list (unpacked) */
+ uint16 nbytes; /* number of bytes that follow */
+ unsigned char bytes[1]; /* varbyte encoded items (variable length) */
} GinPostingList;
#define SizeOfGinPostingList(plist) (offsetof(GinPostingList, bytes) + SHORTALIGN((plist)->nbytes) )
@@ -404,14 +406,14 @@ typedef struct
{
RelFileNode node;
BlockNumber blkno;
- uint16 flags; /* GIN_SPLIT_ISLEAF and/or GIN_SPLIT_ISDATA */
+ uint16 flags; /* GIN_SPLIT_ISLEAF and/or GIN_SPLIT_ISDATA */
/*
* FOLLOWS:
*
* 1. if not leaf page, block numbers of the left and right child pages
- * whose split this insertion finishes. As BlockIdData[2] (beware of adding
- * fields before this that would make them not 16-bit aligned)
+ * whose split this insertion finishes. As BlockIdData[2] (beware of
+ * adding fields before this that would make them not 16-bit aligned)
*
* 2. a ginxlogInsertEntry or ginxlogRecompressDataLeaf struct, depending
* on tree type.
@@ -426,7 +428,7 @@ typedef struct
{
OffsetNumber offset;
bool isDelete;
- IndexTupleData tuple; /* variable length */
+ IndexTupleData tuple; /* variable length */
} ginxlogInsertEntry;
@@ -444,8 +446,8 @@ typedef struct
*/
typedef struct
{
- uint8 segno; /* segment this action applies to */
- char type; /* action type (see below) */
+ uint8 segno; /* segment this action applies to */
+ char type; /* action type (see below) */
/*
* Action-specific data follows. For INSERT and REPLACE actions that is a
* GinPostingList struct. For ADDITEMS, a uint16 for the number of items
* added, followed by the items themselves as ItemPointers. DELETE actions
* added, followed by the items themselves as ItemPointers. DELETE actions
* have no further data.
*/
-} ginxlogSegmentAction;
+} ginxlogSegmentAction;
/* Action types */
-#define GIN_SEGMENT_UNMODIFIED 0 /* no action (not used in WAL records) */
-#define GIN_SEGMENT_DELETE 1 /* a whole segment is removed */
-#define GIN_SEGMENT_INSERT 2 /* a whole segment is added */
-#define GIN_SEGMENT_REPLACE 3 /* a segment is replaced */
-#define GIN_SEGMENT_ADDITEMS 4 /* items are added to existing segment */
+#define GIN_SEGMENT_UNMODIFIED 0 /* no action (not used in WAL records) */
+#define GIN_SEGMENT_DELETE 1 /* a whole segment is removed */
+#define GIN_SEGMENT_INSERT 2 /* a whole segment is added */
+#define GIN_SEGMENT_REPLACE 3 /* a segment is replaced */
+#define GIN_SEGMENT_ADDITEMS 4 /* items are added to existing segment */
typedef struct
{
@@ -476,9 +478,10 @@ typedef struct ginxlogSplit
RelFileNode node;
BlockNumber lblkno;
BlockNumber rblkno;
- BlockNumber rrlink; /* right link, or root's blocknumber if root split */
- BlockNumber leftChildBlkno; /* valid on a non-leaf split */
- BlockNumber rightChildBlkno;
+ BlockNumber rrlink; /* right link, or root's blocknumber if root
+ * split */
+ BlockNumber leftChildBlkno; /* valid on a non-leaf split */
+ BlockNumber rightChildBlkno;
uint16 flags;
/* follows: one of the following structs */
@@ -726,7 +729,7 @@ extern ItemPointer ginReadTuple(GinState *ginstate, OffsetNumber attnum,
/* gindatapage.c */
extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerData advancePast);
-extern int GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
+extern int GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm);
extern BlockNumber createPostingTree(Relation index,
ItemPointerData *items, uint32 nitems,
GinStatsData *buildStats);
@@ -763,7 +766,7 @@ extern void ginVacuumPostingTreeLeaf(Relation rel, Buffer buf, GinVacuumState *g
*
* In each GinScanKeyData, nentries is the true number of entries, while
* nuserentries is the number that extractQueryFn returned (which is what
- * we report to consistentFn). The "user" entries must come first.
+ * we report to consistentFn). The "user" entries must come first.
*/
typedef struct GinScanKeyData *GinScanKey;
@@ -780,8 +783,8 @@ typedef struct GinScanKeyData
GinScanEntry *scanEntry;
/*
- * At least one of the entries in requiredEntries must be present for
- * a tuple to match the overall qual.
+ * At least one of the entries in requiredEntries must be present for a
+ * tuple to match the overall qual.
*
* additionalEntries contains entries that are needed by the consistent
* function to decide if an item matches, but are not sufficient to
@@ -946,8 +949,8 @@ extern void ginInsertCleanup(GinState *ginstate,
/* ginpostinglist.c */
extern GinPostingList *ginCompressPostingList(const ItemPointer ptrs, int nptrs,
- int maxsize, int *nwritten);
-extern int ginPostingListDecodeAllSegmentsToTbm(GinPostingList *ptr, int totalsize, TIDBitmap *tbm);
+ int maxsize, int *nwritten);
+extern int ginPostingListDecodeAllSegmentsToTbm(GinPostingList *ptr, int totalsize, TIDBitmap *tbm);
extern ItemPointer ginPostingListDecodeAllSegments(GinPostingList *ptr, int len, int *ndecoded);
extern ItemPointer ginPostingListDecode(GinPostingList *ptr, int *ndecoded);
@@ -965,8 +968,8 @@ extern ItemPointer ginMergeItemPointers(ItemPointerData *a, uint32 na,
static inline int
ginCompareItemPointers(ItemPointer a, ItemPointer b)
{
- uint64 ia = (uint64) a->ip_blkid.bi_hi << 32 | (uint64) a->ip_blkid.bi_lo << 16 | a->ip_posid;
- uint64 ib = (uint64) b->ip_blkid.bi_hi << 32 | (uint64) b->ip_blkid.bi_lo << 16 | b->ip_posid;
+ uint64 ia = (uint64) a->ip_blkid.bi_hi << 32 | (uint64) a->ip_blkid.bi_lo << 16 | a->ip_posid;
+ uint64 ib = (uint64) b->ip_blkid.bi_hi << 32 | (uint64) b->ip_blkid.bi_lo << 16 | b->ip_posid;
if (ia == ib)
return 0;
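The "varbyte encoded items" wording in GinPostingList above refers to base-128 coding. A toy encoder for the byte layout only (seven data bits per byte, high bit set on continuation bytes); GIN's real coder additionally packs each ItemPointer delta into a 48-bit integer first:

    #include <stdint.h>
    #include <stdio.h>

    static int
    encode_varbyte(uint64_t val, unsigned char *buf)
    {
        int     n = 0;

        while (val > 0x7F)
        {
            buf[n++] = (unsigned char) (val & 0x7F) | 0x80; /* more follows */
            val >>= 7;
        }
        buf[n++] = (unsigned char) val;     /* final byte, high bit clear */
        return n;
    }

    int
    main(void)
    {
        unsigned char buf[10];
        int     n = encode_varbyte(300, buf);
        int     i;

        for (i = 0; i < n; i++)
            printf("%02x ", buf[i]);        /* prints: ac 02 */
        putchar('\n');
        return 0;
    }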
diff --git a/src/include/access/gist.h b/src/include/access/gist.h
index a32066a0bb..ef5aed4d3e 100644
--- a/src/include/access/gist.h
+++ b/src/include/access/gist.h
@@ -98,11 +98,11 @@ typedef GISTPageOpaqueData *GISTPageOpaque;
* the union keys for each side.
*
* If spl_ldatum_exists and spl_rdatum_exists are true, then we are performing
- * a "secondary split" using a non-first index column. In this case some
+ * a "secondary split" using a non-first index column. In this case some
* decisions have already been made about a page split, and the set of tuples
* being passed to PickSplit is just the tuples about which we are undecided.
* spl_ldatum/spl_rdatum then contain the union keys for the tuples already
- * chosen to go left or right. Ideally the PickSplit method should take those
+ * chosen to go left or right. Ideally the PickSplit method should take those
* keys into account while deciding what to do with the remaining tuples, ie
* it should try to "build out" from those unions so as to minimally expand
* them. If it does so, it should union the given tuples' keys into the
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 4ff47f2031..d89bcea39d 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -185,7 +185,7 @@ typedef HashMetaPageData *HashMetaPage;
#define ALL_SET ((uint32) ~0)
/*
- * Bitmap pages do not contain tuples. They do contain the standard
+ * Bitmap pages do not contain tuples. They do contain the standard
* page headers and trailers; however, everything in between is a
* giant bit array. The number of bits that fit on a page obviously
* depends on the page size and the header/trailer overhead. We require
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 0f802577c7..493839f60e 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -59,7 +59,7 @@ typedef enum LockTupleMode
* replacement is really a match.
* cmax is the outdating command's CID, but only when the failure code is
* HeapTupleSelfUpdated (i.e., something in the current transaction outdated
- * the tuple); otherwise cmax is zero. (We make this restriction because
+ * the tuple); otherwise cmax is zero. (We make this restriction because
* HeapTupleHeaderGetCmax doesn't work for tuples outdated in other
* transactions.)
*/
@@ -106,7 +106,7 @@ typedef struct HeapScanDescData *HeapScanDesc;
extern HeapScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key);
extern HeapScanDesc heap_beginscan_catalog(Relation relation, int nkeys,
- ScanKey key);
+ ScanKey key);
extern HeapScanDesc heap_beginscan_strat(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
bool allow_strat, bool allow_sync);
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index 4d8cdf0f17..cfdd1ffbef 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -43,7 +43,7 @@
*/
#define XLOG_HEAP_INIT_PAGE 0x80
/*
- * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
+ * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
* are associated with RM_HEAP2_ID, but are not logically different from
* the ones above associated with RM_HEAP_ID. XLOG_HEAP_OPMASK applies to
* these, too.
@@ -71,7 +71,7 @@
#define XLOG_HEAP_SUFFIX_FROM_OLD (1<<6)
/* convenience macro for checking whether any form of old tuple was logged */
-#define XLOG_HEAP_CONTAINS_OLD \
+#define XLOG_HEAP_CONTAINS_OLD \
(XLOG_HEAP_CONTAINS_OLD_TUPLE | XLOG_HEAP_CONTAINS_OLD_KEY)
/*
@@ -126,11 +126,11 @@ typedef struct xl_heap_header
*/
typedef struct xl_heap_header_len
{
- uint16 t_len;
+ uint16 t_len;
xl_heap_header header;
} xl_heap_header_len;
-#define SizeOfHeapHeaderLen (offsetof(xl_heap_header_len, header) + SizeOfHeapHeader)
+#define SizeOfHeapHeaderLen (offsetof(xl_heap_header_len, header) + SizeOfHeapHeader)
/* This is what we need to know about insert */
typedef struct xl_heap_insert
@@ -179,7 +179,7 @@ typedef struct xl_heap_update
TransactionId old_xmax; /* xmax of the old tuple */
TransactionId new_xmax; /* xmax of the new tuple */
ItemPointerData newtid; /* new inserted tuple id */
- uint8 old_infobits_set; /* infomask bits to set on old tuple */
+ uint8 old_infobits_set; /* infomask bits to set on old tuple */
uint8 flags;
/*
@@ -335,18 +335,20 @@ typedef struct xl_heap_new_cid
* transactions
*/
TransactionId top_xid;
- CommandId cmin;
- CommandId cmax;
+ CommandId cmin;
+ CommandId cmax;
+
/*
- * don't really need the combocid since we have the actual values
- * right in this struct, but the padding makes it free and its
- * useful for debugging.
+ * don't really need the combocid since we have the actual values right in
+ * this struct, but the padding makes it free and it's useful for
+ * debugging.
*/
- CommandId combocid;
+ CommandId combocid;
+
/*
* Store the relfilenode/ctid pair to facilitate lookups.
*/
- xl_heaptid target;
+ xl_heaptid target;
} xl_heap_new_cid;
#define SizeOfHeapNewCid (offsetof(xl_heap_new_cid, target) + SizeOfHeapTid)
@@ -354,12 +356,12 @@ typedef struct xl_heap_new_cid
/* logical rewrite xlog record header */
typedef struct xl_heap_rewrite_mapping
{
- TransactionId mapped_xid; /* xid that might need to see the row */
- Oid mapped_db; /* DbOid or InvalidOid for shared rels */
- Oid mapped_rel; /* Oid of the mapped relation */
- off_t offset; /* How far have we written so far */
- uint32 num_mappings; /* Number of in-memory mappings */
- XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */
+ TransactionId mapped_xid; /* xid that might need to see the row */
+ Oid mapped_db; /* DbOid or InvalidOid for shared rels */
+ Oid mapped_rel; /* Oid of the mapped relation */
+ off_t offset; /* How far have we written so far */
+ uint32 num_mappings; /* Number of in-memory mappings */
+ XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */
} xl_heap_rewrite_mapping;
extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
diff --git a/src/include/access/htup.h b/src/include/access/htup.h
index 178f6dc4a8..cb61081b67 100644
--- a/src/include/access/htup.h
+++ b/src/include/access/htup.h
@@ -45,12 +45,12 @@ typedef MinimalTupleData *MinimalTuple;
* This is the output format of heap_form_tuple and related routines.
*
* * Separately allocated tuple: t_data points to a palloc'd chunk that
- * is not adjacent to the HeapTupleData. (This case is deprecated since
+ * is not adjacent to the HeapTupleData. (This case is deprecated since
* it's difficult to tell apart from case #1. It should be used only in
* limited contexts where the code knows that case #1 will never apply.)
*
* * Separately allocated minimal tuple: t_data points MINIMAL_TUPLE_OFFSET
- * bytes before the start of a MinimalTuple. As with the previous case,
+ * bytes before the start of a MinimalTuple. As with the previous case,
* this can't be told apart from case #1 by inspection; code setting up
* or destroying this representation has to know what it's doing.
*
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index 039d4b4cd9..294d21bd18 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -70,7 +70,7 @@
*
* We store five "virtual" fields Xmin, Cmin, Xmax, Cmax, and Xvac in three
* physical fields. Xmin and Xmax are always really stored, but Cmin, Cmax
- * and Xvac share a field. This works because we know that Cmin and Cmax
+ * and Xvac share a field. This works because we know that Cmin and Cmax
* are only interesting for the lifetime of the inserting and deleting
* transaction respectively. If a tuple is inserted and deleted in the same
* transaction, we store a "combo" command id that can be mapped to the real
@@ -82,7 +82,7 @@
* ie, an insert-in-progress or delete-in-progress tuple.)
*
* A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid
- * is initialized with its own TID (location). If the tuple is ever updated,
+ * is initialized with its own TID (location). If the tuple is ever updated,
* its t_ctid is changed to point to the replacement version of the tuple.
* Thus, a tuple is the latest version of its row iff XMAX is invalid or
* t_ctid points to itself (in which case, if XMAX is valid, the tuple is
@@ -97,10 +97,10 @@
* check fails, one may assume that there is no live descendant version.
*
* Following the fixed header fields, the nulls bitmap is stored (beginning
- * at t_bits). The bitmap is *not* stored if t_infomask shows that there
+ * at t_bits). The bitmap is *not* stored if t_infomask shows that there
* are no nulls in the tuple. If an OID field is present (as indicated by
* t_infomask), then it is stored just before the user data, which begins at
- * the offset shown by t_hoff. Note that t_hoff must be a multiple of
+ * the offset shown by t_hoff. Note that t_hoff must be a multiple of
* MAXALIGN.
*/
@@ -242,7 +242,7 @@ struct HeapTupleHeaderData
/*
* HeapTupleHeader accessor macros
*
- * Note: beware of multiple evaluations of "tup" argument. But the Set
+ * Note: beware of multiple evaluations of "tup" argument. But the Set
* macros evaluate their other argument only once.
*/
@@ -528,7 +528,7 @@ do { \
* MinimalTuple is an alternative representation that is used for transient
* tuples inside the executor, in places where transaction status information
* is not required, the tuple rowtype is known, and shaving off a few bytes
- * is worthwhile because we need to store many tuples. The representation
+ * is worthwhile because we need to store many tuples. The representation
* is chosen so that tuple access routines can work with either full or
* minimal tuples via a HeapTupleData pointer structure. The access routines
* see no difference, except that they must not access the transaction status
@@ -552,7 +552,7 @@ do { \
* the MINIMAL_TUPLE_OFFSET distance. t_len does not include that, however.
*
* MINIMAL_TUPLE_DATA_OFFSET is the offset to the first useful (non-pad) data
- * other than the length word. tuplesort.c and tuplestore.c use this to avoid
+ * other than the length word. tuplesort.c and tuplestore.c use this to avoid
* writing the padding to disk.
*/
#define MINIMAL_TUPLE_OFFSET \
@@ -698,7 +698,7 @@ extern Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
* and set *isnull == true. Otherwise, we set *isnull == false.
*
* <tup> is the pointer to the heap tuple. <attnum> is the attribute
- * number of the column (field) caller wants. <tupleDesc> is a
+ * number of the column (field) caller wants. <tupleDesc> is a
* pointer to the structure describing the row and all its fields.
* ----------------
*/
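The nulls-bitmap layout described above (one bit per column, stored only when some column is null) boils down to a clear-bit-means-null test. A standalone sketch matching that convention, not the header's actual macro text:

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    att_is_null(int attnum_0based, const unsigned char *bits)
    {
        /* bit clear => attribute is null */
        return (bits[attnum_0based >> 3] & (1 << (attnum_0based & 7))) == 0;
    }

    int
    main(void)
    {
        unsigned char bits[1] = {0x05};     /* columns 0 and 2 present */

        printf("%d %d %d\n", att_is_null(0, bits), att_is_null(1, bits),
               att_is_null(2, bits));       /* prints: 0 1 0 */
        return 0;
    }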
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index 99bc097c52..de17936b10 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -22,7 +22,7 @@
/*
* Index tuple header structure
*
- * All index tuples start with IndexTupleData. If the HasNulls bit is set,
+ * All index tuples start with IndexTupleData. If the HasNulls bit is set,
* this is followed by an IndexAttributeBitMapData. The index attribute
* values follow, beginning at a MAXALIGN boundary.
*
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 1a8b16d45e..f2817590c4 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -36,9 +36,9 @@ typedef uint16 BTCycleId;
* and status. If the page is deleted, we replace the level with the
* next-transaction-ID value indicating when it is safe to reclaim the page.
*
- * We also store a "vacuum cycle ID". When a page is split while VACUUM is
+ * We also store a "vacuum cycle ID". When a page is split while VACUUM is
* processing the index, a nonzero value associated with the VACUUM run is
- * stored into both halves of the split page. (If VACUUM is not running,
+ * stored into both halves of the split page. (If VACUUM is not running,
* both pages receive zero cycleids.) This allows VACUUM to detect whether
* a page was split since it started, with a small probability of false match
* if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
@@ -73,10 +73,10 @@ typedef BTPageOpaqueData *BTPageOpaque;
#define BTP_HALF_DEAD (1 << 4) /* empty, but still in tree */
#define BTP_SPLIT_END (1 << 5) /* rightmost page of split group */
#define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DEAD tuples */
-#define BTP_INCOMPLETE_SPLIT (1 << 7) /* right sibling's downlink is missing */
+#define BTP_INCOMPLETE_SPLIT (1 << 7) /* right sibling's downlink is missing */
/*
- * The max allowed value of a cycle ID is a bit less than 64K. This is
+ * The max allowed value of a cycle ID is a bit less than 64K. This is
* for convenience of pg_filedump and similar utilities: we want to use
* the last 2 bytes of special space as an index type indicator, and
* restricting cycle ID lets btree use that space for vacuum cycle IDs
@@ -218,9 +218,9 @@ typedef struct BTMetaPageData
#define XLOG_BTREE_SPLIT_R_ROOT 0x60 /* as above, new item on right */
#define XLOG_BTREE_DELETE 0x70 /* delete leaf index tuples for a page */
#define XLOG_BTREE_UNLINK_PAGE 0x80 /* delete a half-dead page */
-#define XLOG_BTREE_UNLINK_PAGE_META 0x90 /* same, and update metapage */
+#define XLOG_BTREE_UNLINK_PAGE_META 0x90 /* same, and update metapage */
#define XLOG_BTREE_NEWROOT 0xA0 /* new root page */
-#define XLOG_BTREE_MARK_PAGE_HALFDEAD 0xB0 /* mark a leaf as half-dead */
+#define XLOG_BTREE_MARK_PAGE_HALFDEAD 0xB0 /* mark a leaf as half-dead */
#define XLOG_BTREE_VACUUM 0xC0 /* delete entries on a page during
* vacuum */
#define XLOG_BTREE_REUSE_PAGE 0xD0 /* old page is about to be reused from
@@ -273,9 +273,9 @@ typedef struct xl_btree_insert
* Note: the four XLOG_BTREE_SPLIT xl_info codes all use this data record.
* The _L and _R variants indicate whether the inserted tuple went into the
* left or right split page (and thus, whether newitemoff and the new item
- * are stored or not). The _ROOT variants indicate that we are splitting
+ * are stored or not). The _ROOT variants indicate that we are splitting
* the root page, and thus that a newroot record rather than an insert or
- * split record should follow. Note that a split record never carries a
+ * split record should follow. Note that a split record never carries a
* metapage update --- we'll do that in the parent-level update.
*/
typedef struct xl_btree_split
@@ -295,11 +295,11 @@ typedef struct xl_btree_split
*
* If level > 0, an IndexTuple representing the HIKEY of the left page
* follows. We don't need this on leaf pages, because it's the same as
- * the leftmost key in the new right page. Also, it's suppressed if
+ * the leftmost key in the new right page. Also, it's suppressed if
* XLogInsert chooses to store the left page's whole page image.
*
- * If level > 0, BlockNumber of the page whose incomplete-split flag
- * this insertion clears. (not aligned)
+ * If level > 0, BlockNumber of the page whose incomplete-split flag this
+ * insertion clears. (not aligned)
*
* Last are the right page's tuples in the form used by _bt_restore_page.
*/
@@ -387,7 +387,7 @@ typedef struct xl_btree_mark_page_halfdead
BlockNumber topparent; /* topmost internal page in the branch */
} xl_btree_mark_page_halfdead;
-#define SizeOfBtreeMarkPageHalfDead (offsetof(xl_btree_mark_page_halfdead, topparent) + sizeof(BlockNumber))
+#define SizeOfBtreeMarkPageHalfDead (offsetof(xl_btree_mark_page_halfdead, topparent) + sizeof(BlockNumber))
/*
* This is what we need to know about deletion of a btree page. Note we do
@@ -396,19 +396,19 @@ typedef struct xl_btree_mark_page_halfdead
*/
typedef struct xl_btree_unlink_page
{
- RelFileNode node;
+ RelFileNode node;
BlockNumber deadblk; /* target block being deleted */
BlockNumber leftsib; /* target block's left sibling, if any */
BlockNumber rightsib; /* target block's right sibling */
/*
- * Information needed to recreate the leaf page, when target is an internal
- * page.
+ * Information needed to recreate the leaf page, when target is an
+ * internal page.
*/
- BlockNumber leafblk;
- BlockNumber leafleftsib;
- BlockNumber leafrightsib;
- BlockNumber topparent; /* next child down in the branch */
+ BlockNumber leafblk;
+ BlockNumber leafleftsib;
+ BlockNumber leafrightsib;
+ BlockNumber topparent; /* next child down in the branch */
TransactionId btpo_xact; /* value of btpo.xact for use in recovery */
/* xl_btree_metadata FOLLOWS IF XLOG_BTREE_UNLINK_PAGE_META */
@@ -446,12 +446,12 @@ typedef struct xl_btree_newroot
/*
* When a new operator class is declared, we require that the user
* supply us with an amproc procedure (BTORDER_PROC) for determining
- * whether, for two keys a and b, a < b, a = b, or a > b. This routine
+ * whether, for two keys a and b, a < b, a = b, or a > b. This routine
* must return < 0, 0, > 0, respectively, in these three cases. (It must
* not return INT_MIN, since we may negate the result before using it.)
*
* To facilitate accelerated sorting, an operator class may choose to
- * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see
+ * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see
* src/include/utils/sortsupport.h.
*/
@@ -490,7 +490,7 @@ typedef BTStackData *BTStack;
* BTScanOpaqueData is the btree-private state needed for an indexscan.
* This consists of preprocessed scan keys (see _bt_preprocess_keys() for
* details of the preprocessing), information about the current location
- * of the scan, and information about the marked location, if any. (We use
+ * of the scan, and information about the marked location, if any. (We use
* BTScanPosData to represent the data needed for each of current and marked
* locations.) In addition we can remember some known-killed index entries
* that must be marked before we can move off the current page.
@@ -498,9 +498,9 @@ typedef BTStackData *BTStack;
* Index scans work a page at a time: we pin and read-lock the page, identify
* all the matching items on the page and save them in BTScanPosData, then
* release the read-lock while returning the items to the caller for
- * processing. This approach minimizes lock/unlock traffic. Note that we
+ * processing. This approach minimizes lock/unlock traffic. Note that we
* keep the pin on the index page until the caller is done with all the items
- * (this is needed for VACUUM synchronization, see nbtree/README). When we
+ * (this is needed for VACUUM synchronization, see nbtree/README). When we
* are ready to step to the next page, if the caller has told us any of the
* items were killed, we re-lock the page to mark them killed, then unlock.
* Finally we drop the pin and step to the next page in the appropriate
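
The lock/pin choreography described above reduces, as a sketch rather than the verbatim code, to:

LockBuffer(so->currPos.buf, BT_READ);   /* pin is already held */
/* ... save all matching items into so->currPos ... */
LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);    /* keep the pin */

/* ... caller consumes the items; when stepping to the next page: */
if (so->numKilled > 0)
{
    LockBuffer(so->currPos.buf, BT_READ);
    /* ... mark the killed items LP_DEAD ... */
    LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
}
ReleaseBuffer(so->currPos.buf);         /* drop the pin, then step */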
@@ -612,7 +612,7 @@ typedef BTScanOpaqueData *BTScanOpaque;
/*
* We use some private sk_flags bits in preprocessed scan keys. We're allowed
- * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
+ * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
* index's indoption[] array entry for the index attribute.
*/
#define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h
index a54d56395c..81ff3286cf 100644
--- a/src/include/access/reloptions.h
+++ b/src/include/access/reloptions.h
@@ -197,7 +197,7 @@ typedef struct
* "base" is a pointer to the reloptions structure, and "offset" is an integer
* variable that must be initialized to sizeof(reloptions structure). This
* struct must have been allocated with enough space to hold any string option
- * present, including terminating \0 for every option. SET_VARSIZE() must be
+ * present, including terminating \0 for every option. SET_VARSIZE() must be
* called on the struct with this offset as the second argument, after all the
* string options have been processed.
*/
diff --git a/src/include/access/rewriteheap.h b/src/include/access/rewriteheap.h
index 07df3b4f2b..4051c83852 100644
--- a/src/include/access/rewriteheap.h
+++ b/src/include/access/rewriteheap.h
@@ -34,10 +34,10 @@ extern bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple oldTuple);
*/
typedef struct LogicalRewriteMappingData
{
- RelFileNode old_node;
- RelFileNode new_node;
- ItemPointerData old_tid;
- ItemPointerData new_tid;
+ RelFileNode old_node;
+ RelFileNode new_node;
+ ItemPointerData old_tid;
+ ItemPointerData new_tid;
} LogicalRewriteMappingData;
/* ---
@@ -52,6 +52,6 @@ typedef struct LogicalRewriteMappingData
* ---
*/
#define LOGICAL_REWRITE_FORMAT "map-%x-%x-%X_%X-%x-%x"
-void CheckPointLogicalRewriteHeap(void);
+void CheckPointLogicalRewriteHeap(void);
#endif /* REWRITE_HEAP_H */
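
A sketch of how the format string is filled in (cf. logical_rewrite_log_mapping in rewriteheap.c; dboid, relid, lsn, xid, and seq stand for locals of the obvious types, with the LSN split into its high and low 32-bit halves):

char        path[MAXPGPATH];

snprintf(path, sizeof(path), "pg_llog/mappings/" LOGICAL_REWRITE_FORMAT,
         dboid, relid,
         (uint32) (lsn >> 32), (uint32) lsn,
         xid, seq);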
diff --git a/src/include/access/rmgr.h b/src/include/access/rmgr.h
index 51110b9cfc..1b577a2050 100644
--- a/src/include/access/rmgr.h
+++ b/src/include/access/rmgr.h
@@ -26,7 +26,7 @@ typedef enum RmgrIds
{
#include "access/rmgrlist.h"
RM_NEXT_ID
-} RmgrIds;
+} RmgrIds;
#undef PG_RMGR
diff --git a/src/include/access/rmgrlist.h b/src/include/access/rmgrlist.h
index 6449eeaf90..662fb77b42 100644
--- a/src/include/access/rmgrlist.h
+++ b/src/include/access/rmgrlist.h
@@ -2,7 +2,7 @@
* rmgrlist.h
*
* The resource manager list is kept in its own source file for possible
- * use by automatic tools. The exact representation of a rmgr is determined
+ * use by automatic tools. The exact representation of a rmgr is determined
* by the PG_RMGR macro, which is not defined in this file; it can be
* defined by the caller for special purposes.
*
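
The intended usage pattern, sketched for the six-column form used in this tree: define PG_RMGR to select the column of interest, include the list, then undefine it. For example, a table of rmgr names:

#define PG_RMGR(symname, name, redo, desc, startup, cleanup) name,

static const char *RmgrNames[] = {
#include "access/rmgrlist.h"
};

#undef PG_RMGR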
diff --git a/src/include/access/skey.h b/src/include/access/skey.h
index 41e467ba93..bb96808719 100644
--- a/src/include/access/skey.h
+++ b/src/include/access/skey.h
@@ -42,7 +42,7 @@ typedef uint16 StrategyNumber;
/*
* A ScanKey represents the application of a comparison operator between
- * a table or index column and a constant. When it's part of an array of
+ * a table or index column and a constant. When it's part of an array of
* ScanKeys, the comparison conditions are implicitly ANDed. The index
* column is the left argument of the operator, if it's a binary operator.
* (The data structure can support unary indexable operators too; in that
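
For example, a ScanKey for "attribute 1 = 42" on an int4 column can be built with the standard helper (a sketch):

ScanKeyData skey;

ScanKeyInit(&skey,
            1,                      /* attribute number */
            BTEqualStrategyNumber,  /* the "=" strategy */
            F_INT4EQ,               /* OID of the int4 equality proc */
            Int32GetDatum(42));     /* right-hand constant */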
@@ -115,7 +115,7 @@ typedef ScanKeyData *ScanKey;
* must be sorted according to the leading column number.
*
* The subsidiary ScanKey array appears in logical column order of the row
- * comparison, which may be different from index column order. The array
+ * comparison, which may be different from index column order. The array
* elements are like a normal ScanKey array except that:
* sk_flags must include SK_ROW_MEMBER, plus SK_ROW_END in the last
* element (needed since row header does not include a count)
diff --git a/src/include/access/slru.h b/src/include/access/slru.h
index c7b4186ffa..8eb22a4973 100644
--- a/src/include/access/slru.h
+++ b/src/include/access/slru.h
@@ -31,7 +31,7 @@
* segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
*
* Note: slru.c currently assumes that segment file names will be four hex
- * digits. This sets a lower bound on the segment size (64K transactions
+ * digits. This sets a lower bound on the segment size (64K transactions
* for 32-bit TransactionIds).
*/
#define SLRU_PAGES_PER_SEGMENT 32
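
The bound follows directly: four hex digits allow at most 0x10000 = 65536 segments, so covering all 2^32 possible 32-bit TransactionIds requires at least 2^32 / 2^16 = 64K transactions per segment. A sketch of the name derivation itself (cf. SlruFileName in slru.c):

int         segno = pageno / SLRU_PAGES_PER_SEGMENT;
char        path[MAXPGPATH];

snprintf(path, MAXPGPATH, "%s/%04X", ctl->Dir, segno);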
@@ -55,7 +55,7 @@ typedef enum
*/
typedef struct SlruSharedData
{
- LWLock *ControlLock;
+ LWLock *ControlLock;
/* Number of buffers managed by this SLRU structure */
int num_slots;
@@ -69,7 +69,7 @@ typedef struct SlruSharedData
bool *page_dirty;
int *page_number;
int *page_lru_count;
- LWLock **buffer_locks;
+ LWLock **buffer_locks;
/*
* Optional array of WAL flush LSNs associated with entries in the SLRU
diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h
index 8e10ab28a4..7ad7f344f3 100644
--- a/src/include/access/spgist_private.h
+++ b/src/include/access/spgist_private.h
@@ -185,7 +185,7 @@ typedef struct SpGistCache
/*
- * SPGiST tuple types. Note: inner, leaf, and dead tuple structs
+ * SPGiST tuple types. Note: inner, leaf, and dead tuple structs
* must have the same tupstate field in the same position! Real inner and
* leaf tuples always have tupstate = LIVE; if the state is something else,
* use the SpGistDeadTuple struct to inspect the tuple.
diff --git a/src/include/access/transam.h b/src/include/access/transam.h
index a9774e9f59..32d1b290e0 100644
--- a/src/include/access/transam.h
+++ b/src/include/access/transam.h
@@ -78,7 +78,7 @@
* using the OID generator. (We start the generator at 10000.)
*
* OIDs beginning at 16384 are assigned from the OID generator
- * during normal multiuser operation. (We force the generator up to
+ * during normal multiuser operation. (We force the generator up to
* 16384 as soon as we are in normal operation.)
*
* The choices of 10000 and 16384 are completely arbitrary, and can be moved
diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h
index 710f1d1f14..083f4bdc40 100644
--- a/src/include/access/tupdesc.h
+++ b/src/include/access/tupdesc.h
@@ -55,7 +55,7 @@ typedef struct tupleConstr
* TupleDesc; with the exception that tdhasoid indicates if OID is present.
*
* If the tupdesc is known to correspond to a named rowtype (such as a table's
- * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
+ * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
* tdtypeid is RECORDOID, and tdtypmod can be either -1 for a fully anonymous
* row type, or a value >= 0 to allow the rowtype to be looked up in the
* typcache.c type cache.
diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h
index 76fd49c88e..ff198fe75e 100644
--- a/src/include/access/tupmacs.h
+++ b/src/include/access/tupmacs.h
@@ -92,7 +92,7 @@
/*
* att_align_datum aligns the given offset as needed for a datum of alignment
- * requirement attalign and typlen attlen. attdatum is the Datum variable
+ * requirement attalign and typlen attlen. attdatum is the Datum variable
* we intend to pack into a tuple (it's only accessed if we are dealing with
* a varlena type). Note that this assumes the Datum will be stored as-is;
* callers that are intending to convert non-short varlena datums to short
@@ -111,7 +111,7 @@
* pointer; when accessing a varlena field we have to "peek" to see if we
* are looking at a pad byte or the first byte of a 1-byte-header datum.
* (A zero byte must be either a pad byte, or the first byte of a correctly
- * aligned 4-byte length word; in either case we can align safely. A non-zero
+ * aligned 4-byte length word; in either case we can align safely. A non-zero
* byte must be either a 1-byte length word, or the first byte of a correctly
* aligned 4-byte length word; in either case we need not align.)
*
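
For a varlena attribute (attlen == -1), the peek reduces to roughly this sketch of the att_align_pointer logic:

if (*(char *) ptr == 0)
{
    /* pad byte or aligned 4-byte length word: aligning is safe */
    cur_offset = att_align_nominal(cur_offset, attalign);
}
/* else: 1-byte header or already-aligned data; must not align */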
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index e038e1a3ea..d8edd9e987 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -60,7 +60,7 @@
* The code will also consider moving MAIN data out-of-line, but only as a
* last resort if the previous steps haven't reached the target tuple size.
* In this phase we use a different target size, currently equal to the
- * largest tuple that will fit on a heap page. This is reasonable since
+ * largest tuple that will fit on a heap page. This is reasonable since
* the user has told us to keep the data in-line if at all possible.
*/
#define TOAST_TUPLES_PER_PAGE_MAIN 1
@@ -76,7 +76,7 @@
/*
* When we store an oversize datum externally, we divide it into chunks
- * containing at most TOAST_MAX_CHUNK_SIZE data bytes. This number *must*
+ * containing at most TOAST_MAX_CHUNK_SIZE data bytes. This number *must*
* be small enough that the completed toast-table tuple (including the
* ID and sequence fields and all overhead) will fit on a page.
* The coding here sets the size on the theory that we want to fit
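
For the default 8 kB block size and the usual target of four chunks per page, this works out to a TOAST_MAX_CHUNK_SIZE just under 2 kB (1996 bytes on typical builds); the chosen value is recorded in pg_control so a cluster is never read back with a mismatched chunk size.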
@@ -223,6 +223,6 @@ extern Size toast_datum_size(Datum value);
* Return OID of valid index associated to a toast relation
* ----------
*/
-extern Oid toast_get_valid_index(Oid toastoid, LOCKMODE lock);
+extern Oid toast_get_valid_index(Oid toastoid, LOCKMODE lock);
#endif /* TUPTOASTER_H */
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 56cfe63d8c..1eaa5c1c21 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -31,11 +31,11 @@
* where there can be zero to four backup blocks (as signaled by xl_info flag
* bits). XLogRecord structs always start on MAXALIGN boundaries in the WAL
* files, and we round up SizeOfXLogRecord so that the rmgr data is also
- * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
+ * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
* to align BkpBlock structs or backup block data.
*
* NOTE: xl_len counts only the rmgr data, not the XLogRecord header,
- * and also not any backup blocks. xl_tot_len counts everything. Neither
+ * and also not any backup blocks. xl_tot_len counts everything. Neither
* length field is rounded up to an alignment boundary.
*/
typedef struct XLogRecord
@@ -100,7 +100,7 @@ extern int sync_method;
* value (ignoring InvalidBuffer) appearing in the rdata chain.
*
* When buffer is valid, caller must set buffer_std to indicate whether the
- * page uses standard pd_lower/pd_upper header fields. If this is true, then
+ * page uses standard pd_lower/pd_upper header fields. If this is true, then
* XLOG is allowed to omit the free space between pd_lower and pd_upper from
* the backed-up page image. Note that even when buffer_std is false, the
* page MUST have an LSN field as its first eight bytes!
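
A hypothetical sketch of a two-entry rdata chain following these rules; xlrec, buf, RM_FOO_ID, and XLOG_FOO_OP are stand-ins, and the buffer reference in the second entry is what lets XLogInsert substitute a backup block for the data when needed:

XLogRecData rdata[2];
XLogRecPtr  recptr;

rdata[0].data = (char *) &xlrec;    /* rmgr-specific fixed data */
rdata[0].len = sizeof(xlrec);
rdata[0].buffer = InvalidBuffer;
rdata[0].next = &rdata[1];

rdata[1].data = NULL;               /* entry exists only to cite the buffer */
rdata[1].len = 0;
rdata[1].buffer = buf;              /* first valid buffer in the chain */
rdata[1].buffer_std = true;         /* page has standard pd_lower/pd_upper */
rdata[1].next = NULL;

recptr = XLogInsert(RM_FOO_ID, XLOG_FOO_OP, rdata);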
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index 708ce22a5d..f55dbacc8b 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -123,7 +123,7 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
* Compute ID and segment from an XLogRecPtr.
*
* For XLByteToSeg, do the computation at face value. For XLByteToPrevSeg,
- * a boundary byte is taken to be in the previous segment. This is suitable
+ * a boundary byte is taken to be in the previous segment. This is suitable
* for deciding which segment to write given a pointer to a record end,
* for example.
*/
@@ -261,7 +261,7 @@ extern XLogRecPtr RequestXLogSwitch(void);
extern void GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli);
/*
- * Exported for the functions in timeline.c and xlogarchive.c. Only valid
+ * Exported for the functions in timeline.c and xlogarchive.c. Only valid
* in the startup process.
*/
extern bool ArchiveRecoveryRequested;
diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h
index 77587e97c2..3b8e738eac 100644
--- a/src/include/access/xlogdefs.h
+++ b/src/include/access/xlogdefs.h
@@ -49,7 +49,7 @@ typedef uint32 TimeLineID;
* read those buffers except during crash recovery or if wal_level != minimal,
* it is a win to use it in all cases where we sync on each write(). We could
* allow O_DIRECT with fsync(), but it is unclear if fsync() could process
- * writes not buffered in the kernel. Also, O_DIRECT is never enough to force
+ * writes not buffered in the kernel. Also, O_DIRECT is never enough to force
* data to the drives, it merely tries to bypass the kernel cache, so we still
* need O_SYNC/O_DSYNC.
*/
@@ -62,7 +62,7 @@ typedef uint32 TimeLineID;
/*
* This chunk of hackery attempts to determine which file sync methods
* are available on the current platform, and to choose an appropriate
- * default method. We assume that fsync() is always available, and that
+ * default method. We assume that fsync() is always available, and that
* configure determined whether fdatasync() is.
*/
#if defined(O_SYNC)
diff --git a/src/include/c.h b/src/include/c.h
index 30b8f51cb8..df22d50d4e 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -37,7 +37,7 @@
* 9) system-specific hacks
*
* NOTE: since this file is included by both frontend and backend modules, it's
- * almost certainly wrong to put an "extern" declaration here. typedefs and
+ * almost certainly wrong to put an "extern" declaration here. typedefs and
* macros are the kind of thing that might go here.
*
*----------------------------------------------------------------
@@ -117,7 +117,7 @@
/*
* Use this to mark string constants as needing translation at some later
- * time, rather than immediately. This is useful for cases where you need
+ * time, rather than immediately. This is useful for cases where you need
* access to the original string and translated string, and for cases where
* immediate translation is not possible, like when initializing global
* variables.
@@ -376,7 +376,7 @@ typedef struct
* Variable-length datatypes all share the 'struct varlena' header.
*
* NOTE: for TOASTable types, this is an oversimplification, since the value
- * may be compressed or moved out-of-line. However datatype-specific routines
+ * may be compressed or moved out-of-line. However datatype-specific routines
* are mostly content to deal with de-TOASTed values only, and of course
* client-side routines should never see a TOASTed value. But even in a
* de-TOASTed value, beware of touching vl_len_ directly, as its representation
@@ -406,7 +406,7 @@ typedef struct varlena VarChar; /* var-length char, ie SQL varchar(n) */
/*
* Specialized array types. These are physically laid out just the same
* as regular arrays (so that the regular array subscripting code works
- * with them). They exist as distinct types mostly for historical reasons:
+ * with them). They exist as distinct types mostly for historical reasons:
* they have nonstandard I/O behavior which we don't want to change for fear
* of breaking applications that look at the system catalogs. There is also
* an implementation issue for oidvector: it's part of the primary key for
@@ -449,7 +449,7 @@ typedef NameData *Name;
/*
* Support macros for escaping strings. escape_backslash should be TRUE
- * if generating a non-standard-conforming string. Prefixing a string
+ * if generating a non-standard-conforming string. Prefixing a string
* with ESCAPE_STRING_SYNTAX guarantees it is non-standard-conforming.
* Beware of multiple evaluation of the "ch" argument!
*/
@@ -580,7 +580,7 @@ typedef NameData *Name;
#define AssertArg(condition)
#define AssertState(condition)
#define Trap(condition, errorType)
-#define TrapMacro(condition, errorType) (true)
+#define TrapMacro(condition, errorType) (true)
#elif defined(FRONTEND)
@@ -635,7 +635,7 @@ typedef NameData *Name;
* throw a compile error using the "errmessage" (a string literal).
*
* gcc 4.6 and up supports _Static_assert(), but there are bizarre syntactic
- * placement restrictions. These macros make it safe to use as a statement
+ * placement restrictions. These macros make it safe to use as a statement
* or in an expression, respectively.
*
* Otherwise we fall back on a kluge that assumes the compiler will complain
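
Usage is identical under either implementation; for instance, a hypothetical check:

StaticAssertStmt(sizeof(TransactionId) == 4,
                 "TransactionId is expected to be 32 bits");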
@@ -717,7 +717,7 @@ typedef NameData *Name;
* datum) and add a null, do not do it with StrNCpy(..., len+1). That
* might seem to work, but it fetches one byte more than there is in the
* text object. One fine day you'll have a SIGSEGV because there isn't
- * another byte before the end of memory. Don't laugh, we've had real
+ * another byte before the end of memory. Don't laugh, we've had real
* live bug reports from real live users over exactly this mistake.
* Do it honestly with "memcpy(dst,src,len); dst[len] = '\0';", instead.
*/
@@ -743,7 +743,7 @@ typedef NameData *Name;
* Exactly the same as standard library function memset(), but considerably
* faster for zeroing small word-aligned structures (such as parsetree nodes).
* This has to be a macro because the main point is to avoid function-call
- * overhead. However, we have also found that the loop is faster than
+ * overhead. However, we have also found that the loop is faster than
* native libc memset() on some platforms, even those with assembler
* memset() functions. More research needs to be done, perhaps with
* MEMSET_LOOP_LIMIT tests in configure.
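
Typical usage, on a small fixed-size struct as suggested above:

SortSupportData ssup;

MemSet(&ssup, 0, sizeof(ssup));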
@@ -847,7 +847,7 @@ typedef NameData *Name;
*
* The function bodies must be defined in the module header prefixed by
* STATIC_IF_INLINE, protected by a cpp symbol that the module's .c file must
- * define. If the compiler doesn't support inline functions, the function
+ * define. If the compiler doesn't support inline functions, the function
* definitions are pulled in by the .c file as regular (not inline) symbols.
*
* The header must also declare the functions' prototypes, protected by
@@ -917,7 +917,7 @@ typedef NameData *Name;
* Section 9: system-specific hacks
*
* This should be limited to things that absolutely have to be
- * included in every source file. The port-specific header file
+ * included in every source file. The port-specific header file
* is usually a better place for this sort of thing.
* ----------------------------------------------------------------
*/
@@ -926,7 +926,7 @@ typedef NameData *Name;
* NOTE: this is also used for opening text files.
* WIN32 treats Control-Z as EOF in files opened in text mode.
* Therefore, we open files in binary mode on Win32 so we can read
- * literal control-Z. The other affect is that we see CRLF, but
+ * literal control-Z. The other effect is that we see CRLF, but
* that is OK because we can already handle those cleanly.
*/
#if defined(WIN32) || defined(__CYGWIN__)
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 27bfa08eea..2eb78128be 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -4,7 +4,7 @@
* "Catalog version number" for PostgreSQL.
*
* The catalog version number is used to flag incompatible changes in
- * the PostgreSQL system catalogs. Whenever anyone changes the format of
+ * the PostgreSQL system catalogs. Whenever anyone changes the format of
* a system catalog relation, or adds, deletes, or modifies standard
* catalog entries in such a way that an updated backend wouldn't work
* with an old database (or vice versa), the catalog version number
diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h
index 8948589f74..8ed259283a 100644
--- a/src/include/catalog/dependency.h
+++ b/src/include/catalog/dependency.h
@@ -58,7 +58,7 @@
* DEPENDENCY_PIN ('p'): there is no dependent object; this type of entry
* is a signal that the system itself depends on the referenced object,
* and so that object must never be deleted. Entries of this type are
- * created only during initdb. The fields for the dependent object
+ * created only during initdb. The fields for the dependent object
* contain zeroes.
*
* Other dependency flavors may be needed in future.
diff --git a/src/include/catalog/duplicate_oids b/src/include/catalog/duplicate_oids
index f3d1136a35..7342d618ed 100755
--- a/src/include/catalog/duplicate_oids
+++ b/src/include/catalog/duplicate_oids
@@ -10,23 +10,23 @@ BEGIN
my %oidcounts;
-while(<>)
+while (<>)
{
next if /^CATALOG\(.*BKI_BOOTSTRAP/;
- next unless
- /^DATA\(insert *OID *= *(\d+)/ ||
- /^CATALOG\([^,]*, *(\d+).*BKI_ROWTYPE_OID\((\d+)\)/ ||
- /^CATALOG\([^,]*, *(\d+)/ ||
- /^DECLARE_INDEX\([^,]*, *(\d+)/ ||
- /^DECLARE_UNIQUE_INDEX\([^,]*, *(\d+)/ ||
- /^DECLARE_TOAST\([^,]*, *(\d+), *(\d+)/;
+ next
+ unless /^DATA\(insert *OID *= *(\d+)/
+ || /^CATALOG\([^,]*, *(\d+).*BKI_ROWTYPE_OID\((\d+)\)/
+ || /^CATALOG\([^,]*, *(\d+)/
+ || /^DECLARE_INDEX\([^,]*, *(\d+)/
+ || /^DECLARE_UNIQUE_INDEX\([^,]*, *(\d+)/
+ || /^DECLARE_TOAST\([^,]*, *(\d+), *(\d+)/;
$oidcounts{$1}++;
$oidcounts{$2}++ if $2;
}
my $found = 0;
-foreach my $oid (sort {$a <=> $b} keys %oidcounts)
+foreach my $oid (sort { $a <=> $b } keys %oidcounts)
{
next unless $oidcounts{$oid} > 1;
$found = 1;
diff --git a/src/include/catalog/genbki.h b/src/include/catalog/genbki.h
index 4713b23d1d..cb40c07063 100644
--- a/src/include/catalog/genbki.h
+++ b/src/include/catalog/genbki.h
@@ -27,7 +27,7 @@
*
* Variable-length catalog fields (except possibly the first not nullable one)
* should not be visible in C structures, so they are made invisible by #ifdefs
- * of an undefined symbol. See also MARKNOTNULL in bootstrap.c for how this is
+ * of an undefined symbol. See also MARKNOTNULL in bootstrap.c for how this is
* handled.
*/
#undef CATALOG_VARLEN
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index 2f9d391d28..77ce041cef 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -20,7 +20,7 @@
/*
* This structure holds a list of possible functions or operators
- * found by namespace lookup. Each function/operator is identified
+ * found by namespace lookup. Each function/operator is identified
* by OID and by argument types; the list must be pruned by type
* resolution rules that are embodied in the parser, not here.
* See FuncnameGetCandidates's comments for more info.
diff --git a/src/include/catalog/objectaccess.h b/src/include/catalog/objectaccess.h
index ac8260b63c..4fdd0567ca 100644
--- a/src/include/catalog/objectaccess.h
+++ b/src/include/catalog/objectaccess.h
@@ -12,7 +12,7 @@
/*
* Object access hooks are intended to be called just before or just after
- * performing certain actions on a SQL object. This is intended as
+ * performing certain actions on a SQL object. This is intended as
* infrastructure for security or logging plugins.
*
* OAT_POST_CREATE should be invoked just after the object is created.
diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h
index 0f9c11f64e..e69c0a210d 100644
--- a/src/include/catalog/pg_aggregate.h
+++ b/src/include/catalog/pg_aggregate.h
@@ -104,7 +104,7 @@ typedef FormData_pg_aggregate *Form_pg_aggregate;
#define Anum_pg_aggregate_aggminitval 17
/*
- * Symbolic values for aggkind column. We distinguish normal aggregates
+ * Symbolic values for aggkind column. We distinguish normal aggregates
* from ordered-set aggregates (which have two sets of arguments, namely
* direct and aggregated arguments) and from hypothetical-set aggregates
* (which are a subclass of ordered-set aggregates in which the last
diff --git a/src/include/catalog/pg_attrdef.h b/src/include/catalog/pg_attrdef.h
index f98cfacd88..b8ceefd7cd 100644
--- a/src/include/catalog/pg_attrdef.h
+++ b/src/include/catalog/pg_attrdef.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_attrdef definition. cpp turns this into
+ * pg_attrdef definition. cpp turns this into
* typedef struct FormData_pg_attrdef
* ----------------
*/
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index 1113971de5..cdde814307 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -161,7 +161,7 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK
/*
* ATTRIBUTE_FIXED_PART_SIZE is the size of the fixed-layout,
- * guaranteed-not-null part of a pg_attribute row. This is in fact as much
+ * guaranteed-not-null part of a pg_attribute row. This is in fact as much
* of the row as gets copied into tuple descriptors, so don't expect you
* can access fields beyond attcollation except in a real tuple!
*/
diff --git a/src/include/catalog/pg_authid.h b/src/include/catalog/pg_authid.h
index 939a3862b6..e7c32c9fa6 100644
--- a/src/include/catalog/pg_authid.h
+++ b/src/include/catalog/pg_authid.h
@@ -26,7 +26,7 @@
/*
* The CATALOG definition has to refer to the type of rolvaliduntil as
* "timestamptz" (lower case) so that bootstrap mode recognizes it. But
- * the C header files define this type as TimestampTz. Since the field is
+ * the C header files define this type as TimestampTz. Since the field is
* potentially-null and therefore can't be accessed directly from C code,
* there is no particular need for the C struct definition to show the
* field type as TimestampTz --- instead we just make it int.
diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h
index 1f01637b2e..014b85bd20 100644
--- a/src/include/catalog/pg_constraint.h
+++ b/src/include/catalog/pg_constraint.h
@@ -38,7 +38,7 @@ CATALOG(pg_constraint,2606)
* relations. This is partly for backwards compatibility with past
* Postgres practice, and partly because we don't want to have to obtain a
* global lock to generate a globally unique name for a nameless
- * constraint. We associate a namespace with constraint names only for
+ * constraint. We associate a namespace with constraint names only for
* SQL-spec compatibility.
*/
NameData conname; /* name of this constraint */
@@ -57,7 +57,7 @@ CATALOG(pg_constraint,2606)
/*
* contypid links to the pg_type row for a domain if this is a domain
- * constraint. Otherwise it's 0.
+ * constraint. Otherwise it's 0.
*
* For SQL-style global ASSERTIONs, both conrelid and contypid would be
* zero. This is not presently supported, however.
@@ -76,7 +76,7 @@ CATALOG(pg_constraint,2606)
/*
* These fields, plus confkey, are only meaningful for a foreign-key
- * constraint. Otherwise confrelid is 0 and the char fields are spaces.
+ * constraint. Otherwise confrelid is 0 and the char fields are spaces.
*/
Oid confrelid; /* relation referenced by foreign key */
char confupdtype; /* foreign key's ON UPDATE action */
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 9d9c3b0bb0..05c5b748cb 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -102,9 +102,9 @@ typedef struct ControlFileData
uint64 system_identifier;
/*
- * Version identifier information. Keep these fields at the same offset,
+ * Version identifier information. Keep these fields at the same offset,
* especially pg_control_version; they won't be real useful if they move
- * around. (For historical reasons they must be 8 bytes into the file
+ * around. (For historical reasons they must be 8 bytes into the file
* rather than immediately at the front.)
*
* pg_control_version identifies the format of pg_control itself.
diff --git a/src/include/catalog/pg_db_role_setting.h b/src/include/catalog/pg_db_role_setting.h
index 9a58f525e5..054e87a0dd 100644
--- a/src/include/catalog/pg_db_role_setting.h
+++ b/src/include/catalog/pg_db_role_setting.h
@@ -26,7 +26,7 @@
#include "utils/snapshot.h"
/* ----------------
- * pg_db_role_setting definition. cpp turns this into
+ * pg_db_role_setting definition. cpp turns this into
* typedef struct FormData_pg_db_role_setting
* ----------------
*/
diff --git a/src/include/catalog/pg_default_acl.h b/src/include/catalog/pg_default_acl.h
index 93509d0ccf..749e2e431d 100644
--- a/src/include/catalog/pg_default_acl.h
+++ b/src/include/catalog/pg_default_acl.h
@@ -21,7 +21,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_default_acl definition. cpp turns this into
+ * pg_default_acl definition. cpp turns this into
* typedef struct FormData_pg_default_acl
* ----------------
*/
@@ -63,7 +63,7 @@ typedef FormData_pg_default_acl *Form_pg_default_acl;
/*
* Types of objects for which the user is allowed to specify default
- * permissions through pg_default_acl. These codes are used in the
+ * permissions through pg_default_acl. These codes are used in the
* defaclobjtype column.
*/
#define DEFACLOBJ_RELATION 'r' /* table, view */
diff --git a/src/include/catalog/pg_description.h b/src/include/catalog/pg_description.h
index d93f8900aa..b5f23b808f 100644
--- a/src/include/catalog/pg_description.h
+++ b/src/include/catalog/pg_description.h
@@ -6,12 +6,12 @@
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a function is identified by the OID of its pg_proc row
- * plus the pg_class OID of table pg_proc. This allows unique identification
+ * plus the pg_class OID of table pg_proc. This allows unique identification
* of objects without assuming that OIDs are unique across tables.
*
* Since attributes don't have OIDs of their own, we identify an attribute
* comment by the objoid+classoid of its parent table, plus an "objsubid"
- * giving the attribute column number. "objsubid" must be zero in a comment
+ * giving the attribute column number. "objsubid" must be zero in a comment
* for a table itself, so that it is distinct from any column comment.
* Currently, objsubid is unused and zero for all other kinds of objects,
* but perhaps it might be useful someday to associate comments with
@@ -39,7 +39,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_description definition. cpp turns this into
+ * pg_description definition. cpp turns this into
* typedef struct FormData_pg_description
* ----------------
*/
diff --git a/src/include/catalog/pg_event_trigger.h b/src/include/catalog/pg_event_trigger.h
index 61e8bb41fd..1284b88cec 100644
--- a/src/include/catalog/pg_event_trigger.h
+++ b/src/include/catalog/pg_event_trigger.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_event_trigger definition. cpp turns this into
+ * pg_event_trigger definition. cpp turns this into
* typedef struct FormData_pg_event_trigger
* ----------------
*/
diff --git a/src/include/catalog/pg_index.h b/src/include/catalog/pg_index.h
index c3876477e3..8b8be3bfbf 100644
--- a/src/include/catalog/pg_index.h
+++ b/src/include/catalog/pg_index.h
@@ -42,7 +42,7 @@ CATALOG(pg_index,2610) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO
bool indcheckxmin; /* must we wait for xmin to be old? */
bool indisready; /* is this index ready for inserts? */
bool indislive; /* is this index alive at all? */
- bool indisreplident; /* is this index the identity for replication? */
+ bool indisreplident; /* is this index the identity for replication? */
/* variable-length fields start here, but we allow direct access to indkey */
int2vector indkey; /* column numbers of indexed cols, or 0 */
diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h
index 8a4778787f..b341f92eb7 100644
--- a/src/include/catalog/pg_largeobject.h
+++ b/src/include/catalog/pg_largeobject.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_largeobject definition. cpp turns this into
+ * pg_largeobject definition. cpp turns this into
* typedef struct FormData_pg_largeobject
* ----------------
*/
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index 49b24108de..ecf70639c8 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -17,10 +17,10 @@
* don't support partial indexes on system catalogs.)
*
* Normally opckeytype = InvalidOid (zero), indicating that the data stored
- * in the index is the same as the data in the indexed column. If opckeytype
+ * in the index is the same as the data in the indexed column. If opckeytype
* is nonzero then it indicates that a conversion step is needed to produce
* the stored index data, which will be of type opckeytype (which might be
- * the same or different from the input datatype). Performing such a
+ * the same or different from the input datatype). Performing such a
* conversion is the responsibility of the index access method --- not all
* AMs support this.
*
@@ -42,7 +42,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_opclass definition. cpp turns this into
+ * pg_opclass definition. cpp turns this into
* typedef struct FormData_pg_opclass
* ----------------
*/
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 98c183bd5e..e601ccd09c 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -1470,7 +1470,7 @@ DESCR("natural exponential (e^x)");
/*
* This form of obj_description is now deprecated, since it will fail if
- * OIDs are not unique across system catalogs. Use the other form instead.
+ * OIDs are not unique across system catalogs. Use the other form instead.
*/
DATA(insert OID = 1348 ( obj_description PGNSP PGUID 14 100 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and objsubid = 0" _null_ _null_ _null_ ));
DESCR("deprecated, use two-argument form instead");
@@ -1983,7 +1983,7 @@ DATA(insert OID = 2232 ( pg_get_function_identity_arguments PGNSP PGUID 12 1
DESCR("identity argument list of a function");
DATA(insert OID = 2165 ( pg_get_function_result PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ pg_get_function_result _null_ _null_ _null_ ));
DESCR("result type of a function");
-DATA(insert OID = 3808 ( pg_get_function_arg_default PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "26 23" _null_ _null_ _null_ _null_ pg_get_function_arg_default _null_ _null_ _null_ ));
+DATA(insert OID = 3808 ( pg_get_function_arg_default PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "26 23" _null_ _null_ _null_ _null_ pg_get_function_arg_default _null_ _null_ _null_ ));
DESCR("function argument default");
DATA(insert OID = 1686 ( pg_get_keywords PGNSP PGUID 12 10 400 0 0 f f f f t t s 0 0 2249 "" "{25,18,25}" "{o,o,o}" "{word,catcode,catdesc}" _null_ pg_get_keywords _null_ _null_ _null_ ));
@@ -2655,7 +2655,7 @@ DATA(insert OID = 2878 ( pg_stat_get_live_tuples PGNSP PGUID 12 1 0 0 0 f f f f
DESCR("statistics: number of live tuples");
DATA(insert OID = 2879 ( pg_stat_get_dead_tuples PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 20 "26" _null_ _null_ _null_ _null_ pg_stat_get_dead_tuples _null_ _null_ _null_ ));
DESCR("statistics: number of dead tuples");
-DATA(insert OID = 3177 ( pg_stat_get_mod_since_analyze PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 20 "26" _null_ _null_ _null_ _null_ pg_stat_get_mod_since_analyze _null_ _null_ _null_ ));
+DATA(insert OID = 3177 ( pg_stat_get_mod_since_analyze PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 20 "26" _null_ _null_ _null_ _null_ pg_stat_get_mod_since_analyze _null_ _null_ _null_ ));
DESCR("statistics: number of tuples changed since last analyze");
DATA(insert OID = 1934 ( pg_stat_get_blocks_fetched PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 20 "26" _null_ _null_ _null_ _null_ pg_stat_get_blocks_fetched _null_ _null_ _null_ ));
DESCR("statistics: number of blocks fetched");
@@ -4054,7 +4054,7 @@ DATA(insert OID = 2774 ( ginqueryarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t
DESCR("GIN array support");
DATA(insert OID = 2744 ( ginarrayconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 8 0 16 "2281 21 2277 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ ginarrayconsistent _null_ _null_ _null_ ));
DESCR("GIN array support");
-DATA(insert OID = 3920 ( ginarraytriconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 7 0 16 "2281 21 2277 23 2281 2281 2281" _null_ _null_ _null_ _null_ ginarraytriconsistent _null_ _null_ _null_ ));
+DATA(insert OID = 3920 ( ginarraytriconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 7 0 16 "2281 21 2277 23 2281 2281 2281" _null_ _null_ _null_ _null_ ginarraytriconsistent _null_ _null_ _null_ ));
DESCR("GIN array support");
DATA(insert OID = 3076 ( ginarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2281 "2277 2281" _null_ _null_ _null_ _null_ ginarrayextract_2args _null_ _null_ _null_ ));
DESCR("GIN array support (obsolete)");
@@ -4213,13 +4213,13 @@ DATA(insert OID = 3198 ( json_build_array PGNSP PGUID 12 1 0 2276 0 f f f f
DESCR("build a json array from any inputs");
DATA(insert OID = 3199 ( json_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f i 0 0 114 "" _null_ _null_ _null_ _null_ json_build_array_noargs _null_ _null_ _null_ ));
DESCR("build an empty json array");
-DATA(insert OID = 3200 ( json_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f i 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ json_build_object _null_ _null_ _null_ ));
+DATA(insert OID = 3200 ( json_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f i 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ json_build_object _null_ _null_ _null_ ));
DESCR("build a json object from pairwise key/value inputs");
-DATA(insert OID = 3201 ( json_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f i 0 0 114 "" _null_ _null_ _null_ _null_ json_build_object_noargs _null_ _null_ _null_ ));
+DATA(insert OID = 3201 ( json_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f i 0 0 114 "" _null_ _null_ _null_ _null_ json_build_object_noargs _null_ _null_ _null_ ));
DESCR("build an empty json object");
-DATA(insert OID = 3202 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "1009" _null_ _null_ _null_ _null_ json_object _null_ _null_ _null_ ));
+DATA(insert OID = 3202 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "1009" _null_ _null_ _null_ _null_ json_object _null_ _null_ _null_ ));
DESCR("map text arrayof key value pais to json object");
-DATA(insert OID = 3203 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "1009 1009" _null_ _null_ _null_ _null_ json_object_two_arg _null_ _null_ _null_ ));
+DATA(insert OID = 3203 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "1009 1009" _null_ _null_ _null_ _null_ json_object_two_arg _null_ _null_ _null_ ));
DESCR("map text arrayof key value pais to json object");
DATA(insert OID = 3176 ( to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2283" _null_ _null_ _null_ _null_ to_json _null_ _null_ _null_ ));
DESCR("map input to json");
@@ -4254,7 +4254,7 @@ DATA(insert OID = 3204 ( json_to_record PGNSP PGUID 12 1 0 0 0 f f f f f f s
DESCR("get record fields from a json object");
DATA(insert OID = 3205 ( json_to_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s 2 0 2249 "114 16" _null_ _null_ _null_ _null_ json_to_recordset _null_ _null_ _null_ ));
DESCR("get set of records with fields from a json array of objects");
-DATA(insert OID = 3968 ( json_typeof PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "114" _null_ _null_ _null_ _null_ json_typeof _null_ _null_ _null_ ));
+DATA(insert OID = 3968 ( json_typeof PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "114" _null_ _null_ _null_ _null_ json_typeof _null_ _null_ _null_ ));
DESCR("get the type of a json value");
/* uuid */
@@ -4871,7 +4871,7 @@ DATA(insert OID = 3846 ( make_date PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 1082
DESCR("construct date");
DATA(insert OID = 3847 ( make_time PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 1083 "23 23 701" _null_ _null_ "{hour,min,sec}" _null_ make_time _null_ _null_ _null_ ));
DESCR("construct time");
-DATA(insert OID = 3461 ( make_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i 6 0 1114 "23 23 23 23 23 701" _null_ _null_ "{year,month,mday,hour,min,sec}" _null_ make_timestamp _null_ _null_ _null_ ));
+DATA(insert OID = 3461 ( make_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i 6 0 1114 "23 23 23 23 23 701" _null_ _null_ "{year,month,mday,hour,min,sec}" _null_ make_timestamp _null_ _null_ _null_ ));
DESCR("construct timestamp");
DATA(insert OID = 3462 ( make_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s 6 0 1184 "23 23 23 23 23 701" _null_ _null_ "{year,month,mday,hour,min,sec}" _null_ make_timestamptz _null_ _null_ _null_ ));
DESCR("construct timestamp with time zone");
@@ -5045,7 +5045,7 @@ DESCR("aggregate final function");
#define PROVOLATILE_VOLATILE 'v' /* can change even within a scan */
/*
- * Symbolic values for proargmodes column. Note that these must agree with
+ * Symbolic values for proargmodes column. Note that these must agree with
* the FunctionParameterMode enum in parsenodes.h; we declare them here to
* be accessible from either header.
*/
diff --git a/src/include/catalog/pg_rewrite.h b/src/include/catalog/pg_rewrite.h
index 895d16b25d..37629fef3b 100644
--- a/src/include/catalog/pg_rewrite.h
+++ b/src/include/catalog/pg_rewrite.h
@@ -25,7 +25,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_rewrite definition. cpp turns this into
+ * pg_rewrite definition. cpp turns this into
* typedef struct FormData_pg_rewrite
* ----------------
*/
diff --git a/src/include/catalog/pg_shdepend.h b/src/include/catalog/pg_shdepend.h
index f62ef1c4ee..346dc2e393 100644
--- a/src/include/catalog/pg_shdepend.h
+++ b/src/include/catalog/pg_shdepend.h
@@ -33,7 +33,7 @@ CATALOG(pg_shdepend,1214) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
/*
* Identification of the dependent (referencing) object.
*
- * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can
+ * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can
* be zero to denote a shared object.
*/
Oid dbid; /* OID of database containing object */
diff --git a/src/include/catalog/pg_shdescription.h b/src/include/catalog/pg_shdescription.h
index 214133ccb7..50a516ad1c 100644
--- a/src/include/catalog/pg_shdescription.h
+++ b/src/include/catalog/pg_shdescription.h
@@ -7,7 +7,7 @@
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a database is identified by the OID of its pg_database row
- * plus the pg_class OID of table pg_database. This allows unique
+ * plus the pg_class OID of table pg_database. This allows unique
* identification of objects without assuming that OIDs are unique
* across tables.
*
@@ -32,7 +32,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_shdescription definition. cpp turns this into
+ * pg_shdescription definition. cpp turns this into
* typedef struct FormData_pg_shdescription
* ----------------
*/
diff --git a/src/include/catalog/pg_statistic.h b/src/include/catalog/pg_statistic.h
index 0b02e668e4..e6c00f619f 100644
--- a/src/include/catalog/pg_statistic.h
+++ b/src/include/catalog/pg_statistic.h
@@ -39,7 +39,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
float4 stanullfrac;
/*
- * stawidth is the average width in bytes of non-null entries. For
+ * stawidth is the average width in bytes of non-null entries. For
* fixed-width datatypes this is of course the same as the typlen, but for
* var-width types it is more useful. Note that this is the average width
* of the data as actually stored, post-TOASTing (eg, for a
@@ -59,7 +59,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
* The special negative case allows us to cope with columns that are
* unique (stadistinct = -1) or nearly so (for example, a column in
* which values appear about twice on the average could be represented
- * by stadistinct = -0.5). Because the number-of-rows statistic in
+ * by stadistinct = -0.5). Because the number-of-rows statistic in
* pg_class may be updated more frequently than pg_statistic is, it's
* important to be able to describe such situations as a multiple of
* the number of rows, rather than a fixed number of distinct values.
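
A sketch of decoding the convention, given the table's current row estimate ntuples (cf. get_variable_numdistinct in the planner):

double      ndistinct;

if (stats->stadistinct > 0.0)
    ndistinct = stats->stadistinct;             /* absolute count */
else
    ndistinct = -stats->stadistinct * ntuples;  /* e.g. -0.5 of 1e6 rows
                                                 * means 500000 distinct */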
@@ -71,8 +71,8 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
/* ----------------
* To allow keeping statistics on different kinds of datatypes,
* we do not hard-wire any particular meaning for the remaining
- * statistical fields. Instead, we provide several "slots" in which
- * statistical data can be placed. Each slot includes:
+ * statistical fields. Instead, we provide several "slots" in which
+ * statistical data can be placed. Each slot includes:
* kind integer code identifying kind of data (see below)
* op OID of associated operator, if needed
* numbers float4 array (for statistical values)
@@ -105,7 +105,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
/*
* Values in these arrays are values of the column's data type, or of some
- * related type such as an array element type. We presently have to cheat
+ * related type such as an array element type. We presently have to cheat
* quite a bit to allow polymorphic arrays of this kind, but perhaps
* someday it'll be a less bogus facility.
*/
@@ -168,8 +168,8 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* operators.
*
* Code reading the pg_statistic relation should not assume that a particular
- * data "kind" will appear in any particular slot. Instead, search the
- * stakind fields to see if the desired data is available. (The standard
+ * data "kind" will appear in any particular slot. Instead, search the
+ * stakind fields to see if the desired data is available. (The standard
* function get_attstatsslot() may be used for this.)
*/
@@ -196,7 +196,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* the K most common non-null values appearing in the column, and stanumbers
* contains their frequencies (fractions of total row count). The values
* shall be ordered in decreasing frequency. Note that since the arrays are
- * variable-size, K may be chosen by the statistics collector. Values should
+ * variable-size, K may be chosen by the statistics collector. Values should
* not appear in MCV unless they have been observed to occur more than once;
* a unique column will have no MCV slot.
*/
@@ -208,13 +208,13 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* more than one histogram could appear, if a datatype has more than one
* useful sort operator.) stavalues contains M (>=2) non-null values that
* divide the non-null column data values into M-1 bins of approximately equal
- * population. The first stavalues item is the MIN and the last is the MAX.
+ * population. The first stavalues item is the MIN and the last is the MAX.
* stanumbers is not used and should be NULL. IMPORTANT POINT: if an MCV
* slot is also provided, then the histogram describes the data distribution
* *after removing the values listed in MCV* (thus, it's a "compressed
* histogram" in the technical parlance). This allows a more accurate
* representation of the distribution of a column with some very-common
- * values. In a column with only a few distinct values, it's possible that
+ * values. In a column with only a few distinct values, it's possible that
* the MCV list describes the entire data population; in this case the
* histogram reduces to empty and should be omitted.
*/
@@ -225,7 +225,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* of table tuples and the ordering of data values of this column, as seen
* by the "<" operator identified by staop. (As with the histogram, more
* than one entry could theoretically appear.) stavalues is not used and
- * should be NULL. stanumbers contains a single entry, the correlation
+ * should be NULL. stanumbers contains a single entry, the correlation
* coefficient between the sequence of data values and the sequence of
* their actual tuple positions. The coefficient ranges from +1 to -1.
*/
@@ -234,7 +234,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
/*
* A "most common elements" slot is similar to a "most common values" slot,
* except that it stores the most common non-null *elements* of the column
- * values. This is useful when the column datatype is an array or some other
+ * values. This is useful when the column datatype is an array or some other
* type with identifiable elements (for instance, tsvector). staop contains
* the equality operator appropriate to the element type. stavalues contains
* the most common element values, and stanumbers their frequencies. Unlike
@@ -258,7 +258,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
/*
* A "distinct elements count histogram" slot describes the distribution of
* the number of distinct element values present in each row of an array-type
- * column. Only non-null rows are considered, and only non-null elements.
+ * column. Only non-null rows are considered, and only non-null elements.
* staop contains the equality operator appropriate to the element type.
* stavalues is not used and should be NULL. The last member of stanumbers is
* the average count of distinct element values over all non-null rows. The
@@ -286,7 +286,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* Unlike a regular scalar histogram, this is actually two histograms combined
* into a single array, with the lower bounds of each value forming a
* histogram of lower bounds, and the upper bounds a histogram of upper
- * bounds. Only non-NULL, non-empty ranges are included.
+ * bounds. Only non-NULL, non-empty ranges are included.
*/
#define STATISTIC_KIND_BOUNDS_HISTOGRAM 7
diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h
index c9752c3655..600a2f76c6 100644
--- a/src/include/catalog/pg_trigger.h
+++ b/src/include/catalog/pg_trigger.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_trigger definition. cpp turns this into
+ * pg_trigger definition. cpp turns this into
* typedef struct FormData_pg_trigger
*
* Note: when tgconstraint is nonzero, tgconstrrelid, tgconstrindid,
diff --git a/src/include/catalog/pg_ts_dict.h b/src/include/catalog/pg_ts_dict.h
index 2409983d28..0968aeeada 100644
--- a/src/include/catalog/pg_ts_dict.h
+++ b/src/include/catalog/pg_ts_dict.h
@@ -24,7 +24,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_ts_dict definition. cpp turns this into
+ * pg_ts_dict definition. cpp turns this into
* typedef struct FormData_pg_ts_dict
* ----------------
*/
diff --git a/src/include/catalog/pg_ts_template.h b/src/include/catalog/pg_ts_template.h
index a2d7fb8395..504075b99b 100644
--- a/src/include/catalog/pg_ts_template.h
+++ b/src/include/catalog/pg_ts_template.h
@@ -24,7 +24,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_ts_template definition. cpp turns this into
+ * pg_ts_template definition. cpp turns this into
* typedef struct FormData_pg_ts_template
* ----------------
*/
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index dcdc740266..2798f62305 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -42,7 +42,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* For a fixed-size type, typlen is the number of bytes we use to
- * represent a value of this type, e.g. 4 for an int4. But for a
+ * represent a value of this type, e.g. 4 for an int4. But for a
* variable-length type, typlen is negative. We use -1 to indicate a
* "varlena" type (one that has a length word), -2 to indicate a
* null-terminated C string.
@@ -51,7 +51,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* typbyval determines whether internal Postgres routines pass a value of
- * this type by value or by reference. typbyval had better be FALSE if
+ * this type by value or by reference. typbyval had better be FALSE if
* the length is not 1, 2, or 4 (or 8 on 8-byte-Datum machines).
* Variable-length types are always passed by reference. Note that
* typbyval can be false even if the length would allow pass-by-value;
@@ -71,7 +71,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* typcategory and typispreferred help the parser distinguish preferred
* and non-preferred coercions. The category can be any single ASCII
- * character (but not \0). The categories used for built-in types are
+ * character (but not \0). The categories used for built-in types are
* identified by the TYPCATEGORY macros below.
*/
char typcategory; /* arbitrary type classification */
@@ -80,7 +80,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* If typisdefined is false, the entry is only a placeholder (forward
- * reference). We know the type name, but not yet anything else about it.
+ * reference). We know the type name, but not yet anything else about it.
*/
bool typisdefined;
@@ -141,7 +141,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
* 'd' = DOUBLE alignment (8 bytes on many machines, but by no means all).
*
* See include/access/tupmacs.h for the macros that compute these
- * alignment requirements. Note also that we allow the nominal alignment
+ * alignment requirements. Note also that we allow the nominal alignment
* to be violated when storing "packed" varlenas; the TOAST mechanism
* takes care of hiding that from most code.
*
@@ -176,7 +176,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* Domains use typbasetype to show the base (or domain) type that the
- * domain is based on. Zero if the type is not a domain.
+ * domain is based on. Zero if the type is not a domain.
*/
Oid typbasetype;
diff --git a/src/include/catalog/toasting.h b/src/include/catalog/toasting.h
index 0947760118..a4af551523 100644
--- a/src/include/catalog/toasting.h
+++ b/src/include/catalog/toasting.h
@@ -21,9 +21,9 @@
*/
extern void NewRelationCreateToastTable(Oid relOid, Datum reloptions);
extern void NewHeapCreateToastTable(Oid relOid, Datum reloptions,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
extern void AlterTableCreateToastTable(Oid relOid, Datum reloptions,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
extern void BootstrapToastTable(char *relName,
Oid toastOid, Oid toastIndexOid);
diff --git a/src/include/commands/comment.h b/src/include/commands/comment.h
index 1927d77a55..05fe0c6744 100644
--- a/src/include/commands/comment.h
+++ b/src/include/commands/comment.h
@@ -24,7 +24,7 @@
* related routines. CommentObject() implements the SQL "COMMENT ON"
* command. DeleteComments() deletes all comments for an object.
* CreateComments creates (or deletes, if comment is NULL) a comment
- * for a specific key. There are versions of these two methods for
+ * for a specific key. There are versions of these two methods for
* both normal and shared objects.
*------------------------------------------------------------------
*/
diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h
index 5c0518ce13..e55f45ab26 100644
--- a/src/include/commands/tablecmds.h
+++ b/src/include/commands/tablecmds.h
@@ -79,5 +79,5 @@ extern void RangeVarCallbackOwnsTable(const RangeVar *relation,
Oid relId, Oid oldRelId, void *arg);
extern void RangeVarCallbackOwnsRelation(const RangeVar *relation,
- Oid relId, Oid oldRelId, void *noCatalogs);
+ Oid relId, Oid oldRelId, void *noCatalogs);
#endif /* TABLECMDS_H */
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 058dc5f667..d33552a34b 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -25,12 +25,12 @@
/*----------
* ANALYZE builds one of these structs for each attribute (column) that is
- * to be analyzed. The struct and subsidiary data are in anl_context,
+ * to be analyzed. The struct and subsidiary data are in anl_context,
* so they live until the end of the ANALYZE operation.
*
* The type-specific typanalyze function is passed a pointer to this struct
* and must return TRUE to continue analysis, FALSE to skip analysis of this
- * column. In the TRUE case it must set the compute_stats and minrows fields,
+ * column. In the TRUE case it must set the compute_stats and minrows fields,
* and can optionally set extra_data to pass additional info to compute_stats.
* minrows is its request for the minimum number of sample rows to be gathered
* (but note this request might not be honored, eg if there are fewer rows
@@ -73,7 +73,7 @@ typedef struct VacAttrStats
* type-specific typanalyze function.
*
* Note: do not assume that the data being analyzed has the same datatype
- * shown in attr, ie do not trust attr->atttypid, attlen, etc. This is
+ * shown in attr, ie do not trust attr->atttypid, attlen, etc. This is
* because some index opclasses store a different type than the underlying
* column/expression. Instead use attrtypid, attrtypmod, and attrtype for
* information about the datatype being fed to the typanalyze function.
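To make that contract concrete, a skeleton typanalyze function might read as follows; all names are invented, but the required steps (set compute_stats and minrows, return TRUE to proceed) are the ones stated above:

    #include "postgres.h"
    #include "fmgr.h"
    #include "commands/vacuum.h"

    static void
    my_compute_stats(VacAttrStatsP stats, AnalyzeAttrFetchFunc fetchfunc,
                     int samplerows, double totalrows)
    {
        /* ... examine sample rows via fetchfunc, fill in stats fields ... */
    }

    /* Hypothetical typanalyze entry point for some datatype. */
    Datum
    my_typanalyze(PG_FUNCTION_ARGS)
    {
        VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);

        stats->minrows = 300;           /* sample-size request; may not be honored */
        stats->compute_stats = my_compute_stats;
        /* extra_data could carry private state through to compute_stats */
        PG_RETURN_BOOL(true);           /* false would skip this column */
    }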
diff --git a/src/include/common/fe_memutils.h b/src/include/common/fe_memutils.h
index 3da1891ef2..61c1b6fd2d 100644
--- a/src/include/common/fe_memutils.h
+++ b/src/include/common/fe_memutils.h
@@ -24,9 +24,11 @@ extern void *repalloc(void *pointer, Size size);
extern void pfree(void *pointer);
/* sprintf into a palloc'd buffer --- these are in psprintf.c */
-extern char *psprintf(const char *fmt,...)
+extern char *
+psprintf(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
-extern size_t pvsnprintf(char *buf, size_t len, const char *fmt, va_list args)
+extern size_t
+pvsnprintf(char *buf, size_t len, const char *fmt, va_list args)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 0)));
#endif /* FE_MEMUTILS_H */
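In use, psprintf() behaves like sprintf into a buffer palloc'd at exactly the needed size; the caller eventually pfree()s the result. A sketch with invented names:

    #include "postgres_fe.h"
    #include "common/fe_memutils.h"

    /* Hypothetical: build a message; the caller pfree()s the result. */
    static char *
    describe_relation(const char *relname, int natts)
    {
        return psprintf("relation \"%s\" has %d columns", relname, natts);
    }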
diff --git a/src/include/common/relpath.h b/src/include/common/relpath.h
index cdd9316f08..4010c720f8 100644
--- a/src/include/common/relpath.h
+++ b/src/include/common/relpath.h
@@ -54,7 +54,7 @@ extern char *GetRelationPath(Oid dbNode, Oid spcNode, Oid relNode,
int backendId, ForkNumber forkNumber);
/*
- * Wrapper macros for GetRelationPath. Beware of multiple
+ * Wrapper macros for GetRelationPath. Beware of multiple
* evaluation of the RelFileNode or RelFileNodeBackend argument!
*/
diff --git a/src/include/datatype/timestamp.h b/src/include/datatype/timestamp.h
index 7d24b25376..a33821fa61 100644
--- a/src/include/datatype/timestamp.h
+++ b/src/include/datatype/timestamp.h
@@ -83,7 +83,7 @@ typedef struct
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
@@ -109,7 +109,7 @@ typedef struct
* We allow numeric timezone offsets up to 15:59:59 either way from Greenwich.
* Currently, the record holders for wackiest offsets in actual use are zones
* Asia/Manila, at -15:56:00 until 1844, and America/Metlakatla, at +15:13:42
- * until 1867. If we were to reject such values we would fail to dump and
+ * until 1867. If we were to reject such values we would fail to dump and
* restore old timestamptz values with these zone settings.
*/
#define MAX_TZDISP_HOUR 15 /* maximum allowed hour part */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index eb78776a9d..5e4a15ca74 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -36,7 +36,7 @@
* REWIND indicates that the plan node should try to efficiently support
* rescans without parameter changes. (Nodes must support ExecReScan calls
* in any case, but if this flag was not given, they are at liberty to do it
- * through complete recalculation. Note that a parameter change forces a
+ * through complete recalculation. Note that a parameter change forces a
* full recalculation in any case.)
*
* BACKWARD indicates that the plan node must respect the es_direction flag.
@@ -51,7 +51,7 @@
* is responsible for there being a trigger context for them to be queued in.
*
* WITH/WITHOUT_OIDS tell the executor to emit tuples with or without space
- * for OIDs, respectively. These are currently used only for CREATE TABLE AS.
+ * for OIDs, respectively. These are currently used only for CREATE TABLE AS.
* If neither is set, the plan may or may not produce tuples including OIDs.
*/
#define EXEC_FLAG_EXPLAIN_ONLY 0x0001 /* EXPLAIN, no ANALYZE */
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 9d2e8ee8ea..3beae403ce 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -41,7 +41,7 @@
* If nbatch > 1 then tuples that don't belong in first batch get saved
* into inner-batch temp files. The same statements apply for the
* first scan of the outer relation, except we write tuples to outer-batch
- * temp files. After finishing the first scan, we do the following for
+ * temp files. After finishing the first scan, we do the following for
* each remaining batch:
* 1. Read tuples from inner batch file, load into hash buckets.
* 2. Read tuples from outer batch file, match to hash buckets and output.
@@ -132,7 +132,7 @@ typedef struct HashJoinTableData
/*
* These arrays are allocated for the life of the hash join, but only if
- * nbatch > 1. A file is opened only when we first write a tuple into it
+ * nbatch > 1. A file is opened only when we first write a tuple into it
* (otherwise its pointer remains NULL). Note that the zero'th array
* elements never get used, since we will process rather than dump out any
* tuples of batch zero.
diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h
index 1577a2b768..05fb73978a 100644
--- a/src/include/executor/spi_priv.h
+++ b/src/include/executor/spi_priv.h
@@ -50,7 +50,7 @@ typedef struct
* adequate locks to prevent other backends from messing with the tables.
*
* For a saved plan, the plancxt is made a child of CacheMemoryContext
- * since it should persist until explicitly destroyed. Likewise, the
+ * since it should persist until explicitly destroyed. Likewise, the
* plancache entries will be under CacheMemoryContext since we tell
* plancache.c to save them. We rely on plancache.c to keep the cache
* entries up-to-date as needed in the face of invalidation events.
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index aadbf6a2bd..faff25755e 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -34,7 +34,7 @@
*
* A "minimal" tuple is handled similarly to a palloc'd regular tuple.
* At present, minimal tuples never are stored in buffers, so there is no
- * parallel to case 1. Note that a minimal tuple has no "system columns".
+ * parallel to case 1. Note that a minimal tuple has no "system columns".
* (Actually, it could have an OID, but we have no need to access the OID.)
*
* A "virtual" tuple is an optimization used to minimize physical data
@@ -44,7 +44,7 @@
* a lower plan node's output TupleTableSlot, or to a function result
* constructed in a plan node's per-tuple econtext. It is the responsibility
* of the generating plan node to be sure these resources are not released
- * for as long as the virtual tuple needs to be valid. We only use virtual
+ * for as long as the virtual tuple needs to be valid. We only use virtual
* tuples in the result slots of plan nodes --- tuples to be copied anywhere
* else need to be "materialized" into physical tuples. Note also that a
* virtual tuple does not have any "system columns".
@@ -58,11 +58,11 @@
* payloads when this is the case.
*
* The Datum/isnull arrays of a TupleTableSlot serve double duty. When the
- * slot contains a virtual tuple, they are the authoritative data. When the
+ * slot contains a virtual tuple, they are the authoritative data. When the
* slot contains a physical tuple, the arrays contain data extracted from
* the tuple. (In this state, any pass-by-reference Datums point into
* the physical tuple.) The extracted information is built "lazily",
- * ie, only as needed. This serves to avoid repeated extraction of data
+ * ie, only as needed. This serves to avoid repeated extraction of data
* from the physical tuple.
*
* A TupleTableSlot can also be "empty", holding no valid data. This is
@@ -89,7 +89,7 @@
* buffer page.)
*
* tts_nvalid indicates the number of valid columns in the tts_values/isnull
- * arrays. When the slot is holding a "virtual" tuple this must be equal
+ * arrays. When the slot is holding a "virtual" tuple this must be equal
* to the descriptor's natts. When the slot is holding a physical tuple
* this is equal to the number of columns we have extracted (we always
* extract columns from left to right, so there are no holes).
@@ -103,7 +103,7 @@
* has only a minimal and not also a regular physical tuple, then tts_tuple
* points at tts_minhdr and the fields of that struct are set correctly
* for access to the minimal tuple; in particular, tts_minhdr.t_data points
- * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column
+ * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column
* extraction to treat the case identically to regular physical tuples.
*
* tts_slow/tts_off are saved state for slot_deform_tuple, and should not
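The lazy extraction described above is what slot_getattr() implements: it fills tts_values/tts_isnull only as far as the requested column. A sketch (the column number is invented):

    #include "postgres.h"
    #include "executor/tuptable.h"

    /* Hypothetical: fetch column 3 from whatever tuple form the slot holds. */
    static Datum
    fetch_third_column(TupleTableSlot *slot, bool *isnull)
    {
        /* extracts columns 1..3 into the slot's arrays if not already valid */
        return slot_getattr(slot, 3, isnull);
    }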
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index edb97f6fbd..267403c410 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -104,7 +104,7 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
/*
* This macro initializes all the fields of a FunctionCallInfoData except
- * for the arg[] and argnull[] arrays. Performance testing has shown that
+ * for the arg[] and argnull[] arrays. Performance testing has shown that
* the fastest way to set up argnull[] for small numbers of arguments is to
* explicitly set each required element to false, so we don't try to zero
* out the argnull[] array in the macro.
@@ -121,7 +121,7 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
/*
* This macro invokes a function given a filled-in FunctionCallInfoData
- * struct. The macro result is the returned Datum --- but note that
+ * struct. The macro result is the returned Datum --- but note that
* caller must still check fcinfo->isnull! Also, if function is strict,
* it is caller's responsibility to verify that no null arguments are present
* before calling.
@@ -170,11 +170,11 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
* which are varlena types). pg_detoast_datum() gives you either the input
* datum (if not toasted) or a detoasted copy allocated with palloc().
* pg_detoast_datum_copy() always gives you a palloc'd copy --- use it
- * if you need a modifiable copy of the input. Caller is expected to have
+ * if you need a modifiable copy of the input. Caller is expected to have
* checked for null inputs first, if necessary.
*
* pg_detoast_datum_packed() will return packed (1-byte header) datums
- * unmodified. It will still expand an externally toasted or compressed datum.
+ * unmodified. It will still expand an externally toasted or compressed datum.
* The resulting datum can be accessed using VARSIZE_ANY() and VARDATA_ANY()
* (beware of multiple evaluations in those macros!)
*
@@ -205,7 +205,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
pg_detoast_datum_packed((struct varlena *) DatumGetPointer(datum))
/*
- * Support for cleaning up detoasted copies of inputs. This must only
+ * Support for cleaning up detoasted copies of inputs. This must only
* be used for pass-by-ref datatypes, and normally would only be used
* for toastable types. If the given pointer is different from the
* original argument, assume it's a palloc'd detoasted copy, and pfree it.
@@ -322,7 +322,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
* Dynamically loaded functions may use either the version-1 ("new style")
* or version-0 ("old style") calling convention. Version 1 is the call
* convention defined in this header file; version 0 is the old "plain C"
- * convention. A version-1 function must be accompanied by the macro call
+ * convention. A version-1 function must be accompanied by the macro call
*
* PG_FUNCTION_INFO_V1(function_name);
*
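Putting the version-1 convention together with the detoast macros above, a minimal version-1 function could look like this (the function name is invented):

    #include "postgres.h"
    #include "fmgr.h"

    PG_FUNCTION_INFO_V1(my_text_octet_len);

    Datum
    my_text_octet_len(PG_FUNCTION_ARGS)
    {
        /* the _PP variant detoasts if needed, but may return a packed datum */
        text       *t = PG_GETARG_TEXT_PP(0);
        int32       len = VARSIZE_ANY_EXHDR(t);

        PG_FREE_IF_COPY(t, 0);      /* pfree the detoasted copy, if one was made */
        PG_RETURN_INT32(len);
    }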
@@ -504,8 +504,8 @@ extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation,
/* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially FunctionLookup() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially FunctionLookup() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the FunctionLookup() once and then use FunctionCallN().
*/
extern Datum OidFunctionCall0Coll(Oid functionId, Oid collation);
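The trade-off described above, sketched (the OID and argument are assumed to be at hand; the function name is invented):

    #include "postgres.h"
    #include "fmgr.h"

    static Datum
    one_shot_vs_cached(Oid procoid, Datum arg, int nloops)
    {
        FmgrInfo    flinfo;
        Datum       result;
        int         i;

        /* one-shot: FunctionLookup() and FunctionCallN() rolled together */
        result = OidFunctionCall1Coll(procoid, InvalidOid, arg);

        /* repeated calls: look the function up once, then reuse the FmgrInfo */
        fmgr_info(procoid, &flinfo);
        for (i = 0; i < nloops; i++)
            result = FunctionCall1Coll(&flinfo, InvalidOid, arg);

        return result;
    }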
@@ -663,7 +663,7 @@ extern fmExprContextPtr AggGetPerAggEContext(FunctionCallInfo fcinfo);
* We allow plugin modules to hook function entry/exit. This is intended
* as support for loadable security policy modules, which may want to
* perform additional privilege checks on function entry or exit, or to do
- * other internal bookkeeping. To make this possible, such modules must be
+ * other internal bookkeeping. To make this possible, such modules must be
* able not only to support normal function entry and exit, but also to trap
* the case where we bail out due to an error; and they must also be able to
* prevent inlining.
diff --git a/src/include/funcapi.h b/src/include/funcapi.h
index a3a12f7017..6590a088c9 100644
--- a/src/include/funcapi.h
+++ b/src/include/funcapi.h
@@ -129,7 +129,7 @@ typedef struct FuncCallContext
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result or the rowtype could not be
* determined. NB: the tupledesc should be copied if it is to be
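A sketch of the usual call pattern for get_call_result_type(), including the copy the NB above asks for (the wrapper name and error text are invented):

    #include "postgres.h"
    #include "funcapi.h"
    #include "access/tupdesc.h"

    /* Hypothetical: resolve the rowtype the calling context expects. */
    static TupleDesc
    expected_rowtype(FunctionCallInfo fcinfo)
    {
        Oid         resulttype;
        TupleDesc   tupdesc;

        if (get_call_result_type(fcinfo, &resulttype, &tupdesc) != TYPEFUNC_COMPOSITE)
            elog(ERROR, "function must return a composite type");

        /* copy it, since it must survive beyond the current call */
        return CreateTupleDescCopy(tupdesc);
    }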
diff --git a/src/include/lib/ilist.h b/src/include/lib/ilist.h
index 693e352896..f70474bd44 100644
--- a/src/include/lib/ilist.h
+++ b/src/include/lib/ilist.h
@@ -7,7 +7,7 @@
* lists that an object could be in. List links are embedded directly into
* the objects, and thus no extra memory management overhead is required.
* (Of course, if only a small proportion of existing objects are in a list,
- * the link fields in the remainder would be wasted space. But usually,
+ * the link fields in the remainder would be wasted space. But usually,
* it saves space to not have separately-allocated list nodes.)
*
* None of the functions here allocate any memory; they just manipulate
@@ -77,7 +77,7 @@
*
* While a simple iteration is useful, we sometimes also want to manipulate
* the list while iterating. There is a different iterator element and looping
- * construct for that. Suppose we want to delete tables that meet a certain
+ * construct for that. Suppose we want to delete tables that meet a certain
* criterion:
*
* dlist_mutable_iter miter;
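The hunk cuts the comment's example off after the iterator declaration; in ilist.h it continues with roughly the following loop over a hypothetical table list:

    dlist_mutable_iter miter;

    dlist_foreach_modify(miter, &db->tables)
    {
        table *tab = dlist_container(table, list_node, miter.cur);

        if (!tab->to_be_deleted)
            continue;               /* don't touch this one */

        dlist_delete(miter.cur);    /* deleting the current node is allowed */
        pfree(tab);
    }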
@@ -213,7 +213,7 @@ typedef struct slist_head
*
* It's allowed to modify the list while iterating, with the exception of
* deleting the iterator's current node; deletion of that node requires
- * care if the iteration is to be continued afterward. (Doing so and also
+ * care if the iteration is to be continued afterward. (Doing so and also
* deleting or inserting adjacent list elements might misbehave; also, if
* the user frees the current node's storage, continuing the iteration is
* not safe.)
@@ -233,7 +233,7 @@ typedef struct slist_iter
* iteration use the 'cur' member.
*
* The only list modification allowed while iterating is to remove the current
- * node via slist_delete_current() (*not* slist_delete()). Insertion or
+ * node via slist_delete_current() (*not* slist_delete()). Insertion or
* deletion of nodes adjacent to the current node would misbehave.
*/
typedef struct slist_mutable_iter
@@ -271,7 +271,7 @@ extern void slist_check(slist_head *head);
/*
* We want the functions below to be inline; but if the compiler doesn't
- * support that, fall back on providing them as regular functions. See
+ * support that, fall back on providing them as regular functions. See
* STATIC_IF_INLINE in c.h.
*/
#ifndef PG_USE_INLINE
@@ -574,7 +574,7 @@ dlist_tail_node(dlist_head *head)
/*
* We want the functions below to be inline; but if the compiler doesn't
- * support that, fall back on providing them as regular functions. See
+ * support that, fall back on providing them as regular functions. See
* STATIC_IF_INLINE in c.h.
*/
#ifndef PG_USE_INLINE
@@ -740,7 +740,7 @@ slist_delete_current(slist_mutable_iter *iter)
*
* It's allowed to modify the list while iterating, with the exception of
* deleting the iterator's current node; deletion of that node requires
- * care if the iteration is to be continued afterward. (Doing so and also
+ * care if the iteration is to be continued afterward. (Doing so and also
* deleting or inserting adjacent list elements might misbehave; also, if
* the user frees the current node's storage, continuing the iteration is
* not safe.)
@@ -758,7 +758,7 @@ slist_delete_current(slist_mutable_iter *iter)
* Access the current element with iter.cur.
*
* The only list modification allowed while iterating is to remove the current
- * node via slist_delete_current() (*not* slist_delete()). Insertion or
+ * node via slist_delete_current() (*not* slist_delete()). Insertion or
* deletion of nodes adjacent to the current node would misbehave.
*/
#define slist_foreach_modify(iter, lhead) \
diff --git a/src/include/lib/stringinfo.h b/src/include/lib/stringinfo.h
index e36c495f21..4fff10a70c 100644
--- a/src/include/lib/stringinfo.h
+++ b/src/include/lib/stringinfo.h
@@ -60,7 +60,7 @@ typedef StringInfoData *StringInfo;
*
* NOTE: some routines build up a string using StringInfo, and then
* release the StringInfoData but return the data string itself to their
- * caller. At that point the data string looks like a plain palloc'd
+ * caller. At that point the data string looks like a plain palloc'd
* string.
*-------------------------
*/
@@ -100,7 +100,7 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
/*------------------------
* appendStringInfoVA
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return zero; if not (because there's not enough space), return an estimate
* of the space needed, without modifying str. Typically the caller should
* pass the return value to enlargeStringInfo() before trying again; see
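The retry pattern this describes, written out (essentially what appendStringInfo() itself does internally; the wrapper name is invented):

    #include "postgres.h"
    #include "lib/stringinfo.h"

    static void
    append_formatted(StringInfo str, const char *fmt, ...)
    {
        for (;;)
        {
            va_list     args;
            int         needed;

            va_start(args, fmt);
            needed = appendStringInfoVA(str, fmt, args);
            va_end(args);

            if (needed == 0)
                break;                      /* it fit */

            enlargeStringInfo(str, needed); /* grow, then try again */
        }
    }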
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index dbf3a20ed9..e78c565b1e 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -98,10 +98,10 @@ typedef struct
extern int ssl_renegotiation_limit;
/*
- * This is used by the postmaster in its communication with frontends. It
+ * This is used by the postmaster in its communication with frontends. It
* contains all state information needed during this communication before the
- * backend is run. The Port structure is kept in malloc'd memory and is
- * still available when a backend is running (see MyProcPort). The data
+ * backend is run. The Port structure is kept in malloc'd memory and is
+ * still available when a backend is running (see MyProcPort). The data
* it points to must also be malloc'd, or else palloc'd in TopMemoryContext,
* so that it survives into PostgresMain execution!
*
@@ -137,7 +137,7 @@ typedef struct Port
/*
* Information that needs to be saved from the startup packet and passed
- * into backend execution. "char *" fields are NULL if not set.
+ * into backend execution. "char *" fields are NULL if not set.
* guc_options points to a List of alternating option names and values.
*/
char *database_name;
diff --git a/src/include/libpq/pqcomm.h b/src/include/libpq/pqcomm.h
index 969fe5e105..d68a197c29 100644
--- a/src/include/libpq/pqcomm.h
+++ b/src/include/libpq/pqcomm.h
@@ -75,7 +75,7 @@ typedef struct
/*
* The maximum workable length of a socket path is what will fit into
- * struct sockaddr_un. This is usually only 100 or so bytes :-(.
+ * struct sockaddr_un. This is usually only 100 or so bytes :-(.
*
* For consistency, always pass a MAXPGPATH-sized buffer to UNIXSOCK_PATH(),
* then complain if the resulting string is >= UNIXSOCK_PATH_BUFLEN bytes.
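Following that advice literally (all names invented; note the buffer is a local array of exactly MAXPGPATH bytes, as the comment requires):

    #include "postgres.h"
    #include "libpq/pqcomm.h"

    /* Sketch of the recommended pattern. */
    static void
    check_socket_path(int portNumber, const char *unixSocketDir)
    {
        char        sockpath[MAXPGPATH];    /* always MAXPGPATH, per above */

        UNIXSOCK_PATH(sockpath, portNumber, unixSocketDir);
        if (strlen(sockpath) >= UNIXSOCK_PATH_BUFLEN)
            elog(ERROR, "Unix-domain socket path \"%s\" is too long", sockpath);
    }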
@@ -153,7 +153,7 @@ extern bool Db_user_namespace;
/*
* In protocol 3.0 and later, the startup packet length is not fixed, but
- * we set an arbitrary limit on it anyway. This is just to prevent simple
+ * we set an arbitrary limit on it anyway. This is just to prevent simple
* denial-of-service attacks via sending enough data to run the server
* out of memory.
*/
diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h
index 389f9e1480..09085f23ae 100644
--- a/src/include/mb/pg_wchar.h
+++ b/src/include/mb/pg_wchar.h
@@ -10,7 +10,7 @@
*
* NOTES
* This is used both by the backend and by libpq, but should not be
- * included by libpq client programs. In particular, a libpq client
+ * included by libpq client programs. In particular, a libpq client
* should not assume that the encoding IDs used by the version of libpq
* it's linked to match up with the IDs declared here.
*
@@ -45,13 +45,13 @@ typedef unsigned int pg_wchar;
* MULE Internal Encoding (MIC)
*
* This encoding follows the design used within XEmacs; it is meant to
- * subsume many externally-defined character sets. Each character includes
+ * subsume many externally-defined character sets. Each character includes
* identification of the character set it belongs to, so the encoding is
* general but somewhat bulky.
*
* Currently PostgreSQL supports 5 types of MULE character sets:
*
- * 1) 1-byte ASCII characters. Each byte is below 0x80.
+ * 1) 1-byte ASCII characters. Each byte is below 0x80.
*
* 2) "Official" single byte charsets such as ISO-8859-1 (Latin1).
* Each MULE character consists of 2 bytes: LC1 + C1, where LC1 is
@@ -65,7 +65,7 @@ typedef unsigned int pg_wchar;
* LCPRV1 is either 0x9a (if LC12 is in the range 0xa0 to 0xdf)
* or 0x9b (if LC12 is in the range 0xe0 to 0xef).
*
- * 4) "Official" multibyte charsets such as JIS X0208. Each MULE
+ * 4) "Official" multibyte charsets such as JIS X0208. Each MULE
* character consists of 3 bytes: LC2 + C1 + C2, where LC2 is
* an identifier for the charset (in the range 0x90 to 0x99) and C1
* and C2 form the character code (each in the range 0xa0 to 0xff).
@@ -304,7 +304,7 @@ typedef enum pg_enc
/*
* Table for mapping an encoding number to official encoding name and
- * possibly other subsidiary data. Be careful to check encoding number
+ * possibly other subsidiary data. Be careful to check encoding number
* before accessing a table entry!
*
* if (PG_VALID_ENCODING(encoding))
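Completing the hint: the table lookup it alludes to reads along these lines (the .name field is from pg_enc2name in this header; the wrapper is invented):

    #include "postgres.h"
    #include "mb/pg_wchar.h"

    /* Illustrative: never index the table with an unchecked encoding number. */
    static const char *
    encoding_name_or_unknown(int encoding)
    {
        if (PG_VALID_ENCODING(encoding))
            return pg_enc2name_tbl[encoding].name;
        return "unknown";
    }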
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 0d61b82eb5..c2b786e666 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -39,7 +39,7 @@
* In both cases, we need to be able to clean up the current transaction
* gracefully, so we can't respond to the interrupt instantaneously ---
* there's no guarantee that internal data structures would be self-consistent
- * if the code is interrupted at an arbitrary instant. Instead, the signal
+ * if the code is interrupted at an arbitrary instant. Instead, the signal
* handlers set flags that are checked periodically during execution.
*
* The CHECK_FOR_INTERRUPTS() macro is called at strategically located spots
@@ -48,13 +48,13 @@
* might sometimes be called in contexts that do *not* want to allow a cancel
* or die interrupt. The HOLD_INTERRUPTS() and RESUME_INTERRUPTS() macros
* allow code to ensure that no cancel or die interrupt will be accepted,
- * even if CHECK_FOR_INTERRUPTS() gets called in a subroutine. The interrupt
+ * even if CHECK_FOR_INTERRUPTS() gets called in a subroutine. The interrupt
* will be held off until CHECK_FOR_INTERRUPTS() is done outside any
* HOLD_INTERRUPTS() ... RESUME_INTERRUPTS() section.
*
* Special mechanisms are used to let an interrupt be accepted when we are
* waiting for a lock or when we are waiting for command input (but, of
- * course, only if the interrupt holdoff counter is zero). See the
+ * course, only if the interrupt holdoff counter is zero). See the
* related code for details.
*
* A lost connection is handled similarly, although the loss of connection
@@ -65,7 +65,7 @@
* A related, but conceptually distinct, mechanism is the "critical section"
* mechanism. A critical section not only holds off cancel/die interrupts,
* but causes any ereport(ERROR) or ereport(FATAL) to become ereport(PANIC)
- * --- that is, a system-wide reset is forced. Needless to say, only really
+ * --- that is, a system-wide reset is forced. Needless to say, only really
* *critical* code should be marked as a critical section! Currently, this
* mechanism is only used for XLOG-related code.
*
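Schematically, the two mechanisms are used like this (the macros are the ones defined in this header; the work in between is placeholder):

    #include "postgres.h"
    #include "miscadmin.h"

    static void
    interrupt_handling_sketch(void)
    {
        /* defer cancel/die interrupts across a non-atomic update */
        HOLD_INTERRUPTS();
        /* ... touch data structures ... */
        RESUME_INTERRUPTS();

        CHECK_FOR_INTERRUPTS();     /* now service anything that arrived */

        /* critical section: any ERROR in here is promoted to PANIC */
        START_CRIT_SECTION();
        /* ... changes that must not be left half-done ... */
        END_CRIT_SECTION();
    }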
@@ -266,7 +266,7 @@ extern int trace_recovery(int trace_level);
/*****************************************************************************
* pdir.h -- *
- * POSTGRES directory path definitions. *
+ * POSTGRES directory path definitions. *
*****************************************************************************/
/* flags to be OR'd to form sec_context */
@@ -304,7 +304,7 @@ extern bool superuser_arg(Oid roleid); /* given user is superuser */
/*****************************************************************************
* pmod.h -- *
- * POSTGRES processing mode definitions. *
+ * POSTGRES processing mode definitions. *
*****************************************************************************/
/*
@@ -319,7 +319,7 @@ extern bool superuser_arg(Oid roleid); /* given user is superuser */
* is used during the initial generation of template databases.
*
* Initialization mode: used while starting a backend, until all normal
- * initialization is complete. Some code behaves differently when executed
+ * initialization is complete. Some code behaves differently when executed
* in this mode to enable system bootstrapping.
*
* If a POSTGRES backend process is in normal mode, then all code may be
@@ -351,7 +351,7 @@ extern ProcessingMode Mode;
/*
- * Auxiliary-process type identifiers. These used to be in bootstrap.h
+ * Auxiliary-process type identifiers. These used to be in bootstrap.h
* but it seems saner to have them here, with the ProcessingMode stuff.
* The MyAuxProcType global is defined and set in bootstrap.c.
*/
@@ -382,7 +382,7 @@ extern AuxProcType MyAuxProcType;
/*****************************************************************************
* pinit.h -- *
- * POSTGRES initialization and cleanup definitions. *
+ * POSTGRES initialization and cleanup definitions. *
*****************************************************************************/
/* in utils/init/postinit.c */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 6c94e8a7ae..0ab2a13697 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -88,14 +88,14 @@ typedef struct ExprContext_CB
*
* This class holds the "current context" information
* needed to evaluate expressions for doing tuple qualifications
- * and tuple projections. For example, if an expression refers
+ * and tuple projections. For example, if an expression refers
* to an attribute in the current inner tuple then we need to know
* what the current inner tuple is and so we look at the expression
* context.
*
* There are two memory contexts associated with an ExprContext:
* * ecxt_per_query_memory is a query-lifespan context, typically the same
- * context the ExprContext node itself is allocated in. This context
+ * context the ExprContext node itself is allocated in. This context
* can be used for purposes such as storing function call cache info.
* * ecxt_per_tuple_memory is a short-term context for expression results.
* As the name suggests, it will typically be reset once per tuple,
@@ -198,9 +198,9 @@ typedef struct ReturnSetInfo
* Nodes which need to do projections create one of these.
*
* ExecProject() evaluates the tlist, forms a tuple, and stores it
- * in the given slot. Note that the result will be a "virtual" tuple
+ * in the given slot. Note that the result will be a "virtual" tuple
* unless ExecMaterializeSlot() is then called to force it to be
- * converted to a physical tuple. The slot must have a tupledesc
+ * converted to a physical tuple. The slot must have a tupledesc
* that matches the output of the tlist!
*
* The planner very often produces tlists that consist entirely of
@@ -255,7 +255,7 @@ typedef struct ProjectionInfo
* in emitted tuples. For example, when we do an UPDATE query,
* the planner adds a "junk" entry to the targetlist so that the tuples
* returned to ExecutePlan() contain an extra attribute: the ctid of
- * the tuple to be updated. This is needed to do the update, but we
+ * the tuple to be updated. This is needed to do the update, but we
* don't want the ctid to be part of the stored new tuple! So, we
* apply a "junk filter" to remove the junk attributes and form the
* real output tuple. The junkfilter code also provides routines to
@@ -397,7 +397,7 @@ typedef struct EState
/*
* These fields are for re-evaluating plan quals when an updated tuple is
- * substituted in READ COMMITTED mode. es_epqTuple[] contains tuples that
+ * substituted in READ COMMITTED mode. es_epqTuple[] contains tuples that
* scan plan nodes should return instead of whatever they'd normally
* return, or NULL if nothing to return; es_epqTupleSet[] is true if a
* particular array entry is valid; and es_epqScanDone[] is state to
@@ -656,7 +656,7 @@ typedef struct FuncExprState
/*
* In some cases we need to compute a tuple descriptor for the function's
- * output. If so, it's stored here.
+ * output. If so, it's stored here.
*/
TupleDesc funcResultDesc;
bool funcReturnsTuple; /* valid when funcResultDesc isn't
@@ -680,7 +680,7 @@ typedef struct FuncExprState
/*
* Flag to remember whether we have registered a shutdown callback for
- * this FuncExprState. We do so only if funcResultStore or setArgsValid
+ * this FuncExprState. We do so only if funcResultStore or setArgsValid
* has been set at least once (since all the callback is for is to release
* the tuplestore or clear setArgsValid).
*/
@@ -1477,7 +1477,7 @@ typedef struct CteScanState
* WorkTableScanState information
*
* WorkTableScan nodes are used to scan the work table created by
- * a RecursiveUnion node. We locate the RecursiveUnion node
+ * a RecursiveUnion node. We locate the RecursiveUnion node
* during executor startup.
* ----------------
*/
@@ -1791,7 +1791,7 @@ typedef struct WindowAggState
* UniqueState information
*
* Unique nodes are used "on top of" sort nodes to discard
- * duplicate tuples returned from the sort phase. Basically
+ * duplicate tuples returned from the sort phase. Basically
* all it does is compare the current tuple from the subplan
* with the previously fetched tuple (stored in its result slot).
* If the two are identical in all interesting fields, then
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 5b8df59bc6..bc58e16525 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -575,7 +575,7 @@ typedef enum JoinType
/*
* Semijoins and anti-semijoins (as defined in relational theory) do not
* appear in the SQL JOIN syntax, but there are standard idioms for
- * representing them (e.g., using EXISTS). The planner recognizes these
+ * representing them (e.g., using EXISTS). The planner recognizes these
* cases and converts them to joins. So the planner and executor must
* support these codes. NOTE: in JOIN_SEMI output, it is unspecified
* which matching RHS row is joined to. In JOIN_ANTI output, the row is
@@ -599,7 +599,7 @@ typedef enum JoinType
/*
* OUTER joins are those for which pushed-down quals must behave differently
* from the join's own quals. This is in fact everything except INNER and
- * SEMI joins. However, this macro must also exclude the JOIN_UNIQUE symbols
+ * SEMI joins. However, this macro must also exclude the JOIN_UNIQUE symbols
* since those are temporary proxies for what will eventually be an INNER
* join.
*
diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h
index 47c39d2e6b..ae49c67d0c 100644
--- a/src/include/nodes/params.h
+++ b/src/include/nodes/params.h
@@ -22,20 +22,20 @@ struct ParseState;
* ParamListInfo
*
* ParamListInfo arrays are used to pass parameters into the executor
- * for parameterized plans. Each entry in the array defines the value
+ * for parameterized plans. Each entry in the array defines the value
* to be substituted for a PARAM_EXTERN parameter. The "paramid"
* of a PARAM_EXTERN Param can range from 1 to numParams.
*
* Although parameter numbers are normally consecutive, we allow
* ptype == InvalidOid to signal an unused array entry.
*
- * pflags is a flags field. Currently the only used bit is:
+ * pflags is a flags field. Currently the only used bit is:
* PARAM_FLAG_CONST signals the planner that it may treat this parameter
* as a constant (i.e., generate a plan that works only for this value
* of the parameter).
*
* There are two hook functions that can be associated with a ParamListInfo
- * array to support dynamic parameter handling. First, if paramFetch
+ * array to support dynamic parameter handling. First, if paramFetch
* isn't null and the executor requires a value for an invalid parameter
* (one with ptype == InvalidOid), the paramFetch hook is called to give
* it a chance to fill in the parameter value. Second, a parserSetup
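For the common case with no hooks, building such an array is just a palloc0 plus field assignments; a sketch for a single int4 PARAM_EXTERN parameter (the function name is invented):

    #include "postgres.h"
    #include "catalog/pg_type.h"
    #include "nodes/params.h"

    static ParamListInfo
    make_single_int4_param(Datum value)
    {
        ParamListInfo paramLI;

        paramLI = (ParamListInfo) palloc0(offsetof(ParamListInfoData, params) +
                                          1 * sizeof(ParamExternData));
        paramLI->numParams = 1;
        /* palloc0 leaves paramFetch/parserSetup NULL: no dynamic lookup */

        paramLI->params[0].value = value;
        paramLI->params[0].isnull = false;
        paramLI->params[0].pflags = PARAM_FLAG_CONST;   /* planner may fold it */
        paramLI->params[0].ptype = INT4OID;

        return paramLI;
    }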
@@ -85,7 +85,7 @@ typedef struct ParamListInfoData
* es_param_exec_vals or ecxt_param_exec_vals.
*
* If execPlan is not NULL, it points to a SubPlanState node that needs
- * to be executed to produce the value. (This is done so that we can have
+ * to be executed to produce the value. (This is done so that we can have
* lazy evaluation of InitPlans: they aren't executed until/unless a
* result value is needed.) Otherwise the value is assumed to be valid
* when needed.
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 18d4991008..7e560a19a3 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -128,7 +128,7 @@ typedef struct Query
List *targetList; /* target list (of TargetEntry) */
- List *withCheckOptions; /* a list of WithCheckOption's */
+ List *withCheckOptions; /* a list of WithCheckOption's */
List *returningList; /* return-values list (of TargetEntry) */
@@ -159,7 +159,7 @@ typedef struct Query
* Supporting data structures for Parse Trees
*
* Most of these node types appear in raw parsetrees output by the grammar,
- * and get transformed to something else by the analyzer. A few of them
+ * and get transformed to something else by the analyzer. A few of them
* are used as-is in transformed querytrees.
****************************************************************************/
@@ -173,7 +173,7 @@ typedef struct Query
* be prespecified in typemod, otherwise typemod is unused.
*
* If pct_type is TRUE, then names is actually a field name and we look up
- * the type of that field. Otherwise (the normal case), names is a type
+ * the type of that field. Otherwise (the normal case), names is a type
* name possibly qualified with schema and database name.
*/
typedef struct TypeName
@@ -192,7 +192,7 @@ typedef struct TypeName
/*
* ColumnRef - specifies a reference to a column, or possibly a whole tuple
*
- * The "fields" list must be nonempty. It can contain string Value nodes
+ * The "fields" list must be nonempty. It can contain string Value nodes
* (representing names) and A_Star nodes (representing occurrence of a '*').
* Currently, A_Star must appear only as the last list element --- the grammar
* is responsible for enforcing this!
@@ -468,7 +468,7 @@ typedef struct RangeSubselect
* RangeFunction - function call appearing in a FROM clause
*
* functions is a List because we use this to represent the construct
- * ROWS FROM(func1(...), func2(...), ...). Each element of this list is a
+ * ROWS FROM(func1(...), func2(...), ...). Each element of this list is a
* two-element sublist, the first element being the untransformed function
* call tree, and the second element being a possibly-empty list of ColumnDef
* nodes representing any columndef list attached to that function within the
@@ -497,7 +497,7 @@ typedef struct RangeFunction
* in either "raw" form (an untransformed parse tree) or "cooked" form
* (a post-parse-analysis, executable expression tree), depending on
* how this ColumnDef node was created (by parsing, or by inheritance
- * from an existing relation). We should never have both in the same node!
+ * from an existing relation). We should never have both in the same node!
*
* Similarly, we may have a COLLATE specification in either raw form
* (represented as a CollateClause with arg==NULL) or cooked form
@@ -569,7 +569,7 @@ typedef struct IndexElem
/*
* DefElem - a generic "name = value" option definition
*
- * In some contexts the name can be qualified. Also, certain SQL commands
+ * In some contexts the name can be qualified. Also, certain SQL commands
* allow a SET/ADD/DROP action to be attached to option settings, so it's
* convenient to carry a field for that too. (Note: currently, it is our
* practice that the grammar allows namespace and action only in statements
@@ -597,7 +597,7 @@ typedef struct DefElem
* LockingClause - raw representation of FOR [NO KEY] UPDATE/[KEY] SHARE
* options
*
- * Note: lockedRels == NIL means "all relations in query". Otherwise it
+ * Note: lockedRels == NIL means "all relations in query". Otherwise it
* is a list of RangeVar nodes. (We use RangeVar mainly because it carries
* a location field --- currently, parse analysis insists on unqualified
* names in LockingClause.)
@@ -661,8 +661,8 @@ typedef struct XmlSerialize
*
* In RELATION RTEs, the colnames in both alias and eref are indexed by
* physical attribute number; this means there must be colname entries for
- * dropped columns. When building an RTE we insert empty strings ("") for
- * dropped columns. Note however that a stored rule may have nonempty
+ * dropped columns. When building an RTE we insert empty strings ("") for
+ * dropped columns. Note however that a stored rule may have nonempty
* colnames for columns dropped since the rule was created (and for that
* matter the colnames might be out of date due to column renamings).
* The same comments apply to FUNCTION RTEs when a function's return type
@@ -670,9 +670,9 @@ typedef struct XmlSerialize
*
* In JOIN RTEs, the colnames in both alias and eref are one-to-one with
* joinaliasvars entries. A JOIN RTE will omit columns of its inputs when
- * those columns are known to be dropped at parse time. Again, however,
+ * those columns are known to be dropped at parse time. Again, however,
* a stored rule might contain entries for columns dropped since the rule
- * was created. (This is only possible for columns not actually referenced
+ * was created. (This is only possible for columns not actually referenced
* in the rule.) When loading a stored rule, we replace the joinaliasvars
* items for any such columns with null pointers. (We can't simply delete
* them from the joinaliasvars list, because that would affect the attnums
@@ -691,7 +691,7 @@ typedef struct XmlSerialize
* decompiled queries.
*
* requiredPerms and checkAsUser specify run-time access permissions
- * checks to be performed at query startup. The user must have *all*
+ * checks to be performed at query startup. The user must have *all*
* of the permissions that are OR'd together in requiredPerms (zero
* indicates no permissions checking). If checkAsUser is not zero,
* then do the permissions checks using the access rights of that user,
@@ -746,7 +746,7 @@ typedef struct RangeTblEntry
* Fields valid for a join RTE (else NULL/zero):
*
* joinaliasvars is a list of (usually) Vars corresponding to the columns
- * of the join result. An alias Var referencing column K of the join
+ * of the join result. An alias Var referencing column K of the join
* result can be replaced by the K'th element of joinaliasvars --- but to
* simplify the task of reverse-listing aliases correctly, we do not do
* that until planning time. In detail: an element of joinaliasvars can
@@ -843,9 +843,9 @@ typedef struct RangeTblFunction
typedef struct WithCheckOption
{
NodeTag type;
- char *viewname; /* name of view that specified the WCO */
- Node *qual; /* constraint qual to check */
- bool cascaded; /* true = WITH CASCADED CHECK OPTION */
+ char *viewname; /* name of view that specified the WCO */
+ Node *qual; /* constraint qual to check */
+ bool cascaded; /* true = WITH CASCADED CHECK OPTION */
} WithCheckOption;
/*
@@ -856,7 +856,7 @@ typedef struct WithCheckOption
* You might think that ORDER BY is only interested in defining ordering,
* and GROUP/DISTINCT are only interested in defining equality. However,
* one way to implement grouping is to sort and then apply a "uniq"-like
- * filter. So it's also interesting to keep track of possible sort operators
+ * filter. So it's also interesting to keep track of possible sort operators
* for GROUP/DISTINCT, and in particular to try to sort for the grouping
* in a way that will also yield a requested ORDER BY ordering. So we need
* to be able to compare ORDER BY and GROUP/DISTINCT lists, which motivates
@@ -876,15 +876,15 @@ typedef struct WithCheckOption
* here, but it's cheap to get it along with the sortop, and requiring it
* to be valid eases comparisons to grouping items.) Note that this isn't
* actually enough information to determine an ordering: if the sortop is
- * collation-sensitive, a collation OID is needed too. We don't store the
+ * collation-sensitive, a collation OID is needed too. We don't store the
* collation in SortGroupClause because it's not available at the time the
* parser builds the SortGroupClause; instead, consult the exposed collation
* of the referenced targetlist expression to find out what it is.
*
- * In a grouping item, eqop must be valid. If the eqop is a btree equality
+ * In a grouping item, eqop must be valid. If the eqop is a btree equality
* operator, then sortop should be set to a compatible ordering operator.
* We prefer to set eqop/sortop/nulls_first to match any ORDER BY item that
- * the query presents for the same tlist item. If there is none, we just
+ * the query presents for the same tlist item. If there is none, we just
* use the default ordering op for the datatype.
*
* If the tlist item's type has a hash opclass but no btree opclass, then
@@ -1140,7 +1140,7 @@ typedef struct SelectStmt
* range table. Its setOperations field shows the tree of set operations,
* with leaf SelectStmt nodes replaced by RangeTblRef nodes, and internal
* nodes replaced by SetOperationStmt nodes. Information about the output
- * column types is added, too. (Note that the child nodes do not necessarily
+ * column types is added, too. (Note that the child nodes do not necessarily
* produce these types directly, but we've checked that their output types
* can be coerced to the output column type.) Also, if it's not UNION ALL,
* information about the types' sort/group semantics is provided in the form
@@ -1446,7 +1446,7 @@ typedef struct AccessPriv
*
* Note: because of the parsing ambiguity with the GRANT <privileges>
* statement, granted_roles is a list of AccessPriv; the execution code
- * should complain if any column lists appear. grantee_roles is a list
+ * should complain if any column lists appear. grantee_roles is a list
* of role names, as Value strings.
* ----------------------
*/
@@ -1476,7 +1476,7 @@ typedef struct AlterDefaultPrivilegesStmt
* Copy Statement
*
* We support "COPY relation FROM file", "COPY relation TO file", and
- * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
+ * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
* and "query" must be non-NULL.
* ----------------------
*/
@@ -1575,7 +1575,7 @@ typedef struct CreateStmt
*
* If skip_validation is true then we skip checking that the existing rows
* in the table satisfy the constraint, and just install the catalog entries
- * for the constraint. A new FK constraint is marked as valid iff
+ * for the constraint. A new FK constraint is marked as valid iff
* initially_valid is true. (Usually skip_validation and initially_valid
* are inverses, but we can set both true if the table is known empty.)
*
@@ -1653,7 +1653,7 @@ typedef struct Constraint
char fk_upd_action; /* ON UPDATE action */
char fk_del_action; /* ON DELETE action */
List *old_conpfeqop; /* pg_constraint.conpfeqop of my former self */
- Oid old_pktable_oid; /* pg_constraint.confrelid of my former self */
+ Oid old_pktable_oid; /* pg_constraint.confrelid of my former self */
/* Fields used for constraints that allow a NOT VALID specification */
bool skip_validation; /* skip validation of existing rows? */
@@ -2094,7 +2094,7 @@ typedef struct SecLabelStmt
* Declare Cursor Statement
*
* Note: the "query" field of DeclareCursorStmt is only used in the raw grammar
- * output. After parse analysis it's set to null, and the Query points to the
+ * output. After parse analysis it's set to null, and the Query points to the
* DeclareCursorStmt, not vice versa.
* ----------------------
*/
@@ -2157,7 +2157,7 @@ typedef struct FetchStmt
*
* This represents creation of an index and/or an associated constraint.
* If isconstraint is true, we should create a pg_constraint entry along
- * with the index. But if indexOid isn't InvalidOid, we are not creating an
+ * with the index. But if indexOid isn't InvalidOid, we are not creating an
* index, just a UNIQUE/PKEY constraint using an existing index. isconstraint
* must always be true in this case, and the fields describing the index
* properties are empty.
@@ -2434,7 +2434,7 @@ typedef struct ViewStmt
Node *query; /* the SELECT query */
bool replace; /* replace an existing view? */
List *options; /* options from WITH clause */
- ViewCheckOption withCheckOption; /* WITH CHECK OPTION */
+ ViewCheckOption withCheckOption; /* WITH CHECK OPTION */
} ViewStmt;
/* ----------------------
@@ -2495,7 +2495,7 @@ typedef struct AlterSystemStmt
{
NodeTag type;
VariableSetStmt *setstmt; /* SET subcommand */
-} AlterSystemStmt;
+} AlterSystemStmt;
/* ----------------------
* Cluster Statement (support pbrown's cluster index implementation)
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 38c039c94c..3b9c683829 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -172,7 +172,7 @@ typedef struct ModifyTable
List *resultRelations; /* integer list of RT indexes */
int resultRelIndex; /* index of first resultRel in plan's list */
List *plans; /* plan(s) producing source data */
- List *withCheckOptionLists; /* per-target-table WCO lists */
+ List *withCheckOptionLists; /* per-target-table WCO lists */
List *returningLists; /* per-target-table RETURNING tlists */
List *fdwPrivLists; /* per-target-table FDW private data lists */
List *rowMarks; /* PlanRowMarks (non-locking only) */
@@ -231,7 +231,7 @@ typedef struct RecursiveUnion
* BitmapAnd node -
* Generate the intersection of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
@@ -245,7 +245,7 @@ typedef struct BitmapAnd
* BitmapOr node -
* Generate the union of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
@@ -279,7 +279,7 @@ typedef Scan SeqScan;
* in the same form it appeared in the query WHERE condition. Each should
* be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
* The indexkey is a Var or expression referencing column(s) of the index's
- * base table. The comparisonval might be any expression, but it won't use
+ * base table. The comparisonval might be any expression, but it won't use
* any columns of the base table. The expressions are ordered by index
* column position (but items referencing the same index column can appear
* in any order). indexqualorig is used at runtime only if we have to recheck
@@ -294,7 +294,7 @@ typedef Scan SeqScan;
* that are being implemented by the index, while indexorderby is modified to
* have index column Vars on the left-hand side. Here, multiple expressions
* must appear in exactly the ORDER BY order, and this is not necessarily the
- * index column order. Only the expressions are provided, not the auxiliary
+ * index column order. Only the expressions are provided, not the auxiliary
* sort-order information from the ORDER BY SortGroupClauses; it's assumed
* that the sort ordering is fully determinable from the top-level operators.
* indexorderbyorig is unused at run time, but is needed for EXPLAIN.
@@ -346,7 +346,7 @@ typedef struct IndexOnlyScan
* bitmap index scan node
*
* BitmapIndexScan delivers a bitmap of potential tuple locations;
- * it does not access the heap itself. The bitmap is used by an
+ * it does not access the heap itself. The bitmap is used by an
* ancestor BitmapHeapScan node, possibly after passing through
* intermediate BitmapAnd and/or BitmapOr nodes to combine it with
* the results of other BitmapIndexScans.
@@ -406,7 +406,7 @@ typedef struct TidScan
* purposes.
*
* Note: we store the sub-plan in the type-specific subplan field, not in
- * the generic lefttree field as you might expect. This is because we do
+ * the generic lefttree field as you might expect. This is because we do
* not want plan-tree-traversal routines to recurse into the subplan without
* knowing that they are changing Query contexts.
* ----------------
@@ -774,7 +774,7 @@ typedef struct Limit
* fortunately the case is not performance-critical in practice. Note that
* we use ROW_MARK_COPY for non-target foreign tables, even if the FDW has a
* concept of rowid and so could theoretically support some form of
- * ROW_MARK_REFERENCE. Although copying the whole row value is inefficient,
+ * ROW_MARK_REFERENCE. Although copying the whole row value is inefficient,
* it's probably still faster than doing a second remote fetch, so it doesn't
* seem worth the extra complexity to permit ROW_MARK_REFERENCE.
*/
@@ -795,7 +795,7 @@ typedef enum RowMarkType
* plan-time representation of FOR [KEY] UPDATE/SHARE clauses
*
* When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
- * PlanRowMark node for each non-target relation in the query. Relations that
+ * PlanRowMark node for each non-target relation in the query. Relations that
* are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
* regular tables) or ROW_MARK_COPY (if not).
*
@@ -841,7 +841,7 @@ typedef struct PlanRowMark
*
* We track the objects on which a PlannedStmt depends in two ways:
* relations are recorded as a simple list of OIDs, and everything else
- * is represented as a list of PlanInvalItems. A PlanInvalItem is designed
+ * is represented as a list of PlanInvalItems. A PlanInvalItem is designed
* to be used with the syscache invalidation mechanism, so it identifies a
* system catalog entry by cache ID and hash value.
*/
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 9cce60b33b..4f03ef9232 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -33,7 +33,7 @@
*
* Note: colnames is a list of Value nodes (always strings). In Alias structs
* associated with RTEs, there may be entries corresponding to dropped
- * columns; these are normally empty strings (""). See parsenodes.h for info.
+ * columns; these are normally empty strings (""). See parsenodes.h for info.
*/
typedef struct Alias
{
@@ -234,7 +234,7 @@ typedef struct Param
* ressortgroupref indexes to let them be referenced by SortGroupClause
* entries in the aggorder and/or aggdistinct lists. This represents ORDER BY
* and DISTINCT operations to be applied to the aggregate input rows before
- * they are passed to the transition function. The grammar only allows a
+ * they are passed to the transition function. The grammar only allows a
* simple "DISTINCT" specifier for the arguments, but we use the full
* query-level representation to allow more code sharing.
*
@@ -293,7 +293,7 @@ typedef struct WindowFunc
* entire new modified array value.
*
* If reflowerindexpr = NIL, then we are fetching or storing a single array
- * element at the subscripts given by refupperindexpr. Otherwise we are
+ * element at the subscripts given by refupperindexpr. Otherwise we are
* fetching or storing an array slice, that is a rectangular subarray
* with lower and upper bounds given by the index expressions.
* reflowerindexpr must be the same length as refupperindexpr when it
@@ -340,7 +340,7 @@ typedef enum CoercionContext
* NB: equal() ignores CoercionForm fields, therefore this *must* not carry
* any semantically significant information. We need that behavior so that
* the planner will consider equivalent implicit and explicit casts to be
- * equivalent. In cases where those actually behave differently, the coercion
+ * equivalent. In cases where those actually behave differently, the coercion
* function's arguments will be different.
*/
typedef enum CoercionForm
@@ -459,7 +459,7 @@ typedef struct ScalarArrayOpExpr
*
* Notice the arguments are given as a List. For NOT, of course the list
* must always have exactly one element. For AND and OR, the executor can
- * handle any number of arguments. The parser generally treats AND and OR
+ * handle any number of arguments. The parser generally treats AND and OR
* as binary and so it typically only produces two-element lists, but the
* optimizer will flatten trees of AND and OR nodes to produce longer lists
* when possible. There are also a few special cases where more arguments
@@ -482,7 +482,7 @@ typedef struct BoolExpr
* SubLink
*
* A SubLink represents a subselect appearing in an expression, and in some
- * cases also the combining operator(s) just above it. The subLinkType
+ * cases also the combining operator(s) just above it. The subLinkType
* indicates the form of the expression represented:
* EXISTS_SUBLINK EXISTS(SELECT ...)
* ALL_SUBLINK (lefthand) op ALL (SELECT ...)
@@ -509,7 +509,7 @@ typedef struct BoolExpr
*
* NOTE: in the raw output of gram.y, testexpr contains just the raw form
* of the lefthand expression (if any), and operName is the String name of
- * the combining operator. Also, subselect is a raw parsetree. During parse
+ * the combining operator. Also, subselect is a raw parsetree. During parse
* analysis, the parser transforms testexpr into a complete boolean expression
* that compares the lefthand value(s) to PARAM_SUBLINK nodes representing the
* output columns of the subselect. And subselect is transformed to a Query.
@@ -567,7 +567,7 @@ typedef struct SubLink
* list). In this case testexpr is NULL to avoid duplication.
*
* The planner also derives lists of the values that need to be passed into
- * and out of the subplan. Input values are represented as a list "args" of
+ * and out of the subplan. Input values are represented as a list "args" of
* expressions to be evaluated in the outer-query context (currently these
* args are always just Vars, but in principle they could be any expression).
* The values are assigned to the global PARAM_EXEC params indexed by parParam
@@ -658,7 +658,7 @@ typedef struct FieldSelect
* portion of a column.
*
* A single FieldStore can actually represent updates of several different
- * fields. The parser only generates FieldStores with single-element lists,
+ * fields. The parser only generates FieldStores with single-element lists,
* but the planner will collapse multiple updates of the same base column
* into one FieldStore.
* ----------------
@@ -790,7 +790,7 @@ typedef struct CollateExpr
* and the testexpr in the second case.
*
* In the raw grammar output for the second form, the condition expressions
- * of the WHEN clauses are just the comparison values. Parse analysis
+ * of the WHEN clauses are just the comparison values. Parse analysis
* converts these to valid boolean expressions of the form
* CaseTestExpr '=' compexpr
* where the CaseTestExpr node is a placeholder that emits the correct
@@ -864,22 +864,22 @@ typedef struct ArrayExpr
*
* Note: the list of fields must have a one-for-one correspondence with
* physical fields of the associated rowtype, although it is okay for it
- * to be shorter than the rowtype. That is, the N'th list element must
+ * to be shorter than the rowtype. That is, the N'th list element must
* match up with the N'th physical field. When the N'th physical field
* is a dropped column (attisdropped) then the N'th list element can just
- * be a NULL constant. (This case can only occur for named composite types,
+ * be a NULL constant. (This case can only occur for named composite types,
* not RECORD types, since those are built from the RowExpr itself rather
* than vice versa.) It is important not to assume that length(args) is
* the same as the number of columns logically present in the rowtype.
*
* colnames provides field names in cases where the names can't easily be
- * obtained otherwise. Names *must* be provided if row_typeid is RECORDOID.
+ * obtained otherwise. Names *must* be provided if row_typeid is RECORDOID.
* If row_typeid identifies a known composite type, colnames can be NIL to
* indicate the type's cataloged field names apply. Note that colnames can
* be non-NIL even for a composite type, and typically is when the RowExpr
* was created by expanding a whole-row Var. This is so that we can retain
* the column alias names of the RTE that the Var referenced (which would
- * otherwise be very difficult to extract from the parsetree). Like the
+ * otherwise be very difficult to extract from the parsetree). Like the
* args list, colnames is one-for-one with physical fields of the rowtype.
*/
typedef struct RowExpr
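
The colnames rule for RECORD rowtypes is easiest to see in construction code. A minimal sketch, not taken from this patch, of building an anonymous two-column row; a_expr and b_expr are hypothetical boolean-of-nothing-in-particular Exprs, and the field list follows the struct as described here:

    RowExpr *row = makeNode(RowExpr);

    row->args = list_make2(a_expr, b_expr);     /* hypothetical Exprs */
    row->row_typeid = RECORDOID;                /* anonymous rowtype... */
    row->row_format = COERCE_IMPLICIT_CAST;     /* ignored by equal(), per above */
    row->colnames = list_make2(makeString("a"), /* ...so names must be given */
                               makeString("b"));
    row->location = -1;
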
@@ -892,7 +892,7 @@ typedef struct RowExpr
* Note: we deliberately do NOT store a typmod. Although a typmod will be
* associated with specific RECORD types at runtime, it will differ for
* different backends, and so cannot safely be stored in stored
- * parsetrees. We must assume typmod -1 for a RowExpr node.
+ * parsetrees. We must assume typmod -1 for a RowExpr node.
*
* We don't need to store a collation either. The result type is
* necessarily composite, and composite types never have a collation.
@@ -978,7 +978,7 @@ typedef struct MinMaxExpr
* 'args' carries all other arguments.
*
* Note: result type/typmod/collation are not stored, but can be deduced
- * from the XmlExprOp. The type/typmod fields are just used for display
+ * from the XmlExprOp. The type/typmod fields are just used for display
* purposes, and are NOT necessarily the true result type of the node.
* (We also use type == InvalidOid to mark a not-yet-parse-analyzed XmlExpr.)
*/
@@ -1064,8 +1064,8 @@ typedef struct BooleanTest
*
* CoerceToDomain represents the operation of coercing a value to a domain
* type. At runtime (and not before) the precise set of constraints to be
- * checked will be determined. If the value passes, it is returned as the
- * result; if not, an error is raised. Note that this is equivalent to
+ * checked will be determined. If the value passes, it is returned as the
+ * result; if not, an error is raised. Note that this is equivalent to
* RelabelType in the scenario where no constraints are applied.
*/
typedef struct CoerceToDomain
@@ -1081,7 +1081,7 @@ typedef struct CoerceToDomain
/*
* Placeholder node for the value to be processed by a domain's check
- * constraint. This is effectively like a Param, but can be implemented more
+ * constraint. This is effectively like a Param, but can be implemented more
* simply since we need only one replacement value at a time.
*
* Note: the typeId/typeMod/collation will be set from the domain's base type,
@@ -1101,7 +1101,7 @@ typedef struct CoerceToDomainValue
* Placeholder node for a DEFAULT marker in an INSERT or UPDATE command.
*
* This is not an executable expression: it must be replaced by the actual
- * column default expression during rewriting. But it is convenient to
+ * column default expression during rewriting. But it is convenient to
* treat it as an expression node during parsing and rewriting.
*/
typedef struct SetToDefault
@@ -1143,14 +1143,14 @@ typedef struct CurrentOfExpr
* single expression tree.
*
* In a SELECT's targetlist, resno should always be equal to the item's
- * ordinal position (counting from 1). However, in an INSERT or UPDATE
+ * ordinal position (counting from 1). However, in an INSERT or UPDATE
* targetlist, resno represents the attribute number of the destination
* column for the item; so there may be missing or out-of-order resnos.
* It is even legal to have duplicated resnos; consider
* UPDATE table SET arraycol[1] = ..., arraycol[2] = ..., ...
* The two meanings come together in the executor, because the planner
* transforms INSERT/UPDATE tlists into a normalized form with exactly
- * one entry for each column of the destination table. Before that's
+ * one entry for each column of the destination table. Before that's
* happened, however, it is risky to assume that resno == position.
* Generally get_tle_by_resno() should be used rather than list_nth()
* to fetch tlist entries by resno, and only in SELECT should you assume
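
A hedged sketch of that lookup rule: for an INSERT/UPDATE tlist (here an assumed List *tlist), fetch the entry for destination column 3 through get_tle_by_resno() rather than by list position, since resnos may be missing or out of order before the planner normalizes the list.

    TargetEntry *tle = get_tle_by_resno(tlist, 3);

    if (tle == NULL)
        elog(ERROR, "no targetlist entry for resno 3");
    /* list_nth(tlist, 2) could silently fetch the wrong entry here */
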
@@ -1159,25 +1159,25 @@ typedef struct CurrentOfExpr
* resname is required to represent the correct column name in non-resjunk
* entries of top-level SELECT targetlists, since it will be used as the
* column title sent to the frontend. In most other contexts it is only
- * a debugging aid, and may be wrong or even NULL. (In particular, it may
+ * a debugging aid, and may be wrong or even NULL. (In particular, it may
* be wrong in a tlist from a stored rule, if the referenced column has been
- * renamed by ALTER TABLE since the rule was made. Also, the planner tends
+ * renamed by ALTER TABLE since the rule was made. Also, the planner tends
* to store NULL rather than look up a valid name for tlist entries in
* non-toplevel plan nodes.) In resjunk entries, resname should be either
* a specific system-generated name (such as "ctid") or NULL; anything else
* risks confusing ExecGetJunkAttribute!
*
* ressortgroupref is used in the representation of ORDER BY, GROUP BY, and
- * DISTINCT items. Targetlist entries with ressortgroupref=0 are not
+ * DISTINCT items. Targetlist entries with ressortgroupref=0 are not
* sort/group items. If ressortgroupref>0, then this item is an ORDER BY,
- * GROUP BY, and/or DISTINCT target value. No two entries in a targetlist
+ * GROUP BY, and/or DISTINCT target value. No two entries in a targetlist
* may have the same nonzero ressortgroupref --- but there is no particular
* meaning to the nonzero values, except as tags. (For example, one must
* not assume that lower ressortgroupref means a more significant sort key.)
 * The order of the associated SortGroupClause lists determines the semantics.
*
* resorigtbl/resorigcol identify the source of the column, if it is a
- * simple reference to a column of a base table (or view). If it is not
+ * simple reference to a column of a base table (or view). If it is not
* a simple reference, these fields are zeroes.
*
* If resjunk is true then the column is a working column (such as a sort key)
@@ -1217,7 +1217,7 @@ typedef struct TargetEntry
*
* NOTE: the qualification expressions present in JoinExpr nodes are
* *in addition to* the query's main WHERE clause, which appears as the
- * qual of the top-level FromExpr. The reason for associating quals with
+ * qual of the top-level FromExpr. The reason for associating quals with
* specific nodes in the jointree is that the position of a qual is critical
* when outer joins are present. (If we enforce a qual too soon or too late,
* that may cause the outer join to produce the wrong set of NULL-extended
@@ -1253,7 +1253,7 @@ typedef struct RangeTblRef
* If he writes NATURAL then parse analysis generates the equivalent USING()
* list, and from that fills in "quals" with the right equality comparisons.
* If he writes USING() then "quals" is filled with equality comparisons.
- * If he writes ON() then only "quals" is set. Note that NATURAL/USING
+ * If he writes ON() then only "quals" is set. Note that NATURAL/USING
* are not equivalent to ON() since they also affect the output column list.
*
* alias is an Alias node representing the AS alias-clause attached to the
@@ -1262,7 +1262,7 @@ typedef struct RangeTblRef
* restricts visibility of the tables/columns inside it.
*
* During parse analysis, an RTE is created for the Join, and its index
- * is filled into rtindex. This RTE is present mainly so that Vars can
+ * is filled into rtindex. This RTE is present mainly so that Vars can
* be created that refer to the outputs of the join. The planner sometimes
* generates JoinExprs internally; these can have rtindex = 0 if there are
* no join alias variables referencing such joins.
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index c607b36e3a..300136e80d 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -112,7 +112,7 @@ typedef struct PlannerGlobal
*
* This struct is conventionally called "root" in all the planner routines.
* It holds links to all of the planner's working state, in addition to the
- * original Query. Note that at present the planner extensively modifies
+ * original Query. Note that at present the planner extensively modifies
* the passed-in Query data structure; someday that should stop.
*----------
*/
@@ -132,7 +132,7 @@ typedef struct PlannerInfo
/*
* simple_rel_array holds pointers to "base rels" and "other rels" (see
- * comments for RelOptInfo for more info). It is indexed by rangetable
+ * comments for RelOptInfo for more info). It is indexed by rangetable
* index (so entry 0 is always wasted). Entries can be NULL when an RTE
* does not correspond to a base relation, such as a join RTE or an
* unreferenced view RTE; or if the RelOptInfo hasn't been made yet.
@@ -169,7 +169,7 @@ typedef struct PlannerInfo
* considered in this planning run. For small problems we just scan the
* list to do lookups, but when there are many join relations we build a
* hash table for faster lookups. The hash table is present and valid
- * when join_rel_hash is not NULL. Note that we still maintain the list
+ * when join_rel_hash is not NULL. Note that we still maintain the list
* even when using the hash table for lookups; this simplifies life for
* GEQO.
*/
@@ -291,7 +291,7 @@ typedef struct PlannerInfo
* Currently the only kind of otherrels are those made for member relations
* of an "append relation", that is an inheritance set or UNION ALL subquery.
* An append relation has a parent RTE that is a base rel, which represents
- * the entire append relation. The member RTEs are otherrels. The parent
+ * the entire append relation. The member RTEs are otherrels. The parent
* is present in the query join tree but the members are not. The member
* RTEs and otherrels are used to plan the scans of the individual tables or
* subqueries of the append set; then the parent baserel is given Append
@@ -303,7 +303,7 @@ typedef struct PlannerInfo
* alias Vars are expanded to non-aliased form during preprocess_expression.
*
* Parts of this data structure are specific to various scan and join
- * mechanisms. It didn't seem worth creating new node types for them.
+ * mechanisms. It didn't seem worth creating new node types for them.
*
* relids - Set of base-relation identifiers; it is a base relation
* if there is just one, a join relation if more than one
@@ -548,7 +548,7 @@ typedef struct IndexOptInfo
* equal to each other, where "equal" is according to the rules of the btree
* operator family(s) shown in ec_opfamilies, as well as the collation shown
* by ec_collation. (We restrict an EC to contain only equalities whose
- * operators belong to the same set of opfamilies. This could probably be
+ * operators belong to the same set of opfamilies. This could probably be
* relaxed, but for now it's not worth the trouble, since nearly all equality
* operators belong to only one btree opclass anyway. Similarly, we suppose
* that all or none of the input datatypes are collatable, so that a single
@@ -558,7 +558,7 @@ typedef struct IndexOptInfo
* us represent knowledge about different sort orderings being equivalent.
* Since every PathKey must reference an EquivalenceClass, we will end up
* with single-member EquivalenceClasses whenever a sort key expression has
- * not been equivalenced to anything else. It is also possible that such an
+ * not been equivalenced to anything else. It is also possible that such an
* EquivalenceClass will contain a volatile expression ("ORDER BY random()"),
* which is a case that can't arise otherwise since clauses containing
* volatile functions are never considered mergejoinable. We mark such
@@ -571,7 +571,7 @@ typedef struct IndexOptInfo
* We allow equality clauses appearing below the nullable side of an outer join
* to form EquivalenceClasses, but these have a slightly different meaning:
* the included values might be all NULL rather than all the same non-null
- * values. See src/backend/optimizer/README for more on that point.
+ * values. See src/backend/optimizer/README for more on that point.
*
* NB: if ec_merged isn't NULL, this class has been merged into another, and
* should be ignored in favor of using the pointed-to class.
@@ -607,7 +607,7 @@ typedef struct EquivalenceClass
*
* em_is_child signifies that this element was built by transposing a member
* for an appendrel parent relation to represent the corresponding expression
- * for an appendrel child. These members are used for determining the
+ * for an appendrel child. These members are used for determining the
* pathkeys of scans on the child relation and for explicitly sorting the
* child when necessary to build a MergeAppend path for the whole appendrel
* tree. An em_is_child member has no impact on the properties of the EC as a
@@ -621,7 +621,7 @@ typedef struct EquivalenceClass
*
* em_datatype is usually the same as exprType(em_expr), but can be
* different when dealing with a binary-compatible opfamily; in particular
- * anyarray_ops would never work without this. Use em_datatype when
+ * anyarray_ops would never work without this. Use em_datatype when
* looking up a specific btree operator to work with this expression.
*/
typedef struct EquivalenceMember
@@ -650,7 +650,7 @@ typedef struct EquivalenceMember
* information.)
*
* Note: pk_strategy is either BTLessStrategyNumber (for ASC) or
- * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
+ * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
* index types will use btree-compatible strategy numbers.
*/
typedef struct PathKey
@@ -701,7 +701,7 @@ typedef struct ParamPathInfo
* "param_info", if not NULL, links to a ParamPathInfo that identifies outer
* relation(s) that provide parameter values to each scan of this path.
* That means this path can only be joined to those rels by means of nestloop
- * joins with this path on the inside. Also note that a parameterized path
+ * joins with this path on the inside. Also note that a parameterized path
* is responsible for testing all "movable" joinclauses involving this rel
* and the specified outer rel(s).
*
@@ -809,7 +809,7 @@ typedef struct IndexPath
*
* The individual indexscans are represented by IndexPath nodes, and any
* logic on top of them is represented by a tree of BitmapAndPath and
- * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
+ * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
* to represent a regular (or index-only) index scan plan, and as the child
* of a BitmapHeapPath that represents scanning the same index using a
* BitmapIndexScan. The startup_cost and total_cost figures of an IndexPath
@@ -865,7 +865,7 @@ typedef struct TidPath
/*
* ForeignPath represents a potential scan of a foreign table
*
- * fdw_private stores FDW private data about the scan. While fdw_private is
+ * fdw_private stores FDW private data about the scan. While fdw_private is
* not actually touched by the core code during normal operations, it's
* generally a good idea to use a representation that can be dumped by
* nodeToString(), so that you can examine the structure during debugging
@@ -942,7 +942,7 @@ typedef struct MaterialPath
*
* This is unlike the other Path nodes in that it can actually generate
* different plans: either hash-based or sort-based implementation, or a
- * no-op if the input path can be proven distinct already. The decision
+ * no-op if the input path can be proven distinct already. The decision
* is sufficiently localized that it's not worth having separate Path node
* types. (Note: in the no-op case, we could eliminate the UniquePath node
* entirely and just return the subpath; but it's convenient to have a
@@ -1068,7 +1068,7 @@ typedef struct HashPath
* When we construct a join rel that includes all the base rels referenced
* in a multi-relation restriction clause, we place that clause into the
* joinrestrictinfo lists of paths for the join rel, if neither left nor
- * right sub-path includes all base rels referenced in the clause. The clause
+ * right sub-path includes all base rels referenced in the clause. The clause
* will be applied at that join level, and will not propagate any further up
* the join tree. (Note: the "predicate migration" code was once intended to
* push restriction clauses up and down the plan tree based on evaluation
@@ -1108,13 +1108,13 @@ typedef struct HashPath
* that appeared elsewhere in the tree and were pushed down to the join rel
* because they used no other rels. That's what the is_pushed_down flag is
* for; it tells us that a qual is not an OUTER JOIN qual for the set of base
- * rels listed in required_relids. A clause that originally came from WHERE
+ * rels listed in required_relids. A clause that originally came from WHERE
* or an INNER JOIN condition will *always* have its is_pushed_down flag set.
* It's possible for an OUTER JOIN clause to be marked is_pushed_down too,
* if we decide that it can be pushed down into the nullable side of the join.
* In that case it acts as a plain filter qual for wherever it gets evaluated.
* (In short, is_pushed_down is only false for non-degenerate outer join
- * conditions. Possibly we should rename it to reflect that meaning?)
+ * conditions. Possibly we should rename it to reflect that meaning?)
*
* RestrictInfo nodes also contain an outerjoin_delayed flag, which is true
* if the clause's applicability must be delayed due to any outer joins
@@ -1136,7 +1136,7 @@ typedef struct HashPath
* outer join(s). A clause that is not outerjoin_delayed can be enforced
* anywhere it is computable.
*
- * In general, the referenced clause might be arbitrarily complex. The
+ * In general, the referenced clause might be arbitrarily complex. The
* kinds of clauses we can handle as indexscan quals, mergejoin clauses,
* or hashjoin clauses are limited (e.g., no volatile functions). The code
* for each kind of path is responsible for identifying the restrict clauses
@@ -1161,7 +1161,7 @@ typedef struct HashPath
*
* The pseudoconstant flag is set true if the clause contains no Vars of
* the current query level and no volatile functions. Such a clause can be
- * pulled out and used as a one-time qual in a gating Result node. We keep
+ * pulled out and used as a one-time qual in a gating Result node. We keep
* pseudoconstant clauses in the same lists as other RestrictInfos so that
* the regular clause-pushing machinery can assign them to the correct join
* level, but they need to be treated specially for cost and selectivity
@@ -1171,7 +1171,7 @@ typedef struct HashPath
*
* When join clauses are generated from EquivalenceClasses, there may be
* several equally valid ways to enforce join equivalence, of which we need
- * apply only one. We mark clauses of this kind by setting parent_ec to
+ * apply only one. We mark clauses of this kind by setting parent_ec to
* point to the generating EquivalenceClass. Multiple clauses with the same
* parent_ec in the same join are redundant.
*/
@@ -1264,8 +1264,8 @@ typedef struct MergeScanSelCache
/*
* Placeholder node for an expression to be evaluated below the top level
- * of a plan tree. This is used during planning to represent the contained
- * expression. At the end of the planning process it is replaced by either
+ * of a plan tree. This is used during planning to represent the contained
+ * expression. At the end of the planning process it is replaced by either
* the contained expression or a Var referring to a lower-level evaluation of
* the contained expression. Typically the evaluation occurs below an outer
* join, and Var references above the outer join might thereby yield NULL
@@ -1289,9 +1289,9 @@ typedef struct PlaceHolderVar
* "Special join" info.
*
* One-sided outer joins constrain the order of joining partially but not
- * completely. We flatten such joins into the planner's top-level list of
+ * completely. We flatten such joins into the planner's top-level list of
* relations to join, but record information about each outer join in a
- * SpecialJoinInfo struct. These structs are kept in the PlannerInfo node's
+ * SpecialJoinInfo struct. These structs are kept in the PlannerInfo node's
* join_info_list.
*
* Similarly, semijoins and antijoins created by flattening IN (subselect)
@@ -1319,7 +1319,7 @@ typedef struct PlaceHolderVar
* to be evaluated after this join is formed (because it references the RHS).
* Any outer joins that have such a clause and this join in their RHS cannot
* commute with this join, because that would leave noplace to check the
- * pushed-down clause. (We don't track this for FULL JOINs, either.)
+ * pushed-down clause. (We don't track this for FULL JOINs, either.)
*
* join_quals is an implicit-AND list of the quals syntactically associated
* with the join (they may or may not end up being applied at the join level).
@@ -1379,7 +1379,7 @@ typedef struct SpecialJoinInfo
* If any LATERAL RTEs were flattened into the parent query, it is possible
* that the query now contains PlaceHolderVars containing lateral references,
* representing expressions that need to be evaluated at particular spots in
- * the jointree but contain lateral references to Vars from elsewhere. These
+ * the jointree but contain lateral references to Vars from elsewhere. These
* give rise to LateralJoinInfos in which lateral_rhs is the evaluation point
* of a PlaceHolderVar and lateral_lhs is the set of lateral rels it needs.
*/
@@ -1441,7 +1441,7 @@ typedef struct AppendRelInfo
/*
* For an inheritance appendrel, the parent and child are both regular
* relations, and we store their rowtype OIDs here for use in translating
- * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
+ * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
* both subqueries with no named rowtype, and we store InvalidOid here.
*/
Oid parent_reltype; /* OID of parent's composite type */
@@ -1453,14 +1453,14 @@ typedef struct AppendRelInfo
* used to translate Vars referencing the parent rel into references to
* the child. A list element is NULL if it corresponds to a dropped
* column of the parent (this is only possible for inheritance cases, not
- * UNION ALL). The list elements are always simple Vars for inheritance
+ * UNION ALL). The list elements are always simple Vars for inheritance
* cases, but can be arbitrary expressions in UNION ALL cases.
*
* Notice we only store entries for user columns (attno > 0). Whole-row
* Vars are special-cased, and system columns (attno < 0) need no special
* translation since their attnos are the same for all tables.
*
- * Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
+ * Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
* when copying into a subquery.
*/
List *translated_vars; /* Expressions in the child's Vars */
@@ -1477,7 +1477,7 @@ typedef struct AppendRelInfo
* For each distinct placeholder expression generated during planning, we
* store a PlaceHolderInfo node in the PlannerInfo node's placeholder_list.
* This stores info that is needed centrally rather than in each copy of the
- * PlaceHolderVar. The phid fields identify which PlaceHolderInfo goes with
+ * PlaceHolderVar. The phid fields identify which PlaceHolderInfo goes with
* each PlaceHolderVar. Note that phid is unique throughout a planner run,
* not just within a query level --- this is so that we need not reassign ID's
* when pulling a subquery into its parent.
@@ -1547,11 +1547,11 @@ typedef struct MinMaxAggInfo
*
* A Var: the slot represents a variable of this level that must be passed
* down because subqueries have outer references to it, or must be passed
- * from a NestLoop node to its inner scan. The varlevelsup value in the Var
+ * from a NestLoop node to its inner scan. The varlevelsup value in the Var
* will always be zero.
*
* A PlaceHolderVar: this works much like the Var case, except that the
- * entry is a PlaceHolderVar node with a contained expression. The PHV
+ * entry is a PlaceHolderVar node with a contained expression. The PHV
* will have phlevelsup = 0, and the contained expression is adjusted
* to match in level.
*
diff --git a/src/include/nodes/replnodes.h b/src/include/nodes/replnodes.h
index aac75fd102..0f94a40312 100644
--- a/src/include/nodes/replnodes.h
+++ b/src/include/nodes/replnodes.h
@@ -17,7 +17,8 @@
#include "access/xlogdefs.h"
#include "nodes/pg_list.h"
-typedef enum ReplicationKind {
+typedef enum ReplicationKind
+{
REPLICATION_KIND_PHYSICAL,
REPLICATION_KIND_LOGICAL
} ReplicationKind;
@@ -51,9 +52,9 @@ typedef struct BaseBackupCmd
typedef struct CreateReplicationSlotCmd
{
NodeTag type;
- char *slotname;
+ char *slotname;
ReplicationKind kind;
- char *plugin;
+ char *plugin;
} CreateReplicationSlotCmd;
@@ -64,7 +65,7 @@ typedef struct CreateReplicationSlotCmd
typedef struct DropReplicationSlotCmd
{
NodeTag type;
- char *slotname;
+ char *slotname;
} DropReplicationSlotCmd;
@@ -79,7 +80,7 @@ typedef struct StartReplicationCmd
char *slotname;
TimeLineID timeline;
XLogRecPtr startpoint;
- List *options;
+ List *options;
} StartReplicationCmd;
diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h
index d8cdceee2b..444d4d8ae3 100644
--- a/src/include/nodes/tidbitmap.h
+++ b/src/include/nodes/tidbitmap.h
@@ -26,7 +26,7 @@
/*
- * Actual bitmap representation is private to tidbitmap.c. Callers can
+ * Actual bitmap representation is private to tidbitmap.c. Callers can
* do IsA(x, TIDBitmap) on it, but nothing else.
*/
typedef struct TIDBitmap TIDBitmap;
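
The opaque-but-IsA-able arrangement works because the private struct in tidbitmap.c begins with a NodeTag; a sketch of the pattern (everything past the tag is a placeholder, not the real layout):

    struct TIDBitmap
    {
        NodeTag     type;       /* always T_TIDBitmap, so IsA() works */
        /* ... all remaining fields are private to tidbitmap.c ... */
    };
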
diff --git a/src/include/nodes/value.h b/src/include/nodes/value.h
index 8eef3e1fda..45e5ec3906 100644
--- a/src/include/nodes/value.h
+++ b/src/include/nodes/value.h
@@ -29,7 +29,7 @@
*
* (Before Postgres 7.0, we used a double to represent T_Float,
* but that creates loss-of-precision problems when the value is
- * ultimately destined to be converted to NUMERIC. Since Value nodes
+ * ultimately destined to be converted to NUMERIC. Since Value nodes
* are only used in the parsing process, not for runtime data, it's
* better to use the more general representation.)
*
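
The precision hazard is easy to reproduce outside the backend. A standalone C demonstration (not PostgreSQL code): a literal with more digits than a double can carry loses information that a later string-to-NUMERIC conversion would have kept.

    #include <stdio.h>

    int
    main(void)
    {
        double d = 0.10000000000000000000000001;   /* 26 significant digits */

        /* the double round-trips to roughly 0.1000000000000000055511...,
         * so converting d (rather than the original token) to NUMERIC
         * would bake in the binary rounding error */
        printf("%.25g\n", d);
        return 0;
    }
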
diff --git a/src/include/parser/gramparse.h b/src/include/parser/gramparse.h
index 871c310122..90b74cc312 100644
--- a/src/include/parser/gramparse.h
+++ b/src/include/parser/gramparse.h
@@ -29,7 +29,7 @@
#include "parser/gram.h"
/*
- * The YY_EXTRA data that a flex scanner allows us to pass around. Private
+ * The YY_EXTRA data that a flex scanner allows us to pass around. Private
* state needed for raw parsing/lexing goes here.
*/
typedef struct base_yy_extra_type
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 85598e8783..4ce802a128 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -87,7 +87,7 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param,
* links to current parse state of outer query.
*
* p_sourcetext: source string that generated the raw parsetree being
- * analyzed, or NULL if not available. (The string is used only to
+ * analyzed, or NULL if not available. (The string is used only to
* generate cursor positions in error messages: we need it to convert
* byte-wise locations in parse structures to character-wise cursor
* positions.)
@@ -116,7 +116,7 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param,
* is not an RTE, rather "visibility" means you could make an RTE from it.
*
* p_future_ctes: list of CommonTableExprs (WITH items) that are not yet
- * visible due to scope rules. This is used to help improve error messages.
+ * visible due to scope rules. This is used to help improve error messages.
*
* p_parent_cte: CommonTableExpr that immediately contains the current query,
* if any.
@@ -182,7 +182,7 @@ struct ParseState
*
* While processing the FROM clause, namespace items may appear with
* p_lateral_only set, meaning they are visible only to LATERAL
- * subexpressions. (The pstate's p_lateral_active flag tells whether we are
+ * subexpressions. (The pstate's p_lateral_active flag tells whether we are
* inside such a subexpression at the moment.) If p_lateral_ok is not set,
* it's an error to actually use such a namespace item. One might think it
* would be better to just exclude such items from visibility, but the wording
diff --git a/src/include/parser/scanner.h b/src/include/parser/scanner.h
index cf8a620f3d..1f2d185234 100644
--- a/src/include/parser/scanner.h
+++ b/src/include/parser/scanner.h
@@ -4,7 +4,7 @@
* API for the core scanner (flex machine)
*
* The core scanner is also used by PL/pgsql, so we provide a public API
- * for it. However, the rest of the backend is only expected to use the
+ * for it. However, the rest of the backend is only expected to use the
* higher-level API provided by parser.h.
*
*
@@ -58,7 +58,7 @@ typedef union core_YYSTYPE
/*
* The YY_EXTRA data that a flex scanner allows us to pass around.
- * Private state needed by the core scanner goes here. Note that the actual
+ * Private state needed by the core scanner goes here. Note that the actual
* yy_extra struct may be larger and have this as its first component, thus
* allowing the calling parser to keep some fields of its own in YY_EXTRA.
*/
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index 44c42bc96c..d1f99fbafe 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -3,7 +3,7 @@
*
* This file contains various configuration symbols and limits. In
* all cases, changing them is only useful in very rare situations or
- * for developers. If you edit any of these, be sure to do a *full*
+ * for developers. If you edit any of these, be sure to do a *full*
* rebuild (and an initdb if noted).
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
@@ -62,12 +62,12 @@
* may improve performance, but supplying a real spinlock implementation is
* probably far better.
*/
-#define NUM_SPINLOCK_SEMAPHORES 1024
+#define NUM_SPINLOCK_SEMAPHORES 1024
/*
* Define this if you want to allow the lo_import and lo_export SQL
- * functions to be executed by ordinary users. By default these
- * functions are only available to the Postgres superuser. CAUTION:
+ * functions to be executed by ordinary users. By default these
+ * functions are only available to the Postgres superuser. CAUTION:
* These functions are SECURITY HOLES since they can read and write
* any file that the PostgreSQL server has permission to access. If
* you turn this on, don't say we didn't warn you.
@@ -146,7 +146,7 @@
/*
* This is the default directory in which AF_UNIX socket files are
- * placed. Caution: changing this risks breaking your existing client
+ * placed. Caution: changing this risks breaking your existing client
* applications, which are likely to continue to look in the old
* directory. But if you just hate the idea of sockets in /tmp,
* here's where to twiddle it. You can also override this at runtime
@@ -159,7 +159,7 @@
* MAX_RANDOM_VALUE. Currently, all known implementations yield
* 0..2^31-1, so we just hardwire this constant. We could do a
* configure test if it proves to be necessary. CAUTION: Think not to
- * replace this with RAND_MAX. RAND_MAX defines the maximum value of
+ * replace this with RAND_MAX. RAND_MAX defines the maximum value of
* the older rand() function, which is often different from --- and
* considerably inferior to --- random().
*/
@@ -198,7 +198,7 @@
/*
* On PPC machines, decide whether to use LWSYNC instructions in place of
- * ISYNC and SYNC. This provides slightly better performance, but will
+ * ISYNC and SYNC. This provides slightly better performance, but will
* result in illegal-instruction failures on some pre-POWER4 machines.
* By default we use LWSYNC when building for 64-bit PPC, which should be
* safe in nearly all cases.
@@ -251,7 +251,7 @@
/*
* Define this to check memory allocation errors (scribbling on more
- * bytes than were allocated). Right now, this gets defined
+ * bytes than were allocated). Right now, this gets defined
* automatically if --enable-cassert or USE_VALGRIND.
*/
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
@@ -261,7 +261,7 @@
/*
* Define this to cause palloc()'d memory to be filled with random data, to
* facilitate catching code that depends on the contents of uninitialized
- * memory. Caution: this is horrendously expensive.
+ * memory. Caution: this is horrendously expensive.
*/
/* #define RANDOMIZE_ALLOCATED_MEMORY */
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 5f131f8272..d9de09fea0 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -129,7 +129,7 @@ typedef enum PgStat_Single_Reset_Type
*
* Many of the event counters are nontransactional, ie, we count events
* in committed and aborted transactions alike. For these, we just count
- * directly in the PgStat_TableStatus. However, delta_live_tuples,
+ * directly in the PgStat_TableStatus. However, delta_live_tuples,
* delta_dead_tuples, and changed_tuples must be derived from event counts
* with awareness of whether the transaction or subtransaction committed or
* aborted. Hence, we also keep a stack of per-(sub)transaction status
@@ -367,10 +367,10 @@ typedef struct PgStat_MsgAnalyze
*/
typedef struct PgStat_MsgArchiver
{
- PgStat_MsgHdr m_hdr;
- bool m_failed; /* Failed attempt */
- char m_xlog[MAX_XFN_CHARS + 1];
- TimestampTz m_timestamp;
+ PgStat_MsgHdr m_hdr;
+ bool m_failed; /* Failed attempt */
+ char m_xlog[MAX_XFN_CHARS + 1];
+ TimestampTz m_timestamp;
} PgStat_MsgArchiver;
/* ----------
@@ -636,10 +636,12 @@ typedef struct PgStat_StatFuncEntry
typedef struct PgStat_ArchiverStats
{
PgStat_Counter archived_count; /* archival successes */
- char last_archived_wal[MAX_XFN_CHARS + 1]; /* last WAL file archived */
- TimestampTz last_archived_timestamp; /* last archival success time */
- PgStat_Counter failed_count; /* failed archival attempts */
- char last_failed_wal[MAX_XFN_CHARS + 1]; /* WAL file involved in last failure */
+ char last_archived_wal[MAX_XFN_CHARS + 1]; /* last WAL file
+ * archived */
+ TimestampTz last_archived_timestamp; /* last archival success time */
+ PgStat_Counter failed_count; /* failed archival attempts */
+ char last_failed_wal[MAX_XFN_CHARS + 1]; /* WAL file involved in
+ * last failure */
TimestampTz last_failed_timestamp; /* last archival failure time */
TimestampTz stat_reset_timestamp;
} PgStat_ArchiverStats;
@@ -757,8 +759,8 @@ typedef struct LocalPgBackendStatus
TransactionId backend_xid;
/*
- * The xmin of the current session if available, InvalidTransactionId
- * if not.
+ * The xmin of the current session if available, InvalidTransactionId if
+ * not.
*/
TransactionId backend_xmin;
} LocalPgBackendStatus;
diff --git a/src/include/port.h b/src/include/port.h
index 21c8a05d0b..0c2b236921 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -130,7 +130,7 @@ extern unsigned char pg_ascii_tolower(unsigned char ch);
/*
* Versions of libintl >= 0.13 try to replace printf() and friends with
- * macros to their own versions that understand the %$ format. We do the
+ * macros to their own versions that understand the %$ format. We do the
* same, so disable their macros, if they exist.
*/
#ifdef vsnprintf
@@ -305,7 +305,7 @@ extern FILE *pgwin32_fopen(const char *, const char *);
* system() and popen() replacements to enclose the command in an extra
* pair of quotes.
*/
-extern int pgwin32_system(const char *command);
+extern int pgwin32_system(const char *command);
extern FILE *pgwin32_popen(const char *command, const char *type);
#define system(a) pgwin32_system(a)
diff --git a/src/include/port/linux.h b/src/include/port/linux.h
index bcaa42dc4e..7a6e46cdbb 100644
--- a/src/include/port/linux.h
+++ b/src/include/port/linux.h
@@ -4,7 +4,7 @@
* As of July 2007, all known versions of the Linux kernel will sometimes
* return EIDRM for a shmctl() operation when EINVAL is correct (it happens
* when the low-order 15 bits of the supplied shm ID match the slot number
- * assigned to a newer shmem segment). We deal with this by assuming that
+ * assigned to a newer shmem segment). We deal with this by assuming that
* EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
* since in fact Linux has no excuse for ever returning EIDRM; it doesn't
* track removed segments in a way that would allow distinguishing them from
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
index 974807f584..550c3ecff4 100644
--- a/src/include/port/win32.h
+++ b/src/include/port/win32.h
@@ -120,7 +120,7 @@
* Signal stuff
*
* For WIN32, there is no wait() call so there are no wait() macros
- * to interpret the return value of system(). Instead, system()
+ * to interpret the return value of system(). Instead, system()
* return values < 0x100 are used for exit() termination, and higher
 * values are used to indicate non-exit() termination, which is
* similar to a unix-style signal exit (think SIGSEGV ==
@@ -158,7 +158,7 @@
* NTSTATUS.H from the Windows NT DDK.
*
* Some day we might want to print descriptions for the most common
- * exceptions, rather than printing an include file name. We could use
+ * exceptions, rather than printing an include file name. We could use
* RtlNtStatusToDosError() and pass to FormatMessage(), which can print
* the text of error values, but MinGW does not support
* RtlNtStatusToDosError().
diff --git a/src/include/portability/instr_time.h b/src/include/portability/instr_time.h
index f353b7969e..91f38693f8 100644
--- a/src/include/portability/instr_time.h
+++ b/src/include/portability/instr_time.h
@@ -10,8 +10,8 @@
* high-precision-timing APIs on yet other platforms.
*
* The basic data type is instr_time, which all callers should treat as an
- * opaque typedef. instr_time can store either an absolute time (of
- * unspecified reference time) or an interval. The operations provided
+ * opaque typedef. instr_time can store either an absolute time (of
+ * unspecified reference time) or an interval. The operations provided
* for it are:
*
* INSTR_TIME_IS_ZERO(t) is t equal to zero?
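
A minimal usage sketch, assuming the remaining macros this header provides alongside INSTR_TIME_IS_ZERO (SET_CURRENT, SUBTRACT, GET_MILLISEC); do_work() stands in for a hypothetical workload:

    instr_time  start_t,
                elapsed;

    INSTR_TIME_SET_CURRENT(start_t);
    do_work();
    INSTR_TIME_SET_CURRENT(elapsed);
    INSTR_TIME_SUBTRACT(elapsed, start_t);      /* elapsed -= start_t */

    elog(LOG, "do_work took %.3f ms", INSTR_TIME_GET_MILLISEC(elapsed));
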
diff --git a/src/include/postgres.h b/src/include/postgres.h
index a8a206d988..00fbaaf91b 100644
--- a/src/include/postgres.h
+++ b/src/include/postgres.h
@@ -33,7 +33,7 @@
* in the backend environment, but are of no interest outside the backend.
*
* Simple type definitions live in c.h, where they are shared with
- * postgres_fe.h. We do that since those type definitions are needed by
+ * postgres_fe.h. We do that since those type definitions are needed by
* frontend modules that want to deal with binary data transmission to or
* from the backend. Type definitions in this file should be for
* representations that never escape the backend, such as Datum or
@@ -71,7 +71,7 @@ typedef struct varatt_external
int32 va_extsize; /* External saved size (doesn't) */
Oid va_valueid; /* Unique ID of value within TOAST table */
Oid va_toastrelid; /* RelID of TOAST table containing it */
-} varatt_external;
+} varatt_external;
/*
 * Out-of-line Datum that's stored in memory in contrast to varatt_external
@@ -83,7 +83,7 @@ typedef struct varatt_external
typedef struct varatt_indirect
{
struct varlena *pointer; /* Pointer to in-memory varlena */
-} varatt_indirect;
+} varatt_indirect;
/*
@@ -158,7 +158,7 @@ typedef struct
* The "xxx" bits are the length field (which includes itself in all cases).
* In the big-endian case we mask to extract the length, in the little-endian
* case we shift. Note that in both cases the flag bits are in the physically
- * first byte. Also, it is not possible for a 1-byte length word to be zero;
+ * first byte. Also, it is not possible for a 1-byte length word to be zero;
* this lets us disambiguate alignment padding bytes from the start of an
* unaligned datum. (We now *require* pad bytes to be filled with zero!)
*
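
A sketch of the little-endian decoding rule described above (the real macros appear elsewhere in this header; these stand-ins operate on a raw byte pointer): the low-order bit of the physically first byte flags a 1-byte header, and shifting that bit out yields the length, which can never be zero.

    static inline bool
    sketch_is_1b_header(const unsigned char *p)
    {
        return (p[0] & 0x01) == 0x01;   /* little-endian: flag in low bit */
    }

    static inline int
    sketch_1b_size(const unsigned char *p)
    {
        return (p[0] >> 1) & 0x7F;      /* length includes the header byte */
    }
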
diff --git a/src/include/postgres_ext.h b/src/include/postgres_ext.h
index 48d5dd31e5..74c344c704 100644
--- a/src/include/postgres_ext.h
+++ b/src/include/postgres_ext.h
@@ -7,7 +7,7 @@
* For example, the Oid type is part of the API of libpq and other libraries.
*
* Declarations which are specific to a particular interface should
- * go in the header file for that interface (such as libpq-fe.h). This
+ * go in the header file for that interface (such as libpq-fe.h). This
* file is only for fundamental Postgres declarations.
*
* User-written C functions don't count as "external to Postgres."
diff --git a/src/include/postmaster/bgworker.h b/src/include/postmaster/bgworker.h
index 78d6c0e09d..c9550cc887 100644
--- a/src/include/postmaster/bgworker.h
+++ b/src/include/postmaster/bgworker.h
@@ -6,7 +6,7 @@
* including normal transactions.
*
* Any external module loaded via shared_preload_libraries can register a
- * worker. Workers can also be registered dynamically at runtime. In either
+ * worker. Workers can also be registered dynamically at runtime. In either
* case, the worker process is forked from the postmaster and runs the
* user-supplied "main" function. This code may connect to a database and
* run transactions. Workers can remain active indefinitely, but will be
@@ -77,7 +77,7 @@ typedef enum
typedef struct BackgroundWorker
{
- char bgw_name[BGW_MAXLEN];
+ char bgw_name[BGW_MAXLEN];
int bgw_flags;
BgWorkerStartTime bgw_start_time;
int bgw_restart_time; /* in seconds, or BGW_NEVER_RESTART */
@@ -85,7 +85,7 @@ typedef struct BackgroundWorker
char bgw_library_name[BGW_MAXLEN]; /* only if bgw_main is NULL */
char bgw_function_name[BGW_MAXLEN]; /* only if bgw_main is NULL */
Datum bgw_main_arg;
- pid_t bgw_notify_pid; /* SIGUSR1 this backend on start/stop */
+ pid_t bgw_notify_pid; /* SIGUSR1 this backend on start/stop */
} BackgroundWorker;
typedef enum BgwHandleStatus
@@ -104,12 +104,13 @@ extern void RegisterBackgroundWorker(BackgroundWorker *worker);
/* Register a new bgworker from a regular backend */
extern bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker,
- BackgroundWorkerHandle **handle);
+ BackgroundWorkerHandle **handle);
/* Query the status of a bgworker */
extern BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle,
pid_t *pidp);
-extern BgwHandleStatus WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *
+extern BgwHandleStatus
+WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *
handle, pid_t *pid);
/* Terminate a bgworker */
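
A registration sketch built from the struct above, as it might appear in the _PG_init() of a library named in shared_preload_libraries; sketch_worker_main and the worker name are hypothetical:

    static void sketch_worker_main(Datum main_arg);     /* hypothetical */

    void
    _PG_init(void)
    {
        BackgroundWorker worker;

        memset(&worker, 0, sizeof(worker));
        snprintf(worker.bgw_name, BGW_MAXLEN, "sketch worker");
        worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
        worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
        worker.bgw_restart_time = BGW_NEVER_RESTART;
        worker.bgw_main = sketch_worker_main;   /* so library/function names stay unset */
        worker.bgw_notify_pid = 0;              /* no startup notification */

        RegisterBackgroundWorker(&worker);
    }
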
diff --git a/src/include/postmaster/bgworker_internals.h b/src/include/postmaster/bgworker_internals.h
index 117cebb436..55401860d8 100644
--- a/src/include/postmaster/bgworker_internals.h
+++ b/src/include/postmaster/bgworker_internals.h
@@ -20,13 +20,13 @@
* List of background workers, private to postmaster.
*
* A worker that requests a database connection during registration will have
- * rw_backend set, and will be present in BackendList. Note: do not rely on
+ * rw_backend set, and will be present in BackendList. Note: do not rely on
* rw_backend being non-NULL for shmem-connected workers!
*/
typedef struct RegisteredBgWorker
{
BackgroundWorker rw_worker; /* its registry entry */
- struct bkend *rw_backend; /* its BackendList entry, or NULL */
+ struct bkend *rw_backend; /* its BackendList entry, or NULL */
pid_t rw_pid; /* 0 if not running */
int rw_child_slot;
TimestampTz rw_crashed_at; /* if not 0, time it last crashed */
diff --git a/src/include/postmaster/syslogger.h b/src/include/postmaster/syslogger.h
index 12e6fe7c9f..da462cd700 100644
--- a/src/include/postmaster/syslogger.h
+++ b/src/include/postmaster/syslogger.h
@@ -20,7 +20,7 @@
* here is to divide long messages into chunks that are not more than
* PIPE_BUF bytes long, which according to POSIX spec must be written into
* the pipe atomically. The pipe reader then uses the protocol headers to
- * reassemble the parts of a message into a single string. The reader can
+ * reassemble the parts of a message into a single string. The reader can
* also cope with non-protocol data coming down the pipe, though we cannot
* guarantee long strings won't get split apart.
*
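
An illustrative chunking loop under the constraint just described; the 9-byte header size and its layout are stand-ins, not the real protocol struct (needs <unistd.h>, <string.h>, and <limits.h> for PIPE_BUF):

    #define SKETCH_HDR_LEN  9                   /* assumed header size */
    #define SKETCH_MAX_DATA (PIPE_BUF - SKETCH_HDR_LEN)

    static void
    sketch_write_chunked(int fd, const char *msg, int len)
    {
        while (len > 0)
        {
            char    buf[PIPE_BUF];
            int     n = (len > SKETCH_MAX_DATA) ? SKETCH_MAX_DATA : len;

            memset(buf, 0, SKETCH_HDR_LEN);
            buf[0] = (n == len);                /* stand-in "last chunk" flag */
            memcpy(buf + SKETCH_HDR_LEN, msg, n);

            /* per POSIX, a write() of <= PIPE_BUF bytes is atomic */
            (void) write(fd, buf, SKETCH_HDR_LEN + n);

            msg += n;
            len -= n;
        }
    }
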
diff --git a/src/include/regex/regcustom.h b/src/include/regex/regcustom.h
index 04849f291f..dbb461a0ce 100644
--- a/src/include/regex/regcustom.h
+++ b/src/include/regex/regcustom.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/include/regex/regex.h b/src/include/regex/regex.h
index 2c7fa4df46..3020b0ff0f 100644
--- a/src/include/regex/regex.h
+++ b/src/include/regex/regex.h
@@ -3,7 +3,7 @@
/*
* regular expressions
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/include/regex/regexport.h b/src/include/regex/regexport.h
index 7df1d8fda3..90a27c5442 100644
--- a/src/include/regex/regexport.h
+++ b/src/include/regex/regexport.h
@@ -9,7 +9,7 @@
*
* An NFA contains one or more states, numbered 0..N-1. There is an initial
* state, as well as a final state --- reaching the final state denotes
- * successful matching of an input string. Each state except the final one
+ * successful matching of an input string. Each state except the final one
* has some out-arcs that lead to successor states, each arc being labeled
* with a color that represents one or more concrete character codes.
* (The colors of a state's out-arcs need not be distinct, since this is an
diff --git a/src/include/regex/regguts.h b/src/include/regex/regguts.h
index 5361411481..7d5d85577d 100644
--- a/src/include/regex/regguts.h
+++ b/src/include/regex/regguts.h
@@ -1,7 +1,7 @@
/*
* Internal interface definitions, etc., for the reg package
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -126,8 +126,8 @@
/*
- * We dissect a chr into byts for colormap table indexing. Here we define
- * a byt, which will be the same as a byte on most machines... The exact
+ * We dissect a chr into byts for colormap table indexing. Here we define
+ * a byt, which will be the same as a byte on most machines... The exact
* size of a byt is not critical, but about 8 bits is good, and extraction
* of 8-bit chunks is sometimes especially fast.
*/
@@ -156,9 +156,9 @@ typedef int pcolor; /* what color promotes to */
/*
* A colormap is a tree -- more precisely, a DAG -- indexed at each level
- * by a byt of the chr, to map the chr to a color efficiently. Because
+ * by a byt of the chr, to map the chr to a color efficiently. Because
* lower sections of the tree can be shared, it can exploit the usual
- * sparseness of such a mapping table. The tree is always NBYTS levels
+ * sparseness of such a mapping table. The tree is always NBYTS levels
* deep (in the past it was shallower during construction but was "filled"
* to full depth at the end of that); areas that are unaltered as yet point
* to "fill blocks" which are entirely WHITE in color.
@@ -187,12 +187,12 @@ union tree
*
* If "sub" is not NOSUB then it is the number of the color's current
* subcolor, i.e. we are in process of dividing this color (character
- * equivalence class) into two colors. See src/backend/regex/README for
+ * equivalence class) into two colors. See src/backend/regex/README for
* discussion of subcolors.
*
* Currently-unused colors have the FREECOL bit set and are linked into a
* freelist using their "sub" fields, but only if their color numbers are
- * less than colormap.max. Any array entries beyond "max" are just garbage.
+ * less than colormap.max. Any array entries beyond "max" are just garbage.
*/
struct colordesc
{
diff --git a/src/include/replication/basebackup.h b/src/include/replication/basebackup.h
index 3dbc4bc9ef..988bce7f8b 100644
--- a/src/include/replication/basebackup.h
+++ b/src/include/replication/basebackup.h
@@ -17,8 +17,8 @@
/*
* Minimum and maximum values of MAX_RATE option in BASE_BACKUP command.
*/
-#define MAX_RATE_LOWER 32
-#define MAX_RATE_UPPER 1048576
+#define MAX_RATE_LOWER 32
+#define MAX_RATE_UPPER 1048576
extern void SendBaseBackup(BaseBackupCmd *cmd);
diff --git a/src/include/replication/decode.h b/src/include/replication/decode.h
index 7f55d789a2..d9e30776af 100644
--- a/src/include/replication/decode.h
+++ b/src/include/replication/decode.h
@@ -14,6 +14,6 @@
#include "replication/logical.h"
void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx,
- XLogRecord *record);
+ XLogRecord *record);
#endif
diff --git a/src/include/replication/logical.h b/src/include/replication/logical.h
index e65c8b8075..26be127bf7 100644
--- a/src/include/replication/logical.h
+++ b/src/include/replication/logical.h
@@ -69,32 +69,32 @@ typedef struct LogicalDecodingContext
/*
* State for writing output.
*/
- bool accept_writes;
- bool prepared_write;
- XLogRecPtr write_location;
+ bool accept_writes;
+ bool prepared_write;
+ XLogRecPtr write_location;
TransactionId write_xid;
} LogicalDecodingContext;
extern void CheckLogicalDecodingRequirements(void);
extern LogicalDecodingContext *CreateInitDecodingContext(char *plugin,
- List *output_plugin_options,
- XLogPageReadCB read_page,
- LogicalOutputPluginWriterPrepareWrite prepare_write,
- LogicalOutputPluginWriterWrite do_write);
+ List *output_plugin_options,
+ XLogPageReadCB read_page,
+ LogicalOutputPluginWriterPrepareWrite prepare_write,
+ LogicalOutputPluginWriterWrite do_write);
extern LogicalDecodingContext *CreateDecodingContext(
- XLogRecPtr start_lsn,
- List *output_plugin_options,
- XLogPageReadCB read_page,
- LogicalOutputPluginWriterPrepareWrite prepare_write,
- LogicalOutputPluginWriterWrite do_write);
+ XLogRecPtr start_lsn,
+ List *output_plugin_options,
+ XLogPageReadCB read_page,
+ LogicalOutputPluginWriterPrepareWrite prepare_write,
+ LogicalOutputPluginWriterWrite do_write);
extern void DecodingContextFindStartpoint(LogicalDecodingContext *ctx);
extern bool DecodingContextReady(LogicalDecodingContext *ctx);
extern void FreeDecodingContext(LogicalDecodingContext *ctx);
extern void LogicalIncreaseXminForSlot(XLogRecPtr lsn, TransactionId xmin);
extern void LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn,
- XLogRecPtr restart_lsn);
+ XLogRecPtr restart_lsn);
extern void LogicalConfirmReceivedLocation(XLogRecPtr lsn);
#endif
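
A hedged usage sketch of the second constructor, roughly how a consumer might resume decoding on a slot it has already created and acquired. The three callbacks are hypothetical functions matching the prototype above, and passing InvalidXLogRecPtr to continue from the slot's confirmed position is an assumption here, not something this header states:

    LogicalDecodingContext *ctx;

    CheckLogicalDecodingRequirements();

    ctx = CreateDecodingContext(InvalidXLogRecPtr,  /* resume; don't seek */
                                NIL,                /* no plugin options */
                                read_page_cb,
                                prepare_write_cb,
                                write_cb);

    /* ... read and decode WAL records ... */

    FreeDecodingContext(ctx);
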
diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h
index c47c24c8db..a58e68d30a 100644
--- a/src/include/replication/output_plugin.h
+++ b/src/include/replication/output_plugin.h
@@ -32,7 +32,7 @@ typedef struct OutputPluginOptions
* Type of the shared library symbol _PG_output_plugin_init that is looked up
* when loading an output plugin shared library.
*/
-typedef void (*LogicalOutputPluginInit)(struct OutputPluginCallbacks *cb);
+typedef void (*LogicalOutputPluginInit) (struct OutputPluginCallbacks *cb);
/*
* Callback that gets called in a user-defined plugin. ctx->private_data can
@@ -43,8 +43,8 @@ typedef void (*LogicalOutputPluginInit)(struct OutputPluginCallbacks *cb);
*/
typedef void (*LogicalDecodeStartupCB) (
struct LogicalDecodingContext *ctx,
- OutputPluginOptions *options,
- bool is_init
+ OutputPluginOptions *options,
+ bool is_init
);
/*
@@ -92,7 +92,7 @@ typedef struct OutputPluginCallbacks
LogicalDecodeShutdownCB shutdown_cb;
} OutputPluginCallbacks;
-void OutputPluginPrepareWrite(struct LogicalDecodingContext *ctx, bool last_write);
-void OutputPluginWrite(struct LogicalDecodingContext *ctx, bool last_write);
+void OutputPluginPrepareWrite(struct LogicalDecodingContext *ctx, bool last_write);
+void OutputPluginWrite(struct LogicalDecodingContext *ctx, bool last_write);
#endif /* OUTPUT_PLUGIN_H */
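
A sketch of a plugin library's entry point matching the typedef above; the callback implementations are hypothetical, and the begin/change/commit members are assumed from the rest of OutputPluginCallbacks (only startup_cb and shutdown_cb are visible in this hunk):

    void
    _PG_output_plugin_init(OutputPluginCallbacks *cb)
    {
        cb->startup_cb = sketch_startup;        /* hypothetical callbacks */
        cb->begin_cb = sketch_begin_txn;        /* assumed struct member */
        cb->change_cb = sketch_change;          /* assumed struct member */
        cb->commit_cb = sketch_commit_txn;      /* assumed struct member */
        cb->shutdown_cb = sketch_shutdown;
    }
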
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 04ff002990..eaea5884ef 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -1,6 +1,6 @@
/*
* reorderbuffer.h
- * PostgreSQL logical replay/reorder buffer management.
+ * PostgreSQL logical replay/reorder buffer management.
*
* Copyright (c) 2012-2014, PostgreSQL Global Development Group
*
@@ -79,7 +79,7 @@ typedef struct ReorderBufferChange
ReorderBufferTupleBuf *oldtuple;
/* valid for INSERT || UPDATE */
ReorderBufferTupleBuf *newtuple;
- } tp;
+ } tp;
/* New snapshot, set when action == *_INTERNAL_SNAPSHOT */
Snapshot snapshot;
@@ -102,7 +102,7 @@ typedef struct ReorderBufferChange
CommandId cmax;
CommandId combocid;
} tuplecid;
- } data;
+ } data;
/*
* While in use this is how a change is linked into a transactions,
@@ -161,7 +161,7 @@ typedef struct ReorderBufferTXN
/*
* Commit time, only known when we read the actual commit record.
*/
- TimestampTz commit_time;
+ TimestampTz commit_time;
/*
* Base snapshot or NULL.
@@ -329,12 +329,12 @@ ReorderBufferChange *ReorderBufferGetChange(ReorderBuffer *);
void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *);
void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *);
-void ReorderBufferCommit(ReorderBuffer *, TransactionId,
- XLogRecPtr commit_lsn, XLogRecPtr end_lsn,
- TimestampTz commit_time);
+void ReorderBufferCommit(ReorderBuffer *, TransactionId,
+ XLogRecPtr commit_lsn, XLogRecPtr end_lsn,
+ TimestampTz commit_time);
void ReorderBufferAssignChild(ReorderBuffer *, TransactionId, TransactionId, XLogRecPtr commit_lsn);
-void ReorderBufferCommitChild(ReorderBuffer *, TransactionId, TransactionId,
- XLogRecPtr commit_lsn, XLogRecPtr end_lsn);
+void ReorderBufferCommitChild(ReorderBuffer *, TransactionId, TransactionId,
+ XLogRecPtr commit_lsn, XLogRecPtr end_lsn);
void ReorderBufferAbort(ReorderBuffer *, TransactionId, XLogRecPtr lsn);
void ReorderBufferAbortOld(ReorderBuffer *, TransactionId xid);
void ReorderBufferForget(ReorderBuffer *, TransactionId, XLogRecPtr lsn);
diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h
index c354c9133b..341e829bbb 100644
--- a/src/include/replication/slot.h
+++ b/src/include/replication/slot.h
@@ -96,11 +96,11 @@ typedef struct ReplicationSlot
* data that's still needed for decoding purposes, even after a crash;
* otherwise, decoding will produce wrong answers. Ordinary streaming
* replication also needs to prevent old row versions from being removed
- * too soon, but the worst consequence we might encounter there is unwanted
- * query cancellations on the standby. Thus, for logical decoding,
- * this value represents the latest xmin that has actually been
- * written to disk, whereas for streaming replication, it's just the
- * same as the persistent value (data.xmin).
+ * too soon, but the worst consequence we might encounter there is
+ * unwanted query cancellations on the standby. Thus, for logical
+ * decoding, this value represents the latest xmin that has actually been
+ * written to disk, whereas for streaming replication, it's just the same
+ * as the persistent value (data.xmin).
*/
TransactionId effective_xmin;
TransactionId effective_catalog_xmin;
@@ -148,7 +148,7 @@ extern void ReplicationSlotsShmemInit(void);
/* management of individual slots */
extern void ReplicationSlotCreate(const char *name, bool db_specific,
- ReplicationSlotPersistency p);
+ ReplicationSlotPersistency p);
extern void ReplicationSlotPersist(void);
extern void ReplicationSlotDrop(const char *name);
@@ -175,4 +175,4 @@ extern Datum pg_create_logical_replication_slot(PG_FUNCTION_ARGS);
extern Datum pg_drop_replication_slot(PG_FUNCTION_ARGS);
extern Datum pg_get_replication_slots(PG_FUNCTION_ARGS);
-#endif /* SLOT_H */
+#endif /* SLOT_H */
diff --git a/src/include/replication/snapbuild.h b/src/include/replication/snapbuild.h
index 087c0e510d..e5d61ff3c4 100644
--- a/src/include/replication/snapbuild.h
+++ b/src/include/replication/snapbuild.h
@@ -54,7 +54,7 @@ struct xl_running_xacts;
extern void CheckPointSnapBuild(void);
extern SnapBuild *AllocateSnapshotBuilder(struct ReorderBuffer *cache,
- TransactionId xmin_horizon, XLogRecPtr start_lsn);
+ TransactionId xmin_horizon, XLogRecPtr start_lsn);
extern void FreeSnapshotBuilder(SnapBuild *cache);
extern void SnapBuildSnapDecRefcount(Snapshot snap);
@@ -67,17 +67,17 @@ extern SnapBuildState SnapBuildCurrentState(SnapBuild *snapstate);
extern bool SnapBuildXactNeedsSkip(SnapBuild *snapstate, XLogRecPtr ptr);
extern void SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn,
- TransactionId xid, int nsubxacts,
- TransactionId *subxacts);
+ TransactionId xid, int nsubxacts,
+ TransactionId *subxacts);
extern void SnapBuildAbortTxn(SnapBuild *builder, XLogRecPtr lsn,
- TransactionId xid, int nsubxacts,
- TransactionId *subxacts);
+ TransactionId xid, int nsubxacts,
+ TransactionId *subxacts);
extern bool SnapBuildProcessChange(SnapBuild *builder, TransactionId xid,
- XLogRecPtr lsn);
+ XLogRecPtr lsn);
extern void SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid,
- XLogRecPtr lsn, struct xl_heap_new_cid *cid);
+ XLogRecPtr lsn, struct xl_heap_new_cid *cid);
extern void SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn,
- struct xl_running_xacts *running);
+ struct xl_running_xacts *running);
extern void SnapBuildSerializationPoint(SnapBuild *builder, XLogRecPtr lsn);
#endif /* SNAPBUILD_H */
diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h
index 3d9401059b..7a249f14ca 100644
--- a/src/include/replication/walreceiver.h
+++ b/src/include/replication/walreceiver.h
@@ -70,7 +70,7 @@ typedef struct
/*
* receivedUpto-1 is the last byte position that has already been
- * received, and receivedTLI is the timeline it came from. At the first
+ * received, and receivedTLI is the timeline it came from. At the first
* startup of walreceiver, these are set to receiveStart and
* receiveStartTLI. After that, walreceiver updates these whenever it
* flushes the received WAL to disk.
@@ -81,7 +81,7 @@ typedef struct
/*
* latestChunkStart is the starting byte position of the current "batch"
* of received WAL. It's actually the same as the previous value of
- * receivedUpto before the last flush to disk. Startup process can use
+ * receivedUpto before the last flush to disk. Startup process can use
* this to detect whether it's keeping up or not.
*/
XLogRecPtr latestChunkStart;
@@ -104,8 +104,8 @@ typedef struct
char conninfo[MAXCONNINFO];
/*
- * replication slot name; is also used for walreceiver to connect with
- * the primary
+ * replication slot name; is also used for walreceiver to connect with the
+ * primary
*/
char slotname[NAMEDATALEN];
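
The two comment fixes above describe an invariant worth making concrete: receivedUpto tracks the end of received-and-flushed WAL, while latestChunkStart marks where the current batch began. A hedged sketch of the keep-up test the startup process can derive from them (the function and its use are illustrative):

    /* Sketch: replay has caught up with the current receive batch if
     * it has passed the batch's starting byte position. */
    static bool
    startup_keeping_up(XLogRecPtr replayedUpto, WalRcvData *walrcv)
    {
        return replayedUpto >= walrcv->latestChunkStart;
    }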
diff --git a/src/include/rewrite/rewriteHandler.h b/src/include/rewrite/rewriteHandler.h
index 1b5121314d..930b8f3c8d 100644
--- a/src/include/rewrite/rewriteHandler.h
+++ b/src/include/rewrite/rewriteHandler.h
@@ -25,9 +25,9 @@ extern void AcquireRewriteLocks(Query *parsetree,
extern Node *build_column_default(Relation rel, int attrno);
extern Query *get_view_query(Relation view);
extern const char *view_query_is_auto_updatable(Query *viewquery,
- bool check_cols);
-extern int relation_is_updatable(Oid reloid,
- bool include_triggers,
- Bitmapset *include_cols);
+ bool check_cols);
+extern int relation_is_updatable(Oid reloid,
+ bool include_triggers,
+ Bitmapset *include_cols);
#endif /* REWRITEHANDLER_H */
diff --git a/src/include/snowball/header.h b/src/include/snowball/header.h
index 9afa3c6906..848805c56e 100644
--- a/src/include/snowball/header.h
+++ b/src/include/snowball/header.h
@@ -4,7 +4,7 @@
* Replacement header file for Snowball stemmer modules
*
* The Snowball stemmer modules do #include "header.h", and think they
- * are including snowball/libstemmer/header.h. We adjust the CPPFLAGS
+ * are including snowball/libstemmer/header.h. We adjust the CPPFLAGS
* so that this file is found instead, and thereby we can modify the
* headers they see. The main point here is to ensure that pg_config.h
* is included before any system headers such as <stdio.h>; without that,
diff --git a/src/include/storage/barrier.h b/src/include/storage/barrier.h
index 82ddccd3a2..bc61de0ff1 100644
--- a/src/include/storage/barrier.h
+++ b/src/include/storage/barrier.h
@@ -33,7 +33,7 @@ extern slock_t dummy_spinlock;
*
* A read barrier must act as a compiler barrier, and in addition must
* guarantee that any loads issued prior to the barrier are completed before
- * any loads issued after the barrier. Similarly, a write barrier acts
+ * any loads issued after the barrier. Similarly, a write barrier acts
* as a compiler barrier, and also orders stores. Read and write barriers
* are thus weaker than a full memory barrier, but stronger than a compiler
* barrier. In practice, on machines with strong memory ordering, read and
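
The guarantee restated above is exactly what the classic publish/consume pattern relies on; a hedged sketch using this header's pg_write_barrier()/pg_read_barrier() macros (the two shared variables are hypothetical):

    static volatile int payload;    /* data being published */
    static volatile int ready;      /* publication flag */

    static void
    publish(void)
    {
        payload = 42;
        pg_write_barrier();         /* store payload before ready */
        ready = 1;
    }

    static int
    consume(void)
    {
        while (ready == 0)
            ;                       /* spin until published */
        pg_read_barrier();          /* load ready before payload */
        return payload;             /* sees 42, never garbage */
    }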
diff --git a/src/include/storage/block.h b/src/include/storage/block.h
index bc503cfacc..0a61103cf5 100644
--- a/src/include/storage/block.h
+++ b/src/include/storage/block.h
@@ -37,7 +37,7 @@ typedef uint32 BlockNumber;
/*
* BlockId:
*
- * this is a storage type for BlockNumber. in other words, this type
+ * this is a storage type for BlockNumber. in other words, this type
* is used for on-disk structures (e.g., in HeapTupleData) whereas
* BlockNumber is the type on which calculations are performed (e.g.,
* in access method code).
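
Because BlockId must avoid alignment padding in on-disk structures, it stores the 32-bit BlockNumber as two 16-bit halves; a self-contained sketch of the conversion the BlockIdSet/BlockIdGetBlockNumber macros perform:

    #include <stdint.h>

    typedef struct
    {
        uint16_t    bi_hi;          /* high-order 16 bits */
        uint16_t    bi_lo;          /* low-order 16 bits */
    } BlockIdSketch;

    static void
    block_id_set(BlockIdSketch *id, uint32_t blkno)
    {
        id->bi_hi = blkno >> 16;
        id->bi_lo = blkno & 0xffff;
    }

    static uint32_t
    block_id_get(const BlockIdSketch *id)
    {
        return ((uint32_t) id->bi_hi << 16) | id->bi_lo;
    }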
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 93a0030c3e..c019013e72 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -114,9 +114,9 @@ typedef struct buftag
*
* Note: buf_hdr_lock must be held to examine or change the tag, flags,
* usage_count, refcount, or wait_backend_pid fields. buf_id field never
- * changes after initialization, so does not need locking. freeNext is
+ * changes after initialization, so does not need locking. freeNext is
* protected by the BufFreelistLock not buf_hdr_lock. The LWLocks can take
- * care of themselves. The buf_hdr_lock is *not* used to control access to
+ * care of themselves. The buf_hdr_lock is *not* used to control access to
* the data in the buffer!
*
* An exception is that if we have the buffer pinned, its tag can't change
@@ -127,7 +127,7 @@ typedef struct buftag
*
* We can't physically remove items from a disk page if another backend has
* the buffer pinned. Hence, a backend may need to wait for all other pins
- * to go away. This is signaled by storing its own PID into
+ * to go away. This is signaled by storing its own PID into
* wait_backend_pid and setting flag bit BM_PIN_COUNT_WAITER. At present,
* there can be only one such waiter per buffer.
*
@@ -147,7 +147,7 @@ typedef struct sbufdesc
int buf_id; /* buffer's index number (from 0) */
int freeNext; /* link in freelist chain */
- LWLock *io_in_progress_lock; /* to wait for I/O to complete */
+ LWLock *io_in_progress_lock; /* to wait for I/O to complete */
LWLock *content_lock; /* to lock access to buffer contents */
} BufferDesc;
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index c222c3229f..d96e375f3f 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -27,7 +27,7 @@
* disk page is always a slotted page of the form:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp1 linp2 linp3 ... |
+ * | PageHeaderData | linp1 linp2 linp3 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
@@ -35,7 +35,7 @@
* | |
* | v pd_upper |
* +-------------+------------------------------------+
- * | | tupleN ... |
+ * | | tupleN ... |
* +-------------+------------------+-----------------+
* | ... tuple3 tuple2 tuple1 | "special space" |
* +--------------------------------+-----------------+
@@ -66,7 +66,7 @@
*
* AM-specific per-page data (if any) is kept in the area marked "special
* space"; each AM has an "opaque" structure defined somewhere that is
- * stored as the page trailer. an access method should always
+ * stored as the page trailer. an access method should always
* initialize its pages with PageInit and then set its own opaque
* fields.
*/
@@ -128,7 +128,7 @@ typedef struct
* there are no flag bits relating to checksums.
*
* pd_prune_xid is a hint field that helps determine whether pruning will be
- * useful. It is currently unused in index pages.
+ * useful. It is currently unused in index pages.
*
* The page version number and page size are packed together into a single
* uint16 field. This is for historical reasons: before PostgreSQL 7.3,
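
Under the slotted layout pictured above, line pointers grow forward from pd_lower while tuple data grows backward to pd_upper, so a page's free space is simply the gap between the two. A minimal sketch (the real PageGetFreeSpace additionally reserves room for one new line pointer):

    /* Sketch: free bytes between the line-pointer array and the
     * tuple area of a slotted page. */
    static size_t
    page_free_space(uint16_t pd_lower, uint16_t pd_upper)
    {
        return (pd_upper > pd_lower) ? (size_t) (pd_upper - pd_lower) : 0;
    }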
diff --git a/src/include/storage/dsm.h b/src/include/storage/dsm.h
index 272787adc6..1d0110d4b2 100644
--- a/src/include/storage/dsm.h
+++ b/src/include/storage/dsm.h
@@ -18,7 +18,7 @@
typedef struct dsm_segment dsm_segment;
/* Startup and shutdown functions. */
-struct PGShmemHeader; /* avoid including pg_shmem.h */
+struct PGShmemHeader; /* avoid including pg_shmem.h */
extern void dsm_cleanup_using_control_segment(dsm_handle old_control_handle);
extern void dsm_postmaster_startup(struct PGShmemHeader *);
extern void dsm_backend_shutdown(void);
@@ -50,7 +50,7 @@ typedef void (*on_dsm_detach_callback) (dsm_segment *, Datum arg);
extern void on_dsm_detach(dsm_segment *seg,
on_dsm_detach_callback function, Datum arg);
extern void cancel_on_dsm_detach(dsm_segment *seg,
- on_dsm_detach_callback function, Datum arg);
+ on_dsm_detach_callback function, Datum arg);
extern void reset_on_dsm_detach(void);
#endif /* DSM_H */
diff --git a/src/include/storage/dsm_impl.h b/src/include/storage/dsm_impl.h
index fda551489f..6e2a013411 100644
--- a/src/include/storage/dsm_impl.h
+++ b/src/include/storage/dsm_impl.h
@@ -40,7 +40,7 @@
#endif
/* GUC. */
-extern int dynamic_shared_memory_type;
+extern int dynamic_shared_memory_type;
/*
* Directory for on-disk state.
diff --git a/src/include/storage/ipc.h b/src/include/storage/ipc.h
index 8b9f10b785..52aff5bbe5 100644
--- a/src/include/storage/ipc.h
+++ b/src/include/storage/ipc.h
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h
index a91cf97917..bf2c4bd826 100644
--- a/src/include/storage/itemid.h
+++ b/src/include/storage/itemid.h
@@ -31,7 +31,7 @@ typedef struct ItemIdData
typedef ItemIdData *ItemId;
/*
- * lp_flags has these possible states. An UNUSED line pointer is available
+ * lp_flags has these possible states. An UNUSED line pointer is available
* for immediate re-use, the other states are not.
*/
#define LP_UNUSED 0 /* unused (should always have lp_len=0) */
diff --git a/src/include/storage/itemptr.h b/src/include/storage/itemptr.h
index 0b81d53f5f..78766d0698 100644
--- a/src/include/storage/itemptr.h
+++ b/src/include/storage/itemptr.h
@@ -29,7 +29,7 @@
* tuple header on disk, it's very important not to waste space with
* structure padding bytes. The struct is designed to be six bytes long
* (it contains three int16 fields) but a few compilers will pad it to
- * eight bytes unless coerced. We apply appropriate persuasion where
+ * eight bytes unless coerced. We apply appropriate persuasion where
* possible, and to cope with unpersuadable compilers, we try to use
* "SizeOfIptrData" rather than "sizeof(ItemPointerData)" when computing
* on-disk sizes.
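
A worked example of the padding hazard described above: ItemPointerData carries three int16 fields, 6 bytes of payload, but a compiler that rounds structs up to 4-byte multiples reports sizeof(ItemPointerData) == 8. Defining SizeOfIptrData as offsetof(ItemPointerData, ip_posid) + sizeof(OffsetNumber) pins the on-disk size to the intended 6 bytes even on such compilers.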
diff --git a/src/include/storage/large_object.h b/src/include/storage/large_object.h
index a85b108c38..0d81a4bc1b 100644
--- a/src/include/storage/large_object.h
+++ b/src/include/storage/large_object.h
@@ -70,7 +70,7 @@ typedef struct LargeObjectDesc
#define LOBLKSIZE (BLCKSZ / 4)
/*
- * Maximum length in bytes for a large object. To make this larger, we'd
+ * Maximum length in bytes for a large object. To make this larger, we'd
* have to widen pg_largeobject.pageno as well as various internal variables.
*/
#define MAX_LARGE_OBJECT_SIZE ((int64) INT_MAX * LOBLKSIZE)
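
To make the limit concrete: with the default BLCKSZ of 8192, LOBLKSIZE is 8192 / 4 = 2048 bytes per pg_largeobject chunk, so MAX_LARGE_OBJECT_SIZE comes to INT_MAX * 2048, roughly 2^31 * 2^11 = 2^42 bytes, i.e. about 4 TiB per large object.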
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index ceeab9fc8a..4c49e3c6e6 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -43,7 +43,7 @@ extern bool Debug_deadlocks;
/*
* Top-level transactions are identified by VirtualTransactionIDs comprising
* the BackendId of the backend running the xact, plus a locally-assigned
- * LocalTransactionId. These are guaranteed unique over the short term,
+ * LocalTransactionId. These are guaranteed unique over the short term,
* but will be reused after a database restart; hence they should never
* be stored on disk.
*
@@ -157,7 +157,7 @@ typedef uint16 LOCKMETHODID;
/*
* LOCKTAG is the key information needed to look up a LOCK item in the
- * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
+ * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
*
* The LockTagType enum defines the different kinds of objects we can lock.
* We can handle up to 256 different LockTagTypes.
@@ -210,7 +210,7 @@ typedef struct LOCKTAG
/*
* These macros define how we map logical IDs of lockable objects into
- * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
+ * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_LOCKTAG_RELATION(locktag,dboid,reloid) \
@@ -322,14 +322,14 @@ typedef struct LOCK
* a PROCLOCK struct.
*
* PROCLOCKTAG is the key information needed to look up a PROCLOCK item in the
- * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
+ * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
* of a lockable object and a holder/waiter for that object. (We can use
* pointers here because the PROCLOCKTAG need only be unique for the lifespan
* of the PROCLOCK, and it will never outlive the lock or the proc.)
*
* Internally to a backend, it is possible for the same lock to be held
* for different purposes: the backend tracks transaction locks separately
- * from session locks. However, this is not reflected in the shared-memory
+ * from session locks. However, this is not reflected in the shared-memory
* state: we only track which backend(s) hold the lock. This is OK since a
* backend can never block itself.
*
@@ -340,7 +340,7 @@ typedef struct LOCK
* as soon as convenient.
*
* releaseMask is workspace for LockReleaseAll(): it shows the locks due
- * to be released during the current call. This must only be examined or
+ * to be released during the current call. This must only be examined or
* set by the backend owning the PROCLOCK.
*
* Each PROCLOCK object is linked into lists for both the associated LOCK
@@ -373,7 +373,7 @@ typedef struct PROCLOCK
/*
* Each backend also maintains a local hash table with information about each
- * lock it is currently interested in. In particular the local table counts
+ * lock it is currently interested in. In particular the local table counts
* the number of times that lock has been acquired. This allows multiple
* requests for the same lock to be executed without additional accesses to
* shared memory. We also track the number of lock acquisitions per
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 3a1953383e..175fae3a88 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -50,8 +50,8 @@ typedef struct LWLock
char exclusive; /* # of exclusive holders (0 or 1) */
int shared; /* # of shared holders (0..MaxBackends) */
int tranche; /* tranche ID */
- struct PGPROC *head; /* head of list of waiting PGPROCs */
- struct PGPROC *tail; /* tail of list of waiting PGPROCs */
+ struct PGPROC *head; /* head of list of waiting PGPROCs */
+ struct PGPROC *tail; /* tail of list of waiting PGPROCs */
/* tail is undefined when head is NULL */
} LWLock;
@@ -150,7 +150,7 @@ extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
#define BUFFER_MAPPING_LWLOCK_OFFSET NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
-#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
+#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
(NUM_INDIVIDUAL_LWLOCKS + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
@@ -205,7 +205,7 @@ extern LWLock *LWLockAssign(void);
* mapped at the same address in all coordinating backends, so storing the
* registration in the main shared memory segment wouldn't work for that case.
*/
-extern int LWLockNewTrancheId(void);
+extern int LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int, LWLockTranche *);
extern void LWLockInitialize(LWLock *, int tranche_id);
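
The tranche declarations reindented above form a small registration protocol; a hedged sketch of how an extension might use it (the my_* names are illustrative): allocate an ID, describe the lock array, register the tranche, then initialize each lock with that ID.

    static LWLockTranche my_tranche;
    static LWLock my_lock;

    static void
    register_my_lock(void)
    {
        int     tranche_id = LWLockNewTrancheId();

        my_tranche.name = "my_extension";
        my_tranche.array_base = &my_lock;
        my_tranche.array_stride = sizeof(LWLock);
        LWLockRegisterTranche(tranche_id, &my_tranche);

        LWLockInitialize(&my_lock, tranche_id);
    }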
diff --git a/src/include/storage/pg_sema.h b/src/include/storage/pg_sema.h
index 51ded817e7..c53aa9795b 100644
--- a/src/include/storage/pg_sema.h
+++ b/src/include/storage/pg_sema.h
@@ -6,7 +6,7 @@
* PostgreSQL requires counting semaphores (the kind that keep track of
* multiple unlock operations, and will allow an equal number of subsequent
* lock operations before blocking). The underlying implementation is
- * not the same on every platform. This file defines the API that must
+ * not the same on every platform. This file defines the API that must
* be provided by each port.
*
*
diff --git a/src/include/storage/pg_shmem.h b/src/include/storage/pg_shmem.h
index ab28ebee84..76bba445bd 100644
--- a/src/include/storage/pg_shmem.h
+++ b/src/include/storage/pg_shmem.h
@@ -10,7 +10,7 @@
*
* To simplify life for the SysV implementation, the ID is assumed to
* consist of two unsigned long values (these are key and ID in SysV
- * terms). Other platforms may ignore the second value if they need
+ * terms). Other platforms may ignore the second value if they need
* only one ID number.
*
*
@@ -42,7 +42,7 @@ typedef struct PGShmemHeader /* standard header for all Postgres shmem */
} PGShmemHeader;
/* GUC variable */
-extern int huge_pages;
+extern int huge_pages;
/* Possible values for huge_pages */
typedef enum
@@ -50,7 +50,7 @@ typedef enum
HUGE_PAGES_OFF,
HUGE_PAGES_ON,
HUGE_PAGES_TRY
-} HugePagesType;
+} HugePagesType;
#ifndef WIN32
extern unsigned long UsedShmemSegID;
diff --git a/src/include/storage/pos.h b/src/include/storage/pos.h
index bc41502a65..662a717e3c 100644
--- a/src/include/storage/pos.h
+++ b/src/include/storage/pos.h
@@ -20,7 +20,7 @@
* been changed to just <offset> as the notion of having multiple pages
* within a block has been removed.
*
- * the 'offset' abstraction is somewhat confusing. it is NOT a byte
+ * the 'offset' abstraction is somewhat confusing. it is NOT a byte
* offset within the page; instead, it is an offset into the line
* pointer array contained on every page that store (heap or index)
* tuples.
diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h
index 9652d00c2c..afbd782a21 100644
--- a/src/include/storage/predicate_internals.h
+++ b/src/include/storage/predicate_internals.h
@@ -128,7 +128,7 @@ typedef struct SERIALIZABLEXACT
* The following types are used to provide an ad hoc list for holding
* SERIALIZABLEXACT objects. An HTAB is overkill, since there is no need to
* access these by key -- there are direct pointers to these objects where
- * needed. If a shared memory list is created, these types can probably be
+ * needed. If a shared memory list is created, these types can probably be
* eliminated in favor of using the general solution.
*/
typedef struct PredXactListElementData
@@ -311,9 +311,9 @@ typedef struct PREDICATELOCKTAG
* The PREDICATELOCK struct represents an individual lock.
*
* An entry can be created here when the related database object is read, or
- * by promotion of multiple finer-grained targets. All entries related to a
+ * by promotion of multiple finer-grained targets. All entries related to a
* serializable transaction are removed when that serializable transaction is
- * cleaned up. Entries can also be removed when they are combined into a
+ * cleaned up. Entries can also be removed when they are combined into a
* single coarser-grained lock entry.
*/
typedef struct PREDICATELOCK
@@ -384,7 +384,7 @@ typedef struct PredicateLockData
/*
* These macros define how we map logical IDs of lockable objects into the
- * physical fields of PREDICATELOCKTARGETTAG. Use these to set up values,
+ * physical fields of PREDICATELOCKTARGETTAG. Use these to set up values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_PREDICATELOCKTARGETTAG_RELATION(locktag,dboid,reloid) \
@@ -450,7 +450,7 @@ typedef struct TwoPhasePredicateXactRecord
typedef struct TwoPhasePredicateLockRecord
{
PREDICATELOCKTARGETTAG target;
- uint32 filler; /* to avoid length change in back-patched fix */
+ uint32 filler; /* to avoid length change in back-patched fix */
} TwoPhasePredicateLockRecord;
typedef struct TwoPhasePredicateRecord
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 5218b448cd..c23f4da5b6 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -21,7 +21,7 @@
/*
* Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds
- * for non-aborted subtransactions of its current top transaction. These
+ * for non-aborted subtransactions of its current top transaction. These
* have to be treated as running XIDs by other backends.
*
* We also keep track of whether the cache overflowed (ie, the transaction has
@@ -41,8 +41,9 @@ struct XidCache
#define PROC_IS_AUTOVACUUM 0x01 /* is it an autovac worker? */
#define PROC_IN_VACUUM 0x02 /* currently running lazy vacuum */
#define PROC_IN_ANALYZE 0x04 /* currently running analyze */
-#define PROC_VACUUM_FOR_WRAPAROUND 0x08 /* set by autovac only */
-#define PROC_IN_LOGICAL_DECODING 0x10 /* currently doing logical decoding */
+#define PROC_VACUUM_FOR_WRAPAROUND 0x08 /* set by autovac only */
+#define PROC_IN_LOGICAL_DECODING 0x10 /* currently doing logical
+ * decoding */
/* flags reset at EOXact */
#define PROC_VACUUM_STATE_MASK \
@@ -60,7 +61,7 @@ struct XidCache
* Each backend has a PGPROC struct in shared memory. There is also a list of
* currently-unused PGPROC structs that will be reallocated to new backends.
*
- * links: list link for any list the PGPROC is in. When waiting for a lock,
+ * links: list link for any list the PGPROC is in. When waiting for a lock,
* the PGPROC is linked into that lock's waitProcs queue. A recycled PGPROC
* is linked into ProcGlobal's freeProcs list.
*
@@ -132,7 +133,7 @@ struct PGPROC
struct XidCache subxids; /* cache for subtransaction XIDs */
- /* Per-backend LWLock. Protects fields below. */
+ /* Per-backend LWLock. Protects fields below. */
LWLock *backendLock; /* protects the fields below */
/* Lock manager data, recording fast-path locks taken by this backend. */
@@ -151,7 +152,7 @@ extern PGDLLIMPORT struct PGXACT *MyPgXact;
/*
* Prior to PostgreSQL 9.2, the fields below were stored as part of the
- * PGPROC. However, benchmarking revealed that packing these particular
+ * PGPROC. However, benchmarking revealed that packing these particular
* members into a separate array as tightly as possible sped up GetSnapshotData
* considerably on systems with many CPU cores, by reducing the number of
* cache lines needing to be fetched. Thus, think very carefully before adding
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index d0b4103a09..0c4611bda2 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -83,6 +83,6 @@ extern void ProcArraySetReplicationSlotXmin(TransactionId xmin,
TransactionId catalog_xmin, bool already_locked);
extern void ProcArrayGetReplicationSlotXmin(TransactionId *xmin,
- TransactionId *catalog_xmin);
+ TransactionId *catalog_xmin);
#endif /* PROCARRAY_H */
diff --git a/src/include/storage/relfilenode.h b/src/include/storage/relfilenode.h
index d5b772ca9f..d5809dd4a0 100644
--- a/src/include/storage/relfilenode.h
+++ b/src/include/storage/relfilenode.h
@@ -27,7 +27,7 @@
* spcNode identifies the tablespace of the relation. It corresponds to
* pg_tablespace.oid.
*
- * dbNode identifies the database of the relation. It is zero for
+ * dbNode identifies the database of the relation. It is zero for
* "shared" relations (those common to all databases of a cluster).
* Nonzero dbNode values correspond to pg_database.oid.
*
@@ -50,7 +50,7 @@
* is a "mapped" relation, whose current true filenode number is available
* from relmapper.c. Again, this case is NOT allowed in RelFileNodes.
*
- * Note: various places use RelFileNode in hashtable keys. Therefore,
+ * Note: various places use RelFileNode in hashtable keys. Therefore,
* there *must not* be any unused padding bytes in this struct. That
* should be safe as long as all the fields are of type Oid.
*/
@@ -63,7 +63,7 @@ typedef struct RelFileNode
/*
* Augmenting a relfilenode with the backend ID provides all the information
- * we need to locate the physical storage. The backend ID is InvalidBackendId
+ * we need to locate the physical storage. The backend ID is InvalidBackendId
* for regular relations (those accessible to more than one backend), or the
* owning backend's ID for backend-local relations. Backend-local relations
* are always transient and removed in case of a database crash; they are
diff --git a/src/include/storage/shm_mq.h b/src/include/storage/shm_mq.h
index c7dd90532b..5bae3807af 100644
--- a/src/include/storage/shm_mq.h
+++ b/src/include/storage/shm_mq.h
@@ -28,9 +28,9 @@ typedef struct shm_mq_handle shm_mq_handle;
/* Possible results of a send or receive operation. */
typedef enum
{
- SHM_MQ_SUCCESS, /* Sent or received a message. */
- SHM_MQ_WOULD_BLOCK, /* Not completed; retry later. */
- SHM_MQ_DETACHED /* Other process has detached queue. */
+ SHM_MQ_SUCCESS, /* Sent or received a message. */
+ SHM_MQ_WOULD_BLOCK, /* Not completed; retry later. */
+ SHM_MQ_DETACHED /* Other process has detached queue. */
} shm_mq_result;
/*
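
The three result codes above drive the retry logic of every queue consumer; a hedged sketch of a non-blocking receive loop (process_message is hypothetical; the latch dance mirrors what shm_mq.c itself does when waiting):

    for (;;)
    {
        Size        nbytes;
        void       *data;
        shm_mq_result res;

        res = shm_mq_receive(mqh, &nbytes, &data, true);    /* nowait */
        if (res == SHM_MQ_SUCCESS)
            process_message(data, nbytes);
        else if (res == SHM_MQ_WOULD_BLOCK)
        {
            /* Nothing to read yet: sleep on our latch, then retry. */
            WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
            ResetLatch(&MyProc->procLatch);
        }
        else                        /* SHM_MQ_DETACHED */
            break;                  /* sender is gone for good */
    }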
diff --git a/src/include/storage/shm_toc.h b/src/include/storage/shm_toc.h
index cb5477e685..6f0804aeef 100644
--- a/src/include/storage/shm_toc.h
+++ b/src/include/storage/shm_toc.h
@@ -40,8 +40,8 @@ extern void *shm_toc_lookup(shm_toc *toc, uint64 key);
*/
typedef struct
{
- Size space_for_chunks;
- Size number_of_keys;
+ Size space_for_chunks;
+ Size number_of_keys;
} shm_toc_estimator;
#define shm_toc_initialize_estimator(e) \
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index d5bb850337..812ea95e9b 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -34,8 +34,8 @@
* updates and deletions in system catalogs (see CacheInvalidateHeapTuple).
* An update can generate two inval events, one for the old tuple and one for
* the new, but this is reduced to one event if the tuple's hash key doesn't
- * change. Note that the inval events themselves don't actually say whether
- * the tuple is being inserted or deleted. Also, since we transmit only a
+ * change. Note that the inval events themselves don't actually say whether
+ * the tuple is being inserted or deleted. Also, since we transmit only a
* hash key, there is a small risk of unnecessary invalidations due to chance
* matches of hash keys.
*
diff --git a/src/include/storage/sinvaladt.h b/src/include/storage/sinvaladt.h
index 9b45b3efef..72f532e4af 100644
--- a/src/include/storage/sinvaladt.h
+++ b/src/include/storage/sinvaladt.h
@@ -4,7 +4,7 @@
* POSTGRES shared cache invalidation data manager.
*
* The shared cache invalidation manager is responsible for transmitting
- * invalidation messages between backends. Any message sent by any backend
+ * invalidation messages between backends. Any message sent by any backend
* must be delivered to all already-running backends before it can be
* forgotten. (If we run out of space, we instead deliver a "RESET"
* message to backends that have fallen too far behind.)
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index c7ab235ba4..ba7c909451 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -29,7 +29,7 @@
*
* An SMgrRelation may have an "owner", which is just a pointer to it from
* somewhere else; smgr.c will clear this pointer if the SMgrRelation is
- * closed. We use this to avoid dangling pointers from relcache to smgr
+ * closed. We use this to avoid dangling pointers from relcache to smgr
* without having to make the smgr explicitly aware of relcache. There
* can't be more than one "owner" pointer per SMgrRelation, but that's
* all we need.
@@ -48,7 +48,7 @@ typedef struct SMgrRelationData
/*
* These next three fields are not actually used or manipulated by smgr,
* except that they are reset to InvalidBlockNumber upon a cache flush
- * event (in particular, upon truncation of the relation). Higher levels
+ * event (in particular, upon truncation of the relation). Higher levels
* store cached state here so that it will be reset when truncation
* happens. In all three cases, InvalidBlockNumber means "unknown".
*/
@@ -60,7 +60,7 @@ typedef struct SMgrRelationData
/*
* Fields below here are intended to be private to smgr.c and its
- * submodules. Do not touch them from elsewhere.
+ * submodules. Do not touch them from elsewhere.
*/
int smgr_which; /* storage manager selector */
diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h
index 7ee2fedf44..b5fd964c0f 100644
--- a/src/include/storage/spin.h
+++ b/src/include/storage/spin.h
@@ -72,11 +72,11 @@
extern int SpinlockSemas(void);
-extern Size SpinlockSemaSize(void);
+extern Size SpinlockSemaSize(void);
#ifndef HAVE_SPINLOCKS
-extern void SpinlockSemaInit(PGSemaphore);
-extern PGSemaphore SpinlockSemaArray;
+extern void SpinlockSemaInit(PGSemaphore);
+extern PGSemaphore SpinlockSemaArray;
#endif
#endif /* SPIN_H */
diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h
index d7affce86d..d53a6c8c12 100644
--- a/src/include/tcop/dest.h
+++ b/src/include/tcop/dest.h
@@ -29,14 +29,14 @@
*
* CreateDestReceiver returns a receiver object appropriate to the specified
* destination. The executor, as well as utility statements that can return
- * tuples, are passed the resulting DestReceiver* pointer. Each executor run
+ * tuples, are passed the resulting DestReceiver* pointer. Each executor run
* or utility execution calls the receiver's rStartup method, then the
* receiveSlot method (zero or more times), then the rShutdown method.
* The same receiver object may be re-used multiple times; eventually it is
* destroyed by calling its rDestroy method.
*
* In some cases, receiver objects require additional parameters that must
- * be passed to them after calling CreateDestReceiver. Since the set of
+ * be passed to them after calling CreateDestReceiver. Since the set of
* parameters varies for different receiver types, this is not handled by
* this module, but by direct calls from the calling code to receiver type
* specific functions.
@@ -45,10 +45,10 @@
* allocated object (for destination types that require no local state),
* in which case rDestroy is a no-op. Alternatively it can be a palloc'd
* object that has DestReceiver as its first field and contains additional
- * fields (see printtup.c for an example). These additional fields are then
+ * fields (see printtup.c for an example). These additional fields are then
* accessible to the DestReceiver functions by casting the DestReceiver*
- * pointer passed to them. The palloc'd object is pfree'd by the rDestroy
- * method. Note that the caller of CreateDestReceiver should take care to
+ * pointer passed to them. The palloc'd object is pfree'd by the rDestroy
+ * method. Note that the caller of CreateDestReceiver should take care to
* do so in a memory context that is long-lived enough for the receiver
* object not to disappear while still needed.
*
@@ -79,7 +79,7 @@
* destination. Someday this will probably need to be improved.
*
* Note: only the values DestNone, DestDebug, DestRemote are legal for the
- * global variable whereToSendOutput. The other values may be used
+ * global variable whereToSendOutput. The other values may be used
* as the destination for individual commands.
* ----------------
*/
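
The lifecycle spelled out above reads, from the caller's side, as a hedged sketch like this (fetch_next_tuple, slot, and tupdesc are hypothetical):

    DestReceiver *dest = CreateDestReceiver(DestNone);

    (*dest->rStartup) (dest, CMD_SELECT, tupdesc);
    while (fetch_next_tuple(slot))
        (*dest->receiveSlot) (slot, dest);
    (*dest->rShutdown) (dest);
    (*dest->rDestroy) (dest);       /* no-op for the DestNone receiver */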
diff --git a/src/include/tcop/tcopdebug.h b/src/include/tcop/tcopdebug.h
index 940e996ad8..63e45667bb 100644
--- a/src/include/tcop/tcopdebug.h
+++ b/src/include/tcop/tcopdebug.h
@@ -24,7 +24,7 @@
/* ----------------
* TCOP_SHOWSTATS controls whether or not buffer and
- * access method statistics are shown for each query. -cim 2/9/89
+ * access method statistics are shown for each query. -cim 2/9/89
* ----------------
*/
#undef TCOP_SHOWSTATS
diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h
index f27e2fb791..9430baa4a0 100644
--- a/src/include/utils/acl.h
+++ b/src/include/utils/acl.h
@@ -81,11 +81,11 @@ typedef struct AclItem
/*
* Definitions for convenient access to Acl (array of AclItem).
* These are standard PostgreSQL arrays, but are restricted to have one
- * dimension and no nulls. We also ignore the lower bound when reading,
+ * dimension and no nulls. We also ignore the lower bound when reading,
* and set it to one when writing.
*
* CAUTION: as of PostgreSQL 7.1, these arrays are toastable (just like all
- * other array types). Therefore, be careful to detoast them with the
+ * other array types). Therefore, be careful to detoast them with the
* macros provided, unless you know for certain that a particular array
* can't have been toasted.
*/
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 33b6dca191..bbb5d398a7 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -318,7 +318,7 @@ extern Datum btnamecmp(PG_FUNCTION_ARGS);
extern Datum bttextcmp(PG_FUNCTION_ARGS);
/*
- * Per-opclass sort support functions for new btrees. Like the
+ * Per-opclass sort support functions for new btrees. Like the
* functions above, these are stored in pg_amproc; most are defined in
* access/nbtree/nbtcompare.c
*/
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index dac1ac53ce..697516b81b 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -78,13 +78,13 @@ typedef struct catctup
/*
* Each tuple in a cache is a member of a dlist that stores the elements
- * of its hash bucket. We keep each dlist in LRU order to speed repeated
+ * of its hash bucket. We keep each dlist in LRU order to speed repeated
* lookups.
*/
dlist_node cache_elem; /* list member of per-bucket list */
/*
- * The tuple may also be a member of at most one CatCList. (If a single
+ * The tuple may also be a member of at most one CatCList. (If a single
* catcache is list-searched with varying numbers of keys, we may have to
* make multiple entries for the same tuple because of this restriction.
* Currently, that's not expected to be common, so we accept the potential
@@ -101,7 +101,7 @@ typedef struct catctup
*
* A negative cache entry is an assertion that there is no tuple matching
* a particular key. This is just as useful as a normal entry so far as
- * avoiding catalog searches is concerned. Management of positive and
+ * avoiding catalog searches is concerned. Management of positive and
* negative entries is identical.
*/
int refcount; /* number of active references */
@@ -120,7 +120,7 @@ typedef struct catclist
/*
* A CatCList describes the result of a partial search, ie, a search using
- * only the first K key columns of an N-key cache. We form the keys used
+ * only the first K key columns of an N-key cache. We form the keys used
* into a tuple (with other attributes NULL) to represent the stored key
* set. The CatCList object contains links to cache entries for all the
* table rows satisfying the partial key. (Note: none of these will be
diff --git a/src/include/utils/datetime.h b/src/include/utils/datetime.h
index fc3a1f611d..2e69503f96 100644
--- a/src/include/utils/datetime.h
+++ b/src/include/utils/datetime.h
@@ -261,7 +261,7 @@ extern const int day_tab[2][13];
/*
* Datetime input parsing routines (ParseDateTime, DecodeDateTime, etc)
- * return zero or a positive value on success. On failure, they return
+ * return zero or a positive value on success. On failure, they return
* one of these negative code values. DateTimeParseError may be used to
* produce a correct ereport.
*/
@@ -283,7 +283,7 @@ extern int ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
extern int DecodeDateTime(char **field, int *ftype,
int nf, int *dtype,
struct pg_tm * tm, fsec_t *fsec, int *tzp);
-extern int DecodeTimezone(char *str, int *tzp);
+extern int DecodeTimezone(char *str, int *tzp);
extern int DecodeTimeOnly(char **field, int *ftype,
int nf, int *dtype,
struct pg_tm * tm, fsec_t *fsec, int *tzp);
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index 427d52d878..92073be0ca 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -89,13 +89,13 @@
* ... other errxxx() fields as needed ...));
*
* The error level is required, and so is a primary error message (errmsg
- * or errmsg_internal). All else is optional. errcode() defaults to
+ * or errmsg_internal). All else is optional. errcode() defaults to
* ERRCODE_INTERNAL_ERROR if elevel is ERROR or more, ERRCODE_WARNING
* if elevel is WARNING, or ERRCODE_SUCCESSFUL_COMPLETION if elevel is
* NOTICE or below.
*
* ereport_domain() allows a message domain to be specified, for modules that
- * wish to use a different message catalog from the backend's. To avoid having
+ * wish to use a different message catalog from the backend's. To avoid having
* one copy of the default text domain per .o file, we define it as NULL here
* and have errstart insert the default text domain. Modules can either use
* ereport_domain() directly, or preferably they can override the TEXTDOMAIN
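
Putting the defaulting rule above next to the calling pattern it documents, one concrete (illustrative) call:

    ereport(ERROR,
            (errcode(ERRCODE_UNDEFINED_TABLE),
             errmsg("relation \"%s\" does not exist", relname),
             errhint("Check that the name is spelled correctly.")));

ERROR fixes the level, errmsg supplies the required primary message, and errcode overrides the ERRCODE_INTERNAL_ERROR that an ERROR-level report would otherwise default to.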
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index be68f35d37..686a6a1d44 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -40,7 +40,7 @@
* configuration file, or by client request in the connection startup
* packet (e.g., from libpq's PGOPTIONS variable). Furthermore, an
* already-started backend will ignore changes to such an option in the
- * configuration file. The idea is that these options are fixed for a
+ * configuration file. The idea is that these options are fixed for a
* given backend once it's started, but they can vary across backends.
*
* SUSET options can be set at postmaster startup, with the SIGHUP
@@ -326,7 +326,7 @@ extern bool parse_real(const char *value, double *result);
extern int set_config_option(const char *name, const char *value,
GucContext context, GucSource source,
GucAction action, bool changeVal, int elevel);
-extern void AlterSystemSetConfigFile(AlterSystemStmt * setstmt);
+extern void AlterSystemSetConfigFile(AlterSystemStmt *setstmt);
extern char *GetConfigOptionByName(const char *name, const char **varname);
extern void GetConfigOptionByNum(int varnum, const char **values, bool *noshow);
extern int GetNumConfigOptions(void);
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index 81b06d68af..77974a193b 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -30,7 +30,7 @@ typedef int (*HashCompareFunc) (const void *key1, const void *key2,
Size keysize);
/*
- * Key copying functions must have this signature. The return value is not
+ * Key copying functions must have this signature. The return value is not
* used. (The definition is set up to allow memcpy() and strncpy() to be
* used directly.)
*/
diff --git a/src/include/utils/inet.h b/src/include/utils/inet.h
index bd31c7169a..8905a307f8 100644
--- a/src/include/utils/inet.h
+++ b/src/include/utils/inet.h
@@ -40,7 +40,7 @@ typedef struct
/*
* Both INET and CIDR addresses are represented within Postgres as varlena
* objects, ie, there is a varlena header in front of the struct type
- * depicted above. This struct depicts what we actually have in memory
+ * depicted above. This struct depicts what we actually have in memory
* in "uncompressed" cases. Note that since the maximum data size is only
* 18 bytes, INET/CIDR will invariably be stored into tuples using the
* 1-byte-header varlena format. However, we have to be prepared to cope
@@ -54,7 +54,7 @@ typedef struct
} inet;
/*
- * Access macros. We use VARDATA_ANY so that we can process short-header
+ * Access macros. We use VARDATA_ANY so that we can process short-header
* varlena values without detoasting them. This requires a trick:
* VARDATA_ANY assumes the varlena header is already filled in, which is
* not the case when constructing a new value (until SET_INET_VARSIZE is
diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h
index e4a2bd565d..889364fb30 100644
--- a/src/include/utils/jsonapi.h
+++ b/src/include/utils/jsonapi.h
@@ -110,7 +110,7 @@ extern void pg_parse_json(JsonLexContext *lex, JsonSemAction *sem);
*/
extern JsonLexContext *makeJsonLexContext(text *json, bool need_escapes);
extern JsonLexContext *makeJsonLexContextCstringLen(char *json,
- int len,
- bool need_escapes);
+ int len,
+ bool need_escapes);
#endif /* JSONAPI_H */
diff --git a/src/include/utils/jsonb.h b/src/include/utils/jsonb.h
index 00a6d4f9e0..dea64ad780 100644
--- a/src/include/utils/jsonb.h
+++ b/src/include/utils/jsonb.h
@@ -29,14 +29,14 @@
/* Get information on varlena Jsonb */
#define JB_ROOT_COUNT(jbp_) ( *(uint32*) VARDATA(jbp_) & JB_CMASK)
-#define JB_ROOT_IS_SCALAR(jbp_) ( *(uint32*) VARDATA(jbp_) & JB_FSCALAR)
-#define JB_ROOT_IS_OBJECT(jbp_) ( *(uint32*) VARDATA(jbp_) & JB_FOBJECT)
+#define JB_ROOT_IS_SCALAR(jbp_) ( *(uint32*) VARDATA(jbp_) & JB_FSCALAR)
+#define JB_ROOT_IS_OBJECT(jbp_) ( *(uint32*) VARDATA(jbp_) & JB_FOBJECT)
#define JB_ROOT_IS_ARRAY(jbp_) ( *(uint32*) VARDATA(jbp_) & JB_FARRAY)
/* Jentry macros */
#define JENTRY_POSMASK 0x0FFFFFFF
#define JENTRY_ISFIRST 0x80000000
-#define JENTRY_TYPEMASK (~(JENTRY_POSMASK | JENTRY_ISFIRST))
+#define JENTRY_TYPEMASK (~(JENTRY_POSMASK | JENTRY_ISFIRST))
#define JENTRY_ISSTRING 0x00000000
#define JENTRY_ISNUMERIC 0x10000000
#define JENTRY_ISNEST 0x20000000
@@ -55,9 +55,9 @@
#define JBE_ISBOOL_FALSE(je_) (JBE_ISBOOL(je_) && !JBE_ISBOOL_TRUE(je_))
/* Get offset for Jentry */
-#define JBE_ENDPOS(je_) ((je_).header & JENTRY_POSMASK)
-#define JBE_OFF(je_) (JBE_ISFIRST(je_) ? 0 : JBE_ENDPOS((&(je_))[-1]))
-#define JBE_LEN(je_) (JBE_ISFIRST(je_) ? \
+#define JBE_ENDPOS(je_) ((je_).header & JENTRY_POSMASK)
+#define JBE_OFF(je_) (JBE_ISFIRST(je_) ? 0 : JBE_ENDPOS((&(je_))[-1]))
+#define JBE_LEN(je_) (JBE_ISFIRST(je_) ? \
JBE_ENDPOS(je_) \
: JBE_ENDPOS(je_) - JBE_ENDPOS((&(je_))[-1]))
@@ -98,7 +98,7 @@
typedef struct JsonbPair JsonbPair;
typedef struct JsonbValue JsonbValue;
-typedef char* JsonbSuperHeader;
+typedef char *JsonbSuperHeader;
/*
* Jsonbs are varlena objects, so must meet the varlena convention that the
@@ -128,19 +128,19 @@ typedef struct
* have one per element.
*
* The position offset points to the _end_ so that we can get the length by
- * subtraction from the previous entry. The JENTRY_ISFIRST flag indicates if
+ * subtraction from the previous entry. The JENTRY_ISFIRST flag indicates if
* there is a previous entry.
*/
typedef struct
{
- uint32 header; /* Shares some flags with superheader */
-} JEntry;
+ uint32 header; /* Shares some flags with superheader */
+} JEntry;
#define IsAJsonbScalar(jsonbval) ((jsonbval)->type >= jbvNull && \
(jsonbval)->type <= jbvBool)
/*
- * JsonbValue: In-memory representation of Jsonb. This is a convenient
+ * JsonbValue: In-memory representation of Jsonb. This is a convenient
* deserialized representation, that can easily support using the "val"
* union across underlying types during manipulation. The Jsonb on-disk
* representation has various alignment considerations.
@@ -159,40 +159,39 @@ struct JsonbValue
jbvObject,
/* Binary (i.e. struct Jsonb) jbvArray/jbvObject */
jbvBinary
- } type; /* Influences sort order */
+ } type; /* Influences sort order */
- int estSize; /* Estimated size of node (including
- * subnodes) */
+ int estSize; /* Estimated size of node (including subnodes) */
union
{
- Numeric numeric;
+ Numeric numeric;
bool boolean;
struct
{
int len;
- char *val; /* Not necessarily null-terminated */
- } string; /* String primitive type */
+ char *val; /* Not necessarily null-terminated */
+ } string; /* String primitive type */
struct
{
int nElems;
JsonbValue *elems;
- bool rawScalar; /* Top-level "raw scalar" array? */
- } array; /* Array container type */
+ bool rawScalar; /* Top-level "raw scalar" array? */
+ } array; /* Array container type */
struct
{
- int nPairs; /* 1 pair, 2 elements */
+ int nPairs; /* 1 pair, 2 elements */
JsonbPair *pairs;
- } object; /* Associative container type */
+ } object; /* Associative container type */
struct
{
int len;
char *data;
- } binary;
- } val;
+ } binary;
+ } val;
};
/*
@@ -236,11 +235,11 @@ typedef struct JsonbIterator
char *buffer;
/* Current value */
- uint32 containerType; /* Never of value JB_FSCALAR, since
- * scalars will appear in pseudo-arrays */
- uint32 nElems; /* Number of elements in metaArray
- * (will be nPairs for objects) */
- bool isScalar; /* Pseudo-array scalar value? */
+ uint32 containerType; /* Never of value JB_FSCALAR, since scalars
+ * will appear in pseudo-arrays */
+ uint32 nElems; /* Number of elements in metaArray (will be
+ * nPairs for objects) */
+ bool isScalar; /* Pseudo-array scalar value? */
JEntry *meta;
/* Current item in buffer (up to nElems, but must * 2 for objects) */
@@ -287,6 +286,7 @@ extern Datum gin_extract_jsonb(PG_FUNCTION_ARGS);
extern Datum gin_extract_jsonb_query(PG_FUNCTION_ARGS);
extern Datum gin_consistent_jsonb(PG_FUNCTION_ARGS);
extern Datum gin_triconsistent_jsonb(PG_FUNCTION_ARGS);
+
/* GIN hash opclass functions */
extern Datum gin_extract_jsonb_hash(PG_FUNCTION_ARGS);
extern Datum gin_extract_jsonb_query_hash(PG_FUNCTION_ARGS);
@@ -294,27 +294,27 @@ extern Datum gin_consistent_jsonb_hash(PG_FUNCTION_ARGS);
extern Datum gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS);
/* Support functions */
-extern int compareJsonbSuperHeaderValue(JsonbSuperHeader a,
- JsonbSuperHeader b);
+extern int compareJsonbSuperHeaderValue(JsonbSuperHeader a,
+ JsonbSuperHeader b);
extern JsonbValue *findJsonbValueFromSuperHeader(JsonbSuperHeader sheader,
- uint32 flags,
- uint32 *lowbound,
- JsonbValue *key);
+ uint32 flags,
+ uint32 *lowbound,
+ JsonbValue *key);
extern JsonbValue *getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader,
- uint32 i);
-extern JsonbValue *pushJsonbValue(JsonbParseState ** pstate, int seq,
- JsonbValue *scalarVal);
+ uint32 i);
+extern JsonbValue *pushJsonbValue(JsonbParseState **pstate, int seq,
+ JsonbValue *scalarVal);
extern JsonbIterator *JsonbIteratorInit(JsonbSuperHeader buffer);
extern int JsonbIteratorNext(JsonbIterator **it, JsonbValue *val,
- bool skipNested);
+ bool skipNested);
extern Jsonb *JsonbValueToJsonb(JsonbValue *val);
-extern bool JsonbDeepContains(JsonbIterator ** val,
- JsonbIterator ** mContained);
+extern bool JsonbDeepContains(JsonbIterator **val,
+ JsonbIterator **mContained);
extern JsonbValue *arrayToJsonbSortedArray(ArrayType *a);
-extern void JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash);
+extern void JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash);
/* jsonb.c support function */
extern char *JsonbToCString(StringInfo out, JsonbSuperHeader in,
- int estimated_len);
+ int estimated_len);
#endif /* __JSONB_H__ */
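
A worked example of the end-position scheme behind JBE_OFF/JBE_LEN above: suppose three string values of lengths 3, 5, and 2 are stored back to back. Their JEntrys record end positions 3, 8, and 10, with JENTRY_ISFIRST set on the first. JBE_OFF then yields 0, 3, and 8, and JBE_LEN yields 3, 8 - 3 = 5, and 10 - 8 = 2, so each length falls out of one subtraction from the previous entry's end position.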
diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h
index 16b250b1f6..59d0aecfbb 100644
--- a/src/include/utils/memutils.h
+++ b/src/include/utils/memutils.h
@@ -49,8 +49,8 @@
* All chunks allocated by any memory context manager are required to be
* preceded by a StandardChunkHeader at a spacing of STANDARDCHUNKHEADERSIZE.
* A currently-allocated chunk must contain a backpointer to its owning
- * context as well as the allocated size of the chunk. The backpointer is
- * used by pfree() and repalloc() to find the context to call. The allocated
+ * context as well as the allocated size of the chunk. The backpointer is
+ * used by pfree() and repalloc() to find the context to call. The allocated
* size is not absolutely essential, but it's expected to be needed by any
* reasonable implementation.
*/
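
The chunk-header rule above is what lets pfree() and repalloc() work without being told the owning context; a simplified sketch of the lookup (the real logic lives in mcxt.c, with field names per StandardChunkHeader):

    /* Sketch: back up from the user pointer to the standard header,
     * then dispatch to the owning context's free routine. */
    static void
    pfree_sketch(void *pointer)
    {
        StandardChunkHeader *header = (StandardChunkHeader *)
            ((char *) pointer - STANDARDCHUNKHEADERSIZE);

        (*header->context->methods->free_p) (header->context, pointer);
    }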
diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h
index d99be84e2d..999bfbe75f 100644
--- a/src/include/utils/palloc.h
+++ b/src/include/utils/palloc.h
@@ -6,9 +6,9 @@
* This file contains the basic memory allocation interface that is
* needed by almost every backend module. It is included directly by
* postgres.h, so the definitions here are automatically available
- * everywhere. Keep it lean!
+ * everywhere. Keep it lean!
*
- * Memory allocation occurs within "contexts". Every chunk obtained from
+ * Memory allocation occurs within "contexts". Every chunk obtained from
* palloc()/MemoryContextAlloc() is allocated within a specific context.
* The entire contents of a context can be freed easily and quickly by
* resetting or deleting the context --- this is both faster and less
@@ -29,7 +29,7 @@
#define PALLOC_H
/*
- * Type MemoryContextData is declared in nodes/memnodes.h. Most users
+ * Type MemoryContextData is declared in nodes/memnodes.h. Most users
* of memory allocation should just treat it as an abstract type, so we
* do not provide the struct contents here.
*/
@@ -107,9 +107,11 @@ extern char *pstrdup(const char *in);
extern char *pnstrdup(const char *in, Size len);
/* sprintf into a palloc'd buffer --- these are in psprintf.c */
-extern char *psprintf(const char *fmt,...)
+extern char *
+psprintf(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
-extern size_t pvsnprintf(char *buf, size_t len, const char *fmt, va_list args)
+extern size_t
+pvsnprintf(char *buf, size_t len, const char *fmt, va_list args)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 0)));
#endif /* PALLOC_H */
diff --git a/src/include/utils/pg_crc.h b/src/include/utils/pg_crc.h
index b37e94eba2..375c405da5 100644
--- a/src/include/utils/pg_crc.h
+++ b/src/include/utils/pg_crc.h
@@ -72,7 +72,7 @@ extern CRCDLLIMPORT const uint32 pg_crc32_table[];
/*
* crc0 represents the LSBs of the 64-bit value, crc1 the MSBs. Note that
* with crc0 placed first, the output of 32-bit and 64-bit implementations
- * will be bit-compatible only on little-endian architectures. If it were
+ * will be bit-compatible only on little-endian architectures. If it were
* important to make the two possible implementations bit-compatible on
* all machines, we could do a configure test to decide how to order the
* two fields, but it seems not worth the trouble.
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index b8ca643293..cfbfaa26cc 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -30,7 +30,7 @@
* the analyzed-and-rewritten query tree, and rebuild it when next needed.
*
* An actual execution plan, represented by CachedPlan, is derived from the
- * CachedPlanSource when we need to execute the query. The plan could be
+ * CachedPlanSource when we need to execute the query. The plan could be
* either generic (usable with any set of plan parameters) or custom (for a
* specific set of parameters). plancache.c contains the logic that decides
* which way to do it for any particular execution. If we are using a generic
@@ -61,15 +61,15 @@
* allows the query tree to be discarded easily when it is invalidated.
*
* Some callers wish to use the CachedPlan API even with one-shot queries
- * that have no reason to be saved at all. We therefore support a "oneshot"
- * variant that does no data copying or invalidation checking. In this case
+ * that have no reason to be saved at all. We therefore support a "oneshot"
+ * variant that does no data copying or invalidation checking. In this case
* there are no separate memory contexts: the CachedPlanSource struct and
* all subsidiary data live in the caller's CurrentMemoryContext, and there
- * is no way to free memory short of clearing that entire context. A oneshot
+ * is no way to free memory short of clearing that entire context. A oneshot
* plan is always treated as unsaved.
*
* Note: the string referenced by commandTag is not subsidiary storage;
- * it is assumed to be a compile-time-constant string. As with portals,
+ * it is assumed to be a compile-time-constant string. As with portals,
* commandTag shall be NULL if and only if the original query string (before
* rewriting) was an empty string.
*/
@@ -114,7 +114,7 @@ typedef struct CachedPlanSource
* CachedPlan represents an execution plan derived from a CachedPlanSource.
* The reference count includes both the link from the parent CachedPlanSource
* (if any), and any active plan executions, so the plan can be discarded
- * exactly when refcount goes to zero. Both the struct itself and the
+ * exactly when refcount goes to zero. Both the struct itself and the
* subsidiary data live in the context denoted by the context field.
* This makes it easy to free a no-longer-needed cached plan. (However,
* if is_oneshot is true, the context does not belong solely to the CachedPlan
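
[Editor's example] The refcount rule above (parent link plus active executions, freed exactly at zero) reduces to a small toy. MiniPlan and its helpers are hypothetical stand-ins, not the plancache.c API:

#include <stdlib.h>

/* Hypothetical miniature of the CachedPlan refcounting rule: the count
 * covers the parent's link plus each active execution, and the plan
 * with all its subsidiary data is freed exactly when it reaches zero. */
typedef struct MiniPlan
{
    int   refcount;
    void *subsidiary;           /* stands in for the plan tree */
} MiniPlan;

static MiniPlan *
miniplan_create(void)
{
    MiniPlan *p = malloc(sizeof(MiniPlan));

    if (p == NULL)
        return NULL;
    p->refcount = 1;            /* the parent's link */
    p->subsidiary = malloc(64);
    return p;
}

static void
miniplan_release(MiniPlan *p)
{
    if (--p->refcount == 0)
    {
        free(p->subsidiary);    /* everything lives and dies with the plan */
        free(p);
    }
}

int
main(void)
{
    MiniPlan *p = miniplan_create();    /* assume success for brevity */

    p->refcount++;              /* an execution pins the plan */
    miniplan_release(p);        /* execution finished */
    miniplan_release(p);        /* parent drops its link: freed here */
    return 0;
}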
diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h
index 0506bdee15..0b15dd28a1 100644
--- a/src/include/utils/portal.h
+++ b/src/include/utils/portal.h
@@ -58,8 +58,8 @@
* single result from the user's viewpoint. However, the rule rewriter
* may expand the single source query to zero or many actual queries.)
*
- * PORTAL_ONE_SELECT: the portal contains one single SELECT query. We run
- * the Executor incrementally as results are demanded. This strategy also
+ * PORTAL_ONE_SELECT: the portal contains one single SELECT query. We run
+ * the Executor incrementally as results are demanded. This strategy also
* supports holdable cursors (the Executor results can be dumped into a
* tuplestore for access after transaction completion).
*
@@ -73,7 +73,7 @@
* all the auxiliary queries.)
*
* PORTAL_ONE_MOD_WITH: the portal contains one single SELECT query, but
- * it has data-modifying CTEs. This is currently treated the same as the
+ * it has data-modifying CTEs. This is currently treated the same as the
* PORTAL_ONE_RETURNING case because of the possibility of needing to fire
* triggers. It may act more like PORTAL_ONE_SELECT in future.
*
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index c87dadc0eb..4d73700185 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -112,11 +112,11 @@ typedef struct RelationData
TriggerDesc *trigdesc; /* Trigger info, or NULL if rel has none */
/*
- * The index chosen as the relation's replication identity or
- * InvalidOid. Only set correctly if RelationGetIndexList has been
+ * The index chosen as the relation's replication identity or InvalidOid.
+ * Only set correctly if RelationGetIndexList has been
* called/rd_indexvalid > 0.
*/
- Oid rd_replidindex;
+ Oid rd_replidindex;
/*
* rd_options is set whenever rd_rel is loaded into the relcache entry.
@@ -142,7 +142,7 @@ typedef struct RelationData
* Note: rd_amcache is available for index AMs to cache private data about
* an index. This must be just a cache since it may get reset at any time
* (in particular, it will get reset by a relcache inval message for the
- * index). If used, it must point to a single memory chunk palloc'd in
+ * index). If used, it must point to a single memory chunk palloc'd in
* rd_indexcxt. A relcache reset will include freeing that chunk and
* setting rd_amcache = NULL.
*/
@@ -165,7 +165,7 @@ typedef struct RelationData
* foreign-table support
*
* rd_fdwroutine must point to a single memory chunk palloc'd in
- * CacheMemoryContext. It will be freed and reset to NULL on a relcache
+ * CacheMemoryContext. It will be freed and reset to NULL on a relcache
* reset.
*/
@@ -220,7 +220,8 @@ typedef struct StdRdOptions
AutoVacOpts autovacuum; /* autovacuum-related options */
bool security_barrier; /* for views */
int check_option_offset; /* for views */
- bool user_catalog_table; /* use as an additional catalog relation */
+ bool user_catalog_table; /* use as an additional catalog
+ * relation */
} StdRdOptions;
#define HEAP_MIN_FILLFACTOR 10
@@ -274,7 +275,7 @@ typedef struct StdRdOptions
((relation)->rd_options && \
((StdRdOptions *) (relation)->rd_options)->check_option_offset != 0 ? \
strcmp((char *) (relation)->rd_options + \
- ((StdRdOptions *) (relation)->rd_options)->check_option_offset, \
+ ((StdRdOptions *) (relation)->rd_options)->check_option_offset, \
"local") == 0 : false)
/*
@@ -286,13 +287,13 @@ typedef struct StdRdOptions
((relation)->rd_options && \
((StdRdOptions *) (relation)->rd_options)->check_option_offset != 0 ? \
strcmp((char *) (relation)->rd_options + \
- ((StdRdOptions *) (relation)->rd_options)->check_option_offset, \
+ ((StdRdOptions *) (relation)->rd_options)->check_option_offset, \
"cascaded") == 0 : false)
/*
* RelationIsUsedAsCatalogTable
* Returns whether the relation should be treated as a catalog table
- * from the pov of logical decoding.
+ * from the pov of logical decoding.
*/
#define RelationIsUsedAsCatalogTable(relation) \
((relation)->rd_options ? \
@@ -398,7 +399,7 @@ typedef struct StdRdOptions
* RelationGetTargetBlock
* Fetch relation's current insertion target block.
*
- * Returns InvalidBlockNumber if there is no current target block. Note
+ * Returns InvalidBlockNumber if there is no current target block. Note
* that the target block status is discarded on any smgr-level invalidation.
*/
#define RelationGetTargetBlock(relation) \
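
[Editor's example] The check-option macros above locate a string by a byte offset stored inside the options blob, with offset 0 meaning "not set". A self-contained sketch of that trick, using invented names (Opts, make_opts):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical sketch of the check_option_offset trick: a variable-length
 * option string lives in the same allocation as the fixed struct and is
 * found via a byte offset from the struct start (0 == not set). */
typedef struct Opts
{
    int check_option_offset;    /* offset from struct start, or 0 */
    /* variable-length data follows */
} Opts;

static Opts *
make_opts(const char *check_option)
{
    size_t  len = strlen(check_option) + 1;
    Opts   *o = malloc(sizeof(Opts) + len);

    if (o == NULL)
        return NULL;
    o->check_option_offset = (int) sizeof(Opts);
    memcpy((char *) o + o->check_option_offset, check_option, len);
    return o;
}

#define OPTS_CHECK_IS_LOCAL(o) \
    ((o) && (o)->check_option_offset != 0 && \
     strcmp((char *) (o) + (o)->check_option_offset, "local") == 0)

int
main(void)
{
    Opts *o = make_opts("local");

    printf("%d\n", OPTS_CHECK_IS_LOCAL(o));     /* prints 1 */
    free(o);
    return 0;
}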
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index 31f4878f99..3e1c1385a4 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -23,7 +23,7 @@ typedef struct RelationData *Relation;
/* ----------------
* RelationPtr is used in the executor to support index scans
* where we have to keep track of several index relations in an
- * array. -cim 9/10/89
+ * array. -cim 9/10/89
* ----------------
*/
typedef Relation *RelationPtr;
@@ -50,7 +50,7 @@ typedef enum IndexAttrBitmapKind
} IndexAttrBitmapKind;
extern Bitmapset *RelationGetIndexAttrBitmap(Relation relation,
- IndexAttrBitmapKind keyAttrs);
+ IndexAttrBitmapKind keyAttrs);
extern void RelationGetExclusionInfo(Relation indexRelation,
Oid **operators,
diff --git a/src/include/utils/relfilenodemap.h b/src/include/utils/relfilenodemap.h
index c20c60437a..a98791d8a3 100644
--- a/src/include/utils/relfilenodemap.h
+++ b/src/include/utils/relfilenodemap.h
@@ -13,6 +13,6 @@
#ifndef RELFILENODEMAP_H
#define RELFILENODEMAP_H
-extern Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode);
+extern Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode);
#endif /* RELFILENODEMAP_H */
diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h
index c6f21c9d5c..e448e911a6 100644
--- a/src/include/utils/resowner.h
+++ b/src/include/utils/resowner.h
@@ -36,7 +36,7 @@ extern PGDLLIMPORT ResourceOwner TopTransactionResourceOwner;
/*
* Resource releasing is done in three phases: pre-locks, locks, and
- * post-locks. The pre-lock phase must release any resources that are
+ * post-locks. The pre-lock phase must release any resources that are
* visible to other backends (such as pinned buffers); this ensures that
* when we release a lock that another backend may be waiting on, it will
* see us as being fully out of our transaction. The post-lock phase
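
[Editor's example] A minimal sketch of the three-phase ordering described above. The phase names mirror resowner.h's release phases; release_phase() and main() are invented:

#include <stdio.h>

/* Shared-visible resources go first, so that by the time a lock is
 * released, waiters see this backend as fully out of its transaction. */
typedef enum
{
    RESOURCE_RELEASE_BEFORE_LOCKS,  /* e.g. unpin shared buffers */
    RESOURCE_RELEASE_LOCKS,
    RESOURCE_RELEASE_AFTER_LOCKS    /* e.g. close backend-local files */
} ReleasePhase;

static void
release_phase(ReleasePhase phase)
{
    static const char *names[] = {"pre-locks", "locks", "post-locks"};

    printf("releasing %s resources\n", names[phase]);
}

int
main(void)
{
    for (int p = RESOURCE_RELEASE_BEFORE_LOCKS;
         p <= RESOURCE_RELEASE_AFTER_LOCKS; p++)
        release_phase((ReleasePhase) p);
    return 0;
}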
diff --git a/src/include/utils/resowner_private.h b/src/include/utils/resowner_private.h
index 6015d74f14..b8fd1a9221 100644
--- a/src/include/utils/resowner_private.h
+++ b/src/include/utils/resowner_private.h
@@ -84,8 +84,8 @@ extern void ResourceOwnerForgetFile(ResourceOwner owner,
/* support for dynamic shared memory management */
extern void ResourceOwnerEnlargeDSMs(ResourceOwner owner);
extern void ResourceOwnerRememberDSM(ResourceOwner owner,
- dsm_segment *);
+ dsm_segment *);
extern void ResourceOwnerForgetDSM(ResourceOwner owner,
- dsm_segment *);
+ dsm_segment *);
#endif /* RESOWNER_PRIVATE_H */
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index fb740313d0..0f662ec8bb 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -23,7 +23,7 @@
/*
* Note: the default selectivity estimates are not chosen entirely at random.
* We want them to be small enough to ensure that indexscans will be used if
- * available, for typical table densities of ~100 tuples/page. Thus, for
+ * available, for typical table densities of ~100 tuples/page. Thus, for
* example, 0.01 is not quite small enough, since that makes it appear that
* nearly all pages will be hit anyway. Also, since we sometimes estimate
* eqsel as 1/num_distinct, we probably want DEFAULT_NUM_DISTINCT to equal
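
[Editor's example] To see why 0.01 is "not quite small enough": at ~100 tuples/page, a selectivity of 0.01 still averages one matching tuple per page, so essentially every page gets fetched anyway. A quick check:

#include <stdio.h>

int
main(void)
{
    double tuples_per_page = 100.0;     /* typical density per the comment */
    double selectivity = 0.01;

    /* ~1 match per page means a scan touches nearly every page, which is
     * why 0.01 would make indexscans look unhelpfully expensive. */
    printf("expected matches per page: %.2f\n",
           selectivity * tuples_per_page);
    return 0;
}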
diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h
index 8ee9285c55..d8e8b351ed 100644
--- a/src/include/utils/snapshot.h
+++ b/src/include/utils/snapshot.h
@@ -28,7 +28,7 @@ typedef struct SnapshotData *Snapshot;
* function.
*/
typedef bool (*SnapshotSatisfiesFunc) (HeapTuple htup,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
/*
 * Struct representing all kinds of possible snapshots.
@@ -39,7 +39,7 @@ typedef bool (*SnapshotSatisfiesFunc) (HeapTuple htup,
* * Historic MVCC snapshots used during logical decoding
* * snapshots passed to HeapTupleSatisfiesDirty()
* * snapshots used for SatisfiesAny, Toast, Self where no members are
- * accessed.
+ * accessed.
*
* TODO: It's probably a good idea to split this struct using a NodeTag
* similar to how parser and executor nodes are handled, with one type for
@@ -62,16 +62,18 @@ typedef struct SnapshotData
*/
TransactionId xmin; /* all XID < xmin are visible to me */
TransactionId xmax; /* all XID >= xmax are invisible to me */
+
/*
 * For normal MVCC snapshot this contains all the xact IDs that are in
* progress, unless the snapshot was taken during recovery in which case
- * it's empty. For historic MVCC snapshots, the meaning is inverted,
- * i.e. it contains *committed* transactions between xmin and xmax.
+ * it's empty. For historic MVCC snapshots, the meaning is inverted, i.e.
+ * it contains *committed* transactions between xmin and xmax.
*/
TransactionId *xip;
uint32 xcnt; /* # of xact ids in xip[] */
/* note: all ids in xip[] satisfy xmin <= xip[i] < xmax */
int32 subxcnt; /* # of xact ids in subxip[] */
+
/*
* For non-historic MVCC snapshots, this contains subxact IDs that are in
* progress (and other transactions that are in progress if taken during
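
[Editor's example] A simplified sketch of the xmin/xmax/xip rule described above, for a normal (non-historic) MVCC snapshot. MiniSnapshot and xid_in_progress are invented stand-ins, not the SnapshotData API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

typedef struct MiniSnapshot
{
    TransactionId  xmin;        /* all XID < xmin ended before the snapshot */
    TransactionId  xmax;        /* all XID >= xmax are invisible */
    TransactionId *xip;         /* in-progress XIDs, xmin <= xip[i] < xmax */
    uint32_t       xcnt;        /* # of entries in xip[] */
} MiniSnapshot;

static bool
xid_in_progress(const MiniSnapshot *snap, TransactionId xid)
{
    if (xid < snap->xmin || xid >= snap->xmax)
        return false;           /* settled by the range bounds alone */
    for (uint32_t i = 0; i < snap->xcnt; i++)
        if (snap->xip[i] == xid)
            return true;
    return false;
}

int
main(void)
{
    TransactionId xip[] = {100, 102};
    MiniSnapshot  snap = {95, 110, xip, 2};

    printf("%d %d\n",
           xid_in_progress(&snap, 102),     /* 1: listed in xip[] */
           xid_in_progress(&snap, 101));    /* 0: already resolved */
    return 0;
}

For a historic snapshot the header says the meaning of xip[] inverts: it lists *committed* XIDs between xmin and xmax, so the membership test's sense would flip.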
diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h
index 13d3fbee71..8b6b0de2e8 100644
--- a/src/include/utils/sortsupport.h
+++ b/src/include/utils/sortsupport.h
@@ -33,7 +33,7 @@
*
* Note: since pg_amproc functions are indexed by (lefttype, righttype)
* it is possible to associate a BTSORTSUPPORT function with a cross-type
- * comparison. This could sensibly be used to provide a fast comparator
+ * comparison. This could sensibly be used to provide a fast comparator
* function for such cases, but probably not any other acceleration method.
*
*
diff --git a/src/include/utils/tqual.h b/src/include/utils/tqual.h
index 48abe62983..ae285c3ed5 100644
--- a/src/include/utils/tqual.h
+++ b/src/include/utils/tqual.h
@@ -3,7 +3,7 @@
* tqual.h
* POSTGRES "time qualification" definitions, ie, tuple visibility rules.
*
- * Should be moved/renamed... - vadim 07/28/98
+ * Should be moved/renamed... - vadim 07/28/98
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -76,7 +76,7 @@ extern bool HeapTupleSatisfiesToast(HeapTuple htup,
extern bool HeapTupleSatisfiesDirty(HeapTuple htup,
Snapshot snapshot, Buffer buffer);
extern bool HeapTupleSatisfiesHistoricMVCC(HeapTuple htup,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
/* Special "satisfies" routines with different APIs */
extern HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup,
@@ -95,8 +95,8 @@ extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
* details this is implemented in reorderbuffer.c not tqual.c.
*/
extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
- Snapshot snapshot,
- HeapTuple htup,
- Buffer buffer,
- CommandId *cmin, CommandId *cmax);
+ Snapshot snapshot,
+ HeapTuple htup,
+ Buffer buffer,
+ CommandId *cmin, CommandId *cmax);
#endif /* TQUAL_H */
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index 05445f049c..7d828e064b 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -41,9 +41,9 @@ typedef struct Tuplesortstate Tuplesortstate;
* The "heap" API actually stores/sorts MinimalTuples, which means it doesn't
* preserve the system columns (tuple identity and transaction visibility
* info). The sort keys are specified by column numbers within the tuples
- * and sort operator OIDs. We save some cycles by passing and returning the
+ * and sort operator OIDs. We save some cycles by passing and returning the
* tuples in TupleTableSlots, rather than forming actual HeapTuples (which'd
- * have to be converted to MinimalTuples). This API works well for sorts
+ * have to be converted to MinimalTuples). This API works well for sorts
* executed as parts of plan trees.
*
* The "cluster" API stores/sorts full HeapTuples including all visibility
@@ -52,7 +52,7 @@ typedef struct Tuplesortstate Tuplesortstate;
* go with this API, not the "begin_heap" one!
*
* The "index_btree" API stores/sorts IndexTuples (preserving all their
- * header fields). The sort keys are specified by a btree index definition.
+ * header fields). The sort keys are specified by a btree index definition.
*
* The "index_hash" API is similar to index_btree, but the tuples are
* actually sorted by their hash codes not the raw data.
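
[Editor's example] The small-vs-large strategy the header describes, reduced to a toy: below a memory budget, sort in place with qsort(); beyond it, a real external sort spills sorted runs to temporary files and merges them. All names here are invented:

#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int x = *(const int *) a, y = *(const int *) b;

    return (x > y) - (x < y);
}

static void
sort_ints(int *data, size_t n, size_t work_mem_elems)
{
    if (n <= work_mem_elems)
        qsort(data, n, sizeof(int), cmp_int);   /* fits: sort in memory */
    else
    {
        /* Placeholder: a real external sort writes sorted runs to
         * temporary files and merges them back together. */
    }
}

int
main(void)
{
    int v[] = {42, 7, 19, 3};

    sort_ints(v, 4, 1000);
    for (int i = 0; i < 4; i++)
        printf("%d ", v[i]);
    putchar('\n');
    return 0;
}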
diff --git a/src/include/utils/tuplestore.h b/src/include/utils/tuplestore.h
index 16eca871cd..e4adc93689 100644
--- a/src/include/utils/tuplestore.h
+++ b/src/include/utils/tuplestore.h
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h
index b47a570704..ae1fc9c632 100644
--- a/src/include/utils/typcache.h
+++ b/src/include/utils/typcache.h
@@ -56,7 +56,7 @@ typedef struct TypeCacheEntry
/*
* Pre-set-up fmgr call info for the equality operator, the btree
- * comparison function, and the hash calculation function. These are kept
+ * comparison function, and the hash calculation function. These are kept
* in the type cache to avoid problems with memory leaks in repeated calls
* to functions such as array_eq, array_cmp, hash_array. There is not
* currently a need to maintain call info for the lt_opr or gt_opr.
@@ -73,7 +73,7 @@ typedef struct TypeCacheEntry
TupleDesc tupDesc;
/*
- * Fields computed when TYPECACHE_RANGE_INFO is requested. Zeroes if not
+ * Fields computed when TYPECACHE_RANGE_INFO is requested. Zeroes if not
* a range type or information hasn't yet been requested. Note that
* rng_cmp_proc_finfo could be different from the element type's default
* btree comparison function.
@@ -88,7 +88,7 @@ typedef struct TypeCacheEntry
int flags; /* flags about what we've computed */
/*
- * Private information about an enum type. NULL if not enum or
+ * Private information about an enum type. NULL if not enum or
* information hasn't been requested.
*/
struct TypeCacheEnumData *enumData;
diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c
index 3ec774ca6d..28b1be758a 100644
--- a/src/interfaces/ecpg/ecpglib/data.c
+++ b/src/interfaces/ecpg/ecpglib/data.c
@@ -461,7 +461,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
* character pointers. So, use extra indirection.
*/
if (varcharsize == 0 && offset == sizeof(char *))
- str = *(char **)str;
+ str = *(char **) str;
if (varcharsize == 0 || varcharsize > size)
{
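
[Editor's example] The extra indirection in the hunk above handles host variables that are arrays of char *: the generic byte pointer addresses a char * slot, so it must be dereferenced once more before use. A standalone illustration:

#include <stdio.h>

int
main(void)
{
    char *names[2] = {"alice", "bob"};
    char *str = (char *) &names[1];     /* generic cursor into the array */

    str = *(char **) str;               /* follow the stored pointer */
    puts(str);                          /* prints "bob" */
    return 0;
}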
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index 5ec8958b27..a4c7151f9a 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -855,7 +855,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
for (element = 0; element < var->arrsize; element++)
{
- int result;
+ int result;
nval = PGTYPESnumeric_new();
if (!nval)
@@ -890,7 +890,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
}
else
{
- int result;
+ int result;
nval = PGTYPESnumeric_new();
if (!nval)
@@ -1345,8 +1345,8 @@ ecpg_build_params(struct statement * stmt)
}
/*
- * now tobeinserted points to an area that contains the next parameter;
- * now find the position in the string where it belongs
+ * now tobeinserted points to an area that contains the next
+ * parameter; now find the position in the string where it belongs
*/
if ((position = next_insert(stmt->command, position, stmt->questionmarks) + 1) == 0)
{
@@ -1512,9 +1512,9 @@ ecpg_execute(struct statement * stmt)
*
* Parameters
* stmt statement structure holding the PGresult and
- * the list of output variables
+ * the list of output variables
* clear_result
- * PQclear() the result upon returning from this function
+ * PQclear() the result upon returning from this function
*
* Returns success as boolean. Also an SQL error is raised in case of failure.
*-------
@@ -1930,14 +1930,14 @@ ecpg_do_prologue(int lineno, const int compat, const int force_indicator,
var->arrsize = va_arg(args, long);
var->offset = va_arg(args, long);
- /*
- * Unknown array size means pointer to an array.
- * Unknown varcharsize usually also means pointer. But if the
- * type is character and the array size is known, it is an
- * array of pointers to char, so use var->pointer as it is.
+ /*
+ * Unknown array size means pointer to an array. Unknown
+ * varcharsize usually also means pointer. But if the type is
+ * character and the array size is known, it is an array of
+ * pointers to char, so use var->pointer as it is.
*/
if (var->arrsize == 0 ||
- (var->varcharsize == 0 && ((var->type != ECPGt_char && var->type != ECPGt_unsigned_char) || (var->arrsize <= 1))))
+ (var->varcharsize == 0 && ((var->type != ECPGt_char && var->type != ECPGt_unsigned_char) || (var->arrsize <= 1))))
var->value = *((char **) (var->pointer));
else
var->value = var->pointer;
diff --git a/src/interfaces/ecpg/ecpglib/extern.h b/src/interfaces/ecpg/ecpglib/extern.h
index 1f96869972..38360072ed 100644
--- a/src/interfaces/ecpg/ecpglib/extern.h
+++ b/src/interfaces/ecpg/ecpglib/extern.h
@@ -61,7 +61,7 @@ struct statement
struct variable *inlist;
struct variable *outlist;
char *oldlocale;
- int nparams;
+ int nparams;
char **paramvalues;
PGresult *results;
};
@@ -168,17 +168,17 @@ struct prepared_statement *ecpg_find_prepared_statement(const char *,
bool ecpg_store_result(const PGresult *results, int act_field,
const struct statement * stmt, struct variable * var);
bool ecpg_store_input(const int, const bool, const struct variable *, char **, bool);
-void ecpg_free_params(struct statement *stmt, bool print);
-bool ecpg_do_prologue(int, const int, const int, const char *, const bool,
- enum ECPG_statement_type, const char *, va_list,
- struct statement **);
+void ecpg_free_params(struct statement * stmt, bool print);
+bool ecpg_do_prologue(int, const int, const int, const char *, const bool,
+ enum ECPG_statement_type, const char *, va_list,
+ struct statement **);
bool ecpg_build_params(struct statement *);
bool ecpg_autostart_transaction(struct statement * stmt);
bool ecpg_execute(struct statement * stmt);
bool ecpg_process_output(struct statement *, bool);
void ecpg_do_epilogue(struct statement *);
-bool ecpg_do(const int, const int, const int, const char *, const bool,
- const int, const char *, va_list);
+bool ecpg_do(const int, const int, const int, const char *, const bool,
+ const int, const char *, va_list);
bool ecpg_check_PQresult(PGresult *, int, PGconn *, enum COMPAT_MODE);
void ecpg_raise(int line, int code, const char *sqlstate, const char *str);
diff --git a/src/interfaces/ecpg/include/sqlca.h b/src/interfaces/ecpg/include/sqlca.h
index 52fcbf830f..41e5b550af 100644
--- a/src/interfaces/ecpg/include/sqlca.h
+++ b/src/interfaces/ecpg/include/sqlca.h
@@ -40,7 +40,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h
index 3a50d1410e..c2635c7b28 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt.h
+++ b/src/interfaces/ecpg/pgtypeslib/dt.h
@@ -255,7 +255,7 @@ do { \
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
@@ -353,7 +353,7 @@ void GetCurrentDateTime(struct tm *);
int date2j(int, int, int);
void TrimTrailingZeros(char *);
void dt2time(double, int *, int *, int *, fsec_t *);
-int PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp * d,
+int PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp * d,
int *year, int *month, int *day,
int *hour, int *minute, int *second,
int *tz);
diff --git a/src/interfaces/ecpg/pgtypeslib/dt_common.c b/src/interfaces/ecpg/pgtypeslib/dt_common.c
index c5d91ed922..7ca4dd51ce 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt_common.c
+++ b/src/interfaces/ecpg/pgtypeslib/dt_common.c
@@ -1178,11 +1178,11 @@ DecodeNumberField(int len, char *str, int fmask,
/*
* OK, we have at most six digits to care about. Let's construct a
- * string with those digits, zero-padded on the right, and then do
- * the conversion to an integer.
+ * string with those digits, zero-padded on the right, and then do the
+ * conversion to an integer.
*
- * XXX This truncates the seventh digit, unlike rounding it as do
- * the backend and the !HAVE_INT64_TIMESTAMP case.
+ * XXX This truncates the seventh digit, unlike rounding it as do the
+ * backend and the !HAVE_INT64_TIMESTAMP case.
*/
for (i = 0; i < 6; i++)
fstr[i] = *cp != '\0' ? *cp++ : '0';
diff --git a/src/interfaces/ecpg/pgtypeslib/interval.c b/src/interfaces/ecpg/pgtypeslib/interval.c
index d0dee16690..dce8a14d88 100644
--- a/src/interfaces/ecpg/pgtypeslib/interval.c
+++ b/src/interfaces/ecpg/pgtypeslib/interval.c
@@ -160,7 +160,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -1036,8 +1036,8 @@ recalc:
static int
tm2interval(struct tm * tm, fsec_t fsec, interval * span)
{
- if ((double)tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon > INT_MAX ||
- (double)tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon < INT_MIN)
+ if ((double) tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon > INT_MAX ||
+ (double) tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon < INT_MIN)
return -1;
span->month = tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
#ifdef HAVE_INT64_TIMESTAMP
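
[Editor's example] The tm2interval guard above performs the range check in double, where the multiply cannot wrap, before redoing the computation in integer arithmetic. The same pattern isolated; months_from_years is an invented name:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Do the arithmetic in double, where it cannot overflow for these
 * magnitudes, and reject results outside int range before performing
 * the real integer computation. */
static bool
months_from_years(int years, int months, int *out)
{
    const int MONTHS_PER_YEAR = 12;

    if ((double) years * MONTHS_PER_YEAR + months > INT_MAX ||
        (double) years * MONTHS_PER_YEAR + months < INT_MIN)
        return false;           /* would overflow int */
    *out = years * MONTHS_PER_YEAR + months;
    return true;
}

int
main(void)
{
    int m;

    printf("%d\n", months_from_years(200000000, 0, &m));   /* 0: overflow */
    return 0;
}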
diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c
index 9ae975ecde..84d73b7bb7 100644
--- a/src/interfaces/ecpg/pgtypeslib/numeric.c
+++ b/src/interfaces/ecpg/pgtypeslib/numeric.c
@@ -974,7 +974,7 @@ PGTYPESnumeric_sub(numeric *var1, numeric *var2, numeric *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Accuracy of result is determined by global_rscale.
+ * in result. Accuracy of result is determined by global_rscale.
* ----------
*/
int
diff --git a/src/interfaces/ecpg/preproc/c_keywords.c b/src/interfaces/ecpg/preproc/c_keywords.c
index 41d20d26d6..06bf039099 100644
--- a/src/interfaces/ecpg/preproc/c_keywords.c
+++ b/src/interfaces/ecpg/preproc/c_keywords.c
@@ -57,7 +57,7 @@ static const ScanKeyword ScanCKeywords[] = {
/*
- * Do a binary search using plain strcmp() comparison. This is much like
+ * Do a binary search using plain strcmp() comparison. This is much like
* ScanKeywordLookup(), except we want case-sensitive matching.
*/
const ScanKeyword *
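
[Editor's example] A minimal version of the case-sensitive lookup described above: a plain strcmp() binary search over a sorted table. The keyword list below is invented; the real table is ScanCKeywords:

#include <stdio.h>
#include <string.h>

static const char *keywords[] = {"break", "case", "char", "const", "int"};

static int
lookup_keyword(const char *text)
{
    int lo = 0, hi = (int) (sizeof(keywords) / sizeof(keywords[0])) - 1;

    while (lo <= hi)
    {
        int mid = (lo + hi) / 2;
        int cmp = strcmp(text, keywords[mid]);  /* case-sensitive */

        if (cmp == 0)
            return mid;
        if (cmp < 0)
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return -1;
}

int
main(void)
{
    /* prints "2 -1": case matters, unlike ScanKeywordLookup() */
    printf("%d %d\n", lookup_keyword("char"), lookup_keyword("CHAR"));
    return 0;
}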
diff --git a/src/interfaces/ecpg/preproc/extern.h b/src/interfaces/ecpg/preproc/extern.h
index 3bbb6a4473..efe74c718e 100644
--- a/src/interfaces/ecpg/preproc/extern.h
+++ b/src/interfaces/ecpg/preproc/extern.h
@@ -73,8 +73,8 @@ extern int base_yylex(void);
extern void base_yyerror(const char *);
extern void *mm_alloc(size_t), *mm_realloc(void *, size_t);
extern char *mm_strdup(const char *);
-extern void mmerror(int errorcode, enum errortype type, const char *error, ...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
-extern void mmfatal(int errorcode, const char *error, ...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3),noreturn));
+extern void mmerror(int errorcode, enum errortype type, const char *error,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
+extern void mmfatal(int errorcode, const char *error,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3), noreturn));
extern void output_get_descr_header(char *);
extern void output_get_descr(char *, char *);
extern void output_set_descr_header(char *);
diff --git a/src/interfaces/ecpg/preproc/output.c b/src/interfaces/ecpg/preproc/output.c
index 007c07c034..8cac947b5a 100644
--- a/src/interfaces/ecpg/preproc/output.c
+++ b/src/interfaces/ecpg/preproc/output.c
@@ -96,7 +96,7 @@ hashline_number(void)
)
{
/* "* 2" here is for escaping '\' and '"' below */
- char *line = mm_alloc(strlen("\n#line %d \"%s\"\n") + sizeof(int) * CHAR_BIT * 10 / 3 + strlen(input_filename) * 2);
+ char *line = mm_alloc(strlen("\n#line %d \"%s\"\n") + sizeof(int) * CHAR_BIT * 10 / 3 + strlen(input_filename) *2);
char *src,
*dest;
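
[Editor's example] The allocation above reserves sizeof(int) * CHAR_BIT * 10 / 3 bytes for the rendered line number. A b-bit integer needs about b x log10(2), roughly b x 10/33, decimal digits, so 10/3 is a deliberately loose but safe bound: 106 bytes for a 32-bit int versus the roughly dozen actually required. A quick check of both bounds:

#include <limits.h>
#include <stdio.h>

int
main(void)
{
    printf("tight bound: %d digits, loose bound: %d bytes\n",
           (int) (sizeof(int) * CHAR_BIT * 10 / 33) + 1,
           (int) (sizeof(int) * CHAR_BIT * 10 / 3));
    return 0;
}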
diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl
index 77def8679a..efd87edd82 100644
--- a/src/interfaces/ecpg/preproc/parse.pl
+++ b/src/interfaces/ecpg/preproc/parse.pl
@@ -42,13 +42,13 @@ my %replace_token = (
# or in the block
my %replace_string = (
- 'WITH_TIME' => 'with time',
+ 'WITH_TIME' => 'with time',
'WITH_ORDINALITY' => 'with ordinality',
- 'NULLS_FIRST' => 'nulls first',
- 'NULLS_LAST' => 'nulls last',
- 'TYPECAST' => '::',
- 'DOT_DOT' => '..',
- 'COLON_EQUALS' => ':=',);
+ 'NULLS_FIRST' => 'nulls first',
+ 'NULLS_LAST' => 'nulls last',
+ 'TYPECAST' => '::',
+ 'DOT_DOT' => '..',
+ 'COLON_EQUALS' => ':=',);
# specific replace_types for specific non-terminals - never include the ':'
# ECPG-only replace_types are defined in ecpg-replace_types
diff --git a/src/interfaces/ecpg/preproc/parser.c b/src/interfaces/ecpg/preproc/parser.c
index b7bd6ca3d7..ee8a60e8df 100644
--- a/src/interfaces/ecpg/preproc/parser.c
+++ b/src/interfaces/ecpg/preproc/parser.c
@@ -35,7 +35,7 @@ static YYLTYPE lookahead_yylloc; /* yylloc for lookahead token */
* Intermediate filter between parser and base lexer (base_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
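
[Editor's example] A toy version of the token filter described above: peek one raw token ahead and fuse a multiword sequence (here, NULLS FIRST) into a single token, pushing the lookahead back when it doesn't combine. Token codes and the stream are invented:

#include <stdio.h>

enum token { T_NULLS, T_FIRST, T_NULLS_FIRST, T_IDENT, T_EOF };

static enum token stream[] = {T_IDENT, T_NULLS, T_FIRST, T_EOF};
static int pos = 0;

static enum token raw_lex(void) { return stream[pos++]; }

static enum token lookahead;
static int have_lookahead = 0;

static enum token
filtered_lex(void)
{
    enum token tok;

    if (have_lookahead)
    {
        have_lookahead = 0;
        tok = lookahead;
    }
    else
        tok = raw_lex();

    if (tok == T_NULLS)
    {
        lookahead = raw_lex();
        if (lookahead == T_FIRST)
            return T_NULLS_FIRST;   /* combine the pair into one token */
        have_lookahead = 1;         /* push it back for the next call */
    }
    return tok;
}

int
main(void)
{
    enum token t;

    while ((t = filtered_lex()) != T_EOF)
        printf("token %d\n", (int) t);  /* prints 3 (IDENT), 2 (NULLS_FIRST) */
    return 0;
}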
diff --git a/src/interfaces/ecpg/preproc/type.c b/src/interfaces/ecpg/preproc/type.c
index e54f05d5b5..6c2645965e 100644
--- a/src/interfaces/ecpg/preproc/type.c
+++ b/src/interfaces/ecpg/preproc/type.c
@@ -310,7 +310,8 @@ ECPGdump_a_type(FILE *o, const char *name, struct ECPGtype * type, const int bra
{
if (ind_type->type == ECPGt_NO_INDICATOR)
{
- char *str_neg_one = mm_strdup("-1");
+ char *str_neg_one = mm_strdup("-1");
+
ECPGdump_a_simple(o, ind_name, ind_type->type, ind_type->size, str_neg_one, NULL, ind_prefix, 0);
free(str_neg_one);
}
@@ -324,7 +325,7 @@ ECPGdump_a_type(FILE *o, const char *name, struct ECPGtype * type, const int bra
break;
case ECPGt_struct:
{
- char *str_one = mm_strdup("1");
+ char *str_one = mm_strdup("1");
if (indicator_set && ind_type->type != ECPGt_struct)
mmfatal(INDICATOR_NOT_STRUCT, "indicator for struct has to be a struct");
@@ -338,10 +339,13 @@ ECPGdump_a_type(FILE *o, const char *name, struct ECPGtype * type, const int bra
break;
case ECPGt_char_variable:
{
- /* Allocate for each, as there are code-paths where the values get stomped on. */
- char *str_varchar_one = mm_strdup("1");
- char *str_arr_one = mm_strdup("1");
- char *str_neg_one = mm_strdup("-1");
+ /*
+ * Allocate for each, as there are code-paths where the values
+ * get stomped on.
+ */
+ char *str_varchar_one = mm_strdup("1");
+ char *str_arr_one = mm_strdup("1");
+ char *str_neg_one = mm_strdup("-1");
if (indicator_set && (ind_type->type == ECPGt_struct || ind_type->type == ECPGt_array))
mmfatal(INDICATOR_NOT_SIMPLE, "indicator for simple data type has to be simple");
@@ -357,9 +361,12 @@ ECPGdump_a_type(FILE *o, const char *name, struct ECPGtype * type, const int bra
break;
case ECPGt_descriptor:
{
- /* Allocate for each, as there are code-paths where the values get stomped on. */
- char *str_neg_one = mm_strdup("-1");
- char *ind_type_neg_one = mm_strdup("-1");
+ /*
+ * Allocate for each, as there are code-paths where the values
+ * get stomped on.
+ */
+ char *str_neg_one = mm_strdup("-1");
+ char *ind_type_neg_one = mm_strdup("-1");
if (indicator_set && (ind_type->type == ECPGt_struct || ind_type->type == ECPGt_array))
mmfatal(INDICATOR_NOT_SIMPLE, "indicator for simple data type has to be simple");
@@ -374,9 +381,12 @@ ECPGdump_a_type(FILE *o, const char *name, struct ECPGtype * type, const int bra
break;
default:
{
- /* Allocate for each, as there are code-paths where the values get stomped on. */
- char *str_neg_one = mm_strdup("-1");
- char *ind_type_neg_one = mm_strdup("-1");
+ /*
+ * Allocate for each, as there are code-paths where the values
+ * get stomped on.
+ */
+ char *str_neg_one = mm_strdup("-1");
+ char *ind_type_neg_one = mm_strdup("-1");
if (indicator_set && (ind_type->type == ECPGt_struct || ind_type->type == ECPGt_array))
mmfatal(INDICATOR_NOT_SIMPLE, "indicator for simple data type has to be simple");
@@ -448,35 +458,36 @@ ECPGdump_a_simple(FILE *o, const char *name, enum ECPGttype type,
case ECPGt_unsigned_char:
case ECPGt_char_variable:
case ECPGt_string:
- {
- char *sizeof_name = "char";
- /*
- * we have to use the pointer except for arrays with given
- * bounds, ecpglib will distinguish between * and []
- */
- if ((atoi(varcharsize) > 1 ||
- (atoi(arrsize) > 0) ||
- (atoi(varcharsize) == 0 && strcmp(varcharsize, "0") != 0) ||
- (atoi(arrsize) == 0 && strcmp(arrsize, "0") != 0))
- && siz == NULL)
{
- sprintf(variable, "(%s%s)", prefix ? prefix : "", name);
- if ((type == ECPGt_char || type == ECPGt_unsigned_char) &&
- strcmp(varcharsize, "0") == 0)
+ char *sizeof_name = "char";
+
+ /*
+ * we have to use the pointer except for arrays with given
+ * bounds, ecpglib will distinguish between * and []
+ */
+ if ((atoi(varcharsize) > 1 ||
+ (atoi(arrsize) > 0) ||
+ (atoi(varcharsize) == 0 && strcmp(varcharsize, "0") != 0) ||
+ (atoi(arrsize) == 0 && strcmp(arrsize, "0") != 0))
+ && siz == NULL)
{
- /*
- * If this is an array of char *, the offset would be
- * sizeof(char *) and not sizeof(char).
- */
- sizeof_name = "char *";
+ sprintf(variable, "(%s%s)", prefix ? prefix : "", name);
+ if ((type == ECPGt_char || type == ECPGt_unsigned_char) &&
+ strcmp(varcharsize, "0") == 0)
+ {
+ /*
+ * If this is an array of char *, the offset would
+ * be sizeof(char *) and not sizeof(char).
+ */
+ sizeof_name = "char *";
+ }
}
- }
- else
- sprintf(variable, "&(%s%s)", prefix ? prefix : "", name);
+ else
+ sprintf(variable, "&(%s%s)", prefix ? prefix : "", name);
- sprintf(offset, "(%s)*sizeof(%s)", strcmp(varcharsize, "0") == 0 ? "1" : varcharsize, sizeof_name);
- break;
- }
+ sprintf(offset, "(%s)*sizeof(%s)", strcmp(varcharsize, "0") == 0 ? "1" : varcharsize, sizeof_name);
+ break;
+ }
case ECPGt_numeric:
/*
@@ -542,8 +553,8 @@ ECPGdump_a_simple(FILE *o, const char *name, enum ECPGttype type,
strcpy(arrsize, "1");
/*
- * If siz i.e. the size of structure of which this variable is part of,
- * that gives the offset to the next element, if required
+ * If siz is set (i.e. the size of the structure this variable is part
+ * of), it gives the offset to the next element, if required
*/
if (siz == NULL || strlen(siz) == 0)
fprintf(o, "\n\t%s,%s,(long)%s,(long)%s,%s, ", get_type(type), variable, varcharsize, arrsize, offset);
diff --git a/src/interfaces/ecpg/preproc/variable.c b/src/interfaces/ecpg/preproc/variable.c
index 50ddeab2e8..2ad7b5d255 100644
--- a/src/interfaces/ecpg/preproc/variable.c
+++ b/src/interfaces/ecpg/preproc/variable.c
@@ -437,7 +437,8 @@ remove_variable_from_list(struct arguments ** list, struct variable * var)
void
dump_variables(struct arguments * list, int mode)
{
- char *str_zero = mm_strdup("0");
+ char *str_zero = mm_strdup("0");
+
if (list == NULL)
return;
@@ -529,7 +530,7 @@ adjust_array(enum ECPGttype type_enum, char **dimension, char **length, char *ty
if (pointer_len > 2)
mmfatal(PARSE_ERROR, ngettext("multilevel pointers (more than 2 levels) are not supported; found %d level",
- "multilevel pointers (more than 2 levels) are not supported; found %d levels", pointer_len),
+ "multilevel pointers (more than 2 levels) are not supported; found %d levels", pointer_len),
pointer_len);
if (pointer_len > 1 && type_enum != ECPGt_char && type_enum != ECPGt_unsigned_char && type_enum != ECPGt_string)
diff --git a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
index c1a4891191..390601d84e 100644
--- a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
+++ b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
@@ -57,7 +57,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/preproc-init.c b/src/interfaces/ecpg/test/expected/preproc-init.c
index 49f2d5d57a..9e410ff155 100644
--- a/src/interfaces/ecpg/test/expected/preproc-init.c
+++ b/src/interfaces/ecpg/test/expected/preproc-init.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-array.c b/src/interfaces/ecpg/test/expected/sql-array.c
index 3c879561b3..13b940217c 100644
--- a/src/interfaces/ecpg/test/expected/sql-array.c
+++ b/src/interfaces/ecpg/test/expected/sql-array.c
@@ -59,7 +59,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-code100.c b/src/interfaces/ecpg/test/expected/sql-code100.c
index 051fc38622..702c6e146f 100644
--- a/src/interfaces/ecpg/test/expected/sql-code100.c
+++ b/src/interfaces/ecpg/test/expected/sql-code100.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-copystdout.c b/src/interfaces/ecpg/test/expected/sql-copystdout.c
index 563732b05d..33ea2133d6 100644
--- a/src/interfaces/ecpg/test/expected/sql-copystdout.c
+++ b/src/interfaces/ecpg/test/expected/sql-copystdout.c
@@ -53,7 +53,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-define.c b/src/interfaces/ecpg/test/expected/sql-define.c
index b9571ec53d..4a1d7ee6f0 100644
--- a/src/interfaces/ecpg/test/expected/sql-define.c
+++ b/src/interfaces/ecpg/test/expected/sql-define.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dynalloc.c b/src/interfaces/ecpg/test/expected/sql-dynalloc.c
index ccc337168e..cd5d5c0ab0 100644
--- a/src/interfaces/ecpg/test/expected/sql-dynalloc.c
+++ b/src/interfaces/ecpg/test/expected/sql-dynalloc.c
@@ -52,7 +52,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dynalloc2.c b/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
index e85189214b..e5d2f75782 100644
--- a/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
+++ b/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
@@ -52,7 +52,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dyntest.c b/src/interfaces/ecpg/test/expected/sql-dyntest.c
index 537d9ff63a..91b4421b19 100644
--- a/src/interfaces/ecpg/test/expected/sql-dyntest.c
+++ b/src/interfaces/ecpg/test/expected/sql-dyntest.c
@@ -105,7 +105,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-indicators.c b/src/interfaces/ecpg/test/expected/sql-indicators.c
index e805e4ee00..5e167b1944 100644
--- a/src/interfaces/ecpg/test/expected/sql-indicators.c
+++ b/src/interfaces/ecpg/test/expected/sql-indicators.c
@@ -53,7 +53,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-alloc.c b/src/interfaces/ecpg/test/expected/thread-alloc.c
index 199a77eb15..f80dd6cffb 100644
--- a/src/interfaces/ecpg/test/expected/thread-alloc.c
+++ b/src/interfaces/ecpg/test/expected/thread-alloc.c
@@ -74,7 +74,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-descriptor.c b/src/interfaces/ecpg/test/expected/thread-descriptor.c
index 2584626f4f..e2be89dec0 100644
--- a/src/interfaces/ecpg/test/expected/thread-descriptor.c
+++ b/src/interfaces/ecpg/test/expected/thread-descriptor.c
@@ -65,7 +65,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-prep.c b/src/interfaces/ecpg/test/expected/thread-prep.c
index cd28c85099..b7f32721a5 100644
--- a/src/interfaces/ecpg/test/expected/thread-prep.c
+++ b/src/interfaces/ecpg/test/expected/thread-prep.c
@@ -74,7 +74,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/pg_regress_ecpg.c b/src/interfaces/ecpg/test/pg_regress_ecpg.c
index e9bedb5c75..02f7a3066b 100644
--- a/src/interfaces/ecpg/test/pg_regress_ecpg.c
+++ b/src/interfaces/ecpg/test/pg_regress_ecpg.c
@@ -78,9 +78,9 @@ ecpg_filter(const char *sourcefile, const char *outfile)
static PID_TYPE
ecpg_start_test(const char *testname,
- _stringlist ** resultfiles,
- _stringlist ** expectfiles,
- _stringlist ** tags)
+ _stringlist **resultfiles,
+ _stringlist **expectfiles,
+ _stringlist **tags)
{
PID_TYPE pid;
char inprg[MAXPGPATH];
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index 5ddd17d5df..f5ec2e0178 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -742,9 +742,9 @@ pg_fe_getauthname(void)
pglock_thread();
/*
- * We document PQconndefaults() to return NULL for a memory allocation
- * failure. We don't have an API to return a user name lookup failure,
- * so we just assume it always succeeds.
+ * We document PQconndefaults() to return NULL for a memory allocation
+ * failure. We don't have an API to return a user name lookup failure, so
+ * we just assume it always succeeds.
*/
#ifdef WIN32
if (GetUserName(username, &namesize))
@@ -767,7 +767,7 @@ pg_fe_getauthname(void)
*
* This is intended to be used by client applications that wish to send
* commands like ALTER USER joe PASSWORD 'pwd'. The password need not
- * be sent in cleartext if it is encrypted on the client side. This is
+ * be sent in cleartext if it is encrypted on the client side. This is
* good because it ensures the cleartext password won't end up in logs,
* pg_stat displays, etc. We export the function so that clients won't
 * be dependent on low-level details like whether the encryption is MD5
 * be dependent on low-level details like whether the encryption is MD5
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 0791774af7..540426cbe9 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -134,7 +134,7 @@ static int ldapServiceLookup(const char *purl, PQconninfoOption *options,
*
* PQconninfoOptions[] is a constant static array that we use to initialize
* a dynamically allocated working copy. All the "val" fields in
- * PQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
+ * PQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
* fields point to malloc'd strings that should be freed when the working
* array is freed (see PQconninfoFree).
*
@@ -389,7 +389,7 @@ pgthreadlock_t pg_g_threadlock = default_threadlock;
* pqDropConnection
*
* Close any physical connection to the server, and reset associated
- * state inside the connection object. We don't release state that
+ * state inside the connection object. We don't release state that
* would be needed to reconnect, though.
*/
void
@@ -544,7 +544,7 @@ PQping(const char *conninfo)
* See comment for PQconnectdbParams for the definition of the string format.
*
* Returns a PGconn*. If NULL is returned, a malloc error has occurred, and
- * you should not attempt to proceed with this connection. If the status
+ * you should not attempt to proceed with this connection. If the status
* field of the connection returned is CONNECTION_BAD, an error has
* occurred. In this case you should call PQfinish on the result, (perhaps
* inspecting the error message first). Other fields of the structure may not
@@ -619,7 +619,7 @@ PQconnectStartParams(const char *const * keywords,
* See comment for PQconnectdb for the definition of the string format.
*
* Returns a PGconn*. If NULL is returned, a malloc error has occurred, and
- * you should not attempt to proceed with this connection. If the status
+ * you should not attempt to proceed with this connection. If the status
* field of the connection returned is CONNECTION_BAD, an error has
* occurred. In this case you should call PQfinish on the result, (perhaps
* inspecting the error message first). Other fields of the structure may not
@@ -856,7 +856,7 @@ connectOptions2(PGconn *conn)
* and their current default values.
*
* NOTE: as of PostgreSQL 7.0, the returned array is dynamically allocated
- * and should be freed when no longer needed via PQconninfoFree(). (In prior
+ * and should be freed when no longer needed via PQconninfoFree(). (In prior
* versions, the returned array was static, but that's not thread-safe.)
* Pre-7.0 applications that use this function will see a small memory leak
* until they are updated to call PQconninfoFree.
@@ -1480,7 +1480,7 @@ connectDBComplete(PGconn *conn)
for (;;)
{
/*
- * Wait, if necessary. Note that the initial state (just after
+ * Wait, if necessary. Note that the initial state (just after
* PQconnectStart) is to wait for the socket to select for writing.
*/
switch (flag)
@@ -1542,7 +1542,7 @@ connectDBComplete(PGconn *conn)
* will not block.
* o If you do not supply an IP address for the remote host (i.e. you
* supply a host name instead) then PQconnectStart will block on
- * gethostbyname. You will be fine if using Unix sockets (i.e. by
+ * gethostbyname. You will be fine if using Unix sockets (i.e. by
* supplying neither a host name nor a host address).
* o If your backend wants to use Kerberos authentication then you must
* supply both a host name and a host address, otherwise this function
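
[Editor's example] The nonblocking flow these comments describe, as a usage sketch: start with PQconnectStart, wait for the socket (writable first, matching the initial state noted above), and let PQconnectPoll drive the state machine. Compile against libpq (-lpq); the connection string is a placeholder:

#include <stdio.h>
#include <sys/select.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn *conn = PQconnectStart("dbname=postgres");
    PostgresPollingStatusType st = PGRES_POLLING_WRITING;

    if (conn == NULL)
        return 1;               /* malloc failure */

    while (st != PGRES_POLLING_OK && st != PGRES_POLLING_FAILED)
    {
        int    sock = PQsocket(conn);
        fd_set fds;

        FD_ZERO(&fds);
        FD_SET(sock, &fds);
        if (st == PGRES_POLLING_READING)
            select(sock + 1, &fds, NULL, NULL, NULL);
        else
            select(sock + 1, NULL, &fds, NULL, NULL);
        st = PQconnectPoll(conn);   /* advance the state machine */
    }

    printf("connection %s\n",
           PQstatus(conn) == CONNECTION_OK ? "ok" : "failed");
    PQfinish(conn);
    return 0;
}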
@@ -1606,7 +1606,7 @@ PQconnectPoll(PGconn *conn)
libpq_gettext(
"invalid connection state, "
"probably indicative of memory corruption\n"
- ));
+ ));
goto error_return;
}
@@ -1809,7 +1809,7 @@ keep_going: /* We will come back to here until there is
/*
* This connection failed --- set up error report, then
* close socket (do it this way in case close() affects
- * the value of errno...). We will ignore the connect()
+ * the value of errno...). We will ignore the connect()
* failure and keep going if there are more addresses.
*/
connectFailureMessage(conn, SOCK_ERRNO);
@@ -2097,7 +2097,7 @@ keep_going: /* We will come back to here until there is
{
/*
* Server failure of some sort, such as failure to
- * fork a backend process. We need to process and
+ * fork a backend process. We need to process and
* report the error message, which might be formatted
* according to either protocol 2 or protocol 3.
* Rather than duplicate the code for that, we flip
@@ -2478,7 +2478,7 @@ keep_going: /* We will come back to here until there is
/*
* If we tried to send application_name, check to see
* if the error is about that --- pre-9.0 servers will
- * reject it at this stage of the process. If so,
+ * reject it at this stage of the process. If so,
* close the connection and retry without sending
* application_name. We could possibly get a false
* SQLSTATE match here and retry uselessly, but there
@@ -2624,9 +2624,9 @@ internal_ping(PGconn *conn)
/*
* If we failed to get any ERROR response from the postmaster, report
- * PQPING_NO_RESPONSE. This result could be somewhat misleading for a
+ * PQPING_NO_RESPONSE. This result could be somewhat misleading for a
* pre-7.4 server, since it won't send back a SQLSTATE, but those are long
- * out of support. Another corner case where the server could return a
+ * out of support. Another corner case where the server could return a
* failure without a SQLSTATE is fork failure, but NO_RESPONSE isn't
* totally unreasonable for that anyway. We expect that every other
* failure case in a modern server will produce a report with a SQLSTATE.
@@ -2666,6 +2666,7 @@ makeEmptyPGconn(void)
PGconn *conn;
#ifdef WIN32
+
/*
* Make sure socket support is up and running.
*/
@@ -2853,7 +2854,7 @@ freePGconn(PGconn *conn)
* - properly close a connection to the backend
*
* This should reset or release all transient state, but NOT the connection
- * parameters. On exit, the PGconn should be in condition to start a fresh
+ * parameters. On exit, the PGconn should be in condition to start a fresh
* connection with the same parameters (see PQreset()).
*/
static void
@@ -2982,7 +2983,7 @@ PQreset(PGconn *conn)
if (connectDBStart(conn) && connectDBComplete(conn))
{
/*
- * Notify event procs of successful reset. We treat an event proc
+ * Notify event procs of successful reset. We treat an event proc
* failure as disabling the connection ... good idea?
*/
int i;
@@ -3042,7 +3043,7 @@ PQresetPoll(PGconn *conn)
if (status == PGRES_POLLING_OK)
{
/*
- * Notify event procs of successful reset. We treat an event proc
+ * Notify event procs of successful reset. We treat an event proc
* failure as disabling the connection ... good idea?
*/
int i;
@@ -3231,7 +3232,7 @@ cancel_errReturn:
* Returns TRUE if able to send the cancel request, FALSE if not.
*
* On failure, an error message is stored in *errbuf, which must be of size
- * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
+ * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
* success return.
*/
int
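
[Editor's example] Usage sketch for the cancel path documented above, using a 256-byte errbuf as recommended; assumes an already-established conn:

#include <stdio.h>
#include <libpq-fe.h>

static void
cancel_current_query(PGconn *conn)
{
    char      errbuf[256];      /* recommended size per the comment */
    PGcancel *cancel = PQgetCancel(conn);

    if (cancel == NULL)
        return;
    if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
        fprintf(stderr, "cancel failed: %s\n", errbuf);     /* errbuf set */
    PQfreeCancel(cancel);
}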
@@ -3516,12 +3517,12 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
*
* LDAP does not require that an anonymous bind is performed explicitly,
* but we want to distinguish between the case where LDAP bind does not
- * succeed within PGLDAP_TIMEOUT seconds (return 2 to continue parsing
- * the service control file) and the case where querying the LDAP server
- * fails (return 1 to end parsing).
+ * succeed within PGLDAP_TIMEOUT seconds (return 2 to continue parsing the
+ * service control file) and the case where querying the LDAP server fails
+ * (return 1 to end parsing).
*
- * Unfortunately there is no way of setting a timeout that works for
- * both Windows and OpenLDAP.
+ * Unfortunately there is no way of setting a timeout that works for both
+ * Windows and OpenLDAP.
*/
#ifdef WIN32
/* the nonstandard ldap_connect function performs an anonymous bind */
@@ -3532,7 +3533,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
ldap_unbind(ld);
return 2;
}
-#else /* !WIN32 */
+#else /* !WIN32 */
/* in OpenLDAP, use the LDAP_OPT_NETWORK_TIMEOUT option */
if (ldap_set_option(ld, LDAP_OPT_NETWORK_TIMEOUT, &time) != LDAP_SUCCESS)
{
@@ -3572,7 +3573,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
ldap_unbind(ld);
return 3;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
/* search */
res = NULL;
@@ -3788,7 +3789,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
return 0;
}
-#endif /* USE_LDAP */
+#endif /* USE_LDAP */
#define MAXBUFSIZE 256
@@ -4008,7 +4009,7 @@ parseServiceFile(const char *serviceFile,
* PQconninfoParse
*
* Parse a string like PQconnectdb() would do and return the
- * resulting connection options array. NULL is returned on failure.
+ * resulting connection options array. NULL is returned on failure.
* The result contains only options specified directly in the string,
* not any possible default values.
*
@@ -4461,8 +4462,8 @@ conninfo_add_defaults(PQconninfoOption *options, PQExpBuffer errorMessage)
/*
* If there's a service spec, use it to obtain any not-explicitly-given
- * parameters. Ignore error if no error message buffer is passed
- * because there is no way to pass back the failure message.
+ * parameters. Ignore error if no error message buffer is passed because
+ * there is no way to pass back the failure message.
*/
if (parseServiceInfo(options, errorMessage) != 0 && errorMessage)
return false;
@@ -4792,7 +4793,7 @@ cleanup:
* Connection URI parameters parser routine
*
* If successful, returns true while connOptions is filled with parsed
- * parameters. Otherwise, returns false and fills errorMessage appropriately.
+ * parameters. Otherwise, returns false and fills errorMessage appropriately.
*
* Destructively modifies 'params' buffer.
*/
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index 50e4035781..4075e51841 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -94,7 +94,7 @@ static int check_field_number(const PGresult *res, int field_num);
* doesn't tell us up front how many tuples will be returned.)
* All other subsidiary storage for a PGresult is kept in PGresult_data blocks
* of size PGRESULT_DATA_BLOCKSIZE. The overhead at the start of each block
- * is just a link to the next one, if any. Free-space management info is
+ * is just a link to the next one, if any. Free-space management info is
* kept in the owning PGresult.
* A query returning a small amount of data will thus require three malloc
* calls: one for the PGresult, one for the tuples pointer array, and one
@@ -113,7 +113,7 @@ static int check_field_number(const PGresult *res, int field_num);
* blocks, instead of being crammed into a regular allocation block.
* Requirements for correct function are:
* PGRESULT_ALIGN_BOUNDARY must be a multiple of the alignment requirements
- * of all machine data types. (Currently this is set from configure
+ * of all machine data types. (Currently this is set from configure
* tests, so it should be OK automatically.)
* PGRESULT_SEP_ALLOC_THRESHOLD + PGRESULT_BLOCK_OVERHEAD <=
* PGRESULT_DATA_BLOCKSIZE
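
[Editor's example] A toy arena in the spirit of the block allocator described above: small requests pack into shared blocks whose only per-block overhead is the next link. Sizes here are invented, and unlike the real code this sketch skips the alignment rounding and the separate-allocation path for oversized chunks that the comments mention:

#include <stdlib.h>

#define BLOCK_SIZE 2048

typedef struct Block
{
    struct Block *next;         /* the only per-block overhead */
    size_t        used;
    char          data[BLOCK_SIZE];
} Block;

static void *
arena_alloc(Block **head, size_t sz)
{
    if (sz > BLOCK_SIZE)
        return NULL;            /* real code allocates these separately */
    if (*head == NULL || (*head)->used + sz > BLOCK_SIZE)
    {
        Block *b = malloc(sizeof(Block));

        if (b == NULL)
            return NULL;
        b->next = *head;        /* link new block at the head */
        b->used = 0;
        *head = b;
    }
    (*head)->used += sz;
    return (*head)->data + (*head)->used - sz;
}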
@@ -268,10 +268,10 @@ PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs)
* Returns a deep copy of the provided 'src' PGresult, which cannot be NULL.
* The 'flags' argument controls which portions of the result will or will
* NOT be copied. The created result is always put into the
- * PGRES_TUPLES_OK status. The source result error message is not copied,
+ * PGRES_TUPLES_OK status. The source result error message is not copied,
* although cmdStatus is.
*
- * To set custom attributes, use PQsetResultAttrs. That function requires
+ * To set custom attributes, use PQsetResultAttrs. That function requires
* that there are no attrs contained in the result, so to use that
* function you cannot use the PG_COPYRES_ATTRS or PG_COPYRES_TUPLES
* options with this function.
@@ -299,7 +299,7 @@ PQcopyResult(const PGresult *src, int flags)
if (!dest)
return NULL;
- /* Always copy these over. Is cmdStatus really useful here? */
+ /* Always copy these over. Is cmdStatus really useful here? */
dest->client_encoding = src->client_encoding;
strcpy(dest->cmdStatus, src->cmdStatus);
@@ -757,7 +757,7 @@ pqPrepareAsyncResult(PGconn *conn)
PGresult *res;
/*
- * conn->result is the PGresult to return. If it is NULL (which probably
+ * conn->result is the PGresult to return. If it is NULL (which probably
* shouldn't happen) we assume there is an appropriate error message in
* conn->errorMessage.
*/
@@ -778,7 +778,7 @@ pqPrepareAsyncResult(PGconn *conn)
/*
* Replace conn->result with next_result, if any. In the normal case
* there isn't a next result and we're just dropping ownership of the
- * current result. In single-row mode this restores the situation to what
+ * current result. In single-row mode this restores the situation to what
* it was before we created the current single-row result.
*/
conn->result = conn->next_result;
@@ -1569,7 +1569,7 @@ pqHandleSendFailure(PGconn *conn)
/* loop until no more data readable */ ;
/*
- * Parse any available input messages. Since we are in PGASYNC_IDLE
+ * Parse any available input messages. Since we are in PGASYNC_IDLE
* state, only NOTICE and NOTIFY messages will be eaten.
*/
parseInput(conn);
@@ -1789,7 +1789,7 @@ getCopyResult(PGconn *conn, ExecStatusType copytype)
* If the server connection has been lost, don't pretend everything is
* hunky-dory; instead return a PGRES_FATAL_ERROR result, and reset the
* asyncStatus to idle (corresponding to what we'd do if we'd detected I/O
- * error in the earlier steps in PQgetResult). The text returned in the
+ * error in the earlier steps in PQgetResult). The text returned in the
* result is whatever is in conn->errorMessage; we hope that was filled
* with something relevant when the lost connection was detected.
*/
@@ -2031,7 +2031,7 @@ PQexecFinish(PGconn *conn)
* If the query was not even sent, return NULL; conn->errorMessage is set to
* a relevant message.
* If the query was sent, a new PGresult is returned (which could indicate
- * either success or failure). On success, the PGresult contains status
+ * either success or failure). On success, the PGresult contains status
* PGRES_COMMAND_OK, and its parameter and column-heading fields describe
* the statement's inputs and outputs respectively.
* The user is responsible for freeing the PGresult via PQclear()
@@ -2374,7 +2374,7 @@ PQgetCopyData(PGconn *conn, char **buffer, int async)
* PQgetline - gets a newline-terminated string from the backend.
*
* Chiefly here so that applications can use "COPY <rel> to stdout"
- * and read the output string. Returns a null-terminated string in s.
+ * and read the output string. Returns a null-terminated string in s.
*
* XXX this routine is now deprecated, because it can't handle binary data.
* If called during a COPY BINARY we return EOF.
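A sketch of the non-deprecated replacement: a PQgetCopyData() loop, which copes with binary data and rows of any length (assumes a COPY ... TO STDOUT has already been issued on 'conn'):

    #include <stdio.h>
    #include <libpq-fe.h>

    static int
    read_copy_out(PGconn *conn)
    {
        char   *buf;
        int     len;

        while ((len = PQgetCopyData(conn, &buf, 0)) > 0)
        {
            fwrite(buf, 1, len, stdout);     /* one data row per call */
            PQfreemem(buf);
        }
        /* -1 means COPY finished normally; -2 means an error occurred */
        return (len == -1) ? 0 : -1;
    }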
@@ -2488,7 +2488,7 @@ PQputnbytes(PGconn *conn, const char *buffer, int nbytes)
* the application must call this routine to finish the command protocol.
*
* When using protocol 3.0 this is deprecated; it's cleaner to use PQgetResult
- * to get the transfer status. Note however that when using 2.0 protocol,
+ * to get the transfer status. Note however that when using 2.0 protocol,
* recovering from a copy failure often requires a PQreset. PQendcopy will
* take care of that, PQgetResult won't.
*
@@ -2716,7 +2716,7 @@ PQfname(const PGresult *res, int field_num)
* downcasing in the frontend might follow different locale rules than
* downcasing in the backend...
*
- * Returns -1 if no match. In the present backend it is also possible
+ * Returns -1 if no match. In the present backend it is also possible
* to have multiple matches, in which case the first one is found.
*/
int
@@ -3149,7 +3149,7 @@ PQfreemem(void *ptr)
*
* This function is here only for binary backward compatibility.
* New code should use PQfreemem(). A macro will automatically map
- * calls to PQfreemem. It should be removed in the future. bjm 2003-03-24
+ * calls to PQfreemem. It should be removed in the future. bjm 2003-03-24
*/
#undef PQfreeNotify
@@ -3344,7 +3344,7 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
/*
* If we are escaping a literal that contains backslashes, we use the
* escape string syntax so that the result is correct under either value
- * of standard_conforming_strings. We also emit a leading space in this
+ * of standard_conforming_strings. We also emit a leading space in this
* case, to guard against the possibility that the result might be
* interpolated immediately following an identifier.
*/
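In application code the same guarantee is reached through PQescapeLiteral(), which applies the escaping described above and supplies its own quoting (and leading space, when needed); a hedged usage sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <libpq-fe.h>

    static char *
    build_query(PGconn *conn, const char *value)
    {
        char   *lit = PQescapeLiteral(conn, value, strlen(value));
        char   *query = NULL;
        size_t  n;

        if (lit == NULL)
            return NULL;                     /* see PQerrorMessage(conn) */
        /* 'lit' arrives quoted, possibly as E'...' with its own leading space */
        n = strlen(lit) + 32;
        query = malloc(n);
        if (query != NULL)
            snprintf(query, n, "SELECT %s", lit);
        PQfreemem(lit);
        return query;
    }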
diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c
index 8d29282fc6..18de2914d3 100644
--- a/src/interfaces/libpq/fe-lobj.c
+++ b/src/interfaces/libpq/fe-lobj.c
@@ -899,7 +899,7 @@ lo_initialize(PGconn *conn)
MemSet((char *) lobjfuncs, 0, sizeof(PGlobjfuncs));
/*
- * Execute the query to get all the functions at once. In 7.3 and later
+ * Execute the query to get all the functions at once. In 7.3 and later
* we need to be schema-safe. lo_create only exists in 8.1 and up.
* lo_truncate only exists in 8.3 and up.
*/
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index cc487b22ee..a28a09790b 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -681,13 +681,13 @@ retry3:
/*
* Hack to deal with the fact that some kernels will only give us back
* 1 packet per recv() call, even if we asked for more and there is
- * more available. If it looks like we are reading a long message,
+ * more available. If it looks like we are reading a long message,
* loop back to recv() again immediately, until we run out of data or
* buffer space. Without this, the block-and-restart behavior of
* libpq's higher levels leads to O(N^2) performance on long messages.
*
* Since we left-justified the data above, conn->inEnd gives the
- * amount of data already read in the current message. We consider
+ * amount of data already read in the current message. We consider
* the message "long" once we have acquired 32k ...
*/
if (conn->inEnd > 32768 &&
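A hedged sketch of that inner loop, assuming a non-blocking socket so recv() simply reports no data rather than stalling ('buf', 'buflen', and 'used' are illustrative names, not libpq's):

    #include <sys/types.h>
    #include <sys/socket.h>

    static size_t
    drain_long_message(int sock, char *buf, size_t buflen, size_t used)
    {
        /* past 32k, keep pulling packets instead of returning to the caller */
        while (used > 32768 && used < buflen)
        {
            ssize_t n = recv(sock, buf + used, buflen - used, 0);

            if (n <= 0)
                break;                       /* drained, EOF, or error */
            used += (size_t) n;
        }
        return used;
    }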
diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c
index 10510b5bf5..59e4a4973f 100644
--- a/src/interfaces/libpq/fe-protocol2.c
+++ b/src/interfaces/libpq/fe-protocol2.c
@@ -231,7 +231,7 @@ pqSetenvPoll(PGconn *conn)
case SETENV_STATE_QUERY1_SEND:
{
/*
- * Issue query to get information we need. Here we must
+ * Issue query to get information we need. Here we must
* use begin/commit in case autocommit is off by default
* in a 7.3 server.
*
@@ -725,7 +725,7 @@ getRowDescriptions(PGconn *conn)
advance_and_error:
/*
- * Discard the failed message. Unfortunately we don't know for sure where
+ * Discard the failed message. Unfortunately we don't know for sure where
* the end is, so just throw away everything in the input buffer. This is
* not very desirable but it's the best we can do in protocol v2.
*/
@@ -898,7 +898,7 @@ getAnotherTuple(PGconn *conn, bool binary)
advance_and_error:
/*
- * Discard the failed message. Unfortunately we don't know for sure where
+ * Discard the failed message. Unfortunately we don't know for sure where
* the end is, so just throw away everything in the input buffer. This is
* not very desirable but it's the best we can do in protocol v2.
*/
@@ -954,7 +954,7 @@ pqGetErrorNotice2(PGconn *conn, bool isError)
/*
* Since the message might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
* for stuff that is expected to be short.
*/
initPQExpBuffer(&workBuf);
@@ -1048,10 +1048,10 @@ failure:
/*
* checkXactStatus - attempt to track transaction-block status of server
*
- * This is called each time we receive a command-complete message. By
+ * This is called each time we receive a command-complete message. By
* watching for messages from BEGIN/COMMIT/ROLLBACK commands, we can do
* a passable job of tracking the server's xact status. BUT: this does
- * not work at all on 7.3 servers with AUTOCOMMIT OFF. (Man, was that
+ * not work at all on 7.3 servers with AUTOCOMMIT OFF. (Man, was that
* feature ever a mistake.) Caveat user.
*
* The tags known here are all those used as far back as 7.0; is it worth
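The tag-watching itself amounts to something like this sketch (simplified two-state enum; the real code also copes with the autocommit-off quirks noted above):

    #include <string.h>

    typedef enum { XACT_IDLE, XACT_INTRANS } xact_guess;

    static void
    track_xact(const char *cmdTag, xact_guess *status)
    {
        if (strcmp(cmdTag, "BEGIN") == 0)
            *status = XACT_INTRANS;
        else if (strcmp(cmdTag, "COMMIT") == 0 ||
                 strcmp(cmdTag, "ROLLBACK") == 0)
            *status = XACT_IDLE;
        /* any other tag leaves the guess unchanged */
    }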
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index d895589148..c514ca5841 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -166,7 +166,7 @@ pqParseInput3(PGconn *conn)
* ERROR messages are displayed using the notice processor;
* ParameterStatus is handled normally; anything else is just
* dropped on the floor after displaying a suitable warning
- * notice. (An ERROR is very possibly the backend telling us why
+ * notice. (An ERROR is very possibly the backend telling us why
* it is about to close the connection, so we don't want to just
* discard it...)
*/
@@ -364,7 +364,7 @@ pqParseInput3(PGconn *conn)
case 'd': /* Copy Data */
/*
- * If we see Copy Data, just silently drop it. This would
+ * If we see Copy Data, just silently drop it. This would
* only occur if application exits COPY OUT mode too
* early.
*/
@@ -373,7 +373,7 @@ pqParseInput3(PGconn *conn)
case 'c': /* Copy Done */
/*
- * If we see Copy Done, just silently drop it. This is
+ * If we see Copy Done, just silently drop it. This is
* the normal case during PQendcopy. We will keep
* swallowing data, expecting to see command-complete for
* the COPY command.
@@ -603,7 +603,7 @@ advance_and_error:
pqSaveErrorResult(conn);
/*
- * Return zero to allow input parsing to continue. Subsequent "D"
+ * Return zero to allow input parsing to continue. Subsequent "D"
* messages will be ignored until we get to end of data, since an error
* result is already set up.
*/
@@ -785,7 +785,7 @@ set_error_result:
pqSaveErrorResult(conn);
/*
- * Return zero to allow input parsing to continue. Subsequent "D"
+ * Return zero to allow input parsing to continue. Subsequent "D"
* messages will be ignored until we get to end of data, since an error
* result is already set up.
*/
@@ -812,14 +812,14 @@ pqGetErrorNotice3(PGconn *conn, bool isError)
/*
* Since the fields might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
- * for stuff that is expected to be short. We shouldn't use
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * for stuff that is expected to be short. We shouldn't use
* conn->errorMessage either, since this might be only a notice.
*/
initPQExpBuffer(&workBuf);
/*
- * Make a PGresult to hold the accumulated fields. We temporarily lie
+ * Make a PGresult to hold the accumulated fields. We temporarily lie
* about the result status, so that PQmakeEmptyPGresult doesn't uselessly
* copy conn->errorMessage.
*/
@@ -1031,7 +1031,7 @@ reportErrorPosition(PQExpBuffer msg, const char *query, int loc, int encoding)
/*
* Each character might occupy multiple physical bytes in the string, and
* in some Far Eastern character sets it might take more than one screen
- * column as well. We compute the starting byte offset and starting
+ * column as well. We compute the starting byte offset and starting
* screen column of each logical character, and store these in qidx[] and
* scridx[] respectively.
*/
@@ -1059,8 +1059,8 @@ reportErrorPosition(PQExpBuffer msg, const char *query, int loc, int encoding)
/*
* Within the scanning loop, cno is the current character's logical
* number, qoffset is its offset in wquery, and scroffset is its starting
- * logical screen column (all indexed from 0). "loc" is the logical
- * character number of the error location. We scan to determine loc_line
+ * logical screen column (all indexed from 0). "loc" is the logical
+ * character number of the error location. We scan to determine loc_line
* (the 1-based line number containing loc) and ibeg/iend (first character
* number and last+1 character number of the line containing loc). Note
* that qidx[] and scridx[] are filled only as far as iend.
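The qidx[]/scridx[] bookkeeping can be sketched with libpq's exported multibyte helpers (arrays assumed large enough, one slot per character plus a terminator; the real code also clamps odd PQdsplen() results for control characters):

    #include <libpq-fe.h>

    static void
    map_positions(const char *query, int encoding, int *qidx, int *scridx)
    {
        int     qoffset = 0;                 /* byte offset of current character */
        int     scroffset = 0;               /* screen column of current character */
        int     cno = 0;                     /* logical character number */

        while (query[qoffset] != '\0')
        {
            qidx[cno] = qoffset;
            scridx[cno] = scroffset;
            scroffset += PQdsplen(query + qoffset, encoding);
            qoffset += PQmblen(query + qoffset, encoding);
            cno++;
        }
        qidx[cno] = qoffset;                 /* terminator entries */
        scridx[cno] = scroffset;
    }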
@@ -1511,7 +1511,7 @@ pqGetCopyData3(PGconn *conn, char **buffer, int async)
for (;;)
{
/*
- * Collect the next input message. To make life simpler for async
+ * Collect the next input message. To make life simpler for async
* callers, we keep returning 0 until the next message is fully
* available, even if it is not Copy Data.
*/
@@ -1718,7 +1718,7 @@ pqEndcopy3(PGconn *conn)
/*
* Non blocking connections may have to abort at this point. If everyone
* played the game there should be no problem, but in error scenarios the
- * expected messages may not have arrived yet. (We are assuming that the
+ * expected messages may not have arrived yet. (We are assuming that the
* backend's packetizing will ensure that CommandComplete arrives along
* with the CopyDone; are there corner cases where that doesn't happen?)
*/
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index d8ac40c784..9ba35674d3 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -257,7 +257,7 @@ pqsecure_open_client(PGconn *conn)
if (conn->ssl == NULL)
{
#ifdef ENABLE_THREAD_SAFETY
- int rc;
+ int rc;
#endif
/* We cannot use MSG_NOSIGNAL to block SIGPIPE when using SSL */
@@ -267,7 +267,7 @@ pqsecure_open_client(PGconn *conn)
if ((rc = pthread_mutex_lock(&ssl_config_mutex)))
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not acquire mutex: %s\n"), strerror(rc));
+ libpq_gettext("could not acquire mutex: %s\n"), strerror(rc));
return PGRES_POLLING_FAILED;
}
#endif
@@ -292,6 +292,7 @@ pqsecure_open_client(PGconn *conn)
#ifdef ENABLE_THREAD_SAFETY
pthread_mutex_unlock(&ssl_config_mutex);
#endif
+
/*
* Load client certificate, private key, and trusted CA certs.
*/
@@ -1049,7 +1050,7 @@ destroy_ssl_system(void)
* Initialize (potentially) per-connection SSL data, namely the
* client certificate, private key, and trusted CA certs.
*
- * conn->ssl must already be created. It receives the connection's client
+ * conn->ssl must already be created. It receives the connection's client
* certificate and private key. Note however that certificates also get
* loaded into the SSL_context object, and are therefore accessible to all
* connections in this process. This should be OK as long as there aren't
@@ -1129,12 +1130,12 @@ initialize_SSL(PGconn *conn)
* SSL_context struct.
*/
#ifdef ENABLE_THREAD_SAFETY
- int rc;
+ int rc;
if ((rc = pthread_mutex_lock(&ssl_config_mutex)))
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not acquire mutex: %s\n"), strerror(rc));
+ libpq_gettext("could not acquire mutex: %s\n"), strerror(rc));
return -1;
}
#endif
@@ -1349,12 +1350,12 @@ initialize_SSL(PGconn *conn)
X509_STORE *cvstore;
#ifdef ENABLE_THREAD_SAFETY
- int rc;
+ int rc;
if ((rc = pthread_mutex_lock(&ssl_config_mutex)))
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not acquire mutex: %s\n"), strerror(rc));
+ libpq_gettext("could not acquire mutex: %s\n"), strerror(rc));
return -1;
}
#endif
@@ -1414,7 +1415,7 @@ initialize_SSL(PGconn *conn)
{
/*
* stat() failed; assume root file doesn't exist. If sslmode is
- * verify-ca or verify-full, this is an error. Otherwise, continue
+ * verify-ca or verify-full, this is an error. Otherwise, continue
* without performing any server cert verification.
*/
if (conn->sslmode[0] == 'v') /* "verify-ca" or "verify-full" */
@@ -1547,7 +1548,7 @@ open_client_SSL(PGconn *conn)
static void
close_SSL(PGconn *conn)
{
- bool destroy_needed = false;
+ bool destroy_needed = false;
if (conn->ssl)
{
@@ -1586,9 +1587,9 @@ close_SSL(PGconn *conn)
/*
* This will remove our SSL locking hooks, if this is the last SSL
- * connection, which means we must wait to call it until after all
- * SSL calls have been made, otherwise we can end up with a race
- * condition and possible deadlocks.
+ * connection, which means we must wait to call it until after all SSL
+ * calls have been made, otherwise we can end up with a race condition and
+ * possible deadlocks.
*
* See comments above destroy_ssl_system().
*/
@@ -1663,7 +1664,7 @@ PQgetssl(PGconn *conn)
#if defined(ENABLE_THREAD_SAFETY) && !defined(WIN32)
/*
- * Block SIGPIPE for this thread. This prevents send()/write() from exiting
+ * Block SIGPIPE for this thread. This prevents send()/write() from exiting
* the application.
*/
int
@@ -1702,7 +1703,7 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending)
* Discard any pending SIGPIPE and reset the signal mask.
*
* Note: we are effectively assuming here that the C library doesn't queue
- * up multiple SIGPIPE events. If it did, then we'd accidentally leave
+ * up multiple SIGPIPE events. If it did, then we'd accidentally leave
* ours in the queue when an event was already pending and we got another.
* As long as it doesn't queue multiple events, we're OK because the caller
* can't tell the difference.
@@ -1713,7 +1714,7 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending)
* gotten one, pass got_epipe = TRUE.
*
* We do not want this to change errno, since if it did that could lose
- * the error code from a preceding send(). We essentially assume that if
+ * the error code from a preceding send(). We essentially assume that if
* we were able to do pq_block_sigpipe(), this can't fail.
*/
void
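The pairing the two comments above describe looks roughly like this sketch (the real code prefers sigtimedwait() with a zero timeout where available, so the wait cannot stall):

    #include <pthread.h>
    #include <signal.h>

    static int
    block_sigpipe(sigset_t *osigset, int *sigpipe_pending)
    {
        sigset_t    sigpipe_sigset;
        sigset_t    pending;

        sigemptyset(&sigpipe_sigset);
        sigaddset(&sigpipe_sigset, SIGPIPE);

        /* block SIGPIPE for this thread only, saving the old mask */
        if (pthread_sigmask(SIG_BLOCK, &sigpipe_sigset, osigset) != 0)
            return -1;
        if (sigpending(&pending) != 0)
            return -1;
        *sigpipe_pending = sigismember(&pending, SIGPIPE);
        return 0;
    }

    static void
    discard_sigpipe(const sigset_t *osigset, int sigpipe_pending, int got_epipe)
    {
        if (got_epipe && !sigpipe_pending)
        {
            /* eat exactly the SIGPIPE our own send() raised */
            sigset_t    sigpipe_sigset;
            int         signo;

            sigemptyset(&sigpipe_sigset);
            sigaddset(&sigpipe_sigset, SIGPIPE);
            sigwait(&sigpipe_sigset, &signo);
        }
        pthread_sigmask(SIG_SETMASK, osigset, NULL);
    }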
diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h
index 856bdff006..80591728a0 100644
--- a/src/interfaces/libpq/libpq-fe.h
+++ b/src/interfaces/libpq/libpq-fe.h
@@ -55,9 +55,9 @@ typedef enum
* be used for user feedback or similar purposes.
*/
CONNECTION_STARTED, /* Waiting for connection to be made. */
- CONNECTION_MADE, /* Connection OK; waiting to send. */
+ CONNECTION_MADE, /* Connection OK; waiting to send. */
CONNECTION_AWAITING_RESPONSE, /* Waiting for a response from the
- * postmaster. */
+ * postmaster. */
CONNECTION_AUTH_OK, /* Received authentication; waiting for
* backend startup. */
CONNECTION_SETENV, /* Negotiating environment. */
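These states are what an application sees while driving PQconnectPoll(); a minimal non-blocking connect loop looks like this sketch (error handling abbreviated):

    #include <sys/select.h>
    #include <libpq-fe.h>

    static PGconn *
    connect_async(const char *conninfo)
    {
        PGconn     *conn = PQconnectStart(conninfo);
        PostgresPollingStatusType st = PGRES_POLLING_WRITING;

        if (conn == NULL || PQstatus(conn) == CONNECTION_BAD)
            return NULL;

        while (st != PGRES_POLLING_OK && st != PGRES_POLLING_FAILED)
        {
            int     sock = PQsocket(conn);
            fd_set  fds;

            FD_ZERO(&fds);
            FD_SET(sock, &fds);
            if (st == PGRES_POLLING_READING)
                select(sock + 1, &fds, NULL, NULL, NULL);
            else
                select(sock + 1, NULL, &fds, NULL, NULL);

            /* PQstatus(conn) now reports CONNECTION_MADE and friends,
             * which is where the user-feedback states above come in */
            st = PQconnectPoll(conn);
        }
        return (st == PGRES_POLLING_OK) ? conn : NULL;
    }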
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index 0725c17023..4aeb4fad98 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -365,7 +365,8 @@ struct pg_conn
/* Connection data */
/* See PQconnectPoll() for how we use 'int' and not 'pgsocket'. */
- pgsocket sock; /* FD for socket, PGINVALID_SOCKET if unconnected */
+ pgsocket sock; /* FD for socket, PGINVALID_SOCKET if
+ * unconnected */
SockAddr laddr; /* Local address */
SockAddr raddr; /* Remote address */
ProtocolVersion pversion; /* FE/BE protocol version in use */
diff --git a/src/interfaces/libpq/pqexpbuffer.c b/src/interfaces/libpq/pqexpbuffer.c
index 54e62d3f11..5c7fd42dc7 100644
--- a/src/interfaces/libpq/pqexpbuffer.c
+++ b/src/interfaces/libpq/pqexpbuffer.c
@@ -166,7 +166,7 @@ resetPQExpBuffer(PQExpBuffer str)
* Make sure there is enough space for 'needed' more bytes in the buffer
* ('needed' does not include the terminating null).
*
- * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
+ * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
* the buffer is left in "broken" state.)
*/
int
@@ -180,7 +180,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* Guard against ridiculous "needed" values, which can occur if we're fed
- * bogus data. Without this, we can get an overflow or infinite loop in
+ * bogus data. Without this, we can get an overflow or infinite loop in
* the following.
*/
if (needed >= ((size_t) INT_MAX - str->len))
@@ -207,7 +207,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* Clamp to INT_MAX in case we went past it. Note we are assuming here
- * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
+ * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
* will still have newlen >= needed.
*/
if (newlen > (size_t) INT_MAX)
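The growth policy reads as follows in sketch form (same doubling and clamping; the real function additionally marks the buffer "broken" on failure rather than returning NULL):

    #include <limits.h>
    #include <stdlib.h>

    static char *
    grow_buffer(char *data, size_t len, size_t *maxlen, size_t needed)
    {
        size_t  newlen;
        char   *newdata;

        if (needed >= (size_t) INT_MAX - len)
            return NULL;                     /* bogus 'needed': refuse */
        needed += len + 1;                   /* total space, incl. trailing null */
        if (needed <= *maxlen)
            return data;                     /* already big enough */

        newlen = (*maxlen > 0) ? *maxlen * 2 : 64;
        while (needed > newlen)
            newlen = 2 * newlen;             /* safe given INT_MAX <= SIZE_MAX/2 */
        if (newlen > (size_t) INT_MAX)
            newlen = (size_t) INT_MAX;       /* clamp; still >= needed */

        newdata = realloc(data, newlen);
        if (newdata == NULL)
            return NULL;
        *maxlen = newlen;
        return newdata;
    }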
@@ -228,7 +228,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* printfPQExpBuffer
* Format text data under the control of fmt (an sprintf-like format string)
- * and insert it into str. More space is allocated to str if necessary.
+ * and insert it into str. More space is allocated to str if necessary.
* This is a convenience routine that does the same thing as
* resetPQExpBuffer() followed by appendPQExpBuffer().
*/
@@ -319,7 +319,7 @@ appendPQExpBufferVA(PQExpBuffer str, const char *fmt, va_list args)
/*
* Note: some versions of vsnprintf return the number of chars
* actually stored, not the total space needed as C99 specifies. And
- * at least one returns -1 on failure. Be conservative about
+ * at least one returns -1 on failure. Be conservative about
* believing whether the print worked.
*/
if (nprinted >= 0 && (size_t) nprinted < avail - 1)
diff --git a/src/interfaces/libpq/pqexpbuffer.h b/src/interfaces/libpq/pqexpbuffer.h
index 4ebb124f55..b3c0c0fefb 100644
--- a/src/interfaces/libpq/pqexpbuffer.h
+++ b/src/interfaces/libpq/pqexpbuffer.h
@@ -37,7 +37,7 @@
* more space. We must always have maxlen > len.
*
* An exception occurs if we failed to allocate enough memory for the string
- * buffer. In that case data points to a statically allocated empty string,
+ * buffer. In that case data points to a statically allocated empty string,
* and len = maxlen = 0.
*-------------------------
*/
@@ -115,7 +115,7 @@ extern void initPQExpBuffer(PQExpBuffer str);
*
* NOTE: some routines build up a string using PQExpBuffer, and then
* release the PQExpBufferData but return the data string itself to their
- * caller. At that point the data string looks like a plain malloc'd
+ * caller. At that point the data string looks like a plain malloc'd
* string.
*/
extern void destroyPQExpBuffer(PQExpBuffer str);
@@ -134,7 +134,7 @@ extern void resetPQExpBuffer(PQExpBuffer str);
* Make sure there is enough space for 'needed' more bytes in the buffer
* ('needed' does not include the terminating null).
*
- * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
+ * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
* the buffer is left in "broken" state.)
*/
extern int enlargePQExpBuffer(PQExpBuffer str, size_t needed);
@@ -142,7 +142,7 @@ extern int enlargePQExpBuffer(PQExpBuffer str, size_t needed);
/*------------------------
* printfPQExpBuffer
* Format text data under the control of fmt (an sprintf-like format string)
- * and insert it into str. More space is allocated to str if necessary.
+ * and insert it into str. More space is allocated to str if necessary.
* This is a convenience routine that does the same thing as
* resetPQExpBuffer() followed by appendPQExpBuffer().
*/
diff --git a/src/interfaces/libpq/test/uri-regress.c b/src/interfaces/libpq/test/uri-regress.c
index e08a69dc90..cf0ef135d5 100644
--- a/src/interfaces/libpq/test/uri-regress.c
+++ b/src/interfaces/libpq/test/uri-regress.c
@@ -2,7 +2,7 @@
* uri-regress.c
* A test program for libpq URI format
*
- * This is a helper for libpq conninfo regression testing. It takes a single
+ * This is a helper for libpq conninfo regression testing. It takes a single
* conninfo string as a parameter, parses it using PQconninfoParse, and then
* prints out the values from the parsed PQconninfoOption struct that differ
* from the defaults (obtained from PQconndefaults).
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index ffdf634f55..d57189fe1e 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -62,8 +62,8 @@ PG_MODULE_MAGIC;
/**********************************************************************
- * Information associated with a Perl interpreter. We have one interpreter
- * that is used for all plperlu (untrusted) functions. For plperl (trusted)
+ * Information associated with a Perl interpreter. We have one interpreter
+ * that is used for all plperlu (untrusted) functions. For plperl (trusted)
* functions, there is a separate interpreter for each effective SQL userid.
* (This is needed to ensure that an unprivileged user can't inject Perl code
* that'll be executed with the privileges of some other SQL user.)
@@ -99,7 +99,7 @@ typedef struct plperl_interp_desc
*
* The refcount field counts the struct's reference from the hash table shown
* below, plus one reference for each function call level that is using the
- * struct. We can release the struct, and the associated Perl sub, when the
+ * struct. We can release the struct, and the associated Perl sub, when the
* refcount goes to zero.
**********************************************************************/
typedef struct plperl_proc_desc
@@ -254,8 +254,8 @@ static void plperl_event_trigger_handler(PG_FUNCTION_ARGS);
static void free_plperl_function(plperl_proc_desc *prodesc);
static plperl_proc_desc *compile_plperl_function(Oid fn_oid,
- bool is_trigger,
- bool is_event_trigger);
+ bool is_trigger,
+ bool is_event_trigger);
static SV *plperl_hash_from_tuple(HeapTuple tuple, TupleDesc tupdesc);
static SV *plperl_hash_from_datum(Datum attr);
@@ -302,8 +302,8 @@ static char *setlocale_perl(int category, char *locale);
static char *
hek2cstr(HE *he)
{
- char *ret;
- SV *sv;
+ char *ret;
+ SV *sv;
/*
* HeSVKEY_force will return a temporary mortal SV*, so we need to make
@@ -707,6 +707,7 @@ plperl_init_interp(void)
int nargs = 3;
#ifdef WIN32
+
/*
* The perl library on startup does horrible things like call
* setlocale(LC_ALL,""). We have protected against that on most platforms
@@ -3055,7 +3056,7 @@ plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
/*
* Note: plperl_return_next is called both in Postgres and Perl contexts.
- * We report any errors in Postgres fashion (via ereport). If called in
+ * We report any errors in Postgres fashion (via ereport). If called in
* Perl context, it is SPI.xs's responsibility to catch the error and
* convert to a Perl error. We assume (perhaps without adequate justification)
* that we need not abort the current transaction if the Perl code traps the
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index 12ac964d13..d2fd685c35 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -288,7 +288,7 @@ do_compile(FunctionCallInfo fcinfo,
MemoryContext func_cxt;
/*
- * Setup the scanner input and error info. We assume that this function
+ * Set up the scanner input and error info. We assume that this function
* cannot be invoked recursively, so there's no need to save and restore
* the static variables used here.
*/
@@ -388,7 +388,7 @@ do_compile(FunctionCallInfo fcinfo,
* needed permanently, so make them in tmp cxt.
*
* We also need to resolve any polymorphic input or output
- * argument types. In validation mode we won't be able to, so we
+ * argument types. In validation mode we won't be able to, so we
* arbitrarily assume we are dealing with integers.
*/
MemoryContextSwitchTo(compile_tmp_cxt);
@@ -471,7 +471,7 @@ do_compile(FunctionCallInfo fcinfo,
/*
* If there's just one OUT parameter, out_param_varno points
- * directly to it. If there's more than one, build a row that
+ * directly to it. If there's more than one, build a row that
* holds all of them.
*/
if (num_out_args == 1)
@@ -810,7 +810,7 @@ plpgsql_compile_inline(char *proc_source)
int i;
/*
- * Setup the scanner input and error info. We assume that this function
+ * Set up the scanner input and error info. We assume that this function
* cannot be invoked recursively, so there's no need to save and restore
* the static variables used here.
*/
@@ -852,7 +852,11 @@ plpgsql_compile_inline(char *proc_source)
function->out_param_varno = -1; /* set up for no OUT param */
function->resolve_option = plpgsql_variable_conflict;
function->print_strict_params = plpgsql_print_strict_params;
- /* don't do extra validation for inline code as we don't want to add spam at runtime */
+
+ /*
+ * don't do extra validation for inline code as we don't want to add spam
+ * at runtime
+ */
function->extra_warnings = 0;
function->extra_errors = 0;
@@ -1070,7 +1074,7 @@ plpgsql_post_column_ref(ParseState *pstate, ColumnRef *cref, Node *var)
/*
* If we find a record/row variable but can't match a field name, throw
- * error if there was no core resolution for the ColumnRef either. In
+ * error if there was no core resolution for the ColumnRef either. In
* that situation, the reference is inevitably going to fail, and
* complaining about the record/row variable is likely to be more on-point
* than the core parser's error message. (It's too bad we don't have
@@ -1264,7 +1268,7 @@ resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr,
/*
* We should not get here, because a RECFIELD datum should
* have been built at parse time for every possible qualified
- * reference to fields of this record. But if we do, handle
+ * reference to fields of this record. But if we do, handle
* it like field-not-found: throw error or return NULL.
*/
if (error_if_no_field)
@@ -1871,7 +1875,7 @@ plpgsql_parse_cwordrowtype(List *idents)
*
* The returned struct may be a PLpgSQL_var, PLpgSQL_row, or
* PLpgSQL_rec depending on the given datatype, and is allocated via
- * palloc. The struct is automatically added to the current datum
+ * palloc. The struct is automatically added to the current datum
* array, and optionally to the current namespace.
*/
PLpgSQL_variable *
@@ -2325,7 +2329,7 @@ plpgsql_adddatum(PLpgSQL_datum *new)
* last call.
*
* This is used around a DECLARE section to create a list of the VARs
- * that have to be initialized at block entry. Note that VARs can also
+ * that have to be initialized at block entry. Note that VARs can also
* be created elsewhere than DECLARE, eg by a FOR-loop, but it is then
* the responsibility of special-purpose code to initialize them.
* ----------
@@ -2482,7 +2486,7 @@ plpgsql_resolve_polymorphic_argtypes(int numargs,
* delete_function - clean up as much as possible of a stale function cache
*
* We can't release the PLpgSQL_function struct itself, because of the
- * possibility that there are fn_extra pointers to it. We can release
+ * possibility that there are fn_extra pointers to it. We can release
* the subsidiary storage, but only if there are no active evaluations
* in progress. Otherwise we'll just leak that storage. Since the
* case would only occur if a pg_proc update is detected during a nested
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 5d5439948b..69d1965253 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -56,7 +56,7 @@ typedef struct
* creates its own "eval_econtext" ExprContext within this estate for
* per-evaluation workspace. eval_econtext is freed at normal function exit,
* and the EState is freed at transaction end (in case of error, we assume
- * that the abort mechanisms clean it all up). Furthermore, any exception
+ * that the abort mechanisms clean it all up). Furthermore, any exception
* block within a function has to have its own eval_econtext separate from
* the containing function's, so that we can clean up ExprContext callbacks
* properly at subtransaction exit. We maintain a stack that tracks the
@@ -64,7 +64,7 @@ typedef struct
*
* This arrangement is a bit tedious to maintain, but it's worth the trouble
* so that we don't have to re-prepare simple expressions on each trip through
- * a function. (We assume the case to optimize is many repetitions of a
+ * a function. (We assume the case to optimize is many repetitions of a
* function within a transaction.)
*
* However, there's no value in trying to amortize simple expression setup
@@ -232,9 +232,9 @@ static Portal exec_dynquery_with_params(PLpgSQL_execstate *estate,
const char *portalname, int cursorOptions);
static char *format_expr_params(PLpgSQL_execstate *estate,
- const PLpgSQL_expr *expr);
+ const PLpgSQL_expr *expr);
static char *format_preparedparamsdata(PLpgSQL_execstate *estate,
- const PreparedParamsData *ppd);
+ const PreparedParamsData *ppd);
/* ----------
@@ -543,7 +543,7 @@ plpgsql_exec_trigger(PLpgSQL_function *func,
*
* We make the tupdescs available in both records even though only one may
* have a value. This allows parsing of record references to succeed in
- * functions that are used for multiple trigger types. For example, we
+ * functions that are used for multiple trigger types. For example, we
* might have a test like "if (TG_OP = 'INSERT' and NEW.foo = 'xyz')",
* which should parse regardless of the current trigger type.
*/
@@ -1226,7 +1226,7 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block)
SPI_restore_connection();
/*
- * Must clean up the econtext too. However, any tuple table made
+ * Must clean up the econtext too. However, any tuple table made
* in the subxact will have been thrown away by SPI during subxact
* abort, so we don't need to (and mustn't try to) free the
* eval_tuptable.
@@ -1630,7 +1630,7 @@ exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt)
case PLPGSQL_GETDIAG_CONTEXT:
{
- char *contextstackstr = GetErrorContextStack();
+ char *contextstackstr = GetErrorContextStack();
exec_assign_c_string(estate, var, contextstackstr);
@@ -3335,7 +3335,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
/*
* Check for error, and set FOUND if appropriate (for historical reasons
- * we set FOUND only for certain query types). Also Assert that we
+ * we set FOUND only for certain query types). Also Assert that we
* identified the statement type the same as SPI did.
*/
switch (rc)
@@ -3422,7 +3422,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
{
if (stmt->strict)
{
- char *errdetail;
+ char *errdetail;
if (estate->func->print_strict_params)
errdetail = format_expr_params(estate, expr);
@@ -3441,7 +3441,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
{
if (n > 1 && (stmt->strict || stmt->mod_stmt))
{
- char *errdetail;
+ char *errdetail;
if (estate->func->print_strict_params)
errdetail = format_expr_params(estate, expr);
@@ -3614,7 +3614,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate,
{
if (stmt->strict)
{
- char *errdetail;
+ char *errdetail;
if (estate->func->print_strict_params)
errdetail = format_preparedparamsdata(estate, ppd);
@@ -3633,7 +3633,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate,
{
if (n > 1 && stmt->strict)
{
- char *errdetail;
+ char *errdetail;
if (estate->func->print_strict_params)
errdetail = format_preparedparamsdata(estate, ppd);
@@ -4073,7 +4073,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
var->datatype->typlen);
/*
- * Now free the old value. (We can't do this any earlier
+ * Now free the old value. (We can't do this any earlier
* because of the possibility that we are assigning the var's
* old value to it, eg "foo := foo". We could optimize out
* the assignment altogether in such cases, but it's too
@@ -4433,7 +4433,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
* At present this doesn't handle PLpgSQL_expr or PLpgSQL_arrayelem datums.
*
* NOTE: caller must not modify the returned value, since it points right
- * at the stored value in the case of pass-by-reference datatypes. In some
+ * at the stored value in the case of pass-by-reference datatypes. In some
* cases we have to palloc a return value, and in such cases we put it into
* the estate's short-term memory context.
*/
@@ -4942,7 +4942,7 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt,
PinPortal(portal);
/*
- * Fetch the initial tuple(s). If prefetching is allowed then we grab a
+ * Fetch the initial tuple(s). If prefetching is allowed then we grab a
* few more rows to avoid multiple trips through executor startup
* overhead.
*/
@@ -5080,7 +5080,7 @@ loop_exit:
* Because we only store one execution tree for a simple expression, we
* can't handle recursion cases. So, if we see the tree is already busy
* with an evaluation in the current xact, we just return FALSE and let the
- * caller run the expression the hard way. (Other alternatives such as
+ * caller run the expression the hard way. (Other alternatives such as
* creating a new tree for a recursive call either introduce memory leaks,
* or add enough bookkeeping to be doubtful wins anyway.) Another case that
* is covered by the expr_simple_in_use test is where a previous execution
@@ -5308,7 +5308,7 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr)
estate->cur_expr = expr;
/*
- * Also make sure this is set before parser hooks need it. There is
+ * Also make sure this is set before parser hooks need it. There is
* no need to save and restore, since the value is always correct once
* set. (Should be set already, but let's be sure.)
*/
@@ -5350,7 +5350,7 @@ plpgsql_param_fetch(ParamListInfo params, int paramid)
/*
* Do nothing if asked for a value that's not supposed to be used by this
- * SQL expression. This avoids unwanted evaluations when functions such
+ * SQL expression. This avoids unwanted evaluations when functions such
* as copyParamList try to materialize all the values.
*/
if (!bms_is_member(dno, expr->paramnos))
@@ -5684,7 +5684,7 @@ convert_value_to_string(PLpgSQL_execstate *estate, Datum value, Oid valtype)
*
* Note: the estate's eval_econtext is used for temporary storage, and may
* also contain the result Datum if we have to do a conversion to a pass-
- * by-reference data type. Be sure to do an exec_eval_cleanup() call when
+ * by-reference data type. Be sure to do an exec_eval_cleanup() call when
* done with the result.
* ----------
*/
@@ -6216,7 +6216,7 @@ plpgsql_create_econtext(PLpgSQL_execstate *estate)
/*
* Create an EState for evaluation of simple expressions, if there's not
- * one already in the current transaction. The EState is made a child of
+ * one already in the current transaction. The EState is made a child of
* TopTransactionContext so it will have the right lifespan.
*
* Note that this path is never taken when executing a DO block; the
@@ -6531,10 +6531,10 @@ static char *
format_expr_params(PLpgSQL_execstate *estate,
const PLpgSQL_expr *expr)
{
- int paramno;
- int dno;
+ int paramno;
+ int dno;
StringInfoData paramstr;
- Bitmapset *tmpset;
+ Bitmapset *tmpset;
if (!expr->paramnos)
return NULL;
@@ -6544,10 +6544,10 @@ format_expr_params(PLpgSQL_execstate *estate,
paramno = 0;
while ((dno = bms_first_member(tmpset)) >= 0)
{
- Datum paramdatum;
- Oid paramtypeid;
- bool paramisnull;
- int32 paramtypmod;
+ Datum paramdatum;
+ Oid paramtypeid;
+ bool paramisnull;
+ int32 paramtypmod;
PLpgSQL_var *curvar;
curvar = (PLpgSQL_var *) estate->datums[dno];
@@ -6563,8 +6563,9 @@ format_expr_params(PLpgSQL_execstate *estate,
appendStringInfoString(&paramstr, "NULL");
else
{
- char *value = convert_value_to_string(estate, paramdatum, paramtypeid);
- char *p;
+ char *value = convert_value_to_string(estate, paramdatum, paramtypeid);
+ char *p;
+
appendStringInfoCharMacro(&paramstr, '\'');
for (p = value; *p; p++)
{
@@ -6590,7 +6591,7 @@ static char *
format_preparedparamsdata(PLpgSQL_execstate *estate,
const PreparedParamsData *ppd)
{
- int paramno;
+ int paramno;
StringInfoData paramstr;
if (!ppd)
@@ -6607,8 +6608,9 @@ format_preparedparamsdata(PLpgSQL_execstate *estate,
appendStringInfoString(&paramstr, "NULL");
else
{
- char *value = convert_value_to_string(estate, ppd->values[paramno], ppd->types[paramno]);
- char *p;
+ char *value = convert_value_to_string(estate, ppd->values[paramno], ppd->types[paramno]);
+ char *p;
+
appendStringInfoCharMacro(&paramstr, '\'');
for (p = value; *p; p++)
{
diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c
index 430232c687..d6825e4634 100644
--- a/src/pl/plpgsql/src/pl_funcs.c
+++ b/src/pl/plpgsql/src/pl_funcs.c
@@ -25,7 +25,7 @@
* list or "chain" (from the youngest item to the root) is accessible from
* any one plpgsql statement. During initial parsing of a function, ns_top
* points to the youngest item accessible from the block currently being
- * parsed. We store the entire tree, however, since at runtime we will need
+ * parsed. We store the entire tree, however, since at runtime we will need
* to access the chain that's relevant to any one statement.
*
* Block boundaries in the namespace chain are marked by PLPGSQL_NSTYPE_LABEL
@@ -113,7 +113,7 @@ plpgsql_ns_additem(int itemtype, int itemno, const char *name)
*
* If localmode is TRUE, only the topmost block level is searched.
*
- * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name
+ * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name
* with fewer than three components.
*
* If names_used isn't NULL, *names_used receives the number of names
diff --git a/src/pl/plpgsql/src/pl_handler.c b/src/pl/plpgsql/src/pl_handler.c
index e659f8e289..e3e350c0f8 100644
--- a/src/pl/plpgsql/src/pl_handler.c
+++ b/src/pl/plpgsql/src/pl_handler.c
@@ -44,8 +44,8 @@ int plpgsql_variable_conflict = PLPGSQL_RESOLVE_ERROR;
bool plpgsql_print_strict_params = false;
-char *plpgsql_extra_warnings_string = NULL;
-char *plpgsql_extra_errors_string = NULL;
+char *plpgsql_extra_warnings_string = NULL;
+char *plpgsql_extra_errors_string = NULL;
int plpgsql_extra_warnings;
int plpgsql_extra_errors;
@@ -59,7 +59,7 @@ plpgsql_extra_checks_check_hook(char **newvalue, void **extra, GucSource source)
char *rawstring;
List *elemlist;
ListCell *l;
- int extrachecks = 0;
+ int extrachecks = 0;
int *myextra;
if (pg_strcasecmp(*newvalue, "all") == 0)
diff --git a/src/pl/plpgsql/src/pl_scanner.c b/src/pl/plpgsql/src/pl_scanner.c
index 19d96b75fc..6a5a04bc0c 100644
--- a/src/pl/plpgsql/src/pl_scanner.c
+++ b/src/pl/plpgsql/src/pl_scanner.c
@@ -44,7 +44,7 @@ IdentifierLookup plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL;
*
* For the most part, the reserved keywords are those that start a PL/pgSQL
* statement (and so would conflict with an assignment to a variable of the
- * same name). We also don't sweat it much about reserving keywords that
+ * same name). We also don't sweat it much about reserving keywords that
* are reserved in the core grammar. Try to avoid reserving other words.
*/
@@ -177,7 +177,7 @@ typedef struct
/*
* Scanner working state. At some point we might wish to fold all this
- * into a YY_EXTRA struct. For the moment, there is no need for plpgsql's
+ * into a YY_EXTRA struct. For the moment, there is no need for plpgsql's
* lexer to be re-entrant, and the notational burden of passing a yyscanner
* pointer around is great enough to not want to do it without need.
*/
@@ -345,7 +345,7 @@ plpgsql_yylex(void)
/*
* Internal yylex function. This wraps the core lexer and adds one feature:
- * a token pushback stack. We also make a couple of trivial single-token
+ * a token pushback stack. We also make a couple of trivial single-token
* translations from what the core lexer does to what we want, in particular
* interfacing from the core_YYSTYPE to YYSTYPE union.
*/
@@ -559,7 +559,7 @@ plpgsql_yyerror(const char *message)
/*
* If we have done any lookahead then flex will have restored the
* character after the end-of-token. Zap it again so that we report
- * only the single token here. This modifies scanbuf but we no longer
+ * only the single token here. This modifies scanbuf but we no longer
* care about that.
*/
yytext[plpgsql_yyleng] = '\0';
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index b4d1498e43..d6f31ffc63 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -668,7 +668,7 @@ typedef struct PLpgSQL_func_hashkey
/*
* For a trigger function, the OID of the relation triggered on is part of
* the hash key --- we want to compile the trigger separately for each
- * relation it is used with, in case the rowtype is different. Zero if
+ * relation it is used with, in case the rowtype is different. Zero if
* not called as a trigger.
*/
Oid trigrelOid;
@@ -740,8 +740,8 @@ typedef struct PLpgSQL_function
bool print_strict_params;
/* extra checks */
- int extra_warnings;
- int extra_errors;
+ int extra_warnings;
+ int extra_errors;
int ndatums;
PLpgSQL_datum **datums;
@@ -827,7 +827,7 @@ typedef struct PLpgSQL_execstate
*
* Also, immediately before any call to func_setup, PL/pgSQL fills in the
* error_callback and assign_expr fields with pointers to its own
- * plpgsql_exec_error_callback and exec_assign_expr functions. This is
+ * plpgsql_exec_error_callback and exec_assign_expr functions. This is
* a somewhat ad-hoc expedient to simplify life for debugger plugins.
*/
@@ -890,8 +890,8 @@ extern bool plpgsql_print_strict_params;
#define PLPGSQL_XCHECK_SHADOWVAR 1
#define PLPGSQL_XCHECK_ALL ((int) ~0)
-extern int plpgsql_extra_warnings;
-extern int plpgsql_extra_errors;
+extern int plpgsql_extra_warnings;
+extern int plpgsql_extra_errors;
extern bool plpgsql_check_syntax;
extern bool plpgsql_DumpExecTree;
diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c
index 77cd4273ba..461d68c546 100644
--- a/src/pl/plpython/plpy_elog.c
+++ b/src/pl/plpython/plpy_elog.c
@@ -30,7 +30,7 @@ static char *get_source_line(const char *src, int lineno);
/*
* Emit a PG error or notice, together with any available info about
* the current Python error, previously set by PLy_exception_set().
- * This should be used to propagate Python errors into PG. If fmt is
+ * This should be used to propagate Python errors into PG. If fmt is
* NULL, the Python error becomes the primary error message, otherwise
* it becomes the detail. If there is a Python traceback, it is put
* in the context.
diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c
index 74e2f3dadf..b6eb6f1f95 100644
--- a/src/pl/plpython/plpy_exec.c
+++ b/src/pl/plpython/plpy_exec.c
@@ -247,7 +247,7 @@ PLy_exec_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc)
Assert(CALLED_AS_TRIGGER(fcinfo));
/*
- * Input/output conversion for trigger tuples. Use the result TypeInfo
+ * Input/output conversion for trigger tuples. Use the result TypeInfo
* variable to store the tuple conversion info. We do this over again on
* each call to cover the possibility that the relation's tupdesc changed
* since the trigger was last called. PLy_input_tuple_funcs and
diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c
index 7a5e581280..566cf6c0fe 100644
--- a/src/pl/plpython/plpy_typeio.c
+++ b/src/pl/plpython/plpy_typeio.c
@@ -377,7 +377,7 @@ PLy_output_datum_func2(PLyObToDatum *arg, HeapTuple typeTup)
/*
* Select a conversion function to convert Python objects to PostgreSQL
- * datums. Most data types can go through the generic function.
+ * datums. Most data types can go through the generic function.
*/
switch (getBaseType(element_type ? element_type : arg->typoid))
{
@@ -427,6 +427,7 @@ static void
PLy_input_datum_func2(PLyDatumToOb *arg, Oid typeOid, HeapTuple typeTup)
{
Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTup);
+
/* It's safe to handle domains of array types as its base array type. */
Oid element_type = get_base_element_type(typeOid);
@@ -647,7 +648,7 @@ PLyList_FromArray(PLyDatumToOb *arg, Datum d)
}
/*
- * Convert a Python object to a PostgreSQL bool datum. This can't go
+ * Convert a Python object to a PostgreSQL bool datum. This can't go
* through the generic conversion function, because Python attaches a
* Boolean value to everything, more things than the PostgreSQL bool
* type can parse.
@@ -809,7 +810,7 @@ static Datum
PLySequence_ToArray(PLyObToDatum *arg, int32 typmod, PyObject *plrv)
{
ArrayType *array;
- Datum rv;
+ Datum rv;
int i;
Datum *elems;
bool *nulls;
@@ -847,6 +848,7 @@ PLySequence_ToArray(PLyObToDatum *arg, int32 typmod, PyObject *plrv)
lbs = 1;
array = construct_md_array(elems, nulls, 1, &len, &lbs,
get_base_element_type(arg->typoid), arg->elm->typlen, arg->elm->typbyval, arg->elm->typalign);
+
/*
* If the result type is a domain of array, the resulting array must be
* checked.
diff --git a/src/pl/plpython/plpy_util.c b/src/pl/plpython/plpy_util.c
index 88670e66d0..36958cb10f 100644
--- a/src/pl/plpython/plpy_util.c
+++ b/src/pl/plpython/plpy_util.c
@@ -55,7 +55,7 @@ PLy_free(void *ptr)
/*
* Convert a Python unicode object to a Python string/bytes object in
- * PostgreSQL server encoding. Reference ownership is passed to the
+ * PostgreSQL server encoding. Reference ownership is passed to the
* caller.
*/
PyObject *
@@ -121,7 +121,7 @@ PLyUnicode_Bytes(PyObject *unicode)
* function. The result is palloc'ed.
*
* Note that this function is disguised as PyString_AsString() when
- * using Python 3. That function retuns a pointer into the internal
+ * using Python 3. That function returns a pointer into the internal
* memory of the argument, which isn't exactly the interface of this
* function. But in either case you get a rather short-lived
* reference that you ought to better leave alone.
@@ -139,7 +139,7 @@ PLyUnicode_AsString(PyObject *unicode)
#if PY_MAJOR_VERSION >= 3
/*
* Convert a C string in the PostgreSQL server encoding to a Python
- * unicode object. Reference ownership is passed to the caller.
+ * unicode object. Reference ownership is passed to the caller.
*/
PyObject *
PLyUnicode_FromString(const char *s)
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index 8c18d5ea20..a53cca4f27 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -208,8 +208,8 @@ static void pltcl_event_trigger_handler(PG_FUNCTION_ARGS, bool pltrusted);
static void throw_tcl_error(Tcl_Interp *interp, const char *proname);
static pltcl_proc_desc *compile_pltcl_function(Oid fn_oid, Oid tgreloid,
- bool is_event_trigger,
- bool pltrusted);
+ bool is_event_trigger,
+ bool pltrusted);
static int pltcl_elog(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
@@ -855,7 +855,7 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS, bool pltrusted)
/* Find or compile the function */
prodesc = compile_pltcl_function(fcinfo->flinfo->fn_oid,
RelationGetRelid(trigdata->tg_relation),
- false, /* not an event trigger */
+ false, /* not an event trigger */
pltrusted);
pltcl_current_prodesc = prodesc;
@@ -1607,7 +1607,7 @@ pltcl_elog(ClientData cdata, Tcl_Interp *interp,
if (level == ERROR)
{
/*
- * We just pass the error back to Tcl. If it's not caught, it'll
+ * We just pass the error back to Tcl. If it's not caught, it'll
* eventually get converted to a PG error when we reach the call
* handler.
*/
diff --git a/src/port/chklocale.c b/src/port/chklocale.c
index f862c907e2..588dfd9564 100644
--- a/src/port/chklocale.c
+++ b/src/port/chklocale.c
@@ -373,7 +373,7 @@ pg_get_encoding_from_locale(const char *ctype, bool write_message)
/*
* We print a warning if we got a CODESET string but couldn't recognize
- * it. This means we need another entry in the table.
+ * it. This means we need another entry in the table.
*/
if (write_message)
{
diff --git a/src/port/crypt.c b/src/port/crypt.c
index 9347d3b47c..ef8bf46338 100644
--- a/src/port/crypt.c
+++ b/src/port/crypt.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -118,7 +118,7 @@ typedef int int32_t;
* representation is to store one bit per byte in an array of bytes. Bit N of
* the NBS spec is stored as the LSB of the Nth byte (index N-1) in the array.
* Another representation stores the 64 bits in 8 bytes, with bits 1..8 in the
- * first byte, 9..16 in the second, and so on. The DES spec apparently has
+ * first byte, 9..16 in the second, and so on. The DES spec apparently has
* bit 1 in the MSB of the first byte, but that is particularly noxious so we
* bit-reverse each byte so that bit 1 is the LSB of the first byte, bit 8 is
* the MSB of the first byte. Specifically, the 64-bit input data and key are
@@ -126,21 +126,21 @@ typedef int int32_t;
* MSB format.
*
* DES operates internally on groups of 32 bits which are expanded to 48 bits
- * by permutation E and shrunk back to 32 bits by the S boxes. To speed up
+ * by permutation E and shrunk back to 32 bits by the S boxes. To speed up
* the computation, the expansion is applied only once, the expanded
* representation is maintained during the encryption, and a compression
- * permutation is applied only at the end. To speed up the S-box lookups,
+ * permutation is applied only at the end. To speed up the S-box lookups,
* the 48 bits are maintained as eight 6 bit groups, one per byte, which
* directly feed the eight S-boxes. Within each byte, the 6 bits are the
- * most significant ones. The low two bits of each byte are zero. (Thus,
+ * most significant ones. The low two bits of each byte are zero. (Thus,
* bit 1 of the 48 bit E expansion is stored as the "4"-valued bit of the
* first byte in the eight byte representation, bit 2 of the 48 bit value is
* the "8"-valued bit, and so on.) In fact, a combined "SPE"-box lookup is
* used, in which the output is the 64 bit result of an S-box lookup which
* has been permuted by P and expanded by E, and is ready for use in the next
* iteration. Two 32-bit wide tables, SPE[0] and SPE[1], are used for this
- * lookup. Since each byte in the 48 bit path is a multiple of four, indexed
- * lookup of SPE[0] and SPE[1] is simple and fast. The key schedule and
+ * lookup. Since each byte in the 48 bit path is a multiple of four, indexed
+ * lookup of SPE[0] and SPE[1] is simple and fast. The key schedule and
* "salt" are also converted to this 8*(6+2) format. The SPE table size is
* 8*64*8 = 4K bytes.
*
@@ -154,7 +154,7 @@ typedef int int32_t;
* The byte-order problem is unfortunate, since on the one hand it is good
* to have a machine-independent C_block representation (bits 1..8 in the
* first byte, etc.), and on the other hand it is good for the LSB of the
- * first byte to be the LSB of i0. We cannot have both these things, so we
+ * first byte to be the LSB of i0. We cannot have both these things, so we
* currently use the "little-endian" representation and avoid any multi-byte
* operations that depend on byte order. This largely precludes use of the
* 64-bit datatype since the relative order of i0 and i1 are unknown. It
@@ -181,13 +181,13 @@ typedef int int32_t;
* IE3264: MSB->LSB conversion, initial permutation, and expansion.
* This is done by collecting the 32 even-numbered bits and applying
* a 32->64 bit transformation, and then collecting the 32 odd-numbered
- * bits and applying the same transformation. Since there are only
+ * bits and applying the same transformation. Since there are only
* 32 input bits, the IE3264 transformation table is half the size of
* the usual table.
* CF6464: Compression, final permutation, and LSB->MSB conversion.
* This is done by two trivial 48->32 bit compressions to obtain
* a 64-bit block (the bit numbering is given in the "CIFP" table)
- * followed by a 64->64 bit "cleanup" transformation. (It would
+ * followed by a 64->64 bit "cleanup" transformation. (It would
* be possible to group the bits in the 64-bit block so that 2
* identical 32->32 bit transformations could be used instead,
* saving a factor of 4 in space and possibly 2 in time, but
@@ -206,7 +206,7 @@ typedef int int32_t;
* transforms 56 bits into 48 bits, dropping 8 bits, so PC2 is not
* invertible. We get around that problem by using a modified PC2
* which retains the 8 otherwise-lost bits in the unused low-order
- * bits of each byte. The low-order bits are cleared when the
+ * bits of each byte. The low-order bits are cleared when the
* codes are stored into the key schedule.
* PC2ROT[1]: Same as PC2ROT[0], but with two rotations.
* This is faster than applying PC2ROT[0] twice,
@@ -215,7 +215,7 @@ typedef int int32_t;
*
* The salting is a simple permutation applied to the 48-bit result of E.
* Specifically, if bit i (1 <= i <= 24) of the salt is set then bits i and
- * i+24 of the result are swapped. The salt is thus a 24 bit number, with
+ * i+24 of the result are swapped. The salt is thus a 24 bit number, with
* 16777216 possible values. (The original salt was 12 bits and could not
* swap bits 13..24 with 36..48.)
*
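The salting permutation is small enough to show directly; a sketch with the 48-bit E result held in the low bits of a uint64_t (bit 1 = LSB, matching the little-endian convention above, and unlike the table-driven real code):

    #include <stdint.h>

    static uint64_t
    apply_salt(uint64_t e48, uint32_t salt)
    {
        int     i;

        for (i = 0; i < 24; i++)
        {
            if (salt & (UINT32_C(1) << i))   /* salt bit i+1 set: swap */
            {
                uint64_t    lo = (e48 >> i) & 1;
                uint64_t    hi = (e48 >> (i + 24)) & 1;

                if (lo != hi)
                    e48 ^= (UINT64_C(1) << i) | (UINT64_C(1) << (i + 24));
            }
        }
        return e48;
    }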
@@ -467,7 +467,7 @@ static C_block PC2ROT[2][64 / CHUNKBITS][1 << CHUNKBITS];
/* Initial permutation/expansion table */
static C_block IE3264[32 / CHUNKBITS][1 << CHUNKBITS];
-/* Table that combines the S, P, and E operations. */
+/* Table that combines the S, P, and E operations. */
static int32_t SPE[2][8][64];
/* compressed/interleaved => final permutation table */
diff --git a/src/port/dirent.c b/src/port/dirent.c
index 3bdc03e9b4..9a431fc7e0 100644
--- a/src/port/dirent.c
+++ b/src/port/dirent.c
@@ -112,7 +112,7 @@ readdir(DIR *d)
int
closedir(DIR *d)
{
- int ret = 0;
+ int ret = 0;
if (d->handle != INVALID_HANDLE_VALUE)
ret = !FindClose(d->handle);
diff --git a/src/port/erand48.c b/src/port/erand48.c
index 524911edd1..9d471197c3 100644
--- a/src/port/erand48.c
+++ b/src/port/erand48.c
@@ -5,7 +5,7 @@
* This file supplies pg_erand48(), pg_lrand48(), and pg_srand48(), which
* are just like erand48(), lrand48(), and srand48() except that we use
* our own implementation rather than the one provided by the operating
- * system. We used to test for an operating system version rather than
+ * system. We used to test for an operating system version rather than
* unconditionally using our own, but (1) some versions of Cygwin have a
* buggy erand48() that always returns zero and (2) as of 2011, glibc's
* erand48() is strangely coded to be almost-but-not-quite thread-safe,
diff --git a/src/port/fls.c b/src/port/fls.c
index 2f1e922c7f..68269112ef 100644
--- a/src/port/fls.c
+++ b/src/port/fls.c
@@ -10,7 +10,7 @@
* src/port/fls.c
*
* This file was taken from FreeBSD to provide an implementation of fls()
- * for platforms that lack it. Note that the operating system's version may
+ * for platforms that lack it. Note that the operating system's version may
* be substantially more efficient than ours, since some platforms have an
* assembly instruction that does exactly this.
*
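
For reference, the contract being supplied here is small enough to sketch; this is not the FreeBSD code in the file, just an illustration of the semantics:

    /* fls: return the 1-based index of the most significant set bit,
     * or 0 if mask is 0; e.g. fls(1) == 1, fls(0x80) == 8. */
    static int
    fls_sketch(unsigned int mask)
    {
        int     bit = 0;

        while (mask != 0)
        {
            mask >>= 1;
            bit++;
        }
        return bit;
    }
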
@@ -36,7 +36,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/getaddrinfo.c b/src/port/getaddrinfo.c
index 7880ad8a16..5ccb4ad21f 100644
--- a/src/port/getaddrinfo.c
+++ b/src/port/getaddrinfo.c
@@ -4,7 +4,7 @@
* Support getaddrinfo() on platforms that don't have it.
*
* We also supply getnameinfo() here, assuming that the platform will have
- * it if and only if it has getaddrinfo(). If this proves false on some
+ * it if and only if it has getaddrinfo(). If this proves false on some
* platform, we'll need to split this file and provide a separate configure
* test for getnameinfo().
*
@@ -144,6 +144,7 @@ getaddrinfo(const char *node, const char *service,
struct addrinfo hints;
#ifdef WIN32
+
/*
* If Windows has native IPv6 support, use the native Windows routine.
* Otherwise, fall through and use our own code.
@@ -266,6 +267,7 @@ freeaddrinfo(struct addrinfo * res)
if (res)
{
#ifdef WIN32
+
/*
* If Windows has native IPv6 support, use the native Windows routine.
* Otherwise, fall through and use our own code.
@@ -357,6 +359,7 @@ getnameinfo(const struct sockaddr * sa, int salen,
char *service, int servicelen, int flags)
{
#ifdef WIN32
+
/*
* If Windows has native IPv6 support, use the native Windows routine.
* Otherwise, fall through and use our own code.
diff --git a/src/port/getopt.c b/src/port/getopt.c
index b81fba14b0..f1ad93d7d6 100644
--- a/src/port/getopt.c
+++ b/src/port/getopt.c
@@ -21,7 +21,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/getopt_long.c b/src/port/getopt_long.c
index d624216ad0..b099091a76 100644
--- a/src/port/getopt_long.c
+++ b/src/port/getopt_long.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/inet_aton.c b/src/port/inet_aton.c
index 473f51f88d..27e8aaa4ec 100644
--- a/src/port/inet_aton.c
+++ b/src/port/inet_aton.c
@@ -6,7 +6,7 @@
*
* The function was extracted whole from the file inet_aton.c in
* Release 5.3.12 of the Linux C library, which is derived from the
- * GNU C library, by Bryan Henderson in October 1996. The copyright
+ * GNU C library, by Bryan Henderson in October 1996. The copyright
* notice from that file is below.
*/
@@ -29,7 +29,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/kill.c b/src/port/kill.c
index ceea3f49bf..5a7d483ce6 100644
--- a/src/port/kill.c
+++ b/src/port/kill.c
@@ -42,7 +42,7 @@ pgkill(int pid, int sig)
/* special case for SIGKILL: just ask the system to terminate the target */
if (sig == SIGKILL)
{
- HANDLE prochandle;
+ HANDLE prochandle;
if ((prochandle = OpenProcess(PROCESS_TERMINATE, FALSE, (DWORD) pid)) == NULL)
{
diff --git a/src/port/path.c b/src/port/path.c
index 3484f2cb61..378920597d 100644
--- a/src/port/path.c
+++ b/src/port/path.c
@@ -53,7 +53,7 @@ static void trim_trailing_separator(char *path);
/*
* skip_drive
*
- * On Windows, a path may begin with "C:" or "//network/". Advance over
+ * On Windows, a path may begin with "C:" or "//network/". Advance over
* this and point to the effective start of the path.
*/
#ifdef WIN32
@@ -226,6 +226,7 @@ canonicalize_path(char *path)
int pending_strips;
#ifdef WIN32
+
/*
* The Windows command processor will accept suitably quoted paths with
* forward slashes, but barfs badly with mixed forward and back slashes.
@@ -276,7 +277,7 @@ canonicalize_path(char *path)
* Remove any trailing uses of "." and process ".." ourselves
*
* Note that "/../.." should reduce to just "/", while "../.." has to be
- * kept as-is. In the latter case we put back mistakenly trimmed ".."
+ * kept as-is. In the latter case we put back mistakenly trimmed ".."
* components below. Also note that we want a Windows drive spec to be
* visible to trim_directory(), but it's not part of the logic that's
* looking at the name components; hence distinction between path and
@@ -375,6 +376,7 @@ path_is_relative_and_below_cwd(const char *path)
else if (path_contains_parent_reference(path))
return false;
#ifdef WIN32
+
/*
* On Win32, a drive letter _not_ followed by a slash, e.g. 'E:abc', is
* relative to the cwd on that drive, or the drive's root directory if
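
The ".." reduction rule quoted in the canonicalize_path() comment above is easiest to see with concrete inputs. A minimal usage sketch, assuming the test program is linked against src/port/path.c:

    #include <stdio.h>
    #include <string.h>

    extern void canonicalize_path(char *path);

    int
    main(void)
    {
        char    a[64];
        char    b[64];

        strcpy(a, "/../..");            /* absolute: reduces to "/" */
        strcpy(b, "../..");             /* relative: must stay as-is */
        canonicalize_path(a);
        canonicalize_path(b);
        printf("%s\n%s\n", a, b);       /* expect "/" and "../.." */
        return 0;
    }
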
diff --git a/src/port/pgmkdirp.c b/src/port/pgmkdirp.c
index 84367f7c9b..d9c95b522c 100644
--- a/src/port/pgmkdirp.c
+++ b/src/port/pgmkdirp.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/pqsignal.c b/src/port/pqsignal.c
index ef79e987eb..5d10843aeb 100644
--- a/src/port/pqsignal.c
+++ b/src/port/pqsignal.c
@@ -19,14 +19,14 @@
* Ultrix and SunOS provide BSD signal(2) semantics by default.
*
* SVID2 and POSIX signal(2) semantics differ from BSD signal(2)
- * semantics. We can use the POSIX sigaction(2) on systems that
+ * semantics. We can use the POSIX sigaction(2) on systems that
* allow us to request restartable signals (SA_RESTART).
*
* Some systems don't allow restartable signals at all unless we
* link to a special BSD library.
*
* We devoutly hope that there aren't any Unix-oid systems that provide
- * neither POSIX signals nor BSD signals. The alternative is to do
+ * neither POSIX signals nor BSD signals. The alternative is to do
* signal-handler reinstallation, which doesn't work well at all.
*
* Windows, of course, is resolutely in a class by itself. In the backend,
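
The sigaction()-based scheme the comment describes amounts to a few lines; a hedged sketch of the general pattern (the file's real function adds per-signal exceptions, such as historically leaving SIGALRM non-restartable):

    #include <signal.h>

    typedef void (*pqsigfunc) (int);

    static pqsigfunc
    pqsignal_sketch(int signo, pqsigfunc func)
    {
        struct sigaction act,
                    oact;

        act.sa_handler = func;
        sigemptyset(&act.sa_mask);
        act.sa_flags = SA_RESTART;      /* request restartable signals */
        if (sigaction(signo, &act, &oact) < 0)
            return SIG_ERR;
        return oact.sa_handler;
    }
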
diff --git a/src/port/qsort.c b/src/port/qsort.c
index 2747df3c5a..fa35b1b153 100644
--- a/src/port/qsort.c
+++ b/src/port/qsort.c
@@ -33,7 +33,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/qsort_arg.c b/src/port/qsort_arg.c
index 3091eb09ea..c0aee733be 100644
--- a/src/port/qsort_arg.c
+++ b/src/port/qsort_arg.c
@@ -33,7 +33,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/snprintf.c b/src/port/snprintf.c
index d3f890fae9..c13faeabe5 100644
--- a/src/port/snprintf.c
+++ b/src/port/snprintf.c
@@ -18,7 +18,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -66,7 +66,7 @@
* platforms. This implementation is compatible with the Single Unix Spec:
*
* 1. -1 is returned only if processing is abandoned due to an invalid
- * parameter, such as incorrect format string. (Although not required by
+ * parameter, such as incorrect format string. (Although not required by
* the spec, this happens only when no characters have yet been transmitted
* to the destination.)
*
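
Under the Single Unix Spec convention referenced here, the return value is the number of bytes that would have been emitted given unlimited space, so callers can distinguish both failure modes; a small usage sketch:

    #include <stdio.h>

    int
    main(void)
    {
        char    buf[8];
        int     needed = snprintf(buf, sizeof(buf), "%s", "postgresql");

        if (needed < 0)
            return 1;                   /* invalid parameter; nothing written */
        if ((size_t) needed >= sizeof(buf))
            printf("truncated: wanted %d bytes, kept \"%s\"\n", needed, buf);
        return 0;
    }
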
@@ -87,7 +87,7 @@
* Original:
* Patrick Powell Tue Apr 11 09:48:21 PDT 1995
* A bombproof version of doprnt (dopr) included.
- * Sigh. This sort of thing is always nasty do deal with. Note that
+ * Sigh. This sort of thing is always nasty to deal with. Note that
* the version here does not include floating point. (now it does ... tgl)
**************************************************************/
diff --git a/src/port/sprompt.c b/src/port/sprompt.c
index 4120acf914..0e9aca7a7a 100644
--- a/src/port/sprompt.c
+++ b/src/port/sprompt.c
@@ -57,6 +57,7 @@ simple_prompt(const char *prompt, int maxlen, bool echo)
return NULL;
#ifdef WIN32
+
/*
* A Windows console has an "input code page" and an "output code page";
* these usually match each other, but they rarely match the "Windows ANSI
@@ -65,7 +66,7 @@ simple_prompt(const char *prompt, int maxlen, bool echo)
* automatically converts text between these code pages when writing to a
* console. To identify such file descriptors, it calls GetConsoleMode()
* on the underlying HANDLE, which in turn requires GENERIC_READ access on
- * the HANDLE. Opening termout in mode "w+" allows that detection to
+ * the HANDLE. Opening termout in mode "w+" allows that detection to
* succeed. Otherwise, write() would not recognize the descriptor as a
* console, and non-ASCII characters would display incorrectly.
*
@@ -85,11 +86,12 @@ simple_prompt(const char *prompt, int maxlen, bool echo)
#endif
if (!termin || !termout
#ifdef WIN32
+
/*
* Direct console I/O does not work from the MSYS 1.0.10 console. Writes
* reach nowhere user-visible; reads block indefinitely. XXX This affects
* most Windows terminal environments, including rxvt, mintty, Cygwin
- * xterm, Cygwin sshd, and PowerShell ISE. Switch to a more-generic test.
+ * xterm, Cygwin sshd, and PowerShell ISE. Switch to a more-generic test.
*/
|| (getenv("OSTYPE") && strcmp(getenv("OSTYPE"), "msys") == 0)
#endif
diff --git a/src/port/strlcat.c b/src/port/strlcat.c
index 33ee22520d..190e57338e 100644
--- a/src/port/strlcat.c
+++ b/src/port/strlcat.c
@@ -25,7 +25,7 @@
/*
* Appends src to string dst of size siz (unlike strncat, siz is the
* full size of dst, not space left). At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
+ * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
* Returns strlen(src) + MIN(siz, strlen(initial dst)).
* If retval >= siz, truncation occurred.
*/
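
The retval >= siz convention makes truncation checks one-liners; a usage sketch, assuming strlcat() from this file (or the platform) is linked in:

    #include <stdio.h>
    #include <string.h>

    extern size_t strlcat(char *dst, const char *src, size_t siz);

    int
    main(void)
    {
        char    buf[8] = "pg_";

        if (strlcat(buf, "basebackup", sizeof(buf)) >= sizeof(buf))
            printf("truncated: buf is now \"%s\"\n", buf);  /* "pg_base" */
        return 0;
    }
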
diff --git a/src/port/strlcpy.c b/src/port/strlcpy.c
index 6a6f643163..2fe20c0f4d 100644
--- a/src/port/strlcpy.c
+++ b/src/port/strlcpy.c
@@ -36,8 +36,8 @@
/*
- * Copy src to string dst of size siz. At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz == 0).
+ * Copy src to string dst of size siz. At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz == 0).
* Returns strlen(src); if retval >= siz, truncation occurred.
* Function creation history: http://www.gratisoft.us/todd/papers/strlcpy.html
*/
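
Since the return value is always strlen(src), one call both detects truncation and reports how much space would have sufficed; a sketch under the same linkage assumption as above:

    #include <stdio.h>
    #include <string.h>

    extern size_t strlcpy(char *dst, const char *src, size_t siz);

    int
    main(void)
    {
        char    small[4];
        size_t  needed = strlcpy(small, "postgres", sizeof(small));

        if (needed >= sizeof(small))    /* truncation occurred */
            printf("need %zu bytes, kept \"%s\"\n", needed + 1, small);
        return 0;
    }
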
diff --git a/src/port/thread.c b/src/port/thread.c
index e0ab99dc3f..de49128e22 100644
--- a/src/port/thread.c
+++ b/src/port/thread.c
@@ -28,12 +28,12 @@
* Additional confusion exists because many operating systems that
* use pthread_setspecific/pthread_getspecific() also have *_r versions
* of standard library functions for compatibility with operating systems
- * that require them. However, internally, these *_r functions merely
+ * that require them. However, internally, these *_r functions merely
* call the thread-safe standard library functions.
*
* For example, BSD/OS 4.3 uses Bind 8.2.3 for getpwuid(). Internally,
* getpwuid() calls pthread_setspecific/pthread_getspecific() to return
- * static data to the caller in a thread-safe manner. However, BSD/OS
+ * static data to the caller in a thread-safe manner. However, BSD/OS
* also has getpwuid_r(), which merely calls getpwuid() and shifts
* around the arguments to match the getpwuid_r() function declaration.
* Therefore, while BSD/OS has getpwuid_r(), it isn't required. It also
diff --git a/src/port/unsetenv.c b/src/port/unsetenv.c
index 87a9b9e89b..f0952fea28 100644
--- a/src/port/unsetenv.c
+++ b/src/port/unsetenv.c
@@ -30,7 +30,7 @@ unsetenv(const char *name)
* entry. When we clobber the entry in the second step we are ensuring
* that we zap the actual environ member. However, there are some libc
* implementations (notably recent BSDs) that do not obey SUS but copy the
- * presented string. This method fails on such platforms. Hopefully all
+ * presented string. This method fails on such platforms. Hopefully all
* such platforms have unsetenv() and thus won't be using this hack. See:
* http://www.greenend.org.uk/rjk/2008/putenv.html
*
diff --git a/src/test/isolation/isolation_main.c b/src/test/isolation/isolation_main.c
index c8d431fd95..dc801b95e1 100644
--- a/src/test/isolation/isolation_main.c
+++ b/src/test/isolation/isolation_main.c
@@ -12,9 +12,9 @@
#include "pg_regress.h"
-char saved_argv0[MAXPGPATH];
-char isolation_exec[MAXPGPATH];
-bool looked_up_isolation_exec = false;
+char saved_argv0[MAXPGPATH];
+char isolation_exec[MAXPGPATH];
+bool looked_up_isolation_exec = false;
#define PG_ISOLATION_VERSIONSTR "isolationtester (PostgreSQL) " PG_VERSION "\n"
@@ -24,9 +24,9 @@ bool looked_up_isolation_exec = false;
*/
static PID_TYPE
isolation_start_test(const char *testname,
- _stringlist ** resultfiles,
- _stringlist ** expectfiles,
- _stringlist ** tags)
+ _stringlist **resultfiles,
+ _stringlist **expectfiles,
+ _stringlist **tags)
{
PID_TYPE pid;
char infile[MAXPGPATH];
diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c
index ad6732aee7..18e1445d81 100644
--- a/src/test/isolation/isolationtester.c
+++ b/src/test/isolation/isolationtester.c
@@ -34,16 +34,16 @@ static int nconns = 0;
/* In dry run only output permutations to be run by the tester. */
static int dry_run = false;
-static void run_testspec(TestSpec * testspec);
-static void run_all_permutations(TestSpec * testspec);
-static void run_all_permutations_recurse(TestSpec * testspec, int nsteps,
- Step ** steps);
-static void run_named_permutations(TestSpec * testspec);
-static void run_permutation(TestSpec * testspec, int nsteps, Step ** steps);
+static void run_testspec(TestSpec *testspec);
+static void run_all_permutations(TestSpec *testspec);
+static void run_all_permutations_recurse(TestSpec *testspec, int nsteps,
+ Step **steps);
+static void run_named_permutations(TestSpec *testspec);
+static void run_permutation(TestSpec *testspec, int nsteps, Step **steps);
#define STEP_NONBLOCK 0x1 /* return 0 as soon as cmd waits for a lock */
#define STEP_RETRY 0x2 /* this is a retry of a previously-waiting cmd */
-static bool try_complete_step(Step * step, int flags);
+static bool try_complete_step(Step *step, int flags);
static int step_qsort_cmp(const void *a, const void *b);
static int step_bsearch_cmp(const void *a, const void *b);
@@ -184,7 +184,7 @@ main(int argc, char **argv)
/*
* Build the query we'll use to detect lock contention among sessions in
- * the test specification. Most of the time, we could get away with
+ * the test specification. Most of the time, we could get away with
* simply checking whether a session is waiting for *any* lock: we don't
* exactly expect concurrent use of test tables. However, autovacuum will
* occasionally take AccessExclusiveLock to truncate a table, and we must
@@ -291,7 +291,7 @@ static int *piles;
* explicitly specified.
*/
static void
-run_testspec(TestSpec * testspec)
+run_testspec(TestSpec *testspec)
{
if (testspec->permutations)
run_named_permutations(testspec);
@@ -303,7 +303,7 @@ run_testspec(TestSpec * testspec)
* Run all permutations of the steps and sessions.
*/
static void
-run_all_permutations(TestSpec * testspec)
+run_all_permutations(TestSpec *testspec)
{
int nsteps;
int i;
@@ -333,7 +333,7 @@ run_all_permutations(TestSpec * testspec)
}
static void
-run_all_permutations_recurse(TestSpec * testspec, int nsteps, Step ** steps)
+run_all_permutations_recurse(TestSpec *testspec, int nsteps, Step **steps)
{
int i;
int found = 0;
@@ -363,7 +363,7 @@ run_all_permutations_recurse(TestSpec * testspec, int nsteps, Step ** steps)
* Run permutations given in the test spec
*/
static void
-run_named_permutations(TestSpec * testspec)
+run_named_permutations(TestSpec *testspec)
{
int i,
j;
@@ -439,7 +439,7 @@ step_bsearch_cmp(const void *a, const void *b)
* If a step caused an error to be reported, print it out and clear it.
*/
static void
-report_error_message(Step * step)
+report_error_message(Step *step)
{
if (step->errormsg)
{
@@ -456,7 +456,7 @@ report_error_message(Step * step)
* one fails due to a timeout such as deadlock timeout.
*/
static void
-report_two_error_messages(Step * step1, Step * step2)
+report_two_error_messages(Step *step1, Step *step2)
{
char *prefix;
@@ -484,7 +484,7 @@ report_two_error_messages(Step * step1, Step * step2)
* Run one permutation
*/
static void
-run_permutation(TestSpec * testspec, int nsteps, Step ** steps)
+run_permutation(TestSpec *testspec, int nsteps, Step **steps)
{
PGresult *res;
int i;
@@ -679,11 +679,11 @@ teardown:
/*
* Our caller already sent the query associated with this step. Wait for it
* to either complete or (if given the STEP_NONBLOCK flag) to block while
- * waiting for a lock. We assume that any lock wait will persist until we
+ * waiting for a lock. We assume that any lock wait will persist until we
* have executed additional steps in the permutation.
*
* When calling this function on behalf of a given step for a second or later
- * time, pass the STEP_RETRY flag. This only affects the messages printed.
+ * time, pass the STEP_RETRY flag. This only affects the messages printed.
*
* If the connection returns an error, the message is saved in step->errormsg.
* Caller should call report_error_message shortly after this, to have it
@@ -693,7 +693,7 @@ teardown:
* a lock, returns true. Otherwise, returns false.
*/
static bool
-try_complete_step(Step * step, int flags)
+try_complete_step(Step *step, int flags)
{
PGconn *conn = conns[1 + step->session];
fd_set read_set;
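
The flag protocol spelled out in that comment implies a retry loop on the calling side; a hedged fragment showing the intended calling pattern (not the tester's exact control flow):

    /* First attempt: with STEP_NONBLOCK, returns true if the step is
     * blocked waiting for a lock rather than completing. */
    if (try_complete_step(step, STEP_NONBLOCK))
    {
        /* ... run other steps of the permutation here ... */

        /* Later attempts pass STEP_RETRY, which only changes messages. */
        try_complete_step(step, STEP_NONBLOCK | STEP_RETRY);
    }
    report_error_message(step);     /* print and clear any saved errormsg */
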
diff --git a/src/test/isolation/isolationtester.h b/src/test/isolation/isolationtester.h
index 2671d15325..15ee3f8360 100644
--- a/src/test/isolation/isolationtester.h
+++ b/src/test/isolation/isolationtester.h
@@ -38,7 +38,7 @@ typedef struct
{
int nsteps;
char **stepnames;
-} Permutation;
+} Permutation;
typedef struct
{
@@ -49,7 +49,7 @@ typedef struct
int nsessions;
Permutation **permutations;
int npermutations;
-} TestSpec;
+} TestSpec;
extern TestSpec parseresult;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index 7c378e0fa9..8a3111056a 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -5,34 +5,37 @@ use warnings;
use Exporter 'import';
our @EXPORT = qw(
- tempdir
- start_test_server
- restart_test_server
- psql
- system_or_bail
-
- command_ok
- command_fails
- command_exit_is
- program_help_ok
- program_version_ok
- program_options_handling_ok
- command_like
- issues_sql_like
+ tempdir
+ start_test_server
+ restart_test_server
+ psql
+ system_or_bail
+
+ command_ok
+ command_fails
+ command_exit_is
+ program_help_ok
+ program_version_ok
+ program_options_handling_ok
+ command_like
+ issues_sql_like
);
use Cwd;
use File::Spec;
use File::Temp ();
use Test::More;
-BEGIN {
+
+BEGIN
+{
eval {
require IPC::Run;
import IPC::Run qw(run start);
1;
- } or do {
+ } or do
+ {
plan skip_all => "IPC::Run not available";
- }
+ }
}
delete $ENV{PGCONNECT_TIMEOUT};
@@ -44,7 +47,8 @@ delete $ENV{PGSERVICE};
delete $ENV{PGSSLMODE};
delete $ENV{PGUSER};
-if (!$ENV{PGPORT}) {
+if (!$ENV{PGPORT})
+{
$ENV{PGPORT} = 65432;
}
@@ -56,45 +60,58 @@ $ENV{PGPORT} = int($ENV{PGPORT}) % 65536;
#
-sub tempdir {
+sub tempdir
+{
return File::Temp::tempdir('testXXXX', DIR => cwd(), CLEANUP => 1);
}
my ($test_server_datadir, $test_server_logfile);
-sub start_test_server {
+sub start_test_server
+{
my ($tempdir) = @_;
my $ret;
system "initdb -D $tempdir/pgdata -A trust -N >/dev/null";
- $ret = system 'pg_ctl', '-D', "$tempdir/pgdata", '-s', '-w', '-l', "$tempdir/logfile", '-o', "--fsync=off -k $tempdir --listen-addresses='' --log-statement=all", 'start';
+ $ret = system 'pg_ctl', '-D', "$tempdir/pgdata", '-s', '-w', '-l',
+ "$tempdir/logfile", '-o',
+ "--fsync=off -k $tempdir --listen-addresses='' --log-statement=all",
+ 'start';
- if ($ret != 0) {
+ if ($ret != 0)
+ {
system('cat', "$tempdir/logfile");
BAIL_OUT("pg_ctl failed");
}
- $ENV{PGHOST} = $tempdir;
+ $ENV{PGHOST} = $tempdir;
$test_server_datadir = "$tempdir/pgdata";
$test_server_logfile = "$tempdir/logfile";
}
-sub restart_test_server {
- system 'pg_ctl', '-s', '-D', $test_server_datadir, '-w', '-l', $test_server_logfile, 'restart';
+sub restart_test_server
+{
+ system 'pg_ctl', '-s', '-D', $test_server_datadir, '-w', '-l',
+ $test_server_logfile, 'restart';
}
-END {
- if ($test_server_datadir) {
- system 'pg_ctl', '-D', $test_server_datadir, '-s', '-w', '-m', 'immediate', 'stop';
+END
+{
+ if ($test_server_datadir)
+ {
+ system 'pg_ctl', '-D', $test_server_datadir, '-s', '-w', '-m',
+ 'immediate', 'stop';
}
}
-sub psql {
+sub psql
+{
my ($dbname, $sql) = @_;
- run ['psql', '-X', '-q', '-d', $dbname, '-f', '-'], '<', \$sql or die;
+ run [ 'psql', '-X', '-q', '-d', $dbname, '-f', '-' ], '<', \$sql or die;
}
-sub system_or_bail {
+sub system_or_bail
+{
system(@_) == 0 or BAIL_OUT("system @_ failed: $?");
}
@@ -104,61 +121,72 @@ sub system_or_bail {
#
-sub command_ok {
+sub command_ok
+{
my ($cmd, $test_name) = @_;
- my $result = run $cmd, '>', File::Spec->devnull(), '2>', File::Spec->devnull();
+ my $result = run $cmd, '>', File::Spec->devnull(), '2>',
+ File::Spec->devnull();
ok($result, $test_name);
}
-sub command_fails {
+sub command_fails
+{
my ($cmd, $test_name) = @_;
- my $result = run $cmd, '>', File::Spec->devnull(), '2>', File::Spec->devnull();
+ my $result = run $cmd, '>', File::Spec->devnull(), '2>',
+ File::Spec->devnull();
ok(!$result, $test_name);
}
-sub command_exit_is {
+sub command_exit_is
+{
my ($cmd, $expected, $test_name) = @_;
- my $h = start $cmd, '>', File::Spec->devnull(), '2>', File::Spec->devnull();
+ my $h = start $cmd, '>', File::Spec->devnull(), '2>',
+ File::Spec->devnull();
$h->finish();
is($h->result(0), $expected, $test_name);
}
-sub program_help_ok {
+sub program_help_ok
+{
my ($cmd) = @_;
subtest "$cmd --help" => sub {
plan tests => 3;
my ($stdout, $stderr);
- my $result = run [$cmd, '--help'], '>', \$stdout, '2>', \$stderr;
+ my $result = run [ $cmd, '--help' ], '>', \$stdout, '2>', \$stderr;
ok($result, "$cmd --help exit code 0");
isnt($stdout, '', "$cmd --help goes to stdout");
is($stderr, '', "$cmd --help nothing to stderr");
};
}
-sub program_version_ok {
+sub program_version_ok
+{
my ($cmd) = @_;
subtest "$cmd --version" => sub {
plan tests => 3;
my ($stdout, $stderr);
- my $result = run [$cmd, '--version'], '>', \$stdout, '2>', \$stderr;
+ my $result = run [ $cmd, '--version' ], '>', \$stdout, '2>', \$stderr;
ok($result, "$cmd --version exit code 0");
isnt($stdout, '', "$cmd --version goes to stdout");
is($stderr, '', "$cmd --version nothing to stderr");
};
}
-sub program_options_handling_ok {
+sub program_options_handling_ok
+{
my ($cmd) = @_;
subtest "$cmd options handling" => sub {
plan tests => 2;
my ($stdout, $stderr);
- my $result = run [$cmd, '--not-a-valid-option'], '>', \$stdout, '2>', \$stderr;
+ my $result = run [ $cmd, '--not-a-valid-option' ], '>', \$stdout,
+ '2>', \$stderr;
ok(!$result, "$cmd with invalid option nonzero exit code");
isnt($stderr, '', "$cmd with invalid option prints error message");
};
}
-sub command_like {
+sub command_like
+{
my ($cmd, $expected_stdout, $test_name) = @_;
subtest $test_name => sub {
plan tests => 3;
@@ -170,7 +198,8 @@ sub command_like {
};
}
-sub issues_sql_like {
+sub issues_sql_like
+{
my ($cmd, $expected_sql, $test_name) = @_;
subtest $test_name => sub {
plan tests => 2;
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index c41cf7e771..803cf903db 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -39,7 +39,7 @@ typedef struct _resultmap
char *type;
char *resultfile;
struct _resultmap *next;
-} _resultmap;
+} _resultmap;
/*
* Values obtained from pg_config_paths.h and Makefile. The PG installation
@@ -67,7 +67,7 @@ static char *shellprog = SHELLPROG;
/*
* On Windows we use -w in diff switches to avoid problems with inconsistent
- * newline representation. The actual result files will generally have
+ * newline representation. The actual result files will generally have
* Windows-style newlines, but the comparison files might or might not.
*/
#ifndef WIN32
@@ -177,7 +177,7 @@ unlimit_core_size(void)
* Add an item at the end of a stringlist.
*/
void
-add_stringlist_item(_stringlist ** listhead, const char *str)
+add_stringlist_item(_stringlist **listhead, const char *str)
{
_stringlist *newentry = malloc(sizeof(_stringlist));
_stringlist *oldentry;
@@ -198,7 +198,7 @@ add_stringlist_item(_stringlist ** listhead, const char *str)
* Free a stringlist.
*/
static void
-free_stringlist(_stringlist ** listhead)
+free_stringlist(_stringlist **listhead)
{
if (listhead == NULL || *listhead == NULL)
return;
@@ -213,7 +213,7 @@ free_stringlist(_stringlist ** listhead)
* Split a delimited string into a stringlist
*/
static void
-split_to_stringlist(const char *s, const char *delim, _stringlist ** listhead)
+split_to_stringlist(const char *s, const char *delim, _stringlist **listhead)
{
char *sc = strdup(s);
char *token = strtok(sc, delim);
@@ -438,6 +438,7 @@ convert_sourcefiles_in(char *source_subdir, char *dest_dir, char *dest_subdir, c
snprintf(testtablespace, MAXPGPATH, "%s/testtablespace", outputdir);
#ifdef WIN32
+
/*
* On Windows only, clean out the test tablespace dir, or create it if it
* doesn't exist. On other platforms we expect the Makefile to take care
@@ -540,7 +541,7 @@ convert_sourcefiles(void)
* namely, it is a standard regular expression with an implicit ^ at the start.
* (We currently support only a very limited subset of regular expressions,
* see string_matches_pattern() above.) What hostplatformpattern will be
- * matched against is the config.guess output. (In the shell-script version,
+ * matched against is the config.guess output. (In the shell-script version,
* we also provided an indication of whether gcc or another compiler was in
* use, but that facility isn't used anymore.)
*/
@@ -783,7 +784,7 @@ initialize_environment(void)
/*
* GNU make stores some flags in the MAKEFLAGS environment variable to
- * pass arguments to its own children. If we are invoked by make,
+ * pass arguments to its own children. If we are invoked by make,
* that causes the make invoked by us to think it's part of the make
* task invoking us, and so it tries to communicate with the toplevel
* make. Which fails.
@@ -810,7 +811,7 @@ initialize_environment(void)
* Set up shared library paths to include the temp install.
*
* LD_LIBRARY_PATH covers many platforms. DYLD_LIBRARY_PATH works on
- * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
+ * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
* Windows needs shared libraries in PATH (only those linked into
* executables, not dlopen'ed ones). Feel free to account for others
* as well.
@@ -930,7 +931,7 @@ spawn_process(const char *cmdline)
pid_t pid;
/*
- * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
+ * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
* ... does anyone still care about systems where that doesn't work?
*/
fflush(stdout);
@@ -951,7 +952,7 @@ spawn_process(const char *cmdline)
* In child
*
* Instead of using system(), exec the shell directly, and tell it to
- * "exec" the command too. This saves two useless processes per
+ * "exec" the command too. This saves two useless processes per
* parallel test case.
*/
char *cmdline2;
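
The flush-then-exec pattern described in these two comments can be sketched compactly; a hedged sketch (error handling trimmed, /bin/sh assumed for the shell):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static pid_t
    spawn_sketch(const char *cmdline)
    {
        pid_t   pid;

        fflush(stdout);             /* flush buffers before fork, or the
                                     * child inherits and re-emits them */
        fflush(stderr);

        pid = fork();
        if (pid == 0)
        {
            /* child: have the shell "exec" the command, saving a process */
            char   *cmdline2 = malloc(strlen(cmdline) + 6);

            sprintf(cmdline2, "exec %s", cmdline);
            execl("/bin/sh", "sh", "-c", cmdline2, (char *) NULL);
            _exit(127);             /* reached only if exec failed */
        }
        return pid;                 /* parent; -1 if fork failed */
    }
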
@@ -1156,10 +1157,10 @@ get_alternative_expectfile(const char *expectfile, int i)
char *tmp;
char *s;
- if (!(tmp = (char*) malloc(ssize)))
+ if (!(tmp = (char *) malloc(ssize)))
return NULL;
- if (!(s = (char*) malloc(ssize)))
+ if (!(s = (char *) malloc(ssize)))
{
free(tmp);
return NULL;
@@ -1194,6 +1195,7 @@ run_diff(const char *cmd, const char *filename)
exit(2);
}
#ifdef WIN32
+
/*
* On WIN32, if the 'diff' command cannot be found, system() returns 1,
* but produces nothing to stdout, so we check for that here.
@@ -1816,7 +1818,7 @@ create_database(const char *dbname)
dbname, dbname, dbname, dbname, dbname);
/*
- * Install any requested procedural languages. We use CREATE OR REPLACE
+ * Install any requested procedural languages. We use CREATE OR REPLACE
* so that this will work whether or not the language is preinstalled.
*/
for (sl = loadlanguage; sl != NULL; sl = sl->next)
@@ -1844,7 +1846,7 @@ drop_role_if_exists(const char *rolename)
}
static void
-create_role(const char *rolename, const _stringlist * granted_dbs)
+create_role(const char *rolename, const _stringlist *granted_dbs)
{
header(_("creating role \"%s\""), rolename);
psql_command("postgres", "CREATE ROLE \"%s\" WITH LOGIN", rolename);
diff --git a/src/test/regress/pg_regress.h b/src/test/regress/pg_regress.h
index 78f771fc4e..942d761a11 100644
--- a/src/test/regress/pg_regress.h
+++ b/src/test/regress/pg_regress.h
@@ -24,7 +24,7 @@ typedef struct _stringlist
{
char *str;
struct _stringlist *next;
-} _stringlist;
+} _stringlist;
typedef PID_TYPE(*test_function) (const char *,
_stringlist **,
@@ -54,7 +54,7 @@ extern const char *pretty_diff_opts;
int regression_main(int argc, char *argv[],
init_function ifunc, test_function tfunc);
-void add_stringlist_item(_stringlist ** listhead, const char *str);
+void add_stringlist_item(_stringlist **listhead, const char *str);
PID_TYPE spawn_process(const char *cmdline);
void replace_string(char *string, char *replace, char *replacement);
bool file_exists(const char *file);
diff --git a/src/test/regress/pg_regress_main.c b/src/test/regress/pg_regress_main.c
index 90327b0611..22197aa17d 100644
--- a/src/test/regress/pg_regress_main.c
+++ b/src/test/regress/pg_regress_main.c
@@ -24,9 +24,9 @@
*/
static PID_TYPE
psql_start_test(const char *testname,
- _stringlist ** resultfiles,
- _stringlist ** expectfiles,
- _stringlist ** tags)
+ _stringlist **resultfiles,
+ _stringlist **expectfiles,
+ _stringlist **tags)
{
PID_TYPE pid;
char infile[MAXPGPATH];
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 4b69067485..c1461092fb 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -224,10 +224,10 @@ typedef struct
{
Point center;
double radius;
-} WIDGET;
+} WIDGET;
WIDGET *widget_in(char *str);
-char *widget_out(WIDGET * widget);
+char *widget_out(WIDGET *widget);
#define NARGS 3
@@ -258,13 +258,13 @@ widget_in(char *str)
}
char *
-widget_out(WIDGET * widget)
+widget_out(WIDGET *widget)
{
if (widget == NULL)
return NULL;
return psprintf("(%g,%g,%g)",
- widget->center.x, widget->center.y, widget->radius);
+ widget->center.x, widget->center.y, widget->radius);
}
PG_FUNCTION_INFO_V1(pt_in_widget);
@@ -789,6 +789,7 @@ make_tuple_indirect(PG_FUNCTION_ARGS)
else
{
struct varlena *oldattr = attr;
+
attr = palloc0(VARSIZE_ANY(oldattr));
memcpy(attr, oldattr, VARSIZE_ANY(oldattr));
}
diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c
index 5079868965..85b227c925 100644
--- a/src/timezone/localtime.c
+++ b/src/timezone/localtime.c
@@ -25,15 +25,15 @@
#ifndef WILDABBR
/*----------
* Someone might make incorrect use of a time zone abbreviation:
- * 1. They might reference tzname[0] before calling tzset (explicitly
+ * 1. They might reference tzname[0] before calling tzset (explicitly
* or implicitly).
- * 2. They might reference tzname[1] before calling tzset (explicitly
+ * 2. They might reference tzname[1] before calling tzset (explicitly
* or implicitly).
- * 3. They might reference tzname[1] after setting to a time zone
+ * 3. They might reference tzname[1] after setting to a time zone
* in which Daylight Saving Time is never observed.
- * 4. They might reference tzname[0] after setting to a time zone
+ * 4. They might reference tzname[0] after setting to a time zone
* in which Standard Time is never observed.
- * 5. They might reference tm.TM_ZONE after calling offtime.
+ * 5. They might reference tm.TM_ZONE after calling offtime.
* What's best to do in the above cases is open to debate;
* for now, we just set things up so that in any of the five cases
* WILDABBR is used. Another possibility: initialize tzname[0] to the
@@ -1453,7 +1453,7 @@ pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff)
{
/*
* The zone could have more than one ttinfo, if it's historically used
- * more than one abbreviation. We return TRUE as long as they all have
+ * more than one abbreviation. We return TRUE as long as they all have
* the same gmtoff.
*/
const struct state *sp;
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index 3c161df15a..3bbe0a86b8 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -119,7 +119,7 @@ pg_open_tzfile(const char *name, char *canonname)
/*
* Scan specified directory for a case-insensitive match to fname
- * (of length fnamelen --- fname may not be null terminated!). If found,
+ * (of length fnamelen --- fname may not be null terminated!). If found,
* copy the actual filename into canonname and return true.
*/
static bool
@@ -142,7 +142,7 @@ scan_directory_ci(const char *dirname, const char *fname, int fnamelen,
while ((direntry = ReadDir(dirdesc, dirname)) != NULL)
{
/*
- * Ignore . and .., plus any other "hidden" files. This is a security
+ * Ignore . and .., plus any other "hidden" files. This is a security
* measure to prevent access to files outside the timezone directory.
*/
if (direntry->d_name[0] == '.')
@@ -335,7 +335,7 @@ pg_tzset_offset(long gmtoffset)
* This is called before GUC variable initialization begins. Its purpose
* is to ensure that log_timezone has a valid value before any logging GUC
* variables could become set to values that require elog.c to provide
- * timestamps (e.g., log_line_prefix). We may as well initialize
+ * timestamps (e.g., log_line_prefix). We may as well initialize
* session_timestamp to something valid, too.
*/
void
diff --git a/src/timezone/zic.c b/src/timezone/zic.c
index c6a811e018..2e38990323 100644
--- a/src/timezone/zic.c
+++ b/src/timezone/zic.c
@@ -481,7 +481,7 @@ main(int argc, char *argv[])
(void) umask(umask(S_IWGRP | S_IWOTH) | (S_IWGRP | S_IWOTH));
#endif /* !WIN32 */
progname = argv[0];
- if (TYPE_BIT(zic_t) < 64)
+ if (TYPE_BIT(zic_t) < 64)
{
(void) fprintf(stderr, "%s: %s\n", progname,
_("wild compilation-time specification of zic_t"));
@@ -2848,6 +2848,7 @@ mkdirs(char *argname)
{
*cp = '\0';
#ifdef WIN32
+
/*
* DOS drive specifier?
*/
diff --git a/src/tools/entab/entab.c b/src/tools/entab/entab.c
index 19b1740c9b..2e74cf49bd 100644
--- a/src/tools/entab/entab.c
+++ b/src/tools/entab/entab.c
@@ -114,7 +114,7 @@ main(int argc, char **argv)
-q (protect quotes)\n\
-s minimum_spaces\n\
-t tab_width\n",
- cp);
+ cp);
exit(0);
}
@@ -179,9 +179,9 @@ main(int argc, char **argv)
if (col_in_tab == tab_size)
{
/*
- * Is the next character going to be a tab? We do
- * tab replacement in the current spot if the next
- * char is going to be a tab and ignore min_spaces.
+ * Is the next character going to be a tab? We do tab
+ * replacement in the current spot if the next char is
+ * going to be a tab and ignore min_spaces.
*/
nxt_spaces = 0;
while (1)
@@ -249,7 +249,7 @@ main(int argc, char **argv)
else if (*src != '\r' && *src != '\n')
escaped = FALSE;
- /* reached newline/CR; clip line? */
+ /* reached newline/CR; clip line? */
if ((*src == '\r' || *src == '\n') &&
clip_lines == TRUE &&
quote_char == ' ' &&
diff --git a/src/tools/git_changelog b/src/tools/git_changelog
index 8221934c8c..fd3efa19b7 100755
--- a/src/tools/git_changelog
+++ b/src/tools/git_changelog
@@ -44,7 +44,7 @@ my @BRANCHES = qw(master
REL6_5_PATCHES REL6_4);
# Might want to make this parameter user-settable.
-my $timestamp_slop = 24*60*60;
+my $timestamp_slop = 24 * 60 * 60;
my $details_after = 0;
my $post_date = 0;
diff --git a/src/tools/msvc/MSBuildProject.pm b/src/tools/msvc/MSBuildProject.pm
index 3bdd8a1a6d..e1c8d81549 100644
--- a/src/tools/msvc/MSBuildProject.pm
+++ b/src/tools/msvc/MSBuildProject.pm
@@ -18,7 +18,7 @@ sub _new
bless($self, $classname);
$self->{filenameExtension} = '.vcxproj';
- $self->{ToolsVersion} = '4.0';
+ $self->{ToolsVersion} = '4.0';
return $self;
}
@@ -415,7 +415,7 @@ sub new
my $self = $classname->SUPER::_new(@_);
bless($self, $classname);
- $self->{vcver} = '11.00';
+ $self->{vcver} = '11.00';
$self->{PlatformToolset} = 'v110';
return $self;
@@ -458,9 +458,9 @@ sub new
my $self = $classname->SUPER::_new(@_);
bless($self, $classname);
- $self->{vcver} = '12.00';
+ $self->{vcver} = '12.00';
$self->{PlatformToolset} = 'v120';
- $self->{ToolsVersion} = '12.0';
+ $self->{ToolsVersion} = '12.0';
return $self;
}
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index 1254d89c29..260f6308f9 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -375,7 +375,7 @@ sub mkvcbuild
$pgreceivexlog->AddFile('src\bin\pg_basebackup\pg_receivexlog.c');
$pgreceivexlog->AddLibrary('ws2_32.lib');
- my $pgrecvlogical= AddSimpleFrontend('pg_basebackup', 1);
+ my $pgrecvlogical = AddSimpleFrontend('pg_basebackup', 1);
$pgrecvlogical->{name} = 'pg_recvlogical';
$pgrecvlogical->AddFile('src\bin\pg_basebackup\pg_recvlogical.c');
$pgrecvlogical->AddLibrary('ws2_32.lib');
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index 95c6bcbc1b..283d399fbd 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -15,14 +15,14 @@ sub _new
my $classname = shift;
my $options = shift;
my $self = {
- projects => {},
- options => $options,
- numver => '',
- strver => '',
- VisualStudioVersion => undef,
+ projects => {},
+ options => $options,
+ numver => '',
+ strver => '',
+ VisualStudioVersion => undef,
MinimumVisualStudioVersion => undef,
- vcver => undef,
- platform => undef, };
+ vcver => undef,
+ platform => undef, };
bless($self, $classname);
# integer_datetimes is now the default
@@ -505,10 +505,8 @@ sub AddProject
{
$proj->AddIncludeDir($self->{options}->{gss} . '\inc\krb5');
$proj->AddLibrary($self->{options}->{gss} . '\lib\i386\krb5_32.lib');
- $proj->AddLibrary(
- $self->{options}->{gss} . '\lib\i386\comerr32.lib');
- $proj->AddLibrary(
- $self->{options}->{gss} . '\lib\i386\gssapi32.lib');
+ $proj->AddLibrary($self->{options}->{gss} . '\lib\i386\comerr32.lib');
+ $proj->AddLibrary($self->{options}->{gss} . '\lib\i386\gssapi32.lib');
}
if ($self->{options}->{iconv})
{
@@ -749,13 +747,13 @@ sub new
my $self = $classname->SUPER::_new(@_);
bless($self, $classname);
- $self->{solutionFileVersion} = '12.00';
- $self->{vcver} = '12.00';
- $self->{visualStudioName} = 'Visual Studio 2013';
- $self->{VisualStudioVersion} = '12.0.21005.1',
- $self->{MinimumVisualStudioVersion} = '10.0.40219.1',
+ $self->{solutionFileVersion} = '12.00';
+ $self->{vcver} = '12.00';
+ $self->{visualStudioName} = 'Visual Studio 2013';
+ $self->{VisualStudioVersion} = '12.0.21005.1',
+ $self->{MinimumVisualStudioVersion} = '10.0.40219.1',
- return $self;
+ return $self;
}
sub GetAdditionalHeaders
diff --git a/src/tools/msvc/gendef.pl b/src/tools/msvc/gendef.pl
index 7f79274b1c..8ccaab3551 100644
--- a/src/tools/msvc/gendef.pl
+++ b/src/tools/msvc/gendef.pl
@@ -13,10 +13,10 @@ use List::Util qw(max);
sub dumpsyms
{
- my ($objfile, $symfile) = @_;
- system("dumpbin /symbols /out:symbols.out $_ >NUL")
- && die "Could not call dumpbin";
- rename("symbols.out", $symfile);
+ my ($objfile, $symfile) = @_;
+ system("dumpbin /symbols /out:symbols.out $_ >NUL")
+ && die "Could not call dumpbin";
+ rename("symbols.out", $symfile);
}
# Given a symbol file path, loops over its contents
@@ -28,131 +28,143 @@ sub dumpsyms
#
sub extract_syms
{
- my ($symfile, $def) = @_;
- open(F, "<$symfile") || die "Could not open $symfile for $_\n";
- while (<F>)
- {
- # Expected symbol lines look like:
- #
- # 0 1 2 3 4 5 6
- # IDX SYMBOL SECT SYMTYPE SYMSTATIC SYMNAME
- # ------------------------------------------------------------------------
- # 02E 00000130 SECTA notype External | _standbyState
- # 02F 00000009 SECT9 notype Static | _LocalRecoveryInProgress
- # 064 00000020 SECTC notype () Static | _XLogCheckBuffer
- # 065 00000000 UNDEF notype () External | _BufferGetTag
- #
- # See http://msdn.microsoft.com/en-us/library/b842y285.aspx
- #
- # We're not interested in the symbol index or offset.
- #
- # SECT[ION] is only examined to see whether the symbol is defined in a
- # COFF section of the local object file; if UNDEF, it's a symbol to be
- # resolved at link time from another object so we can't export it.
- #
- # SYMTYPE is always notype for C symbols as there's no typeinfo and no
- # way to get the symbol type from name (de)mangling. However, we care
- # if "notype" is suffixed by "()" or not. The presence of () means the
- # symbol is a function, the absence means it isn't.
- #
- # SYMSTATIC indicates whether it's a compilation-unit local "static"
- # symbol ("Static"), or whether it's available for use from other
- # compilation units ("External"). We export all symbols that aren't
- # static as part of the whole program DLL interface to produce UNIX-like
- # default linkage.
- #
- # SYMNAME is, obviously, the symbol name. The leading underscore
- # indicates that the _cdecl calling convention is used. See
- # http://www.unixwiz.net/techtips/win32-callconv.html
- # http://www.codeproject.com/Articles/1388/Calling-Conventions-Demystified
- #
- s/notype \(\)/func/g;
- s/notype/data/g;
-
- my @pieces = split;
- # Skip file and section headers and other non-symbol entries
- next unless defined($pieces[0]) and $pieces[0] =~ /^[A-F0-9]{3,}$/;
- # Skip blank symbol names
- next unless $pieces[6];
- # Skip externs used from another compilation unit
- next if ($pieces[2] eq "UNDEF");
- # Skip static symbols
- next unless ($pieces[4] eq "External");
- # Skip some more MSVC-generated crud
- next if $pieces[6] =~ /^@/;
- next if $pieces[6] =~ /^\(/;
- # __real and __xmm are out-of-line floating point literals and
- # (for __xmm) their SIMD equivalents. They shouldn't be part
- # of the DLL interface.
- next if $pieces[6] =~ /^__real/;
- next if $pieces[6] =~ /^__xmm/;
- # __imp entries are imports from other DLLs, eg __imp__malloc .
- # (We should never have one of these that hasn't already been skipped
- # by the UNDEF test above, though).
- next if $pieces[6] =~ /^__imp/;
- # More under-documented internal crud
- next if $pieces[6] =~ /NULL_THUNK_DATA$/;
- next if $pieces[6] =~ /^__IMPORT_DESCRIPTOR/;
- next if $pieces[6] =~ /^__NULL_IMPORT/;
- # Skip string literals
- next if $pieces[6] =~ /^\?\?_C/;
-
- # We assume that if a symbol is defined as data, then as a function,
- # the linker will reject the binary anyway. So it's OK to just pick
- # whatever came last.
- $def->{$pieces[6]} = $pieces[3];
- }
- close(F);
+ my ($symfile, $def) = @_;
+ open(F, "<$symfile") || die "Could not open $symfile for $_\n";
+ while (<F>)
+ {
+
+ # Expected symbol lines look like:
+ #
+ # 0 1 2 3 4 5 6
+ # IDX SYMBOL SECT SYMTYPE SYMSTATIC SYMNAME
+ # ------------------------------------------------------------------------
+ # 02E 00000130 SECTA notype External | _standbyState
+ # 02F 00000009 SECT9 notype Static | _LocalRecoveryInProgress
+ # 064 00000020 SECTC notype () Static | _XLogCheckBuffer
+ # 065 00000000 UNDEF notype () External | _BufferGetTag
+ #
+ # See http://msdn.microsoft.com/en-us/library/b842y285.aspx
+ #
+ # We're not interested in the symbol index or offset.
+ #
+ # SECT[ION] is only examined to see whether the symbol is defined in a
+ # COFF section of the local object file; if UNDEF, it's a symbol to be
+ # resolved at link time from another object so we can't export it.
+ #
+ # SYMTYPE is always notype for C symbols as there's no typeinfo and no
+ # way to get the symbol type from name (de)mangling. However, we care
+ # if "notype" is suffixed by "()" or not. The presence of () means the
+ # symbol is a function, the absence means it isn't.
+ #
+ # SYMSTATIC indicates whether it's a compilation-unit local "static"
+ # symbol ("Static"), or whether it's available for use from other
+ # compilation units ("External"). We export all symbols that aren't
+ # static as part of the whole program DLL interface to produce UNIX-like
+ # default linkage.
+ #
+ # SYMNAME is, obviously, the symbol name. The leading underscore
+ # indicates that the _cdecl calling convention is used. See
+ # http://www.unixwiz.net/techtips/win32-callconv.html
+ # http://www.codeproject.com/Articles/1388/Calling-Conventions-Demystified
+ #
+ s/notype \(\)/func/g;
+ s/notype/data/g;
+
+ my @pieces = split;
+
+ # Skip file and section headers and other non-symbol entries
+ next unless defined($pieces[0]) and $pieces[0] =~ /^[A-F0-9]{3,}$/;
+
+ # Skip blank symbol names
+ next unless $pieces[6];
+
+ # Skip externs used from another compilation unit
+ next if ($pieces[2] eq "UNDEF");
+
+ # Skip static symbols
+ next unless ($pieces[4] eq "External");
+
+ # Skip some more MSVC-generated crud
+ next if $pieces[6] =~ /^@/;
+ next if $pieces[6] =~ /^\(/;
+
+ # __real and __xmm are out-of-line floating point literals and
+ # (for __xmm) their SIMD equivalents. They shouldn't be part
+ # of the DLL interface.
+ next if $pieces[6] =~ /^__real/;
+ next if $pieces[6] =~ /^__xmm/;
+
+ # __imp entries are imports from other DLLs, e.g. __imp__malloc.
+ # (We should never have one of these that hasn't already been skipped
+ # by the UNDEF test above, though).
+ next if $pieces[6] =~ /^__imp/;
+
+ # More under-documented internal crud
+ next if $pieces[6] =~ /NULL_THUNK_DATA$/;
+ next if $pieces[6] =~ /^__IMPORT_DESCRIPTOR/;
+ next if $pieces[6] =~ /^__NULL_IMPORT/;
+
+ # Skip string literals
+ next if $pieces[6] =~ /^\?\?_C/;
+
+ # We assume that if a symbol is defined as data, then as a function,
+ # the linker will reject the binary anyway. So it's OK to just pick
+ # whatever came last.
+ $def->{ $pieces[6] } = $pieces[3];
+ }
+ close(F);
}
sub writedef
{
- my ($deffile, $platform, $def) = @_;
- open(DEF, ">$deffile") || die "Could not write to $deffile\n";
- print DEF "EXPORTS\n";
- foreach my $f (sort keys %{$def})
- {
- my $isdata = $def->{$f} eq 'data';
- # Strip the leading underscore for win32, but not x64
- $f =~ s/^_//
- unless ($platform eq "x64");
-
- # Emit just the name if it's a function symbol, or emit the name
- # decorated with the DATA option for variables.
- if ($isdata) {
- print DEF " $f DATA\n";
- } else {
- print DEF " $f\n";
- }
- }
- close(DEF);
+ my ($deffile, $platform, $def) = @_;
+ open(DEF, ">$deffile") || die "Could not write to $deffile\n";
+ print DEF "EXPORTS\n";
+ foreach my $f (sort keys %{$def})
+ {
+ my $isdata = $def->{$f} eq 'data';
+
+ # Strip the leading underscore for win32, but not x64
+ $f =~ s/^_//
+ unless ($platform eq "x64");
+
+ # Emit just the name if it's a function symbol, or emit the name
+ # decorated with the DATA option for variables.
+ if ($isdata)
+ {
+ print DEF " $f DATA\n";
+ }
+ else
+ {
+ print DEF " $f\n";
+ }
+ }
+ close(DEF);
}
-sub usage {
- die ("Usage: gendef.pl <modulepath> <platform>\n"
- . " modulepath: path to dir with obj files, no trailing slash"
- . " platform: Win32 | x64");
+sub usage
+{
+ die( "Usage: gendef.pl <modulepath> <platform>\n"
+ . " modulepath: path to dir with obj files, no trailing slash"
+ . " platform: Win32 | x64");
}
usage()
- unless
- scalar(@ARGV) == 2
- && (($ARGV[0] =~ /\\([^\\]+$)/)
- && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64'));
+ unless scalar(@ARGV) == 2
+ && ( ($ARGV[0] =~ /\\([^\\]+$)/)
+ && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64'));
my $defname = uc $1;
-my $deffile = "$ARGV[0]/$defname.def";
+my $deffile = "$ARGV[0]/$defname.def";
my $platform = $ARGV[1];
# if the def file exists and is newer than all input object files, skip
# its creation
-if (
- -f $deffile
- && (-M $deffile > max( map { -M } <$ARGV[0]/*.obj> ))
- )
+if (-f $deffile
+ && (-M $deffile > max(map { -M } <$ARGV[0]/*.obj>)))
{
- print "Not re-generating $defname.DEF, file already exists.\n";
- exit(0);
+ print "Not re-generating $defname.DEF, file already exists.\n";
+ exit(0);
}
print "Generating $defname.DEF from directory $ARGV[0], platform $platform\n";
@@ -161,12 +173,12 @@ my %def = ();
while (<$ARGV[0]/*.obj>)
{
- my $objfile = $_;
- my $symfile = $objfile;
- $symfile =~ s/\.obj$/.sym/i;
- dumpsyms($objfile, $symfile);
- print ".";
- extract_syms($symfile, \%def);
+ my $objfile = $_;
+ my $symfile = $objfile;
+ $symfile =~ s/\.obj$/.sym/i;
+ dumpsyms($objfile, $symfile);
+ print ".";
+ extract_syms($symfile, \%def);
}
print "\n";
diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent
index 18d3effede..c5f1a7379e 100755
--- a/src/tools/pgindent/pgindent
+++ b/src/tools/pgindent/pgindent
@@ -25,12 +25,12 @@ my $extra_opts = "";
my ($typedefs_file, $typedef_str, $code_base, $excludes, $indent, $build);
my %options = (
- "typedefs=s" => \$typedefs_file,
- "list-of-typedefs=s" => \$typedef_str,
- "code-base=s" => \$code_base,
- "excludes=s" => \$excludes,
- "indent=s" => \$indent,
- "build" => \$build,);
+ "typedefs=s" => \$typedefs_file,
+ "list-of-typedefs=s" => \$typedef_str,
+ "code-base=s" => \$code_base,
+ "excludes=s" => \$excludes,
+ "indent=s" => \$indent,
+ "build" => \$build,);
GetOptions(%options) || die "bad command line argument\n";
run_build($code_base) if ($build);
@@ -394,8 +394,12 @@ sub entab
print $tmp_fh $source;
$tmp_fh->close();
- open(my $entab, '-|',
- "$entab -d -t8 -qc " . $tmp_fh->filename . " | $entab -t4 -qc | $entab -d -t4 -m");
+ open(
+ my $entab,
+ '-|',
+ "$entab -d -t8 -qc "
+ . $tmp_fh->filename
+ . " | $entab -t4 -qc | $entab -d -t4 -m");
local ($/) = undef;
$source = <$entab>;
close($entab);
diff --git a/src/tutorial/complex.c b/src/tutorial/complex.c
index 80655885d5..922784004e 100644
--- a/src/tutorial/complex.c
+++ b/src/tutorial/complex.c
@@ -139,7 +139,7 @@ complex_add(PG_FUNCTION_ARGS)
* It's essential that the comparison operators and support function for a
* B-tree index opclass always agree on the relative ordering of any two
* data values. Experience has shown that it's depressingly easy to write
- * unintentionally inconsistent functions. One way to reduce the odds of
+ * unintentionally inconsistent functions. One way to reduce the odds of
* making a mistake is to make all the functions simple wrappers around
* an internal three-way-comparison function, as we do here.
*****************************************************************************/
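
The wrapper discipline this comment recommends looks like the following in practice; a sketch modeled on the tutorial's own functions (Mag() is assumed to be the tutorial's magnitude macro):

    static int
    complex_abs_cmp_internal(Complex *a, Complex *b)
    {
        double      amag = Mag(a);
        double      bmag = Mag(b);

        if (amag < bmag)
            return -1;
        if (amag > bmag)
            return 1;
        return 0;
    }

    PG_FUNCTION_INFO_V1(complex_abs_lt);

    Datum
    complex_abs_lt(PG_FUNCTION_ARGS)
    {
        Complex    *a = (Complex *) PG_GETARG_POINTER(0);
        Complex    *b = (Complex *) PG_GETARG_POINTER(1);

        PG_RETURN_BOOL(complex_abs_cmp_internal(a, b) < 0);
    }
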