summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/backend/access/common/heaptuple.c4
-rw-r--r--src/backend/access/gin/ginvacuum.c10
-rw-r--r--src/backend/access/gin/ginxlog.c12
-rw-r--r--src/backend/access/gist/gistutil.c12
-rw-r--r--src/backend/access/hash/hashfunc.c8
-rw-r--r--src/backend/access/heap/heapam.c4
-rw-r--r--src/backend/access/heap/heapam_handler.c17
-rw-r--r--src/backend/access/heap/rewriteheap.c2
-rw-r--r--src/backend/access/heap/tuptoaster.c8
-rw-r--r--src/backend/access/heap/vacuumlazy.c18
-rw-r--r--src/backend/access/nbtree/nbtinsert.c10
-rw-r--r--src/backend/access/nbtree/nbtsearch.c6
-rw-r--r--src/backend/access/nbtree/nbtsort.c8
-rw-r--r--src/backend/access/nbtree/nbtutils.c2
-rw-r--r--src/backend/access/spgist/spgscan.c18
-rw-r--r--src/backend/access/spgist/spgtextproc.c4
-rw-r--r--src/backend/access/spgist/spgvacuum.c6
-rw-r--r--src/backend/access/table/tableam.c6
-rw-r--r--src/backend/access/transam/xact.c12
-rw-r--r--src/backend/access/transam/xlog.c6
-rw-r--r--src/backend/catalog/aclchk.c8
-rw-r--r--src/backend/catalog/catalog.c16
-rw-r--r--src/backend/catalog/heap.c4
-rw-r--r--src/backend/catalog/index.c49
-rw-r--r--src/backend/catalog/objectaddress.c2
-rw-r--r--src/backend/catalog/pg_aggregate.c9
-rw-r--r--src/backend/catalog/pg_proc.c16
-rw-r--r--src/backend/catalog/pg_publication.c2
-rw-r--r--src/backend/catalog/storage.c2
-rw-r--r--src/backend/commands/amcmds.c2
-rw-r--r--src/backend/commands/cluster.c6
-rw-r--r--src/backend/commands/constraint.c4
-rw-r--r--src/backend/commands/dbcommands.c2
-rw-r--r--src/backend/commands/explain.c18
-rw-r--r--src/backend/commands/extension.c12
-rw-r--r--src/backend/commands/indexcmds.c16
-rw-r--r--src/backend/commands/statscmds.c6
-rw-r--r--src/backend/commands/tablecmds.c89
-rw-r--r--src/backend/commands/tablespace.c6
-rw-r--r--src/backend/commands/trigger.c6
-rw-r--r--src/backend/commands/vacuum.c27
-rw-r--r--src/backend/executor/execExpr.c6
-rw-r--r--src/backend/executor/execExprInterp.c2
-rw-r--r--src/backend/executor/execMain.c2
-rw-r--r--src/backend/executor/execParallel.c4
-rw-r--r--src/backend/executor/execPartition.c24
-rw-r--r--src/backend/executor/execReplication.c22
-rw-r--r--src/backend/executor/execScan.c3
-rw-r--r--src/backend/executor/execTuples.c33
-rw-r--r--src/backend/executor/functions.c4
-rw-r--r--src/backend/executor/nodeAgg.c2
-rw-r--r--src/backend/executor/nodeGatherMerge.c4
-rw-r--r--src/backend/executor/nodeHashjoin.c2
-rw-r--r--src/backend/executor/nodeIndexonlyscan.c6
-rw-r--r--src/backend/executor/nodeIndexscan.c2
-rw-r--r--src/backend/executor/nodeLockRows.c2
-rw-r--r--src/backend/executor/nodeModifyTable.c6
-rw-r--r--src/backend/executor/nodeSeqscan.c4
-rw-r--r--src/backend/executor/nodeSubplan.c2
-rw-r--r--src/backend/executor/nodeSubqueryscan.c1
-rw-r--r--src/backend/libpq/auth.c4
-rw-r--r--src/backend/libpq/be-secure-openssl.c21
-rw-r--r--src/backend/nodes/bitmapset.c8
-rw-r--r--src/backend/optimizer/path/clausesel.c6
-rw-r--r--src/backend/optimizer/util/inherit.c1
-rw-r--r--src/backend/optimizer/util/pathnode.c2
-rw-r--r--src/backend/parser/parse_utilcmd.c38
-rw-r--r--src/backend/partitioning/partdesc.c18
-rw-r--r--src/backend/replication/libpqwalreceiver/libpqwalreceiver.c2
-rw-r--r--src/backend/replication/logical/reorderbuffer.c14
-rw-r--r--src/backend/replication/slotfuncs.c10
-rw-r--r--src/backend/replication/syncrep.c5
-rw-r--r--src/backend/replication/walreceiver.c10
-rw-r--r--src/backend/replication/walsender.c6
-rw-r--r--src/backend/statistics/dependencies.c8
-rw-r--r--src/backend/statistics/extended_stats.c39
-rw-r--r--src/backend/statistics/mcv.c50
-rw-r--r--src/backend/storage/buffer/bufmgr.c10
-rw-r--r--src/backend/storage/file/fd.c2
-rw-r--r--src/backend/storage/ipc/latch.c6
-rw-r--r--src/backend/storage/ipc/pmsignal.c2
-rw-r--r--src/backend/storage/ipc/signalfuncs.c2
-rw-r--r--src/backend/storage/lmgr/lmgr.c5
-rw-r--r--src/backend/storage/smgr/smgr.c8
-rw-r--r--src/backend/storage/sync/sync.c4
-rw-r--r--src/backend/tcop/dest.c4
-rw-r--r--src/backend/utils/adt/formatting.c11
-rw-r--r--src/backend/utils/adt/genfile.c2
-rw-r--r--src/backend/utils/adt/geo_ops.c36
-rw-r--r--src/backend/utils/adt/json.c4
-rw-r--r--src/backend/utils/adt/jsonb.c2
-rw-r--r--src/backend/utils/adt/like.c2
-rw-r--r--src/backend/utils/adt/like_support.c6
-rw-r--r--src/backend/utils/adt/numutils.c4
-rw-r--r--src/backend/utils/adt/regexp.c35
-rw-r--r--src/backend/utils/adt/ri_triggers.c70
-rw-r--r--src/backend/utils/adt/ruleutils.c2
-rw-r--r--src/backend/utils/adt/varchar.c16
-rw-r--r--src/backend/utils/adt/varlena.c16
-rw-r--r--src/backend/utils/cache/relcache.c6
-rw-r--r--src/backend/utils/cache/relmapper.c4
-rw-r--r--src/backend/utils/cache/syscache.c2
-rw-r--r--src/backend/utils/fmgr/fmgr.c2
-rw-r--r--src/backend/utils/hash/hashfn.c1
-rw-r--r--src/backend/utils/init/globals.c2
-rw-r--r--src/backend/utils/init/miscinit.c4
-rw-r--r--src/backend/utils/mb/mbutils.c6
-rw-r--r--src/backend/utils/misc/guc.c32
-rw-r--r--src/backend/utils/mmgr/dsa.c3
-rw-r--r--src/bin/initdb/initdb.c16
-rw-r--r--src/bin/pg_archivecleanup/pg_archivecleanup.c3
-rw-r--r--src/bin/pg_basebackup/pg_receivewal.c4
-rw-r--r--src/bin/pg_basebackup/pg_recvlogical.c12
-rw-r--r--src/bin/pg_basebackup/receivelog.c12
-rw-r--r--src/bin/pg_dump/common.c6
-rw-r--r--src/bin/pg_dump/compress_io.c6
-rw-r--r--src/bin/pg_dump/parallel.c20
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.c93
-rw-r--r--src/bin/pg_dump/pg_backup_custom.c16
-rw-r--r--src/bin/pg_dump/pg_backup_db.c22
-rw-r--r--src/bin/pg_dump/pg_backup_directory.c10
-rw-r--r--src/bin/pg_dump/pg_backup_tar.c22
-rw-r--r--src/bin/pg_dump/pg_dump.c131
-rw-r--r--src/bin/pg_dump/pg_dumpall.c6
-rw-r--r--src/bin/pg_dump/pg_restore.c2
-rw-r--r--src/bin/pg_rewind/filemap.c10
-rw-r--r--src/bin/pg_rewind/libpq_fetch.c4
-rw-r--r--src/bin/pg_rewind/parsexlog.c2
-rw-r--r--src/bin/pg_rewind/pg_rewind.c22
-rw-r--r--src/bin/pg_upgrade/controldata.c13
-rw-r--r--src/bin/pg_upgrade/function.c22
-rw-r--r--src/bin/pg_waldump/pg_waldump.c12
-rw-r--r--src/bin/pgbench/pgbench.c14
-rw-r--r--src/bin/psql/command.c26
-rw-r--r--src/bin/psql/common.c12
-rw-r--r--src/bin/psql/copy.c2
-rw-r--r--src/bin/psql/crosstabview.c12
-rw-r--r--src/bin/psql/describe.c102
-rw-r--r--src/bin/psql/startup.c2
-rw-r--r--src/bin/psql/tab-complete.c4
-rw-r--r--src/bin/psql/variables.c6
-rw-r--r--src/common/d2s.c10
-rw-r--r--src/common/f2s.c12
-rw-r--r--src/common/file_utils.c2
-rw-r--r--src/common/logging.c18
-rw-r--r--src/common/pg_lzcompress.c11
-rw-r--r--src/common/rmtree.c6
-rw-r--r--src/include/access/amapi.h2
-rw-r--r--src/include/access/gistxlog.h10
-rw-r--r--src/include/access/hio.h2
-rw-r--r--src/include/access/relscan.h2
-rw-r--r--src/include/access/spgist_private.h2
-rw-r--r--src/include/access/spgxlog.h2
-rw-r--r--src/include/access/tableam.h28
-rw-r--r--src/include/access/tupdesc.h4
-rw-r--r--src/include/access/xlog.h2
-rw-r--r--src/include/access/xlog_internal.h2
-rw-r--r--src/include/catalog/dependency.h7
-rw-r--r--src/include/catalog/index.h12
-rw-r--r--src/include/catalog/pg_attrdef.h3
-rw-r--r--src/include/catalog/pg_default_acl.h3
-rw-r--r--src/include/catalog/pg_policy.h3
-rw-r--r--src/include/catalog/storage.h2
-rw-r--r--src/include/commands/defrem.h2
-rw-r--r--src/include/commands/trigger.h4
-rw-r--r--src/include/commands/vacuum.h4
-rw-r--r--src/include/common/file_utils.h4
-rw-r--r--src/include/common/logging.h16
-rw-r--r--src/include/executor/execParallel.h2
-rw-r--r--src/include/executor/executor.h30
-rw-r--r--src/include/executor/tuptable.h56
-rw-r--r--src/include/libpq/libpq-be.h2
-rw-r--r--src/include/miscadmin.h2
-rw-r--r--src/include/nodes/execnodes.h32
-rw-r--r--src/include/nodes/parsenodes.h4
-rw-r--r--src/include/nodes/plannodes.h4
-rw-r--r--src/include/parser/parse_node.h4
-rw-r--r--src/include/pgstat.h2
-rw-r--r--src/include/port.h1
-rw-r--r--src/include/replication/logical.h4
-rw-r--r--src/include/replication/reorderbuffer.h4
-rw-r--r--src/include/statistics/extended_stats_internal.h4
-rw-r--r--src/include/statistics/statistics.h2
-rw-r--r--src/include/storage/fd.h2
-rw-r--r--src/include/storage/md.h4
-rw-r--r--src/include/tcop/deparse_utility.h2
-rw-r--r--src/include/utils/datum.h2
-rw-r--r--src/interfaces/ecpg/ecpglib/cursor.c42
-rw-r--r--src/interfaces/ecpg/ecpglib/descriptor.c9
-rw-r--r--src/interfaces/ecpg/ecpglib/ecpglib_extern.h14
-rw-r--r--src/interfaces/ecpg/ecpglib/error.c4
-rw-r--r--src/interfaces/ecpg/ecpglib/execute.c45
-rw-r--r--src/interfaces/ecpg/ecpglib/prepare.c124
-rw-r--r--src/interfaces/ecpg/include/ecpglib.h6
-rw-r--r--src/interfaces/ecpg/preproc/ecpg.c8
-rw-r--r--src/interfaces/ecpg/preproc/output.c14
-rw-r--r--src/interfaces/ecpg/preproc/preproc_extern.h2
-rw-r--r--src/interfaces/ecpg/preproc/type.h2
-rw-r--r--src/interfaces/libpq/fe-connect.c3
-rw-r--r--src/interfaces/libpq/libpq-fe.h2
-rw-r--r--src/interfaces/libpq/pqexpbuffer.c7
-rw-r--r--src/pl/plpgsql/src/pl_exec.c60
-rw-r--r--src/pl/plpgsql/src/plpgsql.h2
-rw-r--r--src/pl/plpython/plpy_exec.c4
-rw-r--r--src/pl/tcl/pltcl.c4
-rw-r--r--src/port/dlopen.c2
-rw-r--r--src/port/pg_bitutils.c4
-rw-r--r--src/port/strtof.c13
-rw-r--r--src/test/regress/pg_regress.c4
-rw-r--r--src/test/regress/regress.c2
-rw-r--r--src/tools/pgindent/typedefs.list134
211 files changed, 1388 insertions, 1266 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 783b04a3cb..a48a6cd757 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -787,8 +787,8 @@ expand_tuple(HeapTuple *targetHeapTuple,
}
/*
- * Now walk the missing attributes. If there is a missing value
- * make space for it. Otherwise, it's going to be NULL.
+ * Now walk the missing attributes. If there is a missing value make
+ * space for it. Otherwise, it's going to be NULL.
*/
for (attnum = firstmissingnum;
attnum < natts;
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index b9a28d1863..dc46f2460e 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -394,17 +394,17 @@ ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
* There is at least one empty page. So we have to rescan the tree
* deleting empty pages.
*/
- Buffer buffer;
+ Buffer buffer;
DataPageDeleteStack root,
- *ptr,
- *tmp;
+ *ptr,
+ *tmp;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
RBM_NORMAL, gvs->strategy);
/*
- * Lock posting tree root for cleanup to ensure there are no concurrent
- * inserts.
+ * Lock posting tree root for cleanup to ensure there are no
+ * concurrent inserts.
*/
LockBufferForCleanup(buffer);
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index b648af1ff6..c945b28272 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -205,8 +205,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
while (segno < a_segno)
{
/*
- * Once modification is started and page tail is copied, we've
- * to copy unmodified segments.
+ * Once modification is started and page tail is copied, we've to
+ * copy unmodified segments.
*/
segsize = SizeOfGinPostingList(oldseg);
if (tailCopy)
@@ -257,12 +257,12 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
}
/*
- * We're about to start modification of the page. So, copy tail of the
- * page if it's not done already.
+ * We're about to start modification of the page. So, copy tail of
+ * the page if it's not done already.
*/
if (!tailCopy && segptr != segmentend)
{
- int tailSize = segmentend - segptr;
+ int tailSize = segmentend - segptr;
tailCopy = (Pointer) palloc(tailSize);
memcpy(tailCopy, segptr, tailSize);
@@ -304,7 +304,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
segptr = (Pointer) oldseg;
if (segptr != segmentend && tailCopy)
{
- int restSize = segmentend - segptr;
+ int restSize = segmentend - segptr;
Assert(writePtr + restSize <= PageGetSpecialPointer(page));
memcpy(writePtr, segptr, restSize);
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 94b6ad6a59..49df05653b 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -839,16 +839,16 @@ gistNewBuffer(Relation r)
gistcheckpage(r, buffer);
/*
- * Otherwise, recycle it if deleted, and too old to have any processes
- * interested in it.
+ * Otherwise, recycle it if deleted, and too old to have any
+ * processes interested in it.
*/
if (gistPageRecyclable(page))
{
/*
- * If we are generating WAL for Hot Standby then create a
- * WAL record that will allow us to conflict with queries
- * running on standby, in case they have snapshots older
- * than the page's deleteXid.
+ * If we are generating WAL for Hot Standby then create a WAL
+ * record that will allow us to conflict with queries running
+ * on standby, in case they have snapshots older than the
+ * page's deleteXid.
*/
if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 0bf15ae723..6ec1ec3df3 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -246,7 +246,7 @@ hashtext(PG_FUNCTION_ARGS)
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
@@ -271,7 +271,7 @@ hashtext(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
@@ -302,7 +302,7 @@ hashtextextended(PG_FUNCTION_ARGS)
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
@@ -328,7 +328,7 @@ hashtextextended(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 19d2c529d8..723e153705 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1684,8 +1684,8 @@ void
heap_get_latest_tid(TableScanDesc sscan,
ItemPointer tid)
{
- Relation relation = sscan->rs_rd;
- Snapshot snapshot = sscan->rs_snapshot;
+ Relation relation = sscan->rs_rd;
+ Snapshot snapshot = sscan->rs_snapshot;
ItemPointerData ctid;
TransactionId priorXmax;
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 56b2abda5f..674c1d3a81 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -474,6 +474,7 @@ tuple_lock_retry:
HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
{
tmfd->xmax = priorXmax;
+
/*
* Cmin is the problematic value, so store that. See
* above.
@@ -1172,7 +1173,7 @@ heapam_index_build_range_scan(Relation heapRelation,
Snapshot snapshot;
bool need_unregister_snapshot = false;
TransactionId OldestXmin;
- BlockNumber previous_blkno = InvalidBlockNumber;
+ BlockNumber previous_blkno = InvalidBlockNumber;
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
@@ -1263,7 +1264,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Publish number of blocks to scan */
if (progress)
{
- BlockNumber nblocks;
+ BlockNumber nblocks;
if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1314,7 +1315,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Report scan progress, if asked to. */
if (progress)
{
- BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
+ BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
if (blocks_done != previous_blkno)
{
@@ -1668,7 +1669,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Report scan progress one last time. */
if (progress)
{
- BlockNumber blks_done;
+ BlockNumber blks_done;
if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1720,7 +1721,7 @@ heapam_index_validate_scan(Relation heapRelation,
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
bool in_index[MaxHeapTuplesPerPage];
- BlockNumber previous_blkno = InvalidBlockNumber;
+ BlockNumber previous_blkno = InvalidBlockNumber;
/* state variables for the merge */
ItemPointer indexcursor = NULL;
@@ -1955,8 +1956,8 @@ static BlockNumber
heapam_scan_get_blocks_done(HeapScanDesc hscan)
{
ParallelBlockTableScanDesc bpscan = NULL;
- BlockNumber startblock;
- BlockNumber blocks_done;
+ BlockNumber startblock;
+ BlockNumber blocks_done;
if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1974,7 +1975,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan)
blocks_done = hscan->rs_cblock - startblock;
else
{
- BlockNumber nblocks;
+ BlockNumber nblocks;
nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
blocks_done = nblocks - startblock +
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index bce4274362..131ec7b8d7 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -652,7 +652,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
}
else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
{
- int options = HEAP_INSERT_SKIP_FSM;
+ int options = HEAP_INSERT_SKIP_FSM;
if (!state->rs_use_wal)
options |= HEAP_INSERT_SKIP_WAL;
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 74e957abb7..e10715a775 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -2295,16 +2295,16 @@ static struct varlena *
toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
{
struct varlena *result;
- int32 rawsize;
+ int32 rawsize;
Assert(VARATT_IS_COMPRESSED(attr));
result = (struct varlena *) palloc(slicelength + VARHDRSZ);
rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
- VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
- VARDATA(result),
- slicelength, false);
+ VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
+ VARDATA(result),
+ slicelength, false);
if (rawsize < 0)
elog(ERROR, "compressed data is corrupted");
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 9e17acc110..637e47c08c 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -164,7 +164,7 @@ static void lazy_cleanup_index(Relation indrel,
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
static bool should_attempt_truncation(VacuumParams *params,
- LVRelStats *vacrelstats);
+ LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats);
@@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* cheaper to get rid of it in the next pruning pass than
* to treat it like an indexed tuple. Finally, if index
* cleanup is disabled, the second heap pass will not
- * execute, and the tuple will not get removed, so we
- * must treat it like any other dead tuple that we choose
- * to keep.
+ * execute, and the tuple will not get removed, so we must
+ * treat it like any other dead tuple that we choose to
+ * keep.
*
* If this were to happen for a tuple that actually needed
* to be deleted, we'd be in trouble, because it'd
@@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
all_visible = false;
break;
case HEAPTUPLE_LIVE:
+
/*
* Count it as live. Not only is this natural, but it's
* also what acquire_sample_rows() does.
@@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
else
{
/*
- * Here, we have indexes but index cleanup is disabled. Instead of
- * vacuuming the dead tuples on the heap, we just forget them.
+ * Here, we have indexes but index cleanup is disabled.
+ * Instead of vacuuming the dead tuples on the heap, we just
+ * forget them.
*
* Note that vacrelstats->dead_tuples could have tuples which
* became dead after HOT-pruning but are not marked dead yet.
- * We do not process them because it's a very rare condition, and
- * the next vacuum will process them anyway.
+ * We do not process them because it's a very rare condition,
+ * and the next vacuum will process them anyway.
*/
Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
}
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 0a9472c71b..36a570045a 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -1811,11 +1811,11 @@ _bt_insert_parent(Relation rel,
/*
* Re-find and write lock the parent of buf.
*
- * It's possible that the location of buf's downlink has changed
- * since our initial _bt_search() descent. _bt_getstackbuf() will
- * detect and recover from this, updating the stack, which ensures
- * that the new downlink will be inserted at the correct offset.
- * Even buf's parent may have changed.
+ * It's possible that the location of buf's downlink has changed since
+ * our initial _bt_search() descent. _bt_getstackbuf() will detect
+ * and recover from this, updating the stack, which ensures that the
+ * new downlink will be inserted at the correct offset. Even buf's
+ * parent may have changed.
*/
stack->bts_btentry = bknum;
pbuf = _bt_getstackbuf(rel, stack);
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 5906c41f31..dc42213ac6 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -166,8 +166,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
new_stack->bts_parent = stack_in;
/*
- * Page level 1 is lowest non-leaf page level prior to leaves. So,
- * if we're on the level 1 and asked to lock leaf page in write mode,
+ * Page level 1 is lowest non-leaf page level prior to leaves. So, if
+ * we're on the level 1 and asked to lock leaf page in write mode,
* then lock next page in write mode, because it must be a leaf.
*/
if (opaque->btpo.level == 1 && access == BT_WRITE)
@@ -1235,7 +1235,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* Initialize remaining insertion scan key fields */
inskey.heapkeyspace = _bt_heapkeyspace(rel);
- inskey.anynullkeys = false; /* unusued */
+ inskey.anynullkeys = false; /* unused */
inskey.nextkey = nextkey;
inskey.pivotsearch = false;
inskey.scantid = NULL;
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 0b5be776d6..d6fa574238 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -962,10 +962,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
* much smaller.
*
* Since the truncated tuple is often smaller than the original
- * tuple, it cannot just be copied in place (besides, we want
- * to actually save space on the leaf page). We delete the
- * original high key, and add our own truncated high key at the
- * same offset.
+ * tuple, it cannot just be copied in place (besides, we want to
+ * actually save space on the leaf page). We delete the original
+ * high key, and add our own truncated high key at the same
+ * offset.
*
* Note that the page layout won't be changed very much. oitup is
* already located at the physical beginning of tuple space, so we
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 77c9c7285c..1238d544cd 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
key = palloc(offsetof(BTScanInsertData, scankeys) +
sizeof(ScanKeyData) * indnkeyatts);
key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel);
- key->anynullkeys = false; /* initial assumption */
+ key->anynullkeys = false; /* initial assumption */
key->nextkey = false;
key->pivotsearch = false;
key->keysz = Min(indnkeyatts, tupnatts);
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 9365bc57ad..7bc5ec09bf 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -39,8 +39,8 @@ static int
pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
const pairingheap_node *b, void *arg)
{
- const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
- const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
+ const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
+ const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
SpGistScanOpaque so = (SpGistScanOpaque) arg;
int i;
@@ -79,7 +79,7 @@ pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
}
static void
-spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
+spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
{
if (!so->state.attLeafType.attbyval &&
DatumGetPointer(item->value) != NULL)
@@ -97,7 +97,7 @@ spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
* Called in queue context
*/
static void
-spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
+spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
{
pairingheap_add(so->scanQueue, &item->phNode);
}
@@ -439,7 +439,7 @@ spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr,
* the scan is not ordered AND the item satisfies the scankeys
*/
static bool
-spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistLeafTuple leafTuple, bool isnull,
bool *reportedSome, storeRes_func storeRes)
{
@@ -530,7 +530,7 @@ spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
static void
spgInitInnerConsistentIn(spgInnerConsistentIn *in,
SpGistScanOpaque so,
- SpGistSearchItem * item,
+ SpGistSearchItem *item,
SpGistInnerTuple innerTuple)
{
in->scankeys = so->keyData;
@@ -551,7 +551,7 @@ spgInitInnerConsistentIn(spgInnerConsistentIn *in,
static SpGistSearchItem *
spgMakeInnerItem(SpGistScanOpaque so,
- SpGistSearchItem * parentItem,
+ SpGistSearchItem *parentItem,
SpGistNodeTuple tuple,
spgInnerConsistentOut *out, int i, bool isnull,
double *distances)
@@ -585,7 +585,7 @@ spgMakeInnerItem(SpGistScanOpaque so,
}
static void
-spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistInnerTuple innerTuple, bool isnull)
{
MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
@@ -683,7 +683,7 @@ enum SpGistSpecialOffsetNumbers
static OffsetNumber
spgTestLeafTuple(SpGistScanOpaque so,
- SpGistSearchItem * item,
+ SpGistSearchItem *item,
Page page, OffsetNumber offset,
bool isnull, bool isroot,
bool *reportedSome,
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index d22998c54b..a7c1a09e05 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -632,8 +632,8 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
res = (level >= queryLen) ||
DatumGetBool(DirectFunctionCall2Coll(text_starts_with,
PG_GET_COLLATION(),
- out->leafValue,
- PointerGetDatum(query)));
+ out->leafValue,
+ PointerGetDatum(query)));
if (!res) /* no need to consider remaining conditions */
break;
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index fc85c6f940..2b1662a267 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -192,9 +192,9 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* happened since VACUUM started.
*
* Note: we could make a tighter test by seeing if the xid is
- * "running" according to the active snapshot; but snapmgr.c doesn't
- * currently export a suitable API, and it's not entirely clear
- * that a tighter test is worth the cycles anyway.
+ * "running" according to the active snapshot; but snapmgr.c
+ * doesn't currently export a suitable API, and it's not entirely
+ * clear that a tighter test is worth the cycles anyway.
*/
if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
spgAddPendingTID(bds, &dt->pointer);
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index c3455bc48b..12adf59085 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -94,7 +94,7 @@ TableScanDesc
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
@@ -158,7 +158,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
@@ -223,7 +223,7 @@ table_index_fetch_tuple_check(Relation rel,
void
table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
{
- Relation rel = scan->rs_rd;
+ Relation rel = scan->rs_rd;
const TableAmRoutine *tableam = rel->rd_tableam;
/*
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 20feeec327..b40da74e09 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -570,9 +570,9 @@ AssignTransactionId(TransactionState s)
/*
* Ensure parent(s) have XIDs, so that a child always has an XID later
- * than its parent. Mustn't recurse here, or we might get a stack overflow
- * if we're at the bottom of a huge stack of subtransactions none of which
- * have XIDs yet.
+ * than its parent. Mustn't recurse here, or we might get a stack
+ * overflow if we're at the bottom of a huge stack of subtransactions none
+ * of which have XIDs yet.
*/
if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
{
@@ -2868,8 +2868,8 @@ StartTransactionCommand(void)
* just skipping the reset in StartTransaction() won't work.)
*/
static int save_XactIsoLevel;
-static bool save_XactReadOnly;
-static bool save_XactDeferrable;
+static bool save_XactReadOnly;
+static bool save_XactDeferrable;
void
SaveTransactionCharacteristics(void)
@@ -5193,7 +5193,7 @@ SerializeTransactionState(Size maxsize, char *start_address)
nxids = add_size(nxids, s->nChildXids);
}
Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId)
- <= maxsize);
+ <= maxsize);
/* Copy them to our scratch space. */
workspace = palloc(nxids * sizeof(TransactionId));
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 527522f165..c7c9e91b6a 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -6397,9 +6397,9 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("could not find redo location referenced by checkpoint record"),
errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n"
- "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
- "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
- DataDir, DataDir, DataDir)));
+ "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+ "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+ DataDir, DataDir, DataDir)));
}
}
else
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index a600f43a67..f0fdda1eb9 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -848,7 +848,7 @@ objectsInSchemaToOids(ObjectType objtype, List *nspnames)
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
+ Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
objects = lappend_oid(objects, oid);
}
@@ -895,7 +895,7 @@ getRelationsInNamespace(Oid namespaceId, char relkind)
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
+ Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
relations = lappend_oid(relations, oid);
}
@@ -1311,7 +1311,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
}
else
{
- Oid defAclOid;
+ Oid defAclOid;
/* Prepare to insert or update pg_default_acl entry */
MemSet(values, 0, sizeof(values));
@@ -1384,7 +1384,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
if (isNew)
InvokeObjectPostCreateHook(DefaultAclRelationId, defAclOid, 0);
else
- InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
+ InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
}
if (HeapTupleIsValid(tuple))
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 2878e6a5b0..11936a6571 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -476,15 +476,15 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
Datum
pg_nextoid(PG_FUNCTION_ARGS)
{
- Oid reloid = PG_GETARG_OID(0);
- Name attname = PG_GETARG_NAME(1);
- Oid idxoid = PG_GETARG_OID(2);
- Relation rel;
- Relation idx;
- HeapTuple atttuple;
+ Oid reloid = PG_GETARG_OID(0);
+ Name attname = PG_GETARG_NAME(1);
+ Oid idxoid = PG_GETARG_OID(2);
+ Relation rel;
+ Relation idx;
+ HeapTuple atttuple;
Form_pg_attribute attform;
- AttrNumber attno;
- Oid newoid;
+ AttrNumber attno;
+ Oid newoid;
/*
* As this function is not intended to be used during normal running, and
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 6cffe550b3..3c46c25107 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -2550,8 +2550,8 @@ AddRelationNewConstraints(Relation rel,
/*
* If the expression is just a NULL constant, we do not bother to make
* an explicit pg_attrdef entry, since the default behavior is
- * equivalent. This applies to column defaults, but not for generation
- * expressions.
+ * equivalent. This applies to column defaults, but not for
+ * generation expressions.
*
* Note a nonobvious property of this test: if the column is of a
* domain type, what we'll get is not a bare null Const but a
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index c8d22e1b65..b7d1ac0923 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1236,8 +1236,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
Anum_pg_class_reloptions, &isnull);
/*
- * Extract the list of column names to be used for the index
- * creation.
+ * Extract the list of column names to be used for the index creation.
*/
for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
@@ -1270,8 +1269,8 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
optionDatum,
INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT,
0,
- true, /* allow table to be a system catalog? */
- false, /* is_internal? */
+ true, /* allow table to be a system catalog? */
+ false, /* is_internal? */
NULL);
/* Close the relations used and clean up */
@@ -1540,7 +1539,7 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
values, nulls, replaces);
CatalogTupleUpdate(description, &tuple->t_self, tuple);
- break; /* Assume there can be only one match */
+ break; /* Assume there can be only one match */
}
systable_endscan(sd);
@@ -1552,8 +1551,8 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
*/
if (get_rel_relispartition(oldIndexId))
{
- List *ancestors = get_partition_ancestors(oldIndexId);
- Oid parentIndexRelid = linitial_oid(ancestors);
+ List *ancestors = get_partition_ancestors(oldIndexId);
+ Oid parentIndexRelid = linitial_oid(ancestors);
DeleteInheritsTuple(oldIndexId, parentIndexRelid);
StoreSingleInheritance(newIndexId, parentIndexRelid, 1);
@@ -1583,7 +1582,11 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
- /* The data will be sent by the next pgstat_report_stat() call. */
+
+ /*
+ * The data will be sent by the next pgstat_report_stat()
+ * call.
+ */
}
}
}
@@ -1614,27 +1617,26 @@ index_concurrently_set_dead(Oid heapId, Oid indexId)
Relation userIndexRelation;
/*
- * No more predicate locks will be acquired on this index, and we're
- * about to stop doing inserts into the index which could show
- * conflicts with existing predicate locks, so now is the time to move
- * them to the heap relation.
+ * No more predicate locks will be acquired on this index, and we're about
+ * to stop doing inserts into the index which could show conflicts with
+ * existing predicate locks, so now is the time to move them to the heap
+ * relation.
*/
userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
TransferPredicateLocksToHeapRelation(userIndexRelation);
/*
- * Now we are sure that nobody uses the index for queries; they just
- * might have it open for updating it. So now we can unset indisready
- * and indislive, then wait till nobody could be using it at all
- * anymore.
+ * Now we are sure that nobody uses the index for queries; they just might
+ * have it open for updating it. So now we can unset indisready and
+ * indislive, then wait till nobody could be using it at all anymore.
*/
index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
/*
- * Invalidate the relcache for the table, so that after this commit
- * all sessions will refresh the table's index list. Forgetting just
- * the index's relcache entry is not enough.
+ * Invalidate the relcache for the table, so that after this commit all
+ * sessions will refresh the table's index list. Forgetting just the
+ * index's relcache entry is not enough.
*/
CacheInvalidateRelcache(userHeapRelation);
@@ -1786,7 +1788,7 @@ index_constraint_create(Relation heapRelation,
*/
if (OidIsValid(parentConstraintId))
{
- ObjectAddress referenced;
+ ObjectAddress referenced;
ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId);
recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
@@ -2709,7 +2711,7 @@ index_build(Relation heapRelation,
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
- const int64 val[] = {
+ const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_BUILD,
PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE,
0, 0, 0, 0
@@ -3014,10 +3016,11 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
- const int64 val[] = {
+ const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
0, 0, 0, 0
};
+
pgstat_progress_update_multi_param(5, index, val);
}
@@ -3080,7 +3083,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
- const int64 val[] = {
+ const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT,
0, 0
};
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 8b51ec7f39..7a32ac1fb1 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -3050,7 +3050,7 @@ getObjectDescription(const ObjectAddress *object)
StringInfoData opfam;
amprocDesc = table_open(AccessMethodProcedureRelationId,
- AccessShareLock);
+ AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_amproc_oid,
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index cdc8d9453d..310d45266f 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -612,7 +612,7 @@ AggregateCreate(const char *aggName,
myself = ProcedureCreate(aggName,
aggNamespace,
- replace, /* maybe replacement */
+ replace, /* maybe replacement */
false, /* doesn't return a set */
finaltype, /* returnType */
GetUserId(), /* proowner */
@@ -693,10 +693,9 @@ AggregateCreate(const char *aggName,
/*
* If we're replacing an existing entry, we need to validate that
- * we're not changing anything that would break callers.
- * Specifically we must not change aggkind or aggnumdirectargs,
- * which affect how an aggregate call is treated in parse
- * analysis.
+ * we're not changing anything that would break callers. Specifically
+ * we must not change aggkind or aggnumdirectargs, which affect how an
+ * aggregate call is treated in parse analysis.
*/
if (aggKind != oldagg->aggkind)
ereport(ERROR,
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index fb22035a2a..3487caf82f 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -423,7 +423,11 @@ ProcedureCreate(const char *procedureName,
prokind == PROKIND_PROCEDURE
? errmsg("cannot change whether a procedure has output parameters")
: errmsg("cannot change return type of existing function"),
- /* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */
+
+ /*
+ * translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP
+ * AGGREGATE
+ */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -450,7 +454,7 @@ ProcedureCreate(const char *procedureName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change return type of existing function"),
errdetail("Row type defined by OUT parameters is different."),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -495,7 +499,7 @@ ProcedureCreate(const char *procedureName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change name of input parameter \"%s\"",
old_arg_names[j]),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -521,7 +525,7 @@ ProcedureCreate(const char *procedureName,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot remove parameter defaults from existing function"),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -549,7 +553,7 @@ ProcedureCreate(const char *procedureName,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change data type of existing parameter default value"),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -575,7 +579,7 @@ ProcedureCreate(const char *procedureName,
else
{
/* Creating a new procedure */
- Oid newOid;
+ Oid newOid;
/* First, get default permissions and set up proacl */
proacl = get_user_default_acl(OBJECT_FUNCTION, proowner,
diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c
index f8475c1aba..b3bf81ae63 100644
--- a/src/backend/catalog/pg_publication.c
+++ b/src/backend/catalog/pg_publication.c
@@ -317,7 +317,7 @@ GetAllTablesPublications(void)
result = NIL;
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
+ Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
result = lappend_oid(result, oid);
}
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index fb41f223ad..3cc886f7fe 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -99,7 +99,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
break;
default:
elog(ERROR, "invalid relpersistence: %c", relpersistence);
- return NULL; /* placate compiler */
+ return NULL; /* placate compiler */
}
srel = smgropen(rnode, backend);
diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c
index c1603737eb..c0e40980d5 100644
--- a/src/backend/commands/amcmds.c
+++ b/src/backend/commands/amcmds.c
@@ -61,7 +61,7 @@ CreateAccessMethod(CreateAmStmt *stmt)
errhint("Must be superuser to create an access method.")));
/* Check if name is used */
- amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
+ amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
CStringGetDatum(stmt->amname));
if (OidIsValid(amoid))
{
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 3ee7056047..cacc023619 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -70,8 +70,8 @@ typedef struct
static void rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose);
static void copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
- bool verbose, bool *pSwapToastByContent,
- TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
+ bool verbose, bool *pSwapToastByContent,
+ TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
static List *get_tables_to_cluster(MemoryContext cluster_context);
@@ -614,7 +614,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose)
/* Copy the heap data into the new table in the desired order */
copy_table_data(OIDNewHeap, tableOid, indexOid, verbose,
- &swap_toast_by_content, &frozenXid, &cutoffMulti);
+ &swap_toast_by_content, &frozenXid, &cutoffMulti);
/*
* Swap the physical files of the target and transient tables, then
diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c
index cd04e4ea81..806962a686 100644
--- a/src/backend/commands/constraint.c
+++ b/src/backend/commands/constraint.c
@@ -83,7 +83,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" must be fired for INSERT or UPDATE",
funcname)));
- ItemPointerSetInvalid(&checktid); /* keep compiler quiet */
+ ItemPointerSetInvalid(&checktid); /* keep compiler quiet */
}
slot = table_slot_create(trigdata->tg_relation, NULL);
@@ -109,7 +109,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
tmptid = checktid;
{
IndexFetchTableData *scan = table_index_fetch_begin(trigdata->tg_relation);
- bool call_again = false;
+ bool call_again = false;
if (!table_index_fetch_tuple(scan, &tmptid, SnapshotSelf, slot,
&call_again, NULL))
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 9707afabd9..5015e5b3b6 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -2033,7 +2033,7 @@ get_database_oid(const char *dbname, bool missing_ok)
/* We assume that there can be at most one matching tuple */
if (HeapTupleIsValid(dbtuple))
- oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid;
+ oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid;
else
oid = InvalidOid;
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index a6c6de78f1..039a87c155 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -606,7 +606,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
static void
ExplainPrintSettings(ExplainState *es)
{
- int num;
+ int num;
struct config_generic **gucs;
/* bail out if information about settings not requested */
@@ -622,13 +622,13 @@ ExplainPrintSettings(ExplainState *es)
if (es->format != EXPLAIN_FORMAT_TEXT)
{
- int i;
+ int i;
ExplainOpenGroup("Settings", "Settings", true, es);
for (i = 0; i < num; i++)
{
- char *setting;
+ char *setting;
struct config_generic *conf = gucs[i];
setting = GetConfigOptionByName(conf->name, NULL, true);
@@ -640,14 +640,14 @@ ExplainPrintSettings(ExplainState *es)
}
else
{
- int i;
- StringInfoData str;
+ int i;
+ StringInfoData str;
initStringInfo(&str);
for (i = 0; i < num; i++)
{
- char *setting;
+ char *setting;
struct config_generic *conf = gucs[i];
if (i > 0)
@@ -705,8 +705,8 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
ExplainNode(ps, NIL, NULL, NULL, es);
/*
- * If requested, include information about GUC parameters with values
- * that don't match the built-in defaults.
+ * If requested, include information about GUC parameters with values that
+ * don't match the built-in defaults.
*/
ExplainPrintSettings(es);
}
@@ -1674,7 +1674,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
if (es->costs && es->verbose &&
outerPlanState(planstate)->worker_jit_instrument)
{
- PlanState *child = outerPlanState(planstate);
+ PlanState *child = outerPlanState(planstate);
int n;
SharedJitInstrumentation *w = child->worker_jit_instrument;
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index d4723fced8..300bb1261f 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -903,9 +903,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
t_sql = DirectFunctionCall3Coll(replace_text,
C_COLLATION_OID,
- t_sql,
- CStringGetTextDatum("@extschema@"),
- CStringGetTextDatum(qSchemaName));
+ t_sql,
+ CStringGetTextDatum("@extschema@"),
+ CStringGetTextDatum(qSchemaName));
}
/*
@@ -916,9 +916,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
{
t_sql = DirectFunctionCall3Coll(replace_text,
C_COLLATION_OID,
- t_sql,
- CStringGetTextDatum("MODULE_PATHNAME"),
- CStringGetTextDatum(control->module_pathname));
+ t_sql,
+ CStringGetTextDatum("MODULE_PATHNAME"),
+ CStringGetTextDatum(control->module_pathname));
}
/* And now back to C string */
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 7e7c03ef12..62a4c4fb9b 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -96,8 +96,8 @@ static void update_relispartition(Oid relationId, bool newval);
*/
struct ReindexIndexCallbackState
{
- bool concurrent; /* flag from statement */
- Oid locked_table_oid; /* tracks previously locked table */
+ bool concurrent; /* flag from statement */
+ Oid locked_table_oid; /* tracks previously locked table */
};
/*
@@ -396,7 +396,7 @@ WaitForOlderSnapshots(TransactionId limitXmin, bool progress)
{
if (progress)
{
- PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
+ PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
holder->pid);
@@ -984,7 +984,7 @@ DefineIndex(Oid relationId,
*/
if (partitioned && stmt->relation && !stmt->relation->inh)
{
- PartitionDesc pd = RelationGetPartitionDesc(rel);
+ PartitionDesc pd = RelationGetPartitionDesc(rel);
if (pd->nparts != 0)
flags |= INDEX_CREATE_INVALID;
@@ -3003,7 +3003,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
/* Get a session-level lock on each table. */
foreach(lc, relationLocks)
{
- LockRelId *lockrelid = (LockRelId *) lfirst(lc);
+ LockRelId *lockrelid = (LockRelId *) lfirst(lc);
LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}
@@ -3112,8 +3112,8 @@ ReindexRelationConcurrently(Oid relationOid, int options)
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted just
- * before the reference snap was taken, we have to wait out any
+ * interesting tuples. But since it might not contain tuples deleted
+ * just before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
@@ -3250,7 +3250,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
*/
foreach(lc, relationLocks)
{
- LockRelId *lockrelid = (LockRelId *) lfirst(lc);
+ LockRelId *lockrelid = (LockRelId *) lfirst(lc);
UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}
diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c
index a191916d03..95ec352abe 100644
--- a/src/backend/commands/statscmds.c
+++ b/src/backend/commands/statscmds.c
@@ -465,9 +465,9 @@ UpdateStatisticsForTypeChange(Oid statsOid, Oid relationOid, int attnum,
elog(ERROR, "cache lookup failed for statistics object %u", statsOid);
/*
- * When none of the defined statistics types contain datum values
- * from the table's columns then there's no need to reset the stats.
- * Functional dependencies and ndistinct stats should still hold true.
+ * When none of the defined statistics types contain datum values from the
+ * table's columns then there's no need to reset the stats. Functional
+ * dependencies and ndistinct stats should still hold true.
*/
if (!statext_is_kind_built(oldtup, STATS_EXT_MCV))
{
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index bfcf9472d7..7fa8dcce61 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -379,7 +379,7 @@ static void ATExecCheckNotNull(AlteredTableInfo *tab, Relation rel,
const char *colName, LOCKMODE lockmode);
static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr);
static bool ConstraintImpliedByRelConstraint(Relation scanrel,
- List *partConstraint, List *existedConstraints);
+ List *partConstraint, List *existedConstraints);
static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
Node *newDefault, LOCKMODE lockmode);
static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
@@ -1099,9 +1099,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
}
/*
- * Now add any newly specified CHECK constraints to the new relation.
- * Same as for defaults above, but these need to come after partitioning
- * is set up.
+ * Now add any newly specified CHECK constraints to the new relation. Same
+ * as for defaults above, but these need to come after partitioning is set
+ * up.
*/
if (stmt->constraints)
AddRelationNewConstraints(rel, NIL, stmt->constraints,
@@ -1401,9 +1401,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
*/
if (IsSystemClass(relOid, classform) && relkind == RELKIND_INDEX)
{
- HeapTuple locTuple;
- Form_pg_index indexform;
- bool indisvalid;
+ HeapTuple locTuple;
+ Form_pg_index indexform;
+ bool indisvalid;
locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(locTuple))
@@ -1786,6 +1786,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
{
Relation toastrel = relation_open(toast_relid,
AccessExclusiveLock);
+
RelationSetNewRelfilenode(toastrel,
toastrel->rd_rel->relpersistence);
table_close(toastrel, NoLock);
@@ -4336,6 +4337,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
/* nothing to do here, oid columns don't exist anymore */
break;
case AT_SetTableSpace: /* SET TABLESPACE */
+
/*
* Only do this for partitioned tables and indexes, for which this
* is just a catalog change. Other relation types which have
@@ -4626,8 +4628,8 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
{
/*
* If required, test the current data within the table against new
- * constraints generated by ALTER TABLE commands, but don't rebuild
- * data.
+ * constraints generated by ALTER TABLE commands, but don't
+ * rebuild data.
*/
if (tab->constraints != NIL || tab->verify_new_notnull ||
tab->partition_constraint != NULL)
@@ -4798,8 +4800,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
{
/*
* If we are rebuilding the tuples OR if we added any new but not
- * verified NOT NULL constraints, check all not-null constraints.
- * This is a bit of overkill but it minimizes risk of bugs, and
+ * verified NOT NULL constraints, check all not-null constraints. This
+ * is a bit of overkill but it minimizes risk of bugs, and
* heap_attisnull is a pretty cheap test anyway.
*/
for (i = 0; i < newTupDesc->natts; i++)
@@ -4941,8 +4943,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
{
/*
* If there's no rewrite, old and new table are guaranteed to
- * have the same AM, so we can just use the old slot to
- * verify new constraints etc.
+ * have the same AM, so we can just use the old slot to verify
+ * new constraints etc.
*/
insertslot = oldslot;
}
@@ -6209,9 +6211,8 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
/*
* Ordinarily phase 3 must ensure that no NULLs exist in columns that
* are set NOT NULL; however, if we can find a constraint which proves
- * this then we can skip that. We needn't bother looking if
- * we've already found that we must verify some other NOT NULL
- * constraint.
+ * this then we can skip that. We needn't bother looking if we've
+ * already found that we must verify some other NOT NULL constraint.
*/
if (!tab->verify_new_notnull &&
!NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple)))
@@ -10503,7 +10504,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
*/
if (tab->rewrite)
{
- Relation newrel;
+ Relation newrel;
newrel = table_open(RelationGetRelid(rel), NoLock);
RelationClearMissing(newrel);
@@ -10657,8 +10658,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
{
/*
* Changing the type of a column that is used by a
- * generated column is not allowed by SQL standard.
- * It might be doable with some thinking and effort.
+ * generated column is not allowed by SQL standard. It
+ * might be doable with some thinking and effort.
*/
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -10862,13 +10863,13 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
/*
* Here we go --- change the recorded column type and collation. (Note
- * heapTup is a copy of the syscache entry, so okay to scribble on.)
- * First fix up the missing value if any.
+ * heapTup is a copy of the syscache entry, so okay to scribble on.) First
+ * fix up the missing value if any.
*/
if (attTup->atthasmissing)
{
- Datum missingval;
- bool missingNull;
+ Datum missingval;
+ bool missingNull;
/* if rewrite is true the missing value should already be cleared */
Assert(tab->rewrite == 0);
@@ -10881,7 +10882,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
/* if it's a null array there is nothing to do */
- if (! missingNull)
+ if (!missingNull)
{
/*
* Get the datum out of the array and repack it in a new array
@@ -10890,12 +10891,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* changed, only the array metadata.
*/
- int one = 1;
- bool isNull;
- Datum valuesAtt[Natts_pg_attribute];
- bool nullsAtt[Natts_pg_attribute];
- bool replacesAtt[Natts_pg_attribute];
- HeapTuple newTup;
+ int one = 1;
+ bool isNull;
+ Datum valuesAtt[Natts_pg_attribute];
+ bool nullsAtt[Natts_pg_attribute];
+ bool replacesAtt[Natts_pg_attribute];
+ HeapTuple newTup;
MemSet(valuesAtt, 0, sizeof(valuesAtt));
MemSet(nullsAtt, false, sizeof(nullsAtt));
@@ -10910,12 +10911,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
attTup->attalign,
&isNull);
missingval = PointerGetDatum(
- construct_array(&missingval,
- 1,
- targettype,
- tform->typlen,
- tform->typbyval,
- tform->typalign));
+ construct_array(&missingval,
+ 1,
+ targettype,
+ tform->typlen,
+ tform->typbyval,
+ tform->typalign));
valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval;
replacesAtt[Anum_pg_attribute_attmissingval - 1] = true;
@@ -12311,16 +12312,16 @@ ATExecSetTableSpaceNoStorage(Relation rel, Oid newTableSpace)
Oid reloid = RelationGetRelid(rel);
/*
- * Shouldn't be called on relations having storage; these are processed
- * in phase 3.
+ * Shouldn't be called on relations having storage; these are processed in
+ * phase 3.
*/
Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind));
/* Can't allow a non-shared relation in pg_global */
if (newTableSpace == GLOBALTABLESPACE_OID)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("only shared relations can be placed in pg_global tablespace")));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("only shared relations can be placed in pg_global tablespace")));
/*
* No work if no change in tablespace.
@@ -15044,7 +15045,7 @@ ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNu
i = -1;
while ((i = bms_next_member(expr_attrs, i)) >= 0)
{
- AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber;
+ AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber;
if (TupleDescAttr(RelationGetDescr(rel), attno - 1)->attgenerated)
ereport(ERROR,
@@ -15202,7 +15203,7 @@ PartConstraintImpliedByRelConstraint(Relation scanrel,
bool
ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *provenConstraint)
{
- List *existConstraint = list_copy(provenConstraint);
+ List *existConstraint = list_copy(provenConstraint);
TupleConstr *constr = RelationGetDescr(scanrel)->constr;
int num_check,
i;
@@ -15240,8 +15241,8 @@ ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *p
* not-false and try to prove the same for testConstraint.
*
* Note that predicate_implied_by assumes its first argument is known
- * immutable. That should always be true for both NOT NULL and
- * partition constraints, so we don't test it here.
+ * immutable. That should always be true for both NOT NULL and partition
+ * constraints, so we don't test it here.
*/
return predicate_implied_by(testConstraint, existConstraint, true);
}
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 8ec963f1cf..33df2ec0af 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -1143,9 +1143,9 @@ GetDefaultTablespace(char relpersistence, bool partitioned)
/*
* Allow explicit specification of database's default tablespace in
- * default_tablespace without triggering permissions checks. Don't
- * allow specifying that when creating a partitioned table, however,
- * since the result is confusing.
+ * default_tablespace without triggering permissions checks. Don't allow
+ * specifying that when creating a partitioned table, however, since the
+ * result is confusing.
*/
if (result == MyDatabaseTableSpace)
{
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 2beb378145..209021a61a 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -4245,9 +4245,9 @@ AfterTriggerExecute(EState *estate,
case AFTER_TRIGGER_FDW_REUSE:
/*
- * Store tuple in the slot so that tg_trigtuple does not
- * reference tuplestore memory. (It is formally possible for the
- * trigger function to queue trigger events that add to the same
+ * Store tuple in the slot so that tg_trigtuple does not reference
+ * tuplestore memory. (It is formally possible for the trigger
+ * function to queue trigger events that add to the same
* tuplestore, which can push other tuples out of memory.) The
* distinction is academic, because we start with a minimal tuple
* that is stored as a heap tuple, constructed in different memory
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index afdd3307ac..d69a73d13e 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -88,13 +88,13 @@ void
ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
{
VacuumParams params;
- bool verbose = false;
- bool skip_locked = false;
- bool analyze = false;
- bool freeze = false;
- bool full = false;
- bool disable_page_skipping = false;
- ListCell *lc;
+ bool verbose = false;
+ bool skip_locked = false;
+ bool analyze = false;
+ bool freeze = false;
+ bool full = false;
+ bool disable_page_skipping = false;
+ ListCell *lc;
/* Set default value */
params.index_cleanup = VACOPT_TERNARY_DEFAULT;
@@ -103,7 +103,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
/* Parse options list */
foreach(lc, vacstmt->options)
{
- DefElem *opt = (DefElem *) lfirst(lc);
+ DefElem *opt = (DefElem *) lfirst(lc);
/* Parse common options for VACUUM and ANALYZE */
if (strcmp(opt->defname, "verbose") == 0)
@@ -593,8 +593,9 @@ vacuum_open_relation(Oid relid, RangeVar *relation, int options,
/*
* Determine the log level.
*
- * For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements
- * in the permission checks; otherwise, only log if the caller so requested.
+ * For manual VACUUM or ANALYZE, we emit a WARNING to match the log
+ * statements in the permission checks; otherwise, only log if the caller
+ * so requested.
*/
if (!IsAutoVacuumWorkerProcess())
elevel = WARNING;
@@ -1328,9 +1329,9 @@ vac_update_datfrozenxid(void)
}
/*
- * Some table AMs might not need per-relation xid / multixid
- * horizons. It therefore seems reasonable to allow relfrozenxid and
- * relminmxid to not be set (i.e. set to their respective Invalid*Id)
+ * Some table AMs might not need per-relation xid / multixid horizons.
+ * It therefore seems reasonable to allow relfrozenxid and relminmxid
+ * to not be set (i.e. set to their respective Invalid*Id)
* independently. Thus validate and compute horizon for each only if
* set.
*
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index 0a7b2b8f47..5d64471eed 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -2367,10 +2367,10 @@ get_last_attnums_walker(Node *node, LastAttnumInfo *info)
static void
ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
{
- PlanState *parent = state->parent;
+ PlanState *parent = state->parent;
TupleDesc desc = NULL;
const TupleTableSlotOps *tts_ops = NULL;
- bool isfixed = false;
+ bool isfixed = false;
if (op->d.fetch.known_desc != NULL)
{
@@ -3313,7 +3313,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
*/
ExprState *
ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
- const TupleTableSlotOps * lops, const TupleTableSlotOps * rops,
+ const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
int numCols,
const AttrNumber *keyColIdx,
const Oid *eqfunctions,
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index a018925d4e..612a88456e 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -4038,7 +4038,7 @@ void
ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext,
TupleTableSlot *slot)
{
- Datum d;
+ Datum d;
/* slot_getsysattr has sufficient defenses against bad attnums */
d = slot_getsysattr(slot,
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index ed7c0606bf..44e4a6d104 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2551,7 +2551,7 @@ EvalPlanQualSlot(EPQState *epqstate,
if (relation)
*slot = table_slot_create(relation,
- &epqstate->estate->es_tupleTable);
+ &epqstate->estate->es_tupleTable);
else
*slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable,
epqstate->origslot->tts_tupleDescriptor,
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 3d4b01cb4d..da9074c54c 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -1058,7 +1058,7 @@ ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
* instrumentation in per-query context.
*/
ibytes = offsetof(SharedJitInstrumentation, jit_instr)
- + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
+ + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
planstate->worker_jit_instrument =
MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
@@ -1133,7 +1133,7 @@ ExecParallelCleanup(ParallelExecutorInfo *pei)
/* Accumulate JIT instrumentation, if any. */
if (pei->jit_instrumentation)
ExecParallelRetrieveJitInstrumentation(pei->planstate,
- pei->jit_instrumentation);
+ pei->jit_instrumentation);
/* Free any serialized parameters. */
if (DsaPointerIsValid(pei->param_exec))
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 6cdbb9db42..73ba298c5d 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -145,12 +145,12 @@ typedef struct PartitionDispatchData
TupleTableSlot *tupslot;
AttrNumber *tupmap;
int indexes[FLEXIBLE_ARRAY_MEMBER];
-} PartitionDispatchData;
+} PartitionDispatchData;
/* struct to hold result relations coming from UPDATE subplans */
typedef struct SubplanResultRelHashElem
{
- Oid relid; /* hash key -- must be first */
+ Oid relid; /* hash key -- must be first */
ResultRelInfo *rri;
} SubplanResultRelHashElem;
@@ -375,7 +375,7 @@ ExecFindPartition(ModifyTableState *mtstate,
if (proute->subplan_resultrel_htab)
{
Oid partoid = partdesc->oids[partidx];
- SubplanResultRelHashElem *elem;
+ SubplanResultRelHashElem *elem;
elem = hash_search(proute->subplan_resultrel_htab,
&partoid, HASH_FIND, NULL);
@@ -474,7 +474,7 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
ResultRelInfo *rri = &mtstate->resultRelInfo[i];
bool found;
Oid partoid = RelationGetRelid(rri->ri_RelationDesc);
- SubplanResultRelHashElem *elem;
+ SubplanResultRelHashElem *elem;
elem = (SubplanResultRelHashElem *)
hash_search(htab, &partoid, HASH_ENTER, &found);
@@ -762,9 +762,9 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
* It's safe to reuse these from the partition root, as we
* only process one tuple at a time (therefore we won't
* overwrite needed data in slots), and the results of
- * projections are independent of the underlying
- * storage. Projections and where clauses themselves don't
- * store state / are independent of the underlying storage.
+ * projections are independent of the underlying storage.
+ * Projections and where clauses themselves don't store state
+ * / are independent of the underlying storage.
*/
leaf_part_rri->ri_onConflict->oc_ProjSlot =
rootResultRelInfo->ri_onConflict->oc_ProjSlot;
@@ -892,7 +892,7 @@ ExecInitRoutingInfo(ModifyTableState *mtstate,
{
MemoryContext oldcxt;
PartitionRoutingInfo *partrouteinfo;
- int rri_index;
+ int rri_index;
oldcxt = MemoryContextSwitchTo(proute->memcxt);
@@ -1668,16 +1668,16 @@ ExecCreatePartitionPruneState(PlanState *planstate,
}
else
{
- int pd_idx = 0;
- int pp_idx;
+ int pd_idx = 0;
+ int pp_idx;
/*
* Some new partitions have appeared since plan time, and
* those are reflected in our PartitionDesc but were not
* present in the one used to construct subplan_map and
* subpart_map. So we must construct new and longer arrays
- * where the partitions that were originally present map to the
- * same place, and any added indexes map to -1, as if the
+ * where the partitions that were originally present map to
+ * the same place, and any added indexes map to -1, as if the
* new partitions had been pruned.
*/
pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index f8f6463358..0326284c83 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -227,7 +227,7 @@ retry:
static bool
tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
{
- int attrnum;
+ int attrnum;
Assert(slot1->tts_tupleDescriptor->natts ==
slot2->tts_tupleDescriptor->natts);
@@ -265,8 +265,8 @@ tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
if (!DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo,
att->attcollation,
- slot1->tts_values[attrnum],
- slot2->tts_values[attrnum])))
+ slot1->tts_values[attrnum],
+ slot2->tts_values[attrnum])))
return false;
}
@@ -406,7 +406,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
resultRelInfo->ri_TrigDesc->trig_insert_before_row)
{
if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
- skip_tuple = true; /* "do nothing" */
+ skip_tuple = true; /* "do nothing" */
}
if (!skip_tuple)
@@ -471,7 +471,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
tid, NULL, slot))
- skip_tuple = true; /* "do nothing" */
+ skip_tuple = true; /* "do nothing" */
}
if (!skip_tuple)
@@ -490,7 +490,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
- simple_table_update(rel, tid, slot,estate->es_snapshot,
+ simple_table_update(rel, tid, slot, estate->es_snapshot,
&update_indexes);
if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
@@ -591,8 +591,8 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
const char *relname)
{
/*
- * We currently only support writing to regular tables. However, give
- * a more specific error for partitioned and foreign tables.
+ * We currently only support writing to regular tables. However, give a
+ * more specific error for partitioned and foreign tables.
*/
if (relkind == RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
@@ -600,14 +600,14 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is a partitioned table.",
- nspname, relname)));
+ nspname, relname)));
else if (relkind == RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is a foreign table.",
- nspname, relname)));
+ nspname, relname)));
if (relkind != RELKIND_RELATION)
ereport(ERROR,
@@ -615,5 +615,5 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is not a table.",
- nspname, relname)));
+ nspname, relname)));
}
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 881131aff2..67c4be5108 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -81,7 +81,8 @@ ExecScanFetch(ScanState *node,
/* Check if it meets the access-method conditions */
if (!(*recheckMtd) (node, slot))
- return ExecClearTuple(slot); /* would not be returned by scan */
+ return ExecClearTuple(slot); /* would not be returned by
+ * scan */
return slot;
}
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 55d1669db0..ad13fd9a05 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -71,13 +71,12 @@
static TupleDesc ExecTypeFromTLInternal(List *targetList,
bool skipjunk);
-static pg_attribute_always_inline void
-slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
+static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
int natts);
static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot,
- HeapTuple tuple,
- Buffer buffer,
- bool transfer_pin);
+ HeapTuple tuple,
+ Buffer buffer,
+ bool transfer_pin);
static void tts_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, bool shouldFree);
@@ -138,7 +137,7 @@ tts_virtual_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
elog(ERROR, "virtual tuple table slot does not have system attributes");
- return 0; /* silence compiler warnings */
+ return 0; /* silence compiler warnings */
}
/*
@@ -164,7 +163,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
for (int natt = 0; natt < desc->natts; natt++)
{
Form_pg_attribute att = TupleDescAttr(desc, natt);
- Datum val;
+ Datum val;
if (att->attbyval || slot->tts_isnull[natt])
continue;
@@ -200,7 +199,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
for (int natt = 0; natt < desc->natts; natt++)
{
Form_pg_attribute att = TupleDescAttr(desc, natt);
- Datum val;
+ Datum val;
if (att->attbyval || slot->tts_isnull[natt])
continue;
@@ -210,7 +209,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
if (att->attlen == -1 &&
VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
{
- Size data_length;
+ Size data_length;
/*
* We want to flatten the expanded value so that the materialized
@@ -228,7 +227,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
}
else
{
- Size data_length = 0;
+ Size data_length = 0;
data = (char *) att_align_nominal(data, att->attalign);
data_length = att_addlength_datum(data_length, att->attlen, val);
@@ -382,7 +381,7 @@ tts_heap_materialize(TupleTableSlot *slot)
static void
tts_heap_copyslot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
{
- HeapTuple tuple;
+ HeapTuple tuple;
MemoryContext oldcontext;
oldcontext = MemoryContextSwitchTo(dstslot->tts_mcxt);
@@ -499,7 +498,7 @@ tts_minimal_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
elog(ERROR, "minimal tuple table slot does not have system attributes");
- return 0; /* silence compiler warnings */
+ return 0; /* silence compiler warnings */
}
static void
@@ -1077,8 +1076,10 @@ TupleTableSlot *
MakeTupleTableSlot(TupleDesc tupleDesc,
const TupleTableSlotOps *tts_ops)
{
- Size basesz, allocsz;
+ Size basesz,
+ allocsz;
TupleTableSlot *slot;
+
basesz = tts_ops->base_slot_size;
/*
@@ -1866,7 +1867,7 @@ void
slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
{
/* Check for caller errors */
- Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
+ Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
Assert(attnum > 0);
if (unlikely(attnum > slot->tts_tupleDescriptor->natts))
@@ -1876,8 +1877,8 @@ slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
slot->tts_ops->getsomeattrs(slot, attnum);
/*
- * If the underlying tuple doesn't have enough attributes, tuple descriptor
- * must have the missing attributes.
+ * If the underlying tuple doesn't have enough attributes, tuple
+ * descriptor must have the missing attributes.
*/
if (unlikely(slot->tts_nvalid < attnum))
{
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 965e5dea70..b34f565bfe 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -1762,7 +1762,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (junkFilter)
{
TupleTableSlot *slot =
- MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
+ MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
*junkFilter = ExecInitJunkFilter(tlist, slot);
}
@@ -1929,7 +1929,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (junkFilter)
{
TupleTableSlot *slot =
- MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
+ MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
*junkFilter = ExecInitJunkFilterConversion(tlist,
CreateTupleDescCopy(tupdesc),
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index fd3c71e764..43ab9fb392 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -754,7 +754,7 @@ process_ordered_aggregate_single(AggState *aggstate,
oldAbbrevVal == newAbbrevVal &&
DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
pertrans->aggCollation,
- oldVal, *newVal)))))
+ oldVal, *newVal)))))
{
/* equal to prior, so forget this one */
if (!pertrans->inputtypeByVal && !*isNull)
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 4de1d2b484..d2da5d3a95 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -700,10 +700,10 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
Assert(HeapTupleIsValid(tup));
/* Build the TupleTableSlot for the given tuple */
- ExecStoreHeapTuple(tup, /* tuple to store */
+ ExecStoreHeapTuple(tup, /* tuple to store */
gm_state->gm_slots[reader], /* slot in which to store
* the tuple */
- true); /* pfree tuple when done with it */
+ true); /* pfree tuple when done with it */
return true;
}
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index aa43296e26..5ccdc1af2e 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -750,7 +750,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
rclauses = lappend(rclauses, ExecInitExpr(lsecond(hclause->args),
(PlanState *) hjstate));
rhclauses = lappend(rhclauses, ExecInitExpr(lsecond(hclause->args),
- innerPlanState(hjstate)));
+ innerPlanState(hjstate)));
hoperators = lappend_oid(hoperators, hclause->opno);
hcollations = lappend_oid(hcollations, hclause->inputcollid);
}
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 8fd52e9c80..5dce284fe7 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -192,9 +192,9 @@ IndexOnlyNext(IndexOnlyScanState *node)
/*
* Fill the scan tuple slot with data from the index. This might be
- * provided in either HeapTuple or IndexTuple format. Conceivably
- * an index AM might fill both fields, in which case we prefer the
- * heap format, since it's probably a bit cheaper to fill a slot from.
+ * provided in either HeapTuple or IndexTuple format. Conceivably an
+ * index AM might fill both fields, in which case we prefer the heap
+ * format, since it's probably a bit cheaper to fill a slot from.
*/
if (scandesc->xs_hitup)
{
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index c97eb60f77..73bfd424d9 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -242,7 +242,7 @@ IndexNextWithReorder(IndexScanState *node)
scandesc->xs_orderbynulls,
node) <= 0)
{
- HeapTuple tuple;
+ HeapTuple tuple;
tuple = reorderqueue_pop(node);
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 7674ac893c..4067554ed9 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -327,7 +327,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
/* node returns unmodified slots from the outer plan */
lrstate->ps.resultopsset = true;
lrstate->ps.resultops = ExecGetResultSlotOps(outerPlanState(lrstate),
- &lrstate->ps.resultopsfixed);
+ &lrstate->ps.resultopsfixed);
/*
* LockRows nodes do no projections, so initialize projection info for
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index d3a0dece5a..8acdaf2057 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -865,6 +865,7 @@ ldelete:;
goto ldelete;
case TM_SelfModified:
+
/*
* This can be reached when following an update
* chain from a tuple updated by another session,
@@ -1070,7 +1071,7 @@ ExecUpdate(ModifyTableState *mtstate,
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
tupleid, oldtuple, slot))
- return NULL; /* "do nothing" */
+ return NULL; /* "do nothing" */
}
/* INSTEAD OF ROW UPDATE Triggers */
@@ -1079,7 +1080,7 @@ ExecUpdate(ModifyTableState *mtstate,
{
if (!ExecIRUpdateTriggers(estate, resultRelInfo,
oldtuple, slot))
- return NULL; /* "do nothing" */
+ return NULL; /* "do nothing" */
}
else if (resultRelInfo->ri_FdwRoutine)
{
@@ -1401,6 +1402,7 @@ lreplace:;
return NULL;
case TM_SelfModified:
+
/*
* This can be reached when following an update
* chain from a tuple updated by another session,
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 8bd7430a91..436b43f8ca 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -228,8 +228,8 @@ ExecReScanSeqScan(SeqScanState *node)
scan = node->ss.ss_currentScanDesc;
if (scan != NULL)
- table_rescan(scan, /* scan desc */
- NULL); /* new scan keys */
+ table_rescan(scan, /* scan desc */
+ NULL); /* new scan keys */
ExecScanReScan((ScanState *) node);
}
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 749b4eced3..3662fcada8 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -684,7 +684,7 @@ execTuplesUnequal(TupleTableSlot *slot1,
/* Apply the type-specific equality function */
if (!DatumGetBool(FunctionCall2Coll(&eqfunctions[i],
collations[i],
- attr1, attr2)))
+ attr1, attr2)))
{
result = true; /* they are unequal */
break;
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 707ec0d190..de8c006051 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -131,6 +131,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
ExecInitScanTupleSlot(estate, &subquerystate->ss,
ExecGetResultType(subquerystate->subplan),
ExecGetResultSlotOps(subquerystate->subplan, NULL));
+
/*
* The slot used as the scantuple isn't the slot above (outside of EPQ),
* but the one from the node below.
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 62466be702..caf3b71f9e 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -2154,8 +2154,8 @@ CheckPAMAuth(Port *port, const char *user, const char *password)
* later used inside the PAM conversation to pass the password to the
* authentication module.
*/
- pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above,
- * not allocated */
+ pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above,
+ * not allocated */
/* Optionally, one can set the service name in pg_hba.conf */
if (port->hba->pamservice && port->hba->pamservice[0] != '\0')
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index c38a71df58..673066a456 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -68,10 +68,10 @@ static bool dummy_ssl_passwd_cb_called = false;
static bool ssl_is_server_start;
static int ssl_protocol_version_to_openssl(int v, const char *guc_name,
- int loglevel);
+ int loglevel);
#ifndef SSL_CTX_set_min_proto_version
-static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
-static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
+static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
+static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
#endif
@@ -192,9 +192,10 @@ be_tls_init(bool isServerStart)
if (ssl_min_protocol_version)
{
- int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
- "ssl_min_protocol_version",
- isServerStart ? FATAL : LOG);
+ int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
+ "ssl_min_protocol_version",
+ isServerStart ? FATAL : LOG);
+
if (ssl_ver == -1)
goto error;
SSL_CTX_set_min_proto_version(context, ssl_ver);
@@ -202,9 +203,10 @@ be_tls_init(bool isServerStart)
if (ssl_max_protocol_version)
{
- int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
- "ssl_max_protocol_version",
- isServerStart ? FATAL : LOG);
+ int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
+ "ssl_max_protocol_version",
+ isServerStart ? FATAL : LOG);
+
if (ssl_ver == -1)
goto error;
SSL_CTX_set_max_proto_version(context, ssl_ver);
@@ -1150,6 +1152,7 @@ be_tls_get_peer_serial(Port *port, char *ptr, size_t len)
serial = X509_get_serialNumber(port->peer);
b = ASN1_INTEGER_to_BN(serial, NULL);
decimal = BN_bn2dec(b);
+
BN_free(b);
strlcpy(ptr, decimal, len);
OPENSSL_free(decimal);
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index 4ad17d0c31..665149defe 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -476,10 +476,10 @@ bms_member_index(Bitmapset *a, int x)
}
/*
- * Now add bits of the last word, but only those before the item.
- * We can do that by applying a mask and then using popcount again.
- * To get 0-based index, we want to count only preceding bits, not
- * the item itself, so we subtract 1.
+ * Now add bits of the last word, but only those before the item. We can
+ * do that by applying a mask and then using popcount again. To get
+ * 0-based index, we want to count only preceding bits, not the item
+ * itself, so we subtract 1.
*/
mask = ((bitmapword) 1 << bitnum) - 1;
result += bmw_popcount(a->words[wordnum] & mask);
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index 9dae586a51..bfad6b7987 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -161,9 +161,9 @@ clauselist_selectivity_simple(PlannerInfo *root,
int listidx;
/*
- * If there's exactly one clause (and it was not estimated yet), just
- * go directly to clause_selectivity(). None of what we might do below
- * is relevant.
+ * If there's exactly one clause (and it was not estimated yet), just go
+ * directly to clause_selectivity(). None of what we might do below is
+ * relevant.
*/
if ((list_length(clauses) == 1) &&
bms_num_members(estimatedclauses) == 0)
diff --git a/src/backend/optimizer/util/inherit.c b/src/backend/optimizer/util/inherit.c
index ccc8c11a98..bbf204ddfb 100644
--- a/src/backend/optimizer/util/inherit.c
+++ b/src/backend/optimizer/util/inherit.c
@@ -311,6 +311,7 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo,
if (!root->partColsUpdated)
root->partColsUpdated =
has_partition_attrs(parentrel, parentrte->updatedCols, NULL);
+
/*
* There shouldn't be any generated columns in the partition key.
*/
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 36aee35d46..d66471c7a4 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -3621,7 +3621,7 @@ create_limit_path(PlannerInfo *root, RelOptInfo *rel,
*/
void
adjust_limit_rows_costs(double *rows, /* in/out parameter */
- Cost *startup_cost, /* in/out parameter */
+ Cost *startup_cost, /* in/out parameter */
Cost *total_cost, /* in/out parameter */
int64 offset_est,
int64 count_est)
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 4564c0ae81..bbeaada2ae 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -1053,8 +1053,8 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
InvalidOid, &found_whole_row);
/*
- * Prevent this for the same reason as for constraints below.
- * Note that defaults cannot contain any vars, so it's OK that the
+ * Prevent this for the same reason as for constraints below. Note
+ * that defaults cannot contain any vars, so it's OK that the
* error message refers to generated columns.
*/
if (found_whole_row)
@@ -3845,11 +3845,11 @@ transformPartitionBound(ParseState *pstate, Relation parent,
* any necessary validation.
*/
result_spec->lowerdatums =
- transformPartitionRangeBounds(pstate, spec->lowerdatums,
- parent);
+ transformPartitionRangeBounds(pstate, spec->lowerdatums,
+ parent);
result_spec->upperdatums =
- transformPartitionRangeBounds(pstate, spec->upperdatums,
- parent);
+ transformPartitionRangeBounds(pstate, spec->upperdatums,
+ parent);
}
else
elog(ERROR, "unexpected partition strategy: %d", (int) strategy);
@@ -3876,17 +3876,17 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
i = j = 0;
foreach(lc, blist)
{
- Node *expr = lfirst(lc);
+ Node *expr = lfirst(lc);
PartitionRangeDatum *prd = NULL;
/*
- * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed
- * in as ColumnRefs.
+ * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed in
+ * as ColumnRefs.
*/
if (IsA(expr, ColumnRef))
{
- ColumnRef *cref = (ColumnRef *) expr;
- char *cname = NULL;
+ ColumnRef *cref = (ColumnRef *) expr;
+ char *cname = NULL;
/*
* There should be a single field named either "minvalue" or
@@ -3899,8 +3899,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
if (cname == NULL)
{
/*
- * ColumnRef is not in the desired single-field-name form.
- * For consistency between all partition strategies, let the
+ * ColumnRef is not in the desired single-field-name form. For
+ * consistency between all partition strategies, let the
* expression transformation report any errors rather than
* doing it ourselves.
*/
@@ -3965,8 +3965,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
}
/*
- * Once we see MINVALUE or MAXVALUE for one column, the remaining
- * columns must be the same.
+ * Once we see MINVALUE or MAXVALUE for one column, the remaining columns
+ * must be the same.
*/
validateInfiniteBounds(pstate, result);
@@ -4030,13 +4030,13 @@ transformPartitionBoundValue(ParseState *pstate, Node *val,
/*
* Check that the input expression's collation is compatible with one
- * specified for the parent's partition key (partcollation). Don't
- * throw an error if it's the default collation which we'll replace with
- * the parent's collation anyway.
+ * specified for the parent's partition key (partcollation). Don't throw
+ * an error if it's the default collation which we'll replace with the
+ * parent's collation anyway.
*/
if (IsA(value, CollateExpr))
{
- Oid exprCollOid = exprCollation(value);
+ Oid exprCollOid = exprCollation(value);
if (OidIsValid(exprCollOid) &&
exprCollOid != DEFAULT_COLLATION_OID &&
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index 4d6595b249..b207b765f2 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -38,7 +38,7 @@ typedef struct PartitionDirectoryData
{
MemoryContext pdir_mcxt;
HTAB *pdir_hash;
-} PartitionDirectoryData;
+} PartitionDirectoryData;
typedef struct PartitionDirectoryEntry
{
@@ -74,9 +74,9 @@ RelationBuildPartitionDesc(Relation rel)
/*
* Get partition oids from pg_inherits. This uses a single snapshot to
- * fetch the list of children, so while more children may be getting
- * added concurrently, whatever this function returns will be accurate
- * as of some well-defined point in time.
+ * fetch the list of children, so while more children may be getting added
+ * concurrently, whatever this function returns will be accurate as of
+ * some well-defined point in time.
*/
inhoids = find_inheritance_children(RelationGetRelid(rel), NoLock);
nparts = list_length(inhoids);
@@ -122,14 +122,14 @@ RelationBuildPartitionDesc(Relation rel)
*
* Note that this algorithm assumes that PartitionBoundSpec we manage
* to fetch is the right one -- so this is only good enough for
- * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION
- * or some hypothetical operation that changes the partition bounds.
+ * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION or
+ * some hypothetical operation that changes the partition bounds.
*/
if (boundspec == NULL)
{
Relation pg_class;
- SysScanDesc scan;
- ScanKeyData key[1];
+ SysScanDesc scan;
+ ScanKeyData key[1];
Datum datum;
bool isnull;
@@ -301,7 +301,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
void
DestroyPartitionDirectory(PartitionDirectory pdir)
{
- HASH_SEQ_STATUS status;
+ HASH_SEQ_STATUS status;
PartitionDirectoryEntry *pde;
hash_seq_init(&status, pdir->pdir_hash);
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 765d58d120..b455c59cd7 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -57,7 +57,7 @@ static void libpqrcv_get_senderinfo(WalReceiverConn *conn,
char **sender_host, int *sender_port);
static char *libpqrcv_identify_system(WalReceiverConn *conn,
TimeLineID *primary_tli);
-static int libpqrcv_server_version(WalReceiverConn *conn);
+static int libpqrcv_server_version(WalReceiverConn *conn);
static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
TimeLineID tli, char **filename,
char **content, int *len);
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 65f86ad73d..acebf5893e 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -468,8 +468,8 @@ ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple)
Oid *
ReorderBufferGetRelids(ReorderBuffer *rb, int nrelids)
{
- Oid *relids;
- Size alloc_len;
+ Oid *relids;
+ Size alloc_len;
alloc_len = sizeof(Oid) * nrelids;
@@ -1327,8 +1327,8 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
else
{
/*
- * Maybe we already saw this tuple before in this transaction,
- * but if so it must have the same cmin.
+ * Maybe we already saw this tuple before in this transaction, but
+ * if so it must have the same cmin.
*/
Assert(ent->cmin == change->data.tuplecid.cmin);
@@ -2464,8 +2464,8 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
}
case REORDER_BUFFER_CHANGE_TRUNCATE:
{
- Size size;
- char *data;
+ Size size;
+ char *data;
/* account for the OIDs of truncated relations */
size = sizeof(Oid) * change->data.truncate.nrelids;
@@ -2767,7 +2767,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
/* the base struct contains all the data, easy peasy */
case REORDER_BUFFER_CHANGE_TRUNCATE:
{
- Oid *relids;
+ Oid *relids;
relids = ReorderBufferGetRelids(rb,
change->data.truncate.nrelids);
diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c
index 182fe5bc82..808a6f5b83 100644
--- a/src/backend/replication/slotfuncs.c
+++ b/src/backend/replication/slotfuncs.c
@@ -730,11 +730,11 @@ copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot)
SpinLockRelease(&src->mutex);
/*
- * Check if the source slot still exists and is valid. We regard it
- * as invalid if the type of replication slot or name has been
- * changed, or the restart_lsn either is invalid or has gone backward.
- * (The restart_lsn could go backwards if the source slot is dropped
- * and copied from an older slot during installation.)
+ * Check if the source slot still exists and is valid. We regard it as
+ * invalid if the type of replication slot or name has been changed,
+ * or the restart_lsn either is invalid or has gone backward. (The
+ * restart_lsn could go backwards if the source slot is dropped and
+ * copied from an older slot during installation.)
*
* Since erroring out will release and drop the destination slot we
* don't need to release it here.
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 6c160c13c6..83734575c2 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -276,9 +276,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
WAIT_EVENT_SYNC_REP);
/*
- * If the postmaster dies, we'll probably never get an
- * acknowledgment, because all the wal sender processes will exit. So
- * just bail out.
+ * If the postmaster dies, we'll probably never get an acknowledgment,
+ * because all the wal sender processes will exit. So just bail out.
*/
if (rc & WL_POSTMASTER_DEATH)
{
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index d52ec7b2cf..6abc780778 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -808,11 +808,11 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
* anyway.
*
* Note we use _exit(2) not _exit(0). This is to force the postmaster
- * into a system reset cycle if someone sends a manual SIGQUIT to a
- * random backend. This is necessary precisely because we don't clean up
- * our shared memory state. (The "dead man switch" mechanism in
- * pmsignal.c should ensure the postmaster sees this as a crash, too, but
- * no harm in being doubly sure.)
+ * into a system reset cycle if someone sends a manual SIGQUIT to a random
+ * backend. This is necessary precisely because we don't clean up our
+ * shared memory state. (The "dead man switch" mechanism in pmsignal.c
+ * should ensure the postmaster sees this as a crash, too, but no harm in
+ * being doubly sure.)
*/
_exit(2);
}
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 8aa12ec912..3f31368022 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -218,7 +218,7 @@ typedef struct
int write_head;
int read_heads[NUM_SYNC_REP_WAIT_MODE];
WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
-} LagTracker;
+} LagTracker;
static LagTracker *lag_tracker;
@@ -1407,7 +1407,7 @@ WalSndWaitForWal(XLogRecPtr loc)
sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH |
- WL_SOCKET_READABLE | WL_TIMEOUT;
+ WL_SOCKET_READABLE | WL_TIMEOUT;
if (pq_is_send_pending())
wakeEvents |= WL_SOCKET_WRITEABLE;
@@ -2255,7 +2255,7 @@ WalSndLoop(WalSndSendDataCallback send_data)
int wakeEvents;
wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT |
- WL_SOCKET_READABLE;
+ WL_SOCKET_READABLE;
/*
* Use fresh timestamp, not last_processed, to reduce the chance
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index 0b26e4166d..8a71c2b534 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -279,8 +279,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
* build an array of SortItem(s) sorted using the multi-sort support
*
* XXX This relies on all stats entries pointing to the same tuple
- * descriptor. For now that assumption holds, but it might change in
- * the future for example if we support statistics on multiple tables.
+ * descriptor. For now that assumption holds, but it might change in the
+ * future for example if we support statistics on multiple tables.
*/
items = build_sorted_items(numrows, &nitems, rows, stats[0]->tupDesc,
mss, k, attnums_dep);
@@ -300,8 +300,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
{
/*
* Check if the group ended, which may be either because we processed
- * all the items (i==nitems), or because the i-th item is not equal
- * to the preceding one.
+ * all the items (i==nitems), or because the i-th item is not equal to
+ * the preceding one.
*/
if (i == nitems ||
multi_sort_compare_dims(0, k - 2, &items[i - 1], &items[i], mss) != 0)
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index ac0ae52ecf..cc6112df3b 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -67,7 +67,7 @@ static VacAttrStats **lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
int nvacatts, VacAttrStats **vacatts);
static void statext_store(Relation pg_stext, Oid relid,
MVNDistinct *ndistinct, MVDependencies *dependencies,
- MCVList * mcvlist, VacAttrStats **stats);
+ MCVList *mcvlist, VacAttrStats **stats);
/*
@@ -317,7 +317,7 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
static void
statext_store(Relation pg_stext, Oid statOid,
MVNDistinct *ndistinct, MVDependencies *dependencies,
- MCVList * mcv, VacAttrStats **stats)
+ MCVList *mcv, VacAttrStats **stats)
{
HeapTuple stup,
oldtup;
@@ -538,9 +538,9 @@ build_attnums_array(Bitmapset *attrs, int *numattrs)
{
/*
* Make sure the bitmap contains only user-defined attributes. As
- * bitmaps can't contain negative values, this can be violated in
- * two ways. Firstly, the bitmap might contain 0 as a member, and
- * secondly the integer value might be larger than MaxAttrNumber.
+ * bitmaps can't contain negative values, this can be violated in two
+ * ways. Firstly, the bitmap might contain 0 as a member, and secondly
+ * the integer value might be larger than MaxAttrNumber.
*/
Assert(AttrNumberIsForUserDefinedAttr(j));
Assert(j <= MaxAttrNumber);
@@ -600,7 +600,7 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc,
idx = 0;
for (i = 0; i < numrows; i++)
{
- bool toowide = false;
+ bool toowide = false;
items[idx].values = &values[idx * numattrs];
items[idx].isnull = &isnull[idx * numattrs];
@@ -608,8 +608,8 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc,
/* load the values/null flags from sample rows */
for (j = 0; j < numattrs; j++)
{
- Datum value;
- bool isnull;
+ Datum value;
+ bool isnull;
value = heap_getattr(rows[i], attnums[j], tdesc, &isnull);
@@ -988,7 +988,7 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
int listidx;
StatisticExtInfo *stat;
List *stat_clauses;
- Selectivity simple_sel,
+ Selectivity simple_sel,
mcv_sel,
mcv_basesel,
mcv_totalsel,
@@ -1006,9 +1006,9 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
* Pre-process the clauses list to extract the attnums seen in each item.
* We need to determine if there's any clauses which will be useful for
* selectivity estimations with extended stats. Along the way we'll record
- * all of the attnums for each clause in a list which we'll reference later
- * so we don't need to repeat the same work again. We'll also keep track of
- * all attnums seen.
+ * all of the attnums for each clause in a list which we'll reference
+ * later so we don't need to repeat the same work again. We'll also keep
+ * track of all attnums seen.
*
* We also skip clauses that we already estimated using different types of
* statistics (we treat them as incompatible).
@@ -1066,9 +1066,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
}
/*
- * First compute "simple" selectivity, i.e. without the extended statistics,
- * and essentially assuming independence of the columns/clauses. We'll then
- * use the various selectivities computed from MCV list to improve it.
+ * First compute "simple" selectivity, i.e. without the extended
+ * statistics, and essentially assuming independence of the
+ * columns/clauses. We'll then use the various selectivities computed from
+ * MCV list to improve it.
*/
simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
jointype, sjinfo, NULL);
@@ -1105,16 +1106,16 @@ statext_clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid,
JoinType jointype, SpecialJoinInfo *sjinfo,
RelOptInfo *rel, Bitmapset **estimatedclauses)
{
- Selectivity sel;
+ Selectivity sel;
/* First, try estimating clauses using a multivariate MCV list. */
sel = statext_mcv_clauselist_selectivity(root, clauses, varRelid, jointype,
sjinfo, rel, estimatedclauses);
/*
- * Then, apply functional dependencies on the remaining clauses by
- * calling dependencies_clauselist_selectivity. Pass 'estimatedclauses'
- * so the function can properly skip clauses already estimated above.
+ * Then, apply functional dependencies on the remaining clauses by calling
+ * dependencies_clauselist_selectivity. Pass 'estimatedclauses' so the
+ * function can properly skip clauses already estimated above.
*
* The reasoning for applying dependencies last is that the more complex
* stats can track more complex correlations between the attributes, and
diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c
index 05ab6c9bb7..d22820dec7 100644
--- a/src/backend/statistics/mcv.c
+++ b/src/backend/statistics/mcv.c
@@ -209,20 +209,20 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
*
* Using the same algorithm might exclude items that are close to the
* "average" frequency of the sample. But that does not say whether the
- * observed frequency is close to the base frequency or not. We also
- * need to consider unexpectedly uncommon items (again, compared to the
- * base frequency), and the single-column algorithm does not have to.
+ * observed frequency is close to the base frequency or not. We also need
+ * to consider unexpectedly uncommon items (again, compared to the base
+ * frequency), and the single-column algorithm does not have to.
*
* We simply decide how many items to keep by computing minimum count
- * using get_mincount_for_mcv_list() and then keep all items that seem
- * to be more common than that.
+ * using get_mincount_for_mcv_list() and then keep all items that seem to
+ * be more common than that.
*/
mincount = get_mincount_for_mcv_list(numrows, totalrows);
/*
- * Walk the groups until we find the first group with a count below
- * the mincount threshold (the index of that group is the number of
- * groups we want to keep).
+ * Walk the groups until we find the first group with a count below the
+ * mincount threshold (the index of that group is the number of groups we
+ * want to keep).
*/
for (i = 0; i < nitems; i++)
{
@@ -240,7 +240,7 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
*/
if (nitems > 0)
{
- int j;
+ int j;
/*
* Allocate the MCV list structure, set the global parameters.
@@ -485,7 +485,7 @@ statext_mcv_load(Oid mvoid)
* (or a longer type) instead of using an array of bool items.
*/
bytea *
-statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
+statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
{
int i;
int dim;
@@ -603,7 +603,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
info[dim].nbytes = 0;
for (i = 0; i < info[dim].nvalues; i++)
{
- Size len;
+ Size len;
values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
@@ -616,7 +616,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
info[dim].nbytes = 0;
for (i = 0; i < info[dim].nvalues; i++)
{
- Size len;
+ Size len;
/* c-strings include terminator, so +1 byte */
values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
@@ -636,11 +636,11 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
* for each attribute, deduplicated values and items).
*
* The header fields are copied one by one, so that we don't need any
- * explicit alignment (we copy them while deserializing). All fields
- * after this need to be properly aligned, for direct access.
+ * explicit alignment (we copy them while deserializing). All fields after
+ * this need to be properly aligned, for direct access.
*/
total_length = MAXALIGN(VARHDRSZ + (3 * sizeof(uint32))
- + sizeof(AttrNumber) + (ndims * sizeof(Oid)));
+ + sizeof(AttrNumber) + (ndims * sizeof(Oid)));
/* dimension info */
total_length += MAXALIGN(ndims * sizeof(DimensionInfo));
@@ -650,14 +650,14 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
total_length += MAXALIGN(info[i].nbytes);
/*
- * And finally the items (no additional alignment needed, we start
- * at proper alignment and the itemsize formula uses MAXALIGN)
+ * And finally the items (no additional alignment needed, we start at
+ * proper alignment and the itemsize formula uses MAXALIGN)
*/
total_length += mcvlist->nitems * itemsize;
/*
- * Allocate space for the whole serialized MCV list (we'll skip bytes,
- * so we set them to zero to make the result more compressible).
+ * Allocate space for the whole serialized MCV list (we'll skip bytes, so
+ * we set them to zero to make the result more compressible).
*/
raw = palloc0(total_length);
SET_VARSIZE(raw, total_length);
@@ -1189,8 +1189,8 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
HeapTuple tuple;
Datum result;
- StringInfoData itemValues;
- StringInfoData itemNulls;
+ StringInfoData itemValues;
+ StringInfoData itemNulls;
int i;
@@ -1213,9 +1213,9 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
*/
values = (char **) palloc0(5 * sizeof(char *));
- values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
- values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
- values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */
+ values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
+ values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
+ values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */
outfuncs = (Oid *) palloc0(sizeof(Oid) * mcvlist->ndimensions);
fmgrinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * mcvlist->ndimensions);
@@ -1376,7 +1376,7 @@ pg_mcv_list_send(PG_FUNCTION_ARGS)
*/
static bool *
mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
- Bitmapset *keys, MCVList * mcvlist, bool is_or)
+ Bitmapset *keys, MCVList *mcvlist, bool is_or)
{
int i;
ListCell *l;
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 33d7941a40..bee79d84dc 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2813,12 +2813,12 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
case RELKIND_MATVIEW:
{
/*
- * Not every table AM uses BLCKSZ wide fixed size
- * blocks. Therefore tableam returns the size in bytes - but
- * for the purpose of this routine, we want the number of
- * blocks. Therefore divide, rounding up.
+ * Not every table AM uses BLCKSZ wide fixed size blocks.
+ * Therefore tableam returns the size in bytes - but for the
+ * purpose of this routine, we want the number of blocks.
+ * Therefore divide, rounding up.
*/
- uint64 szbytes;
+ uint64 szbytes;
szbytes = table_relation_size(relation, forkNum);
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index fdac9850e0..ffae52089f 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -1731,7 +1731,7 @@ FileClose(File file)
* see LruDelete.
*/
elog(vfdP->fdstate & FD_TEMP_FILE_LIMIT ? LOG : data_sync_elevel(LOG),
- "could not close file \"%s\": %m", vfdP->fileName);
+ "could not close file \"%s\": %m", vfdP->fileName);
}
--nfile;
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index e0712f906a..bff254c2b2 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -856,7 +856,7 @@ WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
if (rc < 0)
ereport(ERROR,
(errcode_for_socket_access(),
- /* translator: %s is a syscall name, such as "poll()" */
+ /* translator: %s is a syscall name, such as "poll()" */
errmsg("%s failed: %m",
"epoll_ctl()")));
}
@@ -1089,7 +1089,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
waiting = false;
ereport(ERROR,
(errcode_for_socket_access(),
- /* translator: %s is a syscall name, such as "poll()" */
+ /* translator: %s is a syscall name, such as "poll()" */
errmsg("%s failed: %m",
"epoll_wait()")));
}
@@ -1215,7 +1215,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
waiting = false;
ereport(ERROR,
(errcode_for_socket_access(),
- /* translator: %s is a syscall name, such as "poll()" */
+ /* translator: %s is a syscall name, such as "poll()" */
errmsg("%s failed: %m",
"poll()")));
}
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index 48f4311464..86acec09f3 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -370,7 +370,7 @@ void
PostmasterDeathSignalInit(void)
{
#ifdef USE_POSTMASTER_DEATH_SIGNAL
- int signum = POSTMASTER_DEATH_SIGNAL;
+ int signum = POSTMASTER_DEATH_SIGNAL;
/* Register our signal handler. */
pqsignal(signum, postmaster_death_handler);
diff --git a/src/backend/storage/ipc/signalfuncs.c b/src/backend/storage/ipc/signalfuncs.c
index 4bfbd57464..ade8d713aa 100644
--- a/src/backend/storage/ipc/signalfuncs.c
+++ b/src/backend/storage/ipc/signalfuncs.c
@@ -181,7 +181,7 @@ pg_rotate_logfile(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to rotate log files with adminpack 1.0"),
- /* translator: %s is a SQL function name */
+ /* translator: %s is a SQL function name */
errhint("Consider using %s, which is part of core, instead.",
"pg_logfile_rotate()"))));
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 106d227a5a..f838b0f758 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -906,7 +906,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress)
*/
if (progress)
{
- PGPROC *holder = BackendIdGetProc(lockholders->backendId);
+ PGPROC *holder = BackendIdGetProc(lockholders->backendId);
pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
holder->pid);
@@ -925,9 +925,10 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress)
PROGRESS_WAITFOR_DONE,
PROGRESS_WAITFOR_CURRENT_PID
};
- const int64 values[] = {
+ const int64 values[] = {
0, 0, 0
};
+
pgstat_progress_update_multi_param(3, index, values);
}
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 8191118b61..dba8c397fe 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -91,7 +91,7 @@ static const int NSmgr = lengthof(smgrsw);
*/
static HTAB *SMgrRelationHash = NULL;
-static dlist_head unowned_relns;
+static dlist_head unowned_relns;
/* local function prototypes */
static void smgrshutdown(int code, Datum arg);
@@ -713,7 +713,7 @@ smgrimmedsync(SMgrRelation reln, ForkNumber forknum)
void
AtEOXact_SMgr(void)
{
- dlist_mutable_iter iter;
+ dlist_mutable_iter iter;
/*
* Zap all unowned SMgrRelations. We rely on smgrclose() to remove each
@@ -721,8 +721,8 @@ AtEOXact_SMgr(void)
*/
dlist_foreach_modify(iter, &unowned_relns)
{
- SMgrRelation rel = dlist_container(SMgrRelationData, node,
- iter.cur);
+ SMgrRelation rel = dlist_container(SMgrRelationData, node,
+ iter.cur);
Assert(rel->smgr_owner == NULL);
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index 096735c807..705f229b27 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -548,8 +548,8 @@ RegisterSyncRequest(const FileTag *ftag, SyncRequestType type,
for (;;)
{
/*
- * Notify the checkpointer about it. If we fail to queue a message
- * in retryOnError mode, we have to sleep and try again ... ugly, but
+ * Notify the checkpointer about it. If we fail to queue a message in
+ * retryOnError mode, we have to sleep and try again ... ugly, but
* hopefully won't happen often.
*
* XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an
diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c
index ee9e349a5b..7bb81df970 100644
--- a/src/backend/tcop/dest.c
+++ b/src/backend/tcop/dest.c
@@ -113,8 +113,8 @@ DestReceiver *
CreateDestReceiver(CommandDest dest)
{
/*
- * It's ok to cast the constness away as any modification of the none receiver
- * would be a bug (which gets easier to catch this way).
+ * It's ok to cast the constness away as any modification of the none
+ * receiver would be a bug (which gets easier to catch this way).
*/
switch (dest)
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 69a691f18e..3a6a878ffa 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -3023,6 +3023,7 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
int len,
value;
bool fx_mode = false;
+
/* number of extra skipped characters (more than given in format string) */
int extra_skip = 0;
@@ -3049,8 +3050,8 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
/*
* In non FX (fixed format) mode one format string space or
* separator match to one space or separator in input string.
- * Or match nothing if there is no space or separator in
- * the current position of input string.
+ * Or match nothing if there is no space or separator in the
+ * current position of input string.
*/
extra_skip--;
if (isspace((unsigned char) *s) || is_separator_char(s))
@@ -3176,11 +3177,13 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
n->key->name)));
break;
case DCH_TZH:
+
/*
* Value of TZH might be negative. And the issue is that we
* might swallow minus sign as the separator. So, if we have
- * skipped more characters than specified in the format string,
- * then we consider prepending last skipped minus to TZH.
+ * skipped more characters than specified in the format
+ * string, then we consider prepending last skipped minus to
+ * TZH.
*/
if (*s == '+' || *s == '-' || *s == ' ')
{
diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c
index a3c6adaf64..f526106530 100644
--- a/src/backend/utils/adt/genfile.c
+++ b/src/backend/utils/adt/genfile.c
@@ -219,7 +219,7 @@ pg_read_file(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to read files with adminpack 1.0"),
- /* translator: %s is a SQL function name */
+ /* translator: %s is a SQL function name */
errhint("Consider using %s, which is part of core, instead.",
"pg_file_read()"))));
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 28e85e397e..f2be614310 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -88,7 +88,7 @@ static int point_inside(Point *p, int npts, Point *plist);
static inline void line_construct(LINE *result, Point *pt, float8 m);
static inline float8 line_sl(LINE *line);
static inline float8 line_invsl(LINE *line);
-static bool line_interpt_line(Point *result, LINE *l1, LINE *l2);
+static bool line_interpt_line(Point *result, LINE *l1, LINE *l2);
static bool line_contain_point(LINE *line, Point *point);
static float8 line_closept_point(Point *result, LINE *line, Point *pt);
@@ -96,10 +96,10 @@ static float8 line_closept_point(Point *result, LINE *line, Point *pt);
static inline void statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2);
static inline float8 lseg_sl(LSEG *lseg);
static inline float8 lseg_invsl(LSEG *lseg);
-static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
-static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
+static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
+static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
static int lseg_crossing(float8 x, float8 y, float8 px, float8 py);
-static bool lseg_contain_point(LSEG *lseg, Point *point);
+static bool lseg_contain_point(LSEG *lseg, Point *point);
static float8 lseg_closept_point(Point *result, LSEG *lseg, Point *pt);
static float8 lseg_closept_line(Point *result, LSEG *lseg, LINE *line);
static float8 lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg);
@@ -692,9 +692,9 @@ static bool
box_contain_box(BOX *contains_box, BOX *contained_box)
{
return FPge(contains_box->high.x, contained_box->high.x) &&
- FPle(contains_box->low.x, contained_box->low.x) &&
- FPge(contains_box->high.y, contained_box->high.y) &&
- FPle(contains_box->low.y, contained_box->low.y);
+ FPle(contains_box->low.x, contained_box->low.x) &&
+ FPge(contains_box->high.y, contained_box->high.y) &&
+ FPle(contains_box->low.y, contained_box->low.y);
}
@@ -2378,8 +2378,8 @@ dist_ppath(PG_FUNCTION_ARGS)
Assert(path->npts > 0);
/*
- * The distance from a point to a path is the smallest distance
- * from the point to any of its constituent segments.
+ * The distance from a point to a path is the smallest distance from the
+ * point to any of its constituent segments.
*/
for (i = 0; i < path->npts; i++)
{
@@ -2553,9 +2553,9 @@ lseg_interpt_line(Point *result, LSEG *lseg, LINE *line)
LINE tmp;
/*
- * First, we promote the line segment to a line, because we know how
- * to find the intersection point of two lines. If they don't have
- * an intersection point, we are done.
+ * First, we promote the line segment to a line, because we know how to
+ * find the intersection point of two lines. If they don't have an
+ * intersection point, we are done.
*/
line_construct(&tmp, &lseg->p[0], lseg_sl(lseg));
if (!line_interpt_line(&interpt, &tmp, line))
@@ -2602,8 +2602,8 @@ line_closept_point(Point *result, LINE *line, Point *point)
LINE tmp;
/*
- * We drop a perpendicular to find the intersection point. Ordinarily
- * we should always find it, but that can fail in the presence of NaN
+ * We drop a perpendicular to find the intersection point. Ordinarily we
+ * should always find it, but that can fail in the presence of NaN
* coordinates, and perhaps even from simple roundoff issues.
*/
line_construct(&tmp, point, line_invsl(line));
@@ -2693,8 +2693,8 @@ lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg)
return 0.0;
/*
- * Then, we find the closest points from the endpoints of the second
- * line segment, and keep the closest one.
+ * Then, we find the closest points from the endpoints of the second line
+ * segment, and keep the closest one.
*/
dist = lseg_closept_point(result, on_lseg, &to_lseg->p[0]);
d = lseg_closept_point(&point, on_lseg, &to_lseg->p[1]);
@@ -3063,7 +3063,7 @@ static bool
box_contain_point(BOX *box, Point *point)
{
return box->high.x >= point->x && box->low.x <= point->x &&
- box->high.y >= point->y && box->low.y <= point-> y;
+ box->high.y >= point->y && box->low.y <= point->y;
}
Datum
@@ -3150,7 +3150,7 @@ static bool
box_contain_lseg(BOX *box, LSEG *lseg)
{
return box_contain_point(box, &lseg->p[0]) &&
- box_contain_point(box, &lseg->p[1]);
+ box_contain_point(box, &lseg->p[1]);
}
Datum
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index bb4bac85f7..a9784d067c 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -207,7 +207,7 @@ IsValidJsonNumber(const char *str, int len)
*/
if (*str == '-')
{
- dummy_lex.input = unconstify(char *, str) + 1;
+ dummy_lex.input = unconstify(char *, str) +1;
dummy_lex.input_length = len - 1;
}
else
@@ -2192,7 +2192,7 @@ json_build_object(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument list must have even number of elements"),
- /* translator: %s is a SQL function name */
+ /* translator: %s is a SQL function name */
errhint("The arguments of %s must consist of alternating keys and values.",
"json_build_object()")));
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 036d771386..c742172bd8 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -1155,7 +1155,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument list must have even number of elements"),
- /* translator: %s is a SQL function name */
+ /* translator: %s is a SQL function name */
errhint("The arguments of %s must consist of alternating keys and values.",
"jsonb_build_object()")));
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 704e5720cf..f4dfc504d6 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -152,7 +152,7 @@ GenericMatchText(const char *s, int slen, const char *p, int plen, Oid collation
{
if (collation && !lc_ctype_is_c(collation) && collation != DEFAULT_COLLATION_OID)
{
- pg_locale_t locale = pg_newlocale_from_collation(collation);
+ pg_locale_t locale = pg_newlocale_from_collation(collation);
if (locale && !locale->deterministic)
ereport(ERROR,
diff --git a/src/backend/utils/adt/like_support.c b/src/backend/utils/adt/like_support.c
index 7528c80f7c..e2583bc680 100644
--- a/src/backend/utils/adt/like_support.c
+++ b/src/backend/utils/adt/like_support.c
@@ -262,9 +262,9 @@ match_pattern_prefix(Node *leftop,
* optimized equality or prefix tests use bytewise comparisons, which is
* not consistent with nondeterministic collations. The actual
* pattern-matching implementation functions will later error out that
- * pattern-matching is not supported with nondeterministic collations.
- * (We could also error out here, but by doing it later we get more
- * precise error messages.) (It should be possible to support at least
+ * pattern-matching is not supported with nondeterministic collations. (We
+ * could also error out here, but by doing it later we get more precise
+ * error messages.) (It should be possible to support at least
* Pattern_Prefix_Exact, but no point as along as the actual
* pattern-matching implementations don't support it.)
*
diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c
index c7df630c3c..70138feb29 100644
--- a/src/backend/utils/adt/numutils.c
+++ b/src/backend/utils/adt/numutils.c
@@ -182,7 +182,7 @@ invalid_syntax:
errmsg("invalid input syntax for type %s: \"%s\"",
"smallint", s)));
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
}
/*
@@ -258,7 +258,7 @@ invalid_syntax:
errmsg("invalid input syntax for type %s: \"%s\"",
"integer", s)));
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
}
/*
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index c5be472bce..00a9a33ecc 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -1101,8 +1101,8 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
/* enlarge output space if needed */
while (array_idx + matchctx->npatterns * 2 + 1 > array_len)
{
- array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */
- if (array_len > MaxAllocSize/sizeof(int))
+ array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */
+ if (array_len > MaxAllocSize / sizeof(int))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("too many regular expression matches")));
@@ -1117,8 +1117,9 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
for (i = 1; i <= matchctx->npatterns; i++)
{
- int so = pmatch[i].rm_so;
- int eo = pmatch[i].rm_eo;
+ int so = pmatch[i].rm_so;
+ int eo = pmatch[i].rm_eo;
+
matchctx->match_locs[array_idx++] = so;
matchctx->match_locs[array_idx++] = eo;
if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
@@ -1127,8 +1128,9 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
}
else
{
- int so = pmatch[0].rm_so;
- int eo = pmatch[0].rm_eo;
+ int so = pmatch[0].rm_so;
+ int eo = pmatch[0].rm_eo;
+
matchctx->match_locs[array_idx++] = so;
matchctx->match_locs[array_idx++] = eo;
if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
@@ -1190,10 +1192,10 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
* interest.
*
* Worst case: assume we need the maximum size (maxlen*eml), but take
- * advantage of the fact that the original string length in bytes is an
- * upper bound on the byte length of any fetched substring (and we know
- * that len+1 is safe to allocate because the varlena header is longer
- * than 1 byte).
+ * advantage of the fact that the original string length in bytes is
+ * an upper bound on the byte length of any fetched substring (and we
+ * know that len+1 is safe to allocate because the varlena header is
+ * longer than 1 byte).
*/
if (maxsiz > orig_len)
conv_bufsiz = orig_len + 1;
@@ -1248,9 +1250,10 @@ build_regexp_match_result(regexp_matches_ctx *matchctx)
}
else if (buf)
{
- int len = pg_wchar2mb_with_len(matchctx->wide_str + so,
- buf,
- eo - so);
+ int len = pg_wchar2mb_with_len(matchctx->wide_str + so,
+ buf,
+ eo - so);
+
Assert(len < bufsiz);
elems[i] = PointerGetDatum(cstring_to_text_with_len(buf, len));
nulls[i] = false;
@@ -1409,15 +1412,15 @@ build_regexp_split_result(regexp_matches_ctx *splitctx)
if (buf)
{
- int bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz;
- int len;
+ int bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz;
+ int len;
endpos = splitctx->match_locs[splitctx->next_match * 2];
if (endpos < startpos)
elog(ERROR, "invalid match starting position");
len = pg_wchar2mb_with_len(splitctx->wide_str + startpos,
buf,
- endpos-startpos);
+ endpos - startpos);
Assert(len < bufsiz);
return PointerGetDatum(cstring_to_text_with_len(buf, len));
}
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 095334b336..b9e0f5c048 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -635,10 +635,10 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
oldslot = trigdata->tg_trigslot;
/*
- * If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
- * made in the NO ACTION case; in RESTRICT cases we don't wish to
- * allow another row to be substituted.
+ * If another PK row now exists providing the old key values, we should
+ * not do anything. However, this check should only be made in the NO
+ * ACTION case; in RESTRICT cases we don't wish to allow another row to be
+ * substituted.
*/
if (is_no_action &&
ri_Check_Pk_Match(pk_rel, fk_rel, oldslot, riinfo))
@@ -651,8 +651,8 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict lookup (it's the
- * same query for delete and update cases)
+ * Fetch or prepare a saved plan for the restrict lookup (it's the same
+ * query for delete and update cases)
*/
ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_RESTRICT_CHECKREF);
@@ -713,7 +713,7 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
ri_PerformCheck(riinfo, &qkey, qplan,
fk_rel, pk_rel,
oldslot, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_SELECT);
if (SPI_finish() != SPI_OK_FINISH)
@@ -813,13 +813,13 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Build up the arguments from the key values
- * in the deleted PK tuple and delete the referencing rows
+ * We have a plan now. Build up the arguments from the key values in the
+ * deleted PK tuple and delete the referencing rows
*/
ri_PerformCheck(riinfo, &qkey, qplan,
fk_rel, pk_rel,
oldslot, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_DELETE);
if (SPI_finish() != SPI_OK_FINISH)
@@ -940,7 +940,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
ri_PerformCheck(riinfo, &qkey, qplan,
fk_rel, pk_rel,
oldslot, newslot,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_UPDATE);
if (SPI_finish() != SPI_OK_FINISH)
@@ -1119,7 +1119,7 @@ ri_set(TriggerData *trigdata, bool is_set_null)
ri_PerformCheck(riinfo, &qkey, qplan,
fk_rel, pk_rel,
oldslot, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_UPDATE);
if (SPI_finish() != SPI_OK_FINISH)
@@ -1132,18 +1132,17 @@ ri_set(TriggerData *trigdata, bool is_set_null)
else
{
/*
- * If we just deleted or updated the PK row whose key was equal to
- * the FK columns' default values, and a referencing row exists in
- * the FK table, we would have updated that row to the same values
- * it already had --- and RI_FKey_fk_upd_check_required would
- * hence believe no check is necessary. So we need to do another
- * lookup now and in case a reference still exists, abort the
- * operation. That is already implemented in the NO ACTION
- * trigger, so just run it. (This recheck is only needed in the
- * SET DEFAULT case, since CASCADE would remove such rows in case
- * of a DELETE operation or would change the FK key values in case
- * of an UPDATE, while SET NULL is certain to result in rows that
- * satisfy the FK constraint.)
+ * If we just deleted or updated the PK row whose key was equal to the
+ * FK columns' default values, and a referencing row exists in the FK
+ * table, we would have updated that row to the same values it already
+ * had --- and RI_FKey_fk_upd_check_required would hence believe no
+ * check is necessary. So we need to do another lookup now and in
+ * case a reference still exists, abort the operation. That is
+ * already implemented in the NO ACTION trigger, so just run it. (This
+ * recheck is only needed in the SET DEFAULT case, since CASCADE would
+ * remove such rows in case of a DELETE operation or would change the
+ * FK key values in case of an UPDATE, while SET NULL is certain to
+ * result in rows that satisfy the FK constraint.)
*/
return ri_restrict(trigdata, true);
}
@@ -1170,8 +1169,8 @@ RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel,
riinfo = ri_FetchConstraintInfo(trigger, pk_rel, true);
/*
- * If any old key value is NULL, the row could not have been
- * referenced by an FK row, so no check is needed.
+ * If any old key value is NULL, the row could not have been referenced by
+ * an FK row, so no check is needed.
*/
if (ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) != RI_KEYS_NONE_NULL)
return false;
@@ -1213,14 +1212,17 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
*/
if (ri_nullcheck == RI_KEYS_ALL_NULL)
return false;
+
/*
- * If some new key values are NULL, the behavior depends on the match type.
+ * If some new key values are NULL, the behavior depends on the match
+ * type.
*/
else if (ri_nullcheck == RI_KEYS_SOME_NULL)
{
switch (riinfo->confmatchtype)
{
case FKCONSTR_MATCH_SIMPLE:
+
/*
* If any new key value is NULL, the row must satisfy the
* constraint, so no check is needed.
@@ -1228,12 +1230,14 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
return false;
case FKCONSTR_MATCH_PARTIAL:
+
/*
* Don't know, must run full check.
*/
break;
case FKCONSTR_MATCH_FULL:
+
/*
* If some new key values are NULL, the row fails the
* constraint. We must not throw error here, because the row
@@ -1251,12 +1255,12 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
*/
/*
- * If the original row was inserted by our own transaction, we
- * must fire the trigger whether or not the keys are equal. This
- * is because our UPDATE will invalidate the INSERT so that the
- * INSERT RI trigger will not do anything; so we had better do the
- * UPDATE check. (We could skip this if we knew the INSERT
- * trigger already fired, but there is no easy way to know that.)
+ * If the original row was inserted by our own transaction, we must fire
+ * the trigger whether or not the keys are equal. This is because our
+ * UPDATE will invalidate the INSERT so that the INSERT RI trigger will
+ * not do anything; so we had better do the UPDATE check. (We could skip
+ * this if we knew the INSERT trigger already fired, but there is no easy
+ * way to know that.)
*/
xminDatum = slot_getsysattr(oldslot, MinTransactionIdAttributeNumber, &isnull);
Assert(!isnull);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 1e3bcb47b8..f911511158 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1566,7 +1566,7 @@ pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
*/
if (!ndistinct_enabled || !dependencies_enabled || !mcv_enabled)
{
- bool gotone = false;
+ bool gotone = false;
appendStringInfoString(&buf, " (");
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 4003631d8f..332dc860c4 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -755,8 +755,8 @@ bpchareq(PG_FUNCTION_ARGS)
pg_newlocale_from_collation(collid)->deterministic)
{
/*
- * Since we only care about equality or not-equality, we can avoid all the
- * expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all
+ * the expense of strcoll() here, and just do bitwise comparison.
*/
if (len1 != len2)
result = false;
@@ -793,8 +793,8 @@ bpcharne(PG_FUNCTION_ARGS)
pg_newlocale_from_collation(collid)->deterministic)
{
/*
- * Since we only care about equality or not-equality, we can avoid all the
- * expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all
+ * the expense of strcoll() here, and just do bitwise comparison.
*/
if (len1 != len2)
result = true;
@@ -983,7 +983,7 @@ hashbpchar(PG_FUNCTION_ARGS)
Oid collid = PG_GET_COLLATION();
char *keydata;
int keylen;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
@@ -1010,7 +1010,7 @@ hashbpchar(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, keydata, keylen);
@@ -1043,7 +1043,7 @@ hashbpcharextended(PG_FUNCTION_ARGS)
Oid collid = PG_GET_COLLATION();
char *keydata;
int keylen;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
@@ -1071,7 +1071,7 @@ hashbpcharextended(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index f82ce92ce3..e166effa5e 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -1152,7 +1152,7 @@ text_position_setup(text *t1, text *t2, Oid collid, TextPositionState *state)
{
int len1 = VARSIZE_ANY_EXHDR(t1);
int len2 = VARSIZE_ANY_EXHDR(t2);
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
check_collation_set(collid);
@@ -1723,11 +1723,11 @@ texteq(PG_FUNCTION_ARGS)
len2;
/*
- * Since we only care about equality or not-equality, we can avoid all the
- * expense of strcoll() here, and just do bitwise comparison. In fact, we
- * don't even have to do a bitwise comparison if we can show the lengths
- * of the strings are unequal; which might save us from having to detoast
- * one or both values.
+ * Since we only care about equality or not-equality, we can avoid all
+ * the expense of strcoll() here, and just do bitwise comparison. In
+ * fact, we don't even have to do a bitwise comparison if we can show
+ * the lengths of the strings are unequal; which might save us from
+ * having to detoast one or both values.
*/
len1 = toast_raw_datum_size(arg1);
len2 = toast_raw_datum_size(arg2);
@@ -1873,7 +1873,7 @@ text_starts_with(PG_FUNCTION_ARGS)
Datum arg1 = PG_GETARG_DATUM(0);
Datum arg2 = PG_GETARG_DATUM(1);
Oid collid = PG_GET_COLLATION();
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
bool result;
Size len1,
len2;
@@ -5346,7 +5346,7 @@ text_concat_ws(PG_FUNCTION_ARGS)
Datum
text_left(PG_FUNCTION_ARGS)
{
- int n = PG_GETARG_INT32(1);
+ int n = PG_GETARG_INT32(1);
if (n < 0)
{
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index d0f6f715e6..969884d485 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2635,9 +2635,9 @@ RelationClearRelation(Relation relation, bool rebuild)
* there should be no PartitionDirectory with a pointer to the old
* entry.
*
- * Note that newrel and relation have already been swapped, so
- * the "old" partition descriptor is actually the one hanging off
- * of newrel.
+ * Note that newrel and relation have already been swapped, so the
+ * "old" partition descriptor is actually the one hanging off of
+ * newrel.
*/
MemoryContextSetParent(newrel->rd_pdcxt, relation->rd_pdcxt);
newrel->rd_partdesc = NULL;
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index f870a07d2a..7ad0aa0b94 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -656,7 +656,7 @@ EstimateRelationMapSpace(void)
void
SerializeRelationMap(Size maxSize, char *startAddress)
{
- SerializedActiveRelMaps *relmaps;
+ SerializedActiveRelMaps *relmaps;
Assert(maxSize >= EstimateRelationMapSpace());
@@ -673,7 +673,7 @@ SerializeRelationMap(Size maxSize, char *startAddress)
void
RestoreRelationMap(char *startAddress)
{
- SerializedActiveRelMaps *relmaps;
+ SerializedActiveRelMaps *relmaps;
if (active_shared_updates.num_mappings != 0 ||
active_local_updates.num_mappings != 0 ||
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index ac98c19155..476538354d 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -1235,7 +1235,7 @@ GetSysCacheOid(int cacheId,
result = heap_getattr(tuple, oidcol,
SysCache[cacheId]->cc_tupdesc,
&isNull);
- Assert(!isNull); /* columns used as oids should never be NULL */
+ Assert(!isNull); /* columns used as oids should never be NULL */
ReleaseSysCache(tuple);
return result;
}
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index ead8b371a7..f039567e20 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -554,7 +554,7 @@ static void
record_C_func(HeapTuple procedureTuple,
PGFunction user_fn, const Pg_finfo_record *inforec)
{
- Oid fn_oid = ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid;
+ Oid fn_oid = ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid;
CFuncHashTabEntry *entry;
bool found;
diff --git a/src/backend/utils/hash/hashfn.c b/src/backend/utils/hash/hashfn.c
index 9f5e2925de..66985cc2e9 100644
--- a/src/backend/utils/hash/hashfn.c
+++ b/src/backend/utils/hash/hashfn.c
@@ -653,6 +653,7 @@ hash_uint32_extended(uint32 k, uint64 seed)
/* report the result */
PG_RETURN_UINT64(((uint64) b << 32) | c);
}
+
/*
* string_hash: hash function for keys that are NUL-terminated strings.
*
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index a5950c1e8c..3bf96de256 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -39,7 +39,7 @@ volatile uint32 CritSectionCount = 0;
int MyProcPid;
pg_time_t MyStartTime;
-TimestampTz MyStartTimestamp;
+TimestampTz MyStartTimestamp;
struct Port *MyProcPort;
int32 MyCancelKey;
int MyPMChildSlot;
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index c180a9910d..83c9514856 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -592,8 +592,8 @@ InitializeSessionUserId(const char *rolename, Oid roleid)
AssertState(!OidIsValid(AuthenticatedUserId));
/*
- * Make sure syscache entries are flushed for recent catalog changes.
- * This allows us to find roles that were created on-the-fly during
+ * Make sure syscache entries are flushed for recent catalog changes. This
+ * allows us to find roles that were created on-the-fly during
* authentication.
*/
AcceptInvalidationMessages();
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index dea5dcb368..bc2be43e21 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -561,7 +561,7 @@ char *
pg_any_to_server(const char *s, int len, int encoding)
{
if (len <= 0)
- return unconstify(char *, s); /* empty string is always valid */
+ return unconstify(char *, s); /* empty string is always valid */
if (encoding == DatabaseEncoding->encoding ||
encoding == PG_SQL_ASCII)
@@ -634,11 +634,11 @@ char *
pg_server_to_any(const char *s, int len, int encoding)
{
if (len <= 0)
- return unconstify(char *, s); /* empty string is always valid */
+ return unconstify(char *, s); /* empty string is always valid */
if (encoding == DatabaseEncoding->encoding ||
encoding == PG_SQL_ASCII)
- return unconstify(char *, s); /* assume data is valid */
+ return unconstify(char *, s); /* assume data is valid */
if (DatabaseEncoding->encoding == PG_SQL_ASCII)
{
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index ed51da4234..8acfa303c5 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -459,13 +459,13 @@ const struct config_enum_entry ssl_protocol_versions_info[] = {
static struct config_enum_entry shared_memory_options[] = {
#ifndef WIN32
- { "sysv", SHMEM_TYPE_SYSV, false},
+ {"sysv", SHMEM_TYPE_SYSV, false},
#endif
#ifndef EXEC_BACKEND
- { "mmap", SHMEM_TYPE_MMAP, false},
+ {"mmap", SHMEM_TYPE_MMAP, false},
#endif
#ifdef WIN32
- { "windows", SHMEM_TYPE_WINDOWS, false},
+ {"windows", SHMEM_TYPE_WINDOWS, false},
#endif
{NULL, 0, false}
};
@@ -1599,6 +1599,7 @@ static struct config_bool ConfigureNamesBool[] =
true,
NULL, NULL, NULL
},
+
/*
* WITH OIDS support, and consequently default_with_oids, was removed in
* PostgreSQL 12, but we tolerate the parameter being set to false to
@@ -8894,21 +8895,21 @@ ShowAllGUCConfig(DestReceiver *dest)
struct config_generic **
get_explain_guc_options(int *num)
{
- int i;
+ int i;
struct config_generic **result;
*num = 0;
/*
- * Allocate enough space to fit all GUC_EXPLAIN options. We may not
- * need all the space, but there are fairly few such options so we
- * don't waste a lot of memory.
+ * Allocate enough space to fit all GUC_EXPLAIN options. We may not need
+ * all the space, but there are fairly few such options so we don't waste
+ * a lot of memory.
*/
result = palloc(sizeof(struct config_generic *) * num_guc_explain_variables);
for (i = 0; i < num_guc_variables; i++)
{
- bool modified;
+ bool modified;
struct config_generic *conf = guc_variables[i];
/* return only options visible to the user */
@@ -8927,15 +8928,17 @@ get_explain_guc_options(int *num)
switch (conf->vartype)
{
case PGC_BOOL:
- {
- struct config_bool *lconf = (struct config_bool *) conf;
- modified = (lconf->boot_val != *(lconf->variable));
- }
- break;
+ {
+ struct config_bool *lconf = (struct config_bool *) conf;
+
+ modified = (lconf->boot_val != *(lconf->variable));
+ }
+ break;
case PGC_INT:
{
struct config_int *lconf = (struct config_int *) conf;
+
modified = (lconf->boot_val != *(lconf->variable));
}
break;
@@ -8943,6 +8946,7 @@ get_explain_guc_options(int *num)
case PGC_REAL:
{
struct config_real *lconf = (struct config_real *) conf;
+
modified = (lconf->boot_val != *(lconf->variable));
}
break;
@@ -8950,6 +8954,7 @@ get_explain_guc_options(int *num)
case PGC_STRING:
{
struct config_string *lconf = (struct config_string *) conf;
+
modified = (strcmp(lconf->boot_val, *(lconf->variable)) != 0);
}
break;
@@ -8957,6 +8962,7 @@ get_explain_guc_options(int *num)
case PGC_ENUM:
{
struct config_enum *lconf = (struct config_enum *) conf;
+
modified = (lconf->boot_val != *(lconf->variable));
}
break;
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index 62e6b652af..1135ca9122 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -1676,6 +1676,7 @@ ensure_active_superblock(dsa_area *area, dsa_area_pool *pool,
return false;
}
}
+
/*
* This shouldn't happen: get_best_segment() or make_new_segment()
* promised that we can successfully allocate npages.
@@ -2267,7 +2268,7 @@ static void
check_for_freed_segments_locked(dsa_area *area)
{
size_t freed_segment_counter;
- int i;
+ int i;
Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
freed_segment_counter = area->control->freed_segment_counter;
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 5abc64f5f7..9a9069e551 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -185,7 +185,7 @@ static const char *default_timezone = NULL;
"# allows any local user to connect as any PostgreSQL user, including\n" \
"# the database superuser. If you do not trust all your local users,\n" \
"# use another authentication method.\n"
-static bool authwarning = false;
+static bool authwarning = false;
/*
* Centralized knowledge of switches to pass to backend
@@ -2431,11 +2431,11 @@ check_need_password(const char *authmethodlocal, const char *authmethodhost)
!(pwprompt || pwfilename))
{
pg_log_error("must specify a password for the superuser to enable %s authentication",
- (strcmp(authmethodlocal, "md5") == 0 ||
- strcmp(authmethodlocal, "password") == 0 ||
- strcmp(authmethodlocal, "scram-sha-256") == 0)
- ? authmethodlocal
- : authmethodhost);
+ (strcmp(authmethodlocal, "md5") == 0 ||
+ strcmp(authmethodlocal, "password") == 0 ||
+ strcmp(authmethodlocal, "scram-sha-256") == 0)
+ ? authmethodlocal
+ : authmethodhost);
exit(1);
}
}
@@ -3067,8 +3067,8 @@ main(int argc, char *argv[])
char pg_ctl_path[MAXPGPATH];
/*
- * Ensure that buffering behavior of stdout matches what it is
- * in interactive usage (at least on most platforms). This prevents
+ * Ensure that buffering behavior of stdout matches what it is in
+ * interactive usage (at least on most platforms). This prevents
* unexpected output ordering when, eg, output is redirected to a file.
* POSIX says we must do this before any other usage of these files.
*/
diff --git a/src/bin/pg_archivecleanup/pg_archivecleanup.c b/src/bin/pg_archivecleanup/pg_archivecleanup.c
index 8f89be64cd..bb4257ff18 100644
--- a/src/bin/pg_archivecleanup/pg_archivecleanup.c
+++ b/src/bin/pg_archivecleanup/pg_archivecleanup.c
@@ -123,7 +123,8 @@ CleanupPriorWALFiles(void)
if ((IsXLogFileName(walfile) || IsPartialXLogFileName(walfile)) &&
strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
{
- char WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
+ char WALFilePath[MAXPGPATH * 2]; /* the file path
+ * including archive */
/*
* Use the original file name again now, including any
diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c
index 5301e88375..86859b7813 100644
--- a/src/bin/pg_basebackup/pg_receivewal.c
+++ b/src/bin/pg_basebackup/pg_receivewal.c
@@ -633,7 +633,7 @@ main(int argc, char **argv)
{
/* translator: second %s is an option name */
pg_log_error("%s needs a slot to be specified using --slot",
- do_drop_slot ? "--drop-slot" : "--create-slot");
+ do_drop_slot ? "--drop-slot" : "--create-slot");
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
exit(1);
@@ -728,7 +728,7 @@ main(int argc, char **argv)
if (do_drop_slot)
{
if (verbose)
- pg_log_info("dropping replication slot \"%s\"", replication_slot);
+ pg_log_info("dropping replication slot \"%s\"", replication_slot);
if (!DropReplicationSlot(conn, replication_slot))
exit(1);
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index 547eb8de86..2e45c14642 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -133,9 +133,9 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested)
if (verbose)
pg_log_info("confirming write up to %X/%X, flush to %X/%X (slot %s)",
- (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
- (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn,
- replication_slot);
+ (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
+ (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn,
+ replication_slot);
replybuf[len] = 'r';
len += 1;
@@ -1021,11 +1021,11 @@ prepareToTerminate(PGconn *conn, XLogRecPtr endpos, bool keepalive, XLogRecPtr l
{
if (keepalive)
pg_log_info("endpos %X/%X reached by keepalive",
- (uint32) (endpos >> 32), (uint32) endpos);
+ (uint32) (endpos >> 32), (uint32) endpos);
else
pg_log_info("endpos %X/%X reached by record at %X/%X",
- (uint32) (endpos >> 32), (uint32) (endpos),
- (uint32) (lsn >> 32), (uint32) lsn);
+ (uint32) (endpos >> 32), (uint32) (endpos),
+ (uint32) (lsn >> 32), (uint32) lsn);
}
}
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index e9854527e2..bf99a7dbc8 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -378,8 +378,8 @@ CheckServerVersionForStreaming(PGconn *conn)
const char *serverver = PQparameterStatus(conn, "server_version");
pg_log_error("incompatible server version %s; client does not support streaming from server versions older than %s",
- serverver ? serverver : "'unknown'",
- "9.3");
+ serverver ? serverver : "'unknown'",
+ "9.3");
return false;
}
else if (serverMajor > maxServerMajor)
@@ -387,8 +387,8 @@ CheckServerVersionForStreaming(PGconn *conn)
const char *serverver = PQparameterStatus(conn, "server_version");
pg_log_error("incompatible server version %s; client does not support streaming from server versions newer than %s",
- serverver ? serverver : "'unknown'",
- PG_VERSION);
+ serverver ? serverver : "'unknown'",
+ PG_VERSION);
return false;
}
return true;
@@ -620,8 +620,8 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream)
if (stream->startpos > stoppos)
{
pg_log_error("server stopped streaming timeline %u at %X/%X, but reported next timeline %u to begin at %X/%X",
- stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos,
- newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos);
+ stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos,
+ newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos);
goto error;
}
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index b402e49896..522a245088 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -1021,9 +1021,9 @@ findParentsByOid(TableInfo *self,
if (parent == NULL)
{
pg_log_error("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
- inhinfo[i].inhparent,
- self->dobj.name,
- oid);
+ inhinfo[i].inhparent,
+ self->dobj.name,
+ oid);
exit_nicely(1);
}
self->parents[j++] = parent;
diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c
index a0d7644a8a..952caef52c 100644
--- a/src/bin/pg_dump/compress_io.c
+++ b/src/bin/pg_dump/compress_io.c
@@ -235,7 +235,7 @@ InitCompressorZlib(CompressorState *cs, int level)
if (deflateInit(zp, level) != Z_OK)
fatal("could not initialize compression library: %s",
- zp->msg);
+ zp->msg);
/* Just be paranoid - maybe End is called after Start, with no Write */
zp->next_out = (void *) cs->zlibOut;
@@ -334,7 +334,7 @@ ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
if (inflateInit(zp) != Z_OK)
fatal("could not initialize compression library: %s",
- zp->msg);
+ zp->msg);
/* no minimal chunk size for zlib */
while ((cnt = readF(AH, &buf, &buflen)))
@@ -586,7 +586,7 @@ cfread(void *ptr, int size, cfp *fp)
const char *errmsg = gzerror(fp->compressedfp, &errnum);
fatal("could not read from input file: %s",
- errnum == Z_ERRNO ? strerror(errno) : errmsg);
+ errnum == Z_ERRNO ? strerror(errno) : errmsg);
}
}
else
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index 3dfdae3a57..7152fd6457 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -1158,7 +1158,7 @@ parseWorkerCommand(ArchiveHandle *AH, TocEntry **te, T_Action *act,
}
else
fatal("unrecognized command received from master: \"%s\"",
- msg);
+ msg);
}
/*
@@ -1201,7 +1201,7 @@ parseWorkerResponse(ArchiveHandle *AH, TocEntry *te,
}
else
fatal("invalid message received from worker: \"%s\"",
- msg);
+ msg);
return status;
}
@@ -1439,7 +1439,7 @@ ListenToWorkers(ArchiveHandle *AH, ParallelState *pstate, bool do_wait)
}
else
fatal("invalid message received from worker: \"%s\"",
- msg);
+ msg);
/* Free the string returned from getMessageFromWorker */
free(msg);
@@ -1744,7 +1744,7 @@ pgpipe(int handles[2])
if ((s = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
{
pg_log_error("pgpipe: could not create socket: error code %d",
- WSAGetLastError());
+ WSAGetLastError());
return -1;
}
@@ -1755,21 +1755,21 @@ pgpipe(int handles[2])
if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
{
pg_log_error("pgpipe: could not bind: error code %d",
- WSAGetLastError());
+ WSAGetLastError());
closesocket(s);
return -1;
}
if (listen(s, 1) == SOCKET_ERROR)
{
pg_log_error("pgpipe: could not listen: error code %d",
- WSAGetLastError());
+ WSAGetLastError());
closesocket(s);
return -1;
}
if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
{
pg_log_error("pgpipe: getsockname() failed: error code %d",
- WSAGetLastError());
+ WSAGetLastError());
closesocket(s);
return -1;
}
@@ -1780,7 +1780,7 @@ pgpipe(int handles[2])
if ((tmp_sock = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
{
pg_log_error("pgpipe: could not create second socket: error code %d",
- WSAGetLastError());
+ WSAGetLastError());
closesocket(s);
return -1;
}
@@ -1789,7 +1789,7 @@ pgpipe(int handles[2])
if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
{
pg_log_error("pgpipe: could not connect socket: error code %d",
- WSAGetLastError());
+ WSAGetLastError());
closesocket(handles[1]);
handles[1] = -1;
closesocket(s);
@@ -1798,7 +1798,7 @@ pgpipe(int handles[2])
if ((tmp_sock = accept(s, (SOCKADDR *) &serv_addr, &len)) == PGINVALID_SOCKET)
{
pg_log_error("pgpipe: could not accept connection: error code %d",
- WSAGetLastError());
+ WSAGetLastError());
closesocket(handles[1]);
handles[1] = -1;
closesocket(s);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index d764d36936..564772ea7e 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -324,7 +324,7 @@ ProcessArchiveRestoreOptions(Archive *AHX)
break;
default:
fatal("unexpected section code %d",
- (int) te->section);
+ (int) te->section);
break;
}
}
@@ -608,7 +608,7 @@ RestoreArchive(Archive *AHX)
{
/* complain and emit unmodified command */
pg_log_warning("could not find where to insert IF EXISTS in statement \"%s\"",
- dropStmtOrig);
+ dropStmtOrig);
appendPQExpBufferStr(ftStmt, dropStmt);
}
}
@@ -889,7 +889,7 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
_selectOutputSchema(AH, te->namespace);
pg_log_info("processing data for table \"%s.%s\"",
- te->namespace, te->tag);
+ te->namespace, te->tag);
/*
* In parallel restore, if we created the table earlier in
@@ -1288,8 +1288,8 @@ EndRestoreBlobs(ArchiveHandle *AH)
pg_log_info(ngettext("restored %d large object",
"restored %d large objects",
- AH->blobCount),
- AH->blobCount);
+ AH->blobCount),
+ AH->blobCount);
}
@@ -1320,12 +1320,12 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
loOid = lo_create(AH->connection, oid);
if (loOid == 0 || loOid != oid)
fatal("could not create large object %u: %s",
- oid, PQerrorMessage(AH->connection));
+ oid, PQerrorMessage(AH->connection));
}
AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
if (AH->loFd == -1)
fatal("could not open large object %u: %s",
- oid, PQerrorMessage(AH->connection));
+ oid, PQerrorMessage(AH->connection));
}
else
{
@@ -1429,7 +1429,7 @@ SortTocFromFile(Archive *AHX)
te = getTocEntryByDumpId(AH, id);
if (!te)
fatal("could not find entry for ID %d",
- id);
+ id);
/* Mark it wanted */
ropt->idWanted[id - 1] = true;
@@ -1662,10 +1662,10 @@ dump_lo_buf(ArchiveHandle *AH)
pg_log_debug(ngettext("wrote %lu byte of large object data (result = %lu)",
"wrote %lu bytes of large object data (result = %lu)",
AH->lo_buf_used),
- (unsigned long) AH->lo_buf_used, (unsigned long) res);
+ (unsigned long) AH->lo_buf_used, (unsigned long) res);
if (res != AH->lo_buf_used)
fatal("could not write to large object (result: %lu, expected: %lu)",
- (unsigned long) res, (unsigned long) AH->lo_buf_used);
+ (unsigned long) res, (unsigned long) AH->lo_buf_used);
}
else
{
@@ -1772,12 +1772,12 @@ warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
{
pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s",
- AH->currentTE->dumpId,
- AH->currentTE->catalogId.tableoid,
- AH->currentTE->catalogId.oid,
- AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
- AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
- AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
+ AH->currentTE->dumpId,
+ AH->currentTE->catalogId.tableoid,
+ AH->currentTE->catalogId.oid,
+ AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
+ AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
+ AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
}
AH->lastErrorStage = AH->stage;
AH->lastErrorTE = AH->currentTE;
@@ -2111,7 +2111,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
fatal("directory name too long: \"%s\"",
- AH->fSpec);
+ AH->fSpec);
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
{
AH->format = archDirectory;
@@ -2121,7 +2121,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
#ifdef HAVE_LIBZ
if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
fatal("directory name too long: \"%s\"",
- AH->fSpec);
+ AH->fSpec);
if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
{
AH->format = archDirectory;
@@ -2129,7 +2129,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
}
#endif
fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
- AH->fSpec);
+ AH->fSpec);
fh = NULL; /* keep compiler quiet */
}
else
@@ -2152,7 +2152,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
fatal("could not read input file: %m");
else
fatal("input file is too short (read %lu, expected 5)",
- (unsigned long) cnt);
+ (unsigned long) cnt);
}
/* Save it, just in case we need it later */
@@ -2321,7 +2321,7 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt,
AH->currUser = NULL; /* unknown */
AH->currSchema = NULL; /* ditto */
AH->currTablespace = NULL; /* ditto */
- AH->currTableAm = NULL; /* ditto */
+ AH->currTableAm = NULL; /* ditto */
AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
@@ -2465,11 +2465,11 @@ mark_dump_job_done(ArchiveHandle *AH,
void *callback_data)
{
pg_log_info("finished item %d %s %s",
- te->dumpId, te->desc, te->tag);
+ te->dumpId, te->desc, te->tag);
if (status != 0)
fatal("worker process failed: exit code %d",
- status);
+ status);
}
@@ -2589,7 +2589,7 @@ ReadToc(ArchiveHandle *AH)
/* Sanity check */
if (te->dumpId <= 0)
fatal("entry ID %d out of range -- perhaps a corrupt TOC",
- te->dumpId);
+ te->dumpId);
te->hadDumper = ReadInt(AH);
@@ -2702,7 +2702,7 @@ ReadToc(ArchiveHandle *AH)
AH->ReadExtraTocPtr(AH, te);
pg_log_debug("read TOC entry %d (ID %d) for %s %s",
- i, te->dumpId, te->desc, te->tag);
+ i, te->dumpId, te->desc, te->tag);
/* link completed entry into TOC circular list */
te->prev = AH->toc->prev;
@@ -2738,12 +2738,12 @@ processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
encoding = pg_char_to_encoding(ptr1);
if (encoding < 0)
fatal("unrecognized encoding \"%s\"",
- ptr1);
+ ptr1);
AH->public.encoding = encoding;
}
else
fatal("invalid ENCODING item: %s",
- te->defn);
+ te->defn);
free(defn);
}
@@ -2761,7 +2761,7 @@ processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
AH->public.std_strings = false;
else
fatal("invalid STDSTRINGS item: %s",
- te->defn);
+ te->defn);
}
static void
@@ -3193,7 +3193,7 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user)
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
/* NOT warn_or_exit_horribly... use -O instead to skip this. */
fatal("could not set session user to \"%s\": %s",
- user, PQerrorMessage(AH->connection));
+ user, PQerrorMessage(AH->connection));
PQclear(res);
}
@@ -3415,7 +3415,8 @@ static void
_selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
{
PQExpBuffer cmd;
- const char *want, *have;
+ const char *want,
+ *have;
have = AH->currTableAm;
want = tableam;
@@ -3530,7 +3531,7 @@ _getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH)
}
pg_log_warning("don't know how to set owner for object type \"%s\"",
- type);
+ type);
}
/*
@@ -3688,7 +3689,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
else
{
pg_log_warning("don't know how to set owner for object type \"%s\"",
- te->desc);
+ te->desc);
}
}
@@ -3805,12 +3806,12 @@ ReadHead(ArchiveHandle *AH)
if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
fatal("unsupported version (%d.%d) in file header",
- vmaj, vmin);
+ vmaj, vmin);
AH->intSize = AH->ReadBytePtr(AH);
if (AH->intSize > 32)
fatal("sanity check on integer size (%lu) failed",
- (unsigned long) AH->intSize);
+ (unsigned long) AH->intSize);
if (AH->intSize > sizeof(int))
pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
@@ -3824,7 +3825,7 @@ ReadHead(ArchiveHandle *AH)
if (AH->format != fmt)
fatal("expected format (%d) differs from format found in file (%d)",
- AH->format, fmt);
+ AH->format, fmt);
}
if (AH->version >= K_VERS_1_2)
@@ -3995,8 +3996,8 @@ restore_toc_entries_prefork(ArchiveHandle *AH, TocEntry *pending_list)
{
/* OK, restore the item and update its dependencies */
pg_log_info("processing item %d %s %s",
- next_work_item->dumpId,
- next_work_item->desc, next_work_item->tag);
+ next_work_item->dumpId,
+ next_work_item->desc, next_work_item->tag);
(void) restore_toc_entry(AH, next_work_item, false);
@@ -4085,8 +4086,8 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
{
pg_log_info("skipping item %d %s %s",
- next_work_item->dumpId,
- next_work_item->desc, next_work_item->tag);
+ next_work_item->dumpId,
+ next_work_item->desc, next_work_item->tag);
/* Update its dependencies as though we'd completed it */
reduce_dependencies(AH, next_work_item, &ready_list);
/* Loop around to see if anything else can be dispatched */
@@ -4094,8 +4095,8 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
}
pg_log_info("launching item %d %s %s",
- next_work_item->dumpId,
- next_work_item->desc, next_work_item->tag);
+ next_work_item->dumpId,
+ next_work_item->desc, next_work_item->tag);
/* Dispatch to some worker */
DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE,
@@ -4186,7 +4187,7 @@ restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
for (te = pending_list->pending_next; te != pending_list; te = te->pending_next)
{
pg_log_info("processing missed item %d %s %s",
- te->dumpId, te->desc, te->tag);
+ te->dumpId, te->desc, te->tag);
(void) restore_toc_entry(AH, te, false);
}
}
@@ -4472,7 +4473,7 @@ mark_restore_job_done(ArchiveHandle *AH,
ParallelReadyList *ready_list = (ParallelReadyList *) callback_data;
pg_log_info("finished item %d %s %s",
- te->dumpId, te->desc, te->tag);
+ te->dumpId, te->desc, te->tag);
if (status == WORKER_CREATE_DONE)
mark_create_done(AH, te);
@@ -4485,7 +4486,7 @@ mark_restore_job_done(ArchiveHandle *AH,
AH->public.n_errors++;
else if (status != 0)
fatal("worker process failed: exit code %d",
- status);
+ status);
reduce_dependencies(AH, te, ready_list);
}
@@ -4657,7 +4658,7 @@ repoint_table_dependencies(ArchiveHandle *AH)
te->dependencies[i] = tabledataid;
te->dataLength = Max(te->dataLength, tabledatate->dataLength);
pg_log_debug("transferring dependency %d -> %d to %d",
- te->dumpId, olddep, tabledataid);
+ te->dumpId, olddep, tabledataid);
}
}
}
@@ -4791,7 +4792,7 @@ static void
inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
{
pg_log_info("table \"%s\" could not be created, will not restore its data",
- te->tag);
+ te->tag);
if (AH->tableDataId[te->dumpId] != 0)
{
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index ae5306b9da..497b81b684 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -445,7 +445,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
default: /* Always have a default */
fatal("unrecognized data block type (%d) while searching archive",
- blkType);
+ blkType);
break;
}
_readBlockHeader(AH, &blkType, &id);
@@ -482,7 +482,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
/* Are we sane? */
if (id != te->dumpId)
fatal("found unexpected block ID (%d) when reading data -- expected %d",
- id, te->dumpId);
+ id, te->dumpId);
switch (blkType)
{
@@ -496,7 +496,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
default: /* Always have a default */
fatal("unrecognized data block type %d while restoring archive",
- blkType);
+ blkType);
break;
}
}
@@ -910,11 +910,11 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
int byt;
/*
- * Note: if we are at EOF with a pre-1.3 input file, we'll fatal()
- * inside ReadInt rather than returning EOF. It doesn't seem worth
- * jumping through hoops to deal with that case better, because no such
- * files are likely to exist in the wild: only some 7.1 development
- * versions of pg_dump ever generated such files.
+ * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() inside
+ * ReadInt rather than returning EOF. It doesn't seem worth jumping
+ * through hoops to deal with that case better, because no such files are
+ * likely to exist in the wild: only some 7.1 development versions of
+ * pg_dump ever generated such files.
*/
if (AH->version < K_VERS_1_3)
*type = BLK_DATA;
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 9fd3b8a79f..8af5c7bebd 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -53,7 +53,7 @@ _check_database_version(ArchiveHandle *AH)
remoteversion > AH->public.maxRemoteVersion))
{
pg_log_error("server version: %s; %s version: %s",
- remoteversion_str, progname, PG_VERSION);
+ remoteversion_str, progname, PG_VERSION);
fatal("aborting because of server version mismatch");
}
@@ -138,7 +138,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
newuser = requser;
pg_log_info("connecting to database \"%s\" as user \"%s\"",
- newdb, newuser);
+ newdb, newuser);
password = AH->savedPassword;
@@ -182,7 +182,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
{
if (!PQconnectionNeedsPassword(newConn))
fatal("could not reconnect to database: %s",
- PQerrorMessage(newConn));
+ PQerrorMessage(newConn));
PQfinish(newConn);
if (password)
@@ -304,8 +304,8 @@ ConnectDatabase(Archive *AHX,
/* check to see that the backend connection was successfully made */
if (PQstatus(AH->connection) == CONNECTION_BAD)
fatal("connection to database \"%s\" failed: %s",
- PQdb(AH->connection) ? PQdb(AH->connection) : "",
- PQerrorMessage(AH->connection));
+ PQdb(AH->connection) ? PQdb(AH->connection) : "",
+ PQerrorMessage(AH->connection));
/* Start strict; later phases may override this. */
PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH,
@@ -383,7 +383,7 @@ static void
die_on_query_failure(ArchiveHandle *AH, const char *query)
{
pg_log_error("query failed: %s",
- PQerrorMessage(AH->connection));
+ PQerrorMessage(AH->connection));
fatal("query was: %s", query);
}
@@ -427,8 +427,8 @@ ExecuteSqlQueryForSingleRow(Archive *fout, const char *query)
if (ntups != 1)
fatal(ngettext("query returned %d row instead of one: %s",
"query returned %d rows instead of one: %s",
- ntups),
- ntups, query);
+ ntups),
+ ntups, query);
return res;
}
@@ -571,7 +571,7 @@ ExecuteSqlCommandBuf(Archive *AHX, const char *buf, size_t bufLen)
if (AH->pgCopyIn &&
PQputCopyData(AH->connection, buf, bufLen) <= 0)
fatal("error returned by PQputCopyData: %s",
- PQerrorMessage(AH->connection));
+ PQerrorMessage(AH->connection));
}
else if (AH->outputKind == OUTPUT_OTHERDATA)
{
@@ -620,7 +620,7 @@ EndDBCopyMode(Archive *AHX, const char *tocEntryTag)
if (PQputCopyEnd(AH->connection, NULL) <= 0)
fatal("error returned by PQputCopyEnd: %s",
- PQerrorMessage(AH->connection));
+ PQerrorMessage(AH->connection));
/* Check command status and return to normal libpq state */
res = PQgetResult(AH->connection);
@@ -632,7 +632,7 @@ EndDBCopyMode(Archive *AHX, const char *tocEntryTag)
/* Do this to ensure we've pumped libpq back to idle state */
if (PQgetResult(AH->connection) != NULL)
pg_log_warning("unexpected extra results during COPY of table \"%s\"",
- tocEntryTag);
+ tocEntryTag);
AH->pgCopyIn = false;
}
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 70eca82c91..cfa2f6ec74 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -348,7 +348,7 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
if (dLen > 0 && cfwrite(data, dLen, ctx->dataFH) != dLen)
fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ get_cfp_error(ctx->dataFH));
return;
@@ -452,7 +452,7 @@ _LoadBlobs(ArchiveHandle *AH)
/* Can't overflow because line and fname are the same length. */
if (sscanf(line, "%u %s\n", &oid, fname) != 2)
fatal("invalid line in large object TOC file \"%s\": \"%s\"",
- fname, line);
+ fname, line);
StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, fname);
@@ -461,7 +461,7 @@ _LoadBlobs(ArchiveHandle *AH)
}
if (!cfeof(ctx->blobsTocFH))
fatal("error reading large object TOC file \"%s\"",
- fname);
+ fname);
if (cfclose(ctx->blobsTocFH) != 0)
fatal("could not close large object TOC file \"%s\": %m",
@@ -486,7 +486,7 @@ _WriteByte(ArchiveHandle *AH, const int i)
if (cfwrite(&c, 1, ctx->dataFH) != 1)
fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ get_cfp_error(ctx->dataFH));
return 1;
}
@@ -516,7 +516,7 @@ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
if (cfwrite(buf, len, ctx->dataFH) != len)
fatal("could not write to output file: %s",
- get_cfp_error(ctx->dataFH));
+ get_cfp_error(ctx->dataFH));
return;
}
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index b52593c3c0..569df9b4b5 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -553,10 +553,10 @@ _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh)
const char *errmsg = gzerror(th->zFH, &errnum);
fatal("could not read from input file: %s",
- errnum == Z_ERRNO ? strerror(errno) : errmsg);
+ errnum == Z_ERRNO ? strerror(errno) : errmsg);
#else
fatal("could not read from input file: %s",
- strerror(errno));
+ strerror(errno));
#endif
}
}
@@ -691,7 +691,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
fatal("unexpected COPY statement syntax: \"%s\"",
- te->copyStmt);
+ te->copyStmt);
/* Emit all but the FROM part ... */
ahwrite(te->copyStmt, 1, pos1, AH);
@@ -1113,7 +1113,7 @@ _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th)
snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) len);
snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) th->fileLen);
fatal("actual file length (%s) does not match expected (%s)",
- buf1, buf2);
+ buf1, buf2);
}
pad = ((len + 511) & ~511) - len;
@@ -1150,7 +1150,7 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) ctx->tarFHpos);
snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) ctx->tarNextMember);
pg_log_debug("moving from position %s to next member at file position %s",
- buf1, buf2);
+ buf1, buf2);
while (ctx->tarFHpos < ctx->tarNextMember)
_tarReadRaw(AH, &c, 1, NULL, ctx->tarFH);
@@ -1188,8 +1188,8 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
id = atoi(th->targetFile);
if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
fatal("restoring data out of order is not supported in this archive format: "
- "\"%s\" is required, but comes before \"%s\" in the archive file.",
- th->targetFile, filename);
+ "\"%s\" is required, but comes before \"%s\" in the archive file.",
+ th->targetFile, filename);
/* Header doesn't match, so read to next header */
len = ((th->fileLen + 511) & ~511); /* Padded length */
@@ -1234,8 +1234,8 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
if (len != 512)
fatal(ngettext("incomplete tar header found (%lu byte)",
"incomplete tar header found (%lu bytes)",
- len),
- (unsigned long) len);
+ len),
+ (unsigned long) len);
/* Calc checksum */
chk = tarChecksum(h);
@@ -1274,7 +1274,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT, (uint64) hPos);
snprintf(lenbuf, sizeof(lenbuf), UINT64_FORMAT, (uint64) len);
pg_log_debug("TOC Entry %s at %s (length %s, checksum %d)",
- tag, posbuf, lenbuf, sum);
+ tag, posbuf, lenbuf, sum);
}
if (chk != sum)
@@ -1284,7 +1284,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT,
(uint64) ftello(ctx->tarFH));
fatal("corrupt tar header found in %s (expected %d, computed %d) file position %s",
- tag, sum, chk, posbuf);
+ tag, sum, chk, posbuf);
}
th->targetFile = pg_strdup(tag);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index e8ce719a0a..38a01758a1 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -135,7 +135,7 @@ static const CatalogId nilCatalogId = {0, 0};
/* override for standard extra_float_digits setting */
static bool have_extra_float_digits = false;
-static int extra_float_digits;
+static int extra_float_digits;
/*
* The default number of rows per INSERT when
@@ -601,7 +601,7 @@ main(int argc, char **argv)
errno == ERANGE)
{
pg_log_error("rows-per-insert must be in range %d..%d",
- 1, INT_MAX);
+ 1, INT_MAX);
exit_nicely(1);
}
dopt.dump_inserts = (int) rowsPerInsert;
@@ -1112,13 +1112,14 @@ setup_connection(Archive *AH, const char *dumpencoding,
ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
/*
- * Use an explicitly specified extra_float_digits if it has been
- * provided. Otherwise, set extra_float_digits so that we can dump float
- * data exactly (given correctly implemented float I/O code, anyway).
+ * Use an explicitly specified extra_float_digits if it has been provided.
+ * Otherwise, set extra_float_digits so that we can dump float data
+ * exactly (given correctly implemented float I/O code, anyway).
*/
if (have_extra_float_digits)
{
PQExpBuffer q = createPQExpBuffer();
+
appendPQExpBuffer(q, "SET extra_float_digits TO %d",
extra_float_digits);
ExecuteSqlStatement(AH, q->data);
@@ -1921,7 +1922,7 @@ dumpTableData_copy(Archive *fout, void *dcontext)
/* Do this to ensure we've pumped libpq back to idle state */
if (PQgetResult(conn) != NULL)
pg_log_warning("unexpected extra results during COPY of table \"%s\"",
- classname);
+ classname);
destroyPQExpBuffer(q);
return 1;
@@ -3468,7 +3469,7 @@ dumpBlobs(Archive *fout, void *arg)
loFd = lo_open(conn, blobOid, INV_READ);
if (loFd == -1)
fatal("could not open large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ blobOid, PQerrorMessage(conn));
StartBlob(fout, blobOid);
@@ -3478,7 +3479,7 @@ dumpBlobs(Archive *fout, void *arg)
cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
if (cnt < 0)
fatal("error reading large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ blobOid, PQerrorMessage(conn));
WriteData(fout, buf, cnt);
} while (cnt > 0);
@@ -3711,7 +3712,7 @@ dumpPolicy(Archive *fout, PolicyInfo *polinfo)
else
{
pg_log_error("unexpected policy command type: %c",
- polinfo->polcmd);
+ polinfo->polcmd);
exit_nicely(1);
}
@@ -3838,7 +3839,7 @@ getPublications(Archive *fout)
if (strlen(pubinfo[i].rolname) == 0)
pg_log_warning("owner of publication \"%s\" appears to be invalid",
- pubinfo[i].dobj.name);
+ pubinfo[i].dobj.name);
/* Decide whether we want to dump it */
selectDumpableObject(&(pubinfo[i].dobj), fout);
@@ -4172,7 +4173,7 @@ getSubscriptions(Archive *fout)
if (strlen(subinfo[i].rolname) == 0)
pg_log_warning("owner of subscription \"%s\" appears to be invalid",
- subinfo[i].dobj.name);
+ subinfo[i].dobj.name);
/* Decide whether we want to dump it */
selectDumpableObject(&(subinfo[i].dobj), fout);
@@ -4488,7 +4489,7 @@ binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
}
if (extobj == NULL)
fatal("could not find parent extension for %s %s",
- objtype, objname);
+ objtype, objname);
appendPQExpBufferStr(upgrade_buffer,
"\n-- For binary upgrade, handle extension membership the hard way\n");
@@ -4620,7 +4621,7 @@ getNamespaces(Archive *fout, int *numNamespaces)
if (strlen(nsinfo[i].rolname) == 0)
pg_log_warning("owner of schema \"%s\" appears to be invalid",
- nsinfo[i].dobj.name);
+ nsinfo[i].dobj.name);
}
PQclear(res);
@@ -4968,7 +4969,7 @@ getTypes(Archive *fout, int *numTypes)
if (strlen(tyinfo[i].rolname) == 0)
pg_log_warning("owner of data type \"%s\" appears to be invalid",
- tyinfo[i].dobj.name);
+ tyinfo[i].dobj.name);
}
*numTypes = ntups;
@@ -5053,7 +5054,7 @@ getOperators(Archive *fout, int *numOprs)
if (strlen(oprinfo[i].rolname) == 0)
pg_log_warning("owner of operator \"%s\" appears to be invalid",
- oprinfo[i].dobj.name);
+ oprinfo[i].dobj.name);
}
PQclear(res);
@@ -5355,7 +5356,7 @@ getOpclasses(Archive *fout, int *numOpclasses)
if (strlen(opcinfo[i].rolname) == 0)
pg_log_warning("owner of operator class \"%s\" appears to be invalid",
- opcinfo[i].dobj.name);
+ opcinfo[i].dobj.name);
}
PQclear(res);
@@ -5439,7 +5440,7 @@ getOpfamilies(Archive *fout, int *numOpfamilies)
if (strlen(opfinfo[i].rolname) == 0)
pg_log_warning("owner of operator family \"%s\" appears to be invalid",
- opfinfo[i].dobj.name);
+ opfinfo[i].dobj.name);
}
PQclear(res);
@@ -5608,7 +5609,7 @@ getAggregates(Archive *fout, int *numAggs)
agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
if (strlen(agginfo[i].aggfn.rolname) == 0)
pg_log_warning("owner of aggregate function \"%s\" appears to be invalid",
- agginfo[i].aggfn.dobj.name);
+ agginfo[i].aggfn.dobj.name);
agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
agginfo[i].aggfn.proacl = pg_strdup(PQgetvalue(res, i, i_aggacl));
@@ -5868,7 +5869,7 @@ getFuncs(Archive *fout, int *numFuncs)
if (strlen(finfo[i].rolname) == 0)
pg_log_warning("owner of function \"%s\" appears to be invalid",
- finfo[i].dobj.name);
+ finfo[i].dobj.name);
}
PQclear(res);
@@ -6664,7 +6665,7 @@ getTables(Archive *fout, int *numTables)
/* Emit notice if join for owner failed */
if (strlen(tblinfo[i].rolname) == 0)
pg_log_warning("owner of table \"%s\" appears to be invalid",
- tblinfo[i].dobj.name);
+ tblinfo[i].dobj.name);
}
if (dopt->lockWaitTimeout)
@@ -6706,7 +6707,7 @@ getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
owning_tab = findTableByOid(seqinfo->owning_tab);
if (owning_tab == NULL)
fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
- seqinfo->owning_tab, seqinfo->dobj.catId.oid);
+ seqinfo->owning_tab, seqinfo->dobj.catId.oid);
/*
* Only dump identity sequences if we're going to dump the table that
@@ -7470,7 +7471,7 @@ getRules(Archive *fout, int *numRules)
ruleinfo[i].ruletable = findTableByOid(ruletableoid);
if (ruleinfo[i].ruletable == NULL)
fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
- ruletableoid, ruleinfo[i].dobj.catId.oid);
+ ruletableoid, ruleinfo[i].dobj.catId.oid);
ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
@@ -7686,9 +7687,9 @@ getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
{
if (PQgetisnull(res, j, i_tgconstrrelname))
fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)",
- tginfo[j].dobj.name,
- tbinfo->dobj.name,
- tginfo[j].tgconstrrelid);
+ tginfo[j].dobj.name,
+ tbinfo->dobj.name,
+ tginfo[j].tgconstrrelid);
tginfo[j].tgconstrrelname = pg_strdup(PQgetvalue(res, j, i_tgconstrrelname));
}
else
@@ -8377,7 +8378,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
{
if (j + 1 != atoi(PQgetvalue(res, j, i_attnum)))
fatal("invalid column numbering in table \"%s\"",
- tbinfo->dobj.name);
+ tbinfo->dobj.name);
tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, j, i_attname));
tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, j, i_atttypname));
tbinfo->atttypmod[j] = atoi(PQgetvalue(res, j, i_atttypmod));
@@ -8436,7 +8437,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (adnum <= 0 || adnum > ntups)
fatal("invalid adnum value %d for table \"%s\"",
- adnum, tbinfo->dobj.name);
+ adnum, tbinfo->dobj.name);
/*
* dropped columns shouldn't have defaults, but just in case,
@@ -8552,7 +8553,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d",
"expected %d check constraints on table \"%s\" but found %d",
tbinfo->ncheck),
- tbinfo->ncheck, tbinfo->dobj.name, numConstrs);
+ tbinfo->ncheck, tbinfo->dobj.name, numConstrs);
pg_log_error("(The system catalogs might be corrupted.)");
exit_nicely(1);
}
@@ -10130,7 +10131,7 @@ dumpType(Archive *fout, TypeInfo *tyinfo)
dumpUndefinedType(fout, tyinfo);
else
pg_log_warning("typtype of data type \"%s\" appears to be invalid",
- tyinfo->dobj.name);
+ tyinfo->dobj.name);
}
/*
@@ -11977,7 +11978,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
appendPQExpBufferStr(q, " STABLE");
else if (provolatile[0] != PROVOLATILE_VOLATILE)
fatal("unrecognized provolatile value for function \"%s\"",
- finfo->dobj.name);
+ finfo->dobj.name);
}
if (proisstrict[0] == 't')
@@ -12027,7 +12028,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
fatal("unrecognized proparallel value for function \"%s\"",
- finfo->dobj.name);
+ finfo->dobj.name);
}
for (i = 0; i < nconfigitems; i++)
@@ -12160,7 +12161,7 @@ dumpCast(Archive *fout, CastInfo *cast)
funcInfo = findFuncByOid(cast->castfunc);
if (funcInfo == NULL)
fatal("could not find function definition for function with OID %u",
- cast->castfunc);
+ cast->castfunc);
}
defqry = createPQExpBuffer();
@@ -12269,14 +12270,14 @@ dumpTransform(Archive *fout, TransformInfo *transform)
fromsqlFuncInfo = findFuncByOid(transform->trffromsql);
if (fromsqlFuncInfo == NULL)
fatal("could not find function definition for function with OID %u",
- transform->trffromsql);
+ transform->trffromsql);
}
if (OidIsValid(transform->trftosql))
{
tosqlFuncInfo = findFuncByOid(transform->trftosql);
if (tosqlFuncInfo == NULL)
fatal("could not find function definition for function with OID %u",
- transform->trftosql);
+ transform->trftosql);
}
defqry = createPQExpBuffer();
@@ -12649,7 +12650,7 @@ getFormattedOperatorName(Archive *fout, const char *oproid)
if (oprInfo == NULL)
{
pg_log_warning("could not find operator with OID %s",
- oproid);
+ oproid);
return NULL;
}
@@ -12717,7 +12718,7 @@ dumpAccessMethod(Archive *fout, AccessMethodInfo *aminfo)
break;
default:
pg_log_warning("invalid type \"%c\" of access method \"%s\"",
- aminfo->amtype, qamname);
+ aminfo->amtype, qamname);
destroyPQExpBuffer(q);
destroyPQExpBuffer(delq);
free(qamname);
@@ -13471,7 +13472,7 @@ dumpCollation(Archive *fout, CollInfo *collinfo)
appendPQExpBufferStr(q, "default");
else
fatal("unrecognized collation provider: %s\n",
- collprovider);
+ collprovider);
if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
appendPQExpBufferStr(q, ", deterministic = false");
@@ -13943,7 +13944,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
if (!convertok)
{
pg_log_warning("aggregate function %s could not be dumped correctly for this database version; ignored",
- aggsig);
+ aggsig);
if (aggfullsig)
free(aggfullsig);
@@ -13998,7 +13999,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
break;
default:
fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
- agginfo->aggfn.dobj.name);
+ agginfo->aggfn.dobj.name);
break;
}
}
@@ -14054,7 +14055,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
break;
default:
fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
- agginfo->aggfn.dobj.name);
+ agginfo->aggfn.dobj.name);
break;
}
}
@@ -14079,7 +14080,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo)
appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
fatal("unrecognized proparallel value for function \"%s\"",
- agginfo->aggfn.dobj.name);
+ agginfo->aggfn.dobj.name);
}
appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
@@ -14776,7 +14777,7 @@ dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo)
default:
/* shouldn't get here */
fatal("unrecognized object type in default privileges: %d",
- (int) daclinfo->defaclobjtype);
+ (int) daclinfo->defaclobjtype);
type = ""; /* keep compiler quiet */
}
@@ -14794,7 +14795,7 @@ dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo)
fout->remoteVersion,
q))
fatal("could not parse default ACL list (%s)",
- daclinfo->defaclacl);
+ daclinfo->defaclacl);
if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
@@ -14874,7 +14875,7 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
initacls, initracls, owner,
"", fout->remoteVersion, sql))
fatal("could not parse initial GRANT ACL list (%s) or initial REVOKE ACL list (%s) for object \"%s\" (%s)",
- initacls, initracls, name, type);
+ initacls, initracls, name, type);
appendPQExpBuffer(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
}
@@ -14882,7 +14883,7 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
acls, racls, owner,
"", fout->remoteVersion, sql))
fatal("could not parse GRANT ACL list (%s) or REVOKE ACL list (%s) for object \"%s\" (%s)",
- acls, racls, name, type);
+ acls, racls, name, type);
if (sql->len > 0)
{
@@ -15381,17 +15382,17 @@ createViewAsClause(Archive *fout, TableInfo *tbinfo)
{
if (PQntuples(res) < 1)
fatal("query to obtain definition of view \"%s\" returned no data",
- tbinfo->dobj.name);
+ tbinfo->dobj.name);
else
fatal("query to obtain definition of view \"%s\" returned more than one definition",
- tbinfo->dobj.name);
+ tbinfo->dobj.name);
}
len = PQgetlength(res, 0, 0);
if (len == 0)
fatal("definition of view \"%s\" appears to be empty (length zero)",
- tbinfo->dobj.name);
+ tbinfo->dobj.name);
/* Strip off the trailing semicolon so that other things may follow. */
Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
@@ -15473,7 +15474,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
if (tbinfo->hasoids)
pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
- qrelname);
+ qrelname);
if (dopt->binary_upgrade)
binary_upgrade_set_type_oids_by_rel_oid(fout, q,
@@ -15600,7 +15601,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
*/
if (tbinfo->numParents != 1)
fatal("invalid number of parents %d for table \"%s\"",
- tbinfo->numParents, tbinfo->dobj.name);
+ tbinfo->numParents, tbinfo->dobj.name);
appendPQExpBuffer(q, " PARTITION OF %s",
fmtQualifiedDumpable(parentRel));
@@ -16157,7 +16158,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
{
- char *tableam = NULL;
+ char *tableam = NULL;
if (tbinfo->relkind == RELKIND_RELATION ||
tbinfo->relkind == RELKIND_MATVIEW)
@@ -16287,7 +16288,7 @@ getAttrName(int attrnum, TableInfo *tblInfo)
return "tableoid";
}
fatal("invalid column number %d for table \"%s\"",
- attrnum, tblInfo->dobj.name);
+ attrnum, tblInfo->dobj.name);
return NULL; /* keep compiler quiet */
}
@@ -16549,7 +16550,7 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
if (indxinfo == NULL)
fatal("missing index for constraint \"%s\"",
- coninfo->dobj.name);
+ coninfo->dobj.name);
if (dopt->binary_upgrade)
binary_upgrade_set_pg_class_oids(fout, q,
@@ -16769,7 +16770,7 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
else
{
fatal("unrecognized constraint type: %c",
- coninfo->contype);
+ coninfo->contype);
}
/* Dump Constraint Comments --- only works for table constraints */
@@ -16902,8 +16903,8 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
{
pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
"query to get data of sequence \"%s\" returned %d rows (expected 1)",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
exit_nicely(1);
}
@@ -17056,7 +17057,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
if (owning_tab == NULL)
fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
- tbinfo->owning_tab, tbinfo->dobj.catId.oid);
+ tbinfo->owning_tab, tbinfo->dobj.catId.oid);
if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
{
@@ -17122,8 +17123,8 @@ dumpSequenceData(Archive *fout, TableDataInfo *tdinfo)
{
pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
"query to get data of sequence \"%s\" returned %d rows (expected 1)",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
exit_nicely(1);
}
@@ -17291,9 +17292,9 @@ dumpTrigger(Archive *fout, TriggerInfo *tginfo)
{
/* hm, not found before end of bytea value... */
pg_log_error("invalid argument string (%s) for trigger \"%s\" on table \"%s\"",
- tginfo->tgargs,
- tginfo->dobj.name,
- tbinfo->dobj.name);
+ tginfo->tgargs,
+ tginfo->dobj.name,
+ tbinfo->dobj.name);
exit_nicely(1);
}
@@ -17520,7 +17521,7 @@ dumpRule(Archive *fout, RuleInfo *rinfo)
if (PQntuples(res) != 1)
{
pg_log_error("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
- rinfo->dobj.name, tbinfo->dobj.name);
+ rinfo->dobj.name, tbinfo->dobj.name);
exit_nicely(1);
}
@@ -17949,7 +17950,7 @@ getDependencies(Archive *fout)
{
#ifdef NOT_USED
pg_log_warning("no referencing object %u %u",
- objId.tableoid, objId.oid);
+ objId.tableoid, objId.oid);
#endif
continue;
}
@@ -17960,7 +17961,7 @@ getDependencies(Archive *fout)
{
#ifdef NOT_USED
pg_log_warning("no referenced object %u %u",
- refobjId.tableoid, refobjId.oid);
+ refobjId.tableoid, refobjId.oid);
#endif
continue;
}
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 102731ea0c..e8fe5b596a 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -54,7 +54,7 @@ static char *constructConnStr(const char **keywords, const char **values);
static PGresult *executeQuery(PGconn *conn, const char *query);
static void executeCommand(PGconn *conn, const char *query);
static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns,
- SimpleStringList *names);
+ SimpleStringList *names);
static char pg_dump_bin[MAXPGPATH];
static const char *progname;
@@ -1406,8 +1406,8 @@ expand_dbname_patterns(PGconn *conn,
/*
* The loop below runs multiple SELECTs, which might sometimes result in
- * duplicate entries in the name list, but we don't care, since all
- * we're going to do is test membership of the list.
+ * duplicate entries in the name list, but we don't care, since all we're
+ * going to do is test membership of the list.
*/
for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next)
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 8a3fad3d16..f9b1ae6809 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -410,7 +410,7 @@ main(int argc, char **argv)
default:
pg_log_error("unrecognized archive format \"%s\"; please specify \"c\", \"d\", or \"t\"",
- opts->formatName);
+ opts->formatName);
exit_nicely(1);
}
}
diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c
index 813eadcb01..3c7ef90013 100644
--- a/src/bin/pg_rewind/filemap.c
+++ b/src/bin/pg_rewind/filemap.c
@@ -508,10 +508,10 @@ check_file_excluded(const char *path, bool is_source)
{
if (is_source)
pg_log_debug("entry \"%s\" excluded from source file list",
- path);
+ path);
else
pg_log_debug("entry \"%s\" excluded from target file list",
- path);
+ path);
return true;
}
}
@@ -528,10 +528,10 @@ check_file_excluded(const char *path, bool is_source)
{
if (is_source)
pg_log_debug("entry \"%s\" excluded from source file list",
- path);
+ path);
else
pg_log_debug("entry \"%s\" excluded from target file list",
- path);
+ path);
return true;
}
}
@@ -659,7 +659,7 @@ print_filemap(void)
entry->pagemap.bitmapsize > 0)
{
pg_log_debug("%s (%s)", entry->path,
- action_to_str(entry->action));
+ action_to_str(entry->action));
if (entry->pagemap.bitmapsize > 0)
datapagemap_print(&entry->pagemap);
diff --git a/src/bin/pg_rewind/libpq_fetch.c b/src/bin/pg_rewind/libpq_fetch.c
index b6fa7e5b09..d6cbe23926 100644
--- a/src/bin/pg_rewind/libpq_fetch.c
+++ b/src/bin/pg_rewind/libpq_fetch.c
@@ -320,7 +320,7 @@ receiveFileChunks(const char *sql)
if (PQgetisnull(res, 0, 2))
{
pg_log_debug("received null value for chunk for file \"%s\", file has been deleted",
- filename);
+ filename);
remove_target_file(filename, true);
pg_free(filename);
PQclear(res);
@@ -333,7 +333,7 @@ receiveFileChunks(const char *sql)
*/
snprintf(chunkoff_str, sizeof(chunkoff_str), INT64_FORMAT, chunkoff);
pg_log_debug("received chunk for file \"%s\", offset %s, size %d",
- filename, chunkoff_str, chunksize);
+ filename, chunkoff_str, chunksize);
open_target_file(filename, false);
diff --git a/src/bin/pg_rewind/parsexlog.c b/src/bin/pg_rewind/parsexlog.c
index 65e523f5d4..b31071bc09 100644
--- a/src/bin/pg_rewind/parsexlog.c
+++ b/src/bin/pg_rewind/parsexlog.c
@@ -315,7 +315,7 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
pg_log_error("could not read file \"%s\": %m", xlogfpath);
else
pg_log_error("could not read file \"%s\": read %d of %zu",
- xlogfpath, r, (Size) XLOG_BLCKSZ);
+ xlogfpath, r, (Size) XLOG_BLCKSZ);
return -1;
}
diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c
index d47b5f9648..6cd3917628 100644
--- a/src/bin/pg_rewind/pg_rewind.c
+++ b/src/bin/pg_rewind/pg_rewind.c
@@ -52,7 +52,7 @@ char *datadir_target = NULL;
char *datadir_source = NULL;
char *connstr_source = NULL;
-static bool debug = false;
+static bool debug = false;
bool showprogress = false;
bool dry_run = false;
bool do_sync = true;
@@ -260,8 +260,8 @@ main(int argc, char **argv)
{
findCommonAncestorTimeline(&divergerec, &lastcommontliIndex);
pg_log_info("servers diverged at WAL location %X/%X on timeline %u",
- (uint32) (divergerec >> 32), (uint32) divergerec,
- targetHistory[lastcommontliIndex].tli);
+ (uint32) (divergerec >> 32), (uint32) divergerec,
+ targetHistory[lastcommontliIndex].tli);
/*
* Check for the possibility that the target is in fact a direct
@@ -304,8 +304,8 @@ main(int argc, char **argv)
lastcommontliIndex,
&chkptrec, &chkpttli, &chkptredo);
pg_log_info("rewinding from last common checkpoint at %X/%X on timeline %u",
- (uint32) (chkptrec >> 32), (uint32) chkptrec,
- chkpttli);
+ (uint32) (chkptrec >> 32), (uint32) chkptrec,
+ chkpttli);
/*
* Build the filemap, by comparing the source and target data directories.
@@ -344,8 +344,8 @@ main(int argc, char **argv)
if (showprogress)
{
pg_log_info("need to copy %lu MB (total source directory size is %lu MB)",
- (unsigned long) (filemap->fetch_size / (1024 * 1024)),
- (unsigned long) (filemap->total_size / (1024 * 1024)));
+ (unsigned long) (filemap->fetch_size / (1024 * 1024)),
+ (unsigned long) (filemap->total_size / (1024 * 1024)));
fetch_size = filemap->fetch_size;
fetch_done = 0;
@@ -495,8 +495,8 @@ progress_report(bool force)
fetch_size / 1024);
fprintf(stderr, _("%*s/%s kB (%d%%) copied"),
- (int) strlen(fetch_size_str), fetch_done_str, fetch_size_str,
- percent);
+ (int) strlen(fetch_size_str), fetch_done_str, fetch_size_str,
+ percent);
if (isatty(fileno(stderr)))
fprintf(stderr, "\r");
else
@@ -581,8 +581,8 @@ getTimelineHistory(ControlFileData *controlFile, int *nentries)
entry = &history[i];
pg_log_debug("%d: %X/%X - %X/%X", entry->tli,
- (uint32) (entry->begin >> 32), (uint32) (entry->begin),
- (uint32) (entry->end >> 32), (uint32) (entry->end));
+ (uint32) (entry->begin >> 32), (uint32) (entry->begin),
+ (uint32) (entry->end >> 32), (uint32) (entry->end));
}
}
diff --git a/src/bin/pg_upgrade/controldata.c b/src/bin/pg_upgrade/controldata.c
index bbeba673d4..6788f882a8 100644
--- a/src/bin/pg_upgrade/controldata.c
+++ b/src/bin/pg_upgrade/controldata.c
@@ -137,14 +137,15 @@ get_control_data(ClusterInfo *cluster, bool live_check)
if (p == NULL || strlen(p) <= 1)
pg_fatal("%d: database cluster state problem\n", __LINE__);
- p++; /* remove ':' char */
+ p++; /* remove ':' char */
/*
- * We checked earlier for a postmaster lock file, and if we found
- * one, we tried to start/stop the server to replay the WAL. However,
- * pg_ctl -m immediate doesn't leave a lock file, but does require
- * WAL replay, so we check here that the server was shut down cleanly,
- * from the controldata perspective.
+ * We checked earlier for a postmaster lock file, and if we
+ * found one, we tried to start/stop the server to replay the
+ * WAL. However, pg_ctl -m immediate doesn't leave a lock
+ * file, but does require WAL replay, so we check here that
+ * the server was shut down cleanly, from the controldata
+ * perspective.
*/
/* remove leading spaces */
while (*p == ' ')
diff --git a/src/bin/pg_upgrade/function.c b/src/bin/pg_upgrade/function.c
index 2a7df78f80..0c66d1c056 100644
--- a/src/bin/pg_upgrade/function.c
+++ b/src/bin/pg_upgrade/function.c
@@ -41,7 +41,7 @@ library_name_compare(const void *p1, const void *p2)
return cmp;
else
return ((const LibraryInfo *) p1)->dbnum -
- ((const LibraryInfo *) p2)->dbnum;
+ ((const LibraryInfo *) p2)->dbnum;
}
@@ -213,16 +213,16 @@ check_loadable_libraries(void)
{
/*
* In Postgres 9.0, Python 3 support was added, and to do that, a
- * plpython2u language was created with library name plpython2.so as a
- * symbolic link to plpython.so. In Postgres 9.1, only the
- * plpython2.so library was created, and both plpythonu and plpython2u
- * pointing to it. For this reason, any reference to library name
- * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
- * the new cluster.
+ * plpython2u language was created with library name plpython2.so
+ * as a symbolic link to plpython.so. In Postgres 9.1, only the
+ * plpython2.so library was created, and both plpythonu and
+ * plpython2u pointing to it. For this reason, any reference to
+ * library name "plpython" in an old PG <= 9.1 cluster must look
+ * for "plpython2" in the new cluster.
*
- * For this case, we could check pg_pltemplate, but that only works
- * for languages, and does not help with function shared objects, so
- * we just do a general fix.
+ * For this case, we could check pg_pltemplate, but that only
+ * works for languages, and does not help with function shared
+ * objects, so we just do a general fix.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
strcmp(lib, "$libdir/plpython") == 0)
@@ -257,7 +257,7 @@ check_loadable_libraries(void)
if (was_load_failure)
fprintf(script, _("Database: %s\n"),
- old_cluster.dbarr.dbs[os_info.libraries[libnum].dbnum].db_name);
+ old_cluster.dbarr.dbs[os_info.libraries[libnum].dbnum].db_name);
}
PQfinish(conn);
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index dab9525c68..c40014d483 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -1031,9 +1031,9 @@ main(int argc, char **argv)
else if (!XLByteInSeg(private.startptr, segno, WalSegSz))
{
pg_log_error("start WAL location %X/%X is not inside file \"%s\"",
- (uint32) (private.startptr >> 32),
- (uint32) private.startptr,
- fname);
+ (uint32) (private.startptr >> 32),
+ (uint32) private.startptr,
+ fname);
goto bad_argument;
}
@@ -1074,9 +1074,9 @@ main(int argc, char **argv)
private.endptr != (segno + 1) * WalSegSz)
{
pg_log_error("end WAL location %X/%X is not inside file \"%s\"",
- (uint32) (private.endptr >> 32),
- (uint32) private.endptr,
- argv[argc - 1]);
+ (uint32) (private.endptr >> 32),
+ (uint32) private.endptr,
+ argv[argc - 1]);
goto bad_argument;
}
}
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index a81383eb57..e99ab1e07f 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -217,7 +217,7 @@ bool progress_timestamp = false; /* progress report with Unix time */
int nclients = 1; /* number of clients */
int nthreads = 1; /* number of threads */
bool is_connect; /* establish connection for each transaction */
-bool report_per_command; /* report per-command latencies */
+bool report_per_command; /* report per-command latencies */
int main_pid; /* main process id used in log filename */
char *pghost = "";
@@ -422,11 +422,11 @@ typedef struct
/*
* Separate randomness for each thread. Each thread option uses its own
- * random state to make all of them independent of each other and therefore
- * deterministic at the thread level.
+ * random state to make all of them independent of each other and
+ * therefore deterministic at the thread level.
*/
RandomState ts_choose_rs; /* random state for selecting a script */
- RandomState ts_throttle_rs; /* random state for transaction throttling */
+ RandomState ts_throttle_rs; /* random state for transaction throttling */
RandomState ts_sample_rs; /* random state for log sampling */
int64 throttle_trigger; /* previous/next throttling (us) */
@@ -777,7 +777,7 @@ invalid_syntax:
bool
strtodouble(const char *str, bool errorOK, double *dv)
{
- char *end;
+ char *end;
errno = 0;
*dv = strtod(str, &end);
@@ -1322,7 +1322,7 @@ makeVariableValue(Variable *var)
else if (is_an_int(var->svalue))
{
/* if it looks like an int, it must be an int without overflow */
- int64 iv;
+ int64 iv;
if (!strtoint64(var->svalue, false, &iv))
return false;
@@ -2725,7 +2725,7 @@ readCommandResponse(CState *st, char *varprefix)
while (res != NULL)
{
- bool is_last;
+ bool is_last;
/* peek at the next result to know whether the current is last */
next_res = PQgetResult(st->con);
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index a2c0ec0b7f..df3824f689 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -291,7 +291,7 @@ exec_command(const char *cmd,
!is_branching_command(cmd))
{
pg_log_warning("\\%s command ignored; use \\endif or Ctrl-C to exit current \\if block",
- cmd);
+ cmd);
}
if (strcmp(cmd, "a") == 0)
@@ -551,8 +551,8 @@ exec_command_cd(PsqlScanState scan_state, bool active_branch, const char *cmd)
if (!pw)
{
pg_log_error("could not get home directory for user ID %ld: %s",
- (long) user_id,
- errno ? strerror(errno) : _("user does not exist"));
+ (long) user_id,
+ errno ? strerror(errno) : _("user does not exist"));
exit(EXIT_FAILURE);
}
dir = pw->pw_dir;
@@ -1015,10 +1015,10 @@ exec_command_ef_ev(PsqlScanState scan_state, bool active_branch,
sverbuf, sizeof(sverbuf));
if (is_func)
pg_log_error("The server (version %s) does not support editing function source.",
- sverbuf);
+ sverbuf);
else
pg_log_error("The server (version %s) does not support editing view definitions.",
- sverbuf);
+ sverbuf);
status = PSQL_CMD_ERROR;
}
else if (!query_buf)
@@ -1933,7 +1933,7 @@ exec_command_prompt(PsqlScanState scan_state, bool active_branch,
if (!result)
{
pg_log_error("\\%s: could not read value for variable",
- cmd);
+ cmd);
success = false;
}
}
@@ -2145,7 +2145,7 @@ exec_command_setenv(PsqlScanState scan_state, bool active_branch,
else if (strchr(envvar, '=') != NULL)
{
pg_log_error("\\%s: environment variable name must not contain \"=\"",
- cmd);
+ cmd);
success = false;
}
else if (!envval)
@@ -2206,10 +2206,10 @@ exec_command_sf_sv(PsqlScanState scan_state, bool active_branch,
sverbuf, sizeof(sverbuf));
if (is_func)
pg_log_error("The server (version %s) does not support showing function source.",
- sverbuf);
+ sverbuf);
else
pg_log_error("The server (version %s) does not support showing view definitions.",
- sverbuf);
+ sverbuf);
status = PSQL_CMD_ERROR;
}
else if (!obj_desc)
@@ -3441,7 +3441,7 @@ do_edit(const char *filename_arg, PQExpBuffer query_buf,
if (ret == 0 || ret > MAXPGPATH)
{
pg_log_error("could not locate temporary directory: %s",
- !ret ? strerror(errno) : "");
+ !ret ? strerror(errno) : "");
return false;
}
@@ -3761,8 +3761,8 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
else
{
pg_log_error("\\pset: ambiguous abbreviation \"%s\" matches both \"%s\" and \"%s\"",
- value,
- formats[match_pos].name, formats[i].name);
+ value,
+ formats[match_pos].name, formats[i].name);
return false;
}
}
@@ -4694,7 +4694,7 @@ get_create_object_cmd(EditableObjectType obj_type, Oid oid,
break;
default:
pg_log_error("\"%s.%s\" is not a view",
- nspname, relname);
+ nspname, relname);
result = false;
break;
}
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 9579e10630..44a782478d 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -199,7 +199,7 @@ psql_get_variable(const char *varname, PsqlScanQuoteType quote,
if (!appendShellStringNoError(&buf, value))
{
pg_log_error("shell command argument contains a newline or carriage return: \"%s\"",
- value);
+ value);
free(buf.data);
return NULL;
}
@@ -509,7 +509,7 @@ AcceptResult(const PGresult *result)
default:
OK = false;
pg_log_error("unexpected PQresultStatus: %d",
- PQresultStatus(result));
+ PQresultStatus(result));
break;
}
@@ -1278,7 +1278,7 @@ PrintQueryResults(PGresult *results)
default:
success = false;
pg_log_error("unexpected PQresultStatus: %d",
- PQresultStatus(results));
+ PQresultStatus(results));
break;
}
@@ -1378,8 +1378,8 @@ SendQuery(const char *query)
char sverbuf[32];
pg_log_warning("The server (version %s) does not support savepoints for ON_ERROR_ROLLBACK.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
on_error_rollback_warning = true;
}
else
@@ -1484,7 +1484,7 @@ SendQuery(const char *query)
/* PQTRANS_UNKNOWN is expected given a broken connection. */
if (transaction_status != PQTRANS_UNKNOWN || ConnectionUp())
pg_log_error("unexpected transaction status (%d)",
- transaction_status);
+ transaction_status);
break;
}
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index b02177a5c2..f9e53d6295 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -390,7 +390,7 @@ do_copy(const char *args)
char *reason = wait_result_to_str(pclose_rc);
pg_log_error("%s: %s", options->file,
- reason ? reason : "");
+ reason ? reason : "");
if (reason)
free(reason);
}
diff --git a/src/bin/psql/crosstabview.c b/src/bin/psql/crosstabview.c
index 390f750c41..184ebe6d21 100644
--- a/src/bin/psql/crosstabview.c
+++ b/src/bin/psql/crosstabview.c
@@ -227,7 +227,7 @@ PrintResultsInCrosstab(const PGresult *res)
if (piv_columns.count > CROSSTABVIEW_MAX_COLUMNS)
{
pg_log_error("\\crosstabview: maximum number of columns (%d) exceeded",
- CROSSTABVIEW_MAX_COLUMNS);
+ CROSSTABVIEW_MAX_COLUMNS);
goto error_return;
}
@@ -396,10 +396,10 @@ printCrosstab(const PGresult *results,
if (cont.cells[idx] != NULL)
{
pg_log_error("\\crosstabview: query result contains multiple data values for row \"%s\", column \"%s\"",
- rp->name ? rp->name :
- (popt.nullPrint ? popt.nullPrint : "(null)"),
- cp->name ? cp->name :
- (popt.nullPrint ? popt.nullPrint : "(null)"));
+ rp->name ? rp->name :
+ (popt.nullPrint ? popt.nullPrint : "(null)"),
+ cp->name ? cp->name :
+ (popt.nullPrint ? popt.nullPrint : "(null)"));
goto error;
}
@@ -644,7 +644,7 @@ indexOfColumn(char *arg, const PGresult *res)
if (idx < 0 || idx >= PQnfields(res))
{
pg_log_error("\\crosstabview: column number %d is out of range 1..%d",
- idx + 1, PQnfields(res));
+ idx + 1, PQnfields(res));
return -1;
}
}
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 773107227d..3dc5447f1a 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -160,8 +160,8 @@ describeAccessMethods(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support access methods.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -229,8 +229,8 @@ describeTablespaces(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_info("The server (version %s) does not support tablespaces.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -340,9 +340,9 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool
char sverbuf[32];
pg_log_error("\\df does not take a \"%c\" option with server version %s",
- 'p',
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ 'p',
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -351,9 +351,9 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool
char sverbuf[32];
pg_log_error("\\df does not take a \"%c\" option with server version %s",
- 'w',
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ 'w',
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -1100,8 +1100,8 @@ listDefaultACLs(const char *pattern)
char sverbuf[32];
pg_log_error("The server (version %s) does not support altering default privileges.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -1401,7 +1401,7 @@ describeTableDetails(const char *pattern, bool verbose, bool showSystem)
{
if (pattern)
pg_log_error("Did not find any relation named \"%s\".",
- pattern);
+ pattern);
else
pg_log_error("Did not find any relations.");
}
@@ -3548,8 +3548,8 @@ listDbRoleSettings(const char *pattern, const char *pattern2)
char sverbuf[32];
pg_log_error("The server (version %s) does not support per-database role settings.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -3584,10 +3584,10 @@ listDbRoleSettings(const char *pattern, const char *pattern2)
{
if (pattern && pattern2)
pg_log_error("Did not find any settings for role \"%s\" and database \"%s\".",
- pattern, pattern2);
+ pattern, pattern2);
else if (pattern)
pg_log_error("Did not find any settings for role \"%s\".",
- pattern);
+ pattern);
else
pg_log_error("Did not find any settings.");
}
@@ -3760,7 +3760,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
{
if (pattern)
pg_log_error("Did not find any relation named \"%s\".",
- pattern);
+ pattern);
else
pg_log_error("Did not find any relations.");
}
@@ -3804,7 +3804,7 @@ listPartitionedTables(const char *reltypes, const char *pattern, bool verbose)
PQExpBufferData title;
PGresult *res;
printQueryOpt myopt = pset.popt;
- bool translate_columns[] = {false, false, false, false, false, false, false, false, false};
+ bool translate_columns[] = {false, false, false, false, false, false, false, false, false};
const char *tabletitle;
bool mixed_output = false;
@@ -4432,8 +4432,8 @@ listCollations(const char *pattern, bool verbose, bool showSystem)
char sverbuf[32];
pg_log_error("The server (version %s) does not support collations.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -4587,8 +4587,8 @@ listTSParsers(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -4667,7 +4667,7 @@ listTSParsersVerbose(const char *pattern)
{
if (pattern)
pg_log_error("Did not find any text search parser named \"%s\".",
- pattern);
+ pattern);
else
pg_log_error("Did not find any text search parsers.");
}
@@ -4834,8 +4834,8 @@ listTSDictionaries(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -4905,8 +4905,8 @@ listTSTemplates(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -4976,8 +4976,8 @@ listTSConfigs(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5057,7 +5057,7 @@ listTSConfigsVerbose(const char *pattern)
{
if (pattern)
pg_log_error("Did not find any text search configuration named \"%s\".",
- pattern);
+ pattern);
else
pg_log_error("Did not find any text search configurations.");
}
@@ -5182,8 +5182,8 @@ listForeignDataWrappers(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support foreign-data wrappers.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5265,8 +5265,8 @@ listForeignServers(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support foreign servers.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5347,8 +5347,8 @@ listUserMappings(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support user mappings.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5408,8 +5408,8 @@ listForeignTables(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support foreign tables.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5486,8 +5486,8 @@ listExtensions(const char *pattern)
char sverbuf[32];
pg_log_error("The server (version %s) does not support extensions.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5543,8 +5543,8 @@ listExtensionContents(const char *pattern)
char sverbuf[32];
pg_log_error("The server (version %s) does not support extensions.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5571,7 +5571,7 @@ listExtensionContents(const char *pattern)
{
if (pattern)
pg_log_error("Did not find any extension named \"%s\".",
- pattern);
+ pattern);
else
pg_log_error("Did not find any extensions.");
}
@@ -5657,8 +5657,8 @@ listPublications(const char *pattern)
char sverbuf[32];
pg_log_error("The server (version %s) does not support publications.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5728,8 +5728,8 @@ describePublications(const char *pattern)
char sverbuf[32];
pg_log_error("The server (version %s) does not support publications.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
@@ -5766,7 +5766,7 @@ describePublications(const char *pattern)
{
if (pattern)
pg_log_error("Did not find any publication named \"%s\".",
- pattern);
+ pattern);
else
pg_log_error("Did not find any publications.");
}
@@ -5884,8 +5884,8 @@ describeSubscriptions(const char *pattern, bool verbose)
char sverbuf[32];
pg_log_error("The server (version %s) does not support subscriptions.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
return true;
}
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index 7c5b45f7cc..dc6dcbba01 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -104,7 +104,7 @@ log_locus_callback(const char **filename, uint64 *lineno)
if (pset.inputfile)
{
*filename = pset.inputfile;
- *lineno = pset.lineno;
+ *lineno = pset.lineno;
}
else
{
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index d77aa2936d..04ce3e722f 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -485,7 +485,7 @@ static const SchemaQuery Query_for_list_of_relations = {
static const SchemaQuery Query_for_list_of_partitioned_relations = {
.catname = "pg_catalog.pg_class c",
.selcondition = "c.relkind IN (" CppAsString2(RELKIND_PARTITIONED_TABLE)
- ", " CppAsString2(RELKIND_PARTITIONED_INDEX) ")",
+ ", " CppAsString2(RELKIND_PARTITIONED_INDEX) ")",
.viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
.namespace = "c.relnamespace",
.result = "pg_catalog.quote_ident(c.relname)",
@@ -4361,7 +4361,7 @@ exec_query(const char *query)
{
#ifdef NOT_USED
pg_log_error("tab completion query failed: %s\nQuery was:\n%s",
- PQerrorMessage(pset.db), query);
+ PQerrorMessage(pset.db), query);
#endif
PQclear(result);
result = NULL;
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index e456b9db2f..1d2a31cd65 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -139,7 +139,7 @@ ParseVariableBool(const char *value, const char *name, bool *result)
/* string is not recognized; don't clobber *result */
if (name)
pg_log_error("unrecognized value \"%s\" for \"%s\": Boolean expected",
- value, name);
+ value, name);
valid = false;
}
return valid;
@@ -176,7 +176,7 @@ ParseVariableNum(const char *value, const char *name, int *result)
/* string is not recognized; don't clobber *result */
if (name)
pg_log_error("invalid value \"%s\" for \"%s\": integer expected",
- value, name);
+ value, name);
return false;
}
}
@@ -394,5 +394,5 @@ PsqlVarEnumError(const char *name, const char *value, const char *suggestions)
{
pg_log_error("unrecognized value \"%s\" for \"%s\"\n"
"Available values are: %s.",
- value, name, suggestions);
+ value, name, suggestions);
}
diff --git a/src/common/d2s.c b/src/common/d2s.c
index 1e4782c10a..8f4bc2a63c 100644
--- a/src/common/d2s.c
+++ b/src/common/d2s.c
@@ -671,9 +671,9 @@ to_chars_df(const floating_decimal_64 v, const uint32 olength, char *const resul
else
{
/*
- * We can save some code later by pre-filling with zeros. We know
- * that there can be no more than 16 output digits in this form,
- * otherwise we would not choose fixed-point output.
+ * We can save some code later by pre-filling with zeros. We know that
+ * there can be no more than 16 output digits in this form, otherwise
+ * we would not choose fixed-point output.
*/
Assert(exp < 16 && exp + olength <= 16);
memset(result, '0', 16);
@@ -800,8 +800,8 @@ to_chars(floating_decimal_64 v, const bool sign, char *const result)
/*
* The thresholds for fixed-point output are chosen to match printf
- * defaults. Beware that both the code of to_chars_df and the value
- * of DOUBLE_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
+ * defaults. Beware that both the code of to_chars_df and the value of
+ * DOUBLE_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
*/
if (exp >= -4 && exp < 15)
return to_chars_df(v, olength, result + index) + sign;
diff --git a/src/common/f2s.c b/src/common/f2s.c
index ff22b56c92..e3325573b2 100644
--- a/src/common/f2s.c
+++ b/src/common/f2s.c
@@ -481,10 +481,10 @@ to_chars_f(const floating_decimal_32 v, const uint32 olength, char *const result
else
{
/*
- * We can save some code later by pre-filling with zeros. We know
- * that there can be no more than 6 output digits in this form,
- * otherwise we would not choose fixed-point output. memset 8
- * rather than 6 bytes to let the compiler optimize it.
+ * We can save some code later by pre-filling with zeros. We know that
+ * there can be no more than 6 output digits in this form, otherwise
+ * we would not choose fixed-point output. memset 8 rather than 6
+ * bytes to let the compiler optimize it.
*/
Assert(exp < 6 && exp + olength <= 6);
memset(result, '0', 8);
@@ -575,8 +575,8 @@ to_chars(const floating_decimal_32 v, const bool sign, char *const result)
/*
* The thresholds for fixed-point output are chosen to match printf
- * defaults. Beware that both the code of to_chars_f and the value
- * of FLOAT_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
+ * defaults. Beware that both the code of to_chars_f and the value of
+ * FLOAT_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
*/
if (exp >= -4 && exp < 6)
return to_chars_f(v, olength, result + index) + sign;
diff --git a/src/common/file_utils.c b/src/common/file_utils.c
index eaec568819..3d7a637212 100644
--- a/src/common/file_utils.c
+++ b/src/common/file_utils.c
@@ -36,7 +36,7 @@
#define MINIMUM_VERSION_FOR_PG_WAL 100000
#ifdef PG_FLUSH_DATA_WORKS
-static int pre_sync_fname(const char *fname, bool isdir);
+static int pre_sync_fname(const char *fname, bool isdir);
#endif
static void walkdir(const char *path,
int (*action) (const char *fname, bool isdir),
diff --git a/src/common/logging.c b/src/common/logging.c
index 59f60445c7..f247554a32 100644
--- a/src/common/logging.c
+++ b/src/common/logging.c
@@ -16,10 +16,10 @@
enum pg_log_level __pg_log_level;
static const char *progname;
-static int log_flags;
+static int log_flags;
-static void (*log_pre_callback)(void);
-static void (*log_locus_callback)(const char **, uint64 *);
+static void (*log_pre_callback) (void);
+static void (*log_locus_callback) (const char **, uint64 *);
static const char *sgr_error = NULL;
static const char *sgr_warning = NULL;
@@ -60,13 +60,13 @@ pg_logging_init(const char *argv0)
if (pg_colors_env)
{
- char *colors = strdup(pg_colors_env);
+ char *colors = strdup(pg_colors_env);
if (colors)
{
for (char *token = strtok(colors, ":"); token; token = strtok(NULL, ":"))
{
- char *e = strchr(token, '=');
+ char *e = strchr(token, '=');
if (e)
{
@@ -111,19 +111,19 @@ pg_logging_set_level(enum pg_log_level new_level)
}
void
-pg_logging_set_pre_callback(void (*cb)(void))
+pg_logging_set_pre_callback(void (*cb) (void))
{
log_pre_callback = cb;
}
void
-pg_logging_set_locus_callback(void (*cb)(const char **filename, uint64 *lineno))
+pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno))
{
log_locus_callback = cb;
}
void
-pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...)
+pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...)
{
va_list ap;
@@ -133,7 +133,7 @@ pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...)
}
void
-pg_log_generic_v(enum pg_log_level level, const char * pg_restrict fmt, va_list ap)
+pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap)
{
int save_errno = errno;
const char *filename = NULL;
diff --git a/src/common/pg_lzcompress.c b/src/common/pg_lzcompress.c
index 97b0e40e40..988b3987d0 100644
--- a/src/common/pg_lzcompress.c
+++ b/src/common/pg_lzcompress.c
@@ -687,7 +687,7 @@ pglz_compress(const char *source, int32 slen, char *dest,
*/
int32
pglz_decompress(const char *source, int32 slen, char *dest,
- int32 rawsize, bool check_complete)
+ int32 rawsize, bool check_complete)
{
const unsigned char *sp;
const unsigned char *srcend;
@@ -759,10 +759,9 @@ pglz_decompress(const char *source, int32 slen, char *dest,
}
/*
- * Check we decompressed the right amount.
- * If we are slicing, then we won't necessarily
- * be at the end of the source or dest buffers
- * when we hit a stop, so we don't test them.
+ * Check we decompressed the right amount. If we are slicing, then we
+ * won't necessarily be at the end of the source or dest buffers when we
+ * hit a stop, so we don't test them.
*/
if (check_complete && (dp != destend || sp != srcend))
return -1;
@@ -770,5 +769,5 @@ pglz_decompress(const char *source, int32 slen, char *dest,
/*
* That's it.
*/
- return (char*)dp - dest;
+ return (char *) dp - dest;
}
diff --git a/src/common/rmtree.c b/src/common/rmtree.c
index 2c3c4dd2d4..3c207917b5 100644
--- a/src/common/rmtree.c
+++ b/src/common/rmtree.c
@@ -77,7 +77,7 @@ rmtree(const char *path, bool rmtopdir)
if (errno != ENOENT)
{
pg_log_warning("could not stat file or directory \"%s\": %m",
- pathbuf);
+ pathbuf);
result = false;
}
continue;
@@ -99,7 +99,7 @@ rmtree(const char *path, bool rmtopdir)
if (errno != ENOENT)
{
pg_log_warning("could not remove file or directory \"%s\": %m",
- pathbuf);
+ pathbuf);
result = false;
}
}
@@ -111,7 +111,7 @@ rmtree(const char *path, bool rmtopdir)
if (rmdir(path) != 0)
{
pg_log_warning("could not remove file or directory \"%s\": %m",
- path);
+ path);
result = false;
}
}
diff --git a/src/include/access/amapi.h b/src/include/access/amapi.h
index 09a7404267..6e3db06eed 100644
--- a/src/include/access/amapi.h
+++ b/src/include/access/amapi.h
@@ -216,7 +216,7 @@ typedef struct IndexAmRoutine
amcostestimate_function amcostestimate;
amoptions_function amoptions;
amproperty_function amproperty; /* can be NULL */
- ambuildphasename_function ambuildphasename; /* can be NULL */
+ ambuildphasename_function ambuildphasename; /* can be NULL */
amvalidate_function amvalidate;
ambeginscan_function ambeginscan;
amrescan_function amrescan;
diff --git a/src/include/access/gistxlog.h b/src/include/access/gistxlog.h
index e66b034d7b..969a5376b5 100644
--- a/src/include/access/gistxlog.h
+++ b/src/include/access/gistxlog.h
@@ -18,9 +18,10 @@
#include "lib/stringinfo.h"
#define XLOG_GIST_PAGE_UPDATE 0x00
-#define XLOG_GIST_DELETE 0x10 /* delete leaf index tuples for a page */
-#define XLOG_GIST_PAGE_REUSE 0x20 /* old page is about to be reused from
- * FSM */
+#define XLOG_GIST_DELETE 0x10 /* delete leaf index tuples for a
+ * page */
+#define XLOG_GIST_PAGE_REUSE 0x20 /* old page is about to be reused
+ * from FSM */
#define XLOG_GIST_PAGE_SPLIT 0x30
/* #define XLOG_GIST_INSERT_COMPLETE 0x40 */ /* not used anymore */
/* #define XLOG_GIST_CREATE_INDEX 0x50 */ /* not used anymore */
@@ -83,7 +84,8 @@ typedef struct gistxlogPageSplit
typedef struct gistxlogPageDelete
{
TransactionId deleteXid; /* last Xid which could see page in scan */
- OffsetNumber downlinkOffset; /* Offset of downlink referencing this page */
+ OffsetNumber downlinkOffset; /* Offset of downlink referencing this
+ * page */
} gistxlogPageDelete;
#define SizeOfGistxlogPageDelete (offsetof(gistxlogPageDelete, downlinkOffset) + sizeof(OffsetNumber))
diff --git a/src/include/access/hio.h b/src/include/access/hio.h
index cec087cb1a..8daa12a5cb 100644
--- a/src/include/access/hio.h
+++ b/src/include/access/hio.h
@@ -31,7 +31,7 @@ typedef struct BulkInsertStateData
{
BufferAccessStrategy strategy; /* our BULKWRITE strategy object */
Buffer current_buf; /* current insertion target page */
-} BulkInsertStateData;
+} BulkInsertStateData;
extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 8bb480d322..2bde2c2bc4 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -78,7 +78,7 @@ typedef struct ParallelBlockTableScanDescData
BlockNumber phs_startblock; /* starting block number */
pg_atomic_uint64 phs_nallocated; /* number of blocks allocated to
* workers so far. */
-} ParallelBlockTableScanDescData;
+} ParallelBlockTableScanDescData;
typedef struct ParallelBlockTableScanDescData *ParallelBlockTableScanDesc;
/*
diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h
index ce22bd36af..27daf9436d 100644
--- a/src/include/access/spgist_private.h
+++ b/src/include/access/spgist_private.h
@@ -146,7 +146,7 @@ typedef struct SpGistSearchItem
/* array with numberOfOrderBys entries */
double distances[FLEXIBLE_ARRAY_MEMBER];
-} SpGistSearchItem;
+} SpGistSearchItem;
#define SizeOfSpGistSearchItem(n_distances) \
(offsetof(SpGistSearchItem, distances) + sizeof(double) * (n_distances))
diff --git a/src/include/access/spgxlog.h b/src/include/access/spgxlog.h
index ee8fc6fd6b..073f740a13 100644
--- a/src/include/access/spgxlog.h
+++ b/src/include/access/spgxlog.h
@@ -18,7 +18,7 @@
#include "storage/off.h"
/* XLOG record types for SPGiST */
-/* #define XLOG_SPGIST_CREATE_INDEX 0x00 */ /* not used anymore */
+ /* #define XLOG_SPGIST_CREATE_INDEX 0x00 */ /* not used anymore */
#define XLOG_SPGIST_ADD_LEAF 0x10
#define XLOG_SPGIST_MOVE_LEAFS 0x20
#define XLOG_SPGIST_ADD_NODE 0x30
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 53b58c51da..31c5eb9e92 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -45,21 +45,21 @@ struct ValidateIndexState;
typedef enum ScanOptions
{
/* one of SO_TYPE_* may be specified */
- SO_TYPE_SEQSCAN = 1 << 0,
- SO_TYPE_BITMAPSCAN = 1 << 1,
- SO_TYPE_SAMPLESCAN = 1 << 2,
- SO_TYPE_ANALYZE = 1 << 3,
+ SO_TYPE_SEQSCAN = 1 << 0,
+ SO_TYPE_BITMAPSCAN = 1 << 1,
+ SO_TYPE_SAMPLESCAN = 1 << 2,
+ SO_TYPE_ANALYZE = 1 << 3,
/* several of SO_ALLOW_* may be specified */
/* allow or disallow use of access strategy */
- SO_ALLOW_STRAT = 1 << 4,
+ SO_ALLOW_STRAT = 1 << 4,
/* report location to syncscan logic? */
- SO_ALLOW_SYNC = 1 << 5,
+ SO_ALLOW_SYNC = 1 << 5,
/* verify visibility page-at-a-time? */
- SO_ALLOW_PAGEMODE = 1 << 6,
+ SO_ALLOW_PAGEMODE = 1 << 6,
/* unregister snapshot at scan end? */
- SO_TEMP_SNAPSHOT = 1 << 7
+ SO_TEMP_SNAPSHOT = 1 << 7
} ScanOptions;
/*
@@ -575,12 +575,12 @@ typedef struct TableAmRoutine
/*
* This callback should return true if the relation requires a TOAST table
- * and false if it does not. It may wish to examine the relation's
- * tuple descriptor before making a decision, but if it uses some other
- * method of storing large values (or if it does not support them) it can
- * simply return false.
+ * and false if it does not. It may wish to examine the relation's tuple
+ * descriptor before making a decision, but if it uses some other method
+ * of storing large values (or if it does not support them) it can simply
+ * return false.
*/
- bool (*relation_needs_toast_table) (Relation rel);
+ bool (*relation_needs_toast_table) (Relation rel);
/* ------------------------------------------------------------------------
@@ -738,7 +738,7 @@ table_beginscan(Relation rel, Snapshot snapshot,
int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}
diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h
index a592d22a0e..3610a9795e 100644
--- a/src/include/access/tupdesc.h
+++ b/src/include/access/tupdesc.h
@@ -38,7 +38,7 @@ typedef struct TupleConstr
{
AttrDefault *defval; /* array */
ConstrCheck *check; /* array */
- struct AttrMissing *missing; /* missing attributes values, NULL if none */
+ struct AttrMissing *missing; /* missing attributes values, NULL if none */
uint16 num_defval;
uint16 num_check;
bool has_not_null;
@@ -85,7 +85,7 @@ typedef struct TupleDescData
TupleConstr *constr; /* constraints, or NULL if none */
/* attrs[N] is the description of Attribute Number N+1 */
FormData_pg_attribute attrs[FLEXIBLE_ARRAY_MEMBER];
-} TupleDescData;
+} TupleDescData;
typedef struct TupleDescData *TupleDesc;
/* Accessor for the i'th attribute of tupdesc. */
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 2af938bfdc..1ead0256ad 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -95,7 +95,7 @@ typedef enum
RECOVERY_TARGET_TIMELINE_CONTROLFILE,
RECOVERY_TARGET_TIMELINE_LATEST,
RECOVERY_TARGET_TIMELINE_NUMERIC
-} RecoveryTargetTimeLineGoal;
+} RecoveryTargetTimeLineGoal;
extern XLogRecPtr ProcLastRecPtr;
extern XLogRecPtr XactLastRecEnd;
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index 39a474c499..f7596071c1 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -268,7 +268,7 @@ typedef enum
RECOVERY_TARGET_ACTION_PAUSE,
RECOVERY_TARGET_ACTION_PROMOTE,
RECOVERY_TARGET_ACTION_SHUTDOWN
-} RecoveryTargetAction;
+} RecoveryTargetAction;
/*
* Method table for resource managers.
diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h
index 57545b70d8..5a43c1e1a8 100644
--- a/src/include/catalog/dependency.h
+++ b/src/include/catalog/dependency.h
@@ -136,7 +136,8 @@ typedef enum ObjectClass
#define PERFORM_DELETION_QUIETLY 0x0004 /* suppress notices */
#define PERFORM_DELETION_SKIP_ORIGINAL 0x0008 /* keep original obj */
#define PERFORM_DELETION_SKIP_EXTENSIONS 0x0010 /* keep extensions */
-#define PERFORM_DELETION_CONCURRENT_LOCK 0x0020 /* normal drop with concurrent lock mode */
+#define PERFORM_DELETION_CONCURRENT_LOCK 0x0020 /* normal drop with
+ * concurrent lock mode */
/* in dependency.c */
@@ -200,10 +201,10 @@ extern long changeDependencyFor(Oid classId, Oid objectId,
Oid newRefObjectId);
extern long changeDependenciesOf(Oid classId, Oid oldObjectId,
- Oid newObjectId);
+ Oid newObjectId);
extern long changeDependenciesOn(Oid refClassId, Oid oldRefObjectId,
- Oid newRefObjectId);
+ Oid newRefObjectId);
extern Oid getExtensionOfObject(Oid classId, Oid objectId);
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index 799efee954..c5729e2628 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -79,18 +79,18 @@ extern Oid index_create(Relation heapRelation,
#define INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS (1 << 4)
extern Oid index_concurrently_create_copy(Relation heapRelation,
- Oid oldIndexId,
- const char *newName);
+ Oid oldIndexId,
+ const char *newName);
extern void index_concurrently_build(Oid heapRelationId,
- Oid indexRelationId);
+ Oid indexRelationId);
extern void index_concurrently_swap(Oid newIndexId,
- Oid oldIndexId,
- const char *oldName);
+ Oid oldIndexId,
+ const char *oldName);
extern void index_concurrently_set_dead(Oid heapId,
- Oid indexId);
+ Oid indexId);
extern ObjectAddress index_constraint_create(Relation heapRelation,
Oid indexRelationId,
diff --git a/src/include/catalog/pg_attrdef.h b/src/include/catalog/pg_attrdef.h
index db2abcbb75..4ec371bc4a 100644
--- a/src/include/catalog/pg_attrdef.h
+++ b/src/include/catalog/pg_attrdef.h
@@ -34,7 +34,8 @@ CATALOG(pg_attrdef,2604,AttrDefaultRelationId)
int16 adnum; /* attnum of attribute */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- pg_node_tree adbin BKI_FORCE_NOT_NULL; /* nodeToString representation of default */
+ pg_node_tree adbin BKI_FORCE_NOT_NULL; /* nodeToString representation of
+ * default */
#endif
} FormData_pg_attrdef;
diff --git a/src/include/catalog/pg_default_acl.h b/src/include/catalog/pg_default_acl.h
index 545cee710d..601b11e11c 100644
--- a/src/include/catalog/pg_default_acl.h
+++ b/src/include/catalog/pg_default_acl.h
@@ -35,7 +35,8 @@ CATALOG(pg_default_acl,826,DefaultAclRelationId)
char defaclobjtype; /* see DEFACLOBJ_xxx constants below */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- aclitem defaclacl[1] BKI_FORCE_NOT_NULL; /* permissions to add at CREATE time */
+ aclitem defaclacl[1] BKI_FORCE_NOT_NULL; /* permissions to add at
+ * CREATE time */
#endif
} FormData_pg_default_acl;
diff --git a/src/include/catalog/pg_policy.h b/src/include/catalog/pg_policy.h
index 1ca32dec11..62a78d8049 100644
--- a/src/include/catalog/pg_policy.h
+++ b/src/include/catalog/pg_policy.h
@@ -35,7 +35,8 @@ CATALOG(pg_policy,3256,PolicyRelationId)
bool polpermissive; /* restrictive or permissive policy */
#ifdef CATALOG_VARLEN
- Oid polroles[1] BKI_FORCE_NOT_NULL; /* Roles associated with policy */
+ Oid polroles[1] BKI_FORCE_NOT_NULL; /* Roles associated with
+ * policy */
pg_node_tree polqual; /* Policy quals. */
pg_node_tree polwithcheck; /* WITH CHECK quals. */
#endif
diff --git a/src/include/catalog/storage.h b/src/include/catalog/storage.h
index 3579d3f3eb..f535ed1b5d 100644
--- a/src/include/catalog/storage.h
+++ b/src/include/catalog/storage.h
@@ -24,7 +24,7 @@ extern void RelationDropStorage(Relation rel);
extern void RelationPreserveStorage(RelFileNode rnode, bool atCommit);
extern void RelationTruncate(Relation rel, BlockNumber nblocks);
extern void RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
- ForkNumber forkNum, char relpersistence);
+ ForkNumber forkNum, char relpersistence);
/*
* These functions used to be in storage/smgr/smgr.c, which explains the
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index 4003080323..029406fd4e 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -94,7 +94,7 @@ extern void UpdateStatisticsForTypeChange(Oid statsOid,
/* commands/aggregatecmds.c */
extern ObjectAddress DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle,
- List *parameters, bool replace);
+ List *parameters, bool replace);
/* commands/opclasscmds.c */
extern ObjectAddress DefineOpClass(CreateOpClassStmt *stmt);
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 6d460ffd74..ba6254bf3f 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -79,7 +79,7 @@ typedef struct TransitionCaptureState
* and allow the inserting code (copy.c and nodeModifyTable.c) to provide
* a slot containing the original tuple directly.
*/
- TupleTableSlot *tcs_original_insert_tuple;
+ TupleTableSlot *tcs_original_insert_tuple;
/*
* Private data including the tuplestore(s) into which to insert tuples.
@@ -260,7 +260,7 @@ extern bool AfterTriggerPendingOnRel(Oid relid);
extern bool RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel,
TupleTableSlot *old_slot, TupleTableSlot *new_slot);
extern bool RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
- TupleTableSlot *old_slot, TupleTableSlot *new_slot);
+ TupleTableSlot *old_slot, TupleTableSlot *new_slot);
extern bool RI_Initial_Check(Trigger *trigger,
Relation fk_rel, Relation pk_rel);
extern void RI_PartitionRemove_Check(Trigger *trigger, Relation fk_rel,
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 270f61b083..c6325a989d 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -181,9 +181,9 @@ typedef struct VacuumParams
* which verbose logs are activated, -1
* to use default */
VacOptTernaryValue index_cleanup; /* Do index vacuum and cleanup,
- * default value depends on reloptions */
+ * default value depends on reloptions */
VacOptTernaryValue truncate; /* Truncate empty pages at the end,
- * default value depends on reloptions */
+ * default value depends on reloptions */
} VacuumParams;
/* GUC parameters */
diff --git a/src/include/common/file_utils.h b/src/include/common/file_utils.h
index 5cb1fd3a00..3aefc38b10 100644
--- a/src/include/common/file_utils.h
+++ b/src/include/common/file_utils.h
@@ -15,10 +15,10 @@
#ifndef FILE_UTILS_H
#define FILE_UTILS_H
-extern int fsync_fname(const char *fname, bool isdir);
+extern int fsync_fname(const char *fname, bool isdir);
extern void fsync_pgdata(const char *pg_data, int serverVersion);
extern void fsync_dir_recurse(const char *dir);
-extern int durable_rename(const char *oldfile, const char *newfile);
+extern int durable_rename(const char *oldfile, const char *newfile);
extern int fsync_parent_path(const char *fname);
#endif /* FILE_UTILS_H */
diff --git a/src/include/common/logging.h b/src/include/common/logging.h
index 5a3249198a..4a28e9a3a8 100644
--- a/src/include/common/logging.h
+++ b/src/include/common/logging.h
@@ -63,14 +63,14 @@ extern enum pg_log_level __pg_log_level;
*/
#define PG_LOG_FLAG_TERSE 1
-void pg_logging_init(const char *argv0);
-void pg_logging_config(int new_flags);
-void pg_logging_set_level(enum pg_log_level new_level);
-void pg_logging_set_pre_callback(void (*cb)(void));
-void pg_logging_set_locus_callback(void (*cb)(const char **filename, uint64 *lineno));
+void pg_logging_init(const char *argv0);
+void pg_logging_config(int new_flags);
+void pg_logging_set_level(enum pg_log_level new_level);
+void pg_logging_set_pre_callback(void (*cb) (void));
+void pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno));
-void pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...) pg_attribute_printf(2, 3);
-void pg_log_generic_v(enum pg_log_level level, const char * pg_restrict fmt, va_list ap) pg_attribute_printf(2, 0);
+void pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...) pg_attribute_printf(2, 3);
+void pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap) pg_attribute_printf(2, 0);
#define pg_log_fatal(...) do { \
if (likely(__pg_log_level <= PG_LOG_FATAL)) pg_log_generic(PG_LOG_FATAL, __VA_ARGS__); \
@@ -92,4 +92,4 @@ void pg_log_generic_v(enum pg_log_level level, const char * pg_restrict fmt, va_
if (unlikely(__pg_log_level <= PG_LOG_DEBUG)) pg_log_generic(PG_LOG_DEBUG, __VA_ARGS__); \
} while(0)
-#endif /* COMMON_LOGGING_H */
+#endif /* COMMON_LOGGING_H */
diff --git a/src/include/executor/execParallel.h b/src/include/executor/execParallel.h
index d104d28a57..98900cfbf2 100644
--- a/src/include/executor/execParallel.h
+++ b/src/include/executor/execParallel.h
@@ -27,7 +27,7 @@ typedef struct ParallelExecutorInfo
ParallelContext *pcxt; /* parallel context we're using */
BufferUsage *buffer_usage; /* points to bufusage area in DSM */
SharedExecutorInstrumentation *instrumentation; /* optional */
- struct SharedJitInstrumentation *jit_instrumentation; /* optional */
+ struct SharedJitInstrumentation *jit_instrumentation; /* optional */
dsa_area *area; /* points to DSA area in DSM */
dsa_pointer param_exec; /* serialized PARAM_EXEC parameters */
bool finished; /* set true by ExecParallelFinish */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index eb4c8b5e79..14a5891585 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -127,15 +127,15 @@ extern TupleHashTable BuildTupleHashTable(PlanState *parent,
MemoryContext tablecxt,
MemoryContext tempcxt, bool use_variable_hash_iv);
extern TupleHashTable BuildTupleHashTableExt(PlanState *parent,
- TupleDesc inputDesc,
- int numCols, AttrNumber *keyColIdx,
- const Oid *eqfuncoids,
- FmgrInfo *hashfunctions,
- Oid *collations,
- long nbuckets, Size additionalsize,
- MemoryContext metacxt,
- MemoryContext tablecxt,
- MemoryContext tempcxt, bool use_variable_hash_iv);
+ TupleDesc inputDesc,
+ int numCols, AttrNumber *keyColIdx,
+ const Oid *eqfuncoids,
+ FmgrInfo *hashfunctions,
+ Oid *collations,
+ long nbuckets, Size additionalsize,
+ MemoryContext metacxt,
+ MemoryContext tablecxt,
+ MemoryContext tempcxt, bool use_variable_hash_iv);
extern TupleHashEntry LookupTupleHashEntry(TupleHashTable hashtable,
TupleTableSlot *slot,
bool *isnew);
@@ -204,7 +204,7 @@ extern void EvalPlanQualInit(EPQState *epqstate, EState *estate,
extern void EvalPlanQualSetPlan(EPQState *epqstate,
Plan *subplan, List *auxrowmarks);
extern TupleTableSlot *EvalPlanQualSlot(EPQState *epqstate,
- Relation relation, Index rti);
+ Relation relation, Index rti);
#define EvalPlanQualSetSlot(epqstate, slot) ((epqstate)->origslot = (slot))
extern void EvalPlanQualFetchRowMarks(EPQState *epqstate);
@@ -435,12 +435,12 @@ extern void ExecScanReScan(ScanState *node);
*/
extern void ExecInitResultTypeTL(PlanState *planstate);
extern void ExecInitResultSlot(PlanState *planstate,
- const TupleTableSlotOps *tts_ops);
+ const TupleTableSlotOps *tts_ops);
extern void ExecInitResultTupleSlotTL(PlanState *planstate,
- const TupleTableSlotOps *tts_ops);
+ const TupleTableSlotOps *tts_ops);
extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate,
- TupleDesc tupleDesc,
- const TupleTableSlotOps *tts_ops);
+ TupleDesc tupleDesc,
+ const TupleTableSlotOps *tts_ops);
extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate,
TupleDesc tupledesc,
const TupleTableSlotOps *tts_ops);
@@ -516,7 +516,7 @@ extern void ExecAssignExprContext(EState *estate, PlanState *planstate);
extern TupleDesc ExecGetResultType(PlanState *planstate);
extern TupleTableSlot ExecGetResultSlot(PlanState *planstate);
extern const TupleTableSlotOps *ExecGetResultSlotOps(PlanState *planstate,
- bool *isfixed);
+ bool *isfixed);
extern void ExecAssignProjectionInfo(PlanState *planstate,
TupleDesc inputDesc);
extern void ExecConditionalAssignProjectionInfo(PlanState *planstate,
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index 63c2cd14f0..7c7059f78f 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -127,21 +127,21 @@ typedef struct TupleTableSlot
#define FIELDNO_TUPLETABLESLOT_ISNULL 6
bool *tts_isnull; /* current per-attribute isnull flags */
MemoryContext tts_mcxt; /* slot itself is in this context */
- ItemPointerData tts_tid; /* stored tuple's tid */
- Oid tts_tableOid; /* table oid of tuple */
+ ItemPointerData tts_tid; /* stored tuple's tid */
+ Oid tts_tableOid; /* table oid of tuple */
} TupleTableSlot;
/* routines for a TupleTableSlot implementation */
struct TupleTableSlotOps
{
/* Minimum size of the slot */
- size_t base_slot_size;
+ size_t base_slot_size;
/* Initialization. */
- void (*init)(TupleTableSlot *slot);
+ void (*init) (TupleTableSlot *slot);
/* Destruction. */
- void (*release)(TupleTableSlot *slot);
+ void (*release) (TupleTableSlot *slot);
/*
* Clear the contents of the slot. Only the contents are expected to be
@@ -149,7 +149,7 @@ struct TupleTableSlotOps
* this callback should free the memory allocated for the tuple contained
* in the slot.
*/
- void (*clear)(TupleTableSlot *slot);
+ void (*clear) (TupleTableSlot *slot);
/*
* Fill up first natts entries of tts_values and tts_isnull arrays with
@@ -158,26 +158,26 @@ struct TupleTableSlotOps
* in which case it should set tts_nvalid to the number of returned
* columns.
*/
- void (*getsomeattrs)(TupleTableSlot *slot, int natts);
+ void (*getsomeattrs) (TupleTableSlot *slot, int natts);
/*
* Returns value of the given system attribute as a datum and sets isnull
* to false, if it's not NULL. Throws an error if the slot type does not
* support system attributes.
*/
- Datum (*getsysattr)(TupleTableSlot *slot, int attnum, bool *isnull);
+ Datum (*getsysattr) (TupleTableSlot *slot, int attnum, bool *isnull);
/*
* Make the contents of the slot solely depend on the slot, and not on
* underlying resources (like another memory context, buffers, etc).
*/
- void (*materialize)(TupleTableSlot *slot);
+ void (*materialize) (TupleTableSlot *slot);
/*
* Copy the contents of the source slot into the destination slot's own
* context. Invoked using callback of the destination slot.
*/
- void (*copyslot) (TupleTableSlot *dstslot, TupleTableSlot *srcslot);
+ void (*copyslot) (TupleTableSlot *dstslot, TupleTableSlot *srcslot);
/*
* Return a heap tuple "owned" by the slot. It is slot's responsibility to
@@ -185,7 +185,7 @@ struct TupleTableSlotOps
* heap tuple, it should not implement this callback and should set it as
* NULL.
*/
- HeapTuple (*get_heap_tuple)(TupleTableSlot *slot);
+ HeapTuple (*get_heap_tuple) (TupleTableSlot *slot);
/*
* Return a minimal tuple "owned" by the slot. It is slot's responsibility
@@ -193,7 +193,7 @@ struct TupleTableSlotOps
* "own" a minimal tuple, it should not implement this callback and should
* set it as NULL.
*/
- MinimalTuple (*get_minimal_tuple)(TupleTableSlot *slot);
+ MinimalTuple (*get_minimal_tuple) (TupleTableSlot *slot);
/*
* Return a copy of heap tuple representing the contents of the slot. The
@@ -203,17 +203,17 @@ struct TupleTableSlotOps
* the slot i.e. the caller has to take responsibilty to free memory
* consumed by the slot.
*/
- HeapTuple (*copy_heap_tuple)(TupleTableSlot *slot);
+ HeapTuple (*copy_heap_tuple) (TupleTableSlot *slot);
/*
- * Return a copy of minimal tuple representing the contents of the slot. The
- * copy needs to be palloc'd in the current memory context. The slot
+ * Return a copy of minimal tuple representing the contents of the slot.
+ * The copy needs to be palloc'd in the current memory context. The slot
* itself is expected to remain unaffected. It is *not* expected to have
* meaningful "system columns" in the copy. The copy is not be "owned" by
* the slot i.e. the caller has to take responsibilty to free memory
* consumed by the slot.
*/
- MinimalTuple (*copy_minimal_tuple)(TupleTableSlot *slot);
+ MinimalTuple (*copy_minimal_tuple) (TupleTableSlot *slot);
};
/*
@@ -239,7 +239,7 @@ typedef struct VirtualTupleTableSlot
{
TupleTableSlot base;
- char *data; /* data for materialized slots */
+ char *data; /* data for materialized slots */
} VirtualTupleTableSlot;
typedef struct HeapTupleTableSlot
@@ -247,10 +247,10 @@ typedef struct HeapTupleTableSlot
TupleTableSlot base;
#define FIELDNO_HEAPTUPLETABLESLOT_TUPLE 1
- HeapTuple tuple; /* physical tuple */
+ HeapTuple tuple; /* physical tuple */
#define FIELDNO_HEAPTUPLETABLESLOT_OFF 2
- uint32 off; /* saved state for slot_deform_heap_tuple */
- HeapTupleData tupdata; /* optional workspace for storing tuple */
+ uint32 off; /* saved state for slot_deform_heap_tuple */
+ HeapTupleData tupdata; /* optional workspace for storing tuple */
} HeapTupleTableSlot;
/* heap tuple residing in a buffer */
@@ -265,7 +265,7 @@ typedef struct BufferHeapTupleTableSlot
* false in such a case, since presumably tts_tuple is pointing at the
* buffer page.)
*/
- Buffer buffer; /* tuple's buffer, or InvalidBuffer */
+ Buffer buffer; /* tuple's buffer, or InvalidBuffer */
} BufferHeapTupleTableSlot;
typedef struct MinimalTupleTableSlot
@@ -280,11 +280,11 @@ typedef struct MinimalTupleTableSlot
* physical tuples.
*/
#define FIELDNO_MINIMALTUPLETABLESLOT_TUPLE 1
- HeapTuple tuple; /* tuple wrapper */
- MinimalTuple mintuple; /* minimal tuple, or NULL if none */
- HeapTupleData minhdr; /* workspace for minimal-tuple-only case */
+ HeapTuple tuple; /* tuple wrapper */
+ MinimalTuple mintuple; /* minimal tuple, or NULL if none */
+ HeapTupleData minhdr; /* workspace for minimal-tuple-only case */
#define FIELDNO_MINIMALTUPLETABLESLOT_OFF 4
- uint32 off; /* saved state for slot_deform_heap_tuple */
+ uint32 off; /* saved state for slot_deform_heap_tuple */
} MinimalTupleTableSlot;
/*
@@ -313,13 +313,13 @@ extern TupleTableSlot *ExecStoreBufferHeapTuple(HeapTuple tuple,
TupleTableSlot *slot,
Buffer buffer);
extern TupleTableSlot *ExecStorePinnedBufferHeapTuple(HeapTuple tuple,
- TupleTableSlot *slot,
- Buffer buffer);
+ TupleTableSlot *slot,
+ Buffer buffer);
extern TupleTableSlot *ExecStoreMinimalTuple(MinimalTuple mtup,
TupleTableSlot *slot,
bool shouldFree);
extern void ExecForceStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot,
- bool shouldFree);
+ bool shouldFree);
extern TupleTableSlot *ExecStoreVirtualTuple(TupleTableSlot *slot);
extern TupleTableSlot *ExecStoreAllNullTuple(TupleTableSlot *slot);
extern void ExecStoreHeapTupleDatum(Datum data, TupleTableSlot *slot);
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index 00b1fbe441..96415a9c8b 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -286,7 +286,7 @@ extern void be_tls_get_peer_serial(Port *port, char *ptr, size_t len);
extern char *be_tls_get_certificate_hash(Port *port, size_t *len);
#endif
-#endif /* USE_SSL */
+#endif /* USE_SSL */
#ifdef ENABLE_GSS
/*
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index b677c7e821..61283806b6 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -25,7 +25,7 @@
#include <signal.h>
-#include "datatype/timestamp.h" /* for TimestampTZ */
+#include "datatype/timestamp.h" /* for TimestampTZ */
#include "pgtime.h" /* for pg_time_t */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 4b1635eb8b..64122bc1e3 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -431,7 +431,7 @@ typedef struct ResultRelInfo
Instrumentation *ri_TrigInstrument;
/* On-demand created slots for triggers / returning processing */
- TupleTableSlot *ri_ReturningSlot; /* for trigger output tuples */
+ TupleTableSlot *ri_ReturningSlot; /* for trigger output tuples */
TupleTableSlot *ri_TrigOldSlot; /* for a trigger's old tuple */
TupleTableSlot *ri_TrigNewSlot; /* for a trigger's new tuple */
@@ -700,7 +700,7 @@ typedef struct TupleHashTableData
AttrNumber *keyColIdx; /* attr numbers of key columns */
FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */
ExprState *tab_eq_func; /* comparator for table datatype(s) */
- Oid *tab_collations; /* collations for hash and comparison */
+ Oid *tab_collations; /* collations for hash and comparison */
MemoryContext tablecxt; /* memory context containing table */
MemoryContext tempcxt; /* context for function evaluations */
Size entrysize; /* actual size to make each hash entry */
@@ -870,7 +870,7 @@ typedef struct SubPlanState
AttrNumber *keyColIdx; /* control data for hash tables */
Oid *tab_eq_funcoids; /* equality func oids for table
* datatype(s) */
- Oid *tab_collations; /* collations for hash and comparison */
+ Oid *tab_collations; /* collations for hash and comparison */
FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */
FmgrInfo *tab_eq_funcs; /* equality functions for table datatype(s) */
FmgrInfo *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
@@ -979,7 +979,7 @@ typedef struct PlanState
/*
* Other run-time state needed by most if not all node types.
*/
- TupleDesc ps_ResultTupleDesc; /* node's return type */
+ TupleDesc ps_ResultTupleDesc; /* node's return type */
TupleTableSlot *ps_ResultTupleSlot; /* slot for my result tuples */
ExprContext *ps_ExprContext; /* node's expression-evaluation context */
ProjectionInfo *ps_ProjInfo; /* info for doing tuple projection */
@@ -1018,14 +1018,14 @@ typedef struct PlanState
const TupleTableSlotOps *outerops;
const TupleTableSlotOps *innerops;
const TupleTableSlotOps *resultops;
- bool scanopsfixed;
- bool outeropsfixed;
- bool inneropsfixed;
- bool resultopsfixed;
- bool scanopsset;
- bool outeropsset;
- bool inneropsset;
- bool resultopsset;
+ bool scanopsfixed;
+ bool outeropsfixed;
+ bool inneropsfixed;
+ bool resultopsfixed;
+ bool scanopsset;
+ bool outeropsset;
+ bool inneropsset;
+ bool resultopsset;
} PlanState;
/* ----------------
@@ -1113,8 +1113,8 @@ typedef struct ModifyTableState
PlanState **mt_plans; /* subplans (one per target rel) */
int mt_nplans; /* number of plans in the array */
int mt_whichplan; /* which one is being executed (0..n-1) */
- TupleTableSlot** mt_scans; /* input tuple corresponding to underlying
- plans */
+ TupleTableSlot **mt_scans; /* input tuple corresponding to underlying
+ * plans */
ResultRelInfo *resultRelInfo; /* per-subplan target relations */
ResultRelInfo *rootResultRelInfo; /* root target relation (partitioned
* table root) */
@@ -1321,14 +1321,14 @@ typedef struct SampleScanState
*/
typedef struct
{
- struct ScanKeyData *scan_key; /* scankey to put value into */
+ struct ScanKeyData *scan_key; /* scankey to put value into */
ExprState *key_expr; /* expr to evaluate to get value */
bool key_toastable; /* is expr's result a toastable datatype? */
} IndexRuntimeKeyInfo;
typedef struct
{
- struct ScanKeyData *scan_key; /* scankey to put value into */
+ struct ScanKeyData *scan_key; /* scankey to put value into */
ExprState *array_expr; /* expr to evaluate to get array value */
int next_elem; /* next array element to use */
int num_elems; /* number of elems in current array value */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 12e9730dd0..2a8edf934f 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -2138,7 +2138,7 @@ typedef struct Constraint
bool is_no_inherit; /* is constraint non-inheritable? */
Node *raw_expr; /* expr, as untransformed parse tree */
char *cooked_expr; /* expr, as nodeToString representation */
- char generated_when; /* ALWAYS or BY DEFAULT */
+ char generated_when; /* ALWAYS or BY DEFAULT */
char generated_kind; /* currently always STORED */
/* Fields used for unique constraints (UNIQUE and PRIMARY KEY): */
@@ -3174,7 +3174,7 @@ typedef struct ClusterStmt
typedef struct VacuumStmt
{
NodeTag type;
- List *options; /* list of DefElem nodes */
+ List *options; /* list of DefElem nodes */
List *rels; /* list of VacuumRelation, or NIL for all */
bool is_vacuumcmd; /* true for VACUUM, false for ANALYZE */
} VacuumStmt;
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 1241245566..70f8b8e22b 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -818,7 +818,7 @@ typedef struct WindowAgg
int partNumCols; /* number of columns in partition clause */
AttrNumber *partColIdx; /* their indexes in the target list */
Oid *partOperators; /* equality operators for partition columns */
- Oid *partCollations; /* collations for partition columns */
+ Oid *partCollations; /* collations for partition columns */
int ordNumCols; /* number of columns in ordering clause */
AttrNumber *ordColIdx; /* their indexes in the target list */
Oid *ordOperators; /* equality operators for ordering columns */
@@ -844,7 +844,7 @@ typedef struct Unique
int numCols; /* number of columns to check for uniqueness */
AttrNumber *uniqColIdx; /* their indexes in the target list */
Oid *uniqOperators; /* equality operators to compare with */
- Oid *uniqCollations; /* collations for equality comparisons */
+ Oid *uniqCollations; /* collations for equality comparisons */
} Unique;
/* ------------
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 3d8039aa51..63d88b7a26 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -70,9 +70,9 @@ typedef enum ParseExprKind
EXPR_KIND_POLICY, /* USING or WITH CHECK expr in policy */
EXPR_KIND_PARTITION_BOUND, /* partition bound expression */
EXPR_KIND_PARTITION_EXPRESSION, /* PARTITION BY expression */
- EXPR_KIND_CALL_ARGUMENT, /* procedure argument in CALL */
+ EXPR_KIND_CALL_ARGUMENT, /* procedure argument in CALL */
EXPR_KIND_COPY_WHERE, /* WHERE condition in COPY FROM */
- EXPR_KIND_GENERATED_COLUMN, /* generation expression for a column */
+ EXPR_KIND_GENERATED_COLUMN, /* generation expression for a column */
} ParseExprKind;
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 929987190c..b5796a7bba 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -541,7 +541,7 @@ typedef struct PgStat_MsgChecksumFailure
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
int m_failurecount;
- TimestampTz m_failure_time;
+ TimestampTz m_failure_time;
} PgStat_MsgChecksumFailure;
diff --git a/src/include/port.h b/src/include/port.h
index 7e2004b178..7de6ba67f0 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -508,6 +508,7 @@ extern char *inet_net_ntop(int af, const void *src, int bits,
/* port/pg_strong_random.c */
extern bool pg_strong_random(void *buf, size_t len);
+
/*
* pg_backend_random used to be a wrapper for pg_strong_random before
* Postgres 12 for the backend code.
diff --git a/src/include/replication/logical.h b/src/include/replication/logical.h
index 0daa38cb89..7840409198 100644
--- a/src/include/replication/logical.h
+++ b/src/include/replication/logical.h
@@ -47,8 +47,8 @@ typedef struct LogicalDecodingContext
/*
* Marks the logical decoding context as fast forward decoding one. Such a
- * context does not have plugin loaded so most of the following
- * properties are unused.
+ * context does not have plugin loaded so most of the following properties
+ * are unused.
*/
bool fast_forward;
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 7646b60f94..d68c01c156 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -402,8 +402,8 @@ void ReorderBufferReturnTupleBuf(ReorderBuffer *, ReorderBufferTupleBuf *tuple)
ReorderBufferChange *ReorderBufferGetChange(ReorderBuffer *);
void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *);
-Oid * ReorderBufferGetRelids(ReorderBuffer *, int nrelids);
-void ReorderBufferReturnRelids(ReorderBuffer *, Oid *relids);
+Oid *ReorderBufferGetRelids(ReorderBuffer *, int nrelids);
+void ReorderBufferReturnRelids(ReorderBuffer *, Oid *relids);
void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *);
void ReorderBufferQueueMessage(ReorderBuffer *, TransactionId, Snapshot snapshot, XLogRecPtr lsn,
diff --git a/src/include/statistics/extended_stats_internal.h b/src/include/statistics/extended_stats_internal.h
index cc36176b3c..7235a91a06 100644
--- a/src/include/statistics/extended_stats_internal.h
+++ b/src/include/statistics/extended_stats_internal.h
@@ -71,8 +71,8 @@ extern MVDependencies *statext_dependencies_deserialize(bytea *data);
extern MCVList *statext_mcv_build(int numrows, HeapTuple *rows,
Bitmapset *attrs, VacAttrStats **stats,
double totalrows);
-extern bytea *statext_mcv_serialize(MCVList * mcv, VacAttrStats **stats);
-extern MCVList * statext_mcv_deserialize(bytea *data);
+extern bytea *statext_mcv_serialize(MCVList *mcv, VacAttrStats **stats);
+extern MCVList *statext_mcv_deserialize(bytea *data);
extern MultiSortSupport multi_sort_init(int ndims);
extern void multi_sort_add_dimension(MultiSortSupport mss, int sortdim,
diff --git a/src/include/statistics/statistics.h b/src/include/statistics/statistics.h
index 69724cc107..6810cf60fa 100644
--- a/src/include/statistics/statistics.h
+++ b/src/include/statistics/statistics.h
@@ -77,7 +77,7 @@ typedef struct MVDependencies
typedef struct MCVItem
{
double frequency; /* frequency of this combination */
- double base_frequency; /* frequency if independent */
+ double base_frequency; /* frequency if independent */
bool *isnull; /* NULL flags */
Datum *values; /* item values */
} MCVItem;
diff --git a/src/include/storage/fd.h b/src/include/storage/fd.h
index a03b4d14a2..53f69d46db 100644
--- a/src/include/storage/fd.h
+++ b/src/include/storage/fd.h
@@ -147,7 +147,7 @@ extern int durable_rename(const char *oldfile, const char *newfile, int loglevel
extern int durable_unlink(const char *fname, int loglevel);
extern int durable_link_or_rename(const char *oldfile, const char *newfile, int loglevel);
extern void SyncDataDirectory(void);
-extern int data_sync_elevel(int elevel);
+extern int data_sync_elevel(int elevel);
/* Filename components */
#define PG_TEMP_FILES_DIR "pgsql_tmp"
diff --git a/src/include/storage/md.h b/src/include/storage/md.h
index a6758a10dc..b038c8b1ae 100644
--- a/src/include/storage/md.h
+++ b/src/include/storage/md.h
@@ -44,8 +44,8 @@ extern void ForgetDatabaseSyncRequests(Oid dbid);
extern void DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo);
/* md sync callbacks */
-extern int mdsyncfiletag(const FileTag *ftag, char *path);
-extern int mdunlinkfiletag(const FileTag *ftag, char *path);
+extern int mdsyncfiletag(const FileTag *ftag, char *path);
+extern int mdunlinkfiletag(const FileTag *ftag, char *path);
extern bool mdfiletagmatches(const FileTag *ftag, const FileTag *candidate);
#endif /* MD_H */
diff --git a/src/include/tcop/deparse_utility.h b/src/include/tcop/deparse_utility.h
index 0c3c6e1f67..7884e04647 100644
--- a/src/include/tcop/deparse_utility.h
+++ b/src/include/tcop/deparse_utility.h
@@ -102,7 +102,7 @@ typedef struct CollectedCommand
} defprivs;
} d;
- struct CollectedCommand *parent; /* when nested */
+ struct CollectedCommand *parent; /* when nested */
} CollectedCommand;
#endif /* DEPARSE_UTILITY_H */
diff --git a/src/include/utils/datum.h b/src/include/utils/datum.h
index 4365bf06e6..2d78c8774c 100644
--- a/src/include/utils/datum.h
+++ b/src/include/utils/datum.h
@@ -53,7 +53,7 @@ extern bool datumIsEqual(Datum value1, Datum value2,
* true if the two datums are equal, false otherwise.
*/
extern bool datum_image_eq(Datum value1, Datum value2,
- bool typByVal, int typLen);
+ bool typByVal, int typLen);
/*
* Serialize and restore datums so that we can transfer them to parallel
diff --git a/src/interfaces/ecpg/ecpglib/cursor.c b/src/interfaces/ecpg/ecpglib/cursor.c
index 133d62321b..c364a3c97d 100644
--- a/src/interfaces/ecpg/ecpglib/cursor.c
+++ b/src/interfaces/ecpg/ecpglib/cursor.c
@@ -25,14 +25,14 @@ static bool find_cursor(const char *, const struct connection *);
* others --- keep same as the parameters in ECPGdo() function
*/
bool
-ECPGopen(const char *cursor_name,const char *prepared_name,
- const int lineno, const int compat,const int force_indicator,
- const char *connection_name, const bool questionmarks,
- const int st, const char *query,...)
+ECPGopen(const char *cursor_name, const char *prepared_name,
+ const int lineno, const int compat, const int force_indicator,
+ const char *connection_name, const bool questionmarks,
+ const int st, const char *query,...)
{
va_list args;
bool status;
- const char *real_connection_name = NULL;
+ const char *real_connection_name = NULL;
if (!query)
{
@@ -53,8 +53,8 @@ ECPGopen(const char *cursor_name,const char *prepared_name,
else
{
/*
- * If can't get the connection name by declared name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by declared name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
}
@@ -81,13 +81,13 @@ ECPGopen(const char *cursor_name,const char *prepared_name,
*/
bool
ECPGfetch(const char *cursor_name,
- const int lineno, const int compat,const int force_indicator,
- const char *connection_name, const bool questionmarks,
- const int st, const char *query,...)
+ const int lineno, const int compat, const int force_indicator,
+ const char *connection_name, const bool questionmarks,
+ const int st, const char *query,...)
{
va_list args;
bool status;
- const char *real_connection_name = NULL;
+ const char *real_connection_name = NULL;
if (!query)
{
@@ -99,8 +99,8 @@ ECPGfetch(const char *cursor_name,
if (real_connection_name == NULL)
{
/*
- * If can't get the connection name by cursor name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by cursor name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
}
@@ -123,13 +123,13 @@ ECPGfetch(const char *cursor_name,
*/
bool
ECPGclose(const char *cursor_name,
- const int lineno, const int compat,const int force_indicator,
- const char *connection_name, const bool questionmarks,
- const int st, const char *query,...)
+ const int lineno, const int compat, const int force_indicator,
+ const char *connection_name, const bool questionmarks,
+ const int st, const char *query,...)
{
va_list args;
bool status;
- const char *real_connection_name = NULL;
+ const char *real_connection_name = NULL;
struct connection *con = NULL;
if (!query)
@@ -142,8 +142,8 @@ ECPGclose(const char *cursor_name,
if (real_connection_name == NULL)
{
/*
- * If can't get the connection name by cursor name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by cursor name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
}
@@ -192,12 +192,12 @@ add_cursor(const int lineno, const char *cursor_name, const char *connection_nam
if (!con)
{
ecpg_raise(lineno, ECPG_NO_CONN, ECPG_SQLSTATE_CONNECTION_DOES_NOT_EXIST,
- connection_name ? connection_name : ecpg_gettext("NULL"));
+ connection_name ? connection_name : ecpg_gettext("NULL"));
return;
}
/* allocate a node to store the new cursor */
- new = (struct cursor_statement *)ecpg_alloc(sizeof(struct cursor_statement), lineno);
+ new = (struct cursor_statement *) ecpg_alloc(sizeof(struct cursor_statement), lineno);
if (new)
{
new->name = ecpg_strdup(cursor_name, lineno);
diff --git a/src/interfaces/ecpg/ecpglib/descriptor.c b/src/interfaces/ecpg/ecpglib/descriptor.c
index 97849d0793..d5ec3a6120 100644
--- a/src/interfaces/ecpg/ecpglib/descriptor.c
+++ b/src/interfaces/ecpg/ecpglib/descriptor.c
@@ -589,7 +589,7 @@ ECPGset_desc_header(int lineno, const char *desc_name, int count)
static void
set_desc_attr(struct descriptor_item *desc_item, struct variable *var,
- char *tobeinserted)
+ char *tobeinserted)
{
if (var->type != ECPGt_bytea)
desc_item->is_binary = false;
@@ -603,8 +603,7 @@ set_desc_attr(struct descriptor_item *desc_item, struct variable *var,
desc_item->data_len = variable->len;
}
- ecpg_free(desc_item->data); /* free() takes care of a
- * potential NULL value */
+ ecpg_free(desc_item->data); /* free() takes care of a potential NULL value */
desc_item->data = (char *) tobeinserted;
}
@@ -875,8 +874,8 @@ ECPGdescribe(int line, int compat, bool input, const char *connection_name, cons
if (real_connection_name == NULL)
{
/*
- * If can't get the connection name by declared name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by declared name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
}
diff --git a/src/interfaces/ecpg/ecpglib/ecpglib_extern.h b/src/interfaces/ecpg/ecpglib/ecpglib_extern.h
index d63efd5459..d34a75c04d 100644
--- a/src/interfaces/ecpg/ecpglib/ecpglib_extern.h
+++ b/src/interfaces/ecpg/ecpglib/ecpglib_extern.h
@@ -90,9 +90,9 @@ struct statement
/* structure to store declared statements */
struct declared_statement
{
- char *name; /* declared name */
- char *connection_name;
- char *cursor_name;
+ char *name; /* declared name */
+ char *connection_name;
+ char *cursor_name;
struct declared_statement *next;
};
@@ -107,7 +107,7 @@ struct prepared_statement
struct cursor_statement
{
- char *name; /*cursor name*/
+ char *name; /* cursor name */
struct cursor_statement *next;
};
@@ -204,10 +204,10 @@ struct descriptor *ecpg_find_desc(int line, const char *name);
struct prepared_statement *ecpg_find_prepared_statement(const char *,
struct connection *, struct prepared_statement **);
-void ecpg_update_declare_statement(const char *, const char *, const int);
-char *ecpg_get_con_name_by_declared_name(const char *);
+void ecpg_update_declare_statement(const char *, const char *, const int);
+char *ecpg_get_con_name_by_declared_name(const char *);
const char *ecpg_get_con_name_by_cursor_name(const char *);
-void ecpg_release_declared_statement(const char *);
+void ecpg_release_declared_statement(const char *);
bool ecpg_store_result(const PGresult *results, int act_field,
const struct statement *stmt, struct variable *var);
diff --git a/src/interfaces/ecpg/ecpglib/error.c b/src/interfaces/ecpg/ecpglib/error.c
index 397aa28190..fb519195fc 100644
--- a/src/interfaces/ecpg/ecpglib/error.c
+++ b/src/interfaces/ecpg/ecpglib/error.c
@@ -201,10 +201,10 @@ ecpg_raise(int line, int code, const char *sqlstate, const char *str)
break;
case ECPG_INVALID_CURSOR:
- snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
+ snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
/*------
translator: this string will be truncated at 149 characters expanded. */
- ecpg_gettext("The cursor is invalid on line %d"),line);
+ ecpg_gettext("The cursor is invalid on line %d"), line);
break;
default:
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index 2db1b62058..6dbf2fa9e0 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -488,11 +488,12 @@ sprintf_float_value(char *ptr, float value, const char *delim)
sprintf(ptr, "%.15g%s", value, delim);
}
-static char*
+static char *
convert_bytea_to_string(char *from_data, int from_len, int lineno)
{
- char *to_data;
- int to_len = ecpg_hex_enc_len(from_len) + 4 + 1; /* backslash + 'x' + quote + quote */
+ char *to_data;
+ int to_len = ecpg_hex_enc_len(from_len) + 4 + 1; /* backslash + 'x' +
+ * quote + quote */
to_data = ecpg_alloc(to_len, lineno);
if (!to_data)
@@ -1080,16 +1081,16 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
static void
print_param_value(char *value, int len, int is_binary, int lineno, int nth)
{
- char *value_s;
- bool malloced = false;
+ char *value_s;
+ bool malloced = false;
if (value == NULL)
value_s = "null";
- else if (! is_binary)
+ else if (!is_binary)
value_s = value;
else
{
- value_s = ecpg_alloc(ecpg_hex_enc_len(len)+1, lineno);
+ value_s = ecpg_alloc(ecpg_hex_enc_len(len) + 1, lineno);
if (value_s != NULL)
{
ecpg_hex_encode(value, len, value_s);
@@ -1101,7 +1102,7 @@ print_param_value(char *value, int len, int is_binary, int lineno, int nth)
}
ecpg_log("ecpg_free_params on line %d: parameter %d = %s\n",
- lineno, nth, value_s);
+ lineno, nth, value_s);
if (malloced)
ecpg_free(value_s);
@@ -1116,7 +1117,7 @@ ecpg_free_params(struct statement *stmt, bool print)
{
if (print)
print_param_value(stmt->paramvalues[n], stmt->paramlengths[n],
- stmt->paramformats[n], stmt->lineno, n + 1);
+ stmt->paramformats[n], stmt->lineno, n + 1);
ecpg_free(stmt->paramvalues[n]);
}
ecpg_free(stmt->paramvalues);
@@ -1162,13 +1163,13 @@ insert_tobeinserted(int position, int ph_len, struct statement *stmt, char *tobe
static bool
store_input_from_desc(struct statement *stmt, struct descriptor_item *desc_item,
- char **tobeinserted)
+ char **tobeinserted)
{
struct variable var;
/*
- * In case of binary data, only allocate memory and memcpy because
- * binary data have been already stored into desc_item->data with
+ * In case of binary data, only allocate memory and memcpy because binary
+ * data have been already stored into desc_item->data with
* ecpg_store_input() at ECPGset_desc().
*/
if (desc_item->is_binary)
@@ -1454,7 +1455,8 @@ ecpg_build_params(struct statement *stmt)
stmt->statement_type == ECPGst_exec_with_exprlist)
{
/* Add double quote both side for embedding statement name. */
- char *str = ecpg_alloc(strlen(tobeinserted) + 2 + 1, stmt->lineno);
+ char *str = ecpg_alloc(strlen(tobeinserted) + 2 + 1, stmt->lineno);
+
sprintf(str, "\"%s\"", tobeinserted);
ecpg_free(tobeinserted);
tobeinserted = str;
@@ -1471,7 +1473,8 @@ ecpg_build_params(struct statement *stmt)
if (binary_format)
{
- char *p = convert_bytea_to_string(tobeinserted, binary_length, stmt->lineno);
+ char *p = convert_bytea_to_string(tobeinserted, binary_length, stmt->lineno);
+
if (!p)
{
ecpg_free_params(stmt, false);
@@ -1541,8 +1544,8 @@ ecpg_build_params(struct statement *stmt)
}
/*
- * Check if there are unmatched things left.
- * PREPARE AS has no parameter. Check other statement.
+ * Check if there are unmatched things left. PREPARE AS has no parameter.
+ * Check other statement.
*/
if (stmt->statement_type != ECPGst_prepare &&
next_insert(stmt->command, position, stmt->questionmarks, std_strings) >= 0)
@@ -1617,7 +1620,7 @@ ecpg_execute(struct statement *stmt)
if (stmt->statement_type == ECPGst_prepare)
{
- if(! ecpg_register_prepared_stmt(stmt))
+ if (!ecpg_register_prepared_stmt(stmt))
{
ecpg_free_params(stmt, true);
return false;
@@ -2266,7 +2269,7 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
{
va_list args;
bool ret;
- const char *real_connection_name = NULL;
+ const char *real_connection_name = NULL;
real_connection_name = connection_name;
@@ -2283,11 +2286,11 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
if (real_connection_name == NULL)
{
/*
- * If can't get the connection name by declared name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by declared name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
- }
+ }
}
va_start(args, query);
diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c
index 83de5e88b1..387e4f27ce 100644
--- a/src/interfaces/ecpg/ecpglib/prepare.c
+++ b/src/interfaces/ecpg/ecpglib/prepare.c
@@ -63,51 +63,51 @@ ecpg_register_prepared_stmt(struct statement *stmt)
struct prepared_statement *this;
struct connection *con = NULL;
struct prepared_statement *prev = NULL;
- char *real_connection_name;
- int lineno = stmt->lineno;
-
- real_connection_name = ecpg_get_con_name_by_declared_name(stmt->name);
- if (real_connection_name == NULL)
- real_connection_name = stmt->connection->name;
-
- con = ecpg_get_connection(real_connection_name);
- if (!ecpg_init(con, real_connection_name, stmt->lineno))
- return false;
-
- /* check if we already have prepared this statement */
- this = ecpg_find_prepared_statement(stmt->name, con, &prev);
- if (this && !deallocate_one(lineno, ECPG_COMPAT_PGSQL, con, prev, this))
- return false;
-
- /* allocate new statement */
- this = (struct prepared_statement *) ecpg_alloc(sizeof(struct prepared_statement), lineno);
- if (!this)
- return false;
-
- prep_stmt = (struct statement *) ecpg_alloc(sizeof(struct statement), lineno);
- if (!stmt)
- {
- ecpg_free(this);
- return false;
- }
- memset(prep_stmt, 0, sizeof(struct statement));
-
- /* create statement */
- prep_stmt->lineno = lineno;
- prep_stmt->connection = con;
- prep_stmt->command = ecpg_strdup(stmt->command, lineno);
- prep_stmt->inlist = prep_stmt->outlist = NULL;
- this->name = ecpg_strdup(stmt->name, lineno);
- this->stmt = prep_stmt;
- this->prepared = true;
-
- if (con->prep_stmts == NULL)
- this->next = NULL;
- else
- this->next = con->prep_stmts;
-
- con->prep_stmts = this;
- return true;
+ char *real_connection_name;
+ int lineno = stmt->lineno;
+
+ real_connection_name = ecpg_get_con_name_by_declared_name(stmt->name);
+ if (real_connection_name == NULL)
+ real_connection_name = stmt->connection->name;
+
+ con = ecpg_get_connection(real_connection_name);
+ if (!ecpg_init(con, real_connection_name, stmt->lineno))
+ return false;
+
+ /* check if we already have prepared this statement */
+ this = ecpg_find_prepared_statement(stmt->name, con, &prev);
+ if (this && !deallocate_one(lineno, ECPG_COMPAT_PGSQL, con, prev, this))
+ return false;
+
+ /* allocate new statement */
+ this = (struct prepared_statement *) ecpg_alloc(sizeof(struct prepared_statement), lineno);
+ if (!this)
+ return false;
+
+ prep_stmt = (struct statement *) ecpg_alloc(sizeof(struct statement), lineno);
+ if (!stmt)
+ {
+ ecpg_free(this);
+ return false;
+ }
+ memset(prep_stmt, 0, sizeof(struct statement));
+
+ /* create statement */
+ prep_stmt->lineno = lineno;
+ prep_stmt->connection = con;
+ prep_stmt->command = ecpg_strdup(stmt->command, lineno);
+ prep_stmt->inlist = prep_stmt->outlist = NULL;
+ this->name = ecpg_strdup(stmt->name, lineno);
+ this->stmt = prep_stmt;
+ this->prepared = true;
+
+ if (con->prep_stmts == NULL)
+ this->next = NULL;
+ else
+ this->next = con->prep_stmts;
+
+ con->prep_stmts = this;
+ return true;
}
static bool
@@ -239,8 +239,8 @@ ECPGprepare(int lineno, const char *connection_name, const bool questionmarks,
if (real_connection_name == NULL)
{
/*
- * If can't get the connection name by declared name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by declared name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
}
@@ -345,8 +345,8 @@ ECPGdeallocate(int lineno, int c, const char *connection_name, const char *name)
if (real_connection_name == NULL)
{
/*
- * If can't get the connection name by declared name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by declared name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
}
@@ -408,8 +408,8 @@ ECPGprepared_statement(const char *connection_name, const char *name, int lineno
if (real_connection_name == NULL)
{
/*
- * If can't get the connection name by declared name then using connection name
- * coming from the parameter connection_name
+ * If can't get the connection name by declared name then using
+ * connection name coming from the parameter connection_name
*/
real_connection_name = connection_name;
}
@@ -667,11 +667,11 @@ ECPGdeclare(int lineno, const char *connection_name, const char *name)
{
/*
* Going to here means not using AT clause in the DECLARE STATEMENT
- * ECPG pre-processor allows this case.
- * However, we don't allocate a node to store the declared name
- * because the DECLARE STATEMENT without using AT clause will be ignored.
- * The following statement such as PREPARE, EXECUTE are executed
- * as usual on the current connection.
+ * ECPG pre-processor allows this case. However, we don't allocate a
+ * node to store the declared name, because a DECLARE STATEMENT
+ * without an AT clause will be ignored. The following statements,
+ * such as PREPARE and EXECUTE, are executed as usual on the current
+ * connection.
*/
return true;
}
@@ -682,7 +682,10 @@ ECPGdeclare(int lineno, const char *connection_name, const char *name)
if (ecpg_find_declared_statement(name))
{
- /* Should not go to here because the pre-compiler has check the duplicate name */
+ /*
+ * Should not get here, because the pre-compiler has checked for a
+ * duplicate name
+ */
return false;
}
@@ -751,7 +754,7 @@ ecpg_update_declare_statement(const char *declared_name, const char *cursor_name
/* Find the declared node by declared name */
p = ecpg_find_declared_statement(declared_name);
if (p)
- p->cursor_name = ecpg_strdup(cursor_name,lineno);
+ p->cursor_name = ecpg_strdup(cursor_name, lineno);
}
/*
@@ -831,7 +834,10 @@ ecpg_release_declared_statement(const char *connection_name)
ecpg_free(cur->cursor_name);
ecpg_free(cur);
- /* One connection can be used by multiple declared name, so no break here */
+ /*
+ * One connection can be used by multiple declared names, so no
+ * break here
+ */
}
else
prev = cur;
diff --git a/src/interfaces/ecpg/include/ecpglib.h b/src/interfaces/ecpg/include/ecpglib.h
index 1edf272ada..f1031d2595 100644
--- a/src/interfaces/ecpg/include/ecpglib.h
+++ b/src/interfaces/ecpg/include/ecpglib.h
@@ -56,9 +56,9 @@ bool ECPGprepare(int, const char *, const bool, const char *, const char *);
bool ECPGdeallocate(int, int, const char *, const char *);
bool ECPGdeallocate_all(int, int, const char *);
bool ECPGdeclare(int, const char *, const char *);
-bool ECPGopen(const char*, const char*, const int, const int, const int, const char *, const bool, const int, const char *,...);
-bool ECPGfetch(const char*, const int, const int, const int, const char *, const bool, const int, const char *,...);
-bool ECPGclose(const char*, const int, const int, const int, const char *, const bool, const int, const char *,...);
+bool ECPGopen(const char *, const char *, const int, const int, const int, const char *, const bool, const int, const char *,...);
+bool ECPGfetch(const char *, const int, const int, const int, const char *, const bool, const int, const char *,...);
+bool ECPGclose(const char *, const int, const int, const int, const char *, const bool, const int, const char *,...);
char *ECPGprepared_statement(const char *, const char *, int);
PGconn *ECPGget_PGconn(const char *);
PGTransactionStatusType ECPGtransactionStatus(const char *);
diff --git a/src/interfaces/ecpg/preproc/ecpg.c b/src/interfaces/ecpg/preproc/ecpg.c
index a6a43f5a3f..861a2d98d5 100644
--- a/src/interfaces/ecpg/preproc/ecpg.c
+++ b/src/interfaces/ecpg/preproc/ecpg.c
@@ -121,8 +121,8 @@ free_argument(struct arguments *arg)
free_argument(arg->next);
/*
- * Don't free variables in it because the original codes don't free it either
- * variables are static structures instead of allocating
+ * Don't free the variables in it, because the original code doesn't
+ * free them either; the variables are static structures rather than
+ * allocated ones.
*/
free(arg);
}
@@ -520,13 +520,13 @@ main(int argc, char *const argv[])
free(input_filename);
}
- if(g_declared_list)
+ if (g_declared_list)
{
free_declared_stmt(g_declared_list);
g_declared_list = NULL;
}
- if(cur)
+ if (cur)
{
free_cursor(cur);
cur = NULL;
diff --git a/src/interfaces/ecpg/preproc/output.c b/src/interfaces/ecpg/preproc/output.c
index 1851809e63..9419a39f9e 100644
--- a/src/interfaces/ecpg/preproc/output.c
+++ b/src/interfaces/ecpg/preproc/output.c
@@ -142,9 +142,9 @@ output_statement(char *stmt, int whenever_mode, enum ECPG_statement_type st)
st = ECPGst_normal;
/*
- * In following cases, stmt is CSTRING or char_variable. They must be output directly.
- * - prepared_name of EXECUTE without exprlist
- * - execstring of EXECUTE IMMEDIATE
+ * In the following cases, stmt is CSTRING or char_variable and must
+ * be output directly:
+ * - prepared_name of EXECUTE without exprlist
+ * - execstring of EXECUTE IMMEDIATE
*/
fprintf(base_yyout, "%s, ", ecpg_statement_type_name[st]);
if (st == ECPGst_execute || st == ECPGst_exec_immediate)
@@ -317,9 +317,9 @@ output_cursor_name(char *str)
j++;
} while (str[j] == ' ' || str[j] == '\t');
- if ((str[j] != '\n') && (str[j] != '\r' || str[j + 1] != '\n')) /* not followed by a
- * newline */
- fputs("\\\\",base_yyout);
+ if ((str[j] != '\n') && (str[j] != '\r' || str[j + 1] != '\n')) /* not followed by a
+ * newline */
+ fputs("\\\\", base_yyout);
}
else if (str[i] == '\r' && str[i + 1] == '\n')
{
@@ -362,7 +362,7 @@ output_declare_statement(char *name)
void
output_cursor_statement(int cursor_stmt, char *cursor_name, char *prepared_name, char *stmt, int whenever_mode, enum ECPG_statement_type st)
{
- switch(cursor_stmt)
+ switch (cursor_stmt)
{
case ECPGcst_open:
fprintf(base_yyout, "{ ECPGopen(");
diff --git a/src/interfaces/ecpg/preproc/preproc_extern.h b/src/interfaces/ecpg/preproc/preproc_extern.h
index 8ccfdf0d1f..552ffc627b 100644
--- a/src/interfaces/ecpg/preproc/preproc_extern.h
+++ b/src/interfaces/ecpg/preproc/preproc_extern.h
@@ -72,7 +72,7 @@ extern void output_prepare_statement(char *, char *);
extern void output_deallocate_prepare_statement(char *);
extern void output_simple_statement(char *, int);
extern void output_declare_statement(char *);
-extern void output_cursor_statement(int , char *, char *, char *, int , enum ECPG_statement_type);
+extern void output_cursor_statement(int, char *, char *, char *, int, enum ECPG_statement_type);
extern char *hashline_number(void);
extern int base_yyparse(void);
extern int base_yylex(void);
diff --git a/src/interfaces/ecpg/preproc/type.h b/src/interfaces/ecpg/preproc/type.h
index 4d6afd93b8..b16a92c3f2 100644
--- a/src/interfaces/ecpg/preproc/type.h
+++ b/src/interfaces/ecpg/preproc/type.h
@@ -145,7 +145,7 @@ struct cursor
/* structure to store declared name */
struct declared_name_st
{
- char *name; /* declared name */
+ char *name; /* declared name */
struct declared_name_st *next;
};
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index b89df08c29..d3bcb98275 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -313,7 +313,7 @@ static const internalPQconninfoOption PQconninfoOptions[] = {
* and "prefer".
*/
{"gssencmode", "PGGSSENCMODE", DefaultGSSMode, NULL,
- "GSSENC-Mode", "", 7, /* sizeof("disable") == 7 */
+ "GSSENC-Mode", "", 7, /* sizeof("disable") == 7 */
offsetof(struct pg_conn, gssencmode)},
#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
@@ -2420,6 +2420,7 @@ keep_going: /* We will come back to here until there is
getHostaddr(conn, host_addr, NI_MAXHOST);
if (strlen(host_addr) > 0)
conn->connip = strdup(host_addr);
+
/*
* purposely ignore strdup failure; not a big problem if
* it fails anyway.
diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h
index f44030b4b0..54c951723c 100644
--- a/src/interfaces/libpq/libpq-fe.h
+++ b/src/interfaces/libpq/libpq-fe.h
@@ -349,7 +349,7 @@ extern void PQinitSSL(int do_init);
extern void PQinitOpenSSL(int do_ssl, int do_crypto);
/* Return true if GSSAPI encryption is in use */
-extern int PQgssEncInUse(PGconn *conn);
+extern int PQgssEncInUse(PGconn *conn);
/* Returns GSSAPI context if GSSAPI is in use */
extern void *PQgetgssctx(PGconn *conn);
diff --git a/src/interfaces/libpq/pqexpbuffer.c b/src/interfaces/libpq/pqexpbuffer.c
index dc7c3ea07d..b3c53b0cda 100644
--- a/src/interfaces/libpq/pqexpbuffer.c
+++ b/src/interfaces/libpq/pqexpbuffer.c
@@ -36,8 +36,9 @@
/* All "broken" PQExpBuffers point to this string. */
static const char oom_buffer[1] = "";
+
+ /* Need a char * for unconstify() compatibility */
-static const char * oom_buffer_ptr = oom_buffer;
+static const char *oom_buffer_ptr = oom_buffer;
static bool appendPQExpBufferVA(PQExpBuffer str, const char *fmt, va_list args) pg_attribute_printf(2, 0);
@@ -93,7 +94,7 @@ initPQExpBuffer(PQExpBuffer str)
str->data = (char *) malloc(INITIAL_EXPBUFFER_SIZE);
if (str->data == NULL)
{
- str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
+ str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
str->maxlen = 0;
str->len = 0;
}
@@ -132,7 +133,7 @@ termPQExpBuffer(PQExpBuffer str)
if (str->data != oom_buffer)
free(str->data);
/* just for luck, make the buffer validly empty. */
- str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
+ str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
str->maxlen = 0;
str->len = 0;
}
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 1b1d87e59a..bb2249b81e 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -941,7 +941,7 @@ plpgsql_exec_trigger(PLpgSQL_function *func,
expanded_record_set_field_internal(rec_new->erh,
i + 1,
(Datum) 0,
- true, /*isnull*/
+ true, /* isnull */
false, false);
}
}
@@ -6895,8 +6895,8 @@ exec_move_row_from_fields(PLpgSQL_execstate *estate,
int strict_multiassignment_level = 0;
/*
- * The extra check strict strict_multi_assignment can be active,
- * only when input tupdesc is specified.
+ * The extra check strict strict_multi_assignment can be active, only when
+ * input tupdesc is specified.
*/
if (tupdesc != NULL)
{
@@ -6991,15 +6991,15 @@ exec_move_row_from_fields(PLpgSQL_execstate *estate,
/* When source value is missing */
if (strict_multiassignment_level)
- ereport(strict_multiassignment_level,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("number of source and target fields in assignment does not match"),
- /* translator: %s represents a name of an extra check */
- errdetail("%s check of %s is active.",
- "strict_multi_assignment",
- strict_multiassignment_level == ERROR ? "extra_errors" :
- "extra_warnings"),
- errhint("Make sure the query returns the exact list of columns.")));
+ ereport(strict_multiassignment_level,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("number of source and target fields in assignment does not match"),
+ /* translator: %s represents a name of an extra check */
+ errdetail("%s check of %s is active.",
+ "strict_multi_assignment",
+ strict_multiassignment_level == ERROR ? "extra_errors" :
+ "extra_warnings"),
+ errhint("Make sure the query returns the exact list of columns.")));
}
/* Cast the new value to the right type, if needed. */
@@ -7028,11 +7028,11 @@ exec_move_row_from_fields(PLpgSQL_execstate *estate,
ereport(strict_multiassignment_level,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("number of source and target fields in assignment does not match"),
- /* translator: %s represents a name of an extra check */
+ /* translator: %s represents a name of an extra check */
errdetail("%s check of %s is active.",
"strict_multi_assignment",
- strict_multiassignment_level == ERROR ? "extra_errors" :
- "extra_warnings"),
+ strict_multiassignment_level == ERROR ? "extra_errors" :
+ "extra_warnings"),
errhint("Make sure the query returns the exact list of columns.")));
}
@@ -7099,15 +7099,15 @@ exec_move_row_from_fields(PLpgSQL_execstate *estate,
valtypmod = -1;
if (strict_multiassignment_level)
- ereport(strict_multiassignment_level,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("number of source and target fields in assignment does not match"),
- /* translator: %s represents a name of an extra check */
- errdetail("%s check of %s is active.",
- "strict_multi_assignment",
- strict_multiassignment_level == ERROR ? "extra_errors" :
- "extra_warnings"),
- errhint("Make sure the query returns the exact list of columns.")));
+ ereport(strict_multiassignment_level,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("number of source and target fields in assignment does not match"),
+ /* translator: %s represents a name of an extra check */
+ errdetail("%s check of %s is active.",
+ "strict_multi_assignment",
+ strict_multiassignment_level == ERROR ? "extra_errors" :
+ "extra_warnings"),
+ errhint("Make sure the query returns the exact list of columns.")));
}
exec_assign_value(estate, (PLpgSQL_datum *) var,
@@ -7115,8 +7115,8 @@ exec_move_row_from_fields(PLpgSQL_execstate *estate,
}
/*
- * When strict_multiassignment extra check is active, ensure there
- * are no unassigned source attributes.
+ * When strict_multiassignment extra check is active, ensure there are
+ * no unassigned source attributes.
*/
if (strict_multiassignment_level && anum < td_natts)
{
@@ -7128,11 +7128,11 @@ exec_move_row_from_fields(PLpgSQL_execstate *estate,
ereport(strict_multiassignment_level,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("number of source and target fields in assignment does not match"),
- /* translator: %s represents a name of an extra check */
+ /* translator: %s represents a name of an extra check */
errdetail("%s check of %s is active.",
- "strict_multi_assignment",
- strict_multiassignment_level == ERROR ? "extra_errors" :
- "extra_warnings"),
+ "strict_multi_assignment",
+ strict_multiassignment_level == ERROR ? "extra_errors" :
+ "extra_warnings"),
errhint("Make sure the query returns the exact list of columns.")));
}
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 4eff62e8e5..a72e319e55 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -453,7 +453,7 @@ typedef struct PLpgSQL_stmt
* set). This can be used by a profiler as the index for an array of
* per-statement metrics.
*/
- unsigned int stmtid;
+ unsigned int stmtid;
} PLpgSQL_stmt;
/*
diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c
index fd6cdc4ce5..c0c3ae9df0 100644
--- a/src/pl/plpython/plpy_exec.c
+++ b/src/pl/plpython/plpy_exec.c
@@ -753,8 +753,8 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r
Py_DECREF(pltlevel);
/*
- * Note: In BEFORE trigger, stored generated columns are not computed yet,
- * so don't make them accessible in NEW row.
+ * Note: In BEFORE trigger, stored generated columns are not
+ * computed yet, so don't make them accessible in NEW row.
*/
if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event))
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index 1362ca51d1..db8ca6abd7 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -1165,8 +1165,8 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS, pltcl_call_state *call_state,
* Now the command part of the event for TG_op and data for NEW
* and OLD
*
- * Note: In BEFORE trigger, stored generated columns are not computed yet,
- * so don't make them accessible in NEW row.
+ * Note: In BEFORE trigger, stored generated columns are not
+ * computed yet, so don't make them accessible in NEW row.
*/
if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
{
diff --git a/src/port/dlopen.c b/src/port/dlopen.c
index 6cca2eb370..05ad85b5b3 100644
--- a/src/port/dlopen.c
+++ b/src/port/dlopen.c
@@ -24,7 +24,7 @@
void *
dlopen(const char *file, int mode)
{
- int flags = 0;
+ int flags = 0;
if (mode & RTLD_NOW)
flags |= BIND_IMMEDIATE;
diff --git a/src/port/pg_bitutils.c b/src/port/pg_bitutils.c
index d12765b33f..60fb55af53 100644
--- a/src/port/pg_bitutils.c
+++ b/src/port/pg_bitutils.c
@@ -203,7 +203,7 @@ pg_popcount32_asm(uint32 word)
{
uint32 res;
- __asm__ __volatile__(" popcntl %1,%0\n" : "=q"(res) : "rm"(word) : "cc");
+__asm__ __volatile__(" popcntl %1,%0\n":"=q"(res):"rm"(word):"cc");
return (int) res;
}
@@ -216,7 +216,7 @@ pg_popcount64_asm(uint64 word)
{
uint64 res;
- __asm__ __volatile__(" popcntq %1,%0\n" : "=q"(res) : "rm"(word) : "cc");
+__asm__ __volatile__(" popcntq %1,%0\n":"=q"(res):"rm"(word):"cc");
return (int) res;
}
diff --git a/src/port/strtof.c b/src/port/strtof.c
index 9e37633bda..904cc5ff72 100644
--- a/src/port/strtof.c
+++ b/src/port/strtof.c
@@ -41,9 +41,9 @@ strtof(const char *nptr, char **endptr)
* Value might be in-range for double but not float.
*/
if (dresult != 0 && fresult == 0)
- caller_errno = ERANGE; /* underflow */
+ caller_errno = ERANGE; /* underflow */
if (!isinf(dresult) && isinf(fresult))
- caller_errno = ERANGE; /* overflow */
+ caller_errno = ERANGE; /* overflow */
}
else
caller_errno = errno;
@@ -78,7 +78,7 @@ pg_strtof(const char *nptr, char **endptr)
float fresult;
errno = 0;
- fresult = (strtof)(nptr, endptr);
+ fresult = (strtof) (nptr, endptr);
if (errno)
{
/* On error, just return the error to the caller. */
@@ -100,7 +100,8 @@ pg_strtof(const char *nptr, char **endptr)
/*
* Try again. errno is already 0 here.
*/
- double dresult = strtod(nptr, NULL);
+ double dresult = strtod(nptr, NULL);
+
if (errno)
{
/* On error, just return the error */
@@ -113,8 +114,8 @@ pg_strtof(const char *nptr, char **endptr)
errno = caller_errno;
return fresult;
}
- else if ((dresult > 0 && dresult <= FLT_MIN && (float)dresult != 0.0) ||
- (dresult < 0 && dresult >= -FLT_MIN && (float)dresult != 0.0))
+ else if ((dresult > 0 && dresult <= FLT_MIN && (float) dresult != 0.0) ||
+ (dresult < 0 && dresult >= -FLT_MIN && (float) dresult != 0.0))
{
/* subnormal but nonzero value */
errno = caller_errno;
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index a1a3d48748..7beee00dbd 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -1802,7 +1802,7 @@ run_schedule(const char *schedule, test_function tfunc)
}
else
{
- status(_("ok ")); /* align with FAILED */
+ status(_("ok ")); /* align with FAILED */
success_count++;
}
@@ -1882,7 +1882,7 @@ run_single_test(const char *test, test_function tfunc)
}
else
{
- status(_("ok ")); /* align with FAILED */
+ status(_("ok ")); /* align with FAILED */
success_count++;
}
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index ad3e803899..7f03b7e857 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -184,7 +184,7 @@ pt_in_widget(PG_FUNCTION_ARGS)
distance = DatumGetFloat8(DirectFunctionCall2(point_distance,
PointPGetDatum(point),
- PointPGetDatum(&widget->center)));
+ PointPGetDatum(&widget->center)));
PG_RETURN_BOOL(distance < widget->radius);
}
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index c6050a3f77..8cc033eb13 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -112,6 +112,7 @@ ArchiveEntryPtrType
ArchiveFormat
ArchiveHandle
ArchiveMode
+ArchiveOpts
ArchiverOutput
ArchiverStage
ArrayAnalyzeExtraData
@@ -127,8 +128,6 @@ ArrayIterator
ArrayMapState
ArrayMetaState
ArrayParseState
-ArrayRef
-ArrayRefState
ArrayType
AsyncQueueControl
AsyncQueueEntry
@@ -167,6 +166,8 @@ BTArrayKeyInfo
BTBuildState
BTCycleId
BTIndexStat
+BTInsertState
+BTInsertStateData
BTLeader
BTMetaPageData
BTOneVacInfo
@@ -176,6 +177,8 @@ BTPageOpaqueData
BTPageStat
BTPageState
BTParallelScanDesc
+BTScanInsert
+BTScanInsertData
BTScanOpaque
BTScanOpaqueData
BTScanPos
@@ -209,6 +212,7 @@ BeginSampleScan_function
BernoulliSamplerData
BgWorkerStartTime
BgwHandleStatus
+BinaryArithmFunc
BipartiteMatchState
BitmapAnd
BitmapAndPath
@@ -272,6 +276,7 @@ BufferCachePagesContext
BufferCachePagesRec
BufferDesc
BufferDescPadded
+BufferHeapTupleTableSlot
BufferLookupEnt
BufferStrategyControl
BufferTag
@@ -279,6 +284,7 @@ BufferUsage
BuildAccumulator
BuiltinScript
BulkInsertState
+BulkInsertStateData
CACHESIGN
CAC_state
CCFastEqualFN
@@ -295,8 +301,10 @@ COP
CRITICAL_SECTION
CRSSnapshotAction
CState
+CTEMaterialize
CV
C_block
+CachedExpression
CachedPlan
CachedPlanSource
CallContext
@@ -323,14 +331,13 @@ Chromosome
CkptSortItem
CkptTsStatus
ClientAuthentication_hook_type
+ClientCertMode
ClientData
ClonePtrType
-ClonedConstraint
ClosePortalStmt
ClosePtrType
Clump
ClusterInfo
-ClusterOption
ClusterStmt
CmdType
CoalesceExpr
@@ -505,6 +512,7 @@ DictSnowball
DictSubState
DictSyn
DictThesaurus
+DimensionInfo
DirectoryMethodData
DirectoryMethodFile
DisableTimeoutParams
@@ -655,7 +663,9 @@ FileFdwExecutionState
FileFdwPlanState
FileNameMap
FileTag
+FinalPathExtraData
FindSplitData
+FindSplitStrat
FixedParallelExecutorState
FixedParallelState
FixedParamState
@@ -807,6 +817,7 @@ FuncCandidateList
FuncDetailCode
FuncExpr
FuncInfo
+FuncLookupError
Function
FunctionCallInfo
FunctionCallInfoBaseData
@@ -853,7 +864,6 @@ GenerationChunk
GenerationContext
GenerationPointer
GenericCosts
-GenericIndexOpts
GenericXLogState
GeqoPrivateData
GetForeignJoinPaths_function
@@ -891,13 +901,14 @@ GinStatsData
GinTernaryValue
GinTupleCollector
GinVacuumState
-GistBDItem
GistBufferingMode
+GistBulkDeleteResult
GistEntryVector
GistInetKey
GistNSN
GistSplitUnion
GistSplitVector
+GistVacState
GlobalTransaction
GrantRoleStmt
GrantStmt
@@ -905,6 +916,7 @@ GrantTargetType
Group
GroupPath
GroupPathExtraData
+GroupResultPath
GroupState
GroupVarInfo
GroupingFunc
@@ -987,6 +999,7 @@ HeapTupleData
HeapTupleFields
HeapTupleHeader
HeapTupleHeaderData
+HeapTupleTableSlot
HistControl
HotStandbyState
I32
@@ -1020,6 +1033,7 @@ IndexBuildCallback
IndexBuildResult
IndexBulkDeleteCallback
IndexBulkDeleteResult
+IndexClause
IndexClauseSet
IndexElem
IndexFetchHeapData
@@ -1030,7 +1044,6 @@ IndexOnlyScan
IndexOnlyScanState
IndexOptInfo
IndexPath
-IndexQualInfo
IndexRuntimeKeyInfo
IndexScan
IndexScanDesc
@@ -1057,6 +1070,7 @@ Instrumentation
Int128AggState
Int8TransTypeData
IntRBTreeNode
+IntegerSet
InternalDefaultACL
InternalGrant
Interval
@@ -1065,6 +1079,7 @@ InvalidationChunk
InvalidationListHeader
IpcMemoryId
IpcMemoryKey
+IpcMemoryState
IpcSemaphoreId
IpcSemaphoreKey
IsForeignRelUpdatable_function
@@ -1085,6 +1100,7 @@ JOBOBJECT_BASIC_LIMIT_INFORMATION
JOBOBJECT_BASIC_UI_RESTRICTIONS
JOBOBJECT_SECURITY_LIMIT_INFORMATION
JitContext
+JitInstrumentation
JitProviderCallbacks
JitProviderCompileExprCB
JitProviderInit
@@ -1120,9 +1136,11 @@ JsonPathGinPath
JsonPathGinPathItem
JsonPathItem
JsonPathItemType
+JsonPathKeyword
JsonPathParseItem
JsonPathParseResult
JsonPathPredicateCallback
+JsonPathString
JsonSemAction
JsonTokenType
JsonTransformStringValuesAction
@@ -1200,6 +1218,7 @@ LWLockMinimallyPadded
LWLockMode
LWLockPadded
LabelProvider
+LagTracker
LargeObjectDesc
LastAttnumInfo
Latch
@@ -1210,6 +1229,7 @@ LexemeHashKey
LexemeInfo
LexemeKey
LexizeData
+LibraryInfo
Limit
LimitPath
LimitState
@@ -1274,6 +1294,8 @@ LogicalTape
LogicalTapeSet
MAGIC
MBuf
+MCVItem
+MCVList
MEMORY_BASIC_INFORMATION
MINIDUMPWRITEDUMP
MINIDUMP_TYPE
@@ -1308,8 +1330,8 @@ MinMaxExpr
MinMaxOp
MinimalTuple
MinimalTupleData
+MinimalTupleTableSlot
MinmaxOpaque
-MissingPtr
ModifyTable
ModifyTablePath
ModifyTableState
@@ -1354,10 +1376,10 @@ NonEmptyRange
Notification
NotifyStmt
Nsrt
-NullableDatum
NullIfExpr
NullTest
NullTestType
+NullableDatum
Numeric
NumericAggState
NumericDigit
@@ -1379,6 +1401,7 @@ ObjectAccessPostAlter
ObjectAccessPostCreate
ObjectAccessType
ObjectAddress
+ObjectAddressAndFlags
ObjectAddressExtra
ObjectAddressStack
ObjectAddresses
@@ -1430,8 +1453,11 @@ PATH
PBOOL
PCtxtHandle
PFN
+PGAlignedBlock
+PGAlignedXLogBlock
PGAsyncStatusType
PGCALL2
+PGChecksummablePage
PGContextVisibility
PGEvent
PGEventConnDestroy
@@ -1610,7 +1636,6 @@ PTEntryArray
PTIterationArray
PTOKEN_PRIVILEGES
PTOKEN_USER
-PULONG
PUTENVPROC
PVOID
PX_Alias
@@ -1629,7 +1654,6 @@ Pairs
ParallelAppendState
ParallelBitmapHeapState
ParallelBlockTableScanDesc
-ParallelBlockTableScanDescData
ParallelCompletionPtr
ParallelContext
ParallelExecutorInfo
@@ -1637,11 +1661,12 @@ ParallelHashGrowth
ParallelHashJoinBatch
ParallelHashJoinBatchAccessor
ParallelHashJoinState
-ParallelTableScanDesc
-ParallelTableScanDescData
ParallelIndexScanDesc
+ParallelReadyList
ParallelSlot
ParallelState
+ParallelTableScanDesc
+ParallelTableScanDescData
ParallelWorkerContext
ParallelWorkerInfo
Param
@@ -1667,14 +1692,16 @@ ParserSetupHook
ParserState
PartClauseInfo
PartClauseMatchStatus
+PartClauseTarget
PartitionBoundInfo
PartitionBoundInfoData
PartitionBoundSpec
PartitionCmd
PartitionDesc
PartitionDescData
+PartitionDirectory
+PartitionDirectoryEntry
PartitionDispatch
-PartitionDispatchData
PartitionElem
PartitionHashBound
PartitionKey
@@ -1690,9 +1717,12 @@ PartitionPruningData
PartitionRangeBound
PartitionRangeDatum
PartitionRangeDatumKind
+PartitionRoutingInfo
PartitionScheme
PartitionSpec
PartitionTupleRouting
+PartitionedRelPruneInfo
+PartitionedRelPruningData
PartitionwiseAggregateType
PasswordType
Path
@@ -1712,6 +1742,7 @@ PerlInterpreter
Perl_check_t
Perl_ppaddr_t
Permutation
+PgBackendGSSStatus
PgBackendSSLStatus
PgBackendStatus
PgBenchExpr
@@ -1726,6 +1757,7 @@ PgFdwAnalyzeState
PgFdwDirectModifyState
PgFdwModifyState
PgFdwOption
+PgFdwPathExtraData
PgFdwRelationInfo
PgFdwScanState
PgIfAddrCallback
@@ -1741,6 +1773,7 @@ PgStat_MsgAnalyze
PgStat_MsgArchiver
PgStat_MsgAutovacStart
PgStat_MsgBgWriter
+PgStat_MsgChecksumFailure
PgStat_MsgDeadlock
PgStat_MsgDropdb
PgStat_MsgDummy
@@ -1811,6 +1844,7 @@ PredXactList
PredXactListElement
PredicateLockData
PredicateLockTargetType
+PrepParallelRestorePtrType
PrepareStmt
PreparedParamsData
PreparedStatement
@@ -1869,6 +1903,7 @@ QPRS_STATE
QTN2QTState
QTNode
QUERYTYPE
+QUERY_SECURITY_CONTEXT_TOKEN_FN
QualCost
QualItem
Query
@@ -1885,8 +1920,8 @@ QueryRepresentationOperand
QuerySource
QueueBackendStatus
QueuePosition
-RBNode
-RBOrderControl
+RBTNode
+RBTOrderControl
RBTree
RBTreeIterator
REPARSE_JUNCTION_DATA_BUFFER
@@ -1899,6 +1934,7 @@ RI_QueryKey
RTEKind
RWConflict
RWConflictPoolHeader
+RandomState
Range
RangeBound
RangeBox
@@ -1930,7 +1966,7 @@ RecordCacheEntry
RecordCompareData
RecordIOData
RecoveryLockListsEntry
-RecoveryTargetAction
+RecoveryTargetTimeLineGoal
RecoveryTargetType
RectBox
RecursionContext
@@ -1961,7 +1997,6 @@ Relation
RelationData
RelationPtr
RelationSyncEntry
-RelativeTime
RelcacheCallbackFunction
RelfilenodeMapEntry
RelfilenodeMapKey
@@ -2015,7 +2050,6 @@ RestoreOptions
RestorePass
RestrictInfo
Result
-ResultPath
ResultRelInfo
ResultState
ReturnSetInfo
@@ -2099,8 +2133,8 @@ Scan
ScanDirection
ScanKey
ScanKeyData
-ScanKeyword
-ScanStackEntry
+ScanKeywordHashFunc
+ScanKeywordList
ScanState
ScanTypeControl
SchemaQuery
@@ -2111,12 +2145,15 @@ SecLabelStmt
SeenRelsEntry
SelectStmt
Selectivity
+SemTPadded
SemiAntiJoinFactors
SeqScan
SeqScanState
SeqTable
SeqTableData
SerCommitSeqNo
+SerializableXactHandle
+SerializedActiveRelMaps
SerializedReindexState
SerializedSnapshotData
SerializedTransactionState
@@ -2137,6 +2174,7 @@ SetOperation
SetOperationStmt
SetToDefault
SetupWorkerPtrType
+ShDependObjectInfo
SharedBitmapState
SharedDependencyObjectType
SharedDependencyType
@@ -2150,6 +2188,7 @@ SharedInvalRelmapMsg
SharedInvalSmgrMsg
SharedInvalSnapshotMsg
SharedInvalidationMessage
+SharedJitInstrumentation
SharedRecordTableEntry
SharedRecordTableKey
SharedRecordTypmodRegistry
@@ -2232,12 +2271,14 @@ SpGistPageOpaque
SpGistPageOpaqueData
SpGistScanOpaque
SpGistScanOpaqueData
+SpGistSearchItem
SpGistState
SpGistTypeDesc
SpecialJoinInfo
SpinDelayStatus
SplitInterval
SplitLR
+SplitPoint
SplitVar
SplitedPageLayout
StackElem
@@ -2273,16 +2314,23 @@ SubTransactionId
SubXactCallback
SubXactCallbackItem
SubXactEvent
+SubplanResultRelHashElem
SubqueryScan
SubqueryScanPath
SubqueryScanState
+SubscriptingRef
+SubscriptingRefState
Subscription
SubscriptionInfo
SubscriptionRelState
+SupportRequestCost
+SupportRequestIndexCondition
+SupportRequestRows
+SupportRequestSelectivity
+SupportRequestSimplify
Syn
SyncOps
SyncRepConfigData
-SyncRequestHandler
SyncRequestType
SysScanDesc
SyscacheCallbackFunction
@@ -2336,7 +2384,6 @@ TSVectorParseState
TSVectorStat
TState
TStoreState
-TTOffList
TYPCATEGORY
T_Action
T_WorkerStatus
@@ -2383,8 +2430,6 @@ TidPath
TidScan
TidScanState
TimeADT
-TimeInterval
-TimeIntervalData
TimeLineHistoryCmd
TimeLineHistoryEntry
TimeLineID
@@ -2479,6 +2524,8 @@ U32
U8
UChar
UCharIterator
+UColAttribute
+UColAttributeValue
UCollator
UConverter
UErrorCode
@@ -2503,6 +2550,7 @@ UserMapping
UserOpts
VacAttrStats
VacAttrStatsP
+VacOptTernaryValue
VacuumParams
VacuumRelation
VacuumStmt
@@ -2532,6 +2580,7 @@ ViewCheckOption
ViewOptions
ViewStmt
VirtualTransactionId
+VirtualTupleTableSlot
Vsrt
WAITORTIMERCALLBACK
WAIT_ORDER
@@ -2576,6 +2625,7 @@ WindowAgg
WindowAggPath
WindowAggState
WindowClause
+WindowClauseSortData
WindowDef
WindowFunc
WindowFuncExprState
@@ -2638,6 +2688,7 @@ XLogSource
XLogwrtResult
XLogwrtRqst
XPVIV
+XPVMG
XactCallback
XactCallbackItem
XactEvent
@@ -2652,8 +2703,6 @@ XmlTableBuilderData
YYLTYPE
YYSTYPE
YY_BUFFER_STATE
-ZipfCache
-ZipfCell
_SPI_connection
_SPI_plan
__AssignProcessToJobObject
@@ -2672,6 +2721,7 @@ allocfunc
ambeginscan_function
ambuild_function
ambuildempty_function
+ambuildphasename_function
ambulkdelete_function
amcanreturn_function
amcostestimate_function
@@ -2709,7 +2759,6 @@ bits16
bits32
bits8
bloom_filter
-boolean
brin_column_state
bytea
cached_re_str
@@ -2810,6 +2859,7 @@ file_entry_t
file_type_t
filemap_t
finalize_primnode_context
+find_dependent_phvs_context
find_expr_references_context
fix_join_expr_context
fix_scan_expr_context
@@ -2819,6 +2869,8 @@ float4
float4KEY
float8
float8KEY
+floating_decimal_32
+floating_decimal_64
fmAggrefPtr
fmExprContextCallbackFunction
fmNodePtr
@@ -2857,13 +2909,14 @@ ginxlogRecompressDataLeaf
ginxlogSplit
ginxlogUpdateMeta
ginxlogVacuumDataLeafPage
+gistxlogDelete
gistxlogPage
+gistxlogPageDelete
+gistxlogPageReuse
gistxlogPageSplit
gistxlogPageUpdate
grouping_sets_data
gseg_picksplit_item
-gss_OID
-gss_OID_desc
gss_buffer_desc
gss_cred_id_t
gss_ctx_id_t
@@ -2889,6 +2942,7 @@ inet
inetKEY
inet_struct
init_function
+inline_cte_walker_context
inline_error_callback_arg
ino_t
inquiry
@@ -2905,6 +2959,9 @@ int64KEY
int8
internalPQconninfoOption
intptr_t
+intset_internal_node
+intset_leaf_node
+intset_node
intvKEY
itemIdSort
itemIdSortData
@@ -2920,6 +2977,7 @@ key_t
lclContext
lclTocEntry
leafSegmentInfo
+leaf_item
line_t
lineno_t
list_qsort_comparator
@@ -2959,6 +3017,8 @@ mp_int
mp_result
mp_sign
mp_size
+mp_small
+mp_usmall
mp_word
mpz_t
mxact
@@ -3094,10 +3154,10 @@ radius_attribute
radius_packet
rangeTableEntry_used_context
rank_context
-rb_allocfunc
-rb_combiner
-rb_comparator
-rb_freefunc
+rbt_allocfunc
+rbt_combiner
+rbt_comparator
+rbt_freefunc
reduce_outer_joins_state
reference
regex_arc_t
@@ -3154,7 +3214,7 @@ slist_iter
slist_mutable_iter
slist_node
slock_t
-smgrid
+socket_set
spgBulkDeleteState
spgChooseIn
spgChooseOut
@@ -3196,7 +3256,7 @@ stream_stop_callback
string
substitute_actual_parameters_context
substitute_actual_srf_parameters_context
-substitute_multiple_relids_context
+substitute_phv_relids_context
svtype
symbol
tablespaceinfo
@@ -3205,6 +3265,7 @@ teSection
temp_tablespaces_extra
test_function
test_shm_mq_header
+test_spec
text
timeKEY
time_t
@@ -3277,6 +3338,7 @@ walrcv_identify_system_fn
walrcv_readtimelinehistoryfile_fn
walrcv_receive_fn
walrcv_send_fn
+walrcv_server_version_fn
walrcv_startstreaming_fn
wchar2mb_with_len_converter
wchar_t