Diffstat (limited to 'src/backend'): 163 files changed, 2254 insertions, 2153 deletions
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index 2594407754..442a46140d 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -364,7 +364,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) MemoryContext oldcxt; MemoryContext perRangeCxt; BrinMemTuple *dtup; - BrinTuple *btup = NULL; + BrinTuple *btup = NULL; Size btupsz = 0; opaque = (BrinOpaque *) scan->opaque; @@ -920,13 +920,13 @@ brin_summarize_range(PG_FUNCTION_ARGS) Datum brin_desummarize_range(PG_FUNCTION_ARGS) { - Oid indexoid = PG_GETARG_OID(0); - int64 heapBlk64 = PG_GETARG_INT64(1); + Oid indexoid = PG_GETARG_OID(0); + int64 heapBlk64 = PG_GETARG_INT64(1); BlockNumber heapBlk; - Oid heapoid; - Relation heapRel; - Relation indexRel; - bool done; + Oid heapoid; + Relation heapRel; + Relation indexRel; + bool done; if (heapBlk64 > MaxBlockNumber || heapBlk64 < 0) { @@ -977,7 +977,8 @@ brin_desummarize_range(PG_FUNCTION_ARGS) RelationGetRelationName(indexRel)))); /* the revmap does the hard work */ - do { + do + { done = brinRevmapDesummarizeRange(indexRel, heapBlk); } while (!done); diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c index 9ed279bf42..fc8b10ab39 100644 --- a/src/backend/access/brin/brin_revmap.c +++ b/src/backend/access/brin/brin_revmap.c @@ -318,11 +318,11 @@ bool brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) { BrinRevmap *revmap; - BlockNumber pagesPerRange; + BlockNumber pagesPerRange; RevmapContents *contents; ItemPointerData *iptr; - ItemPointerData invalidIptr; - BlockNumber revmapBlk; + ItemPointerData invalidIptr; + BlockNumber revmapBlk; Buffer revmapBuf; Buffer regBuf; Page revmapPg; @@ -415,7 +415,7 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) if (RelationNeedsWAL(idxrel)) { xl_brin_desummarize xlrec; - XLogRecPtr recptr; + XLogRecPtr recptr; xlrec.pagesPerRange = revmap->rm_pagesPerRange; xlrec.heapBlk = heapBlk; diff --git a/src/backend/access/brin/brin_xlog.c b/src/backend/access/brin/brin_xlog.c index 8f5b5ceb3f..dff7198a39 100644 --- a/src/backend/access/brin/brin_xlog.c +++ b/src/backend/access/brin/brin_xlog.c @@ -268,7 +268,7 @@ brin_xlog_desummarize_page(XLogReaderState *record) action = XLogReadBufferForRedo(record, 0, &buffer); if (action == BLK_NEEDS_REDO) { - ItemPointerData iptr; + ItemPointerData iptr; ItemPointerSetInvalid(&iptr); brinSetHeapBlockItemptr(buffer, xlrec->pagesPerRange, xlrec->heapBlk, iptr); @@ -283,7 +283,7 @@ brin_xlog_desummarize_page(XLogReaderState *record) action = XLogReadBufferForRedo(record, 1, &buffer); if (action == BLK_NEEDS_REDO) { - Page regPg = BufferGetPage(buffer); + Page regPg = BufferGetPage(buffer); PageIndexTupleDeleteNoCompact(regPg, xlrec->regOffset); diff --git a/src/backend/access/common/printsimple.c b/src/backend/access/common/printsimple.c index 5fe1c72da8..851c3bf4de 100644 --- a/src/backend/access/common/printsimple.c +++ b/src/backend/access/common/printsimple.c @@ -102,8 +102,8 @@ printsimple(TupleTableSlot *slot, DestReceiver *self) case INT4OID: { - int32 num = DatumGetInt32(value); - char str[12]; /* sign, 10 digits and '\0' */ + int32 num = DatumGetInt32(value); + char str[12]; /* sign, 10 digits and '\0' */ pg_ltoa(num, str); pq_sendcountedtext(&buf, str, strlen(str), false); @@ -112,8 +112,8 @@ printsimple(TupleTableSlot *slot, DestReceiver *self) case INT8OID: { - int64 num = DatumGetInt64(value); - char str[23]; /* sign, 21 digits and '\0' */ + int64 num = DatumGetInt64(value); + char 
str[23]; /* sign, 21 digits and '\0' */ pg_lltoa(num, str); pq_sendcountedtext(&buf, str, strlen(str), false); diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 26c077a7bb..27e502a360 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -140,9 +140,9 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn * exclusive cleanup lock. This guarantees that no insertions currently * happen in this subtree. Caller also acquire Exclusive lock on deletable * page and is acquiring and releasing exclusive lock on left page before. - * Left page was locked and released. Then parent and this page are locked. - * We acquire left page lock here only to mark page dirty after changing - * right pointer. + * Left page was locked and released. Then parent and this page are + * locked. We acquire left page lock here only to mark page dirty after + * changing right pointer. */ lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno, RBM_NORMAL, gvs->strategy); @@ -258,7 +258,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno, RBM_NORMAL, gvs->strategy); - if(!isRoot) + if (!isRoot) LockBuffer(buffer, GIN_EXCLUSIVE); page = BufferGetPage(buffer); @@ -295,8 +295,8 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, } } - if(!isRoot) - LockBuffer(buffer, GIN_UNLOCK); + if (!isRoot) + LockBuffer(buffer, GIN_UNLOCK); ReleaseBuffer(buffer); @@ -326,7 +326,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) RBM_NORMAL, gvs->strategy); page = BufferGetPage(buffer); - ginTraverseLock(buffer,false); + ginTraverseLock(buffer, false); Assert(GinPageIsData(page)); @@ -347,15 +347,15 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) } else { - OffsetNumber i; - bool hasEmptyChild = FALSE; - bool hasNonEmptyChild = FALSE; - OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff; - BlockNumber* children = palloc(sizeof(BlockNumber) * (maxoff + 1)); + OffsetNumber i; + bool hasEmptyChild = FALSE; + bool hasNonEmptyChild = FALSE; + OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff; + BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1)); /* - * Read all children BlockNumbers. - * Not sure it is safe if there are many concurrent vacuums. + * Read all children BlockNumbers. Not sure it is safe if there are + * many concurrent vacuums. */ for (i = FirstOffsetNumber; i <= maxoff; i++) @@ -380,26 +380,26 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) vacuum_delay_point(); /* - * All subtree is empty - just return TRUE to indicate that parent must - * do a cleanup. Unless we are ROOT an there is way to go upper. + * All subtree is empty - just return TRUE to indicate that parent + * must do a cleanup. Unless we are ROOT an there is way to go upper. 
*/ - if(hasEmptyChild && !hasNonEmptyChild && !isRoot) + if (hasEmptyChild && !hasNonEmptyChild && !isRoot) return TRUE; - if(hasEmptyChild) + if (hasEmptyChild) { DataPageDeleteStack root, *ptr, *tmp; buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno, - RBM_NORMAL, gvs->strategy); + RBM_NORMAL, gvs->strategy); LockBufferForCleanup(buffer); memset(&root, 0, sizeof(DataPageDeleteStack)); - root.leftBlkno = InvalidBlockNumber; - root.isRoot = TRUE; + root.leftBlkno = InvalidBlockNumber; + root.isRoot = TRUE; ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber); diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index df54638f3e..d0b0547491 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -333,12 +333,12 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir) if (scan->kill_prior_tuple) { /* - * Yes, so remember it for later. (We'll deal with all such - * tuples at once right after leaving the index page or at - * end of scan.) In case if caller reverses the indexscan - * direction it is quite possible that the same item might - * get entered multiple times. But, we don't detect that; - * instead, we just forget any excess entries. + * Yes, so remember it for later. (We'll deal with all such tuples + * at once right after leaving the index page or at end of scan.) + * In case if caller reverses the indexscan direction it is quite + * possible that the same item might get entered multiple times. + * But, we don't detect that; instead, we just forget any excess + * entries. */ if (so->killedItems == NULL) so->killedItems = palloc(MaxIndexTuplesPerPage * @@ -348,7 +348,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir) { so->killedItems[so->numKilled].heapTid = so->hashso_heappos; so->killedItems[so->numKilled].indexOffset = - ItemPointerGetOffsetNumber(&(so->hashso_curpos)); + ItemPointerGetOffsetNumber(&(so->hashso_curpos)); so->numKilled++; } } @@ -477,9 +477,8 @@ hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, Relation rel = scan->indexRelation; /* - * Before leaving current page, deal with any killed items. - * Also, ensure that we acquire lock on current page before - * calling _hash_kill_items. + * Before leaving current page, deal with any killed items. Also, ensure + * that we acquire lock on current page before calling _hash_kill_items. */ if (so->numKilled > 0) { @@ -516,9 +515,8 @@ hashendscan(IndexScanDesc scan) Relation rel = scan->indexRelation; /* - * Before leaving current page, deal with any killed items. - * Also, ensure that we acquire lock on current page before - * calling _hash_kill_items. + * Before leaving current page, deal with any killed items. Also, ensure + * that we acquire lock on current page before calling _hash_kill_items. */ if (so->numKilled > 0) { @@ -889,8 +887,8 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, /* * Let us mark the page as clean if vacuum removes the DEAD tuples - * from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES - * flag. + * from an index page. We do this by clearing + * LH_PAGE_HAS_DEAD_TUPLES flag. 
*/ if (tuples_removed && *tuples_removed > 0 && H_HAS_DEAD_TUPLES(opaque)) diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c index d1c0e6904f..0ea11b2e74 100644 --- a/src/backend/access/hash/hash_xlog.c +++ b/src/backend/access/hash/hash_xlog.c @@ -950,22 +950,22 @@ hash_xlog_update_meta_page(XLogReaderState *record) static TransactionId hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record) { - xl_hash_vacuum_one_page *xlrec; - OffsetNumber *unused; + xl_hash_vacuum_one_page *xlrec; + OffsetNumber *unused; Buffer ibuffer, hbuffer; Page ipage, hpage; - RelFileNode rnode; - BlockNumber blkno; + RelFileNode rnode; + BlockNumber blkno; ItemId iitemid, hitemid; IndexTuple itup; - HeapTupleHeader htuphdr; - BlockNumber hblkno; - OffsetNumber hoffnum; - TransactionId latestRemovedXid = InvalidTransactionId; - int i; + HeapTupleHeader htuphdr; + BlockNumber hblkno; + OffsetNumber hoffnum; + TransactionId latestRemovedXid = InvalidTransactionId; + int i; xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record); @@ -984,9 +984,9 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record) return latestRemovedXid; /* - * Check if WAL replay has reached a consistent database state. If not, - * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid - * for more details. + * Check if WAL replay has reached a consistent database state. If not, we + * must PANIC. See the definition of + * btree_xlog_delete_get_latestRemovedXid for more details. */ if (!reachedConsistency) elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data"); @@ -1098,11 +1098,11 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record) static void hash_xlog_vacuum_one_page(XLogReaderState *record) { - XLogRecPtr lsn = record->EndRecPtr; + XLogRecPtr lsn = record->EndRecPtr; xl_hash_vacuum_one_page *xldata; - Buffer buffer; - Buffer metabuf; - Page page; + Buffer buffer; + Buffer metabuf; + Page page; XLogRedoAction action; HashPageOpaque pageopaque; @@ -1123,7 +1123,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record) if (InHotStandby) { TransactionId latestRemovedXid = - hash_xlog_vacuum_get_latestRemovedXid(record); + hash_xlog_vacuum_get_latestRemovedXid(record); RelFileNode rnode; XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL); @@ -1146,8 +1146,8 @@ hash_xlog_vacuum_one_page(XLogReaderState *record) } /* - * Mark the page as not containing any LP_DEAD items. See comments - * in _hash_vacuum_one_page() for details. + * Mark the page as not containing any LP_DEAD items. See comments in + * _hash_vacuum_one_page() for details. */ pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES; @@ -1160,7 +1160,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record) if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO) { - Page metapage; + Page metapage; HashMetaPage metap; metapage = BufferGetPage(metabuf); diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 8699b5bc30..01c8d8006c 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -24,7 +24,7 @@ #include "storage/buf_internals.h" static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, - RelFileNode hnode); + RelFileNode hnode); /* * _hash_doinsert() -- Handle insertion of a single index tuple. @@ -63,8 +63,8 @@ restart_insert: /* * Read the metapage. 
We don't lock it yet; HashMaxItemSize() will - * examine pd_pagesize_version, but that can't change so we can examine - * it without a lock. + * examine pd_pagesize_version, but that can't change so we can examine it + * without a lock. */ metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE); metapage = BufferGetPage(metabuf); @@ -126,10 +126,9 @@ restart_insert: BlockNumber nextblkno; /* - * Check if current page has any DEAD tuples. If yes, - * delete these tuples and see if we can get a space for - * the new item to be inserted before moving to the next - * page in the bucket chain. + * Check if current page has any DEAD tuples. If yes, delete these + * tuples and see if we can get a space for the new item to be + * inserted before moving to the next page in the bucket chain. */ if (H_HAS_DEAD_TUPLES(pageopaque)) { @@ -139,7 +138,7 @@ restart_insert: _hash_vacuum_one_page(rel, metabuf, buf, heapRel->rd_node); if (PageGetFreeSpace(page) >= itemsz) - break; /* OK, now we have enough space */ + break; /* OK, now we have enough space */ } } @@ -337,13 +336,13 @@ static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, RelFileNode hnode) { - OffsetNumber deletable[MaxOffsetNumber]; - int ndeletable = 0; + OffsetNumber deletable[MaxOffsetNumber]; + int ndeletable = 0; OffsetNumber offnum, - maxoff; - Page page = BufferGetPage(buf); - HashPageOpaque pageopaque; - HashMetaPage metap; + maxoff; + Page page = BufferGetPage(buf); + HashPageOpaque pageopaque; + HashMetaPage metap; /* Scan each tuple in page to see if it is marked as LP_DEAD */ maxoff = PageGetMaxOffsetNumber(page); @@ -351,7 +350,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - ItemId itemId = PageGetItemId(page, offnum); + ItemId itemId = PageGetItemId(page, offnum); if (ItemIdIsDead(itemId)) deletable[ndeletable++] = offnum; @@ -360,8 +359,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, if (ndeletable > 0) { /* - * Write-lock the meta page so that we can decrement - * tuple count. + * Write-lock the meta page so that we can decrement tuple count. */ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); @@ -374,8 +372,8 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, * Mark the page as not containing any LP_DEAD items. This is not * certainly true (there might be some that have recently been marked, * but weren't included in our target-item list), but it will almost - * always be true and it doesn't seem worth an additional page scan - * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint + * always be true and it doesn't seem worth an additional page scan to + * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint * anyway. */ pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); @@ -390,7 +388,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, /* XLOG stuff */ if (RelationNeedsWAL(rel)) { - xl_hash_vacuum_one_page xlrec; + xl_hash_vacuum_one_page xlrec; XLogRecPtr recptr; xlrec.hnode = hnode; @@ -401,12 +399,12 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage); /* - * We need the target-offsets array whether or not we store the whole - * buffer, to allow us to find the latestRemovedXid on a standby - * server. + * We need the target-offsets array whether or not we store the + * whole buffer, to allow us to find the latestRemovedXid on a + * standby server. 
*/ XLogRegisterData((char *) deletable, - ndeletable * sizeof(OffsetNumber)); + ndeletable * sizeof(OffsetNumber)); XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD); @@ -417,9 +415,10 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, } END_CRIT_SECTION(); + /* - * Releasing write lock on meta page as we have updated - * the tuple count. + * Releasing write lock on meta page as we have updated the tuple + * count. */ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); } diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index bf1ffff4e8..4544889294 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -177,8 +177,8 @@ _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); /* - * Set hasho_prevblkno with current hashm_maxbucket. This value will - * be used to validate cached HashMetaPageData. See + * Set hasho_prevblkno with current hashm_maxbucket. This value will be + * used to validate cached HashMetaPageData. See * _hash_getbucketbuf_from_hashkey(). */ pageopaque->hasho_prevblkno = max_bucket; @@ -509,8 +509,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, * Choose the number of initial bucket pages to match the fill factor * given the estimated number of tuples. We round up the result to the * total number of buckets which has to be allocated before using its - * _hashm_spare element. However always force at least 2 bucket pages. - * The upper limit is determined by considerations explained in + * _hashm_spare element. However always force at least 2 bucket pages. The + * upper limit is determined by considerations explained in * _hash_expandtable(). */ dnumbuckets = num_tuples / ffactor; @@ -568,8 +568,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, metap->hashm_maxbucket = num_buckets - 1; /* - * Set highmask as next immediate ((2 ^ x) - 1), which should be sufficient - * to cover num_buckets. + * Set highmask as next immediate ((2 ^ x) - 1), which should be + * sufficient to cover num_buckets. */ metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1; metap->hashm_lowmask = (metap->hashm_highmask >> 1); @@ -748,8 +748,8 @@ restart_expand: { /* * Copy bucket mapping info now; refer to the comment in code below - * where we copy this information before calling _hash_splitbucket - * to see why this is okay. + * where we copy this information before calling _hash_splitbucket to + * see why this is okay. */ maxbucket = metap->hashm_maxbucket; highmask = metap->hashm_highmask; @@ -792,8 +792,7 @@ restart_expand: * We treat allocation of buckets as a separate WAL-logged action. * Even if we fail after this operation, won't leak bucket pages; * rather, the next split will consume this space. In any case, even - * without failure we don't use all the space in one split - * operation. + * without failure we don't use all the space in one split operation. */ buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket; if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add)) @@ -870,10 +869,9 @@ restart_expand: /* * Mark the old bucket to indicate that split is in progress. (At - * operation end, we will clear the split-in-progress flag.) Also, - * for a primary bucket page, hasho_prevblkno stores the number of - * buckets that existed as of the last split, so we must update that - * value here. + * operation end, we will clear the split-in-progress flag.) 
Also, for a + * primary bucket page, hasho_prevblkno stores the number of buckets that + * existed as of the last split, so we must update that value here. */ oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT; oopaque->hasho_prevblkno = maxbucket; @@ -1008,8 +1006,8 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks) /* * Initialize the page. Just zeroing the page won't work; see - * _hash_freeovflpage for similar usage. We take care to make the - * special space valid for the benefit of tools such as pageinspect. + * _hash_freeovflpage for similar usage. We take care to make the special + * space valid for the benefit of tools such as pageinspect. */ _hash_pageinit(page, BLCKSZ); @@ -1462,11 +1460,11 @@ log_split_page(Relation rel, Buffer buf) * _hash_getcachedmetap() -- Returns cached metapage data. * * If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on - * the metapage. If not set, we'll set it before returning if we have to - * refresh the cache, and return with a pin but no lock on it; caller is - * responsible for releasing the pin. + * the metapage. If not set, we'll set it before returning if we have to + * refresh the cache, and return with a pin but no lock on it; caller is + * responsible for releasing the pin. * - * We refresh the cache if it's not initialized yet or force_refresh is true. + * We refresh the cache if it's not initialized yet or force_refresh is true. */ HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh) @@ -1476,13 +1474,13 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh) Assert(metabuf); if (force_refresh || rel->rd_amcache == NULL) { - char *cache = NULL; + char *cache = NULL; /* - * It's important that we don't set rd_amcache to an invalid - * value. Either MemoryContextAlloc or _hash_getbuf could fail, - * so don't install a pointer to the newly-allocated storage in the - * actual relcache entry until both have succeeeded. + * It's important that we don't set rd_amcache to an invalid value. + * Either MemoryContextAlloc or _hash_getbuf could fail, so don't + * install a pointer to the newly-allocated storage in the actual + * relcache entry until both have succeeeded. */ if (rel->rd_amcache == NULL) cache = MemoryContextAlloc(rel->rd_indexcxt, @@ -1517,7 +1515,7 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh) * us an opportunity to use the previously saved metapage contents to reach * the target bucket buffer, instead of reading from the metapage every time. * This saves one buffer access every time we want to reach the target bucket - * buffer, which is very helpful savings in bufmgr traffic and contention. + * buffer, which is very helpful savings in bufmgr traffic and contention. * * The access type parameter (HASH_READ or HASH_WRITE) indicates whether the * bucket buffer has to be locked for reading or writing. 
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index 9f832f2544..c513c3b842 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -528,20 +528,21 @@ _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, void _hash_kill_items(IndexScanDesc scan) { - HashScanOpaque so = (HashScanOpaque) scan->opaque; - Page page; - HashPageOpaque opaque; - OffsetNumber offnum, maxoff; - int numKilled = so->numKilled; - int i; - bool killedsomething = false; + HashScanOpaque so = (HashScanOpaque) scan->opaque; + Page page; + HashPageOpaque opaque; + OffsetNumber offnum, + maxoff; + int numKilled = so->numKilled; + int i; + bool killedsomething = false; Assert(so->numKilled > 0); Assert(so->killedItems != NULL); /* - * Always reset the scan state, so we don't look for same - * items on other pages. + * Always reset the scan state, so we don't look for same items on other + * pages. */ so->numKilled = 0; @@ -555,7 +556,7 @@ _hash_kill_items(IndexScanDesc scan) while (offnum <= maxoff) { - ItemId iid = PageGetItemId(page, offnum); + ItemId iid = PageGetItemId(page, offnum); IndexTuple ituple = (IndexTuple) PageGetItem(page, iid); if (ItemPointerEquals(&ituple->t_tid, &so->killedItems[i].heapTid)) @@ -563,15 +564,15 @@ _hash_kill_items(IndexScanDesc scan) /* found the item */ ItemIdMarkDead(iid); killedsomething = true; - break; /* out of inner search loop */ + break; /* out of inner search loop */ } offnum = OffsetNumberNext(offnum); } } /* - * Since this can be redone later if needed, mark as dirty hint. - * Whenever we mark anything LP_DEAD, we also set the page's + * Since this can be redone later if needed, mark as dirty hint. Whenever + * we mark anything LP_DEAD, we also set the page's * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint. */ if (killedsomething) diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 0c3e2b065a..e890e08c9a 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -3518,10 +3518,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, * * For HOT considerations, this is wasted effort if we fail to update or * have to put the new tuple on a different page. But we must compute the - * list before obtaining buffer lock --- in the worst case, if we are doing - * an update on one of the relevant system catalogs, we could deadlock if - * we try to fetch the list later. In any case, the relcache caches the - * data so this is usually pretty cheap. + * list before obtaining buffer lock --- in the worst case, if we are + * doing an update on one of the relevant system catalogs, we could + * deadlock if we try to fetch the list later. In any case, the relcache + * caches the data so this is usually pretty cheap. * * We also need columns used by the replica identity and columns that are * considered the "key" of rows in the table. @@ -3540,15 +3540,16 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, page = BufferGetPage(buffer); interesting_attrs = NULL; + /* * If the page is already full, there is hardly any chance of doing a HOT * update on this page. It might be wasteful effort to look for index - * column updates only to later reject HOT updates for lack of space in the - * same page. So we be conservative and only fetch hot_attrs if the page is - * not already full. 
Since we are already holding a pin on the buffer, - * there is no chance that the buffer can get cleaned up concurrently and - * even if that was possible, in the worst case we lose a chance to do a - * HOT update. + * column updates only to later reject HOT updates for lack of space in + * the same page. So we be conservative and only fetch hot_attrs if the + * page is not already full. Since we are already holding a pin on the + * buffer, there is no chance that the buffer can get cleaned up + * concurrently and even if that was possible, in the worst case we lose a + * chance to do a HOT update. */ if (!PageIsFull(page)) { @@ -4176,7 +4177,7 @@ l2: * logged. */ old_key_tuple = ExtractReplicaIdentity(relation, &oldtup, - bms_overlap(modified_attrs, id_attrs), + bms_overlap(modified_attrs, id_attrs), &old_key_copied); /* NO EREPORT(ERROR) from here till changes are logged */ @@ -4422,17 +4423,17 @@ static Bitmapset * HeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup) { - int attnum; - Bitmapset *modified = NULL; + int attnum; + Bitmapset *modified = NULL; while ((attnum = bms_first_member(interesting_cols)) >= 0) { attnum += FirstLowInvalidHeapAttributeNumber; if (!heap_tuple_attr_equals(RelationGetDescr(relation), - attnum, oldtup, newtup)) + attnum, oldtup, newtup)) modified = bms_add_member(modified, - attnum - FirstLowInvalidHeapAttributeNumber); + attnum - FirstLowInvalidHeapAttributeNumber); } return modified; diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 775f2ff1f8..116f5f32f6 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -100,7 +100,7 @@ typedef struct BTParallelScanDescData * scan */ slock_t btps_mutex; /* protects above variables */ ConditionVariable btps_cv; /* used to synchronize parallel scan */ -} BTParallelScanDescData; +} BTParallelScanDescData; typedef struct BTParallelScanDescData *BTParallelScanDesc; @@ -289,11 +289,11 @@ btbuildempty(Relation index) _bt_initmetapage(metapage, P_NONE, 0); /* - * Write the page and log it. It might seem that an immediate sync - * would be sufficient to guarantee that the file exists on disk, but - * recovery itself might remove it while replaying, for example, an - * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we - * need this even when wal_level=minimal. + * Write the page and log it. It might seem that an immediate sync would + * be sufficient to guarantee that the file exists on disk, but recovery + * itself might remove it while replaying, for example, an + * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we need + * this even when wal_level=minimal. 
*/ PageSetChecksumInplace(metapage, BTREE_METAPAGE); smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE, diff --git a/src/backend/access/rmgrdesc/brindesc.c b/src/backend/access/rmgrdesc/brindesc.c index 8eb5275a8b..637ebf30f8 100644 --- a/src/backend/access/rmgrdesc/brindesc.c +++ b/src/backend/access/rmgrdesc/brindesc.c @@ -66,7 +66,7 @@ brin_desc(StringInfo buf, XLogReaderState *record) xl_brin_desummarize *xlrec = (xl_brin_desummarize *) rec; appendStringInfo(buf, "pagesPerRange %u, heapBlk %u, page offset %u", - xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset); + xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset); } } diff --git a/src/backend/access/rmgrdesc/clogdesc.c b/src/backend/access/rmgrdesc/clogdesc.c index ef268c5ab3..9181154ffd 100644 --- a/src/backend/access/rmgrdesc/clogdesc.c +++ b/src/backend/access/rmgrdesc/clogdesc.c @@ -36,7 +36,7 @@ clog_desc(StringInfo buf, XLogReaderState *record) memcpy(&xlrec, rec, sizeof(xl_clog_truncate)); appendStringInfo(buf, "page %d; oldestXact %u", - xlrec.pageno, xlrec.oldestXact); + xlrec.pageno, xlrec.oldestXact); } } diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index b22fdd48f3..df51f3ce1f 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -117,18 +117,18 @@ gin_desc(StringInfo buf, XLogReaderState *record) if (!(xlrec->flags & GIN_INSERT_ISDATA)) appendStringInfo(buf, " isdelete: %c", - (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F'); + (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F'); else if (xlrec->flags & GIN_INSERT_ISLEAF) desc_recompress_leaf(buf, (ginxlogRecompressDataLeaf *) payload); else { ginxlogInsertDataInternal *insertData = - (ginxlogInsertDataInternal *) payload; + (ginxlogInsertDataInternal *) payload; appendStringInfo(buf, " pitem: %u-%u/%u", - PostingItemGetBlockNumber(&insertData->newitem), - ItemPointerGetBlockNumber(&insertData->newitem.key), - ItemPointerGetOffsetNumber(&insertData->newitem.key)); + PostingItemGetBlockNumber(&insertData->newitem), + ItemPointerGetBlockNumber(&insertData->newitem.key), + ItemPointerGetOffsetNumber(&insertData->newitem.key)); } } } @@ -159,7 +159,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) else { ginxlogVacuumDataLeafPage *xlrec = - (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); + (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL); desc_recompress_leaf(buf, &xlrec->data); } diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c index 00a0ab4438..9a37259916 100644 --- a/src/backend/access/spgist/spginsert.c +++ b/src/backend/access/spgist/spginsert.c @@ -164,10 +164,10 @@ spgbuildempty(Relation index) /* * Write the page and log it unconditionally. This is important - * particularly for indexes created on tablespaces and databases - * whose creation happened after the last redo pointer as recovery - * removes any of their existing content when the corresponding - * create records are replayed. + * particularly for indexes created on tablespaces and databases whose + * creation happened after the last redo pointer as recovery removes any + * of their existing content when the corresponding create records are + * replayed. 
*/ PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO); smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO, diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 7a007a6ba5..bece57589e 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -84,7 +84,7 @@ static int ZeroCLOGPage(int pageno, bool writeXlog); static bool CLOGPagePrecedes(int page1, int page2); static void WriteZeroPageXlogRec(int pageno); static void WriteTruncateXlogRec(int pageno, TransactionId oldestXact, - Oid oldestXidDb); + Oid oldestXidDb); static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids, TransactionId *subxids, XidStatus status, XLogRecPtr lsn, int pageno); @@ -680,13 +680,13 @@ TruncateCLOG(TransactionId oldestXact, Oid oldestxid_datoid) /* vac_truncate_clog already advanced oldestXid */ Assert(TransactionIdPrecedesOrEquals(oldestXact, - ShmemVariableCache->oldestXid)); + ShmemVariableCache->oldestXid)); /* - * Write XLOG record and flush XLOG to disk. We record the oldest xid we're - * keeping information about here so we can ensure that it's always ahead - * of clog truncation in case we crash, and so a standby finds out the new - * valid xid before the next checkpoint. + * Write XLOG record and flush XLOG to disk. We record the oldest xid + * we're keeping information about here so we can ensure that it's always + * ahead of clog truncation in case we crash, and so a standby finds out + * the new valid xid before the next checkpoint. */ WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid); diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 03ffa20908..7646c23c4e 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -748,8 +748,8 @@ ShutdownCommitTs(void) SimpleLruFlush(CommitTsCtl, false); /* - * fsync pg_commit_ts to ensure that any files flushed previously are durably - * on disk. + * fsync pg_commit_ts to ensure that any files flushed previously are + * durably on disk. */ fsync_fname("pg_commit_ts", true); } @@ -764,8 +764,8 @@ CheckPointCommitTs(void) SimpleLruFlush(CommitTsCtl, true); /* - * fsync pg_commit_ts to ensure that any files flushed previously are durably - * on disk. + * fsync pg_commit_ts to ensure that any files flushed previously are + * durably on disk. */ fsync_fname("pg_commit_ts", true); } diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index cc68484a5d..cef03f83e0 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -87,9 +87,9 @@ SubTransSetParent(TransactionId xid, TransactionId parent) ptr += entryno; /* - * It's possible we'll try to set the parent xid multiple times - * but we shouldn't ever be changing the xid from one valid xid - * to another valid xid, which would corrupt the data structure. + * It's possible we'll try to set the parent xid multiple times but we + * shouldn't ever be changing the xid from one valid xid to another valid + * xid, which would corrupt the data structure. */ if (*ptr != parent) { @@ -162,13 +162,13 @@ SubTransGetTopmostTransaction(TransactionId xid) parentXid = SubTransGetParent(parentXid); /* - * By convention the parent xid gets allocated first, so should - * always precede the child xid. Anything else points to a corrupted - * data structure that could lead to an infinite loop, so exit. 
+ * By convention the parent xid gets allocated first, so should always + * precede the child xid. Anything else points to a corrupted data + * structure that could lead to an infinite loop, so exit. */ if (!TransactionIdPrecedes(parentXid, previousXid)) elog(ERROR, "pg_subtrans contains invalid entry: xid %u points to parent xid %u", - previousXid, parentXid); + previousXid, parentXid); } Assert(TransactionIdIsValid(previousXid)); diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 7bf2555af2..c50f9c4bf6 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -166,7 +166,7 @@ typedef struct GlobalTransactionData */ XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */ XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */ - TransactionId xid; /* The GXACT id */ + TransactionId xid; /* The GXACT id */ Oid owner; /* ID of user that executed the xact */ BackendId locking_backend; /* backend currently working on the xact */ @@ -220,11 +220,11 @@ static void RemoveGXact(GlobalTransaction gxact); static void XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len); static char *ProcessTwoPhaseBuffer(TransactionId xid, - XLogRecPtr prepare_start_lsn, - bool fromdisk, bool setParent, bool setNextXid); + XLogRecPtr prepare_start_lsn, + bool fromdisk, bool setParent, bool setNextXid); static void MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid, - const char *gid, TimestampTz prepared_at, Oid owner, - Oid databaseid); + const char *gid, TimestampTz prepared_at, Oid owner, + Oid databaseid); static void RemoveTwoPhaseFile(TransactionId xid, bool giveWarning); static void RecreateTwoPhaseFile(TransactionId xid, void *content, int len); @@ -1304,7 +1304,7 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("Failed while allocating a WAL reading processor."))); + errdetail("Failed while allocating a WAL reading processor."))); record = XLogReadRecord(xlogreader, lsn, &errormsg); if (record == NULL) @@ -1318,9 +1318,9 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len) (XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE) ereport(ERROR, (errcode_for_file_access(), - errmsg("expected two-phase state data is not present in WAL at %X/%X", - (uint32) (lsn >> 32), - (uint32) lsn))); + errmsg("expected two-phase state data is not present in WAL at %X/%X", + (uint32) (lsn >> 32), + (uint32) lsn))); if (len != NULL) *len = XLogRecGetDataLen(xlogreader); @@ -1675,7 +1675,10 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) LWLockAcquire(TwoPhaseStateLock, LW_SHARED); for (i = 0; i < TwoPhaseState->numPrepXacts; i++) { - /* Note that we are using gxact not pgxact so this works in recovery also */ + /* + * Note that we are using gxact not pgxact so this works in recovery + * also + */ GlobalTransaction gxact = TwoPhaseState->prepXacts[i]; if ((gxact->valid || gxact->inredo) && @@ -1727,8 +1730,8 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) void restoreTwoPhaseData(void) { - DIR *cldir; - struct dirent *clde; + DIR *cldir; + struct dirent *clde; cldir = AllocateDir(TWOPHASE_DIR); while ((clde = ReadDir(cldir, TWOPHASE_DIR)) != NULL) @@ -1801,8 +1804,8 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p) xid = gxact->xid; buf = ProcessTwoPhaseBuffer(xid, - gxact->prepare_start_lsn, - gxact->ondisk, false, true); + gxact->prepare_start_lsn, + 
gxact->ondisk, false, true); if (buf == NULL) continue; @@ -1876,8 +1879,8 @@ StandbyRecoverPreparedTransactions(void) xid = gxact->xid; buf = ProcessTwoPhaseBuffer(xid, - gxact->prepare_start_lsn, - gxact->ondisk, false, false); + gxact->prepare_start_lsn, + gxact->ondisk, false, false); if (buf != NULL) pfree(buf); } @@ -1920,17 +1923,17 @@ RecoverPreparedTransactions(void) xid = gxact->xid; /* - * Reconstruct subtrans state for the transaction --- needed - * because pg_subtrans is not preserved over a restart. Note that - * we are linking all the subtransactions directly to the - * top-level XID; there may originally have been a more complex - * hierarchy, but there's no need to restore that exactly. - * It's possible that SubTransSetParent has been set before, if - * the prepared transaction generated xid assignment records. + * Reconstruct subtrans state for the transaction --- needed because + * pg_subtrans is not preserved over a restart. Note that we are + * linking all the subtransactions directly to the top-level XID; + * there may originally have been a more complex hierarchy, but + * there's no need to restore that exactly. It's possible that + * SubTransSetParent has been set before, if the prepared transaction + * generated xid assignment records. */ buf = ProcessTwoPhaseBuffer(xid, - gxact->prepare_start_lsn, - gxact->ondisk, true, false); + gxact->prepare_start_lsn, + gxact->ondisk, true, false); if (buf == NULL) continue; @@ -1949,9 +1952,8 @@ RecoverPreparedTransactions(void) bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage)); /* - * Recreate its GXACT and dummy PGPROC. But, check whether - * it was added in redo and already has a shmem entry for - * it. + * Recreate its GXACT and dummy PGPROC. But, check whether it was + * added in redo and already has a shmem entry for it. */ LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE); MarkAsPreparingGuts(gxact, xid, gid, @@ -1980,9 +1982,8 @@ RecoverPreparedTransactions(void) StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids); /* - * We're done with recovering this transaction. Clear - * MyLockedGxact, like we do in PrepareTransaction() during normal - * operation. + * We're done with recovering this transaction. Clear MyLockedGxact, + * like we do in PrepareTransaction() during normal operation. */ PostPrepare_Twophase(); @@ -2049,8 +2050,8 @@ ProcessTwoPhaseBuffer(TransactionId xid, else { ereport(WARNING, - (errmsg("removing future two-phase state from memory for \"%u\"", - xid))); + (errmsg("removing future two-phase state from memory for \"%u\"", + xid))); PrepareRedoRemove(xid, true); } return NULL; @@ -2063,8 +2064,8 @@ ProcessTwoPhaseBuffer(TransactionId xid, if (buf == NULL) { ereport(WARNING, - (errmsg("removing corrupt two-phase state file for \"%u\"", - xid))); + (errmsg("removing corrupt two-phase state file for \"%u\"", + xid))); RemoveTwoPhaseFile(xid, true); return NULL; } @@ -2082,15 +2083,15 @@ ProcessTwoPhaseBuffer(TransactionId xid, if (fromdisk) { ereport(WARNING, - (errmsg("removing corrupt two-phase state file for \"%u\"", - xid))); + (errmsg("removing corrupt two-phase state file for \"%u\"", + xid))); RemoveTwoPhaseFile(xid, true); } else { ereport(WARNING, - (errmsg("removing corrupt two-phase state from memory for \"%u\"", - xid))); + (errmsg("removing corrupt two-phase state from memory for \"%u\"", + xid))); PrepareRedoRemove(xid, true); } pfree(buf); @@ -2098,8 +2099,8 @@ ProcessTwoPhaseBuffer(TransactionId xid, } /* - * Examine subtransaction XIDs ... 
they should all follow main - * XID, and they may force us to advance nextXid. + * Examine subtransaction XIDs ... they should all follow main XID, and + * they may force us to advance nextXid. */ subxids = (TransactionId *) (buf + MAXALIGN(sizeof(TwoPhaseFileHeader)) + @@ -2122,7 +2123,7 @@ ProcessTwoPhaseBuffer(TransactionId xid, */ LWLockAcquire(XidGenLock, LW_EXCLUSIVE); if (TransactionIdFollowsOrEquals(subxid, - ShmemVariableCache->nextXid)) + ShmemVariableCache->nextXid)) { ShmemVariableCache->nextXid = subxid; TransactionIdAdvance(ShmemVariableCache->nextXid); @@ -2175,14 +2176,15 @@ RecordTransactionCommitPrepared(TransactionId xid, MyPgXact->delayChkpt = true; /* - * Emit the XLOG commit record. Note that we mark 2PC commits as potentially - * having AccessExclusiveLocks since we don't know whether or not they do. + * Emit the XLOG commit record. Note that we mark 2PC commits as + * potentially having AccessExclusiveLocks since we don't know whether or + * not they do. */ recptr = XactLogCommitRecord(committs, nchildren, children, nrels, rels, ninvalmsgs, invalmsgs, initfileinval, false, - MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK, + MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK, xid); @@ -2260,13 +2262,14 @@ RecordTransactionAbortPrepared(TransactionId xid, START_CRIT_SECTION(); /* - * Emit the XLOG commit record. Note that we mark 2PC aborts as potentially - * having AccessExclusiveLocks since we don't know whether or not they do. + * Emit the XLOG commit record. Note that we mark 2PC aborts as + * potentially having AccessExclusiveLocks since we don't know whether or + * not they do. */ recptr = XactLogAbortRecord(GetCurrentTimestamp(), nchildren, children, nrels, rels, - MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK, + MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK, xid); /* Always flush, since we're about to remove the 2PC state file */ @@ -2301,8 +2304,8 @@ void PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn) { TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) buf; - char *bufptr; - const char *gid; + char *bufptr; + const char *gid; GlobalTransaction gxact; Assert(RecoveryInProgress()); @@ -2315,8 +2318,8 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn) * * This creates a gxact struct and puts it into the active array. * - * In redo, this struct is mainly used to track PREPARE/COMMIT entries - * in shared memory. Hence, we only fill up the bare minimum contents here. + * In redo, this struct is mainly used to track PREPARE/COMMIT entries in + * shared memory. Hence, we only fill up the bare minimum contents here. 
* The gxact also gets marked with gxact->inredo set to true to indicate * that it got added in the redo phase */ @@ -2340,7 +2343,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn) gxact->locking_backend = InvalidBackendId; gxact->valid = false; gxact->ondisk = XLogRecPtrIsInvalid(start_lsn); - gxact->inredo = true; /* yes, added in redo */ + gxact->inredo = true; /* yes, added in redo */ strcpy(gxact->gid, gid); /* And insert it into the active array */ diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index 5efbfbd3d6..b02dd6fbd2 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -272,7 +272,7 @@ AdvanceOldestClogXid(TransactionId oldest_datfrozenxid) { LWLockAcquire(CLogTruncationLock, LW_EXCLUSIVE); if (TransactionIdPrecedes(ShmemVariableCache->oldestClogXid, - oldest_datfrozenxid)) + oldest_datfrozenxid)) { ShmemVariableCache->oldestClogXid = oldest_datfrozenxid; } diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index a3ff1b22f0..7e8c598f2a 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -115,7 +115,7 @@ TransactionId *ParallelCurrentXids; * globally accessible, so can be set from anywhere in the code that requires * recording flags. */ -int MyXactFlags; +int MyXactFlags; /* * transaction states - transaction state from server perspective @@ -2641,7 +2641,8 @@ CleanupTransaction(void) * do abort cleanup processing */ AtCleanup_Portals(); /* now safe to release portal memory */ - AtEOXact_Snapshot(false, true); /* and release the transaction's snapshots */ + AtEOXact_Snapshot(false, true); /* and release the transaction's + * snapshots */ CurrentResourceOwner = NULL; /* and resource owner */ if (TopTransactionResourceOwner) @@ -5646,8 +5647,8 @@ xact_redo(XLogReaderState *record) else if (info == XLOG_XACT_PREPARE) { /* - * Store xid and start/end pointers of the WAL record in - * TwoPhaseState gxact entry. + * Store xid and start/end pointers of the WAL record in TwoPhaseState + * gxact entry. */ PrepareRedoAdd(XLogRecGetData(record), record->ReadRecPtr, diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index b98e37e1d3..399822d3fe 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -550,13 +550,12 @@ typedef struct XLogCtlInsert bool fullPageWrites; /* - * exclusiveBackupState indicates the state of an exclusive backup - * (see comments of ExclusiveBackupState for more details). - * nonExclusiveBackups is a counter indicating the number of streaming - * base backups currently in progress. forcePageWrites is set to true - * when either of these is non-zero. lastBackupStart is the latest - * checkpoint redo location used as a starting point for an online - * backup. + * exclusiveBackupState indicates the state of an exclusive backup (see + * comments of ExclusiveBackupState for more details). nonExclusiveBackups + * is a counter indicating the number of streaming base backups currently + * in progress. forcePageWrites is set to true when either of these is + * non-zero. lastBackupStart is the latest checkpoint redo location used + * as a starting point for an online backup. */ ExclusiveBackupState exclusiveBackupState; int nonExclusiveBackups; @@ -1082,7 +1081,7 @@ XLogInsertRecord(XLogRecData *rdata, */ if ((flags & XLOG_MARK_UNIMPORTANT) == 0) { - int lockno = holdingAllLocks ? 0 : MyLockNo; + int lockno = holdingAllLocks ? 
0 : MyLockNo; WALInsertLocks[lockno].l.lastImportantAt = StartPos; } @@ -1405,7 +1404,8 @@ checkXLogConsistency(XLogReaderState *record) /* * If the block LSN is already ahead of this WAL record, we can't - * expect contents to match. This can happen if recovery is restarted. + * expect contents to match. This can happen if recovery is + * restarted. */ if (PageGetLSN(replay_image_masked) > record->EndRecPtr) continue; @@ -4975,15 +4975,15 @@ BootStrapXLOG(void) sysidentifier |= getpid() & 0xFFF; /* - * Generate a random nonce. This is used for authentication requests - * that will fail because the user does not exist. The nonce is used to - * create a genuine-looking password challenge for the non-existent user, - * in lieu of an actual stored password. + * Generate a random nonce. This is used for authentication requests that + * will fail because the user does not exist. The nonce is used to create + * a genuine-looking password challenge for the non-existent user, in lieu + * of an actual stored password. */ if (!pg_backend_random(mock_auth_nonce, MOCK_AUTH_NONCE_LEN)) ereport(PANIC, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("could not generate secret authorization token"))); + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("could not generate secret authorization token"))); /* First timeline ID is always 1 */ ThisTimeLineID = 1; @@ -5298,7 +5298,7 @@ readRecoveryCommandFile(void) DatumGetLSN(DirectFunctionCall3(pg_lsn_in, CStringGetDatum(item->value), ObjectIdGetDatum(InvalidOid), - Int32GetDatum(-1))); + Int32GetDatum(-1))); ereport(DEBUG2, (errmsg_internal("recovery_target_lsn = '%X/%X'", (uint32) (recoveryTargetLSN >> 32), @@ -5643,9 +5643,9 @@ recoveryStopsBefore(XLogReaderState *record) recoveryStopTime = 0; recoveryStopName[0] = '\0'; ereport(LOG, - (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"", - (uint32) (recoveryStopLSN >> 32), - (uint32) recoveryStopLSN))); + (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"", + (uint32) (recoveryStopLSN >> 32), + (uint32) recoveryStopLSN))); return true; } @@ -5800,9 +5800,9 @@ recoveryStopsAfter(XLogReaderState *record) recoveryStopTime = 0; recoveryStopName[0] = '\0'; ereport(LOG, - (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"", - (uint32) (recoveryStopLSN >> 32), - (uint32) recoveryStopLSN))); + (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"", + (uint32) (recoveryStopLSN >> 32), + (uint32) recoveryStopLSN))); return true; } @@ -6348,12 +6348,12 @@ StartupXLOG(void) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("Failed while allocating a WAL reading processor."))); + errdetail("Failed while allocating a WAL reading processor."))); xlogreader->system_identifier = ControlFile->system_identifier; /* - * Allocate pages dedicated to WAL consistency checks, those had better - * be aligned. + * Allocate pages dedicated to WAL consistency checks, those had better be + * aligned. */ replay_image_masked = (char *) palloc(BLCKSZ); master_image_masked = (char *) palloc(BLCKSZ); @@ -6687,21 +6687,21 @@ StartupXLOG(void) /* * Copy any missing timeline history files between 'now' and the recovery - * target timeline from archive to pg_wal. While we don't need those - * files ourselves - the history file of the recovery target timeline - * covers all the previous timelines in the history too - a cascading - * standby server might be interested in them. 
Or, if you archive the WAL - * from this server to a different archive than the master, it'd be good - * for all the history files to get archived there after failover, so that - * you can use one of the old timelines as a PITR target. Timeline history - * files are small, so it's better to copy them unnecessarily than not - * copy them and regret later. + * target timeline from archive to pg_wal. While we don't need those files + * ourselves - the history file of the recovery target timeline covers all + * the previous timelines in the history too - a cascading standby server + * might be interested in them. Or, if you archive the WAL from this + * server to a different archive than the master, it'd be good for all the + * history files to get archived there after failover, so that you can use + * one of the old timelines as a PITR target. Timeline history files are + * small, so it's better to copy them unnecessarily than not copy them and + * regret later. */ restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI); /* - * Before running in recovery, scan pg_twophase and fill in its status - * to be able to work on entries generated by redo. Doing a scan before + * Before running in recovery, scan pg_twophase and fill in its status to + * be able to work on entries generated by redo. Doing a scan before * taking any recovery action has the merit to discard any 2PC files that * are newer than the first record to replay, saving from any conflicts at * replay. This avoids as well any subsequent scans when doing recovery @@ -7426,7 +7426,7 @@ StartupXLOG(void) snprintf(reason, sizeof(reason), "%s LSN %X/%X\n", recoveryStopAfter ? "after" : "before", - (uint32 ) (recoveryStopLSN >> 32), + (uint32) (recoveryStopLSN >> 32), (uint32) recoveryStopLSN); else if (recoveryTarget == RECOVERY_TARGET_NAME) snprintf(reason, sizeof(reason), @@ -9645,6 +9645,7 @@ xlog_redo(XLogReaderState *record) MultiXactAdvanceOldest(checkPoint.oldestMulti, checkPoint.oldestMultiDB); + /* * No need to set oldestClogXid here as well; it'll be set when we * redo an xl_clog_truncate if it changed since initialization. @@ -10238,8 +10239,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, if (exclusive) { /* - * At first, mark that we're now starting an exclusive backup, - * to ensure that there are no other sessions currently running + * At first, mark that we're now starting an exclusive backup, to + * ensure that there are no other sessions currently running * pg_start_backup() or pg_stop_backup(). */ if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_NONE) @@ -10505,8 +10506,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, { /* * Check for existing backup label --- implies a backup is already - * running. (XXX given that we checked exclusiveBackupState above, - * maybe it would be OK to just unlink any such label file?) + * running. (XXX given that we checked exclusiveBackupState + * above, maybe it would be OK to just unlink any such label + * file?) */ if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0) { @@ -10727,8 +10729,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) if (exclusive) { /* - * At first, mark that we're now stopping an exclusive backup, - * to ensure that there are no other sessions currently running + * At first, mark that we're now stopping an exclusive backup, to + * ensure that there are no other sessions currently running * pg_start_backup() or pg_stop_backup(). 
*/ WALInsertLockAcquireExclusive(); @@ -10790,8 +10792,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) durable_unlink(BACKUP_LABEL_FILE, ERROR); /* - * Remove tablespace_map file if present, it is created only if there - * are tablespaces. + * Remove tablespace_map file if present, it is created only if + * there are tablespaces. */ durable_unlink(TABLESPACE_MAP, DEBUG1); } @@ -10978,9 +10980,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) * archived before returning. If archiving isn't enabled, the required WAL * needs to be transported via streaming replication (hopefully with * wal_keep_segments set high enough), or some more exotic mechanism like - * polling and copying files from pg_wal with script. We have no - * knowledge of those mechanisms, so it's up to the user to ensure that he - * gets all the required WAL. + * polling and copying files from pg_wal with script. We have no knowledge + * of those mechanisms, so it's up to the user to ensure that he gets all + * the required WAL. * * We wait until both the last WAL file filled during backup and the * history file have been archived, and assume that the alphabetic sorting @@ -10990,8 +10992,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) * We wait forever, since archive_command is supposed to work and we * assume the admin wanted his backup to work completely. If you don't * wish to wait, then either waitforarchive should be passed in as false, - * or you can set statement_timeout. Also, some notices are - * issued to clue in anyone who might be doing this interactively. + * or you can set statement_timeout. Also, some notices are issued to + * clue in anyone who might be doing this interactively. */ if (waitforarchive && XLogArchivingActive()) { @@ -11717,8 +11719,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * little chance that the problem will just go away, but * PANIC is not good for availability either, especially * in hot standby mode. So, we treat that the same as - * disconnection, and retry from archive/pg_wal again. - * The WAL in the archive should be identical to what was + * disconnection, and retry from archive/pg_wal again. The + * WAL in the archive should be identical to what was * streamed, so it's unlikely that it helps, but one can * hope... */ @@ -11881,9 +11883,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * not open already. Also read the timeline history * file if we haven't initialized timeline history * yet; it should be streamed over and present in - * pg_wal by now. Use XLOG_FROM_STREAM so that - * source info is set correctly and XLogReceiptTime - * isn't changed. + * pg_wal by now. Use XLOG_FROM_STREAM so that source + * info is set correctly and XLogReceiptTime isn't + * changed. */ if (readFile < 0) { diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index 8568c8abd6..b3223d691d 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -156,7 +156,8 @@ pg_stop_backup(PG_FUNCTION_ARGS) * Exclusive backups were typically started in a different connection, so * don't try to verify that status of backup is set to * SESSION_BACKUP_EXCLUSIVE in this function. Actual verification that an - * exclusive backup is in fact running is handled inside do_pg_stop_backup. + * exclusive backup is in fact running is handled inside + * do_pg_stop_backup. 
*/ stoppoint = do_pg_stop_backup(NULL, true, NULL); @@ -527,7 +528,7 @@ pg_walfile_name(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is in progress"), - errhint("pg_walfile_name() cannot be executed during recovery."))); + errhint("pg_walfile_name() cannot be executed during recovery."))); XLByteToPrevSeg(locationpoint, xlogsegno); XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno); diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index a3bd0b7f51..6a02738479 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -388,10 +388,10 @@ XLogRegisterBufData(uint8 block_id, char *data, int len) * * The flags that can be used here are: * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be - * included in the record. + * included in the record. * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for - * durability, which allows to avoid triggering WAL archiving and other - * background activity. + * durability, which allows to avoid triggering WAL archiving and other + * background activity. */ void XLogSetRecordFlags(uint8 flags) @@ -507,10 +507,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, hdr_rdt.data = hdr_scratch; /* - * Enforce consistency checks for this record if user is looking for - * it. Do this before at the beginning of this routine to give the - * possibility for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY - * directly for a record. + * Enforce consistency checks for this record if user is looking for it. + * Do this before at the beginning of this routine to give the possibility + * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for + * a record. */ if (wal_consistency_checking[rmid]) info |= XLR_CHECK_CONSISTENCY; @@ -576,9 +576,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, bkpb.fork_flags |= BKPBLOCK_WILL_INIT; /* - * If needs_backup is true or WAL checking is enabled for - * current resource manager, log a full-page write for the current - * block. + * If needs_backup is true or WAL checking is enabled for current + * resource manager, log a full-page write for the current block. */ include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0; @@ -645,8 +644,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE; /* - * If WAL consistency checking is enabled for the resource manager of - * this WAL record, a full-page image is included in the record + * If WAL consistency checking is enabled for the resource manager + * of this WAL record, a full-page image is included in the record * for the block modified. During redo, the full-page is replayed * only if BKPIMAGE_APPLY is set. */ diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index f077662946..c3b1371764 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -892,8 +892,8 @@ XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr) * that, except when caller has explicitly specified the offset that * falls somewhere there or when we are skipping multi-page * continuation record. 
It doesn't matter though because - * ReadPageInternal() is prepared to handle that and will read at least - * short page-header worth of data + * ReadPageInternal() is prepared to handle that and will read at + * least short page-header worth of data */ targetRecOff = tmpRecPtr % XLOG_BLCKSZ; diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index d7f2e55b09..7430a1f77b 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -805,22 +805,23 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ); /* - * If the desired page is currently read in and valid, we have nothing to do. + * If the desired page is currently read in and valid, we have nothing to + * do. * * The caller should've ensured that it didn't previously advance readOff - * past the valid limit of this timeline, so it doesn't matter if the current - * TLI has since become historical. + * past the valid limit of this timeline, so it doesn't matter if the + * current TLI has since become historical. */ if (lastReadPage == wantPage && state->readLen != 0 && - lastReadPage + state->readLen >= wantPage + Min(wantLength,XLOG_BLCKSZ-1)) + lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1)) return; /* * If we're reading from the current timeline, it hasn't become historical * and the page we're reading is after the last page read, we can again - * just carry on. (Seeking backwards requires a check to make sure the older - * page isn't on a prior timeline). + * just carry on. (Seeking backwards requires a check to make sure the + * older page isn't on a prior timeline). * * ThisTimeLineID might've become historical since we last looked, but the * caller is required not to read past the flush limit it saw at the time @@ -835,8 +836,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa /* * If we're just reading pages from a previously validated historical - * timeline and the timeline we're reading from is valid until the - * end of the current segment we can just keep reading. + * timeline and the timeline we're reading from is valid until the end of + * the current segment we can just keep reading. */ if (state->currTLIValidUntil != InvalidXLogRecPtr && state->currTLI != ThisTimeLineID && @@ -845,10 +846,10 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa return; /* - * If we reach this point we're either looking up a page for random access, - * the current timeline just became historical, or we're reading from a new - * segment containing a timeline switch. In all cases we need to determine - * the newest timeline on the segment. + * If we reach this point we're either looking up a page for random + * access, the current timeline just became historical, or we're reading + * from a new segment containing a timeline switch. In all cases we need + * to determine the newest timeline on the segment. * * If it's the current timeline we can just keep reading from here unless * we detect a timeline switch that makes the current timeline historical. @@ -861,26 +862,29 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa * We need to re-read the timeline history in case it's been changed * by a promotion or replay from a cascaded replica. 
*/ - List *timelineHistory = readTimeLineHistory(ThisTimeLineID); + List *timelineHistory = readTimeLineHistory(ThisTimeLineID); - XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1; + XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1; Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize); - /* Find the timeline of the last LSN on the segment containing wantPage. */ + /* + * Find the timeline of the last LSN on the segment containing + * wantPage. + */ state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory); state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory, - &state->nextTLI); + &state->nextTLI); Assert(state->currTLIValidUntil == InvalidXLogRecPtr || - wantPage + wantLength < state->currTLIValidUntil); + wantPage + wantLength < state->currTLIValidUntil); list_free_deep(timelineHistory); elog(DEBUG3, "switched to timeline %u valid until %X/%X", - state->currTLI, - (uint32)(state->currTLIValidUntil >> 32), - (uint32)(state->currTLIValidUntil)); + state->currTLI, + (uint32) (state->currTLIValidUntil >> 32), + (uint32) (state->currTLIValidUntil)); } } @@ -929,21 +933,22 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, * * We have to do it each time through the loop because if we're in * recovery as a cascading standby, the current timeline might've - * become historical. We can't rely on RecoveryInProgress() because - * in a standby configuration like + * become historical. We can't rely on RecoveryInProgress() because in + * a standby configuration like * - * A => B => C + * A => B => C * * if we're a logical decoding session on C, and B gets promoted, our * timeline will change while we remain in recovery. * * We can't just keep reading from the old timeline as the last WAL - * archive in the timeline will get renamed to .partial by StartupXLOG(). + * archive in the timeline will get renamed to .partial by + * StartupXLOG(). * * If that happens after our caller updated ThisTimeLineID but before * we actually read the xlog page, we might still try to read from the - * old (now renamed) segment and fail. There's not much we can do about - * this, but it can only happen when we're a leaf of a cascading + * old (now renamed) segment and fail. There's not much we can do + * about this, but it can only happen when we're a leaf of a cascading * standby whose master gets promoted while we're decoding, so a * one-off ERROR isn't too bad. 
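/*
 * Illustrative sketch, not part of the patch: the endOfSegment expression in
 * the XLogReadDetermineTimeline hunk above rounds a WAL position up to the
 * last byte of its containing segment before looking up that point in the
 * timeline history.  Standalone version of the arithmetic; DEMO_SEG_SIZE and
 * demo_end_of_segment are illustrative stand-ins for XLogSegSize and the
 * inline expression.
 */
#include <stdint.h>

#define DEMO_SEG_SIZE ((uint64_t) 16 * 1024 * 1024)	/* assumed 16MB segments */

static uint64_t
demo_end_of_segment(uint64_t ptr)
{
	/* first byte of the next segment, minus one; stays in ptr's segment */
	return ((ptr / DEMO_SEG_SIZE) + 1) * DEMO_SEG_SIZE - 1;
}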
*/ diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 806db7f35e..cd82cb9f29 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -1125,8 +1125,10 @@ doDeletion(const ObjectAddress *object, int flags) heap_drop_with_catalog(object->objectId); } - /* for a sequence, in addition to dropping the heap, also - * delete pg_sequence tuple */ + /* + * for a sequence, in addition to dropping the heap, also + * delete pg_sequence tuple + */ if (relKind == RELKIND_SEQUENCE) DeleteSequenceTuple(object->objectId); break; @@ -1942,7 +1944,7 @@ find_expr_references_walker(Node *node, } else if (IsA(node, NextValueExpr)) { - NextValueExpr *nve = (NextValueExpr *) node; + NextValueExpr *nve = (NextValueExpr *) node; add_object_address(OCLASS_CLASS, nve->seqid, 0, context->addrs); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 0f1547b567..fa926048e1 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -1762,10 +1762,10 @@ heap_drop_with_catalog(Oid relid) /* * To drop a partition safely, we must grab exclusive lock on its parent, * because another backend might be about to execute a query on the parent - * table. If it relies on previously cached partition descriptor, then - * it could attempt to access the just-dropped relation as its partition. - * We must therefore take a table lock strong enough to prevent all - * queries on the table from proceeding until we commit and send out a + * table. If it relies on previously cached partition descriptor, then it + * could attempt to access the just-dropped relation as its partition. We + * must therefore take a table lock strong enough to prevent all queries + * on the table from proceeding until we commit and send out a * shared-cache-inval notice that will make them update their index lists. 
*/ tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 3dfb8fa4f9..6bc05cab3a 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -854,7 +854,7 @@ get_object_address(ObjectType objtype, Node *object, objlist = castNode(List, object); domaddr = get_object_address_type(OBJECT_DOMAIN, - linitial_node(TypeName, objlist), + linitial_node(TypeName, objlist), missing_ok); constrname = strVal(lsecond(objlist)); @@ -878,7 +878,7 @@ get_object_address(ObjectType objtype, Node *object, case OBJECT_PUBLICATION: case OBJECT_SUBSCRIPTION: address = get_object_address_unqualified(objtype, - (Value *) object, missing_ok); + (Value *) object, missing_ok); break; case OBJECT_TYPE: case OBJECT_DOMAIN: @@ -1345,7 +1345,7 @@ get_object_address_relobject(ObjectType objtype, List *object, if (relation != NULL) heap_close(relation, AccessShareLock); - relation = NULL; /* department of accident prevention */ + relation = NULL; /* department of accident prevention */ return address; } @@ -1762,7 +1762,7 @@ get_object_address_publication_rel(List *object, relname = linitial(object); relation = relation_openrv_extended(makeRangeVarFromNameList(relname), - AccessShareLock, missing_ok); + AccessShareLock, missing_ok); if (!relation) return address; @@ -2138,7 +2138,7 @@ pg_get_object_address(PG_FUNCTION_ARGS) if (list_length(name) != 1) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("name list length must be exactly %d", 1))); + errmsg("name list length must be exactly %d", 1))); objnode = linitial(name); break; case OBJECT_TYPE: @@ -2166,18 +2166,18 @@ pg_get_object_address(PG_FUNCTION_ARGS) case OBJECT_FUNCTION: case OBJECT_AGGREGATE: case OBJECT_OPERATOR: - { - ObjectWithArgs *owa = makeNode(ObjectWithArgs); + { + ObjectWithArgs *owa = makeNode(ObjectWithArgs); - owa->objname = name; - owa->objargs = args; - objnode = (Node *) owa; - break; - } + owa->objname = name; + owa->objargs = args; + objnode = (Node *) owa; + break; + } case OBJECT_LARGEOBJECT: /* already handled above */ break; - /* no default, to let compiler warn about missing case */ + /* no default, to let compiler warn about missing case */ } if (objnode == NULL) @@ -3370,7 +3370,7 @@ getObjectDescription(const ObjectAddress *object) { HeapTuple tup; char *pubname; - Form_pg_publication_rel prform; + Form_pg_publication_rel prform; tup = SearchSysCache1(PUBLICATIONREL, ObjectIdGetDatum(object->objectId)); @@ -4896,7 +4896,7 @@ getObjectIdentityParts(const ObjectAddress *object, { HeapTuple tup; char *pubname; - Form_pg_publication_rel prform; + Form_pg_publication_rel prform; tup = SearchSysCache1(PUBLICATIONREL, ObjectIdGetDatum(object->objectId)); @@ -5012,8 +5012,8 @@ getOpFamilyIdentity(StringInfo buffer, Oid opfid, List **object) if (object) *object = list_make3(pstrdup(NameStr(amForm->amname)), - pstrdup(schema), - pstrdup(NameStr(opfForm->opfname))); + pstrdup(schema), + pstrdup(NameStr(opfForm->opfname))); ReleaseSysCache(amTup); ReleaseSysCache(opfTup); diff --git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c index ede920955d..30cd0cba19 100644 --- a/src/backend/catalog/pg_collation.c +++ b/src/backend/catalog/pg_collation.c @@ -80,12 +80,12 @@ CollationCreate(const char *collname, Oid collnamespace, if (if_not_exists) { ereport(NOTICE, - (errcode(ERRCODE_DUPLICATE_OBJECT), - collencoding == -1 - ? 
errmsg("collation \"%s\" already exists, skipping", - collname) - : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping", - collname, pg_encoding_to_char(collencoding)))); + (errcode(ERRCODE_DUPLICATE_OBJECT), + collencoding == -1 + ? errmsg("collation \"%s\" already exists, skipping", + collname) + : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping", + collname, pg_encoding_to_char(collencoding)))); return InvalidOid; } else @@ -94,8 +94,8 @@ CollationCreate(const char *collname, Oid collnamespace, collencoding == -1 ? errmsg("collation \"%s\" already exists", collname) - : errmsg("collation \"%s\" for encoding \"%s\" already exists", - collname, pg_encoding_to_char(collencoding)))); + : errmsg("collation \"%s\" for encoding \"%s\" already exists", + collname, pg_encoding_to_char(collencoding)))); } /* open pg_collation; see below about the lock level */ @@ -123,16 +123,16 @@ CollationCreate(const char *collname, Oid collnamespace, { heap_close(rel, NoLock); ereport(NOTICE, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("collation \"%s\" already exists, skipping", - collname))); + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("collation \"%s\" already exists, skipping", + collname))); return InvalidOid; } else ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("collation \"%s\" already exists", - collname))); + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("collation \"%s\" already exists", + collname))); } tupDesc = RelationGetDescr(rel); diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index 3e0db69998..d616df62c1 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -577,9 +577,9 @@ getOwnedSequences(Oid relid, AttrNumber attnum) Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); /* - * We assume any auto or internal dependency of a sequence on a column must be - * what we are looking for. (We need the relkind test because indexes - * can also have auto dependencies on columns.) + * We assume any auto or internal dependency of a sequence on a column + * must be what we are looking for. (We need the relkind test because + * indexes can also have auto dependencies on columns.) 
*/ if (deprec->classid == RelationRelationId && deprec->objsubid == 0 && diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c index 04214fc203..e5fb52cfbf 100644 --- a/src/backend/catalog/pg_inherits.c +++ b/src/backend/catalog/pg_inherits.c @@ -38,8 +38,8 @@ */ typedef struct SeenRelsEntry { - Oid rel_id; /* relation oid */ - ListCell *numparents_cell; /* corresponding list cell */ + Oid rel_id; /* relation oid */ + ListCell *numparents_cell; /* corresponding list cell */ } SeenRelsEntry; /* @@ -167,8 +167,8 @@ List * find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents) { /* hash table for O(1) rel_oid -> rel_numparents cell lookup */ - HTAB *seen_rels; - HASHCTL ctl; + HTAB *seen_rels; + HASHCTL ctl; List *rels_list, *rel_numparents; ListCell *l; @@ -212,8 +212,8 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents) foreach(lc, currentchildren) { Oid child_oid = lfirst_oid(lc); - bool found; - SeenRelsEntry *hash_entry; + bool found; + SeenRelsEntry *hash_entry; hash_entry = hash_search(seen_rels, &child_oid, HASH_ENTER, &found); if (found) diff --git a/src/backend/catalog/pg_namespace.c b/src/backend/catalog/pg_namespace.c index 613b963683..3e20d051c2 100644 --- a/src/backend/catalog/pg_namespace.c +++ b/src/backend/catalog/pg_namespace.c @@ -50,7 +50,7 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp) TupleDesc tupDesc; ObjectAddress myself; int i; - Acl *nspacl; + Acl *nspacl; /* sanity checks */ if (!nspName) diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index 92f9902173..17105f4f2c 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -73,7 +73,7 @@ check_publication_add_relation(Relation targetrel) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"%s\" is a system table", RelationGetRelationName(targetrel)), - errdetail("System tables cannot be added to publications."))); + errdetail("System tables cannot be added to publications."))); /* UNLOGGED and TEMP relations cannot be part of publication. */ if (!RelationNeedsWAL(targetrel)) @@ -81,7 +81,7 @@ check_publication_add_relation(Relation targetrel) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("table \"%s\" cannot be replicated", RelationGetRelationName(targetrel)), - errdetail("Temporary and unlogged relations cannot be replicated."))); + errdetail("Temporary and unlogged relations cannot be replicated."))); } /* @@ -119,8 +119,8 @@ publication_add_relation(Oid pubid, Relation targetrel, Oid relid = RelationGetRelid(targetrel); Oid prrelid; Publication *pub = GetPublication(pubid); - ObjectAddress myself, - referenced; + ObjectAddress myself, + referenced; rel = heap_open(PublicationRelRelationId, RowExclusiveLock); @@ -139,8 +139,8 @@ publication_add_relation(Oid pubid, Relation targetrel, ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("relation \"%s\" is already member of publication \"%s\"", - RelationGetRelationName(targetrel), pub->name))); + errmsg("relation \"%s\" is already member of publication \"%s\"", + RelationGetRelationName(targetrel), pub->name))); } check_publication_add_relation(targetrel); @@ -186,9 +186,9 @@ publication_add_relation(Oid pubid, Relation targetrel, List * GetRelationPublications(Oid relid) { - List *result = NIL; - CatCList *pubrellist; - int i; + List *result = NIL; + CatCList *pubrellist; + int i; /* Find all publications associated with the relation. 
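/*
 * Illustrative sketch, not part of the patch: the SeenRelsEntry hash table in
 * the find_all_inheritors hunk above gives O(1) lookup from a child OID to
 * its numparents list cell.  Roughly how such a dynahash table is set up in
 * backend code; demo_create_seen_rels is an illustrative name, and the
 * HASHCTL fields and flags follow the usual utils/hsearch.h API.
 */
#include "postgres.h"
#include "utils/hsearch.h"

static HTAB *
demo_create_seen_rels(void)
{
	HASHCTL		ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(Oid);	/* key: child relation OID */
	ctl.entrysize = sizeof(SeenRelsEntry);	/* struct shown in the hunk */
	ctl.hcxt = CurrentMemoryContext;

	return hash_create("seen_rels demo", 32, &ctl,
					   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}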
*/ pubrellist = SearchSysCacheList1(PUBLICATIONRELMAP, @@ -215,11 +215,11 @@ GetRelationPublications(Oid relid) List * GetPublicationRelations(Oid pubid) { - List *result; - Relation pubrelsrel; - ScanKeyData scankey; - SysScanDesc scan; - HeapTuple tup; + List *result; + Relation pubrelsrel; + ScanKeyData scankey; + SysScanDesc scan; + HeapTuple tup; /* Find all publications associated with the relation. */ pubrelsrel = heap_open(PublicationRelRelationId, AccessShareLock); @@ -235,7 +235,7 @@ GetPublicationRelations(Oid pubid) result = NIL; while (HeapTupleIsValid(tup = systable_getnext(scan))) { - Form_pg_publication_rel pubrel; + Form_pg_publication_rel pubrel; pubrel = (Form_pg_publication_rel) GETSTRUCT(tup); @@ -254,11 +254,11 @@ GetPublicationRelations(Oid pubid) List * GetAllTablesPublications(void) { - List *result; - Relation rel; - ScanKeyData scankey; - SysScanDesc scan; - HeapTuple tup; + List *result; + Relation rel; + ScanKeyData scankey; + SysScanDesc scan; + HeapTuple tup; /* Find all publications that are marked as for all tables. */ rel = heap_open(PublicationRelationId, AccessShareLock); @@ -304,8 +304,8 @@ GetAllTablesPublicationRelations(void) while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - Oid relid = HeapTupleGetOid(tuple); - Form_pg_class relForm = (Form_pg_class) GETSTRUCT(tuple); + Oid relid = HeapTupleGetOid(tuple); + Form_pg_class relForm = (Form_pg_class) GETSTRUCT(tuple); if (is_publishable_class(relid, relForm)) result = lappend_oid(result, relid); @@ -325,9 +325,9 @@ GetAllTablesPublicationRelations(void) Publication * GetPublication(Oid pubid) { - HeapTuple tup; - Publication *pub; - Form_pg_publication pubform; + HeapTuple tup; + Publication *pub; + Form_pg_publication pubform; tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid)); @@ -397,9 +397,9 @@ get_publication_oid(const char *pubname, bool missing_ok) char * get_publication_name(Oid pubid) { - HeapTuple tup; - char *pubname; - Form_pg_publication pubform; + HeapTuple tup; + char *pubname; + Form_pg_publication pubform; tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid)); @@ -421,10 +421,10 @@ Datum pg_get_publication_tables(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; - char *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0)); - Publication *publication; - List *tables; - ListCell **lcp; + char *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0)); + Publication *publication; + List *tables; + ListCell **lcp; /* stuff done only on the first call of the function */ if (SRF_IS_FIRSTCALL()) @@ -455,7 +455,7 @@ pg_get_publication_tables(PG_FUNCTION_ARGS) while (*lcp != NULL) { - Oid relid = lfirst_oid(*lcp); + Oid relid = lfirst_oid(*lcp); *lcp = lnext(*lcp); SRF_RETURN_NEXT(funcctx, ObjectIdGetDatum(relid)); diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c index 7dc21f1052..ab5f3719fc 100644 --- a/src/backend/catalog/pg_subscription.c +++ b/src/backend/catalog/pg_subscription.c @@ -44,11 +44,11 @@ static List *textarray_to_stringlist(ArrayType *textarray); Subscription * GetSubscription(Oid subid, bool missing_ok) { - HeapTuple tup; - Subscription *sub; - Form_pg_subscription subform; - Datum datum; - bool isnull; + HeapTuple tup; + Subscription *sub; + Form_pg_subscription subform; + Datum datum; + bool isnull; tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid)); @@ -115,11 +115,11 @@ GetSubscription(Oid subid, bool missing_ok) int CountDBSubscriptions(Oid dbid) { - int nsubs = 0; - Relation rel; - ScanKeyData 
scankey; - SysScanDesc scan; - HeapTuple tup; + int nsubs = 0; + Relation rel; + ScanKeyData scankey; + SysScanDesc scan; + HeapTuple tup; rel = heap_open(SubscriptionRelationId, RowExclusiveLock); @@ -181,8 +181,8 @@ get_subscription_oid(const char *subname, bool missing_ok) char * get_subscription_name(Oid subid) { - HeapTuple tup; - char *subname; + HeapTuple tup; + char *subname; Form_pg_subscription subform; tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid)); @@ -206,9 +206,10 @@ get_subscription_name(Oid subid) static List * textarray_to_stringlist(ArrayType *textarray) { - Datum *elems; - int nelems, i; - List *res = NIL; + Datum *elems; + int nelems, + i; + List *res = NIL; deconstruct_array(textarray, TEXTOID, -1, false, 'i', @@ -232,7 +233,7 @@ textarray_to_stringlist(ArrayType *textarray) */ Oid SetSubscriptionRelState(Oid subid, Oid relid, char state, - XLogRecPtr sublsn) + XLogRecPtr sublsn) { Relation rel; HeapTuple tup; @@ -248,8 +249,8 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state, ObjectIdGetDatum(subid)); /* - * If the record for given table does not exist yet create new - * record, otherwise update the existing one. + * If the record for given table does not exist yet create new record, + * otherwise update the existing one. */ if (!HeapTupleIsValid(tup)) { @@ -415,8 +416,8 @@ GetSubscriptionRelations(Oid subid) Relation rel; HeapTuple tup; int nkeys = 0; - ScanKeyData skey[2]; - SysScanDesc scan; + ScanKeyData skey[2]; + SysScanDesc scan; rel = heap_open(SubscriptionRelRelationId, AccessShareLock); @@ -430,12 +431,12 @@ GetSubscriptionRelations(Oid subid) while (HeapTupleIsValid(tup = systable_getnext(scan))) { - Form_pg_subscription_rel subrel; - SubscriptionRelState *relstate; + Form_pg_subscription_rel subrel; + SubscriptionRelState *relstate; subrel = (Form_pg_subscription_rel) GETSTRUCT(tup); - relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState)); + relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState)); relstate->relid = subrel->srrelid; relstate->state = subrel->srsubstate; relstate->lsn = subrel->srsublsn; @@ -462,8 +463,8 @@ GetSubscriptionNotReadyRelations(Oid subid) Relation rel; HeapTuple tup; int nkeys = 0; - ScanKeyData skey[2]; - SysScanDesc scan; + ScanKeyData skey[2]; + SysScanDesc scan; rel = heap_open(SubscriptionRelRelationId, AccessShareLock); @@ -482,12 +483,12 @@ GetSubscriptionNotReadyRelations(Oid subid) while (HeapTupleIsValid(tup = systable_getnext(scan))) { - Form_pg_subscription_rel subrel; - SubscriptionRelState *relstate; + Form_pg_subscription_rel subrel; + SubscriptionRelState *relstate; subrel = (Form_pg_subscription_rel) GETSTRUCT(tup); - relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState)); + relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState)); relstate->relid = subrel->srrelid; relstate->state = subrel->srsubstate; relstate->lsn = subrel->srsublsn; diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index a4b949d8c7..4d3fe8c745 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -428,7 +428,7 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre address = get_object_address_rv(stmt->objectType, stmt->relation, (List *) stmt->object, - &rel, AccessExclusiveLock, false); + &rel, AccessExclusiveLock, false); /* * If a relation was involved, it would have been opened and locked. 
We diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 404acb2deb..ecdd8950ee 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -1275,7 +1275,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel, nrels, i; ListCell *lc; - bool has_child; + bool has_child; /* * Find all members of inheritance set. We only need AccessShareLock on diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c index 9264d7fc51..110fb7ef65 100644 --- a/src/backend/commands/collationcmds.c +++ b/src/backend/commands/collationcmds.c @@ -268,9 +268,9 @@ AlterCollation(AlterCollationStmt *stmt) elog(ERROR, "invalid collation version change"); else if (oldversion && newversion && strcmp(newversion, oldversion) != 0) { - bool nulls[Natts_pg_collation]; - bool replaces[Natts_pg_collation]; - Datum values[Natts_pg_collation]; + bool nulls[Natts_pg_collation]; + bool replaces[Natts_pg_collation]; + Datum values[Natts_pg_collation]; ereport(NOTICE, (errmsg("changing version from %s to %s", @@ -379,8 +379,8 @@ get_icu_language_tag(const char *localename) uloc_toLanguageTag(localename, buf, sizeof(buf), TRUE, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not convert locale name \"%s\" to language tag: %s", - localename, u_errorName(status)))); + (errmsg("could not convert locale name \"%s\" to language tag: %s", + localename, u_errorName(status)))); return pstrdup(buf); } @@ -405,7 +405,7 @@ get_icu_locale_comment(const char *localename) return result; } -#endif /* USE_ICU */ +#endif /* USE_ICU */ Datum @@ -493,7 +493,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS) CollationCreate(localebuf, nspid, GetUserId(), COLLPROVIDER_LIBC, enc, localebuf, localebuf, - get_collation_actual_version(COLLPROVIDER_LIBC, localebuf), + get_collation_actual_version(COLLPROVIDER_LIBC, localebuf), if_not_exists); CommandCounterIncrement(); @@ -526,7 +526,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS) CollationCreate(alias, nspid, GetUserId(), COLLPROVIDER_LIBC, enc, locale, locale, - get_collation_actual_version(COLLPROVIDER_LIBC, locale), + get_collation_actual_version(COLLPROVIDER_LIBC, locale), true); CommandCounterIncrement(); } @@ -546,7 +546,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS) } else { - int i; + int i; /* * Start the loop at -1 to sneak in the root locale without too much @@ -563,7 +563,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS) Oid collid; if (i == -1) - name = ""; /* ICU root locale */ + name = ""; /* ICU root locale */ else name = ucol_getAvailable(i); @@ -572,7 +572,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS) collid = CollationCreate(psprintf("%s-x-icu", langtag), nspid, GetUserId(), COLLPROVIDER_ICU, -1, collcollate, collcollate, - get_collation_actual_version(COLLPROVIDER_ICU, collcollate), + get_collation_actual_version(COLLPROVIDER_ICU, collcollate), if_not_exists); CreateComments(collid, CollationRelationId, 0, @@ -585,29 +585,29 @@ pg_import_system_collations(PG_FUNCTION_ARGS) en = ucol_getKeywordValuesForLocale("collation", name, TRUE, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not get keyword values for locale \"%s\": %s", - name, u_errorName(status)))); + (errmsg("could not get keyword values for locale \"%s\": %s", + name, u_errorName(status)))); status = U_ZERO_ERROR; uenum_reset(en, &status); while ((val = uenum_next(en, NULL, &status))) { - char *localeid = psprintf("%s@collation=%s", name, val); + char *localeid = psprintf("%s@collation=%s", 
name, val); - langtag = get_icu_language_tag(localeid); + langtag = get_icu_language_tag(localeid); collcollate = U_ICU_VERSION_MAJOR_NUM >= 54 ? langtag : localeid; collid = CollationCreate(psprintf("%s-x-icu", langtag), - nspid, GetUserId(), COLLPROVIDER_ICU, -1, + nspid, GetUserId(), COLLPROVIDER_ICU, -1, collcollate, collcollate, - get_collation_actual_version(COLLPROVIDER_ICU, collcollate), + get_collation_actual_version(COLLPROVIDER_ICU, collcollate), if_not_exists); CreateComments(collid, CollationRelationId, 0, get_icu_locale_comment(localeid)); } if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not get keyword values for locale \"%s\": %s", - name, u_errorName(status)))); + (errmsg("could not get keyword values for locale \"%s\": %s", + name, u_errorName(status)))); uenum_close(en); } } diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 137b1ef42d..84b1a54cb9 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -111,7 +111,7 @@ typedef struct CopyStateData List *attnumlist; /* integer list of attnums to copy */ char *filename; /* filename, or NULL for STDIN/STDOUT */ bool is_program; /* is 'filename' a program to popen? */ - copy_data_source_cb data_source_cb; /* function for reading data*/ + copy_data_source_cb data_source_cb; /* function for reading data */ bool binary; /* binary format? */ bool oids; /* include OIDs? */ bool freeze; /* freeze rows on loading? */ @@ -532,7 +532,7 @@ CopySendEndOfRow(CopyState cstate) (void) pq_putmessage('d', fe_msgbuf->data, fe_msgbuf->len); break; case COPY_CALLBACK: - Assert(false); /* Not yet supported. */ + Assert(false); /* Not yet supported. */ break; } diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index c0ba2b451a..11038f6764 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -855,8 +855,8 @@ dropdb(const char *dbname, bool missing_ok) { ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), - errmsg("database \"%s\" is used by an active logical replication slot", - dbname), + errmsg("database \"%s\" is used by an active logical replication slot", + dbname), errdetail_plural("There is %d active slot", "There are %d active slots", nslots_active, nslots_active))); @@ -2134,7 +2134,8 @@ dbase_redo(XLogReaderState *record) * which can happen in some cases. * * This will lock out walsenders trying to connect to db-specific - * slots for logical decoding too, so it's safe for us to drop slots. + * slots for logical decoding too, so it's safe for us to drop + * slots. 
*/ LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock); ResolveRecoveryConflictWithDatabase(xlrec->db_id); diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c index 8da924517b..3ad4eea59e 100644 --- a/src/backend/commands/define.c +++ b/src/backend/commands/define.c @@ -336,7 +336,7 @@ defGetStringList(DefElem *def) if (nodeTag(def->arg) != T_List) elog(ERROR, "unrecognized node type: %d", (int) nodeTag(def->arg)); - foreach(cell, (List *)def->arg) + foreach(cell, (List *) def->arg) { Node *str = (Node *) lfirst(cell); diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c index a1a64fa8c9..9e307eb8af 100644 --- a/src/backend/commands/dropcmds.c +++ b/src/backend/commands/dropcmds.c @@ -102,7 +102,7 @@ RemoveObjects(DropStmt *stmt) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an aggregate function", - NameListToString(castNode(ObjectWithArgs, object)->objname)), + NameListToString(castNode(ObjectWithArgs, object)->objname)), errhint("Use DROP AGGREGATE to drop aggregate functions."))); ReleaseSysCache(tup); @@ -145,7 +145,7 @@ owningrel_does_not_exist_skipping(List *object, const char **msg, char **name) RangeVar *parent_rel; parent_object = list_truncate(list_copy(object), - list_length(object) - 1); + list_length(object) - 1); if (schema_does_not_exist_skipping(parent_object, msg, name)) return true; @@ -328,6 +328,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) case OBJECT_FUNCTION: { ObjectWithArgs *owa = castNode(ObjectWithArgs, object); + if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) && !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name)) { @@ -340,6 +341,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) case OBJECT_AGGREGATE: { ObjectWithArgs *owa = castNode(ObjectWithArgs, object); + if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) && !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name)) { @@ -352,6 +354,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) case OBJECT_OPERATOR: { ObjectWithArgs *owa = castNode(ObjectWithArgs, object); + if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) && !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name)) { @@ -390,7 +393,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) msg = gettext_noop("trigger \"%s\" for relation \"%s\" does not exist, skipping"); name = strVal(llast(castNode(List, object))); args = NameListToString(list_truncate(list_copy(castNode(List, object)), - list_length(castNode(List, object)) - 1)); + list_length(castNode(List, object)) - 1)); } break; case OBJECT_POLICY: @@ -399,7 +402,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) msg = gettext_noop("policy \"%s\" for relation \"%s\" does not exist, skipping"); name = strVal(llast(castNode(List, object))); args = NameListToString(list_truncate(list_copy(castNode(List, object)), - list_length(castNode(List, object)) - 1)); + list_length(castNode(List, object)) - 1)); } break; case OBJECT_EVENT_TRIGGER: @@ -412,7 +415,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object) msg = gettext_noop("rule \"%s\" for relation \"%s\" does not exist, skipping"); name = strVal(llast(castNode(List, object))); args = NameListToString(list_truncate(list_copy(castNode(List, object)), - list_length(castNode(List, object)) - 1)); + list_length(castNode(List, object)) - 1)); } break; case OBJECT_FDW: diff --git 
a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index d1983257c2..4cfab418a6 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -2250,7 +2250,7 @@ stringify_grantobjtype(GrantObjectType objtype) } elog(ERROR, "unrecognized grant object type: %d", (int) objtype); - return "???"; /* keep compiler quiet */ + return "???"; /* keep compiler quiet */ } /* @@ -2292,5 +2292,5 @@ stringify_adefprivs_objtype(GrantObjectType objtype) } elog(ERROR, "unrecognized grant object type: %d", (int) objtype); - return "???"; /* keep compiler quiet */ + return "???"; /* keep compiler quiet */ } diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index 96cf296210..ba85952baa 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -878,8 +878,8 @@ CreateForeignServer(CreateForeignServerStmt *stmt) ownerId = GetUserId(); /* - * Check that there is no other foreign server by this name. - * Do nothing if IF NOT EXISTS was enforced. + * Check that there is no other foreign server by this name. Do nothing if + * IF NOT EXISTS was enforced. */ if (GetForeignServerByName(stmt->servername, true) != NULL) { @@ -1171,20 +1171,20 @@ CreateUserMapping(CreateUserMappingStmt *stmt) if (stmt->if_not_exists) { ereport(NOTICE, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("user mapping for \"%s\" already exists for server %s, skipping", - MappingUserName(useId), - stmt->servername))); + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("user mapping for \"%s\" already exists for server %s, skipping", + MappingUserName(useId), + stmt->servername))); heap_close(rel, RowExclusiveLock); return InvalidObjectAddress; } else ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("user mapping for \"%s\" already exists for server %s", - MappingUserName(useId), - stmt->servername))); + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("user mapping for \"%s\" already exists for server %s", + MappingUserName(useId), + stmt->servername))); } fdw = GetForeignDataWrapper(srv->fdwid); @@ -1275,8 +1275,8 @@ AlterUserMapping(AlterUserMappingStmt *stmt) if (!OidIsValid(umId)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("user mapping for \"%s\" does not exist for the server", - MappingUserName(useId)))); + errmsg("user mapping for \"%s\" does not exist for the server", + MappingUserName(useId)))); user_mapping_ddl_aclcheck(useId, srv->serverid, stmt->servername); @@ -1390,13 +1390,13 @@ RemoveUserMapping(DropUserMappingStmt *stmt) if (!stmt->missing_ok) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("user mapping for \"%s\" does not exist for the server", - MappingUserName(useId)))); + errmsg("user mapping for \"%s\" does not exist for the server", + MappingUserName(useId)))); /* IF EXISTS specified, just note it */ ereport(NOTICE, - (errmsg("user mapping for \"%s\" does not exist for the server, skipping", - MappingUserName(useId)))); + (errmsg("user mapping for \"%s\" does not exist for the server, skipping", + MappingUserName(useId)))); return InvalidOid; } diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 1c8d88d336..8f06c23df9 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -74,7 +74,7 @@ parse_publication_options(List *options, *publish_delete = true; /* Parse options */ - foreach (lc, options) + foreach(lc, options) { DefElem *defel = (DefElem *) lfirst(lc); @@ -106,9 
+106,9 @@ parse_publication_options(List *options, errmsg("invalid publish list"))); /* Process the option list. */ - foreach (lc, publish_list) + foreach(lc, publish_list) { - char *publish_opt = (char *)lfirst(lc); + char *publish_opt = (char *) lfirst(lc); if (strcmp(publish_opt, "insert") == 0) *publish_insert = true; @@ -157,7 +157,7 @@ CreatePublication(CreatePublicationStmt *stmt) if (stmt->for_all_tables && !superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to create FOR ALL TABLES publication")))); + (errmsg("must be superuser to create FOR ALL TABLES publication")))); rel = heap_open(PublicationRelationId, RowExclusiveLock); @@ -228,7 +228,7 @@ CreatePublication(CreatePublicationStmt *stmt) */ static void AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, - HeapTuple tup) + HeapTuple tup) { bool nulls[Natts_pg_publication]; bool replaces[Natts_pg_publication]; @@ -237,7 +237,7 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, bool publish_insert; bool publish_update; bool publish_delete; - ObjectAddress obj; + ObjectAddress obj; parse_publication_options(stmt->options, &publish_given, &publish_insert, @@ -275,7 +275,7 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, } else { - List *relids = GetPublicationRelations(HeapTupleGetOid(tup)); + List *relids = GetPublicationRelations(HeapTupleGetOid(tup)); /* * We don't want to send too many individual messages, at some point @@ -283,11 +283,11 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, */ if (list_length(relids) < MAX_RELCACHE_INVAL_MSGS) { - ListCell *lc; + ListCell *lc; - foreach (lc, relids) + foreach(lc, relids) { - Oid relid = lfirst_oid(lc); + Oid relid = lfirst_oid(lc); CacheInvalidateRelcacheByRelid(relid); } @@ -330,7 +330,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel, PublicationAddTables(pubid, rels, false, stmt); else if (stmt->tableAction == DEFELEM_DROP) PublicationDropTables(pubid, rels, false); - else /* DEFELEM_SET */ + else /* DEFELEM_SET */ { List *oldrelids = GetPublicationRelations(pubid); List *delrels = NIL; @@ -358,6 +358,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel, { Relation oldrel = heap_open(oldrelid, ShareUpdateExclusiveLock); + delrels = lappend(delrels, oldrel); } } @@ -366,8 +367,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel, PublicationDropTables(pubid, delrels, true); /* - * Don't bother calculating the difference for adding, we'll catch - * and skip existing ones when doing catalog update. + * Don't bother calculating the difference for adding, we'll catch and + * skip existing ones when doing catalog update. 
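/*
 * Illustrative recap, not new patch content: the MAX_RELCACHE_INVAL_MSGS test
 * in the AlterPublicationOptions hunk above is a simple cost cutoff --
 * invalidate relations one at a time while the list is short, otherwise fall
 * back to resetting the whole relcache.  The if branch is condensed from the
 * hunk; the else branch (CacheInvalidateRelcacheAll) is an assumption based
 * on the comment's intent, not shown in this diff.
 */
if (list_length(relids) < MAX_RELCACHE_INVAL_MSGS)
{
	ListCell   *lc;

	foreach(lc, relids)
		CacheInvalidateRelcacheByRelid(lfirst_oid(lc));
}
else
	CacheInvalidateRelcacheAll();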
*/ PublicationAddTables(pubid, rels, true, stmt); @@ -386,8 +387,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel, void AlterPublication(AlterPublicationStmt *stmt) { - Relation rel; - HeapTuple tup; + Relation rel; + HeapTuple tup; rel = heap_open(PublicationRelationId, RowExclusiveLock); @@ -444,9 +445,9 @@ RemovePublicationById(Oid pubid) void RemovePublicationRelById(Oid proid) { - Relation rel; - HeapTuple tup; - Form_pg_publication_rel pubrel; + Relation rel; + HeapTuple tup; + Form_pg_publication_rel pubrel; rel = heap_open(PublicationRelRelationId, RowExclusiveLock); @@ -570,14 +571,14 @@ static void PublicationAddTables(Oid pubid, List *rels, bool if_not_exists, AlterPublicationStmt *stmt) { - ListCell *lc; + ListCell *lc; Assert(!stmt || !stmt->for_all_tables); foreach(lc, rels) { Relation rel = (Relation) lfirst(lc); - ObjectAddress obj; + ObjectAddress obj; /* Must be owner of the table or superuser. */ if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) @@ -602,9 +603,9 @@ PublicationAddTables(Oid pubid, List *rels, bool if_not_exists, static void PublicationDropTables(Oid pubid, List *rels, bool missing_ok) { - ObjectAddress obj; - ListCell *lc; - Oid prid; + ObjectAddress obj; + ListCell *lc; + Oid prid; foreach(lc, rels) { @@ -632,7 +633,7 @@ PublicationDropTables(Oid pubid, List *rels, bool missing_ok) /* * Internal workhorse for changing a publication owner */ - static void +static void AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) { Form_pg_publication form; @@ -663,8 +664,8 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) if (form->puballtables && !superuser_arg(newOwnerId)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to change owner of publication \"%s\"", - NameStr(form->pubname)), + errmsg("permission denied to change owner of publication \"%s\"", + NameStr(form->pubname)), errhint("The owner of a FOR ALL TABLES publication must be a superuser."))); } @@ -686,9 +687,9 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) ObjectAddress AlterPublicationOwner(const char *name, Oid newOwnerId) { - Oid subid; - HeapTuple tup; - Relation rel; + Oid subid; + HeapTuple tup; + Relation rel; ObjectAddress address; rel = heap_open(PublicationRelationId, RowExclusiveLock); @@ -719,8 +720,8 @@ AlterPublicationOwner(const char *name, Oid newOwnerId) void AlterPublicationOwner_oid(Oid subid, Oid newOwnerId) { - HeapTuple tup; - Relation rel; + HeapTuple tup; + Relation rel; rel = heap_open(PublicationRelationId, RowExclusiveLock); diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 0f7cf1dce8..568b3022f2 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -100,10 +100,10 @@ static Form_pg_sequence_data read_seq_tuple(Relation rel, Buffer *buf, HeapTuple seqdatatuple); static LOCKMODE alter_sequence_get_lock_level(List *options); static void init_params(ParseState *pstate, List *options, bool for_identity, - bool isInit, - Form_pg_sequence seqform, - bool *changed_seqform, - Form_pg_sequence_data seqdataform, List **owned_by); + bool isInit, + Form_pg_sequence seqform, + bool *changed_seqform, + Form_pg_sequence_data seqdataform, List **owned_by); static void do_setval(Oid relid, int64 next, bool iscalled); static void process_owned_by(Relation seqrel, List *owned_by, bool for_identity); @@ -117,7 +117,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq) { 
FormData_pg_sequence seqform; FormData_pg_sequence_data seqdataform; - bool changed_seqform = false; /* not used here */ + bool changed_seqform = false; /* not used here */ List *owned_by; CreateStmt *stmt = makeNode(CreateStmt); Oid seqoid; @@ -703,9 +703,9 @@ nextval_internal(Oid relid, bool check_permissions) snprintf(buf, sizeof(buf), INT64_FORMAT, maxv); ereport(ERROR, - (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - errmsg("nextval: reached maximum value of sequence \"%s\" (%s)", - RelationGetRelationName(seqrel), buf))); + (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + errmsg("nextval: reached maximum value of sequence \"%s\" (%s)", + RelationGetRelationName(seqrel), buf))); } next = minv; } @@ -726,9 +726,9 @@ nextval_internal(Oid relid, bool check_permissions) snprintf(buf, sizeof(buf), INT64_FORMAT, minv); ereport(ERROR, - (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - errmsg("nextval: reached minimum value of sequence \"%s\" (%s)", - RelationGetRelationName(seqrel), buf))); + (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + errmsg("nextval: reached minimum value of sequence \"%s\" (%s)", + RelationGetRelationName(seqrel), buf))); } next = maxv; } @@ -1390,7 +1390,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, /* AS type */ if (as_type != NULL) { - Oid newtypid = typenameTypeId(pstate, defGetTypeName(as_type)); + Oid newtypid = typenameTypeId(pstate, defGetTypeName(as_type)); if (newtypid != INT2OID && newtypid != INT4OID && @@ -1399,7 +1399,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), for_identity ? errmsg("identity column type must be smallint, integer, or bigint") - : errmsg("sequence type must be smallint, integer, or bigint"))); + : errmsg("sequence type must be smallint, integer, or bigint"))); if (!isInit) { @@ -1411,11 +1411,11 @@ init_params(ParseState *pstate, List *options, bool for_identity, */ if ((seqform->seqtypid == INT2OID && seqform->seqmax == PG_INT16_MAX) || (seqform->seqtypid == INT4OID && seqform->seqmax == PG_INT32_MAX) || - (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX)) + (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX)) reset_max_value = true; if ((seqform->seqtypid == INT2OID && seqform->seqmin == PG_INT16_MIN) || (seqform->seqtypid == INT4OID && seqform->seqmin == PG_INT32_MIN) || - (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN)) + (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN)) reset_min_value = true; } @@ -1479,7 +1479,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, seqform->seqmax = PG_INT64_MAX; } else - seqform->seqmax = -1; /* descending seq */ + seqform->seqmax = -1; /* descending seq */ *changed_seqform = true; seqdataform->log_cnt = 0; } @@ -1494,8 +1494,8 @@ init_params(ParseState *pstate, List *options, bool for_identity, ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("MAXVALUE (%s) is out of range for sequence data type %s", - bufx, format_type_be(seqform->seqtypid)))); + errmsg("MAXVALUE (%s) is out of range for sequence data type %s", + bufx, format_type_be(seqform->seqtypid)))); } /* MINVALUE (null arg means NO MINVALUE) */ @@ -1518,7 +1518,7 @@ init_params(ParseState *pstate, List *options, bool for_identity, seqform->seqmin = PG_INT64_MIN; } else - seqform->seqmin = 1; /* ascending seq */ + seqform->seqmin = 1; /* ascending seq */ *changed_seqform = true; seqdataform->log_cnt = 0; } @@ -1533,8 
+1533,8 @@ init_params(ParseState *pstate, List *options, bool for_identity, ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("MINVALUE (%s) is out of range for sequence data type %s", - bufm, format_type_be(seqform->seqtypid)))); + errmsg("MINVALUE (%s) is out of range for sequence data type %s", + bufm, format_type_be(seqform->seqtypid)))); } /* crosscheck min/max */ @@ -1560,9 +1560,9 @@ init_params(ParseState *pstate, List *options, bool for_identity, else if (isInit) { if (seqform->seqincrement > 0) - seqform->seqstart = seqform->seqmin; /* ascending seq */ + seqform->seqstart = seqform->seqmin; /* ascending seq */ else - seqform->seqstart = seqform->seqmax; /* descending seq */ + seqform->seqstart = seqform->seqmax; /* descending seq */ *changed_seqform = true; } diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index 94865b395b..2b3785f394 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -90,8 +90,8 @@ CreateStatistics(CreateStatsStmt *stmt) { ereport(NOTICE, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("statistics object \"%s\" already exists, skipping", - namestr))); + errmsg("statistics object \"%s\" already exists, skipping", + namestr))); return InvalidObjectAddress; } diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index 1f7274bc57..89358a4ec3 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -94,7 +94,7 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given, *synchronous_commit = NULL; /* Parse options */ - foreach (lc, options) + foreach(lc, options) { DefElem *defel = (DefElem *) lfirst(lc); @@ -200,8 +200,8 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given, } /* - * Do additional checking for disallowed combination when - * slot_name = NONE was used. + * Do additional checking for disallowed combination when slot_name = NONE + * was used. */ if (slot_name && *slot_name_given && !*slot_name) { @@ -367,7 +367,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) values[Anum_pg_subscription_subsynccommit - 1] = CStringGetTextDatum(synchronous_commit); values[Anum_pg_subscription_subpublications - 1] = - publicationListToArray(publications); + publicationListToArray(publications); tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); @@ -386,12 +386,12 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) */ if (connect) { - XLogRecPtr lsn; - char *err; - WalReceiverConn *wrconn; - List *tables; - ListCell *lc; - char table_state; + XLogRecPtr lsn; + char *err; + WalReceiverConn *wrconn; + List *tables; + ListCell *lc; + char table_state; /* Try to connect to the publisher. */ wrconn = walrcv_connect(conninfo, true, stmt->subname, &err); @@ -412,7 +412,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) * info. */ tables = fetch_table_list(wrconn, publications); - foreach (lc, tables) + foreach(lc, tables) { RangeVar *rv = (RangeVar *) lfirst(lc); Oid relid; @@ -431,9 +431,9 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) (errmsg("synchronized table states"))); /* - * If requested, create permanent slot for the subscription. - * We won't use the initial snapshot for anything, so no need - * to export it. + * If requested, create permanent slot for the subscription. We + * won't use the initial snapshot for anything, so no need to + * export it. 
*/ if (create_slot) { @@ -442,8 +442,8 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) walrcv_create_slot(wrconn, slotname, false, CRS_NOEXPORT_SNAPSHOT, &lsn); ereport(NOTICE, - (errmsg("created replication slot \"%s\" on publisher", - slotname))); + (errmsg("created replication slot \"%s\" on publisher", + slotname))); } } PG_CATCH(); @@ -478,7 +478,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) static void AlterSubscription_refresh(Subscription *sub, bool copy_data) { - char *err; + char *err; List *pubrel_names; List *subrel_states; Oid *subrel_local_oids; @@ -505,31 +505,31 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data) subrel_states = GetSubscriptionRelations(sub->oid); /* - * Build qsorted array of local table oids for faster lookup. - * This can potentially contain all tables in the database so - * speed of lookup is important. + * Build qsorted array of local table oids for faster lookup. This can + * potentially contain all tables in the database so speed of lookup is + * important. */ subrel_local_oids = palloc(list_length(subrel_states) * sizeof(Oid)); off = 0; foreach(lc, subrel_states) { SubscriptionRelState *relstate = (SubscriptionRelState *) lfirst(lc); + subrel_local_oids[off++] = relstate->relid; } qsort(subrel_local_oids, list_length(subrel_states), sizeof(Oid), oid_cmp); /* - * Walk over the remote tables and try to match them to locally - * known tables. If the table is not known locally create a new state - * for it. + * Walk over the remote tables and try to match them to locally known + * tables. If the table is not known locally create a new state for it. * * Also builds array of local oids of remote tables for the next step. */ off = 0; pubrel_local_oids = palloc(list_length(pubrel_names) * sizeof(Oid)); - foreach (lc, pubrel_names) + foreach(lc, pubrel_names) { RangeVar *rv = (RangeVar *) lfirst(lc); Oid relid; @@ -546,7 +546,7 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data) list_length(subrel_states), sizeof(Oid), oid_cmp)) { SetSubscriptionRelState(sub->oid, relid, - copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY, + copy_data ? 
SUBREL_STATE_INIT : SUBREL_STATE_READY, InvalidXLogRecPtr); ereport(NOTICE, (errmsg("added subscription for table %s.%s", @@ -556,20 +556,20 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data) } /* - * Next remove state for tables we should not care about anymore using - * the data we collected above + * Next remove state for tables we should not care about anymore using the + * data we collected above */ qsort(pubrel_local_oids, list_length(pubrel_names), sizeof(Oid), oid_cmp); for (off = 0; off < list_length(subrel_states); off++) { - Oid relid = subrel_local_oids[off]; + Oid relid = subrel_local_oids[off]; if (!bsearch(&relid, pubrel_local_oids, list_length(pubrel_names), sizeof(Oid), oid_cmp)) { - char *namespace; + char *namespace; RemoveSubscriptionRel(sub->oid, relid); @@ -596,7 +596,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt) HeapTuple tup; Oid subid; bool update_tuple = false; - Subscription *sub; + Subscription *sub; rel = heap_open(SubscriptionRelationId, RowExclusiveLock); @@ -644,7 +644,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt) if (slotname) values[Anum_pg_subscription_subslotname - 1] = - DirectFunctionCall1(namein, CStringGetDatum(slotname)); + DirectFunctionCall1(namein, CStringGetDatum(slotname)); else nulls[Anum_pg_subscription_subslotname - 1] = true; replaces[Anum_pg_subscription_subslotname - 1] = true; @@ -663,8 +663,8 @@ AlterSubscription(AlterSubscriptionStmt *stmt) case ALTER_SUBSCRIPTION_ENABLED: { - bool enabled, - enabled_given; + bool enabled, + enabled_given; parse_subscription_options(stmt->options, NULL, &enabled_given, &enabled, NULL, @@ -702,14 +702,14 @@ AlterSubscription(AlterSubscriptionStmt *stmt) case ALTER_SUBSCRIPTION_PUBLICATION: case ALTER_SUBSCRIPTION_PUBLICATION_REFRESH: { - bool copy_data; + bool copy_data; parse_subscription_options(stmt->options, NULL, NULL, NULL, NULL, NULL, NULL, ©_data, NULL); values[Anum_pg_subscription_subpublications - 1] = - publicationListToArray(stmt->publication); + publicationListToArray(stmt->publication); replaces[Anum_pg_subscription_subpublications - 1] = true; update_tuple = true; @@ -733,7 +733,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt) case ALTER_SUBSCRIPTION_REFRESH: { - bool copy_data; + bool copy_data; if (!sub->enabled) ereport(ERROR, @@ -791,14 +791,13 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) char *slotname; char originname[NAMEDATALEN]; char *err = NULL; - RepOriginId originid; - WalReceiverConn *wrconn = NULL; - StringInfoData cmd; + RepOriginId originid; + WalReceiverConn *wrconn = NULL; + StringInfoData cmd; /* - * Lock pg_subscription with AccessExclusiveLock to ensure - * that the launcher doesn't restart new worker during dropping - * the subscription + * Lock pg_subscription with AccessExclusiveLock to ensure that the + * launcher doesn't restart new worker during dropping the subscription */ rel = heap_open(SubscriptionRelationId, AccessExclusiveLock); @@ -833,8 +832,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) InvokeObjectDropHook(SubscriptionRelationId, subid, 0); /* - * Lock the subscription so nobody else can do anything with it - * (including the replication workers). + * Lock the subscription so nobody else can do anything with it (including + * the replication workers). 
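/*
 * Illustrative sketch, not part of the patch: AlterSubscription_refresh, a
 * few hunks above, builds a qsort'ed array of Oids and probes it with
 * bsearch() for membership tests, as its comments describe.  Standalone
 * version of that pattern; DemoOid, demo_oid_cmp and demo_oid_member are
 * illustrative stand-ins for Oid, oid_cmp and the inline bsearch() calls.
 */
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint32_t DemoOid;

static int
demo_oid_cmp(const void *a, const void *b)
{
	DemoOid		oa = *(const DemoOid *) a;
	DemoOid		ob = *(const DemoOid *) b;

	return (oa > ob) - (oa < ob);
}

static bool
demo_oid_member(DemoOid key, DemoOid *arr, size_t n)
{
	/* arr must already be sorted: qsort(arr, n, sizeof(DemoOid), demo_oid_cmp) */
	return bsearch(&key, arr, n, sizeof(DemoOid), demo_oid_cmp) != NULL;
}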
*/ LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock); @@ -895,7 +894,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) if (originid != InvalidRepOriginId) replorigin_drop(originid); - /* If there is no slot associated with the subscription, we can finish here. */ + /* + * If there is no slot associated with the subscription, we can finish + * here. + */ if (!slotname) { heap_close(rel, NoLock); @@ -903,8 +905,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) } /* - * Otherwise drop the replication slot at the publisher node using - * the replication connection. + * Otherwise drop the replication slot at the publisher node using the + * replication connection. */ load_file("libpqwalreceiver", false); @@ -922,14 +924,15 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) PG_TRY(); { - WalRcvExecResult *res; + WalRcvExecResult *res; + res = walrcv_exec(wrconn, cmd.data, 0, NULL); if (res->status != WALRCV_OK_COMMAND) ereport(ERROR, - (errmsg("could not drop the replication slot \"%s\" on publisher", - slotname), - errdetail("The error was: %s", res->err))); + (errmsg("could not drop the replication slot \"%s\" on publisher", + slotname), + errdetail("The error was: %s", res->err))); else ereport(NOTICE, (errmsg("dropped replication slot \"%s\" on publisher", @@ -973,9 +976,9 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) if (!superuser_arg(newOwnerId)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to change owner of subscription \"%s\"", - NameStr(form->subname)), - errhint("The owner of a subscription must be a superuser."))); + errmsg("permission denied to change owner of subscription \"%s\"", + NameStr(form->subname)), + errhint("The owner of a subscription must be a superuser."))); form->subowner = newOwnerId; CatalogTupleUpdate(rel, &tup->t_self, tup); @@ -1055,24 +1058,24 @@ AlterSubscriptionOwner_oid(Oid subid, Oid newOwnerId) static List * fetch_table_list(WalReceiverConn *wrconn, List *publications) { - WalRcvExecResult *res; - StringInfoData cmd; - TupleTableSlot *slot; - Oid tableRow[2] = {TEXTOID, TEXTOID}; - ListCell *lc; - bool first; - List *tablelist = NIL; + WalRcvExecResult *res; + StringInfoData cmd; + TupleTableSlot *slot; + Oid tableRow[2] = {TEXTOID, TEXTOID}; + ListCell *lc; + bool first; + List *tablelist = NIL; Assert(list_length(publications) > 0); initStringInfo(&cmd); appendStringInfo(&cmd, "SELECT DISTINCT t.schemaname, t.tablename\n" - " FROM pg_catalog.pg_publication_tables t\n" - " WHERE t.pubname IN ("); + " FROM pg_catalog.pg_publication_tables t\n" + " WHERE t.pubname IN ("); first = true; - foreach (lc, publications) + foreach(lc, publications) { - char *pubname = strVal(lfirst(lc)); + char *pubname = strVal(lfirst(lc)); if (first) first = false; diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index e259378051..7319aa597e 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -363,9 +363,9 @@ static ObjectAddress ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName, Node *newDefault, LOCKMODE lockmode); static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName, - Node *def, LOCKMODE lockmode); + Node *def, LOCKMODE lockmode); static ObjectAddress ATExecSetIdentity(Relation rel, const char *colName, - Node *def, LOCKMODE lockmode); + Node *def, LOCKMODE lockmode); 
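The AlterSubscription_refresh hunk earlier in this diff builds a sorted array of locally known table OIDs once and then probes it with bsearch for every table reported by the publisher. Below is a minimal standalone sketch of that sort-then-probe pattern; the FakeOid typedef, the fake_oid_cmp comparator, and the sample OID values are inventions of the sketch and stand in for the real Oid type and oid_cmp.

/*
 * Sketch only: sort the locally tracked OIDs once, then each remote
 * table becomes an O(log n) membership test.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int FakeOid;   /* stand-in for Oid, assumption of the sketch */

static int
fake_oid_cmp(const void *a, const void *b)
{
    FakeOid oa = *(const FakeOid *) a;
    FakeOid ob = *(const FakeOid *) b;

    if (oa < ob)
        return -1;
    if (oa > ob)
        return 1;
    return 0;
}

int
main(void)
{
    /* OIDs of tables the subscription already tracks (unsorted) */
    FakeOid subrel_local_oids[] = {16402, 16389, 16455, 16401};
    size_t  nsub = sizeof(subrel_local_oids) / sizeof(subrel_local_oids[0]);

    /* OIDs resolved from the publisher's table list */
    FakeOid pubrel_local_oids[] = {16389, 16470};
    size_t  npub = sizeof(pubrel_local_oids) / sizeof(pubrel_local_oids[0]);
    size_t  i;

    /* Sort once so each membership test below is a binary search */
    qsort(subrel_local_oids, nsub, sizeof(FakeOid), fake_oid_cmp);

    for (i = 0; i < npub; i++)
    {
        FakeOid relid = pubrel_local_oids[i];
        int     known = bsearch(&relid, subrel_local_oids, nsub,
                                sizeof(FakeOid), fake_oid_cmp) != NULL;

        printf("remote table %u: %s\n", relid,
               known ? "already subscribed" : "would add new state");
    }
    return 0;
}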
static ObjectAddress ATExecDropIdentity(Relation rel, const char *colName, bool missing_ok, LOCKMODE lockmode); static void ATPrepSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE lockmode); @@ -643,8 +643,8 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, descriptor->tdhasoid = (localHasOids || parentOidCount > 0); /* - * If a partitioned table doesn't have the system OID column, then none - * of its partitions should have it. + * If a partitioned table doesn't have the system OID column, then none of + * its partitions should have it. */ if (stmt->partbound && parentOidCount == 0 && localHasOids) ereport(ERROR, @@ -1112,9 +1112,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid, } /* - * Similarly, if we previously locked some other partition's heap, and - * the name we're looking up no longer refers to that relation, release - * the now-useless lock. + * Similarly, if we previously locked some other partition's heap, and the + * name we're looking up no longer refers to that relation, release the + * now-useless lock. */ if (relOid != oldRelOid && OidIsValid(state->partParentOid)) { @@ -2219,8 +2219,8 @@ MergeAttributes(List *schema, List *supers, char relpersistence, else ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" specified more than once", - coldef->colname))); + errmsg("column \"%s\" specified more than once", + coldef->colname))); } prev = rest; rest = next; @@ -4541,7 +4541,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, - &isnull[ex->attnum - 1]); + &isnull[ex->attnum - 1]); } /* @@ -5589,12 +5589,12 @@ static void ATPrepDropNotNull(Relation rel, bool recurse, bool recursing) { /* - * If the parent is a partitioned table, like check constraints, we do - * not support removing the NOT NULL while partitions exist. + * If the parent is a partitioned table, like check constraints, we do not + * support removing the NOT NULL while partitions exist. */ if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - PartitionDesc partdesc = RelationGetPartitionDesc(rel); + PartitionDesc partdesc = RelationGetPartitionDesc(rel); Assert(partdesc != NULL); if (partdesc->nparts > 0 && !recurse && !recursing) @@ -5639,8 +5639,8 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) if (get_attidentity(RelationGetRelid(rel), attnum)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("column \"%s\" of relation \"%s\" is an identity column", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" is an identity column", + colName, RelationGetRelationName(rel)))); /* * Check that the attribute is not in a primary key @@ -5768,7 +5768,7 @@ ATPrepSetNotNull(Relation rel, bool recurse, bool recursing) */ if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - PartitionDesc partdesc = RelationGetPartitionDesc(rel); + PartitionDesc partdesc = RelationGetPartitionDesc(rel); if (partdesc && partdesc->nparts > 0 && !recurse && !recursing) ereport(ERROR, @@ -5867,8 +5867,8 @@ ATExecColumnDefault(Relation rel, const char *colName, if (get_attidentity(RelationGetRelid(rel), attnum)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("column \"%s\" of relation \"%s\" is an identity column", - colName, RelationGetRelationName(rel)), + errmsg("column \"%s\" of relation \"%s\" is an identity column", + colName, RelationGetRelationName(rel)), newDefault ? 
0 : errhint("Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead."))); /* @@ -5959,8 +5959,8 @@ ATExecAddIdentity(Relation rel, const char *colName, if (attTup->atthasdef) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("column \"%s\" of relation \"%s\" already has a default value", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" already has a default value", + colName, RelationGetRelationName(rel)))); attTup->attidentity = cdef->identity; CatalogTupleUpdate(attrelation, &tuple->t_self, tuple); @@ -5986,7 +5986,7 @@ static ObjectAddress ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmode) { ListCell *option; - DefElem *generatedEl = NULL; + DefElem *generatedEl = NULL; HeapTuple tuple; Form_pg_attribute attTup; AttrNumber attnum; @@ -5995,7 +5995,7 @@ ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmod foreach(option, castNode(List, def)) { - DefElem *defel = lfirst_node(DefElem, option); + DefElem *defel = lfirst_node(DefElem, option); if (strcmp(defel->defname, "generated") == 0) { @@ -6036,8 +6036,8 @@ ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmod if (!attTup->attidentity) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("column \"%s\" of relation \"%s\" is not an identity column", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" is not an identity column", + colName, RelationGetRelationName(rel)))); if (generatedEl) { @@ -11137,7 +11137,7 @@ CreateInheritance(Relation child_rel, Relation parent_rel) inhseqno + 1, catalogRelation, parent_rel->rd_rel->relkind == - RELKIND_PARTITIONED_TABLE); + RELKIND_PARTITIONED_TABLE); /* Now we're done with pg_inherits */ heap_close(catalogRelation, RowExclusiveLock); diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 1566fb4607..0271788bf9 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -340,7 +340,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, foreach(lc, varList) { - TriggerTransition *tt = lfirst_node(TriggerTransition, lc); + TriggerTransition *tt = lfirst_node(TriggerTransition, lc); if (!(tt->isTable)) ereport(ERROR, @@ -359,21 +359,21 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is a partitioned table", RelationGetRelationName(rel)), - errdetail("Triggers on partitioned tables cannot have transition tables."))); + errdetail("Triggers on partitioned tables cannot have transition tables."))); if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is a foreign table", RelationGetRelationName(rel)), - errdetail("Triggers on foreign tables cannot have transition tables."))); + errdetail("Triggers on foreign tables cannot have transition tables."))); if (rel->rd_rel->relkind == RELKIND_VIEW) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is a view", RelationGetRelationName(rel)), - errdetail("Triggers on views cannot have transition tables."))); + errdetail("Triggers on views cannot have transition tables."))); if (stmt->timing != TRIGGER_TYPE_AFTER) ereport(ERROR, @@ -396,7 +396,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, if (newtablename != NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("NEW TABLE cannot be specified multiple 
times"))); + errmsg("NEW TABLE cannot be specified multiple times"))); newtablename = tt->name; } @@ -411,7 +411,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, if (oldtablename != NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("OLD TABLE cannot be specified multiple times"))); + errmsg("OLD TABLE cannot be specified multiple times"))); oldtablename = tt->name; } @@ -421,7 +421,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, strcmp(newtablename, oldtablename) == 0) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("OLD TABLE name and NEW TABLE name cannot be the same"))); + errmsg("OLD TABLE name and NEW TABLE name cannot be the same"))); } /* @@ -782,12 +782,12 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, if (oldtablename) values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein, - CStringGetDatum(oldtablename)); + CStringGetDatum(oldtablename)); else nulls[Anum_pg_trigger_tgoldtable - 1] = true; if (newtablename) values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein, - CStringGetDatum(newtablename)); + CStringGetDatum(newtablename)); else nulls[Anum_pg_trigger_tgnewtable - 1] = true; @@ -3412,7 +3412,8 @@ typedef struct AfterTriggersData AfterTriggerEventList events; /* deferred-event list */ int query_depth; /* current query list index */ AfterTriggerEventList *query_stack; /* events pending from each query */ - Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from each query */ + Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from + * each query */ Tuplestorestate **old_tuplestores; /* all old tuples from each query */ Tuplestorestate **new_tuplestores; /* all new tuples from each query */ int maxquerydepth; /* allocated len of above array */ @@ -3778,8 +3779,8 @@ AfterTriggerExecute(AfterTriggerEvent event, case AFTER_TRIGGER_FDW_FETCH: { Tuplestorestate *fdw_tuplestore = - GetTriggerTransitionTuplestore - (afterTriggers.fdw_tuplestores); + GetTriggerTransitionTuplestore + (afterTriggers.fdw_tuplestores); if (!tuplestore_gettupleslot(fdw_tuplestore, true, false, trig_tuple_slot1)) @@ -5130,7 +5131,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, Assert(oldtup != NULL); old_tuplestore = GetTriggerTransitionTuplestore - (afterTriggers.old_tuplestores); + (afterTriggers.old_tuplestores); tuplestore_puttuple(old_tuplestore, oldtup); } if ((event == TRIGGER_EVENT_INSERT && @@ -5143,14 +5144,14 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, Assert(newtup != NULL); new_tuplestore = GetTriggerTransitionTuplestore - (afterTriggers.new_tuplestores); + (afterTriggers.new_tuplestores); tuplestore_puttuple(new_tuplestore, newtup); } /* If transition tables are the only reason we're here, return. 
*/ if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) || - (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) || - (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row)) + (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) || + (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row)) return; } @@ -5253,7 +5254,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, { fdw_tuplestore = GetTriggerTransitionTuplestore - (afterTriggers.fdw_tuplestores); + (afterTriggers.fdw_tuplestores); new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH; } else diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c index b58d60c855..dfb95a1ed3 100644 --- a/src/backend/commands/tsearchcmds.c +++ b/src/backend/commands/tsearchcmds.c @@ -1685,7 +1685,7 @@ deserialize_deflist(Datum txt) *wsptr++ = '\0'; result = lappend(result, makeDefElem(pstrdup(workspace), - (Node *) makeString(pstrdup(startvalue)), -1)); + (Node *) makeString(pstrdup(startvalue)), -1)); state = CS_WAITKEY; } } @@ -1717,7 +1717,7 @@ deserialize_deflist(Datum txt) *wsptr++ = '\0'; result = lappend(result, makeDefElem(pstrdup(workspace), - (Node *) makeString(pstrdup(startvalue)), -1)); + (Node *) makeString(pstrdup(startvalue)), -1)); state = CS_WAITKEY; } } @@ -1732,7 +1732,7 @@ deserialize_deflist(Datum txt) *wsptr++ = '\0'; result = lappend(result, makeDefElem(pstrdup(workspace), - (Node *) makeString(pstrdup(startvalue)), -1)); + (Node *) makeString(pstrdup(startvalue)), -1)); state = CS_WAITKEY; } else @@ -1751,7 +1751,7 @@ deserialize_deflist(Datum txt) *wsptr++ = '\0'; result = lappend(result, makeDefElem(pstrdup(workspace), - (Node *) makeString(pstrdup(startvalue)), -1)); + (Node *) makeString(pstrdup(startvalue)), -1)); } else if (state != CS_WAITKEY) ereport(ERROR, diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 36d5f40f06..10d6ba9e04 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -1463,7 +1463,7 @@ AddRoleMems(const char *rolename, Oid roleid, ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), (errmsg("role \"%s\" is a member of role \"%s\"", - rolename, get_rolespec_name(memberRole))))); + rolename, get_rolespec_name(memberRole))))); /* * Check if entry for this role/member already exists; if so, give @@ -1478,7 +1478,7 @@ AddRoleMems(const char *rolename, Oid roleid, { ereport(NOTICE, (errmsg("role \"%s\" is already a member of role \"%s\"", - get_rolespec_name(memberRole), rolename))); + get_rolespec_name(memberRole), rolename))); ReleaseSysCache(authmem_tuple); continue; } @@ -1587,7 +1587,7 @@ DelRoleMems(const char *rolename, Oid roleid, { ereport(WARNING, (errmsg("role \"%s\" is not a member of role \"%s\"", - get_rolespec_name(memberRole), rolename))); + get_rolespec_name(memberRole), rolename))); continue; } diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 5b43a66bdc..56356de670 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -1337,7 +1337,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, */ initStringInfo(&buf); appendStringInfo(&buf, - _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"), + _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"), nkeep, OldestXmin); appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"), nunused); @@ -1912,8 +1912,8 @@ count_nondeletable_pages(Relation onerel, 
LVRelStats *vacrelstats) /* If we haven't prefetched this lot yet, do so now. */ if (prefetchedUntil > blkno) { - BlockNumber prefetchStart; - BlockNumber pblkno; + BlockNumber prefetchStart; + BlockNumber pblkno; prefetchStart = blkno & ~(PREFETCH_SIZE - 1); for (pblkno = prefetchStart; pblkno <= blkno; pblkno++) diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index 996acae6e0..a5d6574eaf 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -469,11 +469,11 @@ DefineView(ViewStmt *stmt, const char *queryString, if (stmt->withCheckOption == LOCAL_CHECK_OPTION) stmt->options = lappend(stmt->options, makeDefElem("check_option", - (Node *) makeString("local"), -1)); + (Node *) makeString("local"), -1)); else if (stmt->withCheckOption == CASCADED_CHECK_OPTION) stmt->options = lappend(stmt->options, makeDefElem("check_option", - (Node *) makeString("cascaded"), -1)); + (Node *) makeString("cascaded"), -1)); /* * Check that the view is auto-updatable if WITH CHECK OPTION was diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 7e85c66da3..7337d21d7d 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -413,12 +413,13 @@ ExecSupportsMarkRestore(Path *pathnode) return true; case T_CustomScan: - { - CustomPath *customPath = castNode(CustomPath, pathnode); - if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE) - return true; - return false; - } + { + CustomPath *customPath = castNode(CustomPath, pathnode); + + if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE) + return true; + return false; + } case T_Result: /* diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 4b1f634e21..07c8852fca 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -380,7 +380,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, hashtable->in_hash_funcs = hashtable->tab_hash_funcs; hashtable->cur_eq_funcs = hashtable->tab_eq_funcs; - key = NULL; /* flag to reference inputslot */ + key = NULL; /* flag to reference inputslot */ if (isnew) { diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index fb2ba3302c..4a899f1eb5 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -868,7 +868,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) estate->es_num_root_result_relations = 0; if (plannedstmt->nonleafResultRelations) { - int num_roots = list_length(plannedstmt->rootResultRelations); + int num_roots = list_length(plannedstmt->rootResultRelations); /* * Firstly, build ResultRelInfos for all the partitioned table @@ -876,7 +876,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) * triggers, if any. */ resultRelInfos = (ResultRelInfo *) - palloc(num_roots * sizeof(ResultRelInfo)); + palloc(num_roots * sizeof(ResultRelInfo)); resultRelInfo = resultRelInfos; foreach(l, plannedstmt->rootResultRelations) { @@ -900,7 +900,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) /* Simply lock the rest of them. */ foreach(l, plannedstmt->nonleafResultRelations) { - Index resultRelIndex = lfirst_int(l); + Index resultRelIndex = lfirst_int(l); /* We locked the roots above. 
*/ if (!list_member_int(plannedstmt->rootResultRelations, @@ -1919,13 +1919,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo, if (resultRelInfo->ri_PartitionRoot) { HeapTuple tuple = ExecFetchSlotTuple(slot); - TupleConversionMap *map; + TupleConversionMap *map; rel = resultRelInfo->ri_PartitionRoot; tupdesc = RelationGetDescr(rel); /* a reverse map */ map = convert_tuples_by_name(orig_tupdesc, tupdesc, - gettext_noop("could not convert row type")); + gettext_noop("could not convert row type")); if (map != NULL) { tuple = do_convert_tuple(tuple, map); @@ -1966,13 +1966,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo, { HeapTuple tuple = ExecFetchSlotTuple(slot); TupleDesc old_tupdesc = RelationGetDescr(rel); - TupleConversionMap *map; + TupleConversionMap *map; rel = resultRelInfo->ri_PartitionRoot; tupdesc = RelationGetDescr(rel); /* a reverse map */ map = convert_tuples_by_name(old_tupdesc, tupdesc, - gettext_noop("could not convert row type")); + gettext_noop("could not convert row type")); if (map != NULL) { tuple = do_convert_tuple(tuple, map); @@ -2008,13 +2008,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo, { HeapTuple tuple = ExecFetchSlotTuple(slot); TupleDesc old_tupdesc = RelationGetDescr(rel); - TupleConversionMap *map; + TupleConversionMap *map; rel = resultRelInfo->ri_PartitionRoot; tupdesc = RelationGetDescr(rel); /* a reverse map */ map = convert_tuples_by_name(old_tupdesc, tupdesc, - gettext_noop("could not convert row type")); + gettext_noop("could not convert row type")); if (map != NULL) { tuple = do_convert_tuple(tuple, map); @@ -3340,7 +3340,7 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("no partition of relation \"%s\" found for row", RelationGetRelationName(failed_rel)), - val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0)); + val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0)); } return result; @@ -3359,8 +3359,8 @@ ExecBuildSlotPartitionKeyDescription(Relation rel, bool *isnull, int maxfieldlen) { - StringInfoData buf; - PartitionKey key = RelationGetPartitionKey(rel); + StringInfoData buf; + PartitionKey key = RelationGetPartitionKey(rel); int partnatts = get_partition_natts(key); int i; Oid relid = RelationGetRelid(rel); diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index 9c98f5492e..0610180016 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -608,9 +608,9 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, /* * Also store the per-worker detail. * - * Worker instrumentation should be allocated in the same context as - * the regular instrumentation information, which is the per-query - * context. Switch into per-query memory context. + * Worker instrumentation should be allocated in the same context as the + * regular instrumentation information, which is the per-query context. + * Switch into per-query memory context. 
*/ oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt); ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation)); diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index 486ddf1762..5469cde1e0 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -259,7 +259,7 @@ ExecInitNode(Plan *node, EState *estate, int eflags) case T_NamedTuplestoreScan: result = (PlanState *) ExecInitNamedTuplestoreScan((NamedTuplestoreScan *) node, - estate, eflags); + estate, eflags); break; case T_WorkTableScan: diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index 6af8018b71..c6a66b6195 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -116,15 +116,15 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid, TupleTableSlot *searchslot, TupleTableSlot *outslot) { - HeapTuple scantuple; - ScanKeyData skey[INDEX_MAX_KEYS]; - IndexScanDesc scan; - SnapshotData snap; - TransactionId xwait; - Relation idxrel; - bool found; - - /* Open the index.*/ + HeapTuple scantuple; + ScanKeyData skey[INDEX_MAX_KEYS]; + IndexScanDesc scan; + SnapshotData snap; + TransactionId xwait; + Relation idxrel; + bool found; + + /* Open the index. */ idxrel = index_open(idxoid, RowExclusiveLock); /* Start an index scan. */ @@ -152,8 +152,8 @@ retry: snap.xmin : snap.xmax; /* - * If the tuple is locked, wait for locking transaction to finish - * and retry. + * If the tuple is locked, wait for locking transaction to finish and + * retry. */ if (TransactionIdIsValid(xwait)) { @@ -165,7 +165,7 @@ retry: /* Found tuple, try to lock it in the lockmode. */ if (found) { - Buffer buf; + Buffer buf; HeapUpdateFailureData hufd; HTSU_Result res; HeapTupleData locktup; @@ -177,7 +177,7 @@ retry: res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false), lockmode, LockWaitBlock, - false /* don't follow updates */, + false /* don't follow updates */ , &buf, &hufd); /* the tuple slot already has the buffer pinned */ ReleaseBuffer(buf); @@ -219,7 +219,7 @@ retry: * to use. */ static bool -tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot) +tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot) { Datum values[MaxTupleAttributeNumber]; bool isnull[MaxTupleAttributeNumber]; @@ -267,12 +267,12 @@ bool RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, TupleTableSlot *searchslot, TupleTableSlot *outslot) { - HeapTuple scantuple; - HeapScanDesc scan; - SnapshotData snap; - TransactionId xwait; - bool found; - TupleDesc desc = RelationGetDescr(rel); + HeapTuple scantuple; + HeapScanDesc scan; + SnapshotData snap; + TransactionId xwait; + bool found; + TupleDesc desc = RelationGetDescr(rel); Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor)); @@ -299,8 +299,8 @@ retry: snap.xmin : snap.xmax; /* - * If the tuple is locked, wait for locking transaction to finish - * and retry. + * If the tuple is locked, wait for locking transaction to finish and + * retry. */ if (TransactionIdIsValid(xwait)) { @@ -312,7 +312,7 @@ retry: /* Found tuple, try to lock it in the lockmode. 
*/ if (found) { - Buffer buf; + Buffer buf; HeapUpdateFailureData hufd; HTSU_Result res; HeapTupleData locktup; @@ -324,7 +324,7 @@ retry: res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false), lockmode, LockWaitBlock, - false /* don't follow updates */, + false /* don't follow updates */ , &buf, &hufd); /* the tuple slot already has the buffer pinned */ ReleaseBuffer(buf); @@ -363,10 +363,10 @@ retry: void ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) { - bool skip_tuple = false; - HeapTuple tuple; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; - Relation rel = resultRelInfo->ri_RelationDesc; + bool skip_tuple = false; + HeapTuple tuple; + ResultRelInfo *resultRelInfo = estate->es_result_relation_info; + Relation rel = resultRelInfo->ri_RelationDesc; /* For now we support only tables. */ Assert(rel->rd_rel->relkind == RELKIND_RELATION); @@ -379,7 +379,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) { slot = ExecBRInsertTriggers(estate, resultRelInfo, slot); - if (slot == NULL) /* "do nothing" */ + if (slot == NULL) /* "do nothing" */ skip_tuple = true; } @@ -420,10 +420,10 @@ void ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, TupleTableSlot *searchslot, TupleTableSlot *slot) { - bool skip_tuple = false; - HeapTuple tuple; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; - Relation rel = resultRelInfo->ri_RelationDesc; + bool skip_tuple = false; + HeapTuple tuple; + ResultRelInfo *resultRelInfo = estate->es_result_relation_info; + Relation rel = resultRelInfo->ri_RelationDesc; /* For now we support only tables. */ Assert(rel->rd_rel->relkind == RELKIND_RELATION); @@ -438,7 +438,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, &searchslot->tts_tuple->t_self, NULL, slot); - if (slot == NULL) /* "do nothing" */ + if (slot == NULL) /* "do nothing" */ skip_tuple = true; } @@ -482,9 +482,9 @@ void ExecSimpleRelationDelete(EState *estate, EPQState *epqstate, TupleTableSlot *searchslot) { - bool skip_tuple = false; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; - Relation rel = resultRelInfo->ri_RelationDesc; + bool skip_tuple = false; + ResultRelInfo *resultRelInfo = estate->es_result_relation_info; + Relation rel = resultRelInfo->ri_RelationDesc; /* For now we support only tables. 
*/ Assert(rel->rd_rel->relkind == RELKIND_RELATION); @@ -568,6 +568,6 @@ CheckSubscriptionRelkind(char relkind, const char *nspname, if (relkind != RELKIND_RELATION) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("logical replication target relation \"%s.%s\" is not a table", - nspname, relname))); + errmsg("logical replication target relation \"%s.%s\" is not a table", + nspname, relname))); } diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 08229bd6a7..cb2596cb31 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -826,14 +826,14 @@ void ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate) { PlannedStmt *stmt = estate->es_plannedstmt; - ListCell *lc; + ListCell *lc; foreach(lc, partitioned_rels) { ListCell *l; - Index rti = lfirst_int(lc); - bool is_result_rel = false; - Oid relid = getrelid(rti, estate->es_range_table); + Index rti = lfirst_int(lc); + bool is_result_rel = false; + Oid relid = getrelid(rti, estate->es_range_table); /* If this is a result relation, already locked in InitPlan */ foreach(l, stmt->nonleafResultRelations) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index c2b861828d..7eeda95af7 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -471,7 +471,7 @@ typedef struct AggStatePerGroupData * NULL and not auto-replace it with a later input value. Only the first * non-NULL input will be auto-substituted. */ -} AggStatePerGroupData; +} AggStatePerGroupData; /* * AggStatePerPhaseData - per-grouping-set-phase state @@ -515,7 +515,7 @@ typedef struct AggStatePerHashData AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */ AttrNumber *hashGrpColIdxHash; /* indices in hashtbl tuples */ Agg *aggnode; /* original Agg node, for numGroups etc. */ -} AggStatePerHashData; +} AggStatePerHashData; static void select_current_set(AggState *aggstate, int setno, bool is_hash); diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index a107545b83..aae5e3fa63 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -129,8 +129,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags) Assert(!(eflags & EXEC_FLAG_MARK)); /* - * Lock the non-leaf tables in the partition tree controlled by this - * node. It's a no-op for non-partitioned parent tables. + * Lock the non-leaf tables in the partition tree controlled by this node. + * It's a no-op for non-partitioned parent tables. */ ExecLockNonLeafAppendTables(node->partitioned_rels, estate); diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index d240f9c03e..c453362230 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -506,8 +506,9 @@ BitmapAdjustPrefetchIterator(BitmapHeapScanState *node, * In case of shared mode, we can not ensure that the current * blockno of the main iterator and that of the prefetch iterator * are same. It's possible that whatever blockno we are - * prefetching will be processed by another process. Therefore, we - * don't validate the blockno here as we do in non-parallel case. + * prefetching will be processed by another process. Therefore, + * we don't validate the blockno here as we do in non-parallel + * case. 
*/ if (prefetch_iterator) tbm_shared_iterate(prefetch_iterator); diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c index 1e5b1b7675..c1db2e263b 100644 --- a/src/backend/executor/nodeGather.c +++ b/src/backend/executor/nodeGather.c @@ -225,7 +225,7 @@ ExecGather(GatherState *node) void ExecEndGather(GatherState *node) { - ExecEndNode(outerPlanState(node)); /* let children clean up first */ + ExecEndNode(outerPlanState(node)); /* let children clean up first */ ExecShutdownGather(node); ExecFreeExprContext(&node->ps); ExecClearTuple(node->ps.ps_ResultTupleSlot); diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c index 62c399e0b1..e066574836 100644 --- a/src/backend/executor/nodeGatherMerge.c +++ b/src/backend/executor/nodeGatherMerge.c @@ -35,7 +35,7 @@ typedef struct GMReaderTupleBuffer int readCounter; int nTuples; bool done; -} GMReaderTupleBuffer; +} GMReaderTupleBuffer; /* * When we read tuples from workers, it's a good idea to read several at once @@ -230,17 +230,17 @@ ExecGatherMerge(GatherMergeState *node) ResetExprContext(econtext); /* - * Get next tuple, either from one of our workers, or by running the - * plan ourselves. + * Get next tuple, either from one of our workers, or by running the plan + * ourselves. */ slot = gather_merge_getnext(node); if (TupIsNull(slot)) return NULL; /* - * form the result tuple using ExecProject(), and return it --- unless - * the projection produces an empty set, in which case we must loop - * back around for another tuple + * form the result tuple using ExecProject(), and return it --- unless the + * projection produces an empty set, in which case we must loop back + * around for another tuple */ econtext->ecxt_outertuple = slot; return ExecProject(node->ps.ps_ProjInfo); @@ -255,7 +255,7 @@ ExecGatherMerge(GatherMergeState *node) void ExecEndGatherMerge(GatherMergeState *node) { - ExecEndNode(outerPlanState(node)); /* let children clean up first */ + ExecEndNode(outerPlanState(node)); /* let children clean up first */ ExecShutdownGatherMerge(node); ExecFreeExprContext(&node->ps); ExecClearTuple(node->ps.ps_ResultTupleSlot); @@ -534,8 +534,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait) HeapTuple tup = NULL; /* - * If we're being asked to generate a tuple from the leader, then we - * just call ExecProcNode as normal to produce one. + * If we're being asked to generate a tuple from the leader, then we just + * call ExecProcNode as normal to produce one. */ if (gm_state->nreaders == reader) { @@ -582,8 +582,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait) &tuple_buffer->done)); /* - * Attempt to read more tuples in nowait mode and store them in - * the tuple array. + * Attempt to read more tuples in nowait mode and store them in the + * tuple array. */ if (HeapTupleIsValid(tup)) form_tuple_array(gm_state, reader); diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 8a2e78266b..fef83dbdbd 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -72,8 +72,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); /* - * Lock the non-leaf tables in the partition tree controlled by this - * node. It's a no-op for non-partitioned parent tables. + * Lock the non-leaf tables in the partition tree controlled by this node. 
+ * It's a no-op for non-partitioned parent tables. */ ExecLockNonLeafAppendTables(node->partitioned_rels, estate); diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 652cd97599..cf555fe78d 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -1328,7 +1328,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, static void fireBSTriggers(ModifyTableState *node) { - ResultRelInfo *resultRelInfo = node->resultRelInfo; + ResultRelInfo *resultRelInfo = node->resultRelInfo; /* * If the node modifies a partitioned table, we must fire its triggers. @@ -1364,7 +1364,7 @@ fireBSTriggers(ModifyTableState *node) static void fireASTriggers(ModifyTableState *node) { - ResultRelInfo *resultRelInfo = node->resultRelInfo; + ResultRelInfo *resultRelInfo = node->resultRelInfo; /* * If the node modifies a partitioned table, we must fire its triggers. @@ -1676,7 +1676,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* If modifying a partitioned table, initialize the root table info */ if (node->rootResultRelIndex >= 0) mtstate->rootResultRelInfo = estate->es_root_result_relations + - node->rootResultRelIndex; + node->rootResultRelIndex; mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans); mtstate->mt_nplans = nplans; @@ -1753,12 +1753,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* The root table RT index is at the head of the partitioned_rels list */ if (node->partitioned_rels) { - Index root_rti; - Oid root_oid; + Index root_rti; + Oid root_oid; root_rti = linitial_int(node->partitioned_rels); root_oid = getrelid(root_rti, estate->es_range_table); - rel = heap_open(root_oid, NoLock); /* locked by InitPlan */ + rel = heap_open(root_oid, NoLock); /* locked by InitPlan */ } else rel = mtstate->resultRelInfo->ri_RelationDesc; @@ -1815,15 +1815,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) } /* - * Build WITH CHECK OPTION constraints for each leaf partition rel. - * Note that we didn't build the withCheckOptionList for each partition - * within the planner, but simple translation of the varattnos for each - * partition will suffice. This only occurs for the INSERT case; - * UPDATE/DELETE cases are handled above. + * Build WITH CHECK OPTION constraints for each leaf partition rel. Note + * that we didn't build the withCheckOptionList for each partition within + * the planner, but simple translation of the varattnos for each partition + * will suffice. This only occurs for the INSERT case; UPDATE/DELETE + * cases are handled above. 
*/ if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0) { - List *wcoList; + List *wcoList; Assert(operation == CMD_INSERT); resultRelInfo = mtstate->mt_partitions; diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c index 01048cc826..2f0a4e647b 100644 --- a/src/backend/executor/nodeProjectSet.c +++ b/src/backend/executor/nodeProjectSet.c @@ -120,7 +120,7 @@ ExecProjectSRF(ProjectSetState *node, bool continuing) { TupleTableSlot *resultSlot = node->ps.ps_ResultTupleSlot; ExprContext *econtext = node->ps.ps_ExprContext; - bool hassrf PG_USED_FOR_ASSERTS_ONLY; + bool hassrf PG_USED_FOR_ASSERTS_ONLY; bool hasresult; int argno; diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 85b3f67b33..9ae53bb8a7 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -64,7 +64,7 @@ typedef struct SetOpStatePerGroupData { long numLeft; /* number of left-input dups in group */ long numRight; /* number of right-input dups in group */ -} SetOpStatePerGroupData; +} SetOpStatePerGroupData; static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate); diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c index e9df48044e..da557ceb6f 100644 --- a/src/backend/executor/nodeTableFuncscan.c +++ b/src/backend/executor/nodeTableFuncscan.c @@ -288,7 +288,7 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext) PG_TRY(); { routine->InitOpaque(tstate, - tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts); + tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts); /* * If evaluating the document expression returns NULL, the table @@ -343,7 +343,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) int colno; Datum value; int ordinalitycol = - ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; + ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; /* * Install the document as a possibly-toasted Datum into the tablefunc @@ -443,8 +443,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) ExecClearTuple(tstate->ss.ss_ScanTupleSlot); /* - * Obtain the value of each column for this row, installing them into the - * slot; then add the tuple to the tuplestore. + * Obtain the value of each column for this row, installing them into + * the slot; then add the tuple to the tuplestore. */ for (colno = 0; colno < natts; colno++) { @@ -456,12 +456,12 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) } else { - bool isnull; + bool isnull; values[colno] = routine->GetValue(tstate, colno, - tupdesc->attrs[colno]->atttypid, - tupdesc->attrs[colno]->atttypmod, + tupdesc->attrs[colno]->atttypid, + tupdesc->attrs[colno]->atttypmod, &isnull); /* No value? 
Evaluate and apply the default, if any */ @@ -479,7 +479,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("null is not allowed in column \"%s\"", - NameStr(tupdesc->attrs[colno]->attname)))); + NameStr(tupdesc->attrs[colno]->attname)))); nulls[colno] = isnull; } diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 35021e1839..97c3925874 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -1230,7 +1230,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL))) { if (list_length(stmt_list) == 1 && - linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY && + linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY && linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL && ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree)) portal->cursorOptions |= CURSOR_OPT_SCROLL; @@ -1246,7 +1246,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, if (portal->cursorOptions & CURSOR_OPT_SCROLL) { if (list_length(stmt_list) == 1 && - linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY && + linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY && linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -1990,8 +1990,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, stmt_list = pg_analyze_and_rewrite_params(parsetree, src, plan->parserSetup, - plan->parserSetupArg, - _SPI_current->queryEnv); + plan->parserSetupArg, + _SPI_current->queryEnv); } else { @@ -2668,7 +2668,7 @@ SPI_register_relation(EphemeralNamedRelation enr) if (enr == NULL || enr->md.name == NULL) return SPI_ERROR_ARGUMENT; - res = _SPI_begin_call(false); /* keep current memory context */ + res = _SPI_begin_call(false); /* keep current memory context */ if (res < 0) return res; @@ -2702,7 +2702,7 @@ SPI_unregister_relation(const char *name) if (name == NULL) return SPI_ERROR_ARGUMENT; - res = _SPI_begin_call(false); /* keep current memory context */ + res = _SPI_begin_call(false); /* keep current memory context */ if (res < 0) return res; @@ -2735,8 +2735,8 @@ SPI_register_trigger_data(TriggerData *tdata) if (tdata->tg_newtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); - int rc; + palloc(sizeof(EphemeralNamedRelationData)); + int rc; enr->md.name = tdata->tg_trigger->tgnewtable; enr->md.reliddesc = tdata->tg_relation->rd_id; @@ -2752,8 +2752,8 @@ SPI_register_trigger_data(TriggerData *tdata) if (tdata->tg_oldtable) { EphemeralNamedRelation enr = - palloc(sizeof(EphemeralNamedRelationData)); - int rc; + palloc(sizeof(EphemeralNamedRelationData)); + int rc; enr->md.name = tdata->tg_trigger->tgoldtable; enr->md.reliddesc = tdata->tg_relation->rd_id; diff --git a/src/backend/lib/rbtree.c b/src/backend/lib/rbtree.c index b08e48b344..cdf8a73aa5 100644 --- a/src/backend/lib/rbtree.c +++ b/src/backend/lib/rbtree.c @@ -818,7 +818,7 @@ loop: if (current == NULL) { iter->is_over = true; - break; /* end of iteration */ + break; /* end of iteration */ } else if (came_from == current->right) { diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 6d3ff68607..f36d7b9b6d 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -757,10 +757,10 @@ CheckPWChallengeAuth(Port *port, char **logdetail) * If the user does not exist, or has no 
password or it's expired, we * still go through the motions of authentication, to avoid revealing to * the client that the user didn't exist. If 'md5' is allowed, we choose - * whether to use 'md5' or 'scram-sha-256' authentication based on - * current password_encryption setting. The idea is that most genuine - * users probably have a password of that type, and if we pretend that - * this user had a password of that type, too, it "blends in" best. + * whether to use 'md5' or 'scram-sha-256' authentication based on current + * password_encryption setting. The idea is that most genuine users + * probably have a password of that type, and if we pretend that this user + * had a password of that type, too, it "blends in" best. */ if (!shadow_pass) pwtype = Password_encryption; @@ -770,8 +770,8 @@ CheckPWChallengeAuth(Port *port, char **logdetail) /* * If 'md5' authentication is allowed, decide whether to perform 'md5' or * 'scram-sha-256' authentication based on the type of password the user - * has. If it's an MD5 hash, we must do MD5 authentication, and if it's - * a SCRAM verifier, we must do SCRAM authentication. + * has. If it's an MD5 hash, we must do MD5 authentication, and if it's a + * SCRAM verifier, we must do SCRAM authentication. * * If MD5 authentication is not allowed, always use SCRAM. If the user * had an MD5 password, CheckSCRAMAuth() will fail. diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c index e7a6b04fb5..0013ee3878 100644 --- a/src/backend/libpq/crypt.c +++ b/src/backend/libpq/crypt.c @@ -50,7 +50,7 @@ get_role_password(const char *role, char **logdetail) { *logdetail = psprintf(_("Role \"%s\" does not exist."), role); - return NULL; /* no such user */ + return NULL; /* no such user */ } datum = SysCacheGetAttr(AUTHNAME, roleTup, @@ -60,7 +60,7 @@ get_role_password(const char *role, char **logdetail) ReleaseSysCache(roleTup); *logdetail = psprintf(_("User \"%s\" has no password assigned."), role); - return NULL; /* user has no password */ + return NULL; /* user has no password */ } shadow_pass = TextDatumGetCString(datum); @@ -76,7 +76,7 @@ get_role_password(const char *role, char **logdetail) *logdetail = psprintf(_("User \"%s\" has an empty password."), role); pfree(shadow_pass); - return NULL; /* empty password */ + return NULL; /* empty password */ } /* @@ -122,8 +122,8 @@ encrypt_password(PasswordType target_type, const char *role, if (guessed_type != PASSWORD_TYPE_PLAINTEXT) { /* - * Cannot convert an already-encrypted password from one - * format to another, so return it as it is. + * Cannot convert an already-encrypted password from one format to + * another, so return it as it is. */ return pstrdup(password); } @@ -274,6 +274,7 @@ plain_crypt_verify(const char *role, const char *shadow_pass, break; case PASSWORD_TYPE_PLAINTEXT: + /* * We never store passwords in plaintext, so this shouldn't * happen. 
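The CheckPWChallengeAuth and crypt.c hunks above reflow comments around one decision: look at the stored verifier, guess whether it is an MD5 hash or a SCRAM-SHA-256 verifier, and pick the matching challenge method. The self-contained sketch below illustrates that classification; the Sketch* names are local to the example, and the prefix checks only approximate the stored formats ("md5" followed by 32 hex digits, or a "SCRAM-SHA-256$" header).

/*
 * Sketch only: classify a stored verifier by its shape and report the
 * challenge method that would be used.
 */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

typedef enum
{
    SKETCH_PASSWORD_PLAINTEXT,
    SKETCH_PASSWORD_MD5,
    SKETCH_PASSWORD_SCRAM
} SketchPasswordType;

static SketchPasswordType
sketch_guess_password_type(const char *shadow_pass)
{
    if (strncmp(shadow_pass, "md5", 3) == 0 && strlen(shadow_pass) == 35)
    {
        const char *p;

        for (p = shadow_pass + 3; *p; p++)
            if (!isxdigit((unsigned char) *p))
                return SKETCH_PASSWORD_PLAINTEXT;
        return SKETCH_PASSWORD_MD5;
    }
    if (strncmp(shadow_pass, "SCRAM-SHA-256$", 14) == 0)
        return SKETCH_PASSWORD_SCRAM;
    return SKETCH_PASSWORD_PLAINTEXT;
}

int
main(void)
{
    const char *samples[] = {
        "md5d51c9a7e9353746a6020f9602d452929",
        "SCRAM-SHA-256$4096:salt$stored:server",
        "not-a-verifier"
    };
    size_t      i;

    for (i = 0; i < 3; i++)
    {
        switch (sketch_guess_password_type(samples[i]))
        {
            case SKETCH_PASSWORD_MD5:
                printf("%s -> md5 challenge\n", samples[i]);
                break;
            case SKETCH_PASSWORD_SCRAM:
                printf("%s -> scram-sha-256 exchange\n", samples[i]);
                break;
            default:
                printf("%s -> treated as plaintext/unknown\n", samples[i]);
                break;
        }
    }
    return 0;
}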
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 5561c399da..823880ebff 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -617,7 +617,10 @@ check_db(const char *dbname, const char *role, Oid roleid, List *tokens) tok = lfirst(cell); if (am_walsender && !am_db_walsender) { - /* physical replication walsender connections can only match replication keyword */ + /* + * physical replication walsender connections can only match + * replication keyword + */ if (token_is_keyword(tok, "replication")) return true; } @@ -1842,7 +1845,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int ret; List *parsed_servers; ListCell *l; - char *dupval = pstrdup(val); + char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiusservers", "radius"); @@ -1891,7 +1894,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, { List *parsed_ports; ListCell *l; - char *dupval = pstrdup(val); + char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiusports", "radius"); @@ -1926,7 +1929,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, else if (strcmp(name, "radiussecrets") == 0) { List *parsed_secrets; - char *dupval = pstrdup(val); + char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiussecrets", "radius"); @@ -1948,7 +1951,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, else if (strcmp(name, "radiusidentifiers") == 0) { List *parsed_identifiers; - char *dupval = pstrdup(val); + char *dupval = pstrdup(val); REQUIRE_AUTH_OPTION(uaRADIUS, "radiusidentifiers", "radius"); diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index f7b205f195..d1cc38beb2 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -85,7 +85,7 @@ #ifdef HAVE_UTIME_H #include <utime.h> #endif -#ifdef _MSC_VER /* mstcpip.h is missing on mingw */ +#ifdef _MSC_VER /* mstcpip.h is missing on mingw */ #include <mstcpip.h> #endif diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 6ad38443a0..7811ad5d52 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -373,7 +373,7 @@ _copyGather(const Gather *from) static GatherMerge * _copyGatherMerge(const GatherMerge *from) { - GatherMerge *newnode = makeNode(GatherMerge); + GatherMerge *newnode = makeNode(GatherMerge); /* * copy node superclass fields @@ -691,7 +691,7 @@ _copyCteScan(const CteScan *from) static NamedTuplestoreScan * _copyNamedTuplestoreScan(const NamedTuplestoreScan *from) { - NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan); + NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan); /* * copy node superclass fields diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 3e8189ced3..95c1d3efbb 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -1129,7 +1129,8 @@ exprSetCollation(Node *expr, Oid collation) Assert(!OidIsValid(collation)); /* result is always boolean */ break; case T_NextValueExpr: - Assert(!OidIsValid(collation)); /* result is always an integer type */ + Assert(!OidIsValid(collation)); /* result is always an integer + * type */ break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr)); diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 8d9ff63931..4949d58864 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -468,7 +468,7 @@ _outGather(StringInfo str, const Gather *node) static void _outGatherMerge(StringInfo str, 
const GatherMerge *node) { - int i; + int i; WRITE_NODE_TYPE("GATHERMERGE"); diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index c66019e3ba..bbd39a2ed9 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -109,7 +109,7 @@ typedef struct PagetableEntry */ typedef struct PTEntryArray { - pg_atomic_uint32 refcount; /* no. of iterator attached */ + pg_atomic_uint32 refcount; /* no. of iterator attached */ PagetableEntry ptentry[FLEXIBLE_ARRAY_MEMBER]; } PTEntryArray; @@ -206,7 +206,7 @@ typedef struct TBMSharedIteratorState */ typedef struct PTIterationArray { - pg_atomic_uint32 refcount; /* no. of iterator attached */ + pg_atomic_uint32 refcount; /* no. of iterator attached */ int index[FLEXIBLE_ARRAY_MEMBER]; /* index array */ } PTIterationArray; @@ -905,8 +905,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm) /* * For every shared iterator, referring to pagetable and iterator array, - * increase the refcount by 1 so that while freeing the shared iterator - * we don't free pagetable and iterator array until its refcount becomes 0. + * increase the refcount by 1 so that while freeing the shared iterator we + * don't free pagetable and iterator array until its refcount becomes 0. */ if (ptbase != NULL) pg_atomic_add_fetch_u32(&ptbase->refcount, 1); diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index b93b4fc773..78ca55bbd6 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -112,7 +112,7 @@ static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte); + RangeTblEntry *rte); static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist); @@ -648,6 +648,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, return; case RTE_NAMEDTUPLESTORE: + /* * tuplestore cannot be shared, at least without more * infrastructure to support that. @@ -1579,7 +1580,7 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, total_subpaths, pathkeys, NULL, - partitioned_rels)); + partitioned_rels)); } } @@ -2220,10 +2221,10 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel) * For each useful ordering, we can consider an order-preserving Gather * Merge. */ - foreach (lc, rel->partial_pathlist) + foreach(lc, rel->partial_pathlist) { - Path *subpath = (Path *) lfirst(lc); - GatherMergePath *path; + Path *subpath = (Path *) lfirst(lc); + GatherMergePath *path; if (subpath->pathkeys == NIL) continue; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 52643d0ad6..cdb18d978d 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -664,8 +664,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count, { /* * For index only scans compute workers based on number of index pages - * fetched; the number of heap pages we fetch might be so small as - * to effectively rule out parallelism, which we don't want to do. + * fetched; the number of heap pages we fetch might be so small as to + * effectively rule out parallelism, which we don't want to do. 
*/ if (indexonly) rand_heap_pages = -1; @@ -2188,7 +2188,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path, /* For partial paths, scale row estimate. */ if (path->path.parallel_workers > 0) { - double parallel_divisor = get_parallel_divisor(&path->path); + double parallel_divisor = get_parallel_divisor(&path->path); path->path.rows = clamp_row_est(path->path.rows / parallel_divisor); @@ -2624,7 +2624,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, /* For partial paths, scale row estimate. */ if (path->jpath.path.parallel_workers > 0) { - double parallel_divisor = get_parallel_divisor(&path->jpath.path); + double parallel_divisor = get_parallel_divisor(&path->jpath.path); path->jpath.path.rows = clamp_row_est(path->jpath.path.rows / parallel_divisor); @@ -3029,7 +3029,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, /* For partial paths, scale row estimate. */ if (path->jpath.path.parallel_workers > 0) { - double parallel_divisor = get_parallel_divisor(&path->jpath.path); + double parallel_divisor = get_parallel_divisor(&path->jpath.path); path->jpath.path.rows = clamp_row_est(path->jpath.path.rows / parallel_divisor); diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index 6e4bae854a..607a8f97bf 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -1073,8 +1073,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel, true); /* - * if, after costing the path, we find that it's not worth - * using parallel workers, just free it. + * if, after costing the path, we find that it's not worth using + * parallel workers, just free it. */ if (ipath->path.parallel_workers > 0) add_partial_path(rel, (Path *) ipath); diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 1c252c0ef5..94beeb858d 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -140,7 +140,7 @@ static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_pa static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path, List *tlist, List *scan_clauses); static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root, - Path *best_path, List *tlist, List *scan_clauses); + Path *best_path, List *tlist, List *scan_clauses); static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path, List *tlist, List *scan_clauses); static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, @@ -200,7 +200,7 @@ static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual, static CteScan *make_ctescan(List *qptlist, List *qpqual, Index scanrelid, int ctePlanId, int cteParam); static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual, - Index scanrelid, char *enrname); + Index scanrelid, char *enrname); static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual, Index scanrelid, int wtParam); static Append *make_append(List *appendplans, List *tlist, List *partitioned_rels); @@ -4910,7 +4910,7 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples) /* * bitmap_subplan_mark_shared - * Set isshared flag in bitmap subplan so that it will be created in + * Set isshared flag in bitmap subplan so that it will be created in * shared memory. 
*/ static void @@ -6425,7 +6425,7 @@ make_modifytable(PlannerInfo *root, node->partitioned_rels = partitioned_rels; node->resultRelations = resultRelations; node->resultRelIndex = -1; /* will be set correctly in setrefs.c */ - node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */ + node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */ node->plans = subplans; if (!onconflict) { diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index c4a5651abd..40cb79d4cd 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -73,9 +73,9 @@ create_upper_paths_hook_type create_upper_paths_hook = NULL; #define EXPRKIND_QUAL 0 #define EXPRKIND_TARGET 1 #define EXPRKIND_RTFUNC 2 -#define EXPRKIND_RTFUNC_LATERAL 3 +#define EXPRKIND_RTFUNC_LATERAL 3 #define EXPRKIND_VALUES 4 -#define EXPRKIND_VALUES_LATERAL 5 +#define EXPRKIND_VALUES_LATERAL 5 #define EXPRKIND_LIMIT 6 #define EXPRKIND_APPINFO 7 #define EXPRKIND_PHV 8 @@ -1041,7 +1041,7 @@ inheritance_planner(PlannerInfo *root) ListCell *lc; Index rti; RangeTblEntry *parent_rte; - List *partitioned_rels = NIL; + List *partitioned_rels = NIL; Assert(parse->commandType != CMD_INSERT); @@ -1102,10 +1102,10 @@ inheritance_planner(PlannerInfo *root) /* * If the parent RTE is a partitioned table, we should use that as the * nominal relation, because the RTEs added for partitioned tables - * (including the root parent) as child members of the inheritance set - * do not appear anywhere else in the plan. The situation is exactly - * the opposite in the case of non-partitioned inheritance parent as - * described below. + * (including the root parent) as child members of the inheritance set do + * not appear anywhere else in the plan. The situation is exactly the + * opposite in the case of non-partitioned inheritance parent as described + * below. */ parent_rte = rt_fetch(parentRTindex, root->parse->rtable); if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE) @@ -1278,9 +1278,9 @@ inheritance_planner(PlannerInfo *root) * is used elsewhere in the plan, so using the original parent RTE * would give rise to confusing use of multiple aliases in EXPLAIN * output for what the user will think is the "same" table. OTOH, - * it's not a problem in the partitioned inheritance case, because - * the duplicate child RTE added for the parent does not appear - * anywhere else in the plan tree. + * it's not a problem in the partitioned inheritance case, because the + * duplicate child RTE added for the parent does not appear anywhere + * else in the plan tree. */ if (nominalRelation < 0) nominalRelation = appinfo->child_relid; @@ -3364,7 +3364,7 @@ get_number_of_groups(PlannerInfo *root, ListCell *lc; ListCell *lc2; - Assert(gd); /* keep Coverity happy */ + Assert(gd); /* keep Coverity happy */ dNumGroups = 0; @@ -4336,8 +4336,8 @@ consider_groupingsets_paths(PlannerInfo *root, /* * We treat this as a knapsack problem: the knapsack capacity * represents work_mem, the item weights are the estimated memory - * usage of the hashtables needed to implement a single rollup, and - * we really ought to use the cost saving as the item value; + * usage of the hashtables needed to implement a single rollup, + * and we really ought to use the cost saving as the item value; * however, currently the costs assigned to sort nodes don't * reflect the comparison costs well, and so we treat all items as * of equal value (each rollup we hash instead saves us one sort). 
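As an editorial aside on the knapsack comment above: since every hashed rollup is treated as having equal value (each one saves a single sort), filling the work_mem "knapsack" reduces to taking the smallest estimated hashtables first. The standalone C sketch below illustrates only that reduction; the function name, parameters, and greedy strategy are illustrative assumptions and not the code that consider_groupingsets_paths actually runs.

#include <stdlib.h>
#include <string.h>

/* qsort comparator for doubles, ascending */
static int
cmp_double(const void *a, const void *b)
{
	double		da = *(const double *) a;
	double		db = *(const double *) b;

	return (da > db) - (da < db);
}

/*
 * Hypothetical illustration: given per-rollup hashtable size estimates (in
 * bytes) and a work_mem-style budget, report how many rollups could be
 * hashed.  With equal item values, the 0/1 knapsack degenerates to a greedy
 * pick of the cheapest items.
 */
int
choose_hashed_rollups(const double *est_bytes, int nrollups, double budget)
{
	double	   *sizes;
	double		used = 0.0;
	int			chosen = 0;

	sizes = malloc(sizeof(double) * nrollups);
	if (sizes == NULL)
		return 0;
	memcpy(sizes, est_bytes, sizeof(double) * nrollups);
	qsort(sizes, nrollups, sizeof(double), cmp_double);

	for (int i = 0; i < nrollups; i++)
	{
		if (used + sizes[i] > budget)
			break;
		used += sizes[i];
		chosen++;				/* each hashed rollup saves one sort */
	}

	free(sizes);
	return chosen;
}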
@@ -6072,7 +6072,7 @@ get_partitioned_child_rels(PlannerInfo *root, Index rti) foreach(l, root->pcinfo_list) { - PartitionedChildRelInfo *pc = lfirst(l); + PartitionedChildRelInfo *pc = lfirst(l); if (pc->parent_relid == rti) { diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index c192dc4f70..5cac171cb6 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -883,8 +883,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) * If the main target relation is a partitioned table, the * following list contains the RT indexes of partitioned child * relations including the root, which are not included in the - * above list. We also keep RT indexes of the roots separately - * to be identitied as such during the executor initialization. + * above list. We also keep RT indexes of the roots + * separately to be identitied as such during the executor + * initialization. */ if (splan->partitioned_rels != NIL) { @@ -893,9 +894,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) list_copy(splan->partitioned_rels)); /* Remember where this root will be in the global list. */ splan->rootResultRelIndex = - list_length(root->glob->rootResultRelations); + list_length(root->glob->rootResultRelations); root->glob->rootResultRelations = - lappend_int(root->glob->rootResultRelations, + lappend_int(root->glob->rootResultRelations, linitial_int(splan->partitioned_rels)); } } diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index a1be858901..8b44fb96b0 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -1555,9 +1555,10 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti) newrc->waitPolicy = oldrc->waitPolicy; /* - * We mark RowMarks for partitioned child tables as parent RowMarks - * so that the executor ignores them (except their existence means - * that the child tables be locked using appropriate mode). + * We mark RowMarks for partitioned child tables as parent + * RowMarks so that the executor ignores them (except their + * existence means that the child tables be locked using + * appropriate mode). */ newrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE); @@ -1593,8 +1594,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti) * parent RT index to the list of RT indexes of its partitioned child * tables. When creating an Append or a ModifyTable path for the parent, * we copy the child RT index list verbatim to the path so that it could - * be carried over to the executor so that the latter could identify - * the partitioned child tables. + * be carried over to the executor so that the latter could identify the + * partitioned child tables. 
*/ if (partitioned_child_rels != NIL) { diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 2d5caae9a9..46778aaefd 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -1642,8 +1642,8 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Relids required_outer, double *rows) { GatherMergePath *pathnode = makeNode(GatherMergePath); - Cost input_startup_cost = 0; - Cost input_total_cost = 0; + Cost input_startup_cost = 0; + Cost input_total_cost = 0; Assert(subpath->parallel_safe); Assert(pathkeys); @@ -1669,7 +1669,7 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, else { /* We'll need to insert a Sort node, so include cost for that */ - Path sort_path; /* dummy for result of cost_sort */ + Path sort_path; /* dummy for result of cost_sort */ cost_sort(&sort_path, root, diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 2a5ec181de..8f9dd9099b 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -1149,7 +1149,7 @@ get_relation_constraints(PlannerInfo *root, Index varno = rel->relid; Relation relation; TupleConstr *constr; - List *pcqual; + List *pcqual; /* * We assume the relation has already been safely locked. diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 342d884003..76a3868fa0 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -149,9 +149,9 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent) /* * Pass top parent's relids down the inheritance hierarchy. If the parent - * has top_parent_relids set, it's a direct or an indirect child of the top - * parent indicated by top_parent_relids. By extension this child is also - * an indirect child of that parent. + * has top_parent_relids set, it's a direct or an indirect child of the + * top parent indicated by top_parent_relids. By extension this child is + * also an indirect child of that parent. */ if (parent) { diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 567dd54c6c..86482eba26 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -1637,7 +1637,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt) * Recursively transform the components of the tree. 
*/ sostmt = castNode(SetOperationStmt, - transformSetOperationTree(pstate, stmt, true, NULL)); + transformSetOperationTree(pstate, stmt, true, NULL)); Assert(sostmt); qry->setOperations = (Node *) sostmt; @@ -2809,8 +2809,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), /*------ translator: %s is a SQL row locking clause such as FOR UPDATE */ - errmsg("%s cannot be applied to a named tuplestore", - LCS_asString(lc->strength)), + errmsg("%s cannot be applied to a named tuplestore", + LCS_asString(lc->strength)), parser_errposition(pstate, thisrel->location))); break; default: diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index e268a127d1..27dd49d301 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -60,7 +60,7 @@ static Node *transformJoinUsingClause(ParseState *pstate, static Node *transformJoinOnClause(ParseState *pstate, JoinExpr *j, List *namespace); static RangeTblEntry *getRTEForSpecialRelationTypes(ParseState *pstate, - RangeVar *rv); + RangeVar *rv); static RangeTblEntry *transformTableEntry(ParseState *pstate, RangeVar *r); static RangeTblEntry *transformCTEReference(ParseState *pstate, RangeVar *r, CommonTableExpr *cte, Index levelsup); @@ -70,7 +70,7 @@ static RangeTblEntry *transformRangeSubselect(ParseState *pstate, static RangeTblEntry *transformRangeFunction(ParseState *pstate, RangeFunction *r); static RangeTblEntry *transformRangeTableFunc(ParseState *pstate, - RangeTableFunc *t); + RangeTableFunc *t); static TableSampleClause *transformRangeTableSample(ParseState *pstate, RangeTableSample *rts); static Node *transformFromClauseItem(ParseState *pstate, Node *n, @@ -359,7 +359,7 @@ transformJoinUsingClause(ParseState *pstate, /* Now create the lvar = rvar join condition */ e = makeSimpleA_Expr(AEXPR_OP, "=", - (Node *) copyObject(lvar), (Node *) copyObject(rvar), + (Node *) copyObject(lvar), (Node *) copyObject(rvar), -1); /* Prepare to combine into an AND clause, if multiple join columns */ @@ -759,7 +759,7 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) /* Transform and apply typecast to the row-generating expression ... */ Assert(rtf->rowexpr != NULL); tf->rowexpr = coerce_to_specific_type(pstate, - transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION), + transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION), TEXTOID, constructName); assign_expr_collations(pstate, tf->rowexpr); @@ -767,7 +767,7 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) /* ... and to the document itself */ Assert(rtf->docexpr != NULL); tf->docexpr = coerce_to_specific_type(pstate, - transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION), + transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION), docType, constructName); assign_expr_collations(pstate, tf->docexpr); @@ -792,9 +792,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) makeString(pstrdup(rawc->colname))); /* - * Determine the type and typmod for the new column. FOR - * ORDINALITY columns are INTEGER per spec; the others are - * user-specified. + * Determine the type and typmod for the new column. FOR ORDINALITY + * columns are INTEGER per spec; the others are user-specified. 
*/ if (rawc->for_ordinality) { @@ -824,14 +823,14 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) tf->coltypes = lappend_oid(tf->coltypes, typid); tf->coltypmods = lappend_int(tf->coltypmods, typmod); tf->colcollations = lappend_oid(tf->colcollations, - type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid); + type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid); /* Transform the PATH and DEFAULT expressions */ if (rawc->colexpr) { colexpr = coerce_to_specific_type(pstate, - transformExpr(pstate, rawc->colexpr, - EXPR_KIND_FROM_FUNCTION), + transformExpr(pstate, rawc->colexpr, + EXPR_KIND_FROM_FUNCTION), TEXTOID, constructName); assign_expr_collations(pstate, colexpr); @@ -842,8 +841,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) if (rawc->coldefexpr) { coldefexpr = coerce_to_specific_type_typmod(pstate, - transformExpr(pstate, rawc->coldefexpr, - EXPR_KIND_FROM_FUNCTION), + transformExpr(pstate, rawc->coldefexpr, + EXPR_KIND_FROM_FUNCTION), typid, typmod, constructName); assign_expr_collations(pstate, coldefexpr); @@ -1050,7 +1049,6 @@ transformRangeTableSample(ParseState *pstate, RangeTableSample *rts) static RangeTblEntry * getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv) { - CommonTableExpr *cte; Index levelsup; RangeTblEntry *rte = NULL; diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index 4f9b1a76b0..92101c9103 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -1255,7 +1255,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a) /* ROW() op ROW() is handled specially */ cmp = make_row_comparison_op(pstate, a->name, - copyObject(((RowExpr *) lexpr)->args), + copyObject(((RowExpr *) lexpr)->args), ((RowExpr *) rexpr)->args, a->location); } diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 40451f3fef..e412d0f9d3 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -1164,6 +1164,7 @@ parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockmode) */ if (get_visible_ENR_metadata(pstate->p_queryEnv, relation->relname)) rel = NULL; + /* * An unqualified name might have been meant as a reference to * some not-yet-in-scope CTE. The bare "does not exist" message @@ -2002,7 +2003,7 @@ addRangeTableEntryForENR(ParseState *pstate, default: elog(ERROR, "unexpected enrtype: %d", enrmd->enrtype); - return NULL; /* for fussy compilers */ + return NULL; /* for fussy compilers */ } /* diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 882955bb1c..beb099569b 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -363,7 +363,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, char **snamespace_p, char **sname_p) { ListCell *option; - DefElem *nameEl = NULL; + DefElem *nameEl = NULL; Oid snamespaceid; char *snamespace; char *sname; @@ -378,12 +378,12 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, * used by pg_dump. Else, generate a name. * * Although we use ChooseRelationName, it's not guaranteed that the - * selected sequence name won't conflict; given sufficiently long - * field names, two different serial columns in the same table could - * be assigned the same sequence name, and we'd not notice since we - * aren't creating the sequence quite yet. 
In practice this seems - * quite unlikely to be a problem, especially since few people would - * need two serial columns in one table. + * selected sequence name won't conflict; given sufficiently long field + * names, two different serial columns in the same table could be assigned + * the same sequence name, and we'd not notice since we aren't creating + * the sequence quite yet. In practice this seems quite unlikely to be a + * problem, especially since few people would need two serial columns in + * one table. */ foreach(option, seqoptions) @@ -402,7 +402,8 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, if (nameEl) { - RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg)); + RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg)); + snamespace = rv->schemaname; sname = rv->relname; seqoptions = list_delete_ptr(seqoptions, nameEl); @@ -429,14 +430,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, cxt->relation->relname, column->colname))); /* - * Build a CREATE SEQUENCE command to create the sequence object, and - * add it to the list of things to be done before this CREATE/ALTER - * TABLE. + * Build a CREATE SEQUENCE command to create the sequence object, and add + * it to the list of things to be done before this CREATE/ALTER TABLE. */ seqstmt = makeNode(CreateSeqStmt); seqstmt->for_identity = for_identity; seqstmt->sequence = makeRangeVar(snamespace, sname, -1); seqstmt->options = seqoptions; + /* * If a sequence data type was specified, add it to the options. Prepend * to the list rather than append; in case a user supplied their own AS @@ -448,11 +449,11 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, seqstmt->options); /* - * If this is ALTER ADD COLUMN, make sure the sequence will be owned - * by the table's owner. The current user might be someone else - * (perhaps a superuser, or someone who's only a member of the owning - * role), but the SEQUENCE OWNED BY mechanisms will bleat unless table - * and sequence have exactly the same owning role. + * If this is ALTER ADD COLUMN, make sure the sequence will be owned by + * the table's owner. The current user might be someone else (perhaps a + * superuser, or someone who's only a member of the owning role), but the + * SEQUENCE OWNED BY mechanisms will bleat unless table and sequence have + * exactly the same owning role. */ if (cxt->rel) seqstmt->ownerId = cxt->rel->rd_rel->relowner; @@ -462,9 +463,9 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, cxt->blist = lappend(cxt->blist, seqstmt); /* - * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence - * as owned by this column, and add it to the list of things to be - * done after this CREATE/ALTER TABLE. + * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence as + * owned by this column, and add it to the list of things to be done after + * this CREATE/ALTER TABLE. 
*/ altseqstmt = makeNode(AlterSeqStmt); altseqstmt->sequence = makeRangeVar(snamespace, sname, -1); @@ -647,31 +648,31 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) break; case CONSTR_IDENTITY: - { - Type ctype; - Oid typeOid; + { + Type ctype; + Oid typeOid; - ctype = typenameType(cxt->pstate, column->typeName, NULL); - typeOid = HeapTupleGetOid(ctype); - ReleaseSysCache(ctype); + ctype = typenameType(cxt->pstate, column->typeName, NULL); + typeOid = HeapTupleGetOid(ctype); + ReleaseSysCache(ctype); - if (saw_identity) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("multiple identity specifications for column \"%s\" of table \"%s\"", + if (saw_identity) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("multiple identity specifications for column \"%s\" of table \"%s\"", column->colname, cxt->relation->relname), - parser_errposition(cxt->pstate, - constraint->location))); + parser_errposition(cxt->pstate, + constraint->location))); - generateSerialExtraStmts(cxt, column, - typeOid, constraint->options, true, - NULL, NULL); + generateSerialExtraStmts(cxt, column, + typeOid, constraint->options, true, + NULL, NULL); - column->identity = constraint->generated_when; - saw_identity = true; - column->is_not_null = TRUE; - break; - } + column->identity = constraint->generated_when; + saw_identity = true; + column->is_not_null = TRUE; + break; + } case CONSTR_CHECK: cxt->ckconstraints = lappend(cxt->ckconstraints, constraint); @@ -1036,7 +1037,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla if (attribute->attidentity && (table_like_clause->options & CREATE_TABLE_LIKE_IDENTITY)) { - Oid seq_relid; + Oid seq_relid; List *seq_options; /* @@ -1067,7 +1068,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla stmt->objtype = OBJECT_COLUMN; stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname), - makeString(cxt->relation->relname), + makeString(cxt->relation->relname), makeString(def->colname)); stmt->comment = comment; @@ -1132,7 +1133,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla stmt->objtype = OBJECT_TABCONSTRAINT; stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname), - makeString(cxt->relation->relname), + makeString(cxt->relation->relname), makeString(n->conname)); stmt->comment = comment; @@ -2766,7 +2767,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, * change the data type of the sequence. 
*/ attnum = get_attnum(relid, cmd->name); - /* if attribute not found, something will error about it later */ + + /* + * if attribute not found, something will error about it + * later + */ if (attnum != InvalidAttrNumber && get_attidentity(relid, attnum)) { Oid seq_relid = getOwnedSequence(relid, attnum); @@ -2774,7 +2779,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, AlterSeqStmt *altseqstmt = makeNode(AlterSeqStmt); altseqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)), - get_rel_name(seq_relid), + get_rel_name(seq_relid), -1); altseqstmt->options = list_make1(makeDefElem("as", (Node *) makeTypeNameFromOid(typeOid, -1), -1)); altseqstmt->for_identity = true; @@ -2787,8 +2792,8 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, case AT_AddIdentity: { - Constraint *def = castNode(Constraint, cmd->def); - ColumnDef *newdef = makeNode(ColumnDef); + Constraint *def = castNode(Constraint, cmd->def); + ColumnDef *newdef = makeNode(ColumnDef); AttrNumber attnum; newdef->colname = cmd->name; @@ -2796,7 +2801,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, cmd->def = (Node *) newdef; attnum = get_attnum(relid, cmd->name); - /* if attribute not found, something will error about it later */ + + /* + * if attribute not found, something will error about it + * later + */ if (attnum != InvalidAttrNumber) generateSerialExtraStmts(&cxt, newdef, get_atttype(relid, attnum), @@ -2825,7 +2834,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, */ foreach(lc, castNode(List, cmd->def)) { - DefElem *def = lfirst_node(DefElem, lc); + DefElem *def = lfirst_node(DefElem, lc); if (strcmp(def->defname, "generated") == 0) newdef = lappend(newdef, def); @@ -2846,7 +2855,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, seqstmt = makeNode(AlterSeqStmt); seq_relid = linitial_oid(seqlist); seqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)), - get_rel_name(seq_relid), -1); + get_rel_name(seq_relid), -1); seqstmt->options = newseqopts; seqstmt->for_identity = true; seqstmt->missing_ok = false; @@ -2854,8 +2863,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, cxt.alist = lappend(cxt.alist, seqstmt); } } - /* If column was not found or was not an identity column, we - * just let the ALTER TABLE command error out later. */ + + /* + * If column was not found or was not an identity column, + * we just let the ALTER TABLE command error out later. 
+ */ cmd->def = (Node *) newdef; newcmds = lappend(newcmds, cmd); @@ -3392,8 +3404,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, Node *bound) else if (seen_unbounded) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("cannot specify finite value after UNBOUNDED"), - parser_errposition(pstate, exprLocation((Node *) ldatum)))); + errmsg("cannot specify finite value after UNBOUNDED"), + parser_errposition(pstate, exprLocation((Node *) ldatum)))); } seen_unbounded = false; foreach(cell1, spec->upperdatums) @@ -3406,8 +3418,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, Node *bound) else if (seen_unbounded) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("cannot specify finite value after UNBOUNDED"), - parser_errposition(pstate, exprLocation((Node *) rdatum)))); + errmsg("cannot specify finite value after UNBOUNDED"), + parser_errposition(pstate, exprLocation((Node *) rdatum)))); } i = j = 0; diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c index 6e9e03a571..f251ac6788 100644 --- a/src/backend/port/posix_sema.c +++ b/src/backend/port/posix_sema.c @@ -125,7 +125,7 @@ PosixSemaphoreCreate(void) * Attempt to create a new unnamed semaphore. */ static void -PosixSemaphoreCreate(sem_t * sem) +PosixSemaphoreCreate(sem_t *sem) { if (sem_init(sem, 1, 1) < 0) elog(FATAL, "sem_init failed: %m"); @@ -137,7 +137,7 @@ PosixSemaphoreCreate(sem_t * sem) * PosixSemaphoreKill - removes a semaphore */ static void -PosixSemaphoreKill(sem_t * sem) +PosixSemaphoreKill(sem_t *sem) { #ifdef USE_NAMED_POSIX_SEMAPHORES /* Got to use sem_close for named semaphores */ diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index f1194891f5..c3454276bf 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -458,7 +458,7 @@ ReportBackgroundWorkerExit(slist_mutable_iter *cur) { RegisteredBgWorker *rw; BackgroundWorkerSlot *slot; - int notify_pid; + int notify_pid; rw = slist_container(RegisteredBgWorker, rw_lnode, cur->cur); diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c index 48efe15e82..2674bb49ba 100644 --- a/src/backend/postmaster/bgwriter.c +++ b/src/backend/postmaster/bgwriter.c @@ -310,8 +310,8 @@ BackgroundWriterMain(void) * check whether there has been any WAL inserted since the last time * we've logged a running xacts. * - * We do this logging in the bgwriter as it is the only process that is - * run regularly and returns to its mainloop all the time. E.g. + * We do this logging in the bgwriter as it is the only process that + * is run regularly and returns to its mainloop all the time. E.g. * Checkpointer, when active, is barely ever in its mainloop and thus * makes it hard to log regularly. 
*/ @@ -350,7 +350,7 @@ BackgroundWriterMain(void) */ rc = WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - BgWriterDelay /* ms */, WAIT_EVENT_BGWRITER_MAIN); + BgWriterDelay /* ms */ , WAIT_EVENT_BGWRITER_MAIN); /* * If no latch event and BgBufferSync says nothing's happening, extend diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index a8dc355ead..a55071900d 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -558,7 +558,7 @@ CheckpointerMain(void) rc = WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - cur_timeout * 1000L /* convert to ms */, + cur_timeout * 1000L /* convert to ms */ , WAIT_EVENT_CHECKPOINTER_MAIN); /* diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index ba0ad3eb03..f453dade6c 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -181,8 +181,8 @@ static TabStatusArray *pgStatTabList = NULL; */ typedef struct TabStatHashEntry { - Oid t_id; - PgStat_TableStatus* tsa_entry; + Oid t_id; + PgStat_TableStatus *tsa_entry; } TabStatHashEntry; /* @@ -1748,17 +1748,17 @@ pgstat_initstats(Relation rel) static PgStat_TableStatus * get_tabstat_entry(Oid rel_id, bool isshared) { - TabStatHashEntry* hash_entry; + TabStatHashEntry *hash_entry; PgStat_TableStatus *entry; TabStatusArray *tsa; - bool found; + bool found; /* * Create hash table if we don't have it already. */ if (pgStatTabHash == NULL) { - HASHCTL ctl; + HASHCTL ctl; memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); @@ -1837,14 +1837,14 @@ get_tabstat_entry(Oid rel_id, bool isshared) PgStat_TableStatus * find_tabstat_entry(Oid rel_id) { - TabStatHashEntry* hash_entry; + TabStatHashEntry *hash_entry; /* If hashtable doesn't exist, there are no entries at all */ - if(!pgStatTabHash) + if (!pgStatTabHash) return NULL; hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL); - if(!hash_entry) + if (!hash_entry) return NULL; /* Note that this step could also return NULL, but that's correct */ @@ -2872,7 +2872,7 @@ pgstat_bestart(void) break; default: elog(FATAL, "unrecognized process type: %d", - (int) MyAuxProcType); + (int) MyAuxProcType); proc_exit(1); } } @@ -2891,8 +2891,8 @@ pgstat_bestart(void) /* We have userid for client-backends, wal-sender and bgworker processes */ if (beentry->st_backendType == B_BACKEND - || beentry->st_backendType == B_WAL_SENDER - || beentry->st_backendType == B_BG_WORKER) + || beentry->st_backendType == B_WAL_SENDER + || beentry->st_backendType == B_BG_WORKER) beentry->st_userid = GetSessionUserId(); else beentry->st_userid = InvalidOid; @@ -3409,14 +3409,14 @@ pgstat_get_wait_event(uint32 wait_event_info) break; case PG_WAIT_ACTIVITY: { - WaitEventActivity w = (WaitEventActivity) wait_event_info; + WaitEventActivity w = (WaitEventActivity) wait_event_info; event_name = pgstat_get_wait_activity(w); break; } case PG_WAIT_CLIENT: { - WaitEventClient w = (WaitEventClient) wait_event_info; + WaitEventClient w = (WaitEventClient) wait_event_info; event_name = pgstat_get_wait_client(w); break; @@ -3426,14 +3426,14 @@ pgstat_get_wait_event(uint32 wait_event_info) break; case PG_WAIT_IPC: { - WaitEventIPC w = (WaitEventIPC) wait_event_info; + WaitEventIPC w = (WaitEventIPC) wait_event_info; event_name = pgstat_get_wait_ipc(w); break; } case PG_WAIT_TIMEOUT: { - WaitEventTimeout w = (WaitEventTimeout) wait_event_info; + WaitEventTimeout w = (WaitEventTimeout) wait_event_info; event_name = 
pgstat_get_wait_timeout(w); break; @@ -3508,7 +3508,7 @@ pgstat_get_wait_activity(WaitEventActivity w) case WAIT_EVENT_LOGICAL_APPLY_MAIN: event_name = "LogicalApplyMain"; break; - /* no default case, so that compiler will warn */ + /* no default case, so that compiler will warn */ } return event_name; @@ -3548,7 +3548,7 @@ pgstat_get_wait_client(WaitEventClient w) case WAIT_EVENT_WAL_SENDER_WRITE_DATA: event_name = "WalSenderWriteData"; break; - /* no default case, so that compiler will warn */ + /* no default case, so that compiler will warn */ } return event_name; @@ -3612,7 +3612,7 @@ pgstat_get_wait_ipc(WaitEventIPC w) case WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE: event_name = "LogicalSyncStateChange"; break; - /* no default case, so that compiler will warn */ + /* no default case, so that compiler will warn */ } return event_name; @@ -3640,7 +3640,7 @@ pgstat_get_wait_timeout(WaitEventTimeout w) case WAIT_EVENT_RECOVERY_APPLY_DELAY: event_name = "RecoveryApplyDelay"; break; - /* no default case, so that compiler will warn */ + /* no default case, so that compiler will warn */ } return event_name; @@ -4061,6 +4061,7 @@ pgstat_get_backend_desc(BackendType backendType) return backendDesc; } + /* ------------------------------------------------------------ * Local support functions follow * ------------------------------------------------------------ @@ -4405,7 +4406,7 @@ PgstatCollectorMain(int argc, char *argv[]) wr = WaitLatchOrSocket(MyLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT, pgStatSock, - 2 * 1000L /* msec */, + 2 * 1000L /* msec */ , WAIT_EVENT_PGSTAT_MAIN); #endif diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index fdce5524f4..35b4ec88d3 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -1251,7 +1251,7 @@ PostmasterMain(int argc, char *argv[]) ereport(LOG, (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", - LOG_METAINFO_DATAFILE))); + LOG_METAINFO_DATAFILE))); /* * If enabled, start up syslogger collection subprocess @@ -3071,7 +3071,7 @@ CleanupBackgroundWorker(int pid, int exitstatus) /* child's exit status */ { char namebuf[MAXPGPATH]; - slist_mutable_iter iter; + slist_mutable_iter iter; slist_foreach_modify(iter, &BackgroundWorkerList) { @@ -3147,7 +3147,7 @@ CleanupBackgroundWorker(int pid, rw->rw_backend = NULL; rw->rw_pid = 0; rw->rw_child_slot = 0; - ReportBackgroundWorkerExit(&iter); /* report child death */ + ReportBackgroundWorkerExit(&iter); /* report child death */ LogChildExit(EXIT_STATUS_0(exitstatus) ? DEBUG1 : LOG, namebuf, pid, exitstatus); @@ -5149,11 +5149,12 @@ RandomCancelKey(int32 *cancel_key) #ifdef HAVE_STRONG_RANDOM return pg_strong_random((char *) cancel_key, sizeof(int32)); #else + /* * If built with --disable-strong-random, use plain old erand48. * - * We cannot use pg_backend_random() in postmaster, because it stores - * its state in shared memory. + * We cannot use pg_backend_random() in postmaster, because it stores its + * state in shared memory. */ static unsigned short seed[3]; @@ -5348,10 +5349,10 @@ StartAutovacuumWorker(void) if (canAcceptConnections() == CAC_OK) { /* - * Compute the cancel key that will be assigned to this session. - * We probably don't need cancel keys for autovac workers, but - * we'd better have something random in the field to prevent - * unfriendly people from sending cancels to them. + * Compute the cancel key that will be assigned to this session. 
We + * probably don't need cancel keys for autovac workers, but we'd + * better have something random in the field to prevent unfriendly + * people from sending cancels to them. */ if (!RandomCancelKey(&MyCancelKey)) { diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c index aaefdaebad..9f5ca5cac0 100644 --- a/src/backend/postmaster/syslogger.c +++ b/src/backend/postmaster/syslogger.c @@ -1360,7 +1360,7 @@ set_next_rotation_time(void) static void update_metainfo_datafile(void) { - FILE *fh; + FILE *fh; if (!(Log_destination & LOG_DESTINATION_STDERR) && !(Log_destination & LOG_DESTINATION_CSVLOG)) @@ -1369,7 +1369,7 @@ update_metainfo_datafile(void) ereport(LOG, (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", - LOG_METAINFO_DATAFILE))); + LOG_METAINFO_DATAFILE))); return; } @@ -1378,7 +1378,7 @@ update_metainfo_datafile(void) ereport(LOG, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", - LOG_METAINFO_DATAFILE_TMP))); + LOG_METAINFO_DATAFILE_TMP))); return; } @@ -1388,7 +1388,7 @@ update_metainfo_datafile(void) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not write file \"%s\": %m", + errmsg("could not write file \"%s\": %m", LOG_METAINFO_DATAFILE_TMP))); fclose(fh); return; @@ -1401,7 +1401,7 @@ update_metainfo_datafile(void) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not write file \"%s\": %m", + errmsg("could not write file \"%s\": %m", LOG_METAINFO_DATAFILE_TMP))); fclose(fh); return; @@ -1412,8 +1412,8 @@ update_metainfo_datafile(void) if (rename(LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE) != 0) ereport(LOG, (errcode_for_file_access(), - errmsg("could not rename file \"%s\" to \"%s\": %m", - LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE))); + errmsg("could not rename file \"%s\" to \"%s\": %m", + LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE))); } /* -------------------------------- diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 3ee0dd5aa4..cb5f58b6ba 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -58,8 +58,8 @@ static bool sendFile(char *readfilename, char *tarfilename, static void sendFileWithContent(const char *filename, const char *content); static int64 _tarWriteHeader(const char *filename, const char *linktarget, struct stat * statbuf, bool sizeonly); -static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf, - bool sizeonly); +static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf, + bool sizeonly); static void send_int8_string(StringInfoData *buf, int64 intval); static void SendBackupHeader(List *tablespaces); static void base_backup_cleanup(int code, Datum arg); @@ -106,15 +106,15 @@ static const char *excludeDirContents[] = { /* * Skip temporary statistics files. PG_STAT_TMP_DIR must be skipped even - * when stats_temp_directory is set because PGSS_TEXT_FILE is always created - * there. + * when stats_temp_directory is set because PGSS_TEXT_FILE is always + * created there. */ PG_STAT_TMP_DIR, /* - * It is generally not useful to backup the contents of this directory even - * if the intention is to restore to another master. See backup.sgml for a - * more detailed description. + * It is generally not useful to backup the contents of this directory + * even if the intention is to restore to another master. See backup.sgml + * for a more detailed description. 
*/ "pg_replslot", @@ -365,7 +365,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) dir = AllocateDir("pg_wal"); if (!dir) ereport(ERROR, - (errmsg("could not open directory \"%s\": %m", "pg_wal"))); + (errmsg("could not open directory \"%s\": %m", "pg_wal"))); while ((de = ReadDir(dir, "pg_wal")) != NULL) { /* Does it look like a WAL segment, and is it in the range? */ @@ -404,8 +404,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) qsort(walFiles, nWalFiles, sizeof(char *), compareWalFileNames); /* - * There must be at least one xlog file in the pg_wal directory, - * since we are doing backup-including-xlog. + * There must be at least one xlog file in the pg_wal directory, since + * we are doing backup-including-xlog. */ if (nWalFiles < 1) ereport(ERROR, @@ -1036,7 +1036,7 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces, if (strcmp(de->d_name, excludeDirContents[excludeIdx]) == 0) { elog(DEBUG1, "contents of directory \"%s\" excluded from backup", de->d_name); - size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly); + size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly); excludeFound = true; break; } @@ -1281,7 +1281,7 @@ _tarWriteHeader(const char *filename, const char *linktarget, if (!sizeonly) { rc = tarCreateHeader(h, filename, linktarget, statbuf->st_size, - statbuf->st_mode, statbuf->st_uid, statbuf->st_gid, + statbuf->st_mode, statbuf->st_uid, statbuf->st_gid, statbuf->st_mtime); switch (rc) @@ -1295,9 +1295,9 @@ _tarWriteHeader(const char *filename, const char *linktarget, break; case TAR_SYMLINK_TOO_LONG: ereport(ERROR, - (errmsg("symbolic link target too long for tar format: " - "file name \"%s\", target \"%s\"", - filename, linktarget))); + (errmsg("symbolic link target too long for tar format: " + "file name \"%s\", target \"%s\"", + filename, linktarget))); break; default: elog(ERROR, "unrecognized tar error: %d", rc); @@ -1314,7 +1314,7 @@ _tarWriteHeader(const char *filename, const char *linktarget, * write it as a directory anyway. 
*/ static int64 -_tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf, +_tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf, bool sizeonly) { /* If symlink, write it as a directory anyway */ diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 9d7bb25d39..ebe9c91e98 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -40,42 +40,42 @@ void _PG_init(void); struct WalReceiverConn { /* Current connection to the primary, if any */ - PGconn *streamConn; + PGconn *streamConn; /* Used to remember if the connection is logical or physical */ - bool logical; + bool logical; /* Buffer for currently read records */ - char *recvBuf; + char *recvBuf; }; /* Prototypes for interface functions */ static WalReceiverConn *libpqrcv_connect(const char *conninfo, - bool logical, const char *appname, - char **err); + bool logical, const char *appname, + char **err); static void libpqrcv_check_conninfo(const char *conninfo); static char *libpqrcv_get_conninfo(WalReceiverConn *conn); static char *libpqrcv_identify_system(WalReceiverConn *conn, - TimeLineID *primary_tli, - int *server_version); + TimeLineID *primary_tli, + int *server_version); static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn, TimeLineID tli, char **filename, char **content, int *len); static bool libpqrcv_startstreaming(WalReceiverConn *conn, - const WalRcvStreamOptions *options); + const WalRcvStreamOptions *options); static void libpqrcv_endstreaming(WalReceiverConn *conn, - TimeLineID *next_tli); -static int libpqrcv_receive(WalReceiverConn *conn, char **buffer, - pgsocket *wait_fd); + TimeLineID *next_tli); +static int libpqrcv_receive(WalReceiverConn *conn, char **buffer, + pgsocket *wait_fd); static void libpqrcv_send(WalReceiverConn *conn, const char *buffer, - int nbytes); + int nbytes); static char *libpqrcv_create_slot(WalReceiverConn *conn, - const char *slotname, - bool temporary, - CRSSnapshotAction snapshot_action, - XLogRecPtr *lsn); + const char *slotname, + bool temporary, + CRSSnapshotAction snapshot_action, + XLogRecPtr *lsn); static WalRcvExecResult *libpqrcv_exec(WalReceiverConn *conn, - const char *query, - const int nRetTypes, - const Oid *retTypes); + const char *query, + const int nRetTypes, + const Oid *retTypes); static void libpqrcv_disconnect(WalReceiverConn *conn); static WalReceiverFunctionsType PQWalReceiverFunctions = { @@ -153,7 +153,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, conn = palloc0(sizeof(WalReceiverConn)); conn->streamConn = PQconnectStartParams(keys, vals, - /* expand_dbname = */ true); + /* expand_dbname = */ true); if (PQstatus(conn->streamConn) == CONNECTION_BAD) { *err = pchomp(PQerrorMessage(conn->streamConn)); @@ -216,8 +216,8 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, static void libpqrcv_check_conninfo(const char *conninfo) { - PQconninfoOption *opts = NULL; - char *err = NULL; + PQconninfoOption *opts = NULL; + char *err = NULL; opts = PQconninfoParse(conninfo, &err); if (opts == NULL) @@ -362,9 +362,9 @@ libpqrcv_startstreaming(WalReceiverConn *conn, */ if (options->logical) { - char *pubnames_str; - List *pubnames; - char *pubnames_literal; + char *pubnames_str; + List *pubnames; + char *pubnames_literal; appendStringInfoString(&cmd, " ("); @@ -435,8 +435,8 @@ 
libpqrcv_endstreaming(WalReceiverConn *conn, TimeLineID *next_tli) * next timeline's ID, or just CommandComplete if the server was shut * down. * - * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT - * is also possible in case we aborted the copy in mid-stream. + * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT is + * also possible in case we aborted the copy in mid-stream. */ res = PQgetResult(conn->streamConn); if (PQresultStatus(res) == PGRES_TUPLES_OK) @@ -545,9 +545,9 @@ libpqrcv_PQexec(PGconn *streamConn, const char *query) /* * PQexec() silently discards any prior query results on the connection. - * This is not required for this function as it's expected that the - * caller (which is this library in all cases) will behave correctly and - * we don't have to be backwards compatible with old libpq. + * This is not required for this function as it's expected that the caller + * (which is this library in all cases) will behave correctly and we don't + * have to be backwards compatible with old libpq. */ /* @@ -737,9 +737,9 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname, bool temporary, CRSSnapshotAction snapshot_action, XLogRecPtr *lsn) { - PGresult *res; - StringInfoData cmd; - char *snapshot; + PGresult *res; + StringInfoData cmd; + char *snapshot; initStringInfo(&cmd); @@ -777,7 +777,7 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname, } *lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid, - CStringGetDatum(PQgetvalue(res, 0, 1)))); + CStringGetDatum(PQgetvalue(res, 0, 1)))); if (!PQgetisnull(res, 0, 2)) snapshot = pstrdup(PQgetvalue(res, 0, 2)); else @@ -793,15 +793,15 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname, */ static void libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres, - const int nRetTypes, const Oid *retTypes) + const int nRetTypes, const Oid *retTypes) { - int tupn; - int coln; - int nfields = PQnfields(pgres); - HeapTuple tuple; - AttInMetadata *attinmeta; - MemoryContext rowcontext; - MemoryContext oldcontext; + int tupn; + int coln; + int nfields = PQnfields(pgres); + HeapTuple tuple; + AttInMetadata *attinmeta; + MemoryContext rowcontext; + MemoryContext oldcontext; /* Make sure we got expected number of fields. */ if (nfields != nRetTypes) @@ -832,7 +832,7 @@ libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres, /* Process returned rows. */ for (tupn = 0; tupn < PQntuples(pgres); tupn++) { - char *cstrs[MaxTupleAttributeNumber]; + char *cstrs[MaxTupleAttributeNumber]; CHECK_FOR_INTERRUPTS(); @@ -877,7 +877,7 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query, if (MyDatabaseId == InvalidOid) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("the query interface requires a database connection"))); + errmsg("the query interface requires a database connection"))); pgres = libpqrcv_PQexec(conn->streamConn, query); @@ -905,7 +905,7 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query, walres->status = WALRCV_OK_COMMAND; break; - /* Empty query is considered error. */ + /* Empty query is considered error. 
*/ case PGRES_EMPTY_QUERY: walres->status = WALRCV_ERROR; walres->err = _("empty query"); @@ -935,16 +935,16 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query, static char * stringlist_to_identifierstr(PGconn *conn, List *strings) { - ListCell *lc; + ListCell *lc; StringInfoData res; - bool first = true; + bool first = true; initStringInfo(&res); - foreach (lc, strings) + foreach(lc, strings) { - char *val = strVal(lfirst(lc)); - char *val_escaped; + char *val = strVal(lfirst(lc)); + char *val_escaped; if (first) first = false; diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 09c87d7c53..4e2c350dc7 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -57,8 +57,8 @@ /* max sleep time between cycles (3min) */ #define DEFAULT_NAPTIME_PER_CYCLE 180000L -int max_logical_replication_workers = 4; -int max_sync_workers_per_subscription = 2; +int max_logical_replication_workers = 4; +int max_sync_workers_per_subscription = 2; LogicalRepWorker *MyLogicalRepWorker = NULL; @@ -68,7 +68,7 @@ typedef struct LogicalRepCtxStruct pid_t launcher_pid; /* Background workers. */ - LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER]; + LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER]; } LogicalRepCtxStruct; LogicalRepCtxStruct *LogicalRepCtx; @@ -83,9 +83,9 @@ static void logicalrep_worker_cleanup(LogicalRepWorker *worker); volatile sig_atomic_t got_SIGHUP = false; volatile sig_atomic_t got_SIGTERM = false; -static bool on_commit_launcher_wakeup = false; +static bool on_commit_launcher_wakeup = false; -Datum pg_stat_get_subscription(PG_FUNCTION_ARGS); +Datum pg_stat_get_subscription(PG_FUNCTION_ARGS); /* @@ -122,8 +122,8 @@ get_subscription_list(void) while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection))) { Form_pg_subscription subform = (Form_pg_subscription) GETSTRUCT(tup); - Subscription *sub; - MemoryContext oldcxt; + Subscription *sub; + MemoryContext oldcxt; /* * Allocate our results in the caller's context, not the @@ -224,15 +224,16 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker, LogicalRepWorker * logicalrep_worker_find(Oid subid, Oid relid, bool only_running) { - int i; - LogicalRepWorker *res = NULL; + int i; + LogicalRepWorker *res = NULL; Assert(LWLockHeldByMe(LogicalRepWorkerLock)); /* Search for attached worker for a given subscription id. */ for (i = 0; i < max_logical_replication_workers; i++) { - LogicalRepWorker *w = &LogicalRepCtx->workers[i]; + LogicalRepWorker *w = &LogicalRepCtx->workers[i]; + if (w->in_use && w->subid == subid && w->relid == relid && (!only_running || w->proc)) { @@ -251,17 +252,17 @@ void logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, Oid relid) { - BackgroundWorker bgw; + BackgroundWorker bgw; BackgroundWorkerHandle *bgw_handle; - int i; - int slot = 0; - LogicalRepWorker *worker = NULL; - int nsyncworkers; - TimestampTz now; + int i; + int slot = 0; + LogicalRepWorker *worker = NULL; + int nsyncworkers; + TimestampTz now; ereport(LOG, - (errmsg("starting logical replication worker for subscription \"%s\"", - subname))); + (errmsg("starting logical replication worker for subscription \"%s\"", + subname))); /* Report this after the initial starting message for consistency. 
*/ if (max_replication_slots == 0) @@ -300,7 +301,7 @@ retry: */ if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription) { - bool did_cleanup = false; + bool did_cleanup = false; for (i = 0; i < max_logical_replication_workers; i++) { @@ -373,7 +374,7 @@ retry: /* Register the new dynamic worker. */ memset(&bgw, 0, sizeof(bgw)); - bgw.bgw_flags = BGWORKER_SHMEM_ACCESS | + bgw.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; bgw.bgw_start_time = BgWorkerStart_RecoveryFinished; snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres"); @@ -394,7 +395,7 @@ retry: ereport(WARNING, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("out of background worker slots"), - errhint("You might need to increase max_worker_processes."))); + errhint("You might need to increase max_worker_processes."))); return; } @@ -410,7 +411,7 @@ void logicalrep_worker_stop(Oid subid, Oid relid) { LogicalRepWorker *worker; - uint16 generation; + uint16 generation; LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); @@ -435,7 +436,7 @@ logicalrep_worker_stop(Oid subid, Oid relid) */ while (worker->in_use && !worker->proc) { - int rc; + int rc; LWLockRelease(LogicalRepWorkerLock); @@ -478,7 +479,7 @@ logicalrep_worker_stop(Oid subid, Oid relid) /* ... and wait for it to die. */ for (;;) { - int rc; + int rc; LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); if (!worker->proc || worker->generation != generation) @@ -509,7 +510,7 @@ logicalrep_worker_stop(Oid subid, Oid relid) void logicalrep_worker_wakeup(Oid subid, Oid relid) { - LogicalRepWorker *worker; + LogicalRepWorker *worker; LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); worker = logicalrep_worker_find(subid, relid, true); @@ -544,18 +545,18 @@ logicalrep_worker_attach(int slot) { LWLockRelease(LogicalRepWorkerLock); ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical replication worker slot %d is empty, cannot attach", - slot))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("logical replication worker slot %d is empty, cannot attach", + slot))); } if (MyLogicalRepWorker->proc) { LWLockRelease(LogicalRepWorkerLock); ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical replication worker slot %d is already used by " - "another worker, cannot attach", slot))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("logical replication worker slot %d is already used by " + "another worker, cannot attach", slot))); } MyLogicalRepWorker->proc = MyProc; @@ -620,7 +621,7 @@ logicalrep_worker_onexit(int code, Datum arg) void logicalrep_worker_sigterm(SIGNAL_ARGS) { - int save_errno = errno; + int save_errno = errno; got_SIGTERM = true; @@ -634,7 +635,7 @@ logicalrep_worker_sigterm(SIGNAL_ARGS) void logicalrep_worker_sighup(SIGNAL_ARGS) { - int save_errno = errno; + int save_errno = errno; got_SIGHUP = true; @@ -651,15 +652,16 @@ logicalrep_worker_sighup(SIGNAL_ARGS) int logicalrep_sync_worker_count(Oid subid) { - int i; - int res = 0; + int i; + int res = 0; Assert(LWLockHeldByMe(LogicalRepWorkerLock)); /* Search for attached worker for a given subscription id. 
*/ for (i = 0; i < max_logical_replication_workers; i++) { - LogicalRepWorker *w = &LogicalRepCtx->workers[i]; + LogicalRepWorker *w = &LogicalRepCtx->workers[i]; + if (w->subid == subid && OidIsValid(w->relid)) res++; } @@ -699,7 +701,7 @@ ApplyLauncherRegister(void) return; memset(&bgw, 0, sizeof(bgw)); - bgw.bgw_flags = BGWORKER_SHMEM_ACCESS | + bgw.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; bgw.bgw_start_time = BgWorkerStart_RecoveryFinished; snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres"); @@ -729,7 +731,7 @@ ApplyLauncherShmemInit(void) if (!found) { - int slot; + int slot; memset(LogicalRepCtx, 0, ApplyLauncherShmemSize()); @@ -783,7 +785,7 @@ ApplyLauncherWakeup(void) void ApplyLauncherMain(Datum main_arg) { - TimestampTz last_start_time = 0; + TimestampTz last_start_time = 0; ereport(DEBUG1, (errmsg("logical replication launcher started"))); @@ -813,10 +815,10 @@ ApplyLauncherMain(Datum main_arg) int rc; List *sublist; ListCell *lc; - MemoryContext subctx; - MemoryContext oldctx; - TimestampTz now; - long wait_time = DEFAULT_NAPTIME_PER_CYCLE; + MemoryContext subctx; + MemoryContext oldctx; + TimestampTz now; + long wait_time = DEFAULT_NAPTIME_PER_CYCLE; now = GetCurrentTimestamp(); @@ -826,7 +828,7 @@ ApplyLauncherMain(Datum main_arg) { /* Use temporary context for the database list and worker info. */ subctx = AllocSetContextCreate(TopMemoryContext, - "Logical Replication Launcher sublist", + "Logical Replication Launcher sublist", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); @@ -838,8 +840,8 @@ ApplyLauncherMain(Datum main_arg) /* Start the missing workers for enabled subscriptions. */ foreach(lc, sublist) { - Subscription *sub = (Subscription *) lfirst(lc); - LogicalRepWorker *w; + Subscription *sub = (Subscription *) lfirst(lc); + LogicalRepWorker *w; LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); w = logicalrep_worker_find(sub->oid, InvalidOid, false); @@ -864,9 +866,9 @@ ApplyLauncherMain(Datum main_arg) { /* * The wait in previous cycle was interrupted in less than - * wal_retrieve_retry_interval since last worker was started, - * this usually means crash of the worker, so we should retry - * in wal_retrieve_retry_interval again. + * wal_retrieve_retry_interval since last worker was started, this + * usually means crash of the worker, so we should retry in + * wal_retrieve_retry_interval again. */ wait_time = wal_retrieve_retry_interval; } @@ -948,7 +950,7 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS) Datum values[PG_STAT_GET_SUBSCRIPTION_COLS]; bool nulls[PG_STAT_GET_SUBSCRIPTION_COLS]; int worker_pid; - LogicalRepWorker worker; + LogicalRepWorker worker; memcpy(&worker, &LogicalRepCtx->workers[i], sizeof(LogicalRepWorker)); @@ -992,7 +994,10 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS) tuplestore_putvalues(tupstore, tupdesc, values, nulls); - /* If only a single subscription was requested, and we found it, break. */ + /* + * If only a single subscription was requested, and we found it, + * break. 
+ */ if (OidIsValid(subid)) break; } diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 7409e5ce3d..33cb01b8d0 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -118,7 +118,7 @@ StartupDecodingContext(List *output_plugin_options, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write, - LogicalOutputPluginWriterUpdateProgress update_progress) + LogicalOutputPluginWriterUpdateProgress update_progress) { ReplicationSlot *slot; MemoryContext context, @@ -202,8 +202,8 @@ StartupDecodingContext(List *output_plugin_options, * plugin contains the name of the output plugin * output_plugin_options contains options passed to the output plugin * read_page, prepare_write, do_write, update_progress - * callbacks that have to be filled to perform the use-case dependent, - * actual, work. + * callbacks that have to be filled to perform the use-case dependent, + * actual, work. * * Needs to be called while in a memory context that's at least as long lived * as the decoding context because further memory contexts will be created @@ -219,7 +219,7 @@ CreateInitDecodingContext(char *plugin, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write, - LogicalOutputPluginWriterUpdateProgress update_progress) + LogicalOutputPluginWriterUpdateProgress update_progress) { TransactionId xmin_horizon = InvalidTransactionId; ReplicationSlot *slot; diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c index 27164de093..ba4d8cc5a4 100644 --- a/src/backend/replication/logical/logicalfuncs.c +++ b/src/backend/replication/logical/logicalfuncs.c @@ -328,17 +328,19 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm) { LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr); + /* * If only the confirmed_flush_lsn has changed the slot won't get - * marked as dirty by the above. Callers on the walsender interface - * are expected to keep track of their own progress and don't need - * it written out. But SQL-interface users cannot specify their own - * start positions and it's harder for them to keep track of their - * progress, so we should make more of an effort to save it for them. + * marked as dirty by the above. Callers on the walsender + * interface are expected to keep track of their own progress and + * don't need it written out. But SQL-interface users cannot + * specify their own start positions and it's harder for them to + * keep track of their progress, so we should make more of an + * effort to save it for them. * - * Dirty the slot so it's written out at the next checkpoint. We'll - * still lose its position on crash, as documented, but it's better - * than always losing the position even on clean restart. + * Dirty the slot so it's written out at the next checkpoint. + * We'll still lose its position on crash, as documented, but it's + * better than always losing the position even on clean restart. 
*/ ReplicationSlotMarkDirty(); } diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c index adc62a0f3b..ff348ff2a8 100644 --- a/src/backend/replication/logical/proto.c +++ b/src/backend/replication/logical/proto.c @@ -28,7 +28,7 @@ static void logicalrep_write_attrs(StringInfo out, Relation rel); static void logicalrep_write_tuple(StringInfo out, Relation rel, - HeapTuple tuple); + HeapTuple tuple); static void logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel); static void logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple); @@ -72,7 +72,7 @@ void logicalrep_write_commit(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr commit_lsn) { - uint8 flags = 0; + uint8 flags = 0; pq_sendbyte(out, 'C'); /* sending COMMIT */ @@ -92,7 +92,7 @@ void logicalrep_read_commit(StringInfo in, LogicalRepCommitData *commit_data) { /* read flags (unused for now) */ - uint8 flags = pq_getmsgbyte(in); + uint8 flags = pq_getmsgbyte(in); if (flags != 0) elog(ERROR, "unrecognized flags %u in commit message", flags); @@ -136,7 +136,7 @@ logicalrep_read_origin(StringInfo in, XLogRecPtr *origin_lsn) * Write INSERT to the output stream. */ void -logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple) +logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple) { pq_sendbyte(out, 'I'); /* action INSERT */ @@ -160,7 +160,7 @@ LogicalRepRelId logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup) { char action; - LogicalRepRelId relid; + LogicalRepRelId relid; /* read the relation id */ relid = pq_getmsgint(in, 4); @@ -180,7 +180,7 @@ logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup) */ void logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple, - HeapTuple newtuple) + HeapTuple newtuple) { pq_sendbyte(out, 'U'); /* action UPDATE */ @@ -194,9 +194,9 @@ logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple, if (oldtuple != NULL) { if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL) - pq_sendbyte(out, 'O'); /* old tuple follows */ + pq_sendbyte(out, 'O'); /* old tuple follows */ else - pq_sendbyte(out, 'K'); /* old key follows */ + pq_sendbyte(out, 'K'); /* old key follows */ logicalrep_write_tuple(out, rel, oldtuple); } @@ -213,7 +213,7 @@ logicalrep_read_update(StringInfo in, bool *has_oldtuple, LogicalRepTupleData *newtup) { char action; - LogicalRepRelId relid; + LogicalRepRelId relid; /* read the relation id */ relid = pq_getmsgint(in, 4); @@ -277,7 +277,7 @@ LogicalRepRelId logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup) { char action; - LogicalRepRelId relid; + LogicalRepRelId relid; /* read the relation id */ relid = pq_getmsgint(in, 4); @@ -323,7 +323,7 @@ logicalrep_write_rel(StringInfo out, Relation rel) LogicalRepRelation * logicalrep_read_rel(StringInfo in) { - LogicalRepRelation *rel = palloc(sizeof(LogicalRepRelation)); + LogicalRepRelation *rel = palloc(sizeof(LogicalRepRelation)); rel->remoteid = pq_getmsgint(in, 4); @@ -424,12 +424,12 @@ logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple) if (isnull[i]) { - pq_sendbyte(out, 'n'); /* null column */ + pq_sendbyte(out, 'n'); /* null column */ continue; } else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i])) { - pq_sendbyte(out, 'u'); /* unchanged toast column */ + pq_sendbyte(out, 'u'); /* unchanged toast column */ continue; } @@ -473,21 +473,21 @@ logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple) switch (kind) { - case 'n': /* 
null */ + case 'n': /* null */ tuple->values[i] = NULL; tuple->changed[i] = true; break; - case 'u': /* unchanged column */ + case 'u': /* unchanged column */ /* we don't receive the value of an unchanged column */ tuple->values[i] = NULL; break; - case 't': /* text formatted value */ + case 't': /* text formatted value */ { int len; tuple->changed[i] = true; - len = pq_getmsgint(in, 4); /* read length */ + len = pq_getmsgint(in, 4); /* read length */ /* and data */ tuple->values[i] = palloc(len + 1); @@ -534,7 +534,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel) for (i = 0; i < desc->natts; i++) { Form_pg_attribute att = desc->attrs[i]; - uint8 flags = 0; + uint8 flags = 0; if (att->attisdropped) continue; @@ -612,7 +612,7 @@ logicalrep_write_namespace(StringInfo out, Oid nspid) pq_sendbyte(out, '\0'); else { - char *nspname = get_namespace_name(nspid); + char *nspname = get_namespace_name(nspid); if (nspname == NULL) elog(ERROR, "cache lookup failed for namespace %u", diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index 590355a846..41eff8971a 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -30,13 +30,13 @@ #include "utils/memutils.h" #include "utils/syscache.h" -static MemoryContext LogicalRepRelMapContext = NULL; +static MemoryContext LogicalRepRelMapContext = NULL; -static HTAB *LogicalRepRelMap = NULL; -static HTAB *LogicalRepTypMap = NULL; +static HTAB *LogicalRepRelMap = NULL; +static HTAB *LogicalRepTypMap = NULL; static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, - uint32 hashvalue); + uint32 hashvalue); /* * Relcache invalidation callback for our relation map cache. @@ -44,7 +44,7 @@ static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, static void logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid) { - LogicalRepRelMapEntry *entry; + LogicalRepRelMapEntry *entry; /* Just to be sure. */ if (LogicalRepRelMap == NULL) @@ -110,7 +110,7 @@ logicalrep_relmap_init(void) /* This will usually be small. */ LogicalRepTypMap = hash_create("logicalrep type map cache", 2, &ctl, - HASH_ELEM | HASH_BLOBS |HASH_CONTEXT); + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); /* Watch for invalidation events. 
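Annotation (not part of the commit above): the proto.c hunks reindent the code that serializes tuples for the logical replication wire format. For orientation, a sketch of the per-column markers that logicalrep_write_tuple()/logicalrep_read_tuple() use, mirroring the reader loop shown in the hunks (the StringInfo "in", the LogicalRepTupleData "tuple" and "natts" are assumed from that context; the real code errors out on any other marker):

    int     i;
    int     len;

    for (i = 0; i < natts; i++)
    {
        char    kind = pq_getmsgbyte(in);

        switch (kind)
        {
            case 'n':           /* column is SQL NULL */
                tuple->values[i] = NULL;
                tuple->changed[i] = true;
                break;
            case 'u':           /* unchanged TOASTed value; not transmitted */
                tuple->values[i] = NULL;
                break;
            case 't':           /* value follows in text format */
                len = pq_getmsgint(in, 4);
                tuple->values[i] = palloc(len + 1);
                pq_copymsgbytes(in, tuple->values[i], len);
                tuple->values[i][len] = '\0';
                tuple->changed[i] = true;
                break;
        }
    }

UPDATE messages additionally prefix an optional old tuple with 'O' (whole old row, REPLICA IDENTITY FULL) or 'K' (replica-identity key columns only), as the logicalrep_write_update() hunk shows.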
*/ CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb, @@ -134,7 +134,7 @@ logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry) if (remoterel->natts > 0) { - int i; + int i; for (i = 0; i < remoterel->natts; i++) pfree(remoterel->attnames[i]); @@ -157,10 +157,10 @@ logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry) void logicalrep_relmap_update(LogicalRepRelation *remoterel) { - MemoryContext oldctx; - LogicalRepRelMapEntry *entry; - bool found; - int i; + MemoryContext oldctx; + LogicalRepRelMapEntry *entry; + bool found; + int i; if (LogicalRepRelMap == NULL) logicalrep_relmap_init(); @@ -202,7 +202,7 @@ logicalrep_relmap_update(LogicalRepRelation *remoterel) static int logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname) { - int i; + int i; for (i = 0; i < remoterel->natts; i++) { @@ -222,7 +222,7 @@ logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname) LogicalRepRelMapEntry * logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) { - LogicalRepRelMapEntry *entry; + LogicalRepRelMapEntry *entry; bool found; if (LogicalRepRelMap == NULL) @@ -245,7 +245,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) Bitmapset *idkey; TupleDesc desc; LogicalRepRelation *remoterel; - MemoryContext oldctx; + MemoryContext oldctx; + remoterel = &entry->remoterel; /* Try to find and lock the relation by name. */ @@ -265,8 +266,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) /* * Build the mapping of local attribute numbers to remote attribute - * numbers and validate that we don't miss any replicated columns - * as that would result in potentially unwanted data loss. + * numbers and validate that we don't miss any replicated columns as + * that would result in potentially unwanted data loss. */ desc = RelationGetDescr(entry->localrel); oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext); @@ -276,8 +277,9 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) found = 0; for (i = 0; i < desc->natts; i++) { - int attnum = logicalrep_rel_att_by_name(remoterel, - NameStr(desc->attrs[i]->attname)); + int attnum = logicalrep_rel_att_by_name(remoterel, + NameStr(desc->attrs[i]->attname)); + entry->attrmap[i] = attnum; if (attnum >= 0) found++; @@ -287,9 +289,9 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) if (found < remoterel->natts) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical replication target relation \"%s.%s\" is missing " - "some replicated columns", - remoterel->nspname, remoterel->relname))); + errmsg("logical replication target relation \"%s.%s\" is missing " + "some replicated columns", + remoterel->nspname, remoterel->relname))); /* * Check that replica identity matches. We allow for stricter replica @@ -299,8 +301,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) * but in the opposite scenario it will. * * Don't throw any error here just mark the relation entry as not - * updatable, as replica identity is only for updates and deletes - * but inserts can be replicated even without it. + * updatable, as replica identity is only for updates and deletes but + * inserts can be replicated even without it. 
*/ entry->updatable = true; idkey = RelationGetIndexAttrBitmap(entry->localrel, @@ -310,6 +312,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) { idkey = RelationGetIndexAttrBitmap(entry->localrel, INDEX_ATTR_BITMAP_PRIMARY_KEY); + /* * If no replica identity index and no PK, the published table * must have replica identity FULL. @@ -321,14 +324,14 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) i = -1; while ((i = bms_next_member(idkey, i)) >= 0) { - int attnum = i + FirstLowInvalidHeapAttributeNumber; + int attnum = i + FirstLowInvalidHeapAttributeNumber; if (!AttrNumberIsForUserDefinedAttr(attnum)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical replication target relation \"%s.%s\" uses " - "system columns in REPLICA IDENTITY index", - remoterel->nspname, remoterel->relname))); + errmsg("logical replication target relation \"%s.%s\" uses " + "system columns in REPLICA IDENTITY index", + remoterel->nspname, remoterel->relname))); attnum = AttrNumberGetAttrOffset(attnum); @@ -371,7 +374,7 @@ static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, uint32 hashvalue) { HASH_SEQ_STATUS status; - LogicalRepTyp *entry; + LogicalRepTyp *entry; /* Just to be sure. */ if (LogicalRepTypMap == NULL) @@ -402,9 +405,9 @@ logicalrep_typmap_free_entry(LogicalRepTyp *entry) void logicalrep_typmap_update(LogicalRepTyp *remotetyp) { - MemoryContext oldctx; - LogicalRepTyp *entry; - bool found; + MemoryContext oldctx; + LogicalRepTyp *entry; + bool found; if (LogicalRepTypMap == NULL) logicalrep_relmap_init(); @@ -433,9 +436,9 @@ logicalrep_typmap_update(LogicalRepTyp *remotetyp) Oid logicalrep_typmap_getid(Oid remoteid) { - LogicalRepTyp *entry; - bool found; - Oid nspoid; + LogicalRepTyp *entry; + bool found; + Oid nspoid; /* Internal types are mapped directly. 
*/ if (remoteid < FirstNormalObjectId) diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 428d7aa55e..8848f5b4ec 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -59,7 +59,7 @@ * by the following graph describing the SnapBuild->state transitions: * * +-------------------------+ - * +----| START |-------------+ + * +----| START |-------------+ * | +-------------------------+ | * | | | * | | | @@ -68,22 +68,22 @@ * | | | * | v | * | +-------------------------+ v - * | | BUILDING_SNAPSHOT |------------>| + * | | BUILDING_SNAPSHOT |------------>| * | +-------------------------+ | * | | | * | | | - * | running_xacts #2, xacts from #1 finished | + * | running_xacts #2, xacts from #1 finished | * | | | * | | | * | v | * | +-------------------------+ v - * | | FULL_SNAPSHOT |------------>| + * | | FULL_SNAPSHOT |------------>| * | +-------------------------+ | * | | | * running_xacts | saved snapshot * with zero xacts | at running_xacts's lsn * | | | - * | running_xacts with xacts from #2 finished | + * | running_xacts with xacts from #2 finished | * | | | * | v | * | +-------------------------+ | @@ -209,9 +209,9 @@ struct SnapBuild TransactionId was_xmin; TransactionId was_xmax; - size_t was_xcnt; /* number of used xip entries */ - size_t was_xcnt_space; /* allocated size of xip */ - TransactionId *was_xip; /* running xacts array, xidComparator-sorted */ + size_t was_xcnt; /* number of used xip entries */ + size_t was_xcnt_space; /* allocated size of xip */ + TransactionId *was_xip; /* running xacts array, xidComparator-sorted */ } was_running; /* @@ -608,8 +608,8 @@ SnapBuildInitialSnapshot(SnapBuild *builder) { if (newxcnt >= GetMaxSnapshotXidCount()) ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("initial slot snapshot too large"))); + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("initial slot snapshot too large"))); newxip[newxcnt++] = xid; } @@ -986,6 +986,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid, if (NormalTransactionIdFollows(subxid, xmax)) xmax = subxid; } + /* * If we're forcing timetravel we also need visibility information * about subtransaction, so keep track of subtransaction's state, even @@ -1031,8 +1032,8 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid, /* * Adjust xmax of the snapshot builder, we only do that for committed, - * catalog modifying, transactions, everything else isn't interesting - * for us since we'll never look at the respective rows. + * catalog modifying, transactions, everything else isn't interesting for + * us since we'll never look at the respective rows. */ if (needs_timetravel && (!TransactionIdIsValid(builder->xmax) || @@ -1130,8 +1131,8 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact running->oldestRunningXid); /* - * Increase shared memory limits, so vacuum can work on tuples we prevented - * from being pruned till now. + * Increase shared memory limits, so vacuum can work on tuples we + * prevented from being pruned till now. */ LogicalIncreaseXminForSlot(lsn, running->oldestRunningXid); @@ -1202,11 +1203,11 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn * modifying transactions. * * c) First incrementally build a snapshot for catalog tuples - * (BUILDING_SNAPSHOT), that requires all, already in-progress, - * transactions to finish. 
Every transaction starting after that - * (FULL_SNAPSHOT state), has enough information to be decoded. But - * for older running transactions no viable snapshot exists yet, so - * CONSISTENT will only be reached once all of those have finished. + * (BUILDING_SNAPSHOT), that requires all, already in-progress, + * transactions to finish. Every transaction starting after that + * (FULL_SNAPSHOT state), has enough information to be decoded. But + * for older running transactions no viable snapshot exists yet, so + * CONSISTENT will only be reached once all of those have finished. * --- */ @@ -1271,6 +1272,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn /* there won't be any state to cleanup */ return false; } + /* * c) transition from START to BUILDING_SNAPSHOT. * @@ -1308,6 +1310,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn SnapBuildWaitSnapshot(running, running->nextXid); } + /* * c) transition from BUILDING_SNAPSHOT to FULL_SNAPSHOT. * @@ -1324,13 +1327,14 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn SnapBuildStartNextPhaseAt(builder, running->nextXid); ereport(LOG, - (errmsg("logical decoding found initial consistent point at %X/%X", - (uint32) (lsn >> 32), (uint32) lsn), - errdetail("Waiting for transactions (approximately %d) older than %u to end.", - running->xcnt, running->nextXid))); + (errmsg("logical decoding found initial consistent point at %X/%X", + (uint32) (lsn >> 32), (uint32) lsn), + errdetail("Waiting for transactions (approximately %d) older than %u to end.", + running->xcnt, running->nextXid))); SnapBuildWaitSnapshot(running, running->nextXid); } + /* * c) transition from FULL_SNAPSHOT to CONSISTENT. * @@ -1368,9 +1372,9 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn * * This isn't required for the correctness of decoding, but to: * a) allow isolationtester to notice that we're currently waiting for - * something. + * something. * b) log a new xl_running_xacts record where it'd be helpful, without having - * to write for bgwriter or checkpointer. + * to write for bgwriter or checkpointer. * --- */ static void @@ -1383,9 +1387,9 @@ SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff) TransactionId xid = running->xids[off]; /* - * Upper layers should prevent that we ever need to wait on - * ourselves. Check anyway, since failing to do so would either - * result in an endless wait or an Assert() failure. + * Upper layers should prevent that we ever need to wait on ourselves. + * Check anyway, since failing to do so would either result in an + * endless wait or an Assert() failure. */ if (TransactionIdIsCurrentTransactionId(xid)) elog(ERROR, "waiting for ourselves"); @@ -1864,8 +1868,9 @@ CheckPointSnapBuild(void) char path[MAXPGPATH + 21]; /* - * We start off with a minimum of the last redo pointer. No new replication - * slot will start before that, so that's a safe upper bound for removal. + * We start off with a minimum of the last redo pointer. No new + * replication slot will start before that, so that's a safe upper bound + * for removal. */ redo = GetRedoRecPtr(); diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 7e51076b37..1e3753b8fe 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -113,7 +113,8 @@ StringInfo copybuf = NULL; /* * Exit routine for synchronization worker. 
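Annotation (not part of the commit above): the snapbuild.c hunks above reflow the comments describing the snapshot builder's state machine. As a compact restatement of that description (the names below are illustrative stand-ins; the file's own enum is SnapBuildState):

    typedef enum
    {
        SKETCH_START,               /* no consistent point known yet */
        SKETCH_BUILDING_SNAPSHOT,   /* saw a xl_running_xacts record; waiting
                                     * for the transactions running at that
                                     * point to finish */
        SKETCH_FULL_SNAPSHOT,       /* transactions starting from here on can
                                     * be decoded; older in-progress ones
                                     * cannot */
        SKETCH_CONSISTENT           /* all of those older transactions have
                                     * ended; a usable snapshot exists and
                                     * decoding can begin */
    } SketchSnapBuildState;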
*/ -static void pg_attribute_noreturn() +static void +pg_attribute_noreturn() finish_sync_worker(void) { /* @@ -148,12 +149,12 @@ finish_sync_worker(void) static bool wait_for_sync_status_change(Oid relid, char origstate) { - int rc; - char state = origstate; + int rc; + char state = origstate; while (!got_SIGTERM) { - LogicalRepWorker *worker; + LogicalRepWorker *worker; LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); worker = logicalrep_worker_find(MyLogicalRepWorker->subid, @@ -269,7 +270,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) struct tablesync_start_time_mapping { Oid relid; - TimestampTz last_start_time; + TimestampTz last_start_time; }; static List *table_states = NIL; static HTAB *last_start_times = NULL; @@ -281,9 +282,9 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) /* We need up to date sync state info for subscription tables here. */ if (!table_states_valid) { - MemoryContext oldctx; - List *rstates; - ListCell *lc; + MemoryContext oldctx; + List *rstates; + ListCell *lc; SubscriptionRelState *rstate; /* Clean the old list. */ @@ -294,7 +295,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) started_tx = true; /* Fetch all non-ready tables. */ - rstates = GetSubscriptionNotReadyRelations(MySubscription->oid); + rstates = GetSubscriptionNotReadyRelations(MySubscription->oid); /* Allocate the tracking info in a permanent memory context. */ oldctx = MemoryContextSwitchTo(CacheMemoryContext); @@ -324,6 +325,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) last_start_times = hash_create("Logical replication table sync worker start times", 256, &ctl, HASH_ELEM | HASH_BLOBS); } + /* * Clean up the hash table when we're done with all tables (just to * release the bit of memory). @@ -337,14 +339,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) /* Process all tables that are being synchronized. */ foreach(lc, table_states) { - SubscriptionRelState *rstate = (SubscriptionRelState *)lfirst(lc); + SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc); if (rstate->state == SUBREL_STATE_SYNCDONE) { /* - * Apply has caught up to the position where the table sync - * has finished. Time to mark the table as ready so that - * apply will just continue to replicate it normally. + * Apply has caught up to the position where the table sync has + * finished. Time to mark the table as ready so that apply will + * just continue to replicate it normally. */ if (current_lsn >= rstate->lsn) { @@ -362,8 +364,8 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) } else { - LogicalRepWorker *syncworker; - int nsyncworkers = 0; + LogicalRepWorker *syncworker; + int nsyncworkers = 0; LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); syncworker = logicalrep_worker_find(MyLogicalRepWorker->subid, @@ -376,6 +378,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) SpinLockRelease(&syncworker->relmutex); } else + /* * If no sync worker for this table yet, count running sync * workers for this subscription, while we have the lock, for @@ -394,16 +397,16 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * There are three possible synchronization situations here. * * a) Apply is in front of the table sync: We tell the table - * sync to CATCHUP. + * sync to CATCHUP. * * b) Apply is behind the table sync: We tell the table sync - * to mark the table as SYNCDONE and finish. - + * to mark the table as SYNCDONE and finish. 
+ * * c) Apply and table sync are at the same position: We tell - * table sync to mark the table as READY and finish. + * table sync to mark the table as READY and finish. * - * In any case we'll need to wait for table sync to change - * the state in catalog and only then continue ourselves. + * In any case we'll need to wait for table sync to change the + * state in catalog and only then continue ourselves. */ if (current_lsn > rstate->lsn) { @@ -427,20 +430,19 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) logicalrep_worker_wakeup_ptr(syncworker); /* - * Enter busy loop and wait for synchronization status - * change. + * Enter busy loop and wait for synchronization status change. */ wait_for_sync_status_change(rstate->relid, rstate->state); } /* - * If there is no sync worker registered for the table and - * there is some free sync worker slot, start new sync worker - * for the table. + * If there is no sync worker registered for the table and there + * is some free sync worker slot, start new sync worker for the + * table. */ else if (!syncworker && nsyncworkers < max_sync_workers_per_subscription) { - TimestampTz now = GetCurrentTimestamp(); + TimestampTz now = GetCurrentTimestamp(); struct tablesync_start_time_mapping *hentry; bool found; @@ -492,7 +494,7 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel) for (i = 0; i < desc->natts; i++) { - int remoteattnum = rel->attrmap[i]; + int remoteattnum = rel->attrmap[i]; /* Skip dropped attributes. */ if (desc->attrs[i]->attisdropped) @@ -503,7 +505,7 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel) continue; attnamelist = lappend(attnamelist, - makeString(rel->remoterel.attnames[remoteattnum])); + makeString(rel->remoterel.attnames[remoteattnum])); } return attnamelist; @@ -516,8 +518,8 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel) static int copy_read_data(void *outbuf, int minread, int maxread) { - int bytesread = 0; - int avail; + int bytesread = 0; + int avail; /* If there are some leftover data from previous read, use them. */ avail = copybuf->len - copybuf->cursor; @@ -601,13 +603,13 @@ static void fetch_remote_table_info(char *nspname, char *relname, LogicalRepRelation *lrel) { - WalRcvExecResult *res; - StringInfoData cmd; - TupleTableSlot *slot; - Oid tableRow[2] = {OIDOID, CHAROID}; - Oid attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID}; - bool isnull; - int natt; + WalRcvExecResult *res; + StringInfoData cmd; + TupleTableSlot *slot; + Oid tableRow[2] = {OIDOID, CHAROID}; + Oid attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID}; + bool isnull; + int natt; lrel->nspname = nspname; lrel->relname = relname; @@ -615,14 +617,14 @@ fetch_remote_table_info(char *nspname, char *relname, /* First fetch Oid and replica identity. 
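Annotation (not part of the commit above): the process_syncing_tables_for_apply() hunks above rewrap the comment describing the handshake between the apply worker and a table-sync worker. Paraphrasing that comment as code (a fragment; next_state stands in for the spinlock-protected update of the sync worker's relstate, and SUBREL_STATE_* are the constants the file already uses):

    if (current_lsn > rstate->lsn)
        next_state = SUBREL_STATE_CATCHUP;      /* a) apply is ahead: sync catches up */
    else if (current_lsn == rstate->lsn)
        next_state = SUBREL_STATE_READY;        /* c) same position: table is ready */
    else
        next_state = SUBREL_STATE_SYNCDONE;     /* b) sync is ahead: mark done; apply
                                                 * marks READY once it catches up */

    /* Either way, wake the sync worker and wait for it to record the change. */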
*/ initStringInfo(&cmd); appendStringInfo(&cmd, "SELECT c.oid, c.relreplident" - " FROM pg_catalog.pg_class c" - " INNER JOIN pg_catalog.pg_namespace n" - " ON (c.relnamespace = n.oid)" - " WHERE n.nspname = %s" - " AND c.relname = %s" - " AND c.relkind = 'r'", - quote_literal_cstr(nspname), - quote_literal_cstr(relname)); + " FROM pg_catalog.pg_class c" + " INNER JOIN pg_catalog.pg_namespace n" + " ON (c.relnamespace = n.oid)" + " WHERE n.nspname = %s" + " AND c.relname = %s" + " AND c.relkind = 'r'", + quote_literal_cstr(nspname), + quote_literal_cstr(relname)); res = walrcv_exec(wrconn, cmd.data, 2, tableRow); if (res->status != WALRCV_OK_TUPLES) @@ -653,7 +655,7 @@ fetch_remote_table_info(char *nspname, char *relname, " a.attnum = ANY(i.indkey)" " FROM pg_catalog.pg_attribute a" " LEFT JOIN pg_catalog.pg_index i" - " ON (i.indexrelid = pg_get_replica_identity_index(%u))" + " ON (i.indexrelid = pg_get_replica_identity_index(%u))" " WHERE a.attnum > 0::pg_catalog.int2" " AND NOT a.attisdropped" " AND a.attrelid = %u" @@ -686,7 +688,7 @@ fetch_remote_table_info(char *nspname, char *relname, /* Should never happen. */ if (++natt >= MaxTupleAttributeNumber) elog(ERROR, "too many columns in remote table \"%s.%s\"", - nspname, relname); + nspname, relname); ExecClearTuple(slot); } @@ -707,9 +709,9 @@ static void copy_table(Relation rel) { LogicalRepRelMapEntry *relmapentry; - LogicalRepRelation lrel; - WalRcvExecResult *res; - StringInfoData cmd; + LogicalRepRelation lrel; + WalRcvExecResult *res; + StringInfoData cmd; CopyState cstate; List *attnamelist; ParseState *pstate; @@ -759,8 +761,8 @@ copy_table(Relation rel) char * LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) { - char *slotname; - char *err; + char *slotname; + char *err; char relstate; XLogRecPtr relstate_lsn; @@ -783,7 +785,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) * NAMEDATALEN on the remote that matters, but this scheme will also work * reasonably if that is different.) */ - StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small"); /* for sanity */ + StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small"); /* for sanity */ slotname = psprintf("%.*s_%u_sync_%u", NAMEDATALEN - 28, MySubscription->slotname, @@ -801,7 +803,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) case SUBREL_STATE_DATASYNC: { Relation rel; - WalRcvExecResult *res; + WalRcvExecResult *res; SpinLockAcquire(&MyLogicalRepWorker->relmutex); MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC; @@ -818,24 +820,23 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) pgstat_report_stat(false); /* - * We want to do the table data sync in single - * transaction. + * We want to do the table data sync in single transaction. */ StartTransactionCommand(); /* * Use standard write lock here. It might be better to - * disallow access to table while it's being synchronized. - * But we don't want to block the main apply process from - * working and it has to open relation in RowExclusiveLock - * when remapping remote relation id to local one. + * disallow access to table while it's being synchronized. But + * we don't want to block the main apply process from working + * and it has to open relation in RowExclusiveLock when + * remapping remote relation id to local one. */ rel = heap_open(MyLogicalRepWorker->relid, RowExclusiveLock); /* - * Create temporary slot for the sync process. - * We do this inside transaction so that we can use the - * snapshot made by the slot to get existing data. 
+ * Create temporary slot for the sync process. We do this + * inside transaction so that we can use the snapshot made by + * the slot to get existing data. */ res = walrcv_exec(wrconn, "BEGIN READ ONLY ISOLATION LEVEL " @@ -849,10 +850,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) /* * Create new temporary logical decoding slot. * - * We'll use slot for data copy so make sure the snapshot - * is used for the transaction, that way the COPY will get - * data that is consistent with the lsn used by the slot - * to start decoding. + * We'll use slot for data copy so make sure the snapshot is + * used for the transaction, that way the COPY will get data + * that is consistent with the lsn used by the slot to start + * decoding. */ walrcv_create_slot(wrconn, slotname, true, CRS_USE_SNAPSHOT, origin_startpos); @@ -872,8 +873,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) CommandCounterIncrement(); /* - * We are done with the initial data synchronization, - * update the state. + * We are done with the initial data synchronization, update + * the state. */ SpinLockAcquire(&MyLogicalRepWorker->relmutex); MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT; @@ -881,8 +882,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) SpinLockRelease(&MyLogicalRepWorker->relmutex); /* - * Wait for main apply worker to either tell us to - * catchup or that we are done. + * Wait for main apply worker to either tell us to catchup or + * that we are done. */ wait_for_sync_status_change(MyLogicalRepWorker->relid, MyLogicalRepWorker->relstate); diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 04813b506e..9d1eab9e1e 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -88,29 +88,29 @@ typedef struct FlushPosition { - dlist_node node; - XLogRecPtr local_end; - XLogRecPtr remote_end; + dlist_node node; + XLogRecPtr local_end; + XLogRecPtr remote_end; } FlushPosition; static dlist_head lsn_mapping = DLIST_STATIC_INIT(lsn_mapping); typedef struct SlotErrCallbackArg { - LogicalRepRelation *rel; + LogicalRepRelation *rel; int attnum; } SlotErrCallbackArg; -static MemoryContext ApplyMessageContext = NULL; -MemoryContext ApplyContext = NULL; +static MemoryContext ApplyMessageContext = NULL; +MemoryContext ApplyContext = NULL; -WalReceiverConn *wrconn = NULL; +WalReceiverConn *wrconn = NULL; -Subscription *MySubscription = NULL; -bool MySubscriptionValid = false; +Subscription *MySubscription = NULL; +bool MySubscriptionValid = false; -bool in_remote_transaction = false; -static XLogRecPtr remote_final_lsn = InvalidXLogRecPtr; +bool in_remote_transaction = false; +static XLogRecPtr remote_final_lsn = InvalidXLogRecPtr; static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply); @@ -215,7 +215,7 @@ create_estate_for_relation(LogicalRepRelMapEntry *rel) */ static void slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate, - TupleTableSlot *slot) + TupleTableSlot *slot) { TupleDesc desc = RelationGetDescr(rel->localrel); int num_phys_attrs = desc->natts; @@ -271,9 +271,9 @@ slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate, static void slot_store_error_callback(void *arg) { - SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg; - Oid remotetypoid, - localtypoid; + SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg; + Oid remotetypoid, + localtypoid; if (errarg->attnum < 0) return; @@ -295,12 +295,12 @@ slot_store_error_callback(void *arg) */ 
static void slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, - char **values) + char **values) { - int natts = slot->tts_tupleDescriptor->natts; - int i; - SlotErrCallbackArg errarg; - ErrorContextCallback errcallback; + int natts = slot->tts_tupleDescriptor->natts; + int i; + SlotErrCallbackArg errarg; + ErrorContextCallback errcallback; ExecClearTuple(slot); @@ -315,14 +315,14 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, /* Call the "in" function for each non-dropped attribute */ for (i = 0; i < natts; i++) { - Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i]; - int remoteattnum = rel->attrmap[i]; + Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i]; + int remoteattnum = rel->attrmap[i]; if (!att->attisdropped && remoteattnum >= 0 && values[remoteattnum] != NULL) { - Oid typinput; - Oid typioparam; + Oid typinput; + Oid typioparam; errarg.attnum = remoteattnum; @@ -359,12 +359,12 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, */ static void slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, - char **values, bool *replaces) + char **values, bool *replaces) { - int natts = slot->tts_tupleDescriptor->natts; - int i; - SlotErrCallbackArg errarg; - ErrorContextCallback errcallback; + int natts = slot->tts_tupleDescriptor->natts; + int i; + SlotErrCallbackArg errarg; + ErrorContextCallback errcallback; slot_getallattrs(slot); ExecClearTuple(slot); @@ -380,16 +380,16 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, /* Call the "in" function for each replaced attribute */ for (i = 0; i < natts; i++) { - Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i]; - int remoteattnum = rel->attrmap[i]; + Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i]; + int remoteattnum = rel->attrmap[i]; if (remoteattnum >= 0 && !replaces[remoteattnum]) continue; if (remoteattnum >= 0 && values[remoteattnum] != NULL) { - Oid typinput; - Oid typioparam; + Oid typinput; + Oid typioparam; errarg.attnum = remoteattnum; @@ -418,7 +418,7 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, static void apply_handle_begin(StringInfo s) { - LogicalRepBeginData begin_data; + LogicalRepBeginData begin_data; logicalrep_read_begin(s, &begin_data); @@ -437,7 +437,7 @@ apply_handle_begin(StringInfo s) static void apply_handle_commit(StringInfo s) { - LogicalRepCommitData commit_data; + LogicalRepCommitData commit_data; logicalrep_read_commit(s, &commit_data); @@ -476,8 +476,8 @@ static void apply_handle_origin(StringInfo s) { /* - * ORIGIN message can only come inside remote transaction and before - * any actual writes. + * ORIGIN message can only come inside remote transaction and before any + * actual writes. 
*/ if (!in_remote_transaction || (IsTransactionState() && !am_tablesync_worker())) @@ -497,7 +497,7 @@ apply_handle_origin(StringInfo s) static void apply_handle_relation(StringInfo s) { - LogicalRepRelation *rel; + LogicalRepRelation *rel; rel = logicalrep_read_rel(s); logicalrep_relmap_update(rel); @@ -512,7 +512,7 @@ apply_handle_relation(StringInfo s) static void apply_handle_type(StringInfo s) { - LogicalRepTyp typ; + LogicalRepTyp typ; logicalrep_read_typ(s, &typ); logicalrep_typmap_update(&typ); @@ -526,7 +526,7 @@ apply_handle_type(StringInfo s) static Oid GetRelationIdentityOrPK(Relation rel) { - Oid idxoid; + Oid idxoid; idxoid = RelationGetReplicaIndex(rel); @@ -543,11 +543,11 @@ static void apply_handle_insert(StringInfo s) { LogicalRepRelMapEntry *rel; - LogicalRepTupleData newtup; - LogicalRepRelId relid; - EState *estate; - TupleTableSlot *remoteslot; - MemoryContext oldctx; + LogicalRepTupleData newtup; + LogicalRepRelId relid; + EState *estate; + TupleTableSlot *remoteslot; + MemoryContext oldctx; ensure_transaction(); @@ -607,15 +607,15 @@ check_relation_updatable(LogicalRepRelMapEntry *rel) return; /* - * We are in error mode so it's fine this is somewhat slow. - * It's better to give user correct error. + * We are in error mode so it's fine this is somewhat slow. It's better to + * give user correct error. */ if (OidIsValid(GetRelationIdentityOrPK(rel->localrel))) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("publisher does not send replica identity column " - "expected by the logical replication target relation \"%s.%s\"", + "expected by the logical replication target relation \"%s.%s\"", rel->remoterel.nspname, rel->remoterel.relname))); } @@ -637,17 +637,17 @@ static void apply_handle_update(StringInfo s) { LogicalRepRelMapEntry *rel; - LogicalRepRelId relid; - Oid idxoid; - EState *estate; - EPQState epqstate; - LogicalRepTupleData oldtup; - LogicalRepTupleData newtup; - bool has_oldtup; - TupleTableSlot *localslot; - TupleTableSlot *remoteslot; - bool found; - MemoryContext oldctx; + LogicalRepRelId relid; + Oid idxoid; + EState *estate; + EPQState epqstate; + LogicalRepTupleData oldtup; + LogicalRepTupleData newtup; + bool has_oldtup; + TupleTableSlot *localslot; + TupleTableSlot *remoteslot; + bool found; + MemoryContext oldctx; ensure_transaction(); @@ -685,8 +685,8 @@ apply_handle_update(StringInfo s) MemoryContextSwitchTo(oldctx); /* - * Try to find tuple using either replica identity index, primary key - * or if needed, sequential scan. + * Try to find tuple using either replica identity index, primary key or + * if needed, sequential scan. */ idxoid = GetRelationIdentityOrPK(rel->localrel); Assert(OidIsValid(idxoid) || @@ -758,15 +758,15 @@ static void apply_handle_delete(StringInfo s) { LogicalRepRelMapEntry *rel; - LogicalRepTupleData oldtup; - LogicalRepRelId relid; - Oid idxoid; - EState *estate; - EPQState epqstate; - TupleTableSlot *remoteslot; - TupleTableSlot *localslot; - bool found; - MemoryContext oldctx; + LogicalRepTupleData oldtup; + LogicalRepRelId relid; + Oid idxoid; + EState *estate; + EPQState epqstate; + TupleTableSlot *remoteslot; + TupleTableSlot *localslot; + bool found; + MemoryContext oldctx; ensure_transaction(); @@ -802,8 +802,8 @@ apply_handle_delete(StringInfo s) MemoryContextSwitchTo(oldctx); /* - * Try to find tuple using either replica identity index, primary key - * or if needed, sequential scan. 
+ * Try to find tuple using either replica identity index, primary key or + * if needed, sequential scan. */ idxoid = GetRelationIdentityOrPK(rel->localrel); Assert(OidIsValid(idxoid) || @@ -826,7 +826,7 @@ apply_handle_delete(StringInfo s) } else { - /* The tuple to be deleted could not be found.*/ + /* The tuple to be deleted could not be found. */ ereport(DEBUG1, (errmsg("logical replication could not find row for delete " "in replication target %s", @@ -856,46 +856,46 @@ apply_handle_delete(StringInfo s) static void apply_dispatch(StringInfo s) { - char action = pq_getmsgbyte(s); + char action = pq_getmsgbyte(s); switch (action) { - /* BEGIN */ + /* BEGIN */ case 'B': apply_handle_begin(s); break; - /* COMMIT */ + /* COMMIT */ case 'C': apply_handle_commit(s); break; - /* INSERT */ + /* INSERT */ case 'I': apply_handle_insert(s); break; - /* UPDATE */ + /* UPDATE */ case 'U': apply_handle_update(s); break; - /* DELETE */ + /* DELETE */ case 'D': apply_handle_delete(s); break; - /* RELATION */ + /* RELATION */ case 'R': apply_handle_relation(s); break; - /* TYPE */ + /* TYPE */ case 'Y': apply_handle_type(s); break; - /* ORIGIN */ + /* ORIGIN */ case 'O': apply_handle_origin(s); break; default: ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("invalid logical replication message type %c", action))); + errmsg("invalid logical replication message type %c", action))); } } @@ -925,7 +925,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush, dlist_foreach_modify(iter, &lsn_mapping) { FlushPosition *pos = - dlist_container(FlushPosition, node, iter.cur); + dlist_container(FlushPosition, node, iter.cur); *write = pos->remote_end; @@ -995,12 +995,12 @@ static void LogicalRepApplyLoop(XLogRecPtr last_received) { /* - * Init the ApplyMessageContext which we clean up after each - * replication protocol message. + * Init the ApplyMessageContext which we clean up after each replication + * protocol message. */ ApplyMessageContext = AllocSetContextCreate(ApplyContext, - "ApplyMessageContext", - ALLOCSET_DEFAULT_SIZES); + "ApplyMessageContext", + ALLOCSET_DEFAULT_SIZES); /* mark as idle, before starting to loop */ pgstat_report_activity(STATE_IDLE, NULL); @@ -1039,7 +1039,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received) } else { - int c; + int c; StringInfoData s; /* Reset timeout. */ @@ -1108,7 +1108,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received) { /* * If we didn't get any transactions for a while there might be - * unconsumed invalidation messages in the queue, consume them now. + * unconsumed invalidation messages in the queue, consume them + * now. */ AcceptInvalidationMessages(); if (!MySubscriptionValid) @@ -1126,6 +1127,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received) if (endofstream) { TimeLineID tli; + walrcv_endstreaming(wrconn, &tli); break; } @@ -1152,19 +1154,18 @@ LogicalRepApplyLoop(XLogRecPtr last_received) if (rc & WL_TIMEOUT) { /* - * We didn't receive anything new. If we haven't heard - * anything from the server for more than - * wal_receiver_timeout / 2, ping the server. Also, if - * it's been longer than wal_receiver_status_interval - * since the last update we sent, send a status update to - * the master anyway, to report any progress in applying - * WAL. + * We didn't receive anything new. If we haven't heard anything + * from the server for more than wal_receiver_timeout / 2, ping + * the server. 
Also, if it's been longer than + * wal_receiver_status_interval since the last update we sent, + * send a status update to the master anyway, to report any + * progress in applying WAL. */ bool requestReply = false; /* - * Check if time since last receive from standby has - * reached the configured limit. + * Check if time since last receive from standby has reached the + * configured limit. */ if (wal_receiver_timeout > 0) { @@ -1180,13 +1181,13 @@ LogicalRepApplyLoop(XLogRecPtr last_received) (errmsg("terminating logical replication worker due to timeout"))); /* - * We didn't receive anything new, for half of - * receiver replication timeout. Ping the server. + * We didn't receive anything new, for half of receiver + * replication timeout. Ping the server. */ if (!ping_sent) { timeout = TimestampTzPlusMilliseconds(last_recv_timestamp, - (wal_receiver_timeout / 2)); + (wal_receiver_timeout / 2)); if (now >= timeout) { requestReply = true; @@ -1211,17 +1212,17 @@ LogicalRepApplyLoop(XLogRecPtr last_received) static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply) { - static StringInfo reply_message = NULL; - static TimestampTz send_time = 0; + static StringInfo reply_message = NULL; + static TimestampTz send_time = 0; static XLogRecPtr last_recvpos = InvalidXLogRecPtr; static XLogRecPtr last_writepos = InvalidXLogRecPtr; static XLogRecPtr last_flushpos = InvalidXLogRecPtr; - XLogRecPtr writepos; - XLogRecPtr flushpos; + XLogRecPtr writepos; + XLogRecPtr flushpos; TimestampTz now; - bool have_pending_txes; + bool have_pending_txes; /* * If the user doesn't want status to be reported to the publisher, be @@ -1237,8 +1238,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply) get_flush_position(&writepos, &flushpos, &have_pending_txes); /* - * No outstanding transactions to flush, we can report the latest - * received position. This is important for synchronous replication. + * No outstanding transactions to flush, we can report the latest received + * position. This is important for synchronous replication. */ if (!have_pending_txes) flushpos = writepos = recvpos; @@ -1262,7 +1263,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply) if (!reply_message) { - MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext); + MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext); + reply_message = makeStringInfo(); MemoryContextSwitchTo(oldctx); } @@ -1273,7 +1275,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply) pq_sendint64(reply_message, recvpos); /* write */ pq_sendint64(reply_message, flushpos); /* flush */ pq_sendint64(reply_message, writepos); /* apply */ - pq_sendint64(reply_message, now); /* sendTime */ + pq_sendint64(reply_message, now); /* sendTime */ pq_sendbyte(reply_message, requestReply); /* replyRequested */ elog(DEBUG2, "sending feedback (force %d) to recv %X/%X, write %X/%X, flush %X/%X", @@ -1300,9 +1302,9 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply) static void reread_subscription(void) { - MemoryContext oldctx; - Subscription *newsub; - bool started_tx = false; + MemoryContext oldctx; + Subscription *newsub; + bool started_tx = false; /* This function might be called inside or outside of transaction. */ if (!IsTransactionState()) @@ -1317,47 +1319,45 @@ reread_subscription(void) newsub = GetSubscription(MyLogicalRepWorker->subid, true); /* - * Exit if the subscription was removed. - * This normally should not happen as the worker gets killed - * during DROP SUBSCRIPTION. 
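Annotation (not part of the commit above): the send_feedback() hunk above builds the standby-status-update message the subscriber sends back to the publisher. The field order is easy to misread, so as a restatement of the lines shown there (reply_message, recvpos, flushpos, writepos, now and requestReply come from that function):

    pq_sendbyte(reply_message, 'r');            /* message type: status update */
    pq_sendint64(reply_message, recvpos);       /* "write": last remote LSN received */
    pq_sendint64(reply_message, flushpos);      /* "flush": remote LSN whose changes are
                                                 * flushed locally */
    pq_sendint64(reply_message, writepos);      /* "apply": remote LSN whose changes are
                                                 * applied locally */
    pq_sendint64(reply_message, now);           /* sender's timestamp */
    pq_sendbyte(reply_message, requestReply);   /* ask the publisher to reply at once */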
+ * Exit if the subscription was removed. This normally should not happen + * as the worker gets killed during DROP SUBSCRIPTION. */ if (!newsub) { ereport(LOG, - (errmsg("logical replication worker for subscription \"%s\" will " - "stop because the subscription was removed", - MySubscription->name))); + (errmsg("logical replication worker for subscription \"%s\" will " + "stop because the subscription was removed", + MySubscription->name))); walrcv_disconnect(wrconn); proc_exit(0); } /* - * Exit if the subscription was disabled. - * This normally should not happen as the worker gets killed - * during ALTER SUBSCRIPTION ... DISABLE. + * Exit if the subscription was disabled. This normally should not happen + * as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE. */ if (!newsub->enabled) { ereport(LOG, - (errmsg("logical replication worker for subscription \"%s\" will " - "stop because the subscription was disabled", - MySubscription->name))); + (errmsg("logical replication worker for subscription \"%s\" will " + "stop because the subscription was disabled", + MySubscription->name))); walrcv_disconnect(wrconn); proc_exit(0); } /* - * Exit if connection string was changed. The launcher will start - * new worker. + * Exit if connection string was changed. The launcher will start new + * worker. */ if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0) { ereport(LOG, - (errmsg("logical replication worker for subscription \"%s\" will " - "restart because the connection information was changed", - MySubscription->name))); + (errmsg("logical replication worker for subscription \"%s\" will " + "restart because the connection information was changed", + MySubscription->name))); walrcv_disconnect(wrconn); proc_exit(0); @@ -1370,9 +1370,9 @@ reread_subscription(void) if (strcmp(newsub->name, MySubscription->name) != 0) { ereport(LOG, - (errmsg("logical replication worker for subscription \"%s\" will " - "restart because subscription was renamed", - MySubscription->name))); + (errmsg("logical replication worker for subscription \"%s\" will " + "restart because subscription was renamed", + MySubscription->name))); walrcv_disconnect(wrconn); proc_exit(0); @@ -1382,30 +1382,30 @@ reread_subscription(void) Assert(newsub->slotname); /* - * We need to make new connection to new slot if slot name has changed - * so exit here as well if that's the case. + * We need to make new connection to new slot if slot name has changed so + * exit here as well if that's the case. */ if (strcmp(newsub->slotname, MySubscription->slotname) != 0) { ereport(LOG, - (errmsg("logical replication worker for subscription \"%s\" will " - "restart because the replication slot name was changed", - MySubscription->name))); + (errmsg("logical replication worker for subscription \"%s\" will " + "restart because the replication slot name was changed", + MySubscription->name))); walrcv_disconnect(wrconn); proc_exit(0); } /* - * Exit if publication list was changed. The launcher will start - * new worker. + * Exit if publication list was changed. The launcher will start new + * worker. 
*/ if (!equal(newsub->publications, MySubscription->publications)) { ereport(LOG, - (errmsg("logical replication worker for subscription \"%s\" will " - "restart because subscription's publications were changed", - MySubscription->name))); + (errmsg("logical replication worker for subscription \"%s\" will " + "restart because subscription's publications were changed", + MySubscription->name))); walrcv_disconnect(wrconn); proc_exit(0); @@ -1448,11 +1448,11 @@ subscription_change_cb(Datum arg, int cacheid, uint32 hashvalue) void ApplyWorkerMain(Datum main_arg) { - int worker_slot = DatumGetInt32(main_arg); - MemoryContext oldctx; - char originname[NAMEDATALEN]; - XLogRecPtr origin_startpos; - char *myslotname; + int worker_slot = DatumGetInt32(main_arg); + MemoryContext oldctx; + char originname[NAMEDATALEN]; + XLogRecPtr origin_startpos; + char *myslotname; WalRcvStreamOptions options; /* Attach to slot */ @@ -1488,8 +1488,8 @@ ApplyWorkerMain(Datum main_arg) /* Load the subscription into persistent memory context. */ ApplyContext = AllocSetContextCreate(TopMemoryContext, - "ApplyContext", - ALLOCSET_DEFAULT_SIZES); + "ApplyContext", + ALLOCSET_DEFAULT_SIZES); StartTransactionCommand(); oldctx = MemoryContextSwitchTo(ApplyContext); MySubscription = GetSubscription(MyLogicalRepWorker->subid, false); @@ -1503,9 +1503,9 @@ ApplyWorkerMain(Datum main_arg) if (!MySubscription->enabled) { ereport(LOG, - (errmsg("logical replication worker for subscription \"%s\" will not " - "start because the subscription was disabled during startup", - MySubscription->name))); + (errmsg("logical replication worker for subscription \"%s\" will not " + "start because the subscription was disabled during startup", + MySubscription->name))); proc_exit(0); } @@ -1530,7 +1530,7 @@ ApplyWorkerMain(Datum main_arg) if (am_tablesync_worker()) { - char *syncslotname; + char *syncslotname; /* This is table synchroniation worker, call initial sync. */ syncslotname = LogicalRepSyncTableStart(&origin_startpos); @@ -1545,10 +1545,10 @@ ApplyWorkerMain(Datum main_arg) else { /* This is main apply worker */ - RepOriginId originid; - TimeLineID startpointTLI; - char *err; - int server_version; + RepOriginId originid; + TimeLineID startpointTLI; + char *err; + int server_version; myslotname = MySubscription->slotname; @@ -1570,9 +1570,8 @@ ApplyWorkerMain(Datum main_arg) (errmsg("could not connect to the publisher: %s", err))); /* - * We don't really use the output identify_system for anything - * but it does some initializations on the upstream so let's still - * call it. + * We don't really use the output identify_system for anything but it + * does some initializations on the upstream so let's still call it. */ (void) walrcv_identify_system(wrconn, &startpointTLI, &server_version); @@ -1580,8 +1579,8 @@ ApplyWorkerMain(Datum main_arg) } /* - * Setup callback for syscache so that we know when something - * changes in the subscription relation state. + * Setup callback for syscache so that we know when something changes in + * the subscription relation state. 
*/ CacheRegisterSyscacheCallback(SUBSCRIPTIONRELMAP, invalidate_syncing_table_states, diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 694f351dd8..5bdfa60ae7 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -29,31 +29,31 @@ PG_MODULE_MAGIC; extern void _PG_output_plugin_init(OutputPluginCallbacks *cb); -static void pgoutput_startup(LogicalDecodingContext * ctx, - OutputPluginOptions *opt, bool is_init); -static void pgoutput_shutdown(LogicalDecodingContext * ctx); +static void pgoutput_startup(LogicalDecodingContext *ctx, + OutputPluginOptions *opt, bool is_init); +static void pgoutput_shutdown(LogicalDecodingContext *ctx); static void pgoutput_begin_txn(LogicalDecodingContext *ctx, - ReorderBufferTXN *txn); + ReorderBufferTXN *txn); static void pgoutput_commit_txn(LogicalDecodingContext *ctx, - ReorderBufferTXN *txn, XLogRecPtr commit_lsn); + ReorderBufferTXN *txn, XLogRecPtr commit_lsn); static void pgoutput_change(LogicalDecodingContext *ctx, - ReorderBufferTXN *txn, Relation rel, - ReorderBufferChange *change); + ReorderBufferTXN *txn, Relation rel, + ReorderBufferChange *change); static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, - RepOriginId origin_id); + RepOriginId origin_id); static bool publications_valid; static List *LoadPublications(List *pubnames); static void publication_invalidation_cb(Datum arg, int cacheid, - uint32 hashvalue); + uint32 hashvalue); /* Entry in the map used to remember which relation schemas we sent. */ typedef struct RelationSyncEntry { - Oid relid; /* relation oid */ - bool schema_sent; /* did we send the schema? */ - bool replicate_valid; + Oid relid; /* relation oid */ + bool schema_sent; /* did we send the schema? */ + bool replicate_valid; PublicationActions pubactions; } RelationSyncEntry; @@ -64,7 +64,7 @@ static void init_rel_sync_cache(MemoryContext decoding_context); static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, Oid relid); static void rel_sync_cache_relation_cb(Datum arg, Oid relid); static void rel_sync_cache_publication_cb(Datum arg, int cacheid, - uint32 hashvalue); + uint32 hashvalue); /* * Specify output plugin callbacks @@ -130,9 +130,9 @@ parse_output_parameters(List *options, uint32 *protocol_version, if (!SplitIdentifierString(strVal(defel->arg), ',', publication_names)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_NAME), - errmsg("invalid publication_names syntax"))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_NAME), + errmsg("invalid publication_names syntax"))); } else elog(ERROR, "unrecognized pgoutput option: %s", defel->defname); @@ -143,14 +143,14 @@ parse_output_parameters(List *options, uint32 *protocol_version, * Initialize this plugin */ static void -pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt, - bool is_init) +pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, + bool is_init) { - PGOutputData *data = palloc0(sizeof(PGOutputData)); + PGOutputData *data = palloc0(sizeof(PGOutputData)); /* Create our memory context for private allocations. 
*/ data->context = AllocSetContextCreate(ctx->context, - "logical replication output context", + "logical replication output context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); @@ -175,15 +175,15 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt, /* Check if we support requested protocol */ if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM) ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("client sent proto_version=%d but we only support protocol %d or lower", + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("client sent proto_version=%d but we only support protocol %d or lower", data->protocol_version, LOGICALREP_PROTO_VERSION_NUM))); if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM) ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("client sent proto_version=%d but we only support protocol %d or higher", - data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("client sent proto_version=%d but we only support protocol %d or higher", + data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM))); if (list_length(data->publication_names) < 1) ereport(ERROR, @@ -208,14 +208,14 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt, static void pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { - bool send_replication_origin = txn->origin_id != InvalidRepOriginId; + bool send_replication_origin = txn->origin_id != InvalidRepOriginId; OutputPluginPrepareWrite(ctx, !send_replication_origin); logicalrep_write_begin(ctx->out, txn); if (send_replication_origin) { - char *origin; + char *origin; /* Message boundary */ OutputPluginWrite(ctx, false); @@ -225,10 +225,10 @@ pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) * XXX: which behaviour do we want here? * * Alternatives: - * - don't send origin message if origin name not found - * (that's what we do now) - * - throw error - that will break replication, not good - * - send some special "unknown" origin + * - don't send origin message if origin name not found + * (that's what we do now) + * - throw error - that will break replication, not good + * - send some special "unknown" origin *---------- */ if (replorigin_by_oid(txn->origin_id, true, &origin)) @@ -243,7 +243,7 @@ pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) */ static void pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, - XLogRecPtr commit_lsn) + XLogRecPtr commit_lsn) { OutputPluginUpdateProgress(ctx); @@ -259,9 +259,9 @@ static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change) { - PGOutputData *data = (PGOutputData *) ctx->output_plugin_private; - MemoryContext old; - RelationSyncEntry *relentry; + PGOutputData *data = (PGOutputData *) ctx->output_plugin_private; + MemoryContext old; + RelationSyncEntry *relentry; relentry = get_rel_sync_entry(data, RelationGetRelid(relation)); @@ -333,8 +333,8 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, break; case REORDER_BUFFER_CHANGE_UPDATE: { - HeapTuple oldtuple = change->data.tp.oldtuple ? - &change->data.tp.oldtuple->tuple : NULL; + HeapTuple oldtuple = change->data.tp.oldtuple ? 
+ &change->data.tp.oldtuple->tuple : NULL; OutputPluginPrepareWrite(ctx, true); logicalrep_write_update(ctx->out, relation, oldtuple, @@ -367,7 +367,7 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, */ static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, - RepOriginId origin_id) + RepOriginId origin_id) { return false; } @@ -379,7 +379,7 @@ pgoutput_origin_filter(LogicalDecodingContext *ctx, * of the ctx->context so it will be cleaned up by logical decoding machinery. */ static void -pgoutput_shutdown(LogicalDecodingContext * ctx) +pgoutput_shutdown(LogicalDecodingContext *ctx) { if (RelationSyncCache) { @@ -397,10 +397,10 @@ LoadPublications(List *pubnames) List *result = NIL; ListCell *lc; - foreach (lc, pubnames) + foreach(lc, pubnames) { - char *pubname = (char *) lfirst(lc); - Publication *pub = GetPublicationByName(pubname, false); + char *pubname = (char *) lfirst(lc); + Publication *pub = GetPublicationByName(pubname, false); result = lappend(result, pub); } @@ -417,9 +417,8 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue) publications_valid = false; /* - * Also invalidate per-relation cache so that next time the filtering - * info is checked it will be updated with the new publication - * settings. + * Also invalidate per-relation cache so that next time the filtering info + * is checked it will be updated with the new publication settings. */ rel_sync_cache_publication_cb(arg, cacheid, hashvalue); } @@ -434,7 +433,7 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue) static void init_rel_sync_cache(MemoryContext cachectx) { - HASHCTL ctl; + HASHCTL ctl; MemoryContext old_ctxt; if (RelationSyncCache != NULL) @@ -466,9 +465,9 @@ init_rel_sync_cache(MemoryContext cachectx) static RelationSyncEntry * get_rel_sync_entry(PGOutputData *data, Oid relid) { - RelationSyncEntry *entry; - bool found; - MemoryContext oldctx; + RelationSyncEntry *entry; + bool found; + MemoryContext oldctx; Assert(RelationSyncCache != NULL); @@ -499,9 +498,9 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) } /* - * Build publication cache. We can't use one provided by relcache - * as relcache considers all publications given relation is in, but - * here we only need to consider ones that the subscriber requested. + * Build publication cache. We can't use one provided by relcache as + * relcache considers all publications given relation is in, but here + * we only need to consider ones that the subscriber requested. */ entry->pubactions.pubinsert = entry->pubactions.pubupdate = entry->pubactions.pubdelete = false; @@ -539,7 +538,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) static void rel_sync_cache_relation_cb(Datum arg, Oid relid) { - RelationSyncEntry *entry; + RelationSyncEntry *entry; /* * We can get here if the plugin was used in SQL interface as the @@ -558,15 +557,14 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid) * safe point. * * Getting invalidations for relations that aren't in the table is - * entirely normal, since there's no way to unregister for an - * invalidation event. So we don't care if it's found or not. + * entirely normal, since there's no way to unregister for an invalidation + * event. So we don't care if it's found or not. */ entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid, HASH_FIND, NULL); /* - * Reset schema sent status as the relation definition may have - * changed. + * Reset schema sent status as the relation definition may have changed. 
*/ if (entry != NULL) entry->schema_sent = false; @@ -578,8 +576,8 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid) static void rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue) { - HASH_SEQ_STATUS status; - RelationSyncEntry *entry; + HASH_SEQ_STATUS status; + RelationSyncEntry *entry; /* * We can get here if the plugin was used in SQL interface as the @@ -590,8 +588,8 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue) return; /* - * There is no way to find which entry in our cache the hash belongs to - * so mark the whole cache as invalid. + * There is no way to find which entry in our cache the hash belongs to so + * mark the whole cache as invalid. */ hash_seq_init(&status, RelationSyncCache); while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL) diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 5f63d0484a..5386e86aa6 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -502,8 +502,8 @@ ReplicationSlotDropPtr(ReplicationSlot *slot) /* * Rename the slot directory on disk, so that we'll no longer recognize * this as a valid slot. Note that if this fails, we've got to mark the - * slot inactive before bailing out. If we're dropping an ephemeral or - * a temporary slot, we better never fail hard as the caller won't expect + * slot inactive before bailing out. If we're dropping an ephemeral or a + * temporary slot, we better never fail hard as the caller won't expect * the slot to survive and this might get called during error handling. */ if (rename(path, tmppath) == 0) @@ -839,8 +839,8 @@ restart: for (i = 0; i < max_replication_slots; i++) { ReplicationSlot *s; - char *slotname; - int active_pid; + char *slotname; + int active_pid; s = &ReplicationSlotCtl->replication_slots[i]; diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 56a9ca9651..bbd26f3d6a 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -119,11 +119,11 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS) /* * Acquire a logical decoding slot, this will check for conflicting names. - * Initially create persistent slot as ephemeral - that allows us to nicely - * handle errors during initialization because it'll get dropped if this - * transaction fails. We'll make it persistent at the end. - * Temporary slots can be created as temporary from beginning as they get - * dropped on error as well. + * Initially create persistent slot as ephemeral - that allows us to + * nicely handle errors during initialization because it'll get dropped if + * this transaction fails. We'll make it persistent at the end. Temporary + * slots can be created as temporary from beginning as they get dropped on + * error as well. */ ReplicationSlotCreate(NameStr(*name), true, temporary ? RS_TEMPORARY : RS_EPHEMERAL); @@ -132,7 +132,7 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS) * Create logical decoding context, to build the initial snapshot. 
*/ ctx = CreateInitDecodingContext(NameStr(*plugin), NIL, - false, /* do not build snapshot */ + false, /* do not build snapshot */ logical_read_local_xlog_page, NULL, NULL, NULL); @@ -227,7 +227,7 @@ pg_get_replication_slots(PG_FUNCTION_ARGS) Datum values[PG_GET_REPLICATION_SLOTS_COLS]; bool nulls[PG_GET_REPLICATION_SLOTS_COLS]; - ReplicationSlotPersistency persistency; + ReplicationSlotPersistency persistency; TransactionId xmin; TransactionId catalog_xmin; XLogRecPtr restart_lsn; diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 554f783209..ad213fc454 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -102,17 +102,17 @@ static void SyncRepCancelWait(void); static int SyncRepWakeQueue(bool all, int mode); static bool SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, - XLogRecPtr *flushPtr, - XLogRecPtr *applyPtr, - bool *am_sync); + XLogRecPtr *flushPtr, + XLogRecPtr *applyPtr, + bool *am_sync); static void SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, - XLogRecPtr *flushPtr, - XLogRecPtr *applyPtr, - List *sync_standbys); + XLogRecPtr *flushPtr, + XLogRecPtr *applyPtr, + List *sync_standbys); static void SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, - XLogRecPtr *flushPtr, - XLogRecPtr *applyPtr, - List *sync_standbys, uint8 nth); + XLogRecPtr *flushPtr, + XLogRecPtr *applyPtr, + List *sync_standbys, uint8 nth); static int SyncRepGetStandbyPriority(void); static List *SyncRepGetSyncStandbysPriority(bool *am_sync); static List *SyncRepGetSyncStandbysQuorum(bool *am_sync); @@ -455,7 +455,7 @@ SyncRepReleaseWaiters(void) if (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ereport(LOG, (errmsg("standby \"%s\" is now a synchronous standby with priority %u", - application_name, MyWalSnd->sync_standby_priority))); + application_name, MyWalSnd->sync_standby_priority))); else ereport(LOG, (errmsg("standby \"%s\" is now a candidate for quorum synchronous standby", @@ -513,7 +513,7 @@ SyncRepReleaseWaiters(void) */ static bool SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, - XLogRecPtr *applyPtr, bool *am_sync) + XLogRecPtr *applyPtr, bool *am_sync) { List *sync_standbys; @@ -542,9 +542,9 @@ SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, * oldest ones among sync standbys. In a quorum-based, they are the Nth * latest ones. * - * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest positions. - * But we use SyncRepGetOldestSyncRecPtr() for that calculation because - * it's a bit more efficient. + * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest + * positions. But we use SyncRepGetOldestSyncRecPtr() for that calculation + * because it's a bit more efficient. * * XXX If the numbers of current and requested sync standbys are the same, * we can use SyncRepGetOldestSyncRecPtr() to calculate the synced @@ -572,15 +572,15 @@ static void SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, XLogRecPtr *applyPtr, List *sync_standbys) { - ListCell *cell; + ListCell *cell; /* - * Scan through all sync standbys and calculate the oldest - * Write, Flush and Apply positions. + * Scan through all sync standbys and calculate the oldest Write, Flush + * and Apply positions. 
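[Editorial aside] The priority and quorum calculations in this file (SyncRepGetOldestSyncRecPtr() here, SyncRepGetNthLatestSyncRecPtr() and cmp_lsn() in the hunks that follow) reduce to taking the minimum of the standbys' positions versus sorting them in descending order and picking the Nth entry. A standalone sketch with invented LSN values:

/* Standalone sketch of the priority vs. quorum position calculations;
 * the sample LSN values are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint64_t Lsn;           /* stand-in for XLogRecPtr */

static int
cmp_lsn_desc(const void *a, const void *b)
{
    Lsn         lsn1 = *(const Lsn *) a;
    Lsn         lsn2 = *(const Lsn *) b;

    if (lsn1 > lsn2)
        return -1;
    if (lsn1 < lsn2)
        return 1;
    return 0;
}

int
main(void)
{
    Lsn         flush[] = {0x3000100, 0x3000400, 0x3000250};
    int         n = 3;
    int         nth = 2;        /* e.g. an ANY 2 (...) quorum */
    Lsn         oldest = flush[0];
    int         i;

    /* Priority mode: report the oldest position among the sync standbys. */
    for (i = 1; i < n; i++)
        if (flush[i] < oldest)
            oldest = flush[i];

    /* Quorum mode: sort descending and take the Nth latest position. */
    qsort(flush, n, sizeof(Lsn), cmp_lsn_desc);

    printf("oldest flush:     %llX\n", (unsigned long long) oldest);
    printf("2nd latest flush: %llX\n", (unsigned long long) flush[nth - 1]);
    return 0;
}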
*/ - foreach (cell, sync_standbys) + foreach(cell, sync_standbys) { - WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; + WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; XLogRecPtr write; XLogRecPtr flush; XLogRecPtr apply; @@ -606,23 +606,23 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, */ static void SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, - XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth) + XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth) { - ListCell *cell; - XLogRecPtr *write_array; - XLogRecPtr *flush_array; - XLogRecPtr *apply_array; - int len; - int i = 0; + ListCell *cell; + XLogRecPtr *write_array; + XLogRecPtr *flush_array; + XLogRecPtr *apply_array; + int len; + int i = 0; len = list_length(sync_standbys); write_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len); flush_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len); apply_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len); - foreach (cell, sync_standbys) + foreach(cell, sync_standbys) { - WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; + WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; SpinLockAcquire(&walsnd->mutex); write_array[i] = walsnd->write; @@ -654,8 +654,8 @@ SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, static int cmp_lsn(const void *a, const void *b) { - XLogRecPtr lsn1 = *((const XLogRecPtr *) a); - XLogRecPtr lsn2 = *((const XLogRecPtr *) b); + XLogRecPtr lsn1 = *((const XLogRecPtr *) a); + XLogRecPtr lsn2 = *((const XLogRecPtr *) b); if (lsn1 > lsn2) return -1; @@ -674,7 +674,7 @@ cmp_lsn(const void *a, const void *b) * sync standby. Otherwise it's set to false. */ List * -SyncRepGetSyncStandbys(bool *am_sync) +SyncRepGetSyncStandbys(bool *am_sync) { /* Set default result */ if (am_sync != NULL) @@ -702,8 +702,8 @@ SyncRepGetSyncStandbys(bool *am_sync) static List * SyncRepGetSyncStandbysQuorum(bool *am_sync) { - List *result = NIL; - int i; + List *result = NIL; + int i; volatile WalSnd *walsnd; /* Use volatile pointer to prevent code * rearrangement */ @@ -730,8 +730,8 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync) continue; /* - * Consider this standby as a candidate for quorum sync standbys - * and append it to the result. + * Consider this standby as a candidate for quorum sync standbys and + * append it to the result. */ result = lappend_int(result, i); if (am_sync != NULL && walsnd == MyWalSnd) @@ -955,8 +955,8 @@ SyncRepGetStandbyPriority(void) return 0; /* - * In quorum-based sync replication, all the standbys in the list - * have the same priority, one. + * In quorum-based sync replication, all the standbys in the list have the + * same priority, one. */ return (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ? 
priority : 1; } diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index 028170c952..2723612718 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -1176,9 +1176,12 @@ XLogWalRcvSendHSFeedback(bool immed) { TimestampTz now; TransactionId nextXid; - uint32 xmin_epoch, catalog_xmin_epoch; - TransactionId xmin, catalog_xmin; + uint32 xmin_epoch, + catalog_xmin_epoch; + TransactionId xmin, + catalog_xmin; static TimestampTz sendTime = 0; + /* initially true so we always send at least one feedback message */ static bool master_has_standby_xmin = true; @@ -1211,8 +1214,8 @@ XLogWalRcvSendHSFeedback(bool immed) * * Bailing out here also ensures that we don't send feedback until we've * read our own replication slot state, so we don't tell the master to - * discard needed xmin or catalog_xmin from any slots that may exist - * on this replica. + * discard needed xmin or catalog_xmin from any slots that may exist on + * this replica. */ if (!HotStandbyActive()) return; @@ -1232,7 +1235,7 @@ XLogWalRcvSendHSFeedback(bool immed) * excludes the catalog_xmin. */ xmin = GetOldestXmin(NULL, - PROCARRAY_FLAGS_DEFAULT|PROCARRAY_SLOTS_XMIN); + PROCARRAY_FLAGS_DEFAULT | PROCARRAY_SLOTS_XMIN); ProcArrayGetReplicationSlotXmin(&slot_xmin, &catalog_xmin); @@ -1253,9 +1256,9 @@ XLogWalRcvSendHSFeedback(bool immed) GetNextXidAndEpoch(&nextXid, &xmin_epoch); catalog_xmin_epoch = xmin_epoch; if (nextXid < xmin) - xmin_epoch --; + xmin_epoch--; if (nextXid < catalog_xmin) - catalog_xmin_epoch --; + catalog_xmin_epoch--; elog(DEBUG2, "sending hot standby feedback xmin %u epoch %u catalog_xmin %u catalog_xmin_epoch %u", xmin, xmin_epoch, catalog_xmin, catalog_xmin_epoch); diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index a899841d83..49cce38880 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -197,7 +197,7 @@ static XLogRecPtr logical_startptr = InvalidXLogRecPtr; /* A sample associating a WAL location with the time it was written. */ typedef struct { - XLogRecPtr lsn; + XLogRecPtr lsn; TimestampTz time; } WalTimeSample; @@ -207,12 +207,12 @@ typedef struct /* A mechanism for tracking replication lag. */ static struct { - XLogRecPtr last_lsn; + XLogRecPtr last_lsn; WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]; - int write_head; - int read_heads[NUM_SYNC_REP_WAIT_MODE]; + int write_head; + int read_heads[NUM_SYNC_REP_WAIT_MODE]; WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE]; -} LagTracker; +} LagTracker; /* Signal handlers */ static void WalSndSigHupHandler(SIGNAL_ARGS); @@ -530,7 +530,7 @@ StartReplication(StartReplicationCmd *cmd) if (ThisTimeLineID == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION"))); + errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION"))); /* * We assume here that we're logging enough information in the WAL for @@ -580,8 +580,8 @@ StartReplication(StartReplicationCmd *cmd) sendTimeLineIsHistoric = true; /* - * Check that the timeline the client requested exists, and - * the requested start location is on that timeline. + * Check that the timeline the client requested exists, and the + * requested start location is on that timeline. 
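[Editorial aside] The xmin_epoch--/catalog_xmin_epoch-- lines in the XLogWalRcvSendHSFeedback() hunk above handle an xmin taken before the most recent transaction-ID wraparound: if the 32-bit xmin is numerically greater than the next XID, it belongs to the previous epoch. A standalone sketch of that rule; all values are invented.

/* Sketch of the epoch adjustment used when reporting hot standby feedback. */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint32_t    nextXid = 0x00000105;   /* next XID, just after a wraparound */
    uint32_t    nextEpoch = 7;          /* epoch that nextXid belongs to */
    uint32_t    xmin = 0xFFFFFF00;      /* an old snapshot's xmin */
    uint32_t    xmin_epoch = nextEpoch;

    if (nextXid < xmin)
        xmin_epoch--;           /* xmin predates the wraparound */

    printf("feedback xmin %u epoch %u\n", xmin, xmin_epoch);
    return 0;
}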
*/ timeLineHistory = readTimeLineHistory(ThisTimeLineID); switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory, @@ -599,8 +599,8 @@ StartReplication(StartReplicationCmd *cmd) * request to start replication from the beginning of the WAL * segment that contains switchpoint, but on the new timeline, so * that it doesn't end up with a partial segment. If you ask for - * too old a starting point, you'll get an error later when we fail - * to find the requested WAL segment in pg_wal. + * too old a starting point, you'll get an error later when we + * fail to find the requested WAL segment in pg_wal. * * XXX: we could be more strict here and only allow a startpoint * that's older than the switchpoint, if it's still in the same @@ -717,9 +717,9 @@ StartReplication(StartReplicationCmd *cmd) MemSet(nulls, false, sizeof(nulls)); /* - * Need a tuple descriptor representing two columns. - * int8 may seem like a surprising data type for this, but in theory - * int4 would not be wide enough for this, as TimeLineID is unsigned. + * Need a tuple descriptor representing two columns. int8 may seem + * like a surprising data type for this, but in theory int4 would not + * be wide enough for this, as TimeLineID is unsigned. */ tupdesc = CreateTemplateTupleDesc(2, false); TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli", @@ -795,7 +795,7 @@ parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd, bool reserve_wal_given = false; /* Parse options */ - foreach (lc, cmd->options) + foreach(lc, cmd->options) { DefElem *defel = (DefElem *) lfirst(lc); @@ -883,7 +883,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) if (cmd->kind == REPLICATION_KIND_LOGICAL) { LogicalDecodingContext *ctx; - bool need_full_snapshot = false; + bool need_full_snapshot = false; /* * Do options check early so that we can bail before calling the @@ -1255,10 +1255,10 @@ WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId TimestampTz now = GetCurrentTimestamp(); /* - * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS - * to avoid flooding the lag tracker when we commit frequently. + * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to + * avoid flooding the lag tracker when we commit frequently. */ -#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000 +#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000 if (!TimestampDifferenceExceeds(sendTime, now, WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS)) return; @@ -1474,8 +1474,8 @@ exec_replication_command(const char *cmd_string) SnapBuildClearExportedSnapshot(); /* - * For aborted transactions, don't allow anything except pure SQL, - * the exec_simple_query() will handle it correctly. + * For aborted transactions, don't allow anything except pure SQL, the + * exec_simple_query() will handle it correctly. 
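[Editorial aside] WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS in the WalSndUpdateProgress() hunk above throttles lag samples to at most one per second. The same throttling pattern in standalone form; only the 1000 ms interval is taken from the hunk, the rest is illustrative.

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define LAG_TRACK_INTERVAL_MS 1000

/* true when at least "ms" milliseconds separate the two timestamps */
static bool
interval_exceeded(const struct timespec *last, const struct timespec *now, long ms)
{
    long        elapsed_ms = (now->tv_sec - last->tv_sec) * 1000L +
        (now->tv_nsec - last->tv_nsec) / 1000000L;

    return elapsed_ms >= ms;
}

int
main(void)
{
    static struct timespec last;    /* zeroed, so the first sample is taken */
    struct timespec now;
    int         i;

    for (i = 0; i < 5; i++)
    {
        clock_gettime(CLOCK_MONOTONIC, &now);
        if (interval_exceeded(&last, &now, LAG_TRACK_INTERVAL_MS))
        {
            printf("recording lag sample %d\n", i);
            last = now;
        }
    }
    return 0;
}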
*/ if (IsAbortedTransactionBlockState() && !IsA(cmd_node, SQLCmd)) ereport(ERROR, @@ -1744,7 +1744,7 @@ ProcessStandbyReplyMessage(void) bool clearLagTimes; TimestampTz now; - static bool fullyAppliedLastTime = false; + static bool fullyAppliedLastTime = false; /* the caller already consumed the msgtype byte */ writePtr = pq_getmsgint64(&reply_message); @@ -1892,7 +1892,7 @@ TransactionIdInRecentPast(TransactionId xid, uint32 epoch) } if (!TransactionIdPrecedesOrEquals(xid, nextXid)) - return false; /* epoch OK, but it's wrapped around */ + return false; /* epoch OK, but it's wrapped around */ return true; } @@ -1974,8 +1974,8 @@ ProcessStandbyHSFeedbackMessage(void) * * If we're using a replication slot we reserve the xmin via that, * otherwise via the walsender's PGXACT entry. We can only track the - * catalog xmin separately when using a slot, so we store the least - * of the two provided when not using a slot. + * catalog xmin separately when using a slot, so we store the least of the + * two provided when not using a slot. * * XXX: It might make sense to generalize the ephemeral slot concept and * always use the slot mechanism to handle the feedback xmin. @@ -2155,8 +2155,8 @@ WalSndLoop(WalSndSendDataCallback send_data) } /* - * At the reception of SIGUSR2, switch the WAL sender to the stopping - * state. + * At the reception of SIGUSR2, switch the WAL sender to the + * stopping state. */ if (got_SIGUSR2) WalSndSetState(WALSNDSTATE_STOPPING); @@ -2588,18 +2588,18 @@ XLogSendPhysical(void) * it seems good enough to capture the time here. We should reach this * after XLogFlush() runs WalSndWakeupProcessRequests(), and although that * may take some time, we read the WAL flush pointer and take the time - * very close to together here so that we'll get a later position if it - * is still moving. + * very close to together here so that we'll get a later position if it is + * still moving. * * Because LagTrackerWriter ignores samples when the LSN hasn't advanced, * this gives us a cheap approximation for the WAL flush time for this * LSN. * * Note that the LSN is not necessarily the LSN for the data contained in - * the present message; it's the end of the WAL, which might be - * further ahead. All the lag tracking machinery cares about is finding - * out when that arbitrary LSN is eventually reported as written, flushed - * and applied, so that it can measure the elapsed time. + * the present message; it's the end of the WAL, which might be further + * ahead. All the lag tracking machinery cares about is finding out when + * that arbitrary LSN is eventually reported as written, flushed and + * applied, so that it can measure the elapsed time. */ LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp()); @@ -2758,8 +2758,8 @@ XLogSendLogical(void) if (record != NULL) { /* - * Note the lack of any call to LagTrackerWrite() which is handled - * by WalSndUpdateProgress which is called by output plugin through + * Note the lack of any call to LagTrackerWrite() which is handled by + * WalSndUpdateProgress which is called by output plugin through * logical decoding write api. */ LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader); @@ -2805,9 +2805,8 @@ WalSndDone(WalSndSendDataCallback send_data) /* * To figure out whether all WAL has successfully been replicated, check - * flush location if valid, write otherwise. Tools like pg_receivewal - * will usually (unless in synchronous mode) return an invalid flush - * location. + * flush location if valid, write otherwise. 
Tools like pg_receivewal will + * usually (unless in synchronous mode) return an invalid flush location. */ replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ? MyWalSnd->write : MyWalSnd->flush; @@ -3077,7 +3076,7 @@ WalSndWaitStopping(void) if (all_stopped) return; - pg_usleep(10000L); /* wait for 10 msec */ + pg_usleep(10000L); /* wait for 10 msec */ } } @@ -3123,7 +3122,7 @@ WalSndGetStateString(WalSndState state) static Interval * offset_to_interval(TimeOffset offset) { - Interval *result = palloc(sizeof(Interval)); + Interval *result = palloc(sizeof(Interval)); result->month = 0; result->day = 0; @@ -3360,9 +3359,9 @@ WalSndKeepaliveIfNecessary(TimestampTz now) static void LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time) { - bool buffer_full; - int new_write_head; - int i; + bool buffer_full; + int new_write_head; + int i; if (!am_walsender) return; @@ -3448,16 +3447,16 @@ LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now) /* * We didn't cross a time. If there is a future sample that we * haven't reached yet, and we've already reached at least one sample, - * let's interpolate the local flushed time. This is mainly useful for - * reporting a completely stuck apply position as having increasing - * lag, since otherwise we'd have to wait for it to eventually start - * moving again and cross one of our samples before we can show the - * lag increasing. + * let's interpolate the local flushed time. This is mainly useful + * for reporting a completely stuck apply position as having + * increasing lag, since otherwise we'd have to wait for it to + * eventually start moving again and cross one of our samples before + * we can show the lag increasing. */ if (LagTracker.read_heads[head] != LagTracker.write_head && LagTracker.last_read[head].time != 0) { - double fraction; + double fraction; WalTimeSample prev = LagTracker.last_read[head]; WalTimeSample next = LagTracker.buffer[LagTracker.read_heads[head]]; diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index eab3f6062d..fd3768de17 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -425,8 +425,8 @@ DefineQueryRewrite(char *rulename, if (event_relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("could not convert partitioned table \"%s\" to a view", - RelationGetRelationName(event_relation)))); + errmsg("could not convert partitioned table \"%s\" to a view", + RelationGetRelationName(event_relation)))); snapshot = RegisterSnapshot(GetLatestSnapshot()); scanDesc = heap_beginscan(event_relation, snapshot, 0, NULL); diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 4dcb7138e7..35ff8bb3b7 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -792,7 +792,7 @@ rewriteTargetListIU(List *targetList, for (attrno = 1; attrno <= numattrs; attrno++) { TargetEntry *new_tle = new_tles[attrno - 1]; - bool apply_default; + bool apply_default; att_tup = target_relation->rd_att->attrs[attrno - 1]; @@ -806,7 +806,7 @@ rewriteTargetListIU(List *targetList, * tlist entry is a DEFAULT placeholder node. 
*/ apply_default = ((new_tle == NULL && commandType == CMD_INSERT) || - (new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault))); + (new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault))); if (commandType == CMD_INSERT) { @@ -818,7 +818,7 @@ rewriteTargetListIU(List *targetList, errmsg("cannot insert into column \"%s\"", NameStr(att_tup->attname)), errdetail("Column \"%s\" is an identity column defined as GENERATED ALWAYS.", NameStr(att_tup->attname)), - errhint("Use OVERRIDING SYSTEM VALUE to override."))); + errhint("Use OVERRIDING SYSTEM VALUE to override."))); } if (att_tup->attidentity == ATTRIBUTE_IDENTITY_BY_DEFAULT && override == OVERRIDING_USER_VALUE) @@ -3275,7 +3275,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events) /* Process the main targetlist ... */ parsetree->targetList = rewriteTargetListIU(parsetree->targetList, parsetree->commandType, - parsetree->override, + parsetree->override, rt_entry_relation, parsetree->resultRelation, &attrnos); diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c index 0e71f058ad..793b2da766 100644 --- a/src/backend/statistics/dependencies.c +++ b/src/backend/statistics/dependencies.c @@ -44,7 +44,7 @@ typedef struct DependencyGeneratorData int current; /* next dependency to return (index) */ AttrNumber ndependencies; /* number of dependencies generated */ AttrNumber *dependencies; /* array of pre-generated dependencies */ -} DependencyGeneratorData; +} DependencyGeneratorData; typedef DependencyGeneratorData *DependencyGenerator; @@ -61,7 +61,7 @@ static bool dependency_is_fully_matched(MVDependency *dependency, static bool dependency_implies_attribute(MVDependency *dependency, AttrNumber attnum); static bool dependency_is_compatible_clause(Node *clause, Index relid, - AttrNumber *attnum); + AttrNumber *attnum); static MVDependency *find_strongest_dependency(StatisticExtInfo *stats, MVDependencies *dependencies, Bitmapset *attnums); @@ -409,7 +409,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs, continue; d = (MVDependency *) palloc0(offsetof(MVDependency, attributes) - + k * sizeof(AttrNumber)); + +k * sizeof(AttrNumber)); /* copy the dependency (and keep the indexes into stxkeys) */ d->degree = degree; @@ -431,7 +431,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs, dependencies->ndeps++; dependencies = (MVDependencies *) repalloc(dependencies, offsetof(MVDependencies, deps) - + dependencies->ndeps * sizeof(MVDependency)); + +dependencies->ndeps * sizeof(MVDependency)); dependencies->deps[dependencies->ndeps - 1] = d; } @@ -451,7 +451,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs, * Serialize list of dependencies into a bytea value. 
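[Editorial aside] The palloc0(offsetof(MVDependency, attributes) + k * sizeof(AttrNumber)) and repalloc(..., offsetof(MVDependencies, deps) + ...) calls above size variable-length structs by adding the array tail onto the fixed header (the reindent is what produced the odd "+k *" spacing). The same flexible-array-member pattern in standalone C; the Deps and AttrNum names are invented stand-ins.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

typedef uint16_t AttrNum;       /* stand-in for AttrNumber */

typedef struct Deps
{
    double      degree;         /* fixed-size header fields */
    int         nattributes;
    AttrNum     attributes[];   /* C99 flexible array member */
} Deps;

int
main(void)
{
    int         k = 3;
    Deps       *d;
    int         j;

    /* Same shape as: palloc0(offsetof(MVDependency, attributes) + k * sizeof(AttrNumber)) */
    d = calloc(1, offsetof(Deps, attributes) + k * sizeof(AttrNum));
    d->degree = 0.98;
    d->nattributes = k;
    for (j = 0; j < k; j++)
        d->attributes[j] = (AttrNum) (j + 1);

    printf("degree %.2f over %d attributes\n", d->degree, d->nattributes);
    free(d);
    return 0;
}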
*/ bytea * -statext_dependencies_serialize(MVDependencies * dependencies) +statext_dependencies_serialize(MVDependencies *dependencies) { int i; bytea *output; @@ -552,7 +552,7 @@ statext_dependencies_deserialize(bytea *data) /* allocate space for the MCV items */ dependencies = repalloc(dependencies, offsetof(MVDependencies, deps) - + (dependencies->ndeps * sizeof(MVDependency *))); + +(dependencies->ndeps * sizeof(MVDependency *))); for (i = 0; i < dependencies->ndeps; i++) { @@ -573,7 +573,7 @@ statext_dependencies_deserialize(bytea *data) /* now that we know the number of attributes, allocate the dependency */ d = (MVDependency *) palloc0(offsetof(MVDependency, attributes) - + (k * sizeof(AttrNumber))); + +(k * sizeof(AttrNumber))); d->degree = degree; d->nattributes = k; @@ -600,7 +600,7 @@ statext_dependencies_deserialize(bytea *data) * attributes (assuming the clauses are suitable equality clauses) */ static bool -dependency_is_fully_matched(MVDependency * dependency, Bitmapset *attnums) +dependency_is_fully_matched(MVDependency *dependency, Bitmapset *attnums) { int j; @@ -840,7 +840,7 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum) * (see the comment in dependencies_clauselist_selectivity). */ static MVDependency * -find_strongest_dependency(StatisticExtInfo * stats, MVDependencies * dependencies, +find_strongest_dependency(StatisticExtInfo *stats, MVDependencies *dependencies, Bitmapset *attnums) { int i; diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index 3f74cee05f..8d7460c96b 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -40,11 +40,11 @@ */ typedef struct StatExtEntry { - Oid statOid; /* OID of pg_statistic_ext entry */ - char *schema; /* statistics object's schema */ - char *name; /* statistics object's name */ - Bitmapset *columns; /* attribute numbers covered by the object */ - List *types; /* 'char' list of enabled statistic kinds */ + Oid statOid; /* OID of pg_statistic_ext entry */ + char *schema; /* statistics object's schema */ + char *name; /* statistics object's name */ + Bitmapset *columns; /* attribute numbers covered by the object */ + List *types; /* 'char' list of enabled statistic kinds */ } StatExtEntry; @@ -83,15 +83,15 @@ BuildRelationExtStatistics(Relation onerel, double totalrows, foreach(lc, stats) { - StatExtEntry *stat = (StatExtEntry *) lfirst(lc); - MVNDistinct *ndistinct = NULL; + StatExtEntry *stat = (StatExtEntry *) lfirst(lc); + MVNDistinct *ndistinct = NULL; MVDependencies *dependencies = NULL; - VacAttrStats **stats; - ListCell *lc2; + VacAttrStats **stats; + ListCell *lc2; /* - * Check if we can build these stats based on the column analyzed. - * If not, report this fact (except in autovacuum) and move on. + * Check if we can build these stats based on the column analyzed. If + * not, report this fact (except in autovacuum) and move on. 
*/ stats = lookup_var_attr_stats(onerel, stat->columns, natts, vacattrstats); @@ -114,7 +114,7 @@ BuildRelationExtStatistics(Relation onerel, double totalrows, /* compute statistic of each requested type */ foreach(lc2, stat->types) { - char t = (char) lfirst_int(lc2); + char t = (char) lfirst_int(lc2); if (t == STATS_EXT_NDISTINCT) ndistinct = statext_ndistinct_build(totalrows, numrows, rows, @@ -141,7 +141,7 @@ BuildRelationExtStatistics(Relation onerel, double totalrows, bool statext_is_kind_built(HeapTuple htup, char type) { - AttrNumber attnum; + AttrNumber attnum; switch (type) { @@ -168,8 +168,8 @@ fetch_statentries_for_relation(Relation pg_statext, Oid relid) { SysScanDesc scan; ScanKeyData skey; - HeapTuple htup; - List *result = NIL; + HeapTuple htup; + List *result = NIL; /* * Prepare to scan pg_statistic_ext for entries having stxrelid = this @@ -250,7 +250,7 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs, /* lookup VacAttrStats info for the requested columns (same attnum) */ while ((x = bms_next_member(attrs, x)) >= 0) { - int j; + int j; stats[i] = NULL; for (j = 0; j < nvacatts; j++) @@ -273,10 +273,10 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs, return NULL; } - /* - * Sanity check that the column is not dropped - stats should have - * been removed in this case. - */ + /* + * Sanity check that the column is not dropped - stats should have + * been removed in this case. + */ Assert(!stats[i]->attr->attisdropped); i++; @@ -367,7 +367,7 @@ multi_sort_init(int ndims) void multi_sort_add_dimension(MultiSortSupport mss, int sortdim, Oid oper) { - SortSupport ssup = &mss->ssup[sortdim]; + SortSupport ssup = &mss->ssup[sortdim]; ssup->ssup_cxt = CurrentMemoryContext; ssup->ssup_collation = DEFAULT_COLLATION_OID; diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c index 47b2490abb..d8d422cd45 100644 --- a/src/backend/statistics/mvdistinct.c +++ b/src/backend/statistics/mvdistinct.c @@ -37,8 +37,8 @@ static double ndistinct_for_combination(double totalrows, int numrows, - HeapTuple *rows, VacAttrStats **stats, - int k, int *combination); + HeapTuple *rows, VacAttrStats **stats, + int k, int *combination); static double estimate_ndistinct(double totalrows, int numrows, int d, int f1); static int n_choose_k(int n, int k); static int num_combinations(int n); @@ -48,11 +48,11 @@ static int num_combinations(int n); /* internal state for generator of k-combinations of n elements */ typedef struct CombinationGenerator { - int k; /* size of the combination */ - int n; /* total number of elements */ - int current; /* index of the next combination to return */ - int ncombinations; /* number of combinations (size of array) */ - int *combinations; /* array of pre-built combinations */ + int k; /* size of the combination */ + int n; /* total number of elements */ + int current; /* index of the next combination to return */ + int ncombinations; /* number of combinations (size of array) */ + int *combinations; /* array of pre-built combinations */ } CombinationGenerator; static CombinationGenerator *generator_init(int n, int k); @@ -87,7 +87,7 @@ statext_ndistinct_build(double totalrows, int numrows, HeapTuple *rows, itemcnt = 0; for (k = 2; k <= numattrs; k++) { - int *combination; + int *combination; CombinationGenerator *generator; /* generate combinations of K out of N elements */ @@ -96,12 +96,12 @@ statext_ndistinct_build(double totalrows, int numrows, HeapTuple *rows, while ((combination = generator_next(generator))) { MVNDistinctItem 
*item = &result->items[itemcnt]; - int j; + int j; item->attrs = NULL; for (j = 0; j < k; j++) item->attrs = bms_add_member(item->attrs, - stats[combination[j]]->attr->attnum); + stats[combination[j]]->attr->attnum); item->ndistinct = ndistinct_for_combination(totalrows, numrows, rows, stats, k, combination); @@ -166,12 +166,12 @@ statext_ndistinct_serialize(MVNDistinct *ndistinct) * for each item, including number of items for each. */ len = VARHDRSZ + SizeOfMVNDistinct + - ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) + sizeof(int)); + ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) +sizeof(int)); /* and also include space for the actual attribute numbers */ for (i = 0; i < ndistinct->nitems; i++) { - int nmembers; + int nmembers; nmembers = bms_num_members(ndistinct->items[i].attrs); Assert(nmembers >= 2); @@ -198,8 +198,8 @@ statext_ndistinct_serialize(MVNDistinct *ndistinct) for (i = 0; i < ndistinct->nitems; i++) { MVNDistinctItem item = ndistinct->items[i]; - int nmembers = bms_num_members(item.attrs); - int x; + int nmembers = bms_num_members(item.attrs); + int x; memcpy(tmp, &item.ndistinct, sizeof(double)); tmp += sizeof(double); @@ -230,7 +230,7 @@ statext_ndistinct_deserialize(bytea *data) { int i; Size minimum_size; - MVNDistinct ndist; + MVNDistinct ndist; MVNDistinct *ndistinct; char *tmp; @@ -275,12 +275,12 @@ statext_ndistinct_deserialize(bytea *data) if (VARSIZE_ANY_EXHDR(data) < minimum_size) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid MVNDistinct size %zd (expected at least %zd)", - VARSIZE_ANY_EXHDR(data), minimum_size))); + errmsg("invalid MVNDistinct size %zd (expected at least %zd)", + VARSIZE_ANY_EXHDR(data), minimum_size))); /* - * Allocate space for the ndistinct items (no space for each item's attnos: - * those live in bitmapsets allocated separately) + * Allocate space for the ndistinct items (no space for each item's + * attnos: those live in bitmapsets allocated separately) */ ndistinct = palloc0(MAXALIGN(SizeOfMVNDistinct) + (ndist.nitems * sizeof(MVNDistinctItem))); @@ -360,8 +360,8 @@ pg_ndistinct_out(PG_FUNCTION_ARGS) for (i = 0; i < ndist->nitems; i++) { MVNDistinctItem item = ndist->items[i]; - int x = -1; - bool first = true; + int x = -1; + bool first = true; if (i > 0) appendStringInfoString(&str, ", "); @@ -449,16 +449,16 @@ ndistinct_for_combination(double totalrows, int numrows, HeapTuple *rows, } /* - * For each dimension, set up sort-support and fill in the values from - * the sample data. + * For each dimension, set up sort-support and fill in the values from the + * sample data. 
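[Editorial aside] ndistinct_for_combination() here feeds estimate_ndistinct(), whose hunk a little further down applies the estimator n*d / (n - f1 + f1*n/N), where d is the number of distinct combinations in the sample, f1 the number seen exactly once, n the sampled rows and N the estimated table size. A worked sketch with invented numbers:

/* Worked example of the ndistinct estimator: n*d / (n - f1 + f1*n/N). */
#include <stdio.h>

int
main(void)
{
    double      totalrows = 1000000.0;  /* N: estimated table size */
    int         numrows = 30000;        /* n: sampled rows */
    int         d = 12000;              /* distinct combinations in the sample */
    int         f1 = 9000;              /* combinations seen exactly once */
    double      numer,
                denom,
                ndistinct;

    numer = (double) numrows * (double) d;
    denom = (double) (numrows - f1) +
        (double) f1 * (double) numrows / totalrows;

    ndistinct = numer / denom;
    printf("estimated ndistinct: %.0f\n", ndistinct);   /* about 16926 */
    return 0;
}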
*/ for (i = 0; i < k; i++) { - VacAttrStats *colstat = stats[combination[i]]; + VacAttrStats *colstat = stats[combination[i]]; TypeCacheEntry *type; type = lookup_type_cache(colstat->attrtypid, TYPECACHE_LT_OPR); - if (type->lt_opr == InvalidOid) /* shouldn't happen */ + if (type->lt_opr == InvalidOid) /* shouldn't happen */ elog(ERROR, "cache lookup failed for ordering operator for type %u", colstat->attrtypid); @@ -513,7 +513,7 @@ estimate_ndistinct(double totalrows, int numrows, int d, int f1) denom, ndistinct; - numer = (double) numrows * (double) d; + numer = (double) numrows *(double) d; denom = (double) (numrows - f1) + (double) f1 *(double) numrows / totalrows; @@ -594,7 +594,7 @@ generator_init(int n, int k) state->ncombinations = n_choose_k(n, k); - /* pre-allocate space for all combinations*/ + /* pre-allocate space for all combinations */ state->combinations = (int *) palloc(sizeof(int) * k * state->ncombinations); state->current = 0; @@ -657,7 +657,7 @@ generate_combinations_recurse(CombinationGenerator *state, /* If we haven't filled all the elements, simply recurse. */ if (index < state->k) { - int i; + int i; /* * The values have to be in ascending order, so make sure we start @@ -688,7 +688,7 @@ generate_combinations_recurse(CombinationGenerator *state, static void generate_combinations(CombinationGenerator *state) { - int *current = (int *) palloc0(sizeof(int) * state->k); + int *current = (int *) palloc0(sizeof(int) * state->k); generate_combinations_recurse(state, 0, 0, current); diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index 81d96a4cc0..2851c5d6a2 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -685,8 +685,8 @@ durable_unlink(const char *fname, int elevel) } /* - * To guarantee that the removal of the file is persistent, fsync - * its parent directory. + * To guarantee that the removal of the file is persistent, fsync its + * parent directory. 
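[Editorial aside] durable_unlink() above makes the removal persistent by fsyncing the parent directory after the unlink. The same idea with plain POSIX calls; "/tmp/demo/victim" and "/tmp/demo" are placeholder paths.

/* POSIX sketch of "durable unlink": remove the file, then fsync its
 * parent directory so the removal survives a crash. */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int
main(void)
{
    const char *file = "/tmp/demo/victim";
    const char *dir = "/tmp/demo";
    int         dirfd;

    if (unlink(file) < 0)
    {
        perror("unlink");
        return 1;
    }

    /* Flushing the directory entry is what makes the removal persistent. */
    dirfd = open(dir, O_RDONLY);
    if (dirfd < 0 || fsync(dirfd) < 0)
    {
        perror("fsync parent directory");
        return 1;
    }
    close(dirfd);
    return 0;
}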
*/ if (fsync_parent_path(fname, elevel) != 0) return -1; diff --git a/src/backend/storage/lmgr/condition_variable.c b/src/backend/storage/lmgr/condition_variable.c index 6f1ef0b7e5..5afb21121b 100644 --- a/src/backend/storage/lmgr/condition_variable.c +++ b/src/backend/storage/lmgr/condition_variable.c @@ -52,7 +52,7 @@ ConditionVariableInit(ConditionVariable *cv) void ConditionVariablePrepareToSleep(ConditionVariable *cv) { - int pgprocno = MyProc->pgprocno; + int pgprocno = MyProc->pgprocno; /* * It's not legal to prepare a sleep until the previous sleep has been @@ -89,10 +89,10 @@ ConditionVariablePrepareToSleep(ConditionVariable *cv) * called in a predicate loop that tests for a specific exit condition and * otherwise sleeps, like so: * - * ConditionVariablePrepareToSleep(cv); [optional] - * while (condition for which we are waiting is not true) - * ConditionVariableSleep(cv, wait_event_info); - * ConditionVariableCancelSleep(); + * ConditionVariablePrepareToSleep(cv); [optional] + * while (condition for which we are waiting is not true) + * ConditionVariableSleep(cv, wait_event_info); + * ConditionVariableCancelSleep(); * * Supply a value from one of the WaitEventXXX enums defined in pgstat.h to * control the contents of pg_stat_activity's wait_event_type and wait_event @@ -101,8 +101,8 @@ ConditionVariablePrepareToSleep(ConditionVariable *cv) void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info) { - WaitEvent event; - bool done = false; + WaitEvent event; + bool done = false; /* * If the caller didn't prepare to sleep explicitly, then do so now and @@ -186,7 +186,7 @@ ConditionVariableCancelSleep(void) bool ConditionVariableSignal(ConditionVariable *cv) { - PGPROC *proc = NULL; + PGPROC *proc = NULL; /* Remove the first process from the wakeup queue (if any). */ SpinLockAcquire(&cv->mutex); @@ -213,13 +213,13 @@ ConditionVariableSignal(ConditionVariable *cv) int ConditionVariableBroadcast(ConditionVariable *cv) { - int nwoken = 0; + int nwoken = 0; /* * Let's just do this the dumbest way possible. We could try to dequeue * all the sleepers at once to save spinlock cycles, but it's a bit hard - * to get that right in the face of possible sleep cancelations, and - * we don't want to loop holding the mutex. + * to get that right in the face of possible sleep cancelations, and we + * don't want to loop holding the mutex. 
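[Editorial aside] The comment block above documents the intended wait loop: prepare, re-test the condition, sleep while it is false, then cancel. A standalone analogue of that predicate-loop shape using POSIX condition variables (not the ConditionVariable API itself):

/* pthread analogue of the documented wait loop: re-check the predicate
 * each time we wake, since wakeups may be spurious or stale. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int  done = 0;

static void *
worker(void *arg)
{
    pthread_mutex_lock(&mtx);
    done = 1;                   /* make the condition true */
    pthread_cond_signal(&cv);   /* analogous to ConditionVariableSignal() */
    pthread_mutex_unlock(&mtx);
    return NULL;
}

int
main(void)
{
    pthread_t   tid;

    pthread_create(&tid, NULL, worker, NULL);

    pthread_mutex_lock(&mtx);
    while (!done)               /* "while (condition ... is not true)" */
        pthread_cond_wait(&cv, &mtx);   /* analogous to ConditionVariableSleep() */
    pthread_mutex_unlock(&mtx);

    pthread_join(tid, NULL);
    printf("condition satisfied\n");
    return 0;
}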
*/ while (ConditionVariableSignal(cv)) ++nwoken; diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 3e133941f4..35536e4789 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -497,7 +497,7 @@ RegisterLWLockTranches(void) LWLockTranchesAllocated = 64; LWLockTrancheArray = (char **) MemoryContextAllocZero(TopMemoryContext, - LWLockTranchesAllocated * sizeof(char *)); + LWLockTranchesAllocated * sizeof(char *)); Assert(LWLockTranchesAllocated >= LWTRANCHE_FIRST_USER_DEFINED); } diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index b0b596d6d9..9bc00b6214 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -1233,7 +1233,7 @@ mdsync(void) INSTR_TIME_SET_CURRENT(sync_start); if (seg != NULL && - FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) >= 0) + FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) >= 0) { /* Success; update statistics about sync timing */ INSTR_TIME_SET_CURRENT(sync_end); diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 24e5c427c6..1e941fbd60 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -1452,6 +1452,7 @@ ProcessUtilitySlow(ParseState *pstate, break; case T_RefreshMatViewStmt: + /* * REFRESH CONCURRENTLY executes some DDL commands internally. * Inhibit DDL command collection here to avoid those commands @@ -1610,6 +1611,7 @@ ProcessUtilitySlow(ParseState *pstate, case T_AlterPublicationStmt: AlterPublication((AlterPublicationStmt *) parsetree); + /* * AlterPublication calls EventTriggerCollectSimpleCommand * directly diff --git a/src/backend/tsearch/to_tsany.c b/src/backend/tsearch/to_tsany.c index 93c08bcf85..18368d118e 100644 --- a/src/backend/tsearch/to_tsany.c +++ b/src/backend/tsearch/to_tsany.c @@ -27,7 +27,7 @@ typedef struct MorphOpaque typedef struct TSVectorBuildState { - ParsedText *prs; + ParsedText *prs; TSVector result; Oid cfgId; } TSVectorBuildState; @@ -268,10 +268,10 @@ to_tsvector(PG_FUNCTION_ARGS) Datum jsonb_to_tsvector_byid(PG_FUNCTION_ARGS) { - Oid cfgId = PG_GETARG_OID(0); - Jsonb *jb = PG_GETARG_JSONB(1); - TSVectorBuildState state; - ParsedText *prs = (ParsedText *) palloc(sizeof(ParsedText)); + Oid cfgId = PG_GETARG_OID(0); + Jsonb *jb = PG_GETARG_JSONB(1); + TSVectorBuildState state; + ParsedText *prs = (ParsedText *) palloc(sizeof(ParsedText)); prs->words = NULL; state.result = NULL; @@ -284,8 +284,10 @@ jsonb_to_tsvector_byid(PG_FUNCTION_ARGS) if (state.result == NULL) { - /* There weren't any string elements in jsonb, - * so wee need to return an empty vector */ + /* + * There weren't any string elements in jsonb, so wee need to return + * an empty vector + */ if (prs->words != NULL) pfree(prs->words); @@ -301,8 +303,8 @@ jsonb_to_tsvector_byid(PG_FUNCTION_ARGS) Datum jsonb_to_tsvector(PG_FUNCTION_ARGS) { - Jsonb *jb = PG_GETARG_JSONB(0); - Oid cfgId; + Jsonb *jb = PG_GETARG_JSONB(0); + Oid cfgId; cfgId = getTSCurrentConfig(true); PG_RETURN_DATUM(DirectFunctionCall2(jsonb_to_tsvector_byid, @@ -313,10 +315,10 @@ jsonb_to_tsvector(PG_FUNCTION_ARGS) Datum json_to_tsvector_byid(PG_FUNCTION_ARGS) { - Oid cfgId = PG_GETARG_OID(0); - text *json = PG_GETARG_TEXT_P(1); - TSVectorBuildState state; - ParsedText *prs = (ParsedText *) palloc(sizeof(ParsedText)); + Oid cfgId = PG_GETARG_OID(0); + text *json = PG_GETARG_TEXT_P(1); + TSVectorBuildState state; + ParsedText *prs = (ParsedText *) palloc(sizeof(ParsedText)); prs->words = NULL; state.result = NULL; @@ -328,8 
+330,10 @@ json_to_tsvector_byid(PG_FUNCTION_ARGS) PG_FREE_IF_COPY(json, 1); if (state.result == NULL) { - /* There weren't any string elements in json, - * so wee need to return an empty vector */ + /* + * There weren't any string elements in json, so wee need to return an + * empty vector + */ if (prs->words != NULL) pfree(prs->words); @@ -345,8 +349,8 @@ json_to_tsvector_byid(PG_FUNCTION_ARGS) Datum json_to_tsvector(PG_FUNCTION_ARGS) { - text *json = PG_GETARG_TEXT_P(0); - Oid cfgId; + text *json = PG_GETARG_TEXT_P(0); + Oid cfgId; cfgId = getTSCurrentConfig(true); PG_RETURN_DATUM(DirectFunctionCall2(json_to_tsvector_byid, @@ -362,7 +366,7 @@ static void add_to_tsvector(void *_state, char *elem_value, int elem_len) { TSVectorBuildState *state = (TSVectorBuildState *) _state; - ParsedText *prs = state->prs; + ParsedText *prs = state->prs; TSVector item_vector; int i; @@ -386,8 +390,8 @@ add_to_tsvector(void *_state, char *elem_value, int elem_len) item_vector = make_tsvector(prs); state->result = (TSVector) DirectFunctionCall2(tsvector_concat, - TSVectorGetDatum(state->result), - PointerGetDatum(item_vector)); + TSVectorGetDatum(state->result), + PointerGetDatum(item_vector)); } else state->result = make_tsvector(prs); diff --git a/src/backend/tsearch/wparser.c b/src/backend/tsearch/wparser.c index 9739558e42..8f4727448f 100644 --- a/src/backend/tsearch/wparser.c +++ b/src/backend/tsearch/wparser.c @@ -38,12 +38,12 @@ typedef struct HeadlineJsonState HeadlineParsedText *prs; TSConfigCacheEntry *cfg; TSParserCacheEntry *prsobj; - TSQuery query; - List *prsoptions; - bool transformed; + TSQuery query; + List *prsoptions; + bool transformed; } HeadlineJsonState; -static text * headline_json_value(void *_state, char *elem_value, int elem_len); +static text *headline_json_value(void *_state, char *elem_value, int elem_len); static void tt_setup_firstcall(FuncCallContext *funcctx, Oid prsid) @@ -382,11 +382,11 @@ ts_headline_opt(PG_FUNCTION_ARGS) Datum ts_headline_jsonb_byid_opt(PG_FUNCTION_ARGS) { - Oid tsconfig = PG_GETARG_OID(0); - Jsonb *jb = PG_GETARG_JSONB(1); - TSQuery query = PG_GETARG_TSQUERY(2); - text *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL; - Jsonb *out; + Oid tsconfig = PG_GETARG_OID(0); + Jsonb *jb = PG_GETARG_JSONB(1); + TSQuery query = PG_GETARG_TSQUERY(2); + text *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL; + Jsonb *out; JsonTransformStringValuesAction action = (JsonTransformStringValuesAction) headline_json_value; HeadlineParsedText prs; HeadlineJsonState *state = palloc0(sizeof(HeadlineJsonState)); @@ -458,11 +458,11 @@ ts_headline_jsonb_opt(PG_FUNCTION_ARGS) Datum ts_headline_json_byid_opt(PG_FUNCTION_ARGS) { - Oid tsconfig = PG_GETARG_OID(0); - text *json = PG_GETARG_TEXT_P(1); - TSQuery query = PG_GETARG_TSQUERY(2); - text *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL; - text *out; + Oid tsconfig = PG_GETARG_OID(0); + text *json = PG_GETARG_TEXT_P(1); + TSQuery query = PG_GETARG_TSQUERY(2); + text *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? 
PG_GETARG_TEXT_P(3) : NULL; + text *out; JsonTransformStringValuesAction action = (JsonTransformStringValuesAction) headline_json_value; HeadlineParsedText prs; @@ -543,8 +543,8 @@ headline_json_value(void *_state, char *elem_value, int elem_len) HeadlineParsedText *prs = state->prs; TSConfigCacheEntry *cfg = state->cfg; TSParserCacheEntry *prsobj = state->prsobj; - TSQuery query = state->query; - List *prsoptions = state->prsoptions; + TSQuery query = state->query; + List *prsoptions = state->prsoptions; prs->curwords = 0; hlparsetext(cfg->cfgId, prs, query, elem_value, elem_len); diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c index 5afadb65d1..5cb086e50e 100644 --- a/src/backend/utils/adt/cash.c +++ b/src/backend/utils/adt/cash.c @@ -203,7 +203,7 @@ cash_in(PG_FUNCTION_ARGS) /* than the required number of decimal places */ if (isdigit((unsigned char) *s) && (!seen_dot || dec < fpoint)) { - Cash newvalue = (value * 10) - (*s - '0'); + Cash newvalue = (value * 10) - (*s - '0'); if (newvalue / 10 != value) ereport(ERROR, @@ -230,7 +230,7 @@ cash_in(PG_FUNCTION_ARGS) /* round off if there's another digit */ if (isdigit((unsigned char) *s) && *s >= '5') - value--; /* remember we build the value in the negative */ + value--; /* remember we build the value in the negative */ if (value > 0) ereport(ERROR, @@ -241,7 +241,7 @@ cash_in(PG_FUNCTION_ARGS) /* adjust for less than required decimal places */ for (; dec < fpoint; dec++) { - Cash newvalue = value * 10; + Cash newvalue = value * 10; if (newvalue / 10 != value) ereport(ERROR, @@ -279,8 +279,10 @@ cash_in(PG_FUNCTION_ARGS) "money", str))); } - /* If the value is supposed to be positive, flip the sign, but check for - * the most negative number. */ + /* + * If the value is supposed to be positive, flip the sign, but check for + * the most negative number. + */ if (sgn > 0) { result = -value; diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index b0418b18dc..f0725860b4 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -90,8 +90,8 @@ calculate_database_size(Oid dbOid) AclResult aclresult; /* - * User must have connect privilege for target database - * or be a member of pg_read_all_stats + * User must have connect privilege for target database or be a member of + * pg_read_all_stats */ aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT); if (aclresult != ACLCHECK_OK && @@ -180,8 +180,8 @@ calculate_tablespace_size(Oid tblspcOid) /* * User must be a member of pg_read_all_stats or have CREATE privilege for - * target tablespace, either explicitly granted or implicitly because - * it is default for current database. + * target tablespace, either explicitly granted or implicitly because it + * is default for current database. 
*/ if (tblspcOid != MyDatabaseTableSpace && !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS)) diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 1e21dd5c68..4127bece12 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -1449,10 +1449,10 @@ str_numth(char *dest, char *num, int type) #ifdef USE_ICU -typedef int32_t (*ICU_Convert_Func)(UChar *dest, int32_t destCapacity, - const UChar *src, int32_t srcLength, - const char *locale, - UErrorCode *pErrorCode); +typedef int32_t (*ICU_Convert_Func) (UChar *dest, int32_t destCapacity, + const UChar *src, int32_t srcLength, + const char *locale, + UErrorCode *pErrorCode); static int32_t icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale, @@ -1461,7 +1461,7 @@ icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale, UErrorCode status; int32_t len_dest; - len_dest = len_source; /* try first with same length */ + len_dest = len_source; /* try first with same length */ *buff_dest = palloc(len_dest * sizeof(**buff_dest)); status = U_ZERO_ERROR; len_dest = func(*buff_dest, len_dest, buff_source, len_source, @@ -1491,7 +1491,7 @@ u_strToTitle_default_BI(UChar *dest, int32_t destCapacity, NULL, locale, pErrorCode); } -#endif /* USE_ICU */ +#endif /* USE_ICU */ /* * If the system provides the needed functions for wide-character manipulation @@ -1592,7 +1592,10 @@ str_tolower(const char *buff, size_t nbytes, Oid collid) workspace[curr_char] = towlower(workspace[curr_char]); } - /* Make result large enough; case change might change number of bytes */ + /* + * Make result large enough; case change might change number + * of bytes + */ result_size = curr_char * pg_database_encoding_max_length() + 1; result = palloc(result_size); @@ -1607,11 +1610,11 @@ str_tolower(const char *buff, size_t nbytes, Oid collid) result = pnstrdup(buff, nbytes); /* - * Note: we assume that tolower_l() will not be so broken as to need - * an isupper_l() guard test. When using the default collation, we - * apply the traditional Postgres behavior that forces ASCII-style - * treatment of I/i, but in non-default collations you get exactly - * what the collation says. + * Note: we assume that tolower_l() will not be so broken as + * to need an isupper_l() guard test. When using the default + * collation, we apply the traditional Postgres behavior that + * forces ASCII-style treatment of I/i, but in non-default + * collations you get exactly what the collation says. */ for (p = result; *p; p++) { @@ -1672,7 +1675,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid) #ifdef USE_ICU if (mylocale && mylocale->provider == COLLPROVIDER_ICU) { - int32_t len_uchar, len_conv; + int32_t len_uchar, + len_conv; UChar *buff_uchar; UChar *buff_conv; @@ -1711,7 +1715,10 @@ str_toupper(const char *buff, size_t nbytes, Oid collid) workspace[curr_char] = towupper(workspace[curr_char]); } - /* Make result large enough; case change might change number of bytes */ + /* + * Make result large enough; case change might change number + * of bytes + */ result_size = curr_char * pg_database_encoding_max_length() + 1; result = palloc(result_size); @@ -1726,11 +1733,11 @@ str_toupper(const char *buff, size_t nbytes, Oid collid) result = pnstrdup(buff, nbytes); /* - * Note: we assume that toupper_l() will not be so broken as to need - * an islower_l() guard test. 
When using the default collation, we - * apply the traditional Postgres behavior that forces ASCII-style - * treatment of I/i, but in non-default collations you get exactly - * what the collation says. + * Note: we assume that toupper_l() will not be so broken as + * to need an islower_l() guard test. When using the default + * collation, we apply the traditional Postgres behavior that + * forces ASCII-style treatment of I/i, but in non-default + * collations you get exactly what the collation says. */ for (p = result; *p; p++) { @@ -1792,7 +1799,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) #ifdef USE_ICU if (mylocale && mylocale->provider == COLLPROVIDER_ICU) { - int32_t len_uchar, len_conv; + int32_t len_uchar, + len_conv; UChar *buff_uchar; UChar *buff_conv; @@ -1843,7 +1851,10 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) } } - /* Make result large enough; case change might change number of bytes */ + /* + * Make result large enough; case change might change number + * of bytes + */ result_size = curr_char * pg_database_encoding_max_length() + 1; result = palloc(result_size); @@ -1858,11 +1869,11 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) result = pnstrdup(buff, nbytes); /* - * Note: we assume that toupper_l()/tolower_l() will not be so broken - * as to need guard tests. When using the default collation, we apply - * the traditional Postgres behavior that forces ASCII-style treatment - * of I/i, but in non-default collations you get exactly what the - * collation says. + * Note: we assume that toupper_l()/tolower_l() will not be so + * broken as to need guard tests. When using the default + * collation, we apply the traditional Postgres behavior that + * forces ASCII-style treatment of I/i, but in non-default + * collations you get exactly what the collation says. 
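[Editorial aside] icu_convert_case() in the formatting.c hunk above first tries the source length as the destination capacity and, on U_BUFFER_OVERFLOW_ERROR, retries with the length ICU reported. The same two-pass pattern as a standalone ICU program (requires the icu-uc headers and library; the German sample string and "de" locale are arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <unicode/ustring.h>

int
main(void)
{
    UChar       src[] = {0x0067, 0x0072, 0x00FC, 0x00DF, 0};    /* "gruess" as g r ü ß */
    int32_t     len_src = 4;
    int32_t     len_dest = len_src;     /* try first with the same length */
    UChar      *dest = malloc(len_dest * sizeof(UChar));
    UErrorCode  status = U_ZERO_ERROR;

    len_dest = u_strToUpper(dest, len_dest, src, len_src, "de", &status);
    if (status == U_BUFFER_OVERFLOW_ERROR)
    {
        /* Second pass with the required length reported by ICU
         * ("GRÜSS" needs one more UChar than the input). */
        free(dest);
        dest = malloc((len_dest + 1) * sizeof(UChar));
        status = U_ZERO_ERROR;
        len_dest = u_strToUpper(dest, len_dest + 1, src, len_src, "de", &status);
    }
    if (U_FAILURE(status))
        fprintf(stderr, "u_strToUpper failed: %s\n", u_errorName(status));
    else
        printf("converted %d UChars\n", len_dest);

    free(dest);
    return 0;
}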
*/ for (p = result; *p; p++) { diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c index 32d6a66688..5b15562ba5 100644 --- a/src/backend/utils/adt/genfile.c +++ b/src/backend/utils/adt/genfile.c @@ -486,7 +486,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir) if (SRF_IS_FIRSTCALL()) { MemoryContext oldcontext; - TupleDesc tupdesc; + TupleDesc tupdesc; funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); @@ -523,7 +523,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir) Datum values[3]; bool nulls[3]; char path[MAXPGPATH * 2]; - struct stat attrib; + struct stat attrib; HeapTuple tuple; /* Skip hidden files */ diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index 9fb0e480bf..0c6572d03e 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -1400,7 +1400,7 @@ json_categorize_type(Oid typoid, if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID || typoid == RECORDARRAYOID) *tcategory = JSONTYPE_ARRAY; - else if (type_is_rowtype(typoid)) /* includes RECORDOID */ + else if (type_is_rowtype(typoid)) /* includes RECORDOID */ *tcategory = JSONTYPE_COMPOSITE; else { diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c index 164f57ef77..952040d5bb 100644 --- a/src/backend/utils/adt/jsonb.c +++ b/src/backend/utils/adt/jsonb.c @@ -647,7 +647,7 @@ jsonb_categorize_type(Oid typoid, if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID || typoid == RECORDARRAYOID) *tcategory = JSONBTYPE_ARRAY; - else if (type_is_rowtype(typoid)) /* includes RECORDOID */ + else if (type_is_rowtype(typoid)) /* includes RECORDOID */ *tcategory = JSONBTYPE_COMPOSITE; else { diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 3966e43dd5..173584fef6 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -56,20 +56,20 @@ typedef struct OkeysState /* state for iterate_json_string_values function */ typedef struct IterateJsonStringValuesState { - JsonLexContext *lex; - JsonIterateStringValuesAction action; /* an action that will be applied - to each json value */ - void *action_state; /* any necessary context for iteration */ + JsonLexContext *lex; + JsonIterateStringValuesAction action; /* an action that will be + * applied to each json value */ + void *action_state; /* any necessary context for iteration */ } IterateJsonStringValuesState; /* state for transform_json_string_values function */ typedef struct TransformJsonStringValuesState { - JsonLexContext *lex; - StringInfo strval; /* resulting json */ - JsonTransformStringValuesAction action; /* an action that will be applied - to each json value */ - void *action_state; /* any necessary context for transformation */ + JsonLexContext *lex; + StringInfo strval; /* resulting json */ + JsonTransformStringValuesAction action; /* an action that will be + * applied to each json value */ + void *action_state; /* any necessary context for transformation */ } TransformJsonStringValuesState; /* state for json_get* functions */ @@ -154,29 +154,29 @@ typedef struct RecordIOData RecordIOData; /* structure to cache metadata needed for populate_array() */ typedef struct ArrayIOData { - ColumnIOData *element_info; /* metadata cache */ - Oid element_type; /* array element type id */ - int32 element_typmod; /* array element type modifier */ + ColumnIOData *element_info; /* metadata cache */ + Oid element_type; /* array element type id */ + 
int32 element_typmod; /* array element type modifier */ } ArrayIOData; /* structure to cache metadata needed for populate_composite() */ typedef struct CompositeIOData { /* - * We use pointer to a RecordIOData here because variable-length - * struct RecordIOData can't be used directly in ColumnIOData.io union + * We use pointer to a RecordIOData here because variable-length struct + * RecordIOData can't be used directly in ColumnIOData.io union */ - RecordIOData *record_io; /* metadata cache for populate_record() */ - TupleDesc tupdesc; /* cached tuple descriptor */ + RecordIOData *record_io; /* metadata cache for populate_record() */ + TupleDesc tupdesc; /* cached tuple descriptor */ } CompositeIOData; /* structure to cache metadata needed for populate_domain() */ typedef struct DomainIOData { - ColumnIOData *base_io; /* metadata cache */ - Oid base_typid; /* base type id */ - int32 base_typmod; /* base type modifier */ - void *domain_info; /* opaque cache for domain checks */ + ColumnIOData *base_io; /* metadata cache */ + Oid base_typid; /* base type id */ + int32 base_typmod; /* base type modifier */ + void *domain_info; /* opaque cache for domain checks */ } DomainIOData; /* enumeration type categories */ @@ -193,17 +193,18 @@ typedef enum TypeCat /* structure to cache record metadata needed for populate_record_field() */ struct ColumnIOData { - Oid typid; /* column type id */ - int32 typmod; /* column type modifier */ - TypeCat typcat; /* column type category */ - ScalarIOData scalar_io; /* metadata cache for directi conversion - * through input function */ + Oid typid; /* column type id */ + int32 typmod; /* column type modifier */ + TypeCat typcat; /* column type category */ + ScalarIOData scalar_io; /* metadata cache for directi conversion + * through input function */ union { - ArrayIOData array; - CompositeIOData composite; - DomainIOData domain; - } io; /* metadata cache for various column type categories */ + ArrayIOData array; + CompositeIOData composite; + DomainIOData domain; + } io; /* metadata cache for various column type + * categories */ }; /* structure to cache record metadata needed for populate_record() */ @@ -234,31 +235,32 @@ typedef struct PopulateRecordsetState /* structure to cache metadata needed for populate_record_worker() */ typedef struct PopulateRecordCache { - Oid argtype; /* verified row type of the first argument */ + Oid argtype; /* verified row type of the first argument */ CompositeIOData io; /* metadata cache for populate_composite() */ } PopulateRecordCache; /* common data for populate_array_json() and populate_array_dim_jsonb() */ typedef struct PopulateArrayContext { - ArrayBuildState *astate; /* array build state */ - ArrayIOData *aio; /* metadata cache */ - MemoryContext acxt; /* array build memory context */ - MemoryContext mcxt; /* cache memory context */ - const char *colname; /* for diagnostics only */ - int *dims; /* dimensions */ - int *sizes; /* current dimension counters */ - int ndims; /* number of dimensions */ + ArrayBuildState *astate; /* array build state */ + ArrayIOData *aio; /* metadata cache */ + MemoryContext acxt; /* array build memory context */ + MemoryContext mcxt; /* cache memory context */ + const char *colname; /* for diagnostics only */ + int *dims; /* dimensions */ + int *sizes; /* current dimension counters */ + int ndims; /* number of dimensions */ } PopulateArrayContext; /* state for populate_array_json() */ typedef struct PopulateArrayState { - JsonLexContext *lex; /* json lexer */ + JsonLexContext *lex; /* json 
lexer */ PopulateArrayContext *ctx; /* context */ - char *element_start; /* start of the current array element */ - char *element_scalar; /* current array element token if it is a scalar */ - JsonTokenType element_type; /* current array element type */ + char *element_start; /* start of the current array element */ + char *element_scalar; /* current array element token if it is a + * scalar */ + JsonTokenType element_type; /* current array element type */ } PopulateArrayState; /* state for json_strip_nulls */ @@ -272,18 +274,18 @@ typedef struct StripnullState /* structure for generalized json/jsonb value passing */ typedef struct JsValue { - bool is_json; /* json/jsonb */ + bool is_json; /* json/jsonb */ union { struct { - char *str; /* json string */ - int len; /* json string length or -1 if null-terminated */ - JsonTokenType type; /* json type */ - } json; /* json value */ + char *str; /* json string */ + int len; /* json string length or -1 if null-terminated */ + JsonTokenType type; /* json type */ + } json; /* json value */ JsonbValue *jsonb; /* jsonb value */ - } val; + } val; } JsValue; typedef struct JsObject @@ -291,9 +293,9 @@ typedef struct JsObject bool is_json; /* json/jsonb */ union { - HTAB *json_hash; + HTAB *json_hash; JsonbContainer *jsonb_cont; - } val; + } val; } JsObject; /* useful macros for testing JsValue properties */ @@ -406,39 +408,39 @@ static void sn_scalar(void *state, char *token, JsonTokenType tokentype); static Datum populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname, bool have_record_arg); static Datum populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, - bool have_record_arg); + bool have_record_arg); /* helper functions for populate_record[set] */ -static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData **record_info, - HeapTupleHeader template, MemoryContext mcxt, - JsObject *obj); +static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData **record_info, + HeapTupleHeader template, MemoryContext mcxt, + JsObject *obj); static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod, - const char *colname, MemoryContext mcxt, - Datum defaultval, JsValue *jsv, bool *isnull); + const char *colname, MemoryContext mcxt, + Datum defaultval, JsValue *jsv, bool *isnull); static void JsValueToJsObject(JsValue *jsv, JsObject *jso); static Datum populate_composite(CompositeIOData *io, Oid typid, int32 typmod, - const char *colname, MemoryContext mcxt, - HeapTupleHeader defaultval, JsValue *jsv); + const char *colname, MemoryContext mcxt, + HeapTupleHeader defaultval, JsValue *jsv); static Datum populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv); static void prepare_column_cache(ColumnIOData *column, Oid typid, int32 typmod, - MemoryContext mcxt, bool json); + MemoryContext mcxt, bool json); static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod, - const char *colname, MemoryContext mcxt, Datum defaultval, - JsValue *jsv, bool *isnull); -static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns); + const char *colname, MemoryContext mcxt, Datum defaultval, + JsValue *jsv, bool *isnull); +static RecordIOData *allocate_record_info(MemoryContext mcxt, int ncolumns); static bool JsObjectGetField(JsObject *obj, char *field, JsValue *jsv); static void populate_recordset_record(PopulateRecordsetState *state, JsObject *obj); static void populate_array_json(PopulateArrayContext *ctx, char *json, int len); -static void 
populate_array_dim_jsonb(PopulateArrayContext *ctx, JsonbValue *jbv, - int ndim); +static void populate_array_dim_jsonb(PopulateArrayContext *ctx, JsonbValue *jbv, + int ndim); static void populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim); static void populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims); static void populate_array_check_dimension(PopulateArrayContext *ctx, int ndim); static void populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv); -static Datum populate_array(ArrayIOData *aio, const char *colname, - MemoryContext mcxt, JsValue *jsv); -static Datum populate_domain(DomainIOData *io, Oid typid, const char *colname, - MemoryContext mcxt, JsValue *jsv, bool isnull); +static Datum populate_array(ArrayIOData *aio, const char *colname, + MemoryContext mcxt, JsValue *jsv); +static Datum populate_domain(DomainIOData *io, Oid typid, const char *colname, + MemoryContext mcxt, JsValue *jsv, bool isnull); /* Worker that takes care of common setup for us */ static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container, @@ -2319,8 +2321,8 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim) } else { - StringInfoData indices; - int i; + StringInfoData indices; + int i; initStringInfo(&indices); @@ -2348,7 +2350,7 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim) static void populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims) { - int i; + int i; Assert(ctx->ndims <= 0); @@ -2360,17 +2362,17 @@ populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims) ctx->sizes = palloc0(sizeof(int) * ndims); for (i = 0; i < ndims; i++) - ctx->dims[i] = -1; /* dimensions are unknown yet */ + ctx->dims[i] = -1; /* dimensions are unknown yet */ } /* check the populated subarray dimension */ static void populate_array_check_dimension(PopulateArrayContext *ctx, int ndim) { - int dim = ctx->sizes[ndim]; /* current dimension counter */ + int dim = ctx->sizes[ndim]; /* current dimension counter */ if (ctx->dims[ndim] == -1) - ctx->dims[ndim] = dim; /* assign dimension if not yet known */ + ctx->dims[ndim] = dim; /* assign dimension if not yet known */ else if (ctx->dims[ndim] != dim) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), @@ -2389,8 +2391,8 @@ populate_array_check_dimension(PopulateArrayContext *ctx, int ndim) static void populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv) { - Datum element; - bool element_isnull; + Datum element; + bool element_isnull; /* populate the array element */ element = populate_record_field(ctx->aio->element_info, @@ -2400,10 +2402,10 @@ populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv) jsv, &element_isnull); accumArrayResult(ctx->astate, element, element_isnull, - ctx->aio->element_type, ctx->acxt); + ctx->aio->element_type, ctx->acxt); Assert(ndim > 0); - ctx->sizes[ndim - 1]++; /* increment current dimension counter */ + ctx->sizes[ndim - 1]++; /* increment current dimension counter */ } /* json object start handler for populate_array_json() */ @@ -2411,7 +2413,7 @@ static void populate_array_object_start(void *_state) { PopulateArrayState *state = (PopulateArrayState *) _state; - int ndim = state->lex->lex_level; + int ndim = state->lex->lex_level; if (state->ctx->ndims <= 0) populate_array_assign_ndims(state->ctx, ndim); @@ -2423,9 +2425,9 @@ populate_array_object_start(void *_state) static void populate_array_array_end(void *_state) { - PopulateArrayState *state = 
(PopulateArrayState *) _state; - PopulateArrayContext *ctx = state->ctx; - int ndim = state->lex->lex_level; + PopulateArrayState *state = (PopulateArrayState *) _state; + PopulateArrayContext *ctx = state->ctx; + int ndim = state->lex->lex_level; if (ctx->ndims <= 0) populate_array_assign_ndims(ctx, ndim + 1); @@ -2439,7 +2441,7 @@ static void populate_array_element_start(void *_state, bool isnull) { PopulateArrayState *state = (PopulateArrayState *) _state; - int ndim = state->lex->lex_level; + int ndim = state->lex->lex_level; if (state->ctx->ndims <= 0 || ndim == state->ctx->ndims) { @@ -2454,9 +2456,9 @@ populate_array_element_start(void *_state, bool isnull) static void populate_array_element_end(void *_state, bool isnull) { - PopulateArrayState *state = (PopulateArrayState *) _state; - PopulateArrayContext *ctx = state->ctx; - int ndim = state->lex->lex_level; + PopulateArrayState *state = (PopulateArrayState *) _state; + PopulateArrayContext *ctx = state->ctx; + int ndim = state->lex->lex_level; Assert(ctx->ndims > 0); @@ -2476,7 +2478,7 @@ populate_array_element_end(void *_state, bool isnull) else if (state->element_scalar) { jsv.val.json.str = state->element_scalar; - jsv.val.json.len = -1; /* null-terminated */ + jsv.val.json.len = -1; /* null-terminated */ } else { @@ -2493,9 +2495,9 @@ populate_array_element_end(void *_state, bool isnull) static void populate_array_scalar(void *_state, char *token, JsonTokenType tokentype) { - PopulateArrayState *state = (PopulateArrayState *) _state; - PopulateArrayContext *ctx = state->ctx; - int ndim = state->lex->lex_level; + PopulateArrayState *state = (PopulateArrayState *) _state; + PopulateArrayContext *ctx = state->ctx; + int ndim = state->lex->lex_level; if (ctx->ndims <= 0) populate_array_assign_ndims(ctx, ndim); @@ -2515,8 +2517,8 @@ populate_array_scalar(void *_state, char *token, JsonTokenType tokentype) static void populate_array_json(PopulateArrayContext *ctx, char *json, int len) { - PopulateArrayState state; - JsonSemAction sem; + PopulateArrayState state; + JsonSemAction sem; state.lex = makeJsonLexContextCstringLen(json, len, true); state.ctx = ctx; @@ -2539,18 +2541,18 @@ populate_array_json(PopulateArrayContext *ctx, char *json, int len) /* * populate_array_dim_jsonb() -- Iterate recursively through jsonb sub-array - * elements and accumulate result using given ArrayBuildState. + * elements and accumulate result using given ArrayBuildState. */ static void -populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ - JsonbValue *jbv, /* jsonb sub-array */ - int ndim) /* current dimension */ +populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ + JsonbValue *jbv, /* jsonb sub-array */ + int ndim) /* current dimension */ { - JsonbContainer *jbc = jbv->val.binary.data; - JsonbIterator *it; - JsonbIteratorToken tok; - JsonbValue val; - JsValue jsv; + JsonbContainer *jbc = jbv->val.binary.data; + JsonbIterator *it; + JsonbIteratorToken tok; + JsonbValue val; + JsValue jsv; check_stack_depth(); @@ -2567,9 +2569,9 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ tok = JsonbIteratorNext(&it, &val, true); /* - * If the number of dimensions is not yet known and - * we have found end of the array, or the first child element is not - * an array, then assign the number of dimensions now. + * If the number of dimensions is not yet known and we have found end of + * the array, or the first child element is not an array, then assign the + * number of dimensions now. 
*/ if (ctx->ndims <= 0 && (tok == WJB_END_ARRAY || @@ -2585,8 +2587,8 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ while (tok == WJB_ELEM) { /* - * Recurse only if the dimensions of dimensions is still unknown or - * if it is not the innermost dimension. + * Recurse only if the dimensions of dimensions is still unknown or if + * it is not the innermost dimension. */ if (ctx->ndims > 0 && ndim >= ctx->ndims) populate_array_element(ctx, ndim, &jsv); @@ -2613,29 +2615,29 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ /* recursively populate an array from json/jsonb */ static Datum -populate_array(ArrayIOData *aio, - const char *colname, - MemoryContext mcxt, - JsValue *jsv) -{ - PopulateArrayContext ctx; - Datum result; - int *lbs; - int i; +populate_array(ArrayIOData *aio, + const char *colname, + MemoryContext mcxt, + JsValue *jsv) +{ + PopulateArrayContext ctx; + Datum result; + int *lbs; + int i; ctx.aio = aio; ctx.mcxt = mcxt; ctx.acxt = CurrentMemoryContext; ctx.astate = initArrayResult(aio->element_type, ctx.acxt, true); ctx.colname = colname; - ctx.ndims = 0; /* unknown yet */ + ctx.ndims = 0; /* unknown yet */ ctx.dims = NULL; ctx.sizes = NULL; if (jsv->is_json) populate_array_json(&ctx, jsv->val.json.str, jsv->val.json.len >= 0 ? jsv->val.json.len - : strlen(jsv->val.json.str)); + : strlen(jsv->val.json.str)); else { populate_array_dim_jsonb(&ctx, jsv->val.jsonb, 1); @@ -2644,7 +2646,7 @@ populate_array(ArrayIOData *aio, Assert(ctx.ndims > 0); - lbs = palloc(sizeof(int) * ctx.ndims); + lbs = palloc(sizeof(int) * ctx.ndims); for (i = 0; i < ctx.ndims; i++) lbs[i] = 1; @@ -2668,11 +2670,11 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso) { /* convert plain-text json into a hash table */ jso->val.json_hash = - get_json_object_as_hash(jsv->val.json.str, - jsv->val.json.len >= 0 - ? jsv->val.json.len - : strlen(jsv->val.json.str), - "populate_composite"); + get_json_object_as_hash(jsv->val.json.str, + jsv->val.json.len >= 0 + ? jsv->val.json.len + : strlen(jsv->val.json.str), + "populate_composite"); } else { @@ -2689,23 +2691,23 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso) /* recursively populate a composite (row type) value from json/jsonb */ static Datum populate_composite(CompositeIOData *io, - Oid typid, - int32 typmod, - const char *colname, - MemoryContext mcxt, - HeapTupleHeader defaultval, - JsValue *jsv) + Oid typid, + int32 typmod, + const char *colname, + MemoryContext mcxt, + HeapTupleHeader defaultval, + JsValue *jsv) { - HeapTupleHeader tuple; - JsObject jso; + HeapTupleHeader tuple; + JsObject jso; /* acquire cached tuple descriptor */ if (!io->tupdesc || io->tupdesc->tdtypeid != typid || io->tupdesc->tdtypmod != typmod) { - TupleDesc tupdesc = lookup_rowtype_tupdesc(typid, typmod); - MemoryContext oldcxt; + TupleDesc tupdesc = lookup_rowtype_tupdesc(typid, typmod); + MemoryContext oldcxt; if (io->tupdesc) FreeTupleDesc(io->tupdesc); @@ -2750,8 +2752,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) jsv->val.json.type == JSON_TOKEN_STRING) { /* - * Add quotes around string value (should be already escaped) - * if converting to json/jsonb. + * Add quotes around string value (should be already escaped) if + * converting to json/jsonb. 
*/ if (len < 0) @@ -2771,7 +2773,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) str[len] = '\0'; } else - str = json; /* null-terminated string */ + str = json; /* null-terminated string */ } else { @@ -2779,7 +2781,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) if (typid == JSONBOID) { - Jsonb *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */ + Jsonb *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */ + return JsonbGetDatum(jsonb); } /* convert jsonb to string for typio call */ @@ -2789,19 +2792,20 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) * Convert scalar jsonb (non-scalars are passed here as jbvBinary) * to json string, preserving quotes around top-level strings. */ - Jsonb *jsonb = JsonbValueToJsonb(jbv); + Jsonb *jsonb = JsonbValueToJsonb(jbv); + str = JsonbToCString(NULL, &jsonb->root, VARSIZE(jsonb)); } - else if (jbv->type == jbvString) /* quotes are stripped */ + else if (jbv->type == jbvString) /* quotes are stripped */ str = pnstrdup(jbv->val.string.val, jbv->val.string.len); else if (jbv->type == jbvBool) str = pstrdup(jbv->val.boolean ? "true" : "false"); else if (jbv->type == jbvNumeric) str = DatumGetCString(DirectFunctionCall1(numeric_out, - PointerGetDatum(jbv->val.numeric))); + PointerGetDatum(jbv->val.numeric))); else if (jbv->type == jbvBinary) str = JsonbToCString(NULL, jbv->val.binary.data, - jbv->val.binary.len); + jbv->val.binary.len); else elog(ERROR, "unrecognized jsonb type: %d", (int) jbv->type); } @@ -2816,12 +2820,12 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) } static Datum -populate_domain(DomainIOData *io, - Oid typid, - const char *colname, - MemoryContext mcxt, - JsValue *jsv, - bool isnull) +populate_domain(DomainIOData *io, + Oid typid, + const char *colname, + MemoryContext mcxt, + JsValue *jsv, + bool isnull) { Datum res; @@ -2843,14 +2847,14 @@ populate_domain(DomainIOData *io, /* prepare column metadata cache for the given type */ static void -prepare_column_cache(ColumnIOData *column, - Oid typid, - int32 typmod, - MemoryContext mcxt, - bool json) +prepare_column_cache(ColumnIOData *column, + Oid typid, + int32 typmod, + MemoryContext mcxt, + bool json) { - HeapTuple tup; - Form_pg_type type; + HeapTuple tup; + Form_pg_type type; column->typid = typid; column->typmod = typmod; @@ -2867,7 +2871,7 @@ prepare_column_cache(ColumnIOData *column, column->io.domain.base_typid = type->typbasetype; column->io.domain.base_typmod = type->typtypmod; column->io.domain.base_io = MemoryContextAllocZero(mcxt, - sizeof(ColumnIOData)); + sizeof(ColumnIOData)); column->io.domain.domain_info = NULL; } else if (type->typtype == TYPTYPE_COMPOSITE || typid == RECORDOID) @@ -2880,7 +2884,7 @@ prepare_column_cache(ColumnIOData *column, { column->typcat = TYPECAT_ARRAY; column->io.array.element_info = MemoryContextAllocZero(mcxt, - sizeof(ColumnIOData)); + sizeof(ColumnIOData)); column->io.array.element_type = type->typelem; /* array element typemod stored in attribute's typmod */ column->io.array.element_typmod = typmod; @@ -2891,7 +2895,7 @@ prepare_column_cache(ColumnIOData *column, /* don't need input function when converting from jsonb to jsonb */ if (json || typid != JSONBOID) { - Oid typioproc; + Oid typioproc; getTypeInputInfo(typid, &typioproc, &column->scalar_io.typioparam); fmgr_info_cxt(typioproc, &column->scalar_io.typiofunc, mcxt); @@ -2903,13 +2907,13 @@ prepare_column_cache(ColumnIOData *column, /* recursively populate 
a record field or an array element from a json/jsonb value */ static Datum populate_record_field(ColumnIOData *col, - Oid typid, - int32 typmod, - const char *colname, - MemoryContext mcxt, - Datum defaultval, - JsValue *jsv, - bool *isnull) + Oid typid, + int32 typmod, + const char *colname, + MemoryContext mcxt, + Datum defaultval, + JsValue *jsv, + bool *isnull) { TypeCat typcat; @@ -2962,9 +2966,9 @@ static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns) { RecordIOData *data = (RecordIOData *) - MemoryContextAlloc(mcxt, - offsetof(RecordIOData, columns) + - ncolumns * sizeof(ColumnIOData)); + MemoryContextAlloc(mcxt, + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); data->record_type = InvalidOid; data->record_typmod = 0; @@ -2986,7 +2990,7 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv) jsv->val.json.type = hashentry ? hashentry->type : JSON_TOKEN_NULL; jsv->val.json.str = jsv->val.json.type == JSON_TOKEN_NULL ? NULL : - hashentry->val; + hashentry->val; jsv->val.json.len = jsv->val.json.str ? -1 : 0; /* null-terminated */ return hashentry != NULL; @@ -2994,8 +2998,8 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv) else { jsv->val.jsonb = !obj->val.jsonb_cont ? NULL : - findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT, - field, strlen(field)); + findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT, + field, strlen(field)); return jsv->val.jsonb != NULL; } @@ -3003,23 +3007,23 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv) /* populate a record tuple from json/jsonb value */ static HeapTupleHeader -populate_record(TupleDesc tupdesc, - RecordIOData **precord, - HeapTupleHeader defaultval, - MemoryContext mcxt, - JsObject *obj) -{ - RecordIOData *record = *precord; - Datum *values; - bool *nulls; - HeapTuple res; - int ncolumns = tupdesc->natts; - int i; +populate_record(TupleDesc tupdesc, + RecordIOData **precord, + HeapTupleHeader defaultval, + MemoryContext mcxt, + JsObject *obj) +{ + RecordIOData *record = *precord; + Datum *values; + bool *nulls; + HeapTuple res; + int ncolumns = tupdesc->natts; + int i; /* - * if the input json is empty, we can only skip the rest if we were - * passed in a non-null record, since otherwise there may be issues - * with domain nulls. + * if the input json is empty, we can only skip the rest if we were passed + * in a non-null record, since otherwise there may be issues with domain + * nulls. */ if (defaultval && JsObjectIsEmpty(obj)) return defaultval; @@ -3034,7 +3038,7 @@ populate_record(TupleDesc tupdesc, record->record_typmod != tupdesc->tdtypmod) { MemSet(record, 0, offsetof(RecordIOData, columns) + - ncolumns * sizeof(ColumnIOData)); + ncolumns * sizeof(ColumnIOData)); record->record_type = tupdesc->tdtypeid; record->record_typmod = tupdesc->tdtypmod; record->ncolumns = ncolumns; @@ -3067,10 +3071,10 @@ populate_record(TupleDesc tupdesc, for (i = 0; i < ncolumns; ++i) { - Form_pg_attribute att = tupdesc->attrs[i]; - char *colname = NameStr(att->attname); - JsValue field = { 0 }; - bool found; + Form_pg_attribute att = tupdesc->attrs[i]; + char *colname = NameStr(att->attname); + JsValue field = {0}; + bool found; /* Ignore dropped columns in datatype */ if (att->attisdropped) @@ -3116,7 +3120,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, { int json_arg_num = have_record_arg ? 
1 : 0; Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num); - JsValue jsv = { 0 }; + JsValue jsv = {0}; HeapTupleHeader rec = NULL; Oid tupType; int32 tupTypmod; @@ -3134,7 +3138,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, */ if (!cache) fcinfo->flinfo->fn_extra = cache = - MemoryContextAllocZero(fnmcxt, sizeof(*cache)); + MemoryContextAllocZero(fnmcxt, sizeof(*cache)); if (have_record_arg) { @@ -3210,7 +3214,8 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, jsv.val.json.str = VARDATA_ANY(json); jsv.val.json.len = VARSIZE_ANY_EXHDR(json); - jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */ + jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in + * populate_composite() */ } else { @@ -3417,8 +3422,8 @@ json_to_recordset(PG_FUNCTION_ARGS) static void populate_recordset_record(PopulateRecordsetState *state, JsObject *obj) { - HeapTupleData tuple; - HeapTupleHeader tuphead = populate_record(state->ret_tdesc, + HeapTupleData tuple; + HeapTupleHeader tuphead = populate_record(state->ret_tdesc, state->my_extra, state->rec, state->fn_mcxt, @@ -4793,9 +4798,9 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, void iterate_jsonb_string_values(Jsonb *jb, void *state, JsonIterateStringValuesAction action) { - JsonbIterator *it; - JsonbValue v; - JsonbIteratorToken type; + JsonbIterator *it; + JsonbValue v; + JsonbIteratorToken type; it = JsonbIteratorInit(&jb->root); @@ -4817,7 +4822,7 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu { JsonLexContext *lex = makeJsonLexContext(json, true); JsonSemAction *sem = palloc0(sizeof(JsonSemAction)); - IterateJsonStringValuesState *state = palloc0(sizeof(IterateJsonStringValuesState)); + IterateJsonStringValuesState *state = palloc0(sizeof(IterateJsonStringValuesState)); state->lex = lex; state->action = action; @@ -4836,7 +4841,8 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu static void iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype) { - IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state; + IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state; + if (tokentype == JSON_TOKEN_STRING) (*_state->action) (_state->action_state, token, strlen(token)); } @@ -4849,14 +4855,15 @@ iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype) */ Jsonb * transform_jsonb_string_values(Jsonb *jsonb, void *action_state, - JsonTransformStringValuesAction transform_action) + JsonTransformStringValuesAction transform_action) { - JsonbIterator *it; - JsonbValue v, *res = NULL; - JsonbIteratorToken type; - JsonbParseState *st = NULL; - text *out; - bool is_scalar = false; + JsonbIterator *it; + JsonbValue v, + *res = NULL; + JsonbIteratorToken type; + JsonbParseState *st = NULL; + text *out; + bool is_scalar = false; it = JsonbIteratorInit(&jsonb->root); is_scalar = it->isScalar; @@ -4928,6 +4935,7 @@ static void transform_string_values_object_start(void *state) { TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, '{'); } @@ -4935,6 +4943,7 @@ static void transform_string_values_object_end(void *state) { TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, '}'); } @@ -4942,6 +4951,7 @@ static void transform_string_values_array_start(void *state) 
{ TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, '['); } @@ -4949,6 +4959,7 @@ static void transform_string_values_array_end(void *state) { TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, ']'); } @@ -4984,7 +4995,8 @@ transform_string_values_scalar(void *state, char *token, JsonTokenType tokentype if (tokentype == JSON_TOKEN_STRING) { - text *out = (*_state->action) (_state->action_state, token, strlen(token)); + text *out = (*_state->action) (_state->action_state, token, strlen(token)); + escape_json(_state->strval, text_to_cstring(out)); } else diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c index b9806069c2..d4d173480d 100644 --- a/src/backend/utils/adt/like.c +++ b/src/backend/utils/adt/like.c @@ -180,7 +180,7 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation) */ ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), - errmsg("could not determine which collation to use for ILIKE"), + errmsg("could not determine which collation to use for ILIKE"), errhint("Use the COLLATE clause to set the collation explicitly."))); } locale = pg_newlocale_from_collation(collation); @@ -189,9 +189,9 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation) /* * For efficiency reasons, in the single byte case we don't call lower() * on the pattern and text, but instead call SB_lower_char on each - * character. In the multi-byte case we don't have much choice :-(. - * Also, ICU does not support single-character case folding, so we go the - * long way. + * character. In the multi-byte case we don't have much choice :-(. Also, + * ICU does not support single-character case folding, so we go the long + * way. 
*/ if (pg_database_encoding_max_length() > 1 || (locale && locale->provider == COLLPROVIDER_ICU)) diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c index eff4529a6a..c2b52d8046 100644 --- a/src/backend/utils/adt/mac.c +++ b/src/backend/utils/adt/mac.c @@ -40,7 +40,7 @@ typedef struct bool estimating; /* true if estimating cardinality */ hyperLogLogState abbr_card; /* cardinality estimator */ -} macaddr_sortsupport_state; +} macaddr_sortsupport_state; static int macaddr_cmp_internal(macaddr *a1, macaddr *a2); static int macaddr_fast_cmp(Datum x, Datum y, SortSupport ssup); diff --git a/src/backend/utils/adt/mac8.c b/src/backend/utils/adt/mac8.c index c442eae6c1..1ed4183be7 100644 --- a/src/backend/utils/adt/mac8.c +++ b/src/backend/utils/adt/mac8.c @@ -103,7 +103,7 @@ invalid_input: Datum macaddr8_in(PG_FUNCTION_ARGS) { - const unsigned char *str = (unsigned char*) PG_GETARG_CSTRING(0); + const unsigned char *str = (unsigned char *) PG_GETARG_CSTRING(0); const unsigned char *ptr = str; macaddr8 *result; unsigned char a = 0, diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index e2ccac2d2a..24ae3c6886 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -1282,7 +1282,7 @@ pg_newlocale_from_collation(Oid collid) Form_pg_collation collform; const char *collcollate; const char *collctype pg_attribute_unused(); - pg_locale_t result; + pg_locale_t result; Datum collversion; bool isnull; @@ -1294,8 +1294,8 @@ pg_newlocale_from_collation(Oid collid) collcollate = NameStr(collform->collcollate); collctype = NameStr(collform->collctype); - result = malloc(sizeof(* result)); - memset(result, 0, sizeof(* result)); + result = malloc(sizeof(*result)); + memset(result, 0, sizeof(*result)); result->provider = collform->collprovider; if (collform->collprovider == COLLPROVIDER_LIBC) @@ -1308,7 +1308,7 @@ pg_newlocale_from_collation(Oid collid) /* Normal case where they're the same */ #ifndef WIN32 loc = newlocale(LC_COLLATE_MASK | LC_CTYPE_MASK, collcollate, - NULL); + NULL); #else loc = _create_locale(LC_ALL, collcollate); #endif @@ -1330,9 +1330,9 @@ pg_newlocale_from_collation(Oid collid) #else /* - * XXX The _create_locale() API doesn't appear to support this. - * Could perhaps be worked around by changing pg_locale_t to - * contain two separate fields. + * XXX The _create_locale() API doesn't appear to support + * this. Could perhaps be worked around by changing + * pg_locale_t to contain two separate fields. 
*/ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -1358,18 +1358,18 @@ pg_newlocale_from_collation(Oid collid) collator = ucol_open(collcollate, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not open collator for locale \"%s\": %s", - collcollate, u_errorName(status)))); + (errmsg("could not open collator for locale \"%s\": %s", + collcollate, u_errorName(status)))); result->info.icu.locale = strdup(collcollate); result->info.icu.ucol = collator; -#else /* not USE_ICU */ +#else /* not USE_ICU */ /* could get here if a collation was created by a build with ICU */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ICU is not supported in this build"), \ - errhint("You need to rebuild PostgreSQL using --with-icu."))); -#endif /* not USE_ICU */ + errhint("You need to rebuild PostgreSQL using --with-icu."))); +#endif /* not USE_ICU */ } collversion = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion, @@ -1382,9 +1382,11 @@ pg_newlocale_from_collation(Oid collid) actual_versionstr = get_collation_actual_version(collform->collprovider, collcollate); if (!actual_versionstr) { - /* This could happen when specifying a version in CREATE - * COLLATION for a libc locale, or manually creating a mess - * in the catalogs. */ + /* + * This could happen when specifying a version in CREATE + * COLLATION for a libc locale, or manually creating a mess in + * the catalogs. + */ ereport(ERROR, (errmsg("collation \"%s\" has no actual version, but a version was specified", NameStr(collform->collname)))); @@ -1396,13 +1398,13 @@ pg_newlocale_from_collation(Oid collid) (errmsg("collation \"%s\" has version mismatch", NameStr(collform->collname)), errdetail("The collation in the database was created using version %s, " - "but the operating system provides version %s.", + "but the operating system provides version %s.", collversionstr, actual_versionstr), errhint("Rebuild all objects affected by this collation and run " "ALTER COLLATION %s REFRESH VERSION, " - "or build PostgreSQL with the right library version.", + "or build PostgreSQL with the right library version.", quote_qualified_identifier(get_namespace_name(collform->collnamespace), - NameStr(collform->collname))))); + NameStr(collform->collname))))); } ReleaseSysCache(tp); @@ -1478,8 +1480,8 @@ init_icu_converter(void) conv = ucnv_open(icu_encoding_name, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not open ICU converter for encoding \"%s\": %s", - icu_encoding_name, u_errorName(status)))); + (errmsg("could not open ICU converter for encoding \"%s\": %s", + icu_encoding_name, u_errorName(status)))); icu_converter = conv; } @@ -1492,7 +1494,7 @@ icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes) init_icu_converter(); - len_uchar = 2 * nbytes; /* max length per docs */ + len_uchar = 2 * nbytes; /* max length per docs */ *buff_uchar = palloc(len_uchar * sizeof(**buff_uchar)); status = U_ZERO_ERROR; len_uchar = ucnv_toUChars(icu_converter, *buff_uchar, len_uchar, buff, nbytes, &status); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 43b1475035..9234bc2a97 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -1448,7 +1448,7 @@ pg_get_statisticsobjdef(PG_FUNCTION_ARGS) static char * pg_get_statisticsobj_worker(Oid statextid, bool missing_ok) { - Form_pg_statistic_ext statextrec; + Form_pg_statistic_ext statextrec; HeapTuple statexttup; StringInfoData buf; int colno; @@ -1477,7 +1477,7 @@ 
pg_get_statisticsobj_worker(Oid statextid, bool missing_ok) nsp = get_namespace_name(statextrec->stxnamespace); appendStringInfo(&buf, "CREATE STATISTICS %s", quote_qualified_identifier(nsp, - NameStr(statextrec->stxname))); + NameStr(statextrec->stxname))); /* * Decode the stxkind column so that we know which stats types to print. @@ -1735,11 +1735,11 @@ pg_get_partkeydef_worker(Oid relid, int prettyFlags, Datum pg_get_partition_constraintdef(PG_FUNCTION_ARGS) { - Oid relationId = PG_GETARG_OID(0); - Expr *constr_expr; - int prettyFlags; - List *context; - char *consrc; + Oid relationId = PG_GETARG_OID(0); + Expr *constr_expr; + int prettyFlags; + List *context; + char *consrc; constr_expr = get_partition_qual_relid(relationId); diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 7028d6387c..6e491bbc21 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -170,7 +170,7 @@ static double eqjoinsel_semi(Oid operator, VariableStatData *vardata1, VariableStatData *vardata2, RelOptInfo *inner_rel); static bool estimate_multivariate_ndistinct(PlannerInfo *root, - RelOptInfo *rel, List **varinfos, double *ndistinct); + RelOptInfo *rel, List **varinfos, double *ndistinct); static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue, Datum lobound, Datum hibound, Oid boundstypid, double *scaledlobound, double *scaledhibound); @@ -3364,8 +3364,8 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List *relvarinfos = NIL; /* - * Split the list of varinfos in two - one for the current rel, - * one for remaining Vars on other rels. + * Split the list of varinfos in two - one for the current rel, one + * for remaining Vars on other rels. */ relvarinfos = lcons(varinfo1, relvarinfos); for_each_cell(l, lnext(list_head(varinfos))) @@ -3388,9 +3388,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, * Get the numdistinct estimate for the Vars of this rel. We * iteratively search for multivariate n-distinct with maximum number * of vars; assuming that each var group is independent of the others, - * we multiply them together. Any remaining relvarinfos after - * no more multivariate matches are found are assumed independent too, - * so their individual ndistinct estimates are multiplied also. + * we multiply them together. Any remaining relvarinfos after no more + * multivariate matches are found are assumed independent too, so + * their individual ndistinct estimates are multiplied also. * * While iterating, count how many separate numdistinct values we * apply. 
We apply a fudge factor below, but only if we multiplied @@ -3410,7 +3410,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, } else { - foreach (l, relvarinfos) + foreach(l, relvarinfos) { GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l); @@ -3702,12 +3702,12 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, } /* look for the ndistinct statistics matching the most vars */ - nmatches = 1; /* we require at least two matches */ + nmatches = 1; /* we require at least two matches */ foreach(lc, rel->statlist) { StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc); Bitmapset *shared; - int nshared; + int nshared; /* skip statistics of other kinds */ if (info->kind != STATS_EXT_NDISTINCT) @@ -3745,8 +3745,8 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, */ if (stats) { - int i; - List *newlist = NIL; + int i; + List *newlist = NIL; MVNDistinctItem *item = NULL; /* Find the specific item that exactly matches the combination */ @@ -7766,8 +7766,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, * * Because we can use all index quals equally when scanning, we can use * the largest correlation (in absolute value) among columns used by the - * query. Start at zero, the worst possible case. If we cannot find - * any correlation statistics, we will keep it as 0. + * query. Start at zero, the worst possible case. If we cannot find any + * correlation statistics, we will keep it as 0. */ *indexCorrelation = 0; @@ -7790,7 +7790,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, */ if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc) elog(ERROR, - "no function provided to release variable stats with"); + "no function provided to release variable stats with"); } else { @@ -7813,11 +7813,11 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, attnum = qinfo->indexcol + 1; if (get_index_stats_hook && - (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata)) + (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata)) { /* - * The hook took control of acquiring a stats tuple. If it did - * supply a tuple, it'd better have supplied a freefunc. + * The hook took control of acquiring a stats tuple. If it + * did supply a tuple, it'd better have supplied a freefunc. */ if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc) @@ -7826,7 +7826,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, else { vardata.statsTuple = SearchSysCache3(STATRELATTINH, - ObjectIdGetDatum(index->indexoid), + ObjectIdGetDatum(index->indexoid), Int16GetDatum(attnum), BoolGetDatum(false)); vardata.freefunc = ReleaseSysCache; @@ -7872,8 +7872,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, /* * Now estimate the number of ranges that we'll touch by using the - * indexCorrelation from the stats. Careful not to divide by zero - * (note we're using the absolute value of the correlation). + * indexCorrelation from the stats. Careful not to divide by zero (note + * we're using the absolute value of the correlation). */ if (*indexCorrelation < 1.0e-10) estimatedRanges = indexRanges; @@ -7888,8 +7888,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, *indexSelectivity = selec; /* - * Compute the index qual costs, much as in genericcostestimate, to add - * to the index costs. + * Compute the index qual costs, much as in genericcostestimate, to add to + * the index costs. 
*/ qual_arg_cost = other_operands_eval_cost(root, qinfos) + orderby_operands_eval_cost(root, path); diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c index 5c64e32719..5dd996f62c 100644 --- a/src/backend/utils/adt/txid.c +++ b/src/backend/utils/adt/txid.c @@ -147,8 +147,8 @@ TransactionIdInRecentPast(uint64 xid_with_epoch, TransactionId *extracted_xid) /* * If the transaction ID has wrapped around, it's definitely too old to * determine the commit status. Otherwise, we can compare it to - * ShmemVariableCache->oldestClogXid to determine whether the relevant CLOG - * entry is guaranteed to still exist. + * ShmemVariableCache->oldestClogXid to determine whether the relevant + * CLOG entry is guaranteed to still exist. */ if (xid_epoch + 1 < now_epoch || (xid_epoch + 1 == now_epoch && xid < now_epoch_last_xid) @@ -454,7 +454,7 @@ txid_current_if_assigned(PG_FUNCTION_ARGS) { txid val; TxidEpoch state; - TransactionId topxid = GetTopTransactionIdIfAny(); + TransactionId topxid = GetTopTransactionIdIfAny(); if (topxid == InvalidTransactionId) PG_RETURN_NULL(); @@ -741,9 +741,9 @@ txid_snapshot_xip(PG_FUNCTION_ARGS) Datum txid_status(PG_FUNCTION_ARGS) { - const char *status; - uint64 xid_with_epoch = PG_GETARG_INT64(0); - TransactionId xid; + const char *status; + uint64 xid_with_epoch = PG_GETARG_INT64(0); + TransactionId xid; /* * We must protect against concurrent truncation of clog entries to avoid @@ -770,8 +770,8 @@ txid_status(PG_FUNCTION_ARGS) * it's aborted if it isn't committed and is older than our * snapshot xmin. * - * Otherwise it must be in-progress (or have been at the time - * we checked commit/abort status). + * Otherwise it must be in-progress (or have been at the time we + * checked commit/abort status). */ if (TransactionIdPrecedes(xid, GetActiveSnapshot()->xmin)) status = gettext_noop("aborted"); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index 0b0032787b..be399f48f9 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -1557,8 +1557,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid) else #endif { - int32_t ulen1, ulen2; - UChar *uchar1, *uchar2; + int32_t ulen1, + ulen2; + UChar *uchar1, + *uchar2; ulen1 = icu_to_uchar(&uchar1, arg1, len1); ulen2 = icu_to_uchar(&uchar2, arg2, len2); @@ -1567,10 +1569,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid) uchar1, ulen1, uchar2, ulen2); } -#else /* not USE_ICU */ +#else /* not USE_ICU */ /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", mylocale->provider); -#endif /* not USE_ICU */ +#endif /* not USE_ICU */ } else { @@ -2136,13 +2138,15 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup) &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("collation failed: %s", u_errorName(status)))); + (errmsg("collation failed: %s", u_errorName(status)))); } else #endif { - int32_t ulen1, ulen2; - UChar *uchar1, *uchar2; + int32_t ulen1, + ulen2; + UChar *uchar1, + *uchar2; ulen1 = icu_to_uchar(&uchar1, a1p, len1); ulen2 = icu_to_uchar(&uchar2, a2p, len2); @@ -2151,10 +2155,10 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup) uchar1, ulen1, uchar2, ulen2); } -#else /* not USE_ICU */ +#else /* not USE_ICU */ /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", sss->locale->provider); -#endif /* not USE_ICU */ +#endif /* not USE_ICU */ } else { @@ -2300,8 +2304,11 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) } memcpy(sss->buf1, 
authoritative_data, len); - /* Just like strcoll(), strxfrm() expects a NUL-terminated string. - * Not necessary for ICU, but doesn't hurt. */ + + /* + * Just like strcoll(), strxfrm() expects a NUL-terminated string. Not + * necessary for ICU, but doesn't hurt. + */ sss->buf1[len] = '\0'; sss->last_len1 = len; @@ -2336,13 +2343,13 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) UErrorCode status; uiter_setUTF8(&iter, sss->buf1, len); - state[0] = state[1] = 0; /* won't need that again */ + state[0] = state[1] = 0; /* won't need that again */ status = U_ZERO_ERROR; bsize = ucol_nextSortKeyPart(sss->locale->info.icu.ucol, &iter, state, (uint8_t *) sss->buf2, - Min(sizeof(Datum), sss->buflen2), + Min(sizeof(Datum), sss->buflen2), &status); if (U_FAILURE(status)) ereport(ERROR, @@ -2351,7 +2358,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) else bsize = ucol_getSortKey(sss->locale->info.icu.ucol, uchar, ulen, - (uint8_t *) sss->buf2, sss->buflen2); + (uint8_t *) sss->buf2, sss->buflen2); } else #endif diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 42cffbbdd3..cdcd45419a 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -2385,8 +2385,8 @@ database_get_xml_visible_tables(void) CppAsString2(RELKIND_RELATION) "," CppAsString2(RELKIND_MATVIEW) "," CppAsString2(RELKIND_VIEW) ")" - " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')" - " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");"); + " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')" + " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");"); } @@ -4518,9 +4518,8 @@ XmlTableGetValue(TableFuncScanState *state, int colnum, * This line ensure mapping of empty tags to PostgreSQL * value. Usually we would to map a empty tag to empty * string. But this mapping can create empty string when - * user doesn't expect it - when empty tag is enforced - * by libxml2 - when user uses a text() function for - * example. + * user doesn't expect it - when empty tag is enforced by + * libxml2 - when user uses a text() function for example. */ cstr = ""; } diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index a1e6ea2a35..819121638e 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -386,10 +386,9 @@ AddRelcacheInvalidationMessage(InvalidationListHeader *hdr, SharedInvalidationMessage msg; /* - * Don't add a duplicate item. - * We assume dbId need not be checked because it will never change. - * InvalidOid for relId means all relations so we don't need to add - * individual ones when it is present. + * Don't add a duplicate item. We assume dbId need not be checked because + * it will never change. InvalidOid for relId means all relations so we + * don't need to add individual ones when it is present. */ ProcessMessageList(hdr->rclist, if (msg->rc.id == SHAREDINVALRELCACHE_ID && @@ -523,8 +522,8 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId) /* * If the relation being invalidated is one of those cached in the local - * relcache init file, mark that we need to zap that file at commit. - * Same is true when we are invalidating whole relcache. + * relcache init file, mark that we need to zap that file at commit. Same + * is true when we are invalidating whole relcache. 
*/ if (OidIsValid(dbId) && (RelationIdIsInInitFile(relId) || relId == InvalidOid)) @@ -1139,8 +1138,8 @@ CacheInvalidateHeapTuple(Relation relation, RegisterCatcacheInvalidation); /* - * Now, is this tuple one of the primary definers of a relcache entry? - * See comments in file header for deeper explanation. + * Now, is this tuple one of the primary definers of a relcache entry? See + * comments in file header for deeper explanation. * * Note we ignore newtuple here; we assume an update cannot move a tuple * from being part of one relcache entry to being part of another. diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index b94d475505..4def73ddfb 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -858,7 +858,7 @@ get_attidentity(Oid relid, AttrNumber attnum) if (HeapTupleIsValid(tp)) { Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - char result; + char result; result = att_tup->attidentity; ReleaseSysCache(tp); diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index abff7474f5..4b5f8107ef 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -89,7 +89,7 @@ static CachedPlanSource *first_saved_plan = NULL; static void ReleaseGenericPlan(CachedPlanSource *plansource); static List *RevalidateCachedQuery(CachedPlanSource *plansource, - QueryEnvironment *queryEnv); + QueryEnvironment *queryEnv); static bool CheckCachedPlan(CachedPlanSource *plansource); static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist, ParamListInfo boundParams, QueryEnvironment *queryEnv); @@ -1520,7 +1520,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire) * acquire a non-conflicting lock. */ if (list_member_int(plannedstmt->resultRelations, rt_index) || - list_member_int(plannedstmt->nonleafResultRelations, rt_index)) + list_member_int(plannedstmt->nonleafResultRelations, rt_index)) lockmode = RowExclusiveLock; else if ((rc = get_plan_rowmark(plannedstmt->rowMarks, rt_index)) != NULL && RowMarkRequiresRowShareLock(rc->markType)) diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 0cd6289f91..c2e8361f2f 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -4504,7 +4504,10 @@ RelationGetStatExtList(Relation relation) */ result = NIL; - /* Prepare to scan pg_statistic_ext for entries having stxrelid = this rel. */ + /* + * Prepare to scan pg_statistic_ext for entries having stxrelid = this + * rel. + */ ScanKeyInit(&skey, Anum_pg_statistic_ext_stxrelid, BTEqualStrategyNumber, F_OIDEQ, @@ -4603,9 +4606,10 @@ RelationSetIndexList(Relation relation, List *indexIds, Oid oidIndex) list_free(relation->rd_indexlist); relation->rd_indexlist = indexIds; relation->rd_oidindex = oidIndex; + /* - * For the moment, assume the target rel hasn't got a pk or replica - * index. We'll load them on demand in the API that wraps access to them. + * For the moment, assume the target rel hasn't got a pk or replica index. + * We'll load them on demand in the API that wraps access to them. 
  */
     relation->rd_pkindex = InvalidOid;
     relation->rd_replidindex = InvalidOid;
@@ -5169,7 +5173,7 @@ GetRelationPublicationActions(Relation relation)
 {
     List *puboids;
     ListCell *lc;
-    MemoryContext oldcxt;
+    MemoryContext oldcxt;
     PublicationActions *pubactions = palloc0(sizeof(PublicationActions));
     if (relation->rd_pubactions)
@@ -5200,8 +5204,8 @@ GetRelationPublicationActions(Relation relation)
         ReleaseSysCache(tup);
         /*
-         * If we know everything is replicated, there is no point to check
-         * for other publications.
+         * If we know everything is replicated, there is no point to check for
+         * other publications.
          */
         if (pubactions->pubinsert && pubactions->pubupdate &&
             pubactions->pubdelete)
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index f0a16e309c..922718c9d1 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -661,7 +661,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         16
     },
-    {PublicationRelationId, /* PUBLICATIONOID */
+    {PublicationRelationId, /* PUBLICATIONOID */
         PublicationObjectIndexId,
         1,
         {
@@ -672,7 +672,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         8
     },
-    {PublicationRelationId, /* PUBLICATIONNAME */
+    {PublicationRelationId, /* PUBLICATIONNAME */
         PublicationNameIndexId,
         1,
         {
@@ -683,7 +683,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         8
     },
-    {PublicationRelRelationId, /* PUBLICATIONREL */
+    {PublicationRelRelationId, /* PUBLICATIONREL */
         PublicationRelObjectIndexId,
         1,
         {
@@ -694,7 +694,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         64
     },
-    {PublicationRelRelationId, /* PUBLICATIONRELMAP */
+    {PublicationRelRelationId, /* PUBLICATIONRELMAP */
         PublicationRelPrrelidPrpubidIndexId,
         2,
         {
@@ -716,7 +716,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         8
     },
-    {SequenceRelationId, /* SEQRELID */
+    {SequenceRelationId, /* SEQRELID */
         SequenceRelidIndexId,
         1,
         {
@@ -760,7 +760,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         128
     },
-    {SubscriptionRelationId, /* SUBSCRIPTIONOID */
+    {SubscriptionRelationId, /* SUBSCRIPTIONOID */
         SubscriptionObjectIndexId,
         1,
         {
@@ -771,7 +771,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         4
     },
-    {SubscriptionRelationId, /* SUBSCRIPTIONNAME */
+    {SubscriptionRelationId, /* SUBSCRIPTIONNAME */
         SubscriptionNameIndexId,
         2,
         {
@@ -782,7 +782,7 @@ static const struct cachedesc cacheinfo[] = {
         },
         4
     },
-    {SubscriptionRelRelationId, /* SUBSCRIPTIONRELMAP */
+    {SubscriptionRelRelationId, /* SUBSCRIPTIONRELMAP */
         SubscriptionRelSrrelidSrsubidIndexId,
         2,
         {
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 9739c4c144..28c2583f96 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -65,7 +65,7 @@ char *Dynamic_library_path;
 static void *internal_load_library(const char *libname);
 static void incompatible_module_error(const char *libname,
-        const Pg_magic_struct *module_magic_data) pg_attribute_noreturn();
+        const Pg_magic_struct *module_magic_data) pg_attribute_noreturn();
 static void internal_unload_library(const char *libname);
 static bool file_exists(const char *name);
 static char *expand_dynamic_library_name(const char *name);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index d9e3bf240d..f6d2b7d63e 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -396,10 +396,10 @@ fetch_finfo_record(void *filehandle, const char *funcname)
     {
         ereport(ERROR,
                 (errcode(ERRCODE_UNDEFINED_FUNCTION),
-                 errmsg("could not find function information for function \"%s\"",
-                        funcname),
+                 errmsg("could not find function information for function \"%s\"",
+                        funcname),
                  errhint("SQL-callable functions need an accompanying PG_FUNCTION_INFO_V1(funcname).")));
-        return NULL; /* silence compiler */
+        return NULL; /* silence compiler */
     }
     /* Found, so call it */
diff --git a/src/backend/utils/mb/conv.c b/src/backend/utils/mb/conv.c
index 5ce5c9a9c2..d46330b207 100644
--- a/src/backend/utils/mb/conv.c
+++ b/src/backend/utils/mb/conv.c
@@ -445,7 +445,7 @@ pg_mb_radix_conv(const pg_mb_radix_tree *rt,
         else
             return rt->chars16[b4 + rt->b1root - rt->b1_lower];
     }
-    return 0; /* shouldn't happen */
+    return 0; /* shouldn't happen */
 }
 /*
@@ -607,7 +607,8 @@ UtfToLocal(const unsigned char *utf, int len,
         /* Now check ordinary map */
         if (map)
         {
-            uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
+            uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
+
             if (converted)
             {
                 iso = store_coded_char(iso, converted);
@@ -731,7 +732,7 @@ LocalToUtf(const unsigned char *iso, int len,
         if (map)
         {
-            uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
+            uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
             if (converted)
             {
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
index 4a73ec4776..ac0bc915ed 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
@@ -60,37 +60,37 @@ PG_FUNCTION_INFO_V1(utf8_to_iso8859);
 typedef struct
 {
     pg_enc encoding;
-    const pg_mb_radix_tree *map1; /* to UTF8 map name */
-    const pg_mb_radix_tree *map2; /* from UTF8 map name */
+    const pg_mb_radix_tree *map1; /* to UTF8 map name */
+    const pg_mb_radix_tree *map2; /* from UTF8 map name */
 } pg_conv_map;
 static const pg_conv_map maps[] = {
     {PG_LATIN2, &iso8859_2_to_unicode_tree,
-     &iso8859_2_from_unicode_tree}, /* ISO-8859-2 Latin 2 */
+     &iso8859_2_from_unicode_tree}, /* ISO-8859-2 Latin 2 */
     {PG_LATIN3, &iso8859_3_to_unicode_tree,
-     &iso8859_3_from_unicode_tree}, /* ISO-8859-3 Latin 3 */
+     &iso8859_3_from_unicode_tree}, /* ISO-8859-3 Latin 3 */
     {PG_LATIN4, &iso8859_4_to_unicode_tree,
-     &iso8859_4_from_unicode_tree}, /* ISO-8859-4 Latin 4 */
+     &iso8859_4_from_unicode_tree}, /* ISO-8859-4 Latin 4 */
     {PG_LATIN5, &iso8859_9_to_unicode_tree,
-     &iso8859_9_from_unicode_tree}, /* ISO-8859-9 Latin 5 */
+     &iso8859_9_from_unicode_tree}, /* ISO-8859-9 Latin 5 */
     {PG_LATIN6, &iso8859_10_to_unicode_tree,
-     &iso8859_10_from_unicode_tree}, /* ISO-8859-10 Latin 6 */
+     &iso8859_10_from_unicode_tree}, /* ISO-8859-10 Latin 6 */
     {PG_LATIN7, &iso8859_13_to_unicode_tree,
-     &iso8859_13_from_unicode_tree}, /* ISO-8859-13 Latin 7 */
+     &iso8859_13_from_unicode_tree}, /* ISO-8859-13 Latin 7 */
     {PG_LATIN8, &iso8859_14_to_unicode_tree,
-     &iso8859_14_from_unicode_tree}, /* ISO-8859-14 Latin 8 */
+     &iso8859_14_from_unicode_tree}, /* ISO-8859-14 Latin 8 */
     {PG_LATIN9, &iso8859_15_to_unicode_tree,
-     &iso8859_15_from_unicode_tree}, /* ISO-8859-15 Latin 9 */
+     &iso8859_15_from_unicode_tree}, /* ISO-8859-15 Latin 9 */
     {PG_LATIN10, &iso8859_16_to_unicode_tree,
-     &iso8859_16_from_unicode_tree}, /* ISO-8859-16 Latin 10 */
+     &iso8859_16_from_unicode_tree}, /* ISO-8859-16 Latin 10 */
     {PG_ISO_8859_5, &iso8859_5_to_unicode_tree,
-     &iso8859_5_from_unicode_tree}, /* ISO-8859-5 */
+     &iso8859_5_from_unicode_tree}, /* ISO-8859-5 */
     {PG_ISO_8859_6, &iso8859_6_to_unicode_tree,
-     &iso8859_6_from_unicode_tree}, /* ISO-8859-6 */
+     &iso8859_6_from_unicode_tree}, /* ISO-8859-6 */
     {PG_ISO_8859_7, &iso8859_7_to_unicode_tree,
-     &iso8859_7_from_unicode_tree}, /* ISO-8859-7 */
+     &iso8859_7_from_unicode_tree}, /* ISO-8859-7 */
     {PG_ISO_8859_8, &iso8859_8_to_unicode_tree,
-     &iso8859_8_from_unicode_tree}, /* ISO-8859-8 */
+     &iso8859_8_from_unicode_tree}, /* ISO-8859-8 */
 };
 Datum
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
index 4c8893036c..971de32f6c 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
@@ -56,13 +56,13 @@ PG_FUNCTION_INFO_V1(utf8_to_win);
 typedef struct
 {
     pg_enc encoding;
-    const pg_mb_radix_tree *map1; /* to UTF8 map name */
-    const pg_mb_radix_tree *map2; /* from UTF8 map name */
+    const pg_mb_radix_tree *map1; /* to UTF8 map name */
+    const pg_mb_radix_tree *map2; /* from UTF8 map name */
 } pg_conv_map;
 static const pg_conv_map maps[] = {
-    {PG_WIN866, &win866_to_unicode_tree, &win866_from_unicode_tree},
-    {PG_WIN874, &win874_to_unicode_tree, &win874_from_unicode_tree},
+    {PG_WIN866, &win866_to_unicode_tree, &win866_from_unicode_tree},
+    {PG_WIN874, &win874_to_unicode_tree, &win874_from_unicode_tree},
     {PG_WIN1250, &win1250_to_unicode_tree, &win1250_from_unicode_tree},
     {PG_WIN1251, &win1251_to_unicode_tree, &win1251_from_unicode_tree},
     {PG_WIN1252, &win1252_to_unicode_tree, &win1252_from_unicode_tree},
diff --git a/src/backend/utils/mb/encnames.c b/src/backend/utils/mb/encnames.c
index 444eec25b5..f97505e55a 100644
--- a/src/backend/utils/mb/encnames.c
+++ b/src/backend/utils/mb/encnames.c
@@ -412,43 +412,43 @@ const pg_enc2gettext pg_enc2gettext_tbl[] =
  *
  * NULL entries are not supported by ICU, or their mapping is unclear.
  */
-static const char * const pg_enc2icu_tbl[] =
+static const char *const pg_enc2icu_tbl[] =
 {
-    NULL, /* PG_SQL_ASCII */
-    "EUC-JP", /* PG_EUC_JP */
-    "EUC-CN", /* PG_EUC_CN */
-    "EUC-KR", /* PG_EUC_KR */
-    "EUC-TW", /* PG_EUC_TW */
-    NULL, /* PG_EUC_JIS_2004 */
-    "UTF-8", /* PG_UTF8 */
-    NULL, /* PG_MULE_INTERNAL */
-    "ISO-8859-1", /* PG_LATIN1 */
-    "ISO-8859-2", /* PG_LATIN2 */
-    "ISO-8859-3", /* PG_LATIN3 */
-    "ISO-8859-4", /* PG_LATIN4 */
-    "ISO-8859-9", /* PG_LATIN5 */
-    "ISO-8859-10", /* PG_LATIN6 */
-    "ISO-8859-13", /* PG_LATIN7 */
-    "ISO-8859-14", /* PG_LATIN8 */
-    "ISO-8859-15", /* PG_LATIN9 */
-    NULL, /* PG_LATIN10 */
-    "CP1256", /* PG_WIN1256 */
-    "CP1258", /* PG_WIN1258 */
-    "CP866", /* PG_WIN866 */
-    NULL, /* PG_WIN874 */
-    "KOI8-R", /* PG_KOI8R */
-    "CP1251", /* PG_WIN1251 */
-    "CP1252", /* PG_WIN1252 */
-    "ISO-8859-5", /* PG_ISO_8859_5 */
-    "ISO-8859-6", /* PG_ISO_8859_6 */
-    "ISO-8859-7", /* PG_ISO_8859_7 */
-    "ISO-8859-8", /* PG_ISO_8859_8 */
-    "CP1250", /* PG_WIN1250 */
-    "CP1253", /* PG_WIN1253 */
-    "CP1254", /* PG_WIN1254 */
-    "CP1255", /* PG_WIN1255 */
-    "CP1257", /* PG_WIN1257 */
-    "KOI8-U", /* PG_KOI8U */
+    NULL, /* PG_SQL_ASCII */
+    "EUC-JP", /* PG_EUC_JP */
+    "EUC-CN", /* PG_EUC_CN */
+    "EUC-KR", /* PG_EUC_KR */
+    "EUC-TW", /* PG_EUC_TW */
+    NULL, /* PG_EUC_JIS_2004 */
+    "UTF-8", /* PG_UTF8 */
+    NULL, /* PG_MULE_INTERNAL */
+    "ISO-8859-1", /* PG_LATIN1 */
+    "ISO-8859-2", /* PG_LATIN2 */
+    "ISO-8859-3", /* PG_LATIN3 */
+    "ISO-8859-4", /* PG_LATIN4 */
+    "ISO-8859-9", /* PG_LATIN5 */
+    "ISO-8859-10", /* PG_LATIN6 */
+    "ISO-8859-13", /* PG_LATIN7 */
+    "ISO-8859-14", /* PG_LATIN8 */
+    "ISO-8859-15", /* PG_LATIN9 */
+    NULL, /* PG_LATIN10 */
+    "CP1256", /* PG_WIN1256 */
+    "CP1258", /* PG_WIN1258 */
+    "CP866", /* PG_WIN866 */
+    NULL, /* PG_WIN874 */
+    "KOI8-R", /* PG_KOI8R */
+    "CP1251", /* PG_WIN1251 */
+    "CP1252", /* PG_WIN1252 */
+    "ISO-8859-5", /* PG_ISO_8859_5 */
+    "ISO-8859-6", /* PG_ISO_8859_6 */
+    "ISO-8859-7", /* PG_ISO_8859_7 */
+    "ISO-8859-8", /* PG_ISO_8859_8 */
+    "CP1250", /* PG_WIN1250 */
+    "CP1253", /* PG_WIN1253 */
+    "CP1254", /* PG_WIN1254 */
+    "CP1255", /* PG_WIN1255 */
+    "CP1257", /* PG_WIN1257 */
+    "KOI8-U", /* PG_KOI8U */
 };
 bool
@@ -476,7 +476,7 @@ get_encoding_name_for_icu(int encoding)
     return icu_encoding_name;
 }
-#endif /* not FRONTEND */
+#endif /* not FRONTEND */
 /* ----------
diff --git a/src/backend/utils/misc/backend_random.c b/src/backend/utils/misc/backend_random.c
index dcc23638e1..d8556143dc 100644
--- a/src/backend/utils/misc/backend_random.c
+++ b/src/backend/utils/misc/backend_random.c
@@ -53,7 +53,7 @@ bool
 pg_backend_random(char *dst, int len)
 {
     /* should not be called in postmaster */
-    Assert (IsUnderPostmaster || !IsPostmasterEnvironment);
+    Assert(IsUnderPostmaster || !IsPostmasterEnvironment);
     return pg_strong_random(dst, len);
 }
@@ -69,7 +69,7 @@ typedef struct
 {
     bool initialized;
     unsigned short seed[3];
-} BackendRandomShmemStruct;
+} BackendRandomShmemStruct;
 static BackendRandomShmemStruct *BackendRandomShmem;
@@ -106,7 +106,7 @@ pg_backend_random(char *dst, int len)
     char *end = dst + len;
     /* should not be called in postmaster */
-    Assert (IsUnderPostmaster || !IsPostmasterEnvironment);
+    Assert(IsUnderPostmaster || !IsPostmasterEnvironment);
     LWLockAcquire(BackendRandomLock, LW_EXCLUSIVE);
@@ -124,8 +124,8 @@ pg_backend_random(char *dst, int len)
         BackendRandomShmem->seed[2] = (unsigned short) (now.tv_usec >> 16);
         /*
-         * Mix in the cancel key, generated by the postmaster. This adds
-         * what little entropy the postmaster had to the seed.
+         * Mix in the cancel key, generated by the postmaster. This adds what
+         * little entropy the postmaster had to the seed.
          */
         BackendRandomShmem->seed[0] ^= (MyCancelKey);
         BackendRandomShmem->seed[1] ^= (MyCancelKey >> 16);
@@ -141,7 +141,7 @@ pg_backend_random(char *dst, int len)
         /*
         * pg_jrand48 returns a 32-bit integer. Fill the next 4 bytes from it.
         */
-        r = (uint32) pg_jrand48(BackendRandomShmem->seed);
+        r = (uint32) pg_jrand48(BackendRandomShmem->seed);
         for (j = 0; j < 4 && dst < end; j++)
         {
@@ -155,4 +155,4 @@ pg_backend_random(char *dst, int len)
 }
-#endif /* HAVE_STRONG_RANDOM */
+#endif /* HAVE_STRONG_RANDOM */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index cb4e621c84..92e1d63b2f 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -151,7 +151,7 @@ static bool check_log_destination(char **newval, void **extra, GucSource source)
 static void assign_log_destination(const char *newval, void *extra);
 static bool check_wal_consistency_checking(char **newval, void **extra,
-        GucSource source);
+        GucSource source);
 static void assign_wal_consistency_checking(const char *newval, void *extra);
 #ifdef HAVE_SYSLOG
@@ -2212,7 +2212,7 @@ static struct config_int ConfigureNamesInt[] =
     {"max_pred_locks_per_page", PGC_SIGHUP, LOCK_MANAGEMENT,
         gettext_noop("Sets the maximum number of predicate-locked tuples per page."),
         gettext_noop("If more than this number of tuples on the same page are locked "
-        "by a connection, those locks are replaced by a page level lock.")
+        "by a connection, those locks are replaced by a page level lock.")
     },
     &max_predicate_locks_per_page,
     2, 0, INT_MAX,
     NULL, NULL, NULL
 },
@@ -2259,7 +2259,7 @@ static struct config_int ConfigureNamesInt[] =
         GUC_UNIT_MB
     },
     &min_wal_size_mb,
-    5 * (XLOG_SEG_SIZE/ (1024 * 1024)), 2, MAX_KILOBYTES,
+    5 * (XLOG_SEG_SIZE / (1024 * 1024)), 2, MAX_KILOBYTES,
     NULL, NULL, NULL
 },
@@ -2270,7 +2270,7 @@ static struct config_int ConfigureNamesInt[] =
         GUC_UNIT_MB
     },
     &max_wal_size_mb,
-    64 * (XLOG_SEG_SIZE/ (1024 * 1024)), 2, MAX_KILOBYTES,
+    64 * (XLOG_SEG_SIZE / (1024 * 1024)), 2, MAX_KILOBYTES,
     NULL, assign_max_wal_size, NULL
 },
@@ -2452,7 +2452,7 @@ static struct config_int ConfigureNamesInt[] =
         NULL
     },
     &bgwriter_lru_maxpages,
-    100, 0, INT_MAX / 2, /* Same upper limit as shared_buffers */
+    100, 0, INT_MAX / 2, /* Same upper limit as shared_buffers */
     NULL, NULL, NULL
 },
@@ -6714,7 +6714,7 @@ GetConfigOption(const char *name, bool missing_ok, bool restrict_superuser)
         ereport(ERROR,
                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                  errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"",
-                        name)));
+                        name)));
     switch (record->vartype)
     {
@@ -6764,7 +6764,7 @@ GetConfigOptionResetString(const char *name)
         ereport(ERROR,
                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                  errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"",
-                        name)));
+                        name)));
     switch (record->vartype)
     {
@@ -8056,7 +8056,7 @@ GetConfigOptionByName(const char *name, const char **varname, bool missing_ok)
         ereport(ERROR,
                 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                  errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"",
-                        name)));
+                        name)));
     if (varname)
         *varname = record->name;
@@ -8083,7 +8083,7 @@ GetConfigOptionByNum(int varnum, const char **values, bool *noshow)
     {
         if ((conf->flags & GUC_NO_SHOW_ALL) ||
             ((conf->flags & GUC_SUPERUSER_ONLY) &&
-             !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_SETTINGS)))
+             !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_SETTINGS)))
             *noshow = true;
         else
             *noshow = false;
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 96feacc257..8a8db0fd33 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -2327,8 +2327,8 @@ tuplesort_merge_order(int64 allowedMem)
     * which in turn can cause the same sort to need more runs, which makes
     * merging slower even if it can still be done in a single pass. Also,
     * high order merges are quite slow due to CPU cache effects; it can be
-    * faster to pay the I/O cost of a polyphase merge than to perform a single
-    * merge pass across many hundreds of tapes.
+    * faster to pay the I/O cost of a polyphase merge than to perform a
+    * single merge pass across many hundreds of tapes.
     */
     mOrder = Max(mOrder, MINORDER);
     mOrder = Min(mOrder, MAXORDER);
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 5fa665eafc..b3d4fe3ae2 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -1137,10 +1137,9 @@ AtEOXact_Snapshot(bool isCommit, bool resetXmin)
     FirstSnapshotSet = false;
     /*
-     * During normal commit processing, we call
-     * ProcArrayEndTransaction() to reset the PgXact->xmin. That call
-     * happens prior to the call to AtEOXact_Snapshot(), so we need
-     * not touch xmin here at all.
+     * During normal commit processing, we call ProcArrayEndTransaction() to
+     * reset the PgXact->xmin. That call happens prior to the call to
+     * AtEOXact_Snapshot(), so we need not touch xmin here at all.
      */
     if (resetXmin)
         SnapshotResetXmin();