From a6fd7b7a5f7bf3a8aa3f3d076cf09d922c1c6dd2 Mon Sep 17 00:00:00 2001
From: Bruce Momjian
Date: Wed, 17 May 2017 16:31:56 -0400
Subject: Post-PG 10 beta1 pgindent run

perltidy run not included.
---
 src/backend/access/hash/hash.c       | 28 +++++++++-----------
 src/backend/access/hash/hash_xlog.c  | 40 ++++++++++++++--------------
 src/backend/access/hash/hashinsert.c | 51 ++++++++++++++++++------------------
 src/backend/access/hash/hashpage.c   | 50 +++++++++++++++++------------------
 src/backend/access/hash/hashutil.c   | 27 ++++++++++---------
 5 files changed, 96 insertions(+), 100 deletions(-)

(limited to 'src/backend/access/hash')

diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index df54638f3e..d0b0547491 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -333,12 +333,12 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
 		if (scan->kill_prior_tuple)
 		{
 			/*
-			 * Yes, so remember it for later. (We'll deal with all such
-			 * tuples at once right after leaving the index page or at
-			 * end of scan.)  In case if caller reverses the indexscan
-			 * direction it is quite possible that the same item might
-			 * get entered multiple times. But, we don't detect that;
-			 * instead, we just forget any excess entries.
+			 * Yes, so remember it for later. (We'll deal with all such tuples
+			 * at once right after leaving the index page or at end of scan.)
+			 * In case if caller reverses the indexscan direction it is quite
+			 * possible that the same item might get entered multiple times.
+			 * But, we don't detect that; instead, we just forget any excess
+			 * entries.
 			 */
 			if (so->killedItems == NULL)
 				so->killedItems = palloc(MaxIndexTuplesPerPage *
@@ -348,7 +348,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
 			{
 				so->killedItems[so->numKilled].heapTid = so->hashso_heappos;
 				so->killedItems[so->numKilled].indexOffset =
-							ItemPointerGetOffsetNumber(&(so->hashso_curpos));
+					ItemPointerGetOffsetNumber(&(so->hashso_curpos));
 				so->numKilled++;
 			}
 		}
@@ -477,9 +477,8 @@ hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 	Relation	rel = scan->indexRelation;
 
 	/*
-	 * Before leaving current page, deal with any killed items.
-	 * Also, ensure that we acquire lock on current page before
-	 * calling _hash_kill_items.
+	 * Before leaving current page, deal with any killed items. Also, ensure
+	 * that we acquire lock on current page before calling _hash_kill_items.
 	 */
 	if (so->numKilled > 0)
 	{
@@ -516,9 +515,8 @@ hashendscan(IndexScanDesc scan)
 	Relation	rel = scan->indexRelation;
 
 	/*
-	 * Before leaving current page, deal with any killed items.
-	 * Also, ensure that we acquire lock on current page before
-	 * calling _hash_kill_items.
+	 * Before leaving current page, deal with any killed items. Also, ensure
+	 * that we acquire lock on current page before calling _hash_kill_items.
 	 */
 	if (so->numKilled > 0)
 	{
@@ -889,8 +887,8 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 
 		/*
 		 * Let us mark the page as clean if vacuum removes the DEAD tuples
-		 * from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES
-		 * flag.
+		 * from an index page. We do this by clearing
+		 * LH_PAGE_HAS_DEAD_TUPLES flag.
 		 */
		if (tuples_removed && *tuples_removed > 0 &&
			H_HAS_DEAD_TUPLES(opaque))
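
The reflowed comments above describe hash's deferred kill_prior_tuple
bookkeeping: killed tuples are remembered in so->killedItems and only
marked LP_DEAD in bulk once the scan leaves the page, and because LP_DEAD
is just a hint, duplicate or overflowing entries can simply be dropped.
As a rough sketch of that remember-now, mark-later pattern (stand-in
types and names, not the actual PostgreSQL structures):

/* Annotation, not part of the commit: simplified stand-ins throughout. */
#include <stdio.h>

#define MAX_ITEMS_PER_PAGE 8	/* stand-in for MaxIndexTuplesPerPage */

typedef struct KilledItem
{
	unsigned	heapBlock;		/* stand-in for a heap TID */
	unsigned	indexOffset;	/* offset of the index tuple on the page */
} KilledItem;

typedef struct ScanState
{
	KilledItem	killed[MAX_ITEMS_PER_PAGE];
	int			numKilled;
} ScanState;

/*
 * Remember a tuple to mark LP_DEAD later.  As the patched comment says:
 * duplicates are not detected, and once the array is full any excess
 * entries are simply forgotten -- LP_DEAD is only a hint, so losing
 * some entries is harmless.
 */
static void
remember_killed(ScanState *so, unsigned heapBlock, unsigned indexOffset)
{
	if (so->numKilled < MAX_ITEMS_PER_PAGE)
	{
		so->killed[so->numKilled].heapBlock = heapBlock;
		so->killed[so->numKilled].indexOffset = indexOffset;
		so->numKilled++;
	}
}

int
main(void)
{
	ScanState	so = {0};

	for (unsigned i = 0; i < 12; i++)	/* more kills than slots */
		remember_killed(&so, 100 + i, i);
	printf("remembered %d of 12 killed tuples\n", so.numKilled);
	return 0;
}
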
diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index d1c0e6904f..0ea11b2e74 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -950,22 +950,22 @@ hash_xlog_update_meta_page(XLogReaderState *record)
 static TransactionId
 hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 {
-	xl_hash_vacuum_one_page *xlrec;
-	OffsetNumber *unused;
+	xl_hash_vacuum_one_page *xlrec;
+	OffsetNumber *unused;
 	Buffer		ibuffer,
 				hbuffer;
 	Page		ipage,
 				hpage;
-	RelFileNode rnode;
-	BlockNumber blkno;
+	RelFileNode rnode;
+	BlockNumber blkno;
 	ItemId		iitemid,
 				hitemid;
 	IndexTuple	itup;
-	HeapTupleHeader htuphdr;
-	BlockNumber hblkno;
-	OffsetNumber hoffnum;
-	TransactionId latestRemovedXid = InvalidTransactionId;
-	int i;
+	HeapTupleHeader htuphdr;
+	BlockNumber hblkno;
+	OffsetNumber hoffnum;
+	TransactionId latestRemovedXid = InvalidTransactionId;
+	int			i;
 
 	xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
 
@@ -984,9 +984,9 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 		return latestRemovedXid;
 
 	/*
-	 * Check if WAL replay has reached a consistent database state. If not,
-	 * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid
-	 * for more details.
+	 * Check if WAL replay has reached a consistent database state. If not, we
+	 * must PANIC. See the definition of
+	 * btree_xlog_delete_get_latestRemovedXid for more details.
 	 */
 	if (!reachedConsistency)
 		elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data");
@@ -1098,11 +1098,11 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 static void
 hash_xlog_vacuum_one_page(XLogReaderState *record)
 {
-	XLogRecPtr lsn = record->EndRecPtr;
+	XLogRecPtr	lsn = record->EndRecPtr;
 	xl_hash_vacuum_one_page *xldata;
-	Buffer		buffer;
-	Buffer		metabuf;
-	Page		page;
+	Buffer		buffer;
+	Buffer		metabuf;
+	Page		page;
 	XLogRedoAction action;
 	HashPageOpaque pageopaque;
 
@@ -1123,7 +1123,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 	if (InHotStandby)
 	{
 		TransactionId latestRemovedXid =
-					hash_xlog_vacuum_get_latestRemovedXid(record);
+		hash_xlog_vacuum_get_latestRemovedXid(record);
 		RelFileNode rnode;
 
 		XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
@@ -1146,8 +1146,8 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 	}
 
 	/*
-	 * Mark the page as not containing any LP_DEAD items. See comments
-	 * in _hash_vacuum_one_page() for details.
+	 * Mark the page as not containing any LP_DEAD items. See comments in
+	 * _hash_vacuum_one_page() for details.
 	 */
 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 	pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
@@ -1160,7 +1160,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 
 	if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
 	{
-		Page metapage;
+		Page		metapage;
 		HashMetaPage metap;
 
 		metapage = BufferGetPage(metabuf);
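
For context on the PANIC comment above: hash_xlog_vacuum_get_latestRemovedXid()
walks the heap tuples referenced by the to-be-deleted index entries and keeps
the newest removed XID, which drives recovery-conflict cancellation on a hot
standby. A schematic of that max-of-XIDs pass, with simplified types -- the
real code compares XIDs with wraparound-aware macros and uses
HeapTupleHeaderAdvanceLatestRemovedXid(), not a plain > comparison:

/* Annotation, not part of the commit: toy types, wraparound ignored. */
#include <stdio.h>

typedef unsigned TransactionId;
#define InvalidTransactionId 0

/* crude stand-in for HeapTupleHeaderAdvanceLatestRemovedXid() */
static void
advance_latest_removed(TransactionId *latest, TransactionId xmax)
{
	if (xmax > *latest)
		*latest = xmax;
}

int
main(void)
{
	TransactionId xmaxes[] = {642, 640, 655, 649};	/* deleters of dead tuples */
	TransactionId latestRemovedXid = InvalidTransactionId;

	for (int i = 0; i < 4; i++)
		advance_latest_removed(&latestRemovedXid, xmaxes[i]);

	/* A standby must cancel queries that might still need to see XID 655. */
	printf("latestRemovedXid = %u\n", latestRemovedXid);
	return 0;
}

This is also why the function insists on a consistent database state: before
consistency is reached, the heap pages it would visit may not be trustworthy.
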
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 8699b5bc30..01c8d8006c 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -24,7 +24,7 @@
 #include "storage/buf_internals.h"
 
 static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
-					RelFileNode hnode);
+					  RelFileNode hnode);
 
 /*
  * _hash_doinsert() -- Handle insertion of a single index tuple.
@@ -63,8 +63,8 @@ restart_insert:
 
 	/*
 	 * Read the metapage.  We don't lock it yet; HashMaxItemSize() will
-	 * examine pd_pagesize_version, but that can't change so we can examine
-	 * it without a lock.
+	 * examine pd_pagesize_version, but that can't change so we can examine it
+	 * without a lock.
 	 */
 	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
 	metapage = BufferGetPage(metabuf);
@@ -126,10 +126,9 @@ restart_insert:
 			BlockNumber nextblkno;
 
 			/*
-			 * Check if current page has any DEAD tuples. If yes,
-			 * delete these tuples and see if we can get a space for
-			 * the new item to be inserted before moving to the next
-			 * page in the bucket chain.
+			 * Check if current page has any DEAD tuples. If yes, delete these
+			 * tuples and see if we can get a space for the new item to be
+			 * inserted before moving to the next page in the bucket chain.
 			 */
 			if (H_HAS_DEAD_TUPLES(pageopaque))
 			{
@@ -139,7 +138,7 @@ restart_insert:
 				_hash_vacuum_one_page(rel, metabuf, buf, heapRel->rd_node);
 
 				if (PageGetFreeSpace(page) >= itemsz)
-					break;		/* OK, now we have enough space */
+					break;	/* OK, now we have enough space */
 			}
 		}
 
@@ -337,13 +336,13 @@ static void
 _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 					  RelFileNode hnode)
 {
-	OffsetNumber deletable[MaxOffsetNumber];
-	int ndeletable = 0;
-	OffsetNumber offnum,
-				 maxoff;
-	Page page = BufferGetPage(buf);
-	HashPageOpaque pageopaque;
-	HashMetaPage metap;
+	OffsetNumber deletable[MaxOffsetNumber];
+	int			ndeletable = 0;
+	OffsetNumber offnum,
+				maxoff;
+	Page		page = BufferGetPage(buf);
+	HashPageOpaque pageopaque;
+	HashMetaPage metap;
 
 	/* Scan each tuple in page to see if it is marked as LP_DEAD */
 	maxoff = PageGetMaxOffsetNumber(page);
@@ -351,7 +350,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		 offnum <= maxoff;
 		 offnum = OffsetNumberNext(offnum))
 	{
-		ItemId itemId = PageGetItemId(page, offnum);
+		ItemId		itemId = PageGetItemId(page, offnum);
 
 		if (ItemIdIsDead(itemId))
 			deletable[ndeletable++] = offnum;
@@ -360,8 +359,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 	if (ndeletable > 0)
 	{
 		/*
-		 * Write-lock the meta page so that we can decrement
-		 * tuple count.
+		 * Write-lock the meta page so that we can decrement tuple count.
 		 */
 		LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
@@ -374,8 +372,8 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		 * Mark the page as not containing any LP_DEAD items. This is not
 		 * certainly true (there might be some that have recently been marked,
 		 * but weren't included in our target-item list), but it will almost
-		 * always be true and it doesn't seem worth an additional page scan
-		 * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
+		 * always be true and it doesn't seem worth an additional page scan to
+		 * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
 		 * anyway.
 		 */
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -390,7 +388,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		/* XLOG stuff */
 		if (RelationNeedsWAL(rel))
 		{
-			xl_hash_vacuum_one_page xlrec;
+			xl_hash_vacuum_one_page xlrec;
 			XLogRecPtr	recptr;
 
 			xlrec.hnode = hnode;
@@ -401,12 +399,12 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 			XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage);
 
 			/*
-			 * We need the target-offsets array whether or not we store the whole
-			 * buffer, to allow us to find the latestRemovedXid on a standby
-			 * server.
+			 * We need the target-offsets array whether or not we store the
+			 * whole buffer, to allow us to find the latestRemovedXid on a
+			 * standby server.
 			 */
 			XLogRegisterData((char *) deletable,
-						ndeletable * sizeof(OffsetNumber));
+							 ndeletable * sizeof(OffsetNumber));
 
 			XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
 
@@ -417,9 +415,10 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 	}
 
 	END_CRIT_SECTION();
+
 	/*
-	 * Releasing write lock on meta page as we have updated
-	 * the tuple count.
+	 * Releasing write lock on meta page as we have updated the tuple
+	 * count.
 	 */
 	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 }
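
The _hash_vacuum_one_page() hunks above reflow comments around a
collect-then-delete pass: scan the line pointers once, remember the LP_DEAD
offsets, then remove them all in a single WAL-logged action and decrement the
tuple count under the metapage lock. A toy model of the two-pass pattern
(the hypothetical ToyPage stands in for a real page and its item IDs):

/* Annotation, not part of the commit: illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_OFFSETS 16

typedef struct ToyPage
{
	bool		dead[MAX_OFFSETS];	/* stand-in for ItemIdIsDead() */
	int			nitems;
} ToyPage;

int
main(void)
{
	ToyPage		page = {.dead = {false, true, false, true}, .nitems = 4};
	int			deletable[MAX_OFFSETS];
	int			ndeletable = 0;

	/* Pass 1: scan each line pointer, remembering the dead ones. */
	for (int off = 0; off < page.nitems; off++)
		if (page.dead[off])
			deletable[ndeletable++] = off;

	/* Pass 2: delete them all at once (cf. PageIndexMultiDelete). */
	if (ndeletable > 0)
		printf("would delete %d dead items in one WAL-logged action\n",
			   ndeletable);
	return 0;
}
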
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index bf1ffff4e8..4544889294 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -177,8 +177,8 @@ _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 
 	/*
-	 * Set hasho_prevblkno with current hashm_maxbucket. This value will
-	 * be used to validate cached HashMetaPageData. See
+	 * Set hasho_prevblkno with current hashm_maxbucket. This value will be
+	 * used to validate cached HashMetaPageData. See
 	 * _hash_getbucketbuf_from_hashkey().
 	 */
 	pageopaque->hasho_prevblkno = max_bucket;
@@ -509,8 +509,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
 	 * Choose the number of initial bucket pages to match the fill factor
 	 * given the estimated number of tuples. We round up the result to the
 	 * total number of buckets which has to be allocated before using its
-	 * _hashm_spare element. However always force at least 2 bucket pages.
-	 * The upper limit is determined by considerations explained in
+	 * _hashm_spare element. However always force at least 2 bucket pages. The
+	 * upper limit is determined by considerations explained in
 	 * _hash_expandtable().
 	 */
 	dnumbuckets = num_tuples / ffactor;
@@ -568,8 +568,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
 	metap->hashm_maxbucket = num_buckets - 1;
 
 	/*
-	 * Set highmask as next immediate ((2 ^ x) - 1), which should be sufficient
-	 * to cover num_buckets.
+	 * Set highmask as next immediate ((2 ^ x) - 1), which should be
+	 * sufficient to cover num_buckets.
 	 */
 	metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
 	metap->hashm_lowmask = (metap->hashm_highmask >> 1);
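
The highmask/lowmask hunk above sets the masks to the next ((2 ^ x) - 1)
large enough to cover num_buckets, with lowmask one bit narrower. A worked
sketch of that arithmetic, using a stand-in for _hash_log2():

/* Annotation, not part of the commit: toy_log2 mimics _hash_log2. */
#include <stdio.h>

/* smallest x such that (1 << x) >= num */
static unsigned
toy_log2(unsigned num)
{
	unsigned	i = 0;

	for (unsigned limit = 1; limit < num; limit <<= 1)
		i++;
	return i;
}

int
main(void)
{
	unsigned	num_buckets = 6;
	unsigned	highmask = (1 << toy_log2(num_buckets + 1)) - 1;
	unsigned	lowmask = highmask >> 1;

	/* For 6 buckets: highmask = 7, lowmask = 3. */
	printf("highmask=%u lowmask=%u\n", highmask, lowmask);
	return 0;
}
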
@@ -748,8 +748,8 @@ restart_expand:
 	{
 		/*
 		 * Copy bucket mapping info now; refer to the comment in code below
-		 * where we copy this information before calling _hash_splitbucket
-		 * to see why this is okay.
+		 * where we copy this information before calling _hash_splitbucket to
+		 * see why this is okay.
 		 */
 		maxbucket = metap->hashm_maxbucket;
 		highmask = metap->hashm_highmask;
@@ -792,8 +792,7 @@ restart_expand:
 		 * We treat allocation of buckets as a separate WAL-logged action.
 		 * Even if we fail after this operation, won't leak bucket pages;
 		 * rather, the next split will consume this space. In any case, even
-		 * without failure we don't use all the space in one split
-		 * operation.
+		 * without failure we don't use all the space in one split operation.
 		 */
 		buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
 		if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
@@ -870,10 +869,9 @@ restart_expand:
 
 	/*
 	 * Mark the old bucket to indicate that split is in progress. (At
-	 * operation end, we will clear the split-in-progress flag.)  Also,
-	 * for a primary bucket page, hasho_prevblkno stores the number of
-	 * buckets that existed as of the last split, so we must update that
-	 * value here.
+	 * operation end, we will clear the split-in-progress flag.)  Also, for a
+	 * primary bucket page, hasho_prevblkno stores the number of buckets that
+	 * existed as of the last split, so we must update that value here.
 	 */
 	oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
 	oopaque->hasho_prevblkno = maxbucket;
@@ -1008,8 +1006,8 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
 
 	/*
 	 * Initialize the page.  Just zeroing the page won't work; see
-	 * _hash_freeovflpage for similar usage. We take care to make the
-	 * special space valid for the benefit of tools such as pageinspect.
+	 * _hash_freeovflpage for similar usage.  We take care to make the special
+	 * space valid for the benefit of tools such as pageinspect.
 	 */
 	_hash_pageinit(page, BLCKSZ);
 
@@ -1462,11 +1460,11 @@ log_split_page(Relation rel, Buffer buf)
  * _hash_getcachedmetap() -- Returns cached metapage data.
  *
  * If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
- * the metapage. If not set, we'll set it before returning if we have to
- * refresh the cache, and return with a pin but no lock on it; caller is
- * responsible for releasing the pin.
+ * the metapage.  If not set, we'll set it before returning if we have to
+ * refresh the cache, and return with a pin but no lock on it; caller is
+ * responsible for releasing the pin.
 *
- * We refresh the cache if it's not initialized yet or force_refresh is true.
+ * We refresh the cache if it's not initialized yet or force_refresh is true.
 */
 HashMetaPage
 _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
@@ -1476,13 +1474,13 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
 	Assert(metabuf);
 	if (force_refresh || rel->rd_amcache == NULL)
 	{
-		char *cache = NULL;
+		char	   *cache = NULL;
 
 		/*
-		 * It's important that we don't set rd_amcache to an invalid
-		 * value. Either MemoryContextAlloc or _hash_getbuf could fail,
-		 * so don't install a pointer to the newly-allocated storage in the
-		 * actual relcache entry until both have succeeeded.
+		 * It's important that we don't set rd_amcache to an invalid value.
+		 * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
+		 * install a pointer to the newly-allocated storage in the actual
+		 * relcache entry until both have succeeeded.
 		 */
 		if (rel->rd_amcache == NULL)
 			cache = MemoryContextAlloc(rel->rd_indexcxt,
@@ -1517,7 +1515,7 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
  * us an opportunity to use the previously saved metapage contents to reach
  * the target bucket buffer, instead of reading from the metapage every time.
  * This saves one buffer access every time we want to reach the target bucket
- * buffer, which is very helpful savings in bufmgr traffic and contention.
+ * buffer, which is very helpful savings in bufmgr traffic and contention.
 *
 * The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
 * bucket buffer has to be locked for reading or writing.
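
The cached-metapage comments above tie into _hash_getbucketbuf_from_hashkey():
the cached masks map a hash key to a bucket, and the bucket page's
hasho_prevblkno -- which primary bucket pages overload to hold
hashm_maxbucket as of the last split, per the _hash_initbuf() and
_hash_expandtable() hunks -- tells us whether the cache has gone stale.
A schematic under those assumptions; the bucket computation mirrors
_hash_hashkey2bucket(), while the staleness test is simplified:

/* Annotation, not part of the commit: simplified types and values. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned uint32;

/* same bucket computation as PostgreSQL's _hash_hashkey2bucket() */
static uint32
hashkey2bucket(uint32 hashkey, uint32 maxbucket,
			   uint32 highmask, uint32 lowmask)
{
	uint32		bucket = hashkey & highmask;

	if (bucket > maxbucket)
		bucket = bucket & lowmask;
	return bucket;
}

int
main(void)
{
	uint32		cached_maxbucket = 5;	/* from the cached metapage */
	uint32		page_prevblkno = 7;		/* stored on the bucket's primary page */
	uint32		bucket = hashkey2bucket(0xdeadbeef, cached_maxbucket, 7, 3);

	/* If the page has seen a newer split, the cache must be refreshed. */
	bool		stale = page_prevblkno > cached_maxbucket;

	printf("bucket=%u cache_stale=%s\n", bucket, stale ? "yes" : "no");
	return 0;
}

When the cache is fresh this avoids touching the metapage entirely, which is
the bufmgr-traffic saving the comment describes.
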
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9f832f2544..c513c3b842 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -528,20 +528,21 @@ _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket,
 void
 _hash_kill_items(IndexScanDesc scan)
 {
-	HashScanOpaque so = (HashScanOpaque) scan->opaque;
-	Page page;
-	HashPageOpaque opaque;
-	OffsetNumber offnum, maxoff;
-	int numKilled = so->numKilled;
-	int i;
-	bool killedsomething = false;
+	HashScanOpaque so = (HashScanOpaque) scan->opaque;
+	Page		page;
+	HashPageOpaque opaque;
+	OffsetNumber offnum,
+				maxoff;
+	int			numKilled = so->numKilled;
+	int			i;
+	bool		killedsomething = false;
 
 	Assert(so->numKilled > 0);
 	Assert(so->killedItems != NULL);
 
 	/*
-	 * Always reset the scan state, so we don't look for same
-	 * items on other pages.
+	 * Always reset the scan state, so we don't look for same items on other
+	 * pages.
 	 */
 	so->numKilled = 0;
 
@@ -555,7 +556,7 @@ _hash_kill_items(IndexScanDesc scan)
 
 		while (offnum <= maxoff)
 		{
-			ItemId iid = PageGetItemId(page, offnum);
+			ItemId		iid = PageGetItemId(page, offnum);
 			IndexTuple	ituple = (IndexTuple) PageGetItem(page, iid);
 
 			if (ItemPointerEquals(&ituple->t_tid, &so->killedItems[i].heapTid))
@@ -563,15 +564,15 @@ _hash_kill_items(IndexScanDesc scan)
 				/* found the item */
 				ItemIdMarkDead(iid);
 				killedsomething = true;
-				break;			/* out of inner search loop */
+				break;		/* out of inner search loop */
 			}
 			offnum = OffsetNumberNext(offnum);
 		}
 	}
 
 	/*
-	 * Since this can be redone later if needed, mark as dirty hint.
-	 * Whenever we mark anything LP_DEAD, we also set the page's
+	 * Since this can be redone later if needed, mark as dirty hint. Whenever
+	 * we mark anything LP_DEAD, we also set the page's
 	 * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint.
 	 */
 	if (killedsomething)
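
A closing note on _hash_kill_items() above: both LP_DEAD and
LH_PAGE_HAS_DEAD_TUPLES are only hints, so the page is dirtied via
MarkBufferDirtyHint() rather than a WAL-logged write -- if the write is
lost, the marking can simply be redone. Vacuum clears the flag again once
the dead items are physically removed, as in the hashbucketcleanup() hunk
earlier in this patch. A toy rendering of the flag handling (the flag value
matches PG 10's hash.h; everything else is illustrative):

/* Annotation, not part of the commit. */
#include <stdio.h>

#define LH_PAGE_HAS_DEAD_TUPLES (1 << 7)	/* as defined in PG 10's hash.h */

int
main(void)
{
	unsigned short hasho_flag = 0;
	int			killedsomething = 1;

	if (killedsomething)
		hasho_flag |= LH_PAGE_HAS_DEAD_TUPLES;	/* just a hint */

	/* vacuum clears it again once the dead items are actually removed */
	hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;

	printf("flag now %u\n", hasho_flag);
	return 0;
}
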