Author:    Robert Haas <rhaas@postgresql.org>  2016-12-23 07:14:37 -0500
Committer: Robert Haas <rhaas@postgresql.org>  2016-12-23 07:14:37 -0500
Commit:    7819ba1ef6c5297b7e27878d2b3d30c5bcef8939
Tree:      d160d631ed1bb023d3c51371f0cd1a116a9ad174 /src/backend/access/hash/hashinsert.c
Parent:    0a85c102254b72ec7ce16bc504206a1a5c84bd76
Download:  postgresql-7819ba1ef6c5297b7e27878d2b3d30c5bcef8939.tar.gz
Remove _hash_chgbufaccess().
This is basically for the same reasons I got rid of _hash_wrtbuf() in commit 25216c98938495fd741bf585dcbef45b3a9ffd40: it's not convenient to have a function which encapsulates MarkBufferDirty(), especially as we move towards having hash indexes be WAL-logged.

Patch by me, reviewed (but not entirely endorsed) by Amit Kapila.
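For readers skimming the hunks below, the replacements follow a fixed mapping from the old helper's (from, to) access arguments to direct bufmgr.h calls. The following sketch is illustration only, not code from the patch (the function name demo_lock_transitions is made up); it spells out that mapping, assuming the caller already holds a pin on the buffer:

/*
 * Illustration only: the call mapping applied in this patch.  Making each
 * step explicit leaves room to emit a WAL record between MarkBufferDirty()
 * and the lock release once hash indexes are WAL-logged.
 */
#include "postgres.h"
#include "storage/bufmgr.h"

static void
demo_lock_transitions(Buffer metabuf)
{
	/* was: _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ) */
	LockBuffer(metabuf, BUFFER_LOCK_SHARE);

	/* was: _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK) */
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

	/* was: _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE) */
	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * was: _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK):
	 * mark the page dirty, then drop the lock but keep the pin.
	 */
	MarkBufferDirty(metabuf);
	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}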
Diffstat (limited to 'src/backend/access/hash/hashinsert.c')
-rw-r--r--  src/backend/access/hash/hashinsert.c  15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 4b022b5755..46df589a7f 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -104,7 +104,7 @@ restart_insert:
lowmask = metap->hashm_lowmask;
/* Release metapage lock, but keep pin. */
- _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/*
* If the previous iteration of this loop locked the primary page of
@@ -125,7 +125,7 @@ restart_insert:
* Reacquire metapage lock and check that no bucket split has taken
* place while we were awaiting the bucket lock.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+ LockBuffer(metabuf, BUFFER_LOCK_SHARE);
oldblkno = blkno;
retry = true;
}
@@ -149,7 +149,7 @@ restart_insert:
if (H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf))
{
/* release the lock on bucket buffer, before completing the split. */
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
_hash_finish_split(rel, metabuf, buf, pageopaque->hasho_bucket,
maxbucket, highmask, lowmask);
@@ -180,7 +180,7 @@ restart_insert:
if (buf != bucket_buf)
_hash_relbuf(rel, buf);
else
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
page = BufferGetPage(buf);
}
@@ -192,7 +192,7 @@ restart_insert:
*/
/* release our write lock without modifying buffer */
- _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
/* chain to a new overflow page */
buf = _hash_addovflpage(rel, metabuf, buf, (buf == bucket_buf) ? true : false);
@@ -223,7 +223,7 @@ restart_insert:
* Write-lock the metapage so we can increment the tuple count. After
* incrementing it, check to see if it's time for a split.
*/
- _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+ LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
metap->hashm_ntuples += 1;
@@ -232,7 +232,8 @@ restart_insert:
(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);
/* Write out the metapage and drop lock, but keep pin */
- _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+ MarkBufferDirty(metabuf);
+ LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
/* Attempt to split if a split is needed */
if (do_expand)