author      Robert Haas <rhaas@postgresql.org>   2017-03-20 15:49:09 -0400
committer   Robert Haas <rhaas@postgresql.org>   2017-03-20 15:49:09 -0400
commit      953477ca3526e28f9aeeb41d23b16eed0084c7d2 (patch)
tree        ad1b7373e82044025156843cbc9c80965d44d7f1 /src/backend/access/hash
parent      bc18126a6bcb85b51dc082c3ef4417dc016ebd9c (diff)
Fixes for single-page hash index vacuum.
Clear LH_PAGE_HAS_DEAD_TUPLES during replay, similar to what gets done
for btree.  Update hashdesc.c for xl_hash_vacuum_one_page.  Oversights
in commit 6977b8b7f4dfb40896ff5e2175cad7fdbda862eb spotted by Amit
Kapila.  Patch by Ashutosh Sharma.

Bump WAL version.  The original patch to make hash indexes write-ahead
logged probably should have done this, and the single page vacuuming
patch probably should have done it again, but better late than never.

Discussion: http://postgr.es/m/CAA4eK1Kd=mJ9xreovcsh0qMiAj-QqCphHVQ_Lfau1DR9oVjASQ@mail.gmail.com
Diffstat (limited to 'src/backend/access/hash')
-rw-r--r--   src/backend/access/hash/hash.c          7
-rw-r--r--   src/backend/access/hash/hash_xlog.c    21
-rw-r--r--   src/backend/access/hash/hashinsert.c    8
3 files changed, 35 insertions, 1 deletions
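
The new clear_dead_marking field used in the hunks below lives in the xl_hash_delete WAL record, declared in src/include/access/hash_xlog.h and therefore outside this path-limited diffstat. A sketch of the struct with the field added, shown here for orientation (the exact layout is an assumption, not part of this diff):

typedef struct xl_hash_delete
{
	bool		clear_dead_marking;		/* true if this operation clears
										 * LH_PAGE_HAS_DEAD_TUPLES */
	bool		is_primary_bucket_page;	/* true if the operation is for the
										 * primary bucket page */
} xl_hash_delete;

#define SizeOfHashDelete	(offsetof(xl_hash_delete, is_primary_bucket_page) + sizeof(bool))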
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index cfcec3475d..34cc08f12d 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -790,6 +790,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable = 0;
bool retain_pin = false;
+ bool clear_dead_marking = false;
vacuum_delay_point();
@@ -877,11 +878,14 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
/*
* Let us mark the page as clean if vacuum removes the DEAD tuples
* from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES
- * flag. Clearing this flag is just a hint; replay won't redo this.
+ * flag.
*/
if (tuples_removed && *tuples_removed > 0 &&
opaque->hasho_flag & LH_PAGE_HAS_DEAD_TUPLES)
+ {
opaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
+ clear_dead_marking = true;
+ }
MarkBufferDirty(buf);
@@ -891,6 +895,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
xl_hash_delete xlrec;
XLogRecPtr recptr;
+ xlrec.clear_dead_marking = clear_dead_marking;
xlrec.is_primary_bucket_page = (buf == bucket_buf) ? true : false;
XLogBeginInsert();
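
The flag reaches standbys as part of the XLOG_HASH_DELETE record body; the registration following the XLogBeginInsert() above looks roughly like this (a sketch assuming the surrounding code is otherwise unchanged by this patch; the conditional registration of the primary bucket page is omitted):

	XLogRegisterData((char *) &xlrec, SizeOfHashDelete);

	/* Register the page being cleaned so redo can locate it. */
	XLogRegisterBuffer(1, buf, REGBUF_STANDARD);
	XLogRegisterBufData(1, (char *) deletable,
						ndeletable * sizeof(OffsetNumber));

	recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_DELETE);
	PageSetLSN(BufferGetPage(buf), recptr);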
diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index 8647e8c6ad..ac82092ab2 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -859,6 +859,19 @@ hash_xlog_delete(XLogReaderState *record)
PageIndexMultiDelete(page, unused, unend - unused);
}
+ /*
+ * Mark the page as not containing any LP_DEAD items only if
+ * clear_dead_marking flag is set to true. See comments in
+ * hashbucketcleanup() for details.
+ */
+ if (xldata->clear_dead_marking)
+ {
+ HashPageOpaque pageopaque;
+
+ pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
+ pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
+ }
+
PageSetLSN(page, lsn);
MarkBufferDirty(deletebuf);
}
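
Both the normal path and the redo path above clear the same bit in the page's special space. For reference, LH_PAGE_HAS_DEAD_TUPLES is one bit of hasho_flag in HashPageOpaqueData, declared in access/hash.h (the layout and bit value below are an assumption for illustration):

typedef struct HashPageOpaqueData
{
	BlockNumber hasho_prevblkno;	/* previous overflow (or bucket) block */
	BlockNumber hasho_nextblkno;	/* next overflow block, if any */
	Bucket		hasho_bucket;		/* bucket number this page belongs to */
	uint16		hasho_flag;			/* page type code and flag bits */
	uint16		hasho_page_id;		/* for identification of hash indexes */
} HashPageOpaqueData;

#define LH_PAGE_HAS_DEAD_TUPLES	(1 << 7)	/* page may contain LP_DEAD items */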
@@ -1078,6 +1091,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
Buffer metabuf;
Page page;
XLogRedoAction action;
+ HashPageOpaque pageopaque;
xldata = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
@@ -1126,6 +1140,13 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
PageIndexMultiDelete(page, unused, unend - unused);
}
+ /*
+ * Mark the page as not containing any LP_DEAD items. See comments
+ * in _hash_vacuum_one_page() for details.
+ */
+ pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
+ pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
+
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
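
The hashdesc.c change mentioned in the commit message sits under src/backend/access/rmgrdesc and so does not appear in this diffstat; a desc case for xl_hash_vacuum_one_page would look roughly like this (a sketch; the exact output text is an assumption):

		case XLOG_HASH_VACUUM_ONE_PAGE:
			{
				/* rec is XLogRecGetData(record) inside hash_desc() */
				xl_hash_vacuum_one_page *xlrec = (xl_hash_vacuum_one_page *) rec;

				/* Report how many dead tuples the single-page vacuum removed. */
				appendStringInfo(buf, "ntuples %d", xlrec->ntuples);
				break;
			}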
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 8b6d0a0ff7..8640e85a5c 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -374,6 +374,14 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
PageIndexMultiDelete(page, deletable, ndeletable);
+ /*
+ * Mark the page as not containing any LP_DEAD items. This is not
+ * certainly true (there might be some that have recently been marked,
+ * but weren't included in our target-item list), but it will almost
+ * always be true and it doesn't seem worth an additional page scan
+ * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
+ * anyway.
+ */
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
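
The WAL version bump from the commit message is made in src/include/access/xlog_internal.h, also outside this directory. It amounts to incrementing the page magic that doubles as a WAL version indicator, so that binaries built before the format change reject the new WAL (the value below is a placeholder, not the one from the commit):

#define XLOG_PAGE_MAGIC		0xD098		/* bumped whenever the WAL format changes */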