author	Andres Freund <andres@anarazel.de>	2023-04-14 11:30:20 -0700
committer	Andres Freund <andres@anarazel.de>	2023-04-14 11:30:33 -0700
commit	43a33ef54e503b61f269d088f2623ba3b9484ad7 (patch)
tree	a43336dbc7ddae66a571ce4658f062b597c4d41f /src/backend/storage
parent	e4d905f772f7803d399b6e173ca9aa7ba8a35e5a (diff)
download	postgresql-43a33ef54e503b61f269d088f2623ba3b9484ad7.tar.gz
Support RBM_ZERO_AND_CLEANUP_LOCK in ExtendBufferedRelTo(), add tests

For some reason I had not implemented RBM_ZERO_AND_CLEANUP_LOCK support in
ExtendBufferedRelTo(), likely thinking it was not reachable. But it is
reachable, e.g. when replaying a WAL record for a page in a relation that
subsequently is truncated (likely only reachable during crash recovery or
PITR, not during ongoing streaming replication).

Now that all of the RBM_* modes are supported, remove the assertions
checking the mode.

As we had no test coverage for this scenario, add a new TAP test. There's
plenty more that ought to be tested in this area...

Reported-by: Tom Lane <tgl@sss.pgh.pa.us>
Reported-by: Alexander Lakhin <exclusion@gmail.com>
Reviewed-by: Alexander Lakhin <exclusion@gmail.com>
Discussion: https://postgr.es/m/392271.1681238924%40sss.pgh.pa.us
Discussion: https://postgr.es/m/0b5eb82b-cb99-e0a4-b932-3dc60e2e3926@gmail.com
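For context, here is a minimal sketch (not part of this commit) of how a recovery path can end up calling ExtendBufferedRelTo() with RBM_ZERO_AND_CLEANUP_LOCK, loosely modeled on XLogReadBufferExtended(). The helper name redo_read_or_extend and the exact flag choices are illustrative assumptions; the ExtendBufferedRelTo() signature is the one visible in the hunks below.

#include "postgres.h"

#include "catalog/pg_class.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"

/*
 * Hypothetical helper: during WAL replay a record can reference a block past
 * the current end of the relation (e.g. because the relation was truncated
 * later in the WAL stream).  In that case the relation has to be extended
 * with zeroed pages; with RBM_ZERO_AND_CLEANUP_LOCK the target buffer comes
 * back already cleanup-locked -- the mode the removed assertions rejected.
 */
static Buffer
redo_read_or_extend(SMgrRelation smgr, ForkNumber forknum, BlockNumber blkno)
{
	BlockNumber lastblock = smgrnblocks(smgr, forknum);

	if (blkno < lastblock)
	{
		/* The block exists on disk: read it and take a cleanup lock. */
		return ReadBufferWithoutRelcache(smgr->smgr_rlocator.locator,
										 forknum, blkno,
										 RBM_ZERO_AND_CLEANUP_LOCK,
										 NULL, true);
	}

	/*
	 * The block is past EOF: extend up to and including blkno.  The returned
	 * buffer is zero-filled and, as of this commit, cleanup-locked.
	 */
	return ExtendBufferedRelTo(EB_SMGR(smgr, RELPERSISTENCE_PERMANENT),
							   forknum,
							   NULL,	/* no buffer access strategy */
							   EB_PERFORMING_RECOVERY | EB_SKIP_EXTENSION_LOCK,
							   blkno + 1,
							   RBM_ZERO_AND_CLEANUP_LOCK);
}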
Diffstat (limited to 'src/backend/storage')
-rw-r--r--	src/backend/storage/buffer/bufmgr.c	28
1 file changed, 17 insertions, 11 deletions
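The comments added in the hunks below argue that, for a page that has never been marked valid, no other backend can hold a pin, so an exclusive content lock already provides cleanup-lock semantics. On the caller's side the resulting pattern looks roughly like the following hypothetical sketch (the function name and the PageInit() parameters are illustrative assumptions, not taken from this commit): the zeroed, locked buffer is initialized and only then unlocked, so no other backend ever sees the all-zero page.

#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/*
 * Hypothetical caller-side pattern: a buffer obtained with
 * RBM_ZERO_AND_LOCK / RBM_ZERO_AND_CLEANUP_LOCK is zero-filled and already
 * content-locked, so it can be initialized before any other backend can
 * observe it.
 */
static void
init_zeroed_buffer(Buffer buf)
{
	Page		page = BufferGetPage(buf);

	/* Lay out a standard page on the zero-filled buffer. */
	PageInit(page, BufferGetPageSize(buf), 0);

	/* ... caller-specific initialization of the page contents ... */

	MarkBufferDirty(buf);

	/* Drop the content lock and the pin once the page is initialized. */
	UnlockReleaseBuffer(buf);
}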
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index b316320833..ee6244f9bc 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -888,8 +888,6 @@ ExtendBufferedRelTo(ExtendBufferedWhat eb,
 	Assert((eb.rel != NULL) != (eb.smgr != NULL));
 	Assert(eb.smgr == NULL || eb.relpersistence != 0);
 	Assert(extend_to != InvalidBlockNumber && extend_to > 0);
-	Assert(mode == RBM_NORMAL || mode == RBM_ZERO_ON_ERROR ||
-		   mode == RBM_ZERO_AND_LOCK);
 
 	if (eb.smgr == NULL)
 	{
@@ -933,7 +931,13 @@ ExtendBufferedRelTo(ExtendBufferedWhat eb,
 	 */
 	current_size = smgrnblocks(eb.smgr, fork);
 
-	if (mode == RBM_ZERO_AND_LOCK)
+	/*
+	 * Since no-one else can be looking at the page contents yet, there is no
+	 * difference between an exclusive lock and a cleanup-strength lock. Note
+	 * that we pass the original mode to ReadBuffer_common() below, when we
+	 * fall back to reading the buffer due to a concurrent relation extension.
+	 */
+	if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
 		flags |= EB_LOCK_TARGET;
 
 	while (current_size < extend_to)
@@ -1008,11 +1012,12 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	{
 		uint32		flags = EB_SKIP_EXTENSION_LOCK;
 
-		Assert(mode == RBM_NORMAL ||
-			   mode == RBM_ZERO_AND_LOCK ||
-			   mode == RBM_ZERO_ON_ERROR);
-
-		if (mode == RBM_ZERO_AND_LOCK)
+		/*
+		 * Since no-one else can be looking at the page contents yet, there is
+		 * no difference between an exclusive lock and a cleanup-strength
+		 * lock.
+		 */
+		if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
 			flags |= EB_LOCK_FIRST;
 
 		return ExtendBufferedRel(EB_SMGR(smgr, relpersistence),
@@ -1145,9 +1150,10 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	}
 
 	/*
-	 * In RBM_ZERO_AND_LOCK mode, grab the buffer content lock before marking
-	 * the page as valid, to make sure that no other backend sees the zeroed
-	 * page before the caller has had a chance to initialize it.
+	 * In RBM_ZERO_AND_LOCK / RBM_ZERO_AND_CLEANUP_LOCK mode, grab the buffer
+	 * content lock before marking the page as valid, to make sure that no
+	 * other backend sees the zeroed page before the caller has had a chance
+	 * to initialize it.
 	 *
 	 * Since no-one else can be looking at the page contents yet, there is no
 	 * difference between an exclusive lock and a cleanup-strength lock. (Note