summaryrefslogtreecommitdiff
path: root/src/backend/access
diff options
context:
space:
mode:
authorNoah Misch <noah@leadboat.com>2020-03-21 09:38:33 -0700
committerNoah Misch <noah@leadboat.com>2020-03-21 09:38:35 -0700
commite0dd086414f782d9200ad525a1643a9f57a2b497 (patch)
treebf07eafdd96ba96bbcdb42b21e3e9047e6214317 /src/backend/access
parent978da2a95597c97c52b14a27ba29873051ffc59d (diff)
downloadpostgresql-e0dd086414f782d9200ad525a1643a9f57a2b497.tar.gz
Back-patch log_newpage_range().
Back-patch a subset of commit 9155580fd5fc2a0cbb23376dfca7cd21f59c2c7b to v11, v10, 9.6, and 9.5. Include the latest repairs to this function. Use a new XLOG_FPI_MULTI value instead of reusing XLOG_FPI. That way, if an older server reads WAL from this function, that server will PANIC instead of applying just one page of the record. The next commit adds a call to this function. Discussion: https://postgr.es/m/20200304.162919.898938381201316571.horikyota.ntt@gmail.com
Diffstat (limited to 'src/backend/access')
-rw-r--r--src/backend/access/rmgrdesc/xlogdesc.c6
-rw-r--r--src/backend/access/transam/xlog.c23
-rw-r--r--src/backend/access/transam/xloginsert.c88
3 files changed, 108 insertions, 9 deletions
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index 5e210b9947..bede96e6b5 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -77,7 +77,8 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
appendStringInfoString(buf, xlrec->rp_name);
}
- else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT)
+ else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT ||
+ info == XLOG_FPI_MULTI)
{
/* no further information to print */
}
@@ -181,6 +182,9 @@ xlog_identify(uint8 info)
case XLOG_FPI_FOR_HINT:
id = "FPI_FOR_HINT";
break;
+ case XLOG_FPI_MULTI:
+ id = "FPI_MULTI";
+ break;
}
return id;
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 2ddd8c41a1..a378df9d76 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -9339,7 +9339,7 @@ xlog_redo(XLogReaderState *record)
/* in XLOG rmgr, backup blocks are only used by XLOG_FPI records */
Assert(info == XLOG_FPI || info == XLOG_FPI_FOR_HINT ||
- !XLogRecHasAnyBlockRefs(record));
+ info == XLOG_FPI_MULTI || !XLogRecHasAnyBlockRefs(record));
if (info == XLOG_NEXTOID)
{
@@ -9537,14 +9537,16 @@ xlog_redo(XLogReaderState *record)
{
/* nothing to do here */
}
- else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT)
+ else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT ||
+ info == XLOG_FPI_MULTI)
{
- Buffer buffer;
+ uint8 block_id;
/*
* Full-page image (FPI) records contain nothing else but a backup
- * block. The block reference must include a full-page image -
- * otherwise there would be no point in this record.
+ * block (or multiple backup blocks). Every block reference must
+ * include a full-page image - otherwise there would be no point in
+ * this record.
*
* No recovery conflicts are generated by these generic records - if a
* resource manager needs to generate conflicts, it has to define a
@@ -9556,9 +9558,14 @@ xlog_redo(XLogReaderState *record)
* XLOG_FPI and XLOG_FPI_FOR_HINT records, they use a different info
* code just to distinguish them for statistics purposes.
*/
- if (XLogReadBufferForRedo(record, 0, &buffer) != BLK_RESTORED)
- elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
- UnlockReleaseBuffer(buffer);
+ for (block_id = 0; block_id <= record->max_block_id; block_id++)
+ {
+ Buffer buffer;
+
+ if (XLogReadBufferForRedo(record, block_id, &buffer) != BLK_RESTORED)
+ elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
+ UnlockReleaseBuffer(buffer);
+ }
}
else if (info == XLOG_BACKUP_END)
{
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 49795b48a9..c15b08f05a 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -987,6 +987,94 @@ log_newpage_buffer(Buffer buffer, bool page_std)
}
/*
+ * WAL-log a range of blocks in a relation.
+ *
+ * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
+ * written to the WAL. If the range is large, this is done in multiple WAL
+ * records.
+ *
+ * If all pages follow the standard page layout, with a PageHeader and unused
+ * space between pd_lower and pd_upper, set 'page_std' to true. That allows
+ * the unused space to be left out from the WAL records, making them smaller.
+ *
+ * NOTE: This function acquires exclusive-locks on the pages. Typically, this
+ * is used on a newly-built relation, and the caller is holding an
+ * AccessExclusiveLock on it, so no other backend can be accessing it at the
+ * same time. If that's not the case, you must ensure that this does not
+ * cause a deadlock through some other means.
+ */
+void
+log_newpage_range(Relation rel, ForkNumber forkNum,
+ BlockNumber startblk, BlockNumber endblk,
+ bool page_std)
+{
+ int flags;
+ BlockNumber blkno;
+
+ flags = REGBUF_FORCE_IMAGE;
+ if (page_std)
+ flags |= REGBUF_STANDARD;
+
+ /*
+ * Iterate over all the pages in the range. They are collected into
+ * batches of XLR_MAX_BLOCK_ID pages, and a single WAL-record is written
+ * for each batch.
+ */
+ XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID - 1, 0);
+
+ blkno = startblk;
+ while (blkno < endblk)
+ {
+ Buffer bufpack[XLR_MAX_BLOCK_ID];
+ XLogRecPtr recptr;
+ int nbufs;
+ int i;
+
+ CHECK_FOR_INTERRUPTS();
+
+ /* Collect a batch of blocks. */
+ nbufs = 0;
+ while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
+ {
+ Buffer buf = ReadBufferExtended(rel, forkNum, blkno,
+ RBM_NORMAL, NULL);
+
+ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * Completely empty pages are not WAL-logged. Writing a WAL record
+ * would change the LSN, and we don't want that. We want the page
+ * to stay empty.
+ */
+ if (!PageIsNew(BufferGetPage(buf)))
+ bufpack[nbufs++] = buf;
+ else
+ UnlockReleaseBuffer(buf);
+ blkno++;
+ }
+
+ /* Write WAL record for this batch. */
+ XLogBeginInsert();
+
+ START_CRIT_SECTION();
+ for (i = 0; i < nbufs; i++)
+ {
+ XLogRegisterBuffer(i, bufpack[i], flags);
+ MarkBufferDirty(bufpack[i]);
+ }
+
+ recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_MULTI);
+
+ for (i = 0; i < nbufs; i++)
+ {
+ PageSetLSN(BufferGetPage(bufpack[i]), recptr);
+ UnlockReleaseBuffer(bufpack[i]);
+ }
+ END_CRIT_SECTION();
+ }
+}
+
+/*
* Allocate working buffers needed for WAL record construction.
*/
void