author     Peter Geoghegan <pg@bowt.ie>  2022-11-17 14:55:08 -0800
committer  Peter Geoghegan <pg@bowt.ie>  2022-11-17 14:55:08 -0800
commit     1489b1ce728248e04da72aa32f87e9a634ebf9b8
tree       4a95eb7c2ffc0424f8805056c36b3493019313be /src/backend/access/heap
parent     6ff5aa129933fbde034c0d21c28cf05e052511f9
Standardize rmgrdesc recovery conflict XID output.
Standardize on the name snapshotConflictHorizon for all XID fields from WAL records that generate recovery conflicts when in hot standby mode. This supersedes the previous latestRemovedXid naming convention.

The new naming convention places emphasis on how the values are actually used by REDO routines. How the values are generated during original execution (details of which vary by record type) is deemphasized. Users of tools like pg_waldump can now grep for snapshotConflictHorizon to see all potential sources of recovery conflicts in a standardized way, without necessarily having to consider which specific record types might be involved.

Also bring a couple of WAL record types that didn't follow any kind of naming convention into line. These are heapam's VISIBLE record type and SP-GiST's VACUUM_REDIRECT record type. Now every WAL record whose REDO routine calls ResolveRecoveryConflictWithSnapshot() passes through the snapshotConflictHorizon field from its WAL record. This is follow-up work to the refactoring from commit 9e540599 that made FREEZE_PAGE WAL records use a standard snapshotConflictHorizon style XID cutoff.

No bump in XLOG_PAGE_MAGIC, since the underlying format of affected WAL records doesn't change.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/CAH2-Wzm2CQUmViUq7Opgk=McVREHSOorYaAjR1ZpLYkRN7_dPw@mail.gmail.com
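As the message notes, the practical payoff is uniformly greppable pg_waldump output. Below is a minimal sketch of the rmgrdesc output style this enables, assuming the xl_heap_prune layout shown in the diff further down; the function name and exact format string are illustrative, not the literal heapdesc.c change:

	#include "postgres.h"

	#include "access/heapam_xlog.h"
	#include "lib/stringinfo.h"

	/*
	 * Illustrative sketch only: an rmgrdesc-style routine for PRUNE records.
	 * Because every conflict-generating record type now uses the same field
	 * name, "pg_waldump ... | grep snapshotConflictHorizon" finds them all.
	 */
	static void
	example_prune_desc(StringInfo buf, xl_heap_prune *xlrec)
	{
		appendStringInfo(buf, "snapshotConflictHorizon %u, nredirected %u, ndead %u",
						 xlrec->snapshotConflictHorizon,
						 xlrec->nredirected, xlrec->ndead);
	}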
Diffstat (limited to 'src/backend/access/heap')
-rw-r--r--  src/backend/access/heap/heapam.c     76
-rw-r--r--  src/backend/access/heap/pruneheap.c  14
2 files changed, 50 insertions(+), 40 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9790ba2298..d18c5ca6f5 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6792,7 +6792,7 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer,
Page page = BufferGetPage(buffer);
Assert(ntuples > 0);
- Assert(TransactionIdIsValid(FreezeLimit));
+ Assert(TransactionIdIsNormal(FreezeLimit));
START_CRIT_SECTION();
@@ -6815,21 +6815,20 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer,
int nplans;
xl_heap_freeze_page xlrec;
XLogRecPtr recptr;
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
/* Prepare deduplicated representation for use in WAL record */
nplans = heap_log_freeze_plan(tuples, ntuples, plans, offsets);
/*
- * latestRemovedXid describes the latest processed XID, whereas
* FreezeLimit is (approximately) the first XID not frozen by VACUUM.
* Back up caller's FreezeLimit to avoid false conflicts when
* FreezeLimit is precisely equal to VACUUM's OldestXmin cutoff.
*/
- latestRemovedXid = FreezeLimit;
- TransactionIdRetreat(latestRemovedXid);
+ snapshotConflictHorizon = FreezeLimit;
+ TransactionIdRetreat(snapshotConflictHorizon);
- xlrec.latestRemovedXid = latestRemovedXid;
+ xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.nplans = nplans;
XLogBeginInsert();
@@ -7401,15 +7400,21 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
}
/*
- * If 'tuple' contains any visible XID greater than latestRemovedXid,
- * ratchet forwards latestRemovedXid to the greatest one found.
- * This is used as the basis for generating Hot Standby conflicts, so
- * if a tuple was never visible then removing it should not conflict
- * with queries.
+ * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
+ * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
+ * that caller is in the process of physically removing, e.g. via HOT pruning
+ * or index deletion.
+ *
+ * Caller must initialize its value to InvalidTransactionId, which is
+ * generally interpreted as "definitely no need for a recovery conflict".
+ * Final value must reflect all heap tuples that caller will physically remove
+ * (or remove TID references to) via its ongoing pruning/deletion operation.
+ * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
+ * caller's WAL record) by REDO routine when it replays caller's operation.
*/
void
-HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
- TransactionId *latestRemovedXid)
+HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
+ TransactionId *snapshotConflictHorizon)
{
TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
@@ -7417,8 +7422,8 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
if (tuple->t_infomask & HEAP_MOVED)
{
- if (TransactionIdPrecedes(*latestRemovedXid, xvac))
- *latestRemovedXid = xvac;
+ if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
+ *snapshotConflictHorizon = xvac;
}
/*
@@ -7431,11 +7436,9 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
(!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
{
if (xmax != xmin &&
- TransactionIdFollows(xmax, *latestRemovedXid))
- *latestRemovedXid = xmax;
+ TransactionIdFollows(xmax, *snapshotConflictHorizon))
+ *snapshotConflictHorizon = xmax;
}
-
- /* *latestRemovedXid may still be invalid at end */
}
#ifdef USE_PREFETCH
@@ -7558,7 +7561,7 @@ TransactionId
heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
{
/* Initial assumption is that earlier pruning took care of conflict */
- TransactionId latestRemovedXid = InvalidTransactionId;
+ TransactionId snapshotConflictHorizon = InvalidTransactionId;
BlockNumber blkno = InvalidBlockNumber;
Buffer buf = InvalidBuffer;
Page page = NULL;
@@ -7769,8 +7772,8 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
}
/*
- * Maintain latestRemovedXid value for deletion operation as a whole
- * by advancing current value using heap tuple headers. This is
+ * Maintain snapshotConflictHorizon value for deletion operation as a
+ * whole by advancing current value using heap tuple headers. This is
* loosely based on the logic for pruning a HOT chain.
*/
offnum = ItemPointerGetOffsetNumber(htid);
@@ -7805,12 +7808,12 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
* LP_DEAD item. This is okay because the earlier pruning
* operation that made the line pointer LP_DEAD in the first place
* must have considered the original tuple header as part of
- * generating its own latestRemovedXid value.
+ * generating its own snapshotConflictHorizon value.
*
* Relying on XLOG_HEAP2_PRUNE records like this is the same
* strategy that index vacuuming uses in all cases. Index VACUUM
- * WAL records don't even have a latestRemovedXid field of their
- * own for this reason.
+ * WAL records don't even have a snapshotConflictHorizon field of
+ * their own for this reason.
*/
if (!ItemIdIsNormal(lp))
break;
@@ -7824,7 +7827,8 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
!TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
break;
- HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid);
+ HeapTupleHeaderAdvanceConflictHorizon(htup,
+ &snapshotConflictHorizon);
/*
* If the tuple is not HOT-updated, then we are at the end of this
@@ -7856,7 +7860,7 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
Assert(finalndeltids > 0 || delstate->bottomup);
delstate->ndeltids = finalndeltids;
- return latestRemovedXid;
+ return snapshotConflictHorizon;
}
/*
@@ -8232,6 +8236,9 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
* corresponding visibility map block. Both should have already been modified
* and dirtied.
*
+ * snapshotConflictHorizon comes from the largest xmin on the page being
+ * marked all-visible. REDO routine uses it to generate recovery conflicts.
+ *
* If checksums or wal_log_hints are enabled, we may also generate a full-page
* image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
* REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
@@ -8239,7 +8246,7 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
*/
XLogRecPtr
log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
- TransactionId cutoff_xid, uint8 vmflags)
+ TransactionId snapshotConflictHorizon, uint8 vmflags)
{
xl_heap_visible xlrec;
XLogRecPtr recptr;
@@ -8248,7 +8255,7 @@ log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
Assert(BufferIsValid(heap_buffer));
Assert(BufferIsValid(vm_buffer));
- xlrec.cutoff_xid = cutoff_xid;
+ xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.flags = vmflags;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
@@ -8683,7 +8690,8 @@ heap_xlog_prune(XLogReaderState *record)
* no queries running for which the removed tuples are still visible.
*/
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+ rlocator);
/*
* If we have a full-page image, restore it (using a cleanup lock) and
@@ -8851,7 +8859,8 @@ heap_xlog_visible(XLogReaderState *record)
* rather than killing the transaction outright.
*/
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+ rlocator);
/*
* Read the heap page, if it still exists. If the heap file has dropped or
@@ -8939,7 +8948,7 @@ heap_xlog_visible(XLogReaderState *record)
visibilitymap_pin(reln, blkno, &vmbuffer);
visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
- xlrec->cutoff_xid, xlrec->flags);
+ xlrec->snapshotConflictHorizon, xlrec->flags);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
@@ -9105,7 +9114,8 @@ heap_xlog_freeze_page(XLogReaderState *record)
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+ rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
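Taken together, the heapam.c hunks above follow one primary-side pattern, spelled out by the new HeapTupleHeaderAdvanceConflictHorizon header comment: start from InvalidTransactionId, ratchet the horizon forward once per tuple removed, then store the final value in the WAL record. A composite sketch under those assumptions; ntuples, the tuples array (assumed to hold HeapTupleHeader pointers), and the local xlrec are illustrative stand-ins, not code from this commit:

	TransactionId	snapshotConflictHorizon = InvalidTransactionId;
	xl_heap_prune	xlrec;

	for (int i = 0; i < ntuples; i++)
	{
		/* ratchet forward for each obsolescent tuple being removed */
		HeapTupleHeaderAdvanceConflictHorizon(tuples[i],
											  &snapshotConflictHorizon);
	}

	/* the final value travels in the WAL record for REDO to act on */
	xlrec.snapshotConflictHorizon = snapshotConflictHorizon;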
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 9f43bbe25f..91c5f5e9ef 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -49,7 +49,7 @@ typedef struct
bool old_snap_used;
TransactionId new_prune_xid; /* new prune hint value for page */
- TransactionId latestRemovedXid; /* latest xid to be removed by this prune */
+ TransactionId snapshotConflictHorizon; /* latest xid removed */
int nredirected; /* numbers of entries in arrays below */
int ndead;
int nunused;
@@ -295,7 +295,7 @@ heap_page_prune(Relation relation, Buffer buffer,
prstate.old_snap_xmin = old_snap_xmin;
prstate.old_snap_ts = old_snap_ts;
prstate.old_snap_used = false;
- prstate.latestRemovedXid = InvalidTransactionId;
+ prstate.snapshotConflictHorizon = InvalidTransactionId;
prstate.nredirected = prstate.ndead = prstate.nunused = 0;
memset(prstate.marked, 0, sizeof(prstate.marked));
@@ -418,7 +418,7 @@ heap_page_prune(Relation relation, Buffer buffer,
xl_heap_prune xlrec;
XLogRecPtr recptr;
- xlrec.latestRemovedXid = prstate.latestRemovedXid;
+ xlrec.snapshotConflictHorizon = prstate.snapshotConflictHorizon;
xlrec.nredirected = prstate.nredirected;
xlrec.ndead = prstate.ndead;
@@ -636,8 +636,8 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate)
!HeapTupleHeaderIsHotUpdated(htup))
{
heap_prune_record_unused(prstate, rootoffnum);
- HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ HeapTupleHeaderAdvanceConflictHorizon(htup,
+ &prstate->snapshotConflictHorizon);
ndeleted++;
}
@@ -773,8 +773,8 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate)
if (tupdead)
{
latestdead = offnum;
- HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ HeapTupleHeaderAdvanceConflictHorizon(htup,
+ &prstate->snapshotConflictHorizon);
}
else if (!recent_dead)
break;
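On the standby side, every affected REDO routine now resolves conflicts from the same field, as the heap_xlog_prune, heap_xlog_visible, and heap_xlog_freeze_page hunks above all show. The shared shape, reproduced as a sketch with the record decoding elided:

	if (InHotStandby)
		ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
											rlocator);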