Diffstat (limited to 'src/backend/access')
 src/backend/access/gist/gistxlog.c       | 4 ++--
 src/backend/access/heap/heapam.c         | 2 +-
 src/backend/access/heap/heapam_handler.c | 2 +-
 src/backend/access/heap/pruneheap.c      | 5 ++---
 src/backend/access/heap/vacuumlazy.c     | 2 +-
 src/backend/access/nbtree/nbtsplitloc.c  | 2 +-
 src/backend/access/transam/slru.c        | 6 +++---
 src/backend/access/transam/xlogreader.c  | 4 ++--
 8 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 9a86fb3fef..a2ddfd5e69 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -580,8 +580,8 @@ gistXLogAssignLSN(void)
int dummy = 0;
/*
- * Records other than SWITCH_WAL must have content. We use an integer 0 to
- * follow the restriction.
+ * Records other than XLOG_SWITCH must have content. We use an integer 0
+ * to follow the restriction.
*/
XLogBeginInsert();
XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);
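For context, the function this hunk touches is tiny; reconstructed from the lines above plus the standard xloginsert.h calls that fall just outside the hunk (XLogRegisterData, XLogInsert), it reads roughly:

XLogRecPtr
gistXLogAssignLSN(void)
{
	int			dummy = 0;

	/*
	 * Records other than XLOG_SWITCH must have content. We use an integer 0
	 * to follow the restriction.
	 */
	XLogBeginInsert();
	XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);
	XLogRegisterData((char *) &dummy, sizeof(dummy));	/* non-empty payload */
	return XLogInsert(RM_GIST_ID, XLOG_GIST_ASSIGN_LSN);
}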
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index b300a4675e..0124f37911 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1223,7 +1223,7 @@ heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
* Calculate the first block and the number of blocks we must scan. We
* could be more aggressive here and perform some more validation to try
* and further narrow the scope of blocks to scan by checking if the
- * lowerItem has an offset above MaxOffsetNumber. In this case, we could
+ * lowestItem has an offset above MaxOffsetNumber. In this case, we could
* advance startBlk by one. Likewise, if highestItem has an offset of 0
* we could scan one fewer blocks. However, such an optimization does not
* seem worth troubling over, currently.
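A rough sketch of the computation the comment describes; lowestItem and highestItem are the clamped bounds named in the comment, the accessors are the stock itemptr.h macros, and the exact function body may differ:

	/* First block to scan, and how many blocks the TID range covers. */
	startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
	numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) - startBlk + 1;

	/*
	 * The optimization the comment declines: a lowestItem offset above
	 * MaxOffsetNumber would allow startBlk + 1; a highestItem offset of
	 * 0 would allow numBlks - 1.
	 */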
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index e2e35b71ea..cbb35aa73d 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -816,7 +816,7 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
* If the last pages of the scan were empty, we would go to
* the next phase while heap_blks_scanned != heap_blks_total.
* Instead, to ensure that heap_blks_scanned is equivalent to
- * total_heap_blks after the table scan phase, this parameter
+ * heap_blks_total after the table scan phase, this parameter
* is manually updated to the correct value when the table
* scan finishes.
*/
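The "parameter" being manually updated is the heap_blks_scanned counter of the command-progress machinery; a hedged sketch of that final update (pgstat_progress_update_param() and PROGRESS_CLUSTER_HEAP_BLKS_SCANNED are the stock progress-reporting API; the heapScan variable name is assumed):

	/* Table scan is done: pin heap_blks_scanned to heap_blks_total. */
	pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
								 heapScan->rs_nblocks);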
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 3f0342351f..20df39c149 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -198,8 +198,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
/*
* Now that we have buffer lock, get accurate information about the
* page's free space, and recheck the heuristic about whether to
- * prune. (We needn't recheck PageIsPrunable, since no one else could
- * have pruned while we hold pin.)
+ * prune.
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
@@ -490,7 +489,7 @@ heap_page_prune(Relation relation, Buffer buffer,
*
* Due to its cost we also only want to call
* TransactionIdLimitedForOldSnapshots() if necessary, i.e. we might not have
- * done so in heap_hot_prune_opt() if pd_prune_xid was old enough. But we
+ * done so in heap_page_prune_opt() if pd_prune_xid was old enough. But we
* still want to be able to remove rows that are too new to be removed
* according to prstate->vistest, but that can be removed based on
* old_snapshot_threshold. So we call TransactionIdLimitedForOldSnapshots() on
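A loose sketch of the two-step check this paragraph describes; the control flow is illustrative rather than the exact heap_page_prune() code, and limit_xid/limit_ts are local placeholders:

	/* Cheap test first: is xid already removable per prstate->vistest? */
	if (!GlobalVisTestIsRemovableXid(prstate->vistest, xid))
	{
		TransactionId limit_xid = InvalidTransactionId;
		TimestampTz limit_ts = 0;

		/* Only now pay for the old_snapshot_threshold clamp. */
		if (TransactionIdLimitedForOldSnapshots(RecentXmin, relation,
												&limit_xid, &limit_ts))
		{
			/* xid may still be removable under the clamped horizon */
		}
	}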
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 442db3551b..cda8889f5e 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2575,7 +2575,7 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
END_CRIT_SECTION();
/*
- * Now that we have removed the LD_DEAD items from the page, once again
+ * Now that we have removed the LP_DEAD items from the page, once again
* check if the page has become all-visible. The page is already marked
* dirty, exclusively locked, and, if needed, a full page image has been
* emitted.
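The removal step this comment follows can be sketched as below; unused[] and nunused stand in for the array of LP_DEAD offsets collected by the caller, and the page/buffer calls are the stock bufpage.h/bufmgr.h API:

	/* Reclaim each LP_DEAD line pointer, then defragment the page. */
	for (int i = 0; i < nunused; i++)
		ItemIdSetUnused(PageGetItemId(page, unused[i]));

	PageRepairFragmentation(page);
	MarkBufferDirty(buffer);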
diff --git a/src/backend/access/nbtree/nbtsplitloc.c b/src/backend/access/nbtree/nbtsplitloc.c
index ecb49bb471..43b67893d9 100644
--- a/src/backend/access/nbtree/nbtsplitloc.c
+++ b/src/backend/access/nbtree/nbtsplitloc.c
@@ -119,7 +119,7 @@ static inline IndexTuple _bt_split_firstright(FindSplitData *state,
* righthand page (which is called firstrightoff), plus a boolean
* indicating whether the new tuple goes on the left or right page. You
* can think of the returned state as a point _between_ two adjacent data
- * items (laftleft and firstright data items) on an imaginary version of
+ * items (lastleft and firstright data items) on an imaginary version of
* origpage that already includes newitem. The bool is necessary to
* disambiguate the case where firstrightoff == newitemoff (i.e. it is
* sometimes needed to determine if the firstright tuple for the split is
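The interface that paragraph describes boils down to one call, per the declaration in this file: it returns firstrightoff and reports which side newitem goes on through an output flag.

	bool		newitemonleft;
	OffsetNumber firstrightoff;

	/* Where to split origpage, given that newitem must also fit. */
	firstrightoff = _bt_findsplitloc(rel, origpage, newitemoff,
									 newitemsz, newitem, &newitemonleft);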
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 2a42f31ec2..71ac70fb40 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -809,7 +809,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
}
/*
- * During a WriteAll, we may already have the desired file open.
+ * During a SimpleLruWriteAll, we may already have the desired file open.
*/
if (fdata)
{
@@ -864,7 +864,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
else
{
/*
- * In the unlikely event that we exceed MAX_FLUSH_BUFFERS,
+ * In the unlikely event that we exceed MAX_WRITEALL_BUFFERS,
* fall back to treating it as a standalone write.
*/
fdata = NULL;
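Both hunks concern the same small cache: during SimpleLruWriteAll(), fdata remembers up to MAX_WRITEALL_BUFFERS open segment files so consecutive pages of one segment reuse a single fd. A sketch of the reuse lookup, with field names following SlruWriteAllData in this file:

	if (fdata)
	{
		/* Do we already have this segment's file open? */
		for (int i = 0; i < fdata->num_files; i++)
		{
			if (fdata->segno[i] == segno)
			{
				fd = fdata->fd[i];
				break;
			}
		}
	}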
@@ -1478,7 +1478,7 @@ SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset)
*
* This assumes every uint32 >= FirstNormalTransactionId is a valid key. It
* assumes each value occupies a contiguous, fixed-size region of SLRU bytes.
- * (MultiXactMemberCtl separates flags from XIDs. AsyncCtl has
+ * (MultiXactMemberCtl separates flags from XIDs. NotifyCtl has
* variable-length entries, no keys, and no random access. These unit tests
* do not apply to them.)
*/
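An example of the kind of PagePrecedes callback these unit tests exercise, modeled on clog.c's CLOGPagePrecedes() (shown for illustration; the exact body may differ): each page is mapped to representative keys, which are compared with wraparound-aware TransactionIdPrecedes().

	static bool
	CLOGPagePrecedes(int page1, int page2)
	{
		TransactionId xid1;
		TransactionId xid2;

		xid1 = ((TransactionId) page1) * CLOG_XACTS_PER_PAGE;
		xid1 += FirstNormalTransactionId + 1;
		xid2 = ((TransactionId) page2) * CLOG_XACTS_PER_PAGE;
		xid2 += FirstNormalTransactionId + 1;

		return (TransactionIdPrecedes(xid1, xid2) &&
				TransactionIdPrecedes(xid1, xid2 + CLOG_XACTS_PER_PAGE - 1));
	}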
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index cadea21b37..631f260f79 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -1476,7 +1476,7 @@ err:
}
/*
- * Helper function to ease writing of XLogRoutine->page_read callbacks.
+ * Helper function to ease writing of XLogReaderRoutine->page_read callbacks.
* If this function is used, caller must supply a segment_open callback in
* 'state', as that is used here.
*
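Hooking up such callbacks looks roughly like this; XL_ROUTINE and XLogReaderAllocate() are from xlogreader.h, wal_segment_open/wal_segment_close are the stock xlogutils.c callbacks, and my_page_read is a hypothetical page_read implementation:

	XLogReaderState *state =
		XLogReaderAllocate(wal_segment_size, waldir,
						   XL_ROUTINE(.page_read = &my_page_read,
									  .segment_open = &wal_segment_open,
									  .segment_close = &wal_segment_close),
						   NULL);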
@@ -1513,7 +1513,7 @@ WALRead(XLogReaderState *state,
/*
* If the data we want is not in a segment we have open, close what we
* have (if anything) and open the next one, using the caller's
- * provided openSegment callback.
+ * provided segment_open callback.
*/
if (state->seg.ws_file < 0 ||
!XLByteInSeg(recptr, state->seg.ws_segno, state->segcxt.ws_segsize) ||
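Inside a page_read callback, the helper is typically used as in this sketch; readBuf, startptr, and tli are the callback's computed read window, and WALReadRaiseError() is the companion error reporter in xlogutils.c:

	WALReadError errinfo;

	/* WALRead() opens segments on demand via the segment_open callback. */
	if (!WALRead(state, readBuf, startptr, XLOG_BLCKSZ, tli, &errinfo))
		WALReadRaiseError(&errinfo);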