Diffstat (limited to 'src/backend/access/heap')
-rw-r--r--  src/backend/access/heap/heapam.c          | 2
-rw-r--r--  src/backend/access/heap/heapam_handler.c  | 2
-rw-r--r--  src/backend/access/heap/pruneheap.c       | 5
-rw-r--r--  src/backend/access/heap/vacuumlazy.c      | 2
4 files changed, 5 insertions, 6 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index b300a4675e..0124f37911 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1223,7 +1223,7 @@ heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
  * Calculate the first block and the number of blocks we must scan. We
  * could be more aggressive here and perform some more validation to try
  * and further narrow the scope of blocks to scan by checking if the
- * lowerItem has an offset above MaxOffsetNumber. In this case, we could
+ * lowestItem has an offset above MaxOffsetNumber. In this case, we could
  * advance startBlk by one. Likewise, if highestItem has an offset of 0
  * we could scan one fewer blocks. However, such an optimization does not
  * seem worth troubling over, currently.
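
The calculation described above reduces a TID range to a block range by ignoring the offset component of each bound. A minimal sketch of that logic, with hypothetical helper and variable names (this is not the body of heap_set_tidrange itself):

    #include "postgres.h"
    #include "storage/itemptr.h"

    /* Sketch: derive the heap block range implied by a TID range. */
    static void
    tidrange_to_blocks(ItemPointer lowestItem, ItemPointer highestItem,
                       BlockNumber *startBlk, BlockNumber *numBlks)
    {
        *startBlk = ItemPointerGetBlockNumber(lowestItem);

        /*
         * Scan every block from lowestItem's block through highestItem's
         * block.  The optimization the comment declines to implement: if
         * lowestItem's offset were above MaxOffsetNumber, *startBlk could
         * advance by one; if highestItem's offset were 0, the last block
         * could be dropped, since no tuple can match either offset.
         */
        *numBlks = ItemPointerGetBlockNumber(highestItem) - *startBlk + 1;
    }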
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index e2e35b71ea..cbb35aa73d 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -816,7 +816,7 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
  * If the last pages of the scan were empty, we would go to
  * the next phase while heap_blks_scanned != heap_blks_total.
  * Instead, to ensure that heap_blks_scanned is equivalent to
- * total_heap_blks after the table scan phase, this parameter
+ * heap_blks_total after the table scan phase, this parameter
  * is manually updated to the correct value when the table
  * scan finishes.
  */
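
The fix this comment describes boils down to one final progress update after the scan loop. Roughly, using the backend's progress-reporting API (the value variable and exact call site here are illustrative, not the surrounding code):

    /*
     * After the last heap block has been read, force the counter behind
     * pg_stat_progress_cluster.heap_blks_scanned to its final value, so
     * the scan phase never ends with heap_blks_scanned < heap_blks_total
     * merely because the trailing blocks contained no tuples.
     */
    pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
                                 heap_blks_total);   /* illustrative variable */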
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 3f0342351f..20df39c149 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -198,8 +198,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 /*
  * Now that we have buffer lock, get accurate information about the
  * page's free space, and recheck the heuristic about whether to
- * prune. (We needn't recheck PageIsPrunable, since no one else could
- * have pruned while we hold pin.)
+ * prune.
  */
 if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
 {
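
The surrounding code follows a check-then-recheck pattern: the free-space heuristic is evaluated once without the buffer lock and, as the comment notes, again after the lock is taken, since the page can change in between. A simplified sketch of that shape (the pruning call itself is elided):

    /* Cheap pre-check without the lock; the page may still change. */
    if (!PageIsFull(page) && PageGetHeapFreeSpace(page) >= minfree)
        return;

    /* Never wait for the lock; opportunistic pruning is best-effort. */
    if (!ConditionalLockBufferForCleanup(buffer))
        return;

    /* Recheck under the lock before doing any real work. */
    if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
    {
        /* ... prune the page and, if changed, WAL-log the result ... */
    }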
@@ -490,7 +489,7 @@ heap_page_prune(Relation relation, Buffer buffer,
  *
  * Due to its cost we also only want to call
  * TransactionIdLimitedForOldSnapshots() if necessary, i.e. we might not have
- * done so in heap_hot_prune_opt() if pd_prune_xid was old enough. But we
+ * done so in heap_page_prune_opt() if pd_prune_xid was old enough. But we
  * still want to be able to remove rows that are too new to be removed
  * according to prstate->vistest, but that can be removed based on
  * old_snapshot_threshold. So we call TransactionIdLimitedForOldSnapshots() on
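
In code terms, this comment describes a two-tier removability test: the cheap GlobalVisState check runs first, and only when it says "keep" does the more expensive old_snapshot_threshold path get consulted. A condensed sketch of that shape (fragment only; result handling and call details elided):

    /* Cheap test first: is the xid already removable for all snapshots? */
    if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
        return HEAPTUPLE_DEAD;

    /*
     * Only now pay for the old_snapshot_threshold machinery, which can
     * allow removal of rows that ordinary snapshot rules would keep.
     * (See TransactionIdLimitedForOldSnapshots() for the real logic.)
     */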
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 442db3551b..cda8889f5e 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2575,7 +2575,7 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 END_CRIT_SECTION();
 
 /*
- * Now that we have removed the LD_DEAD items from the page, once again
+ * Now that we have removed the LP_DEAD items from the page, once again
  * check if the page has become all-visible. The page is already marked
  * dirty, exclusively locked, and, if needed, a full page image has been
  * emitted.
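
For context, the step this comment introduces re-evaluates page visibility once the dead line pointers are gone. A condensed sketch of the pattern (names follow vacuumlazy.c, but the fragment is simplified; vmbuffer and flags management is elided):

    /*
     * If every remaining tuple is visible to all transactions, mark the
     * page all-visible and set the matching visibility map bit.
     */
    if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
                                 &all_frozen))
    {
        PageSetAllVisible(page);
        visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
                          vmbuffer, visibility_cutoff_xid, flags);
    }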