Diffstat (limited to 'src/backend/access/index/indexam.c')
-rw-r--r--  src/backend/access/index/indexam.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 32623965c7..813ebe9d78 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -126,7 +126,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index.  (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@@ -287,7 +287,7 @@ index_beginscan_internal(Relation indexRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
- * cannot change). To restart the scan without changing keys, pass NULL
+ * cannot change).  To restart the scan without changing keys, pass NULL
* for the key array.
*
* Note that this is also called when first starting an indexscan;
@@ -375,7 +375,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots.  This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -400,7 +400,7 @@ index_restrpos(IndexScanDesc scan)
* index_getnext - get the next heap tuple from a scan
*
* The result is the next heap tuple satisfying the scan keys and the
- * snapshot, or NULL if no more matching tuples exist. On success,
+ * snapshot, or NULL if no more matching tuples exist.  On success,
* the buffer containing the heap tuple is pinned (the pin will be dropped
* at the next index_getnext or index_endscan).
*
@@ -438,7 +438,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
- * earlier member. Must still hold pin on current heap page.
+ * earlier member.  Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@@ -556,7 +556,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* The xmin should match the previous xmax value, else chain is
- * broken. (Note: this test is not optional because it protects
+ * broken.  (Note: this test is not optional because it protects
* us against the case where the prior chain member's xmax aborted
* since we looked at it.)
*/
@@ -758,7 +758,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead).  Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -792,7 +792,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache.  As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
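
For reference, a minimal sketch of how the scan entry points documented above are typically driven by a caller. Everything below is illustrative rather than taken from this commit: the function and variable names are made up, the include list is abbreviated, and the index_beginscan() signature shown is the older form where scan keys are passed at beginscan time (on newer branches the keys are supplied through index_rescan() instead).

#include "postgres.h"

#include "access/genam.h"
#include "access/skey.h"
#include "utils/fmgroids.h"
#include "utils/rel.h"

/*
 * Sketch only (not part of this commit): count the heap tuples whose first
 * indexed column equals 'key', using the indexam API described above.
 * F_OIDEQ is just an illustrative equality proc for an OID-typed column.
 */
static long
count_matches(Relation heapRel, Oid indexOid, Snapshot snapshot, Oid key)
{
	Relation	irel = index_open(indexOid, AccessShareLock);
	IndexScanDesc scan;
	ScanKeyData skey;
	HeapTuple	tup;
	long		nmatches = 0;

	ScanKeyInit(&skey, 1, BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(key));

	/* older-style beginscan: keys supplied up front (assumed signature) */
	scan = index_beginscan(heapRel, irel, snapshot, 1, &skey);

	/*
	 * index_getnext returns heap tuples satisfying the keys and snapshot;
	 * the buffer pin it takes is dropped at the next call or at endscan.
	 */
	while ((tup = index_getnext(scan, ForwardScanDirection)) != NULL)
		nmatches++;

	index_endscan(scan);
	index_close(irel, AccessShareLock);

	return nmatches;
}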