Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/reloptions.c   2
-rw-r--r--  src/backend/access/gin/gindatapage.c     2
-rw-r--r--  src/backend/access/gist/gistget.c        2
-rw-r--r--  src/backend/access/gist/gistutil.c       2
-rw-r--r--  src/backend/access/hash/hashovfl.c       2
-rw-r--r--  src/backend/access/hash/hashpage.c       2
-rw-r--r--  src/backend/access/heap/heapam.c         2
-rw-r--r--  src/backend/access/heap/pruneheap.c      2
-rw-r--r--  src/backend/access/index/genam.c         4
-rw-r--r--  src/backend/access/spgist/spgscan.c      2
-rw-r--r--  src/backend/access/transam/clog.c        2
-rw-r--r--  src/backend/access/transam/xlog.c        2
12 files changed, 13 insertions, 13 deletions
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 57730214993..42647b05265 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -69,7 +69,7 @@
* currently executing.
*
* Fillfactor can be set because it applies only to subsequent changes made to
- * data blocks, as documented in heapio.c
+ * data blocks, as documented in hio.c
*
* n_distinct options can be set at ShareUpdateExclusiveLock because they
* are only used during ANALYZE, which uses a ShareUpdateExclusiveLock,
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 57c3b830d85..21de8ed714d 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -92,7 +92,7 @@ typedef struct

/*
* The following fields represent the items in this segment. If 'items' is
- * not NULL, it contains a palloc'd array of the itemsin this segment. If
+ * not NULL, it contains a palloc'd array of the items in this segment. If
* 'seg' is not NULL, it contains the items in an already-compressed
* format. It can point to an on-disk page (!modified), or a palloc'd
* segment in memory. If both are set, they must represent the same items.
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 8108fbb7d8e..46d08e06350 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -663,7 +663,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
}

/*
- * Check the last returned tuple and add it to killitems if
+ * Check the last returned tuple and add it to killedItems if
* necessary
*/
if (scan->kill_prior_tuple
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 49df05653b3..7d1b219bbc8 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -120,7 +120,7 @@ gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
}

/*
- * make plain IndexTupleVector
+ * make plain IndexTuple vector
*/

IndexTupleData *
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index a07bd27a0e3..487103fb798 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -793,7 +793,7 @@ _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)
* be confused into returning the same tuple more than once or some tuples
* not at all by the rearrangement we are performing here. To prevent
* any concurrent scan to cross the squeeze scan we use lock chaining
- * similar to hasbucketcleanup. Refer comments atop hashbucketcleanup.
+ * similar to hashbucketcleanup. Refer comments atop hashbucketcleanup.
*
* We need to retain a pin on the primary bucket to ensure that no concurrent
* split can start.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 376ee2a63b5..defdc9b4085 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -509,7 +509,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
* total number of buckets which has to be allocated before using its
- * _hashm_spare element. However always force at least 2 bucket pages. The
+ * hashm_spares element. However always force at least 2 bucket pages. The
* upper limit is determined by considerations explained in
* _hash_expandtable().
*/
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index d768b9b061c..94309949fac 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -102,7 +102,7 @@ static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 in
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
uint16 infomask, Relation rel, int *remaining);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
-static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
+static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed,
bool *copy);
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 0efe3ce9995..a0d22173cef 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -256,7 +256,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
MarkBufferDirty(buffer);

/*
- * Emit a WAL HEAP_CLEAN record showing what we did
+ * Emit a WAL XLOG_HEAP2_CLEAN record showing what we did
*/
if (RelationNeedsWAL(relation))
{
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 42aaa5bad62..2599b5d3425 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -557,8 +557,8 @@ systable_endscan(SysScanDesc sysscan)
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
- * wrappers around index_beginscan/index_getnext. The main reason for their
- * existence is to centralize possible future support of lossy operators
+ * wrappers around index_beginscan/index_getnext_slot. The main reason for
+ * their existence is to centralize possible future support of lossy operators
* in catalog scans.
*/
SysScanDesc
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 557dd18d7e8..1cf28ecf2fd 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -643,7 +643,7 @@ spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
continue;

/*
- * Use infinity distances if innerConsistent() failed to return
+ * Use infinity distances if innerConsistentFn() failed to return
* them or if is a NULL item (their distances are really unused).
*/
distances = out.distances ? out.distances[i] : so->infDistances;
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index d78f706ff7f..34c74d96f8e 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -891,7 +891,7 @@ ExtendCLOG(TransactionId newestXact)
* Remove all CLOG segments before the one holding the passed transaction ID
*
* Before removing any CLOG data, we must flush XLOG to disk, to ensure
- * that any recently-emitted HEAP_FREEZE records have reached disk; otherwise
+ * that any recently-emitted FREEZE_PAGE records have reached disk; otherwise
* a crash and restart might leave us with some unfrozen tuples referencing
* removed CLOG data. We choose to emit a special TRUNCATE XLOG record too.
* Replaying the deletion from XLOG is not critical, since the files could
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b6c9353cbd2..da3d2509860 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -9158,7 +9158,7 @@ CreateRestartPoint(int flags)

/*
* Update pg_control, using current time. Check that it still shows
- * IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
+ * DB_IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
* this is a quick hack to make sure nothing really bad happens if somehow
* we get here after the end-of-recovery checkpoint.
*/