Diffstat (limited to 'src/backend/access')

 src/backend/access/gin/ginentrypage.c  | 4 ++--
 src/backend/access/gin/ginfast.c       | 2 +-
 src/backend/access/heap/heapam.c       | 2 +-
 src/backend/access/nbtree/README       | 2 +-
 src/backend/access/transam/multixact.c | 2 +-
 src/backend/access/transam/xlog.c      | 2 +-
 6 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index a9482da3fb5..70fcddfe407 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -447,9 +447,9 @@ entryIsEnoughSpace(GinBtree btree, Buffer buf, OffsetNumber off)
}

/*
- * Delete tuple on leaf page if tuples was existed and we
+ * Delete tuple on leaf page if tuples existed and we
* should update it, update old child blkno to new right page
- * if child split is occured
+ * if child split occurred
*/
static BlockNumber
entryPreparePage(GinBtree btree, Page page, OffsetNumber off)
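The comment fixed above is entryPreparePage()'s contract: delete the stale
tuple when the insertion is actually an update, and when the child page was
split, repoint the downlink at the new right page while returning the old
child block number to the caller. Here is a standalone sketch of that
pattern; ToyPage and toy_prepare_page are invented stand-ins, not the real
GIN structures.

#include <stdio.h>
#include <stdbool.h>

typedef unsigned int BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

typedef struct ToyPage
{
    BlockNumber downlink[8];    /* block numbers of child pages */
    int         ntuples;        /* live tuples on this page */
} ToyPage;

/* Prepare 'page' for an insertion at 'off', per the commented contract. */
static BlockNumber
toy_prepare_page(ToyPage *page, int off, bool is_update,
                 bool child_split, BlockNumber new_right_child)
{
    BlockNumber old_child = InvalidBlockNumber;

    if (is_update)
        page->ntuples--;        /* delete the tuple we are about to replace */

    if (child_split)
    {
        old_child = page->downlink[off];        /* remember old child blkno */
        page->downlink[off] = new_right_child;  /* point at new right page */
    }
    return old_child;           /* caller uses this to finish re-linking */
}

int
main(void)
{
    ToyPage page = {.downlink = {10, 11, 12}, .ntuples = 3};
    BlockNumber old = toy_prepare_page(&page, 1, true, true, 42);

    printf("old child %u, new child %u, tuples left %d\n",
           old, page.downlink[1], page.ntuples);
    return 0;
}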
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index d201c68f722..b9bfde2ee42 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -856,7 +856,7 @@ ginInsertCleanup(GinState *ginstate,
* added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
- * gurantees that inserted row(s) will not continue on next page.
+ * guarantees that inserted row(s) will not continue on next page.
* NOTE: intentionally no vacuum_delay_point in this loop.
*/
if (PageGetMaxOffsetNumber(page) != maxoff)
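The guarantee in the corrected sentence is what makes the re-check above
safe: rows inserted while we waited for the exclusive lock never continue
onto the next page, so comparing the remembered maxoff against the page's
current maximum offset catches every late addition. A minimal sketch of
that re-check loop, with plain ints standing in for the real page and
offset types:

#include <stdio.h>

/*
 * We observed 'maxoff' entries before taking the exclusive lock; any
 * entries past it were appended afterwards and are processed here.
 */
static void
process_late_entries(const int *entries, int maxoff, int current_maxoff)
{
    if (current_maxoff != maxoff)
    {
        for (int off = maxoff + 1; off <= current_maxoff; off++)
            printf("processing late-added entry %d\n", entries[off]);
    }
}

int
main(void)
{
    int entries[] = {0, 100, 101, 102, 103};    /* offsets are 1-based */

    process_late_entries(entries, 2, 4);        /* entries 3 and 4 are new */
    return 0;
}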
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 3b7894e8f1a..98d1e559d32 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1580,7 +1580,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
break;

/*
- * When first_call is true (and thus, skip is initally false) we'll
+ * When first_call is true (and thus, skip is initially false) we'll
* return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever),
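The comment spells out the skip logic: skip starts out false on the first
call, so the first visible tuple is returned, while on later calls the
tuple returned last time is skipped exactly once before the scan moves on.
A self-contained sketch of that state machine; next_in_chain and the int
array are invented stand-ins for the real HOT-chain walk:

#include <stdio.h>
#include <stdbool.h>

#define CHAIN_LEN 3

static const int chain[CHAIN_LEN] = {101, 102, 103};   /* toy HOT chain */

/* Return the next tuple in the chain, or -1; *pos carries scan state. */
static int
next_in_chain(int *pos, bool first_call)
{
    bool skip = !first_call;    /* skip is initially false on first call */

    for (int i = *pos; i < CHAIN_LEN; i++)
    {
        if (skip)
        {
            skip = false;       /* this is the tuple returned last time */
            continue;
        }
        *pos = i;               /* remember where to resume */
        return chain[i];
    }
    return -1;
}

int
main(void)
{
    int pos = 0;

    for (int t = next_in_chain(&pos, true); t != -1;
         t = next_in_chain(&pos, false))
        printf("returned %d\n", t);
    return 0;
}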
diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README
index 561ffbb9d47..edf33d90b1e 100644
--- a/src/backend/access/nbtree/README
+++ b/src/backend/access/nbtree/README
@@ -373,7 +373,7 @@ leaf-item deletions (if the deletion brings the leaf page to zero items,
it is now a candidate to be deleted, but that is a separate action).

An insertion that causes a page split is logged as a single WAL entry for
-the changes occuring on the insertion's level --- including update of the
+the changes occurring on the insertion's level --- including update of the
right sibling's left-link --- followed by a second WAL entry for the
insertion on the parent level (which might itself be a page split, requiring
an additional insertion above that, etc).
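The amended sentence describes a recursive logging discipline: each level's
changes, right sibling's left-link included, form one WAL record, and the
parent-level insertion is a separate record that may itself split and
recurse upward. A toy illustration of that ordering; insert_on_level and
MAX_SPLIT_LEVEL are invented for the example, and real nbtree WAL records
carry much more:

#include <stdio.h>
#include <stdbool.h>

#define MAX_SPLIT_LEVEL 2   /* toy: splits propagate two levels up */

static void
insert_on_level(int level, bool causes_split)
{
    if (causes_split)
    {
        /* one WAL entry covers everything changed on this level */
        printf("WAL: split on level %d (incl. right sibling's left-link)\n",
               level);
        /* the parent-level insertion is logged as its own WAL entry */
        insert_on_level(level + 1, level + 1 < MAX_SPLIT_LEVEL);
    }
    else
        printf("WAL: simple insertion on level %d\n", level);
}

int
main(void)
{
    insert_on_level(0, true);   /* a leaf split that propagates upward */
    return 0;
}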
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 0f4cea124d7..8bdf3879171 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -173,7 +173,7 @@ static MultiXactId *OldestVisibleMXactId;
* Definitions for the backend-local MultiXactId cache.
*
* We use this cache to store known MultiXacts, so we don't need to go to
- * SLRU areas everytime.
+ * SLRU areas every time.
*
* The cache lasts for the duration of a single transaction, the rationale
* for this being that most entries will contain our own TransactionId and
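The corrected sentence implies a simple lookup order: check the
backend-local cache first, go to the shared SLRU area only on a miss, and
drop the cache at transaction end. A sketch of that order with invented
names (lookup_members, cache_key), not the real multixact.c code, which
also keeps the member arrays themselves:

#include <stdio.h>

#define CACHE_SIZE 4

typedef unsigned int MultiXactId;

static MultiXactId cache_key[CACHE_SIZE];   /* backend-local, per-xact */
static int         cache_used = 0;

/* Return 1 on a cache hit; otherwise simulate the SLRU read and cache it. */
static int
lookup_members(MultiXactId multi)
{
    for (int i = 0; i < cache_used; i++)
        if (cache_key[i] == multi)
            return 1;                       /* hit: no SLRU access needed */

    printf("miss: reading members of %u from SLRU\n", multi);
    if (cache_used < CACHE_SIZE)
        cache_key[cache_used++] = multi;    /* remember for this xact */
    return 0;
}

int
main(void)
{
    lookup_members(7);      /* miss: goes to the SLRU */
    lookup_members(7);      /* hit: served from the local cache */
    cache_used = 0;         /* end-of-transaction reset: cache is per-xact */
    return 0;
}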
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 8d0aabff109..fe33c87dbf2 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7595,7 +7595,7 @@ LogCheckpointEnd(bool restartpoint)
* CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
* CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
* ignoring checkpoint_completion_target parameter.
- * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occured
+ * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
* since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
* CHECKPOINT_END_OF_RECOVERY).
*
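These flags translate into a small amount of decoding logic: FORCE means a
checkpoint happens even with no intervening XLOG activity, and shutdown or
end-of-recovery checkpoints imply it. A standalone sketch of how the flags
combine; the flag names follow xlog.h, but the numeric values here and
describe_checkpoint() itself are illustrative:

#include <stdio.h>

/* Names follow xlog.h; values here are illustrative. */
#define CHECKPOINT_IS_SHUTDOWN     0x0001
#define CHECKPOINT_END_OF_RECOVERY 0x0002
#define CHECKPOINT_IMMEDIATE       0x0004
#define CHECKPOINT_FORCE           0x0008

static void
describe_checkpoint(int flags)
{
    /* shutdown and end-of-recovery checkpoints imply FORCE */
    if (flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY))
        flags |= CHECKPOINT_FORCE;

    if (flags & CHECKPOINT_FORCE)
        printf("checkpoint even if no XLOG activity since the last one\n");
    if (flags & CHECKPOINT_IMMEDIATE)
        printf("finish ASAP, ignoring checkpoint_completion_target\n");
}

int
main(void)
{
    describe_checkpoint(CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_IMMEDIATE);
    return 0;
}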