Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c | 10
-rw-r--r--  src/backend/access/common/indextuple.c | 6
-rw-r--r--  src/backend/access/common/printtup.c | 2
-rw-r--r--  src/backend/access/common/reloptions.c | 4
-rw-r--r--  src/backend/access/common/tupconvert.c | 2
-rw-r--r--  src/backend/access/common/tupdesc.c | 2
-rw-r--r--  src/backend/access/gin/ginarrayproc.c | 2
-rw-r--r--  src/backend/access/gin/ginbulk.c | 2
-rw-r--r--  src/backend/access/gin/ginentrypage.c | 2
-rw-r--r--  src/backend/access/gin/ginfast.c | 8
-rw-r--r--  src/backend/access/gin/ginget.c | 16
-rw-r--r--  src/backend/access/gin/gininsert.c | 2
-rw-r--r--  src/backend/access/gin/ginscan.c | 2
-rw-r--r--  src/backend/access/gin/ginutil.c | 2
-rw-r--r--  src/backend/access/gin/ginxlog.c | 2
-rw-r--r--  src/backend/access/gist/gist.c | 2
-rw-r--r--  src/backend/access/gist/gistget.c | 6
-rw-r--r--  src/backend/access/gist/gistscan.c | 2
-rw-r--r--  src/backend/access/gist/gistsplit.c | 10
-rw-r--r--  src/backend/access/gist/gistutil.c | 8
-rw-r--r--  src/backend/access/gist/gistvacuum.c | 2
-rw-r--r--  src/backend/access/gist/gistxlog.c | 4
-rw-r--r--  src/backend/access/hash/hash.c | 8
-rw-r--r--  src/backend/access/hash/hashfunc.c | 10
-rw-r--r--  src/backend/access/hash/hashinsert.c | 2
-rw-r--r--  src/backend/access/hash/hashovfl.c | 16
-rw-r--r--  src/backend/access/hash/hashpage.c | 16
-rw-r--r--  src/backend/access/hash/hashsearch.c | 4
-rw-r--r--  src/backend/access/hash/hashsort.c | 4
-rw-r--r--  src/backend/access/hash/hashutil.c | 4
-rw-r--r--  src/backend/access/heap/heapam.c | 120
-rw-r--r--  src/backend/access/heap/hio.c | 20
-rw-r--r--  src/backend/access/heap/pruneheap.c | 12
-rw-r--r--  src/backend/access/heap/rewriteheap.c | 14
-rw-r--r--  src/backend/access/heap/syncscan.c | 6
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 10
-rw-r--r--  src/backend/access/heap/visibilitymap.c | 20
-rw-r--r--  src/backend/access/index/genam.c | 6
-rw-r--r--  src/backend/access/index/indexam.c | 14
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 28
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 34
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 28
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 24
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 52
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 14
-rw-r--r--  src/backend/access/spgist/spgdoinsert.c | 14
-rw-r--r--  src/backend/access/spgist/spginsert.c | 4
-rw-r--r--  src/backend/access/spgist/spgscan.c | 4
-rw-r--r--  src/backend/access/spgist/spgtextproc.c | 8
-rw-r--r--  src/backend/access/spgist/spgutils.c | 6
-rw-r--r--  src/backend/access/spgist/spgvacuum.c | 4
-rw-r--r--  src/backend/access/spgist/spgxlog.c | 6
-rw-r--r--  src/backend/access/transam/clog.c | 10
-rw-r--r--  src/backend/access/transam/multixact.c | 56
-rw-r--r--  src/backend/access/transam/slru.c | 18
-rw-r--r--  src/backend/access/transam/subtrans.c | 4
-rw-r--r--  src/backend/access/transam/timeline.c | 6
-rw-r--r--  src/backend/access/transam/transam.c | 4
-rw-r--r--  src/backend/access/transam/twophase.c | 14
-rw-r--r--  src/backend/access/transam/varsup.c | 12
-rw-r--r--  src/backend/access/transam/xact.c | 62
-rw-r--r--  src/backend/access/transam/xlog.c | 92
-rw-r--r--  src/backend/access/transam/xlogreader.c | 8
65 files changed, 454 insertions, 454 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 8c45fd6b95e..770cc778b18 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -21,7 +21,7 @@
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -387,7 +387,7 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
@@ -454,7 +454,7 @@ nocachegetattr(HeapTuple tuple,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
@@ -549,7 +549,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
/*
* cmin and cmax are now both aliases for the same field, which
- * can in fact also be a combo command id. XXX perhaps we should
+ * can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
@@ -709,7 +709,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
len += data_len;
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
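
The "one chunk" layout referred to above can be sketched as follows (stand-in struct names, not the actual HeapTupleData/HEAPTUPLESIZE definitions, which are additionally MAXALIGN'd): the management struct sits at the front of the allocation and t_data points just past it, so the tuple header plus body travel and get freed as a single palloc chunk.

#include <stdlib.h>

typedef struct SketchTupleHeader { int t_hoff; } SketchTupleHeader;	/* stand-in */

typedef struct SketchTuple
{
	size_t		t_len;			/* length of the tuple body */
	SketchTupleHeader *t_data;	/* points into this same allocation */
} SketchTuple;

#define SKETCH_TUPLESIZE sizeof(SketchTuple)	/* cf. HEAPTUPLESIZE */

static SketchTuple *
sketch_form_tuple(size_t data_len)
{
	/* one zeroed chunk for both the management struct and the body */
	SketchTuple *tuple = calloc(1, SKETCH_TUPLESIZE + data_len);

	if (tuple == NULL)
		return NULL;
	tuple->t_len = data_len;
	tuple->t_data = (SketchTupleHeader *) ((char *) tuple + SKETCH_TUPLESIZE);
	return tuple;
}
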
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 2f96a302011..febfe531940 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
- * on outside storage. This should be improved someday.
+ * on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
@@ -281,7 +281,7 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
@@ -348,7 +348,7 @@ nocache_index_getattr(IndexTuple tup,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 5286be5391e..fc99c8cc961 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -181,7 +181,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 17bbcb5dbab..84530dcfa6f 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -519,7 +519,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
@@ -847,7 +847,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *
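
To illustrate the validator contract spelled out above, here is a hedged, extension-style sketch (the option name "example_mode" and both helper functions are hypothetical; only add_string_reloption and the ereport machinery are real): the callback raises an error for unacceptable strings, and the supplied default must itself pass it.

#include "postgres.h"

#include "access/reloptions.h"

/* Must ereport(ERROR) when the argument string is not acceptable. */
static void
validate_example_mode(char *value)
{
	if (value == NULL ||
		(strcmp(value, "none") != 0 && strcmp(value, "fast") != 0))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid value for \"example_mode\": \"%s\"",
						value ? value : "(null)")));
}

/* Would run once at load time, e.g. from an extension's _PG_init(). */
static void
register_example_reloption(void)
{
	/* the default, "none", must itself pass validate_example_mode() */
	add_string_reloption(RELOPT_KIND_HEAP, "example_mode",
						 "hypothetical string option for illustration",
						 "none", validate_example_mode);
}
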
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index 075add5bc0e..e83344e3c1a 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -5,7 +5,7 @@
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index cc2e1ed0d23..4a5e255f0aa 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -540,7 +540,7 @@ TupleDescInitEntryCollation(TupleDesc desc,
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 55562c6f380..bd31ee2b53e 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = true;
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index 37ab49ccb47..d6d02ec3921 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow. To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 8cb5148c810..bd9908e3482 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -163,7 +163,7 @@ GinShortenTuple(IndexTuple itup, uint32 nipd)
* Form a non-leaf entry tuple by copying the key data from the given tuple,
* which can be either a leaf or non-leaf entry tuple.
*
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied. The specified child
* block number is inserted into t_tid.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 177a2246bc5..a0d6ae2a005 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -440,7 +440,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
* Create temporary index tuples for a single indexable item (one index column
* for the heap tuple specified by ht_ctid), and append them to the array
* in *collector. They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
* temp tuples for a given heap tuple must be written in one call to
* ginHeapTupleFastInsert.
*/
@@ -707,7 +707,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -761,7 +761,7 @@ ginInsertCleanup(GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -855,7 +855,7 @@ ginInsertCleanup(GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* guarantees that inserted row(s) will not continue on next page.
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 9dcf638dc23..d1462d091ac 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -164,7 +164,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
/*
* Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry. This supports three different match modes:
*
* 1. Partial-match support: scan from current point until the
* comparePartialFn says we're done.
@@ -260,7 +260,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
/*
* In ALL mode, we are not interested in null items, so we can
* stop if we get to a null-item placeholder (which will be the
- * last entry for a given attnum). We do want to include NULL_KEY
+ * last entry for a given attnum). We do want to include NULL_KEY
* and EMPTY_ITEM entries, though.
*/
if (icategory == GIN_CAT_NULL_ITEM)
@@ -956,14 +956,14 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
* that exact TID, or a lossy reference to the same page.
*
* This logic works only if a keyGetItem stream can never contain both
- * exact and lossy pointers for the same page. Else we could have a
+ * exact and lossy pointers for the same page. Else we could have a
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
@@ -992,7 +992,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
break;
/*
- * No hit. Update myAdvancePast to this TID, so that on the next pass
+ * No hit. Update myAdvancePast to this TID, so that on the next pass
* we'll move to the next possible entry.
*/
myAdvancePast = *item;
@@ -1508,10 +1508,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index beaa65317f3..af9ef3fab32 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -98,7 +98,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems)
* Adds array of item pointers to tuple's posting list, or
* creates posting tree and tuple pointing to tree in case
* of not enough space. Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple(). Returns a new, modified index tuple.
* items[] must be in sorted order with no duplicates.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index afee2dbd924..2dfa241e88a 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -387,7 +387,7 @@ ginNewScanKey(IndexScanDesc scan)
/*
* If the index is version 0, it may be missing null and placeholder
* entries, which would render searches for nulls and full-index scans
- * unreliable. Throw an error if so.
+ * unreliable. Throw an error if so.
*/
if (hasNullQuery && !so->isVoidRes)
{
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 8a716810517..f4a181d02b7 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -436,7 +436,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
- * pretty bad too. For small numbers of keys it'd likely be better to use
+ * pretty bad too. For small numbers of keys it'd likely be better to use
* a simple insertion sort.
*/
if (*nentries > 1)
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index f487ba5c1ad..f43ac50115b 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -675,7 +675,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
/*
* In normal operation, shiftList() takes exclusive lock on all the
- * pages-to-be-deleted simultaneously. During replay, however, it should
+ * pages-to-be-deleted simultaneously. During replay, however, it should
* be all right to lock them one at a time. This is dependent on the fact
* that we are deleting pages from the head of the list, and that readers
* share-lock the next page before releasing the one they are on. So we
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 3c2284566b0..65c15fd7b40 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1382,7 +1382,7 @@ initGISTstate(Relation index)
/*
* If the index column has a specified collation, we should honor that
* while doing comparisons. However, we may have a collatable storage
- * type for a noncollatable indexed data type. If there's no index
+ * type for a noncollatable indexed data type. If there's no index
* collation then specify default collation in case the support
* functions need collation. This is harmless if the support
* functions don't care about collation, so we just do it
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index e97ab8f3fd5..8c76fe15c01 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -31,7 +31,7 @@
*
* On success return for a heap tuple, *recheck_p is set to indicate
* whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
* since we must visit the lower index page if there's any doubt.
*
* If we are doing an ordered scan, so->distances[] is filled with distance
@@ -62,7 +62,7 @@ gistindex_keytest(IndexScanDesc scan,
/*
* If it's a leftover invalid tuple from pre-9.1, treat it as a match with
- * minimum possible distances. This means we'll always follow it to the
+ * minimum possible distances. This means we'll always follow it to the
* referenced page.
*/
if (GistTupleIsInvalid(tuple))
@@ -224,7 +224,7 @@ gistindex_keytest(IndexScanDesc scan,
* ntids: if not NULL, gistgetbitmap's output tuple counter
*
* If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
* we're doing a plain or ordered indexscan. For a plain indexscan, heap
* tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
* heap tuple TIDs are pushed into individual search queue items.
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index b5553ffe69b..858e1a67cfc 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -56,7 +56,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
/*
* If new item is heap tuple, it goes to front of chain; otherwise insert
* it before the first index-page item, so that index pages are visited in
- * LIFO order, ensuring depth-first search of index pages. See comments
+ * LIFO order, ensuring depth-first search of index pages. See comments
* in gist_private.h.
*/
if (GISTSearchItemIsHeap(*newitem))
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index c97f6da156e..5435cc70ccc 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns. In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len. This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
@@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need consider only columns starting
+ * the previously-computed union keys. We need consider only columns starting
* at attno.
*/
static void
@@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
/*
* There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side. We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
@@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
sv->spl_rdatum = v->spl_rattr[attno];
/*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing. Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2Coll(&giststate->picksplitFn[attno],
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index b9c1967ebc0..1aedaec3fae 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
@@ -456,7 +456,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty. Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
@@ -475,7 +475,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far. The next iteration of this loop
* will compare the next column.
*/
}
@@ -681,7 +681,7 @@ gistcheckpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
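
A minimal sketch of the tri-state tie-breaking that the gistchoose() comments above describe (illustrative helper names, not the function's actual code): -1 means no random choice has been made yet; once a zero-penalty tie forces a decision, the outcome is remembered until a strictly better tuple resets it.

#include <stdbool.h>
#include <stdlib.h>

/* -1 = undecided, 1 = keep current best on ties, 0 = replace it */
static bool
keep_best_on_tie(int *keep_current_best)
{
	if (*keep_current_best == -1)
		*keep_current_best = rand() & 1;	/* decide once, then remember */
	return (*keep_current_best == 1);
}

/* call after adopting a strictly better tuple, so later ties flip a fresh coin */
static void
reset_tie_state(int *keep_current_best)
{
	*keep_current_best = -1;
}
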
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 59c3105b083..dc32c9d5292 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -49,7 +49,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
stats->estimated_count = info->estimated_count;
/*
- * XXX the above is wrong if index is partial. Would it be OK to just
+ * XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 15b7eb024ae..56019c808fe 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
@@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 8895f585034..b3ebfae2ff9 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -78,7 +78,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
- * tuples by (expected) bucket number. However, such a sort is useless
+ * tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
@@ -248,7 +248,7 @@ hashgettuple(PG_FUNCTION_ARGS)
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
- * for the TID we previously returned. (Because we hold share lock on
+ * for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
@@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
@@ -655,7 +655,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index d5992821933..9a88aa5a432 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -11,7 +11,7 @@
* src/backend/access/hash/hashfunc.c
*
* NOTES
- * These functions are stored in pg_amproc. For each operator class
+ * These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
@@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
@@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
- * direction from the goal of parallelism. I did what I could. Rotates
+ * direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
@@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but it does need to affect all output
* mix(), on the other hand, does not need to affect all output
- * bits (affecting 32 bits is enough). The original hash function had
+ * bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
- * Returns a uint32 value. Every bit of the key affects every bit of
+ * Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
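
To make the mixing discussion above concrete, here is a toy three-word round in the same rotate-and-combine style (illustrative rotation constants, not the tuned mix()/final() schedule in hashfunc.c): each step is a read-after-write on a different accumulator, which is what spreads input bits across all three outputs over a few rounds.

#include <stdint.h>

/* Rotate left: kinder to the top and bottom bits than a plain shift,
 * at about the same cost on most machines, as the comment notes. */
static inline uint32_t
rot32(uint32_t x, int k)
{
	return (x << k) | (x >> (32 - k));
}

static void
toy_mix(uint32_t *a, uint32_t *b, uint32_t *c)
{
	*a -= *c;	*a ^= rot32(*c, 4);		*c += *b;
	*b -= *a;	*b ^= rot32(*a, 6);		*a += *c;
	*c -= *b;	*c ^= rot32(*b, 8);		*b += *a;
}
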
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 4508a36bd05..63be2f37872 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -90,7 +90,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 03199218f73..0d13359e6d2 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -80,7 +80,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@@ -89,12 +89,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@@ -157,7 +157,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@@ -253,7 +253,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
- * is the one we return to the caller. Both of these are correctly
+ * is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
@@ -284,7 +284,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -313,7 +313,7 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -494,7 +494,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
- * Initialize a new bitmap page. The metapage has a write-lock upon
+ * Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index e91419b12e3..62b0c062c31 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -49,7 +49,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@@ -136,7 +136,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@@ -344,7 +344,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
@@ -377,7 +377,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
- * calls to occur. This ensures that the smgr level has the right idea of
+ * calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
@@ -545,7 +545,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
@@ -603,7 +603,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
@@ -641,7 +641,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
+ * date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
@@ -876,7 +876,7 @@ _hash_splitbucket(Relation rel,
/*
* We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 91661ba0e03..052809218dc 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
@@ -269,7 +269,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
- * false. Else, return true and set the hashso_curpos for the
+ * false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 1dfa7109ca6..26a3f1c437a 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
- * bucket splits may occur during the index build. In that case we'd
+ * bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@@ -52,7 +52,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
hspool->index = index;
/*
- * Determine the bitmask for hash code values. Since there are currently
+ * Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
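
The mask computation referred to above can be sketched like this (a simplified stand-in for the _hash_log2()-based expression in the real code): round num_buckets up to a power of two and subtract one, so that "hashcode & mask" yields the bucket a tuple is expected to land in. If the row count was underestimated and buckets later split, each masked value simply maps to a couple of adjacent buckets, which is the locality argument in the header comment.

#include <stdint.h>

/* smallest power-of-two-minus-one mask that covers num_buckets buckets */
static uint32_t
bucket_mask(uint32_t num_buckets)
{
	uint32_t	mask = 1;

	while (mask < num_buckets)
		mask <<= 1;
	return mask - 1;
}
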
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index aa071d9185a..9773ac790a2 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
- * existing hash keys in the page. This is the appropriate place to start
+ * existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a8653e4a96c..ccdcb5be2a2 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -207,7 +207,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
- * might go into pages we already scanned. To guarantee consistent
+ * might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
@@ -215,7 +215,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
@@ -319,7 +319,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
}
/*
- * Be sure to check for interrupts at least once per page. Checks at
+ * Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters many
* pages' worth of consecutive dead tuples.
*/
@@ -344,7 +344,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1120,7 +1120,7 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
*
* Same as relation_openrv, but with an additional missing_ok argument
* allowing a NULL return rather than an error if the relation is not
- * found. (Note that some other causes, such as permissions problems,
+ * found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* ----------------
*/
@@ -1720,7 +1720,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
/*
* When first_call is true (and thus, skip is initially false) we'll
- * return the first tuple we find. But on later passes, heapTuple
+ * return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever), so
* we skip it and return the next match we find.
@@ -1802,7 +1802,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
- * show the latest version of the row. Note that it will not be changed
+ * show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@@ -1922,7 +1922,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously.
*
@@ -2009,7 +2009,7 @@ FreeBulkInsertState(BulkInsertState bistate)
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -2038,7 +2038,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2184,7 +2184,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
/*
* If the object id of this tuple has already been assigned, trust the
- * caller. There are a couple of ways this can happen. At initial db
+ * caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
@@ -2279,7 +2279,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2293,7 +2293,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
int nthispage;
/*
- * Find buffer where at least the next tuple will fit. If the page is
+ * Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
@@ -2660,10 +2660,10 @@ l1:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to delete
+ * other subxacts of this backend. It is legal for us to delete
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
- * exclusive). We don't bother changing the on-disk hint bits
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -2739,7 +2739,7 @@ l1:
* If this is the first possibly-multixact-able operation in the current
* transaction, set my per-backend OldestMemberMXactId setting. We can be
* certain that the transaction will never become a member of any older
- * MultiXactIds than that. (We have to do this even if we end up just
+ * MultiXactIds than that. (We have to do this even if we end up just
* using our own TransactionId below, since some other backend could
* incorporate our XID into a MultiXact immediately afterwards.)
*/
@@ -2755,7 +2755,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@@ -2859,7 +2859,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -2971,7 +2971,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on a
- * different page. But we must compute the list before obtaining buffer
+ * different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
@@ -3052,7 +3052,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
* If this is the first possibly-multixact-able operation in the
* current transaction, set my per-backend OldestMemberMXactId
* setting. We can be certain that the transaction will never become a
- * member of any older MultiXactIds than that. (We have to do this
+ * member of any older MultiXactIds than that. (We have to do this
* even if we end up just using our own TransactionId below, since
* some other backend could incorporate our XID into a MultiXact
* immediately afterwards.)
@@ -3097,7 +3097,7 @@ l2:
/*
* XXX note that we don't consider the "no wait" case here. This
* isn't a problem currently because no caller uses that case, but it
- * should be fixed if such a caller is introduced. It wasn't a
+ * should be fixed if such a caller is introduced. It wasn't a
* problem previously because this code would always wait, but now
* that some tuple locks do not conflict with one of the lock modes we
* use, it is possible that this case is interesting to handle
@@ -3135,7 +3135,7 @@ l2:
* it as locker, unless it is gone completely.
*
* If it's not a multi, we need to check for sleeping conditions
- * before actually going to sleep. If the update doesn't conflict
+ * before actually going to sleep. If the update doesn't conflict
* with the locks, we just continue without sleeping (but making sure
* it is preserved).
*/
@@ -3160,10 +3160,10 @@ l2:
goto l2;
/*
- * Note that the multixact may not be done by now. It could have
+ * Note that the multixact may not be done by now. It could have
* surviving members; our own xact or other subxacts of this
* backend, and also any other concurrent transaction that locked
- * the tuple with KeyShare if we only got TupleLockUpdate. If
+ * the tuple with KeyShare if we only got TupleLockUpdate. If
* this is the case, we have to be careful to mark the updated
* tuple with the surviving members in Xmax.
*
@@ -3369,7 +3369,7 @@ l2:
* If the toaster needs to be activated, OR if the new tuple will not fit
* on the same page as the old, then we need to release the content lock
* (but not the pin!) on the old tuple's buffer while we are off doing
- * TOAST and/or table-file-extension work. We must mark the old tuple to
+ * TOAST and/or table-file-extension work. We must mark the old tuple to
* show that it's already being updated, else other processes may try to
* update it themselves.
*
@@ -3435,7 +3435,7 @@ l2:
* there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
- * buffer locks on both old and new pages. To avoid deadlock against
+ * buffer locks on both old and new pages. To avoid deadlock against
* some other backend trying to get the same two locks in the other
* order, we must be consistent about the order we get the locks in.
* We use the rule "lock the lower-numbered page of the relation
@@ -3495,7 +3495,7 @@ l2:
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
- * has enough space for the new tuple. If they are the same buffer, only
+ * has enough space for the new tuple. If they are the same buffer, only
* one pin is held.
*/
@@ -3503,7 +3503,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
- * to do a HOT update. Check if any of the index columns have been
+ * to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (satisfies_hot)
@@ -3521,13 +3521,13 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
- * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(page, xid);
@@ -3612,7 +3612,7 @@ l2:
* Mark old tuple for invalidation from system caches at next command
* boundary, and mark the new tuple for invalidation in case we abort. We
* have to do this before releasing the buffer because oldtup is in the
- * buffer. (heaptup is all in local memory, but it's necessary to process
+ * buffer. (heaptup is all in local memory, but it's necessary to process
* both tuple versions in one call to inval.c so we can avoid redundant
* sinval messages.)
*/
@@ -3687,7 +3687,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
+ * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
* a single heap_deform_tuple call on each tuple, instead? But that
* doesn't work for system columns ...
*/
@@ -3710,7 +3710,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
- * same logical value. But we should be OK as long as there are no false
+ * same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@@ -3832,7 +3832,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -4056,15 +4056,15 @@ l3:
* However, if there are updates, we need to walk the update chain
* to mark future versions of the row as locked, too. That way,
* if somebody deletes that future version, we're protected
- * against the key going away. This locking of future versions
+ * against the key going away. This locking of future versions
* could block momentarily, if a concurrent transaction is
* deleting a key; or it could return a value to the effect that
- * the transaction deleting the key has already committed. So we
+ * the transaction deleting the key has already committed. So we
* do this before re-locking the buffer; otherwise this would be
* prone to deadlocks.
*
* Note that the TID we're locking was grabbed before we unlocked
- * the buffer. For it to change while we're not looking, the
+ * the buffer. For it to change while we're not looking, the
* other properties we're testing for below after re-locking the
* buffer would also change, in which case we would restart this
* loop above.
@@ -4285,7 +4285,7 @@ l3:
* Of course, the multixact might not be done here: if we're
* requesting a light lock mode, other transactions with light
* locks could still be alive, as well as locks owned by our
- * own xact or other subxacts of this backend. We need to
+ * own xact or other subxacts of this backend. We need to
* preserve the surviving MultiXact members. Note that it
* isn't absolutely necessary in the latter case, but doing so
* is simpler.
@@ -4328,7 +4328,7 @@ l3:
/*
* xwait is done, but if xwait had just locked the tuple then
* some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * this point. Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(
@@ -4337,7 +4337,7 @@ l3:
goto l3;
/*
- * Otherwise check if it committed or aborted. Note we cannot
+ * Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that should have been handled above. So
* that transaction must necessarily be gone by now.
@@ -4417,7 +4417,7 @@ failed:
* If this is the first possibly-multixact-able operation in the current
* transaction, set my per-backend OldestMemberMXactId setting. We can be
* certain that the transaction will never become a member of any older
- * MultiXactIds than that. (We have to do this even if we end up just
+ * MultiXactIds than that. (We have to do this even if we end up just
* using our own TransactionId below, since some other backend could
* incorporate our XID into a MultiXact immediately afterwards.)
*/
@@ -4453,7 +4453,7 @@ failed:
HeapTupleHeaderSetXmax(tuple->t_data, xid);
/*
- * Make sure there is no forward chain link in t_ctid. Note that in the
+ * Make sure there is no forward chain link in t_ctid. Note that in the
* cases where the tuple has been updated, we must not overwrite t_ctid,
* because it was set by the updater. Moreover, if the tuple has been
* updated, we need to follow the update chain to lock the new versions of
@@ -4465,8 +4465,8 @@ failed:
MarkBufferDirty(*buffer);
/*
- * XLOG stuff. You might think that we don't need an XLOG record because
- * there is no state change worth restoring after a crash. You would be
+ * XLOG stuff. You might think that we don't need an XLOG record because
+ * there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@@ -4630,7 +4630,7 @@ l5:
* If the XMAX is already a MultiXactId, then we need to expand it to
* include add_to_xmax; but if all the members were lockers and are
* all gone, we can do away with the IS_MULTI bit and just set
- * add_to_xmax as the only locker/updater. If all lockers are gone
+ * add_to_xmax as the only locker/updater. If all lockers are gone
* and we have an updater that aborted, we can also do without a
* multi.
*
@@ -4712,8 +4712,8 @@ l5:
{
/*
* LOCK_ONLY can be present alone only when a page has been
- * upgraded by pg_upgrade. But in that case,
- * TransactionIdIsInProgress() should have returned false. We
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
* assume it's no longer locked in this case.
*/
elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
@@ -4871,7 +4871,7 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
* The other transaction committed. If it was only a locker, then the
* lock is completely gone now and we can return success; but if it
* was an update, then what we do depends on whether the two lock
- * modes conflict. If they conflict, then we must report error to
+ * modes conflict. If they conflict, then we must report error to
* caller. But if they don't, we can fall through to allow the current
* transaction to lock the tuple.
*
@@ -5132,7 +5132,7 @@ l4:
* The initial tuple is assumed to be already locked.
*
 * This function doesn't check visibility, it just unconditionally marks the
- * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
* concurrently (or updated with the key being modified), sleep until the
* transaction doing it is finished.
*
@@ -5156,7 +5156,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
* If this is the first possibly-multixact-able operation in the
* current transaction, set my per-backend OldestMemberMXactId
* setting. We can be certain that the transaction will never become a
- * member of any older MultiXactIds than that. (We have to do this
+ * member of any older MultiXactIds than that. (We have to do this
* even if we end up just using our own TransactionId below, since
* some other backend could incorporate our XID into a MultiXact
* immediately afterwards.)
@@ -5175,7 +5175,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
- * of this function in Postgres are extremely limited. Nonetheless we
+ * of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@@ -5537,7 +5537,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
* heap_prepare_freeze_tuple
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID and cutoff MultiXactId. If so,
+ * are older than the specified cutoff XID and cutoff MultiXactId. If so,
* setup enough state (in the *frz output argument) to later execute and
* WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
* is to be changed.
@@ -5666,7 +5666,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
- * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
* Also get rid of the HEAP_KEYS_UPDATED bit.
*/
frz->t_infomask &= ~HEAP_XMAX_BITS;
@@ -6040,7 +6040,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.
@@ -6164,7 +6164,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
- * If we reached end of scan, rs_inited will now be false. We must
+ * If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@@ -6348,7 +6348,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-freeze operation. Caller must have already
+ * Perform XLogInsert for a heap-freeze operation. Caller must have already
* modified the buffer and marked it dirty.
*/
XLogRecPtr
@@ -6393,7 +6393,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
/*
* Perform XLogInsert for a heap-visible operation. 'block' is the block
* being marked all-visible, and vm_buffer is the buffer containing the
- * corresponding visibility map block. Both should have already been modified
+ * corresponding visibility map block. Both should have already been modified
* and dirtied.
*
* If checksums are enabled, we also add the heap_buffer to the chain to
@@ -6442,7 +6442,7 @@ log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
}
/*
- * Perform XLogInsert for a heap-update operation. Caller must already
+ * Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@@ -6772,7 +6772,7 @@ heap_xlog_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
- * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
* Also get rid of the HEAP_KEYS_UPDATED bit.
*/
tuple->t_infomask &= ~HEAP_XMAX_BITS;
@@ -7492,7 +7492,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
/*
* In normal operation, it is important to lock the two pages in
* page-number order, to avoid possible deadlocks against other update
- * operations going the other way. However, during WAL replay there can
+ * operations going the other way. However, during WAL replay there can
* be no other update happening, so we don't need to worry about that. But
* we *do* need to worry that we don't expose an inconsistent state to Hot
* Standby queries --- so the original page can't be unlocked before we've
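
Aside (illustration only, not part of this patch): several heapam.c hunks above refer to the rule of locking the lower-numbered page first when two buffers of the same relation must both be locked, taking only one lock when both references point at the same page. A minimal standalone sketch of that rule, in plain C with pthread mutexes; every toy_* name is invented and this is not PostgreSQL's buffer-manager API:

#include <pthread.h>
#include <stdint.h>

typedef struct ToyPage
{
    uint32_t        blkno;      /* hypothetical block number */
    pthread_mutex_t lock;       /* stands in for a buffer content lock */
} ToyPage;

/* Lock two pages of the same relation without risking deadlock. */
static void
toy_lock_page_pair(ToyPage *a, ToyPage *b)
{
    if (a == b)
    {
        pthread_mutex_lock(&a->lock);   /* same page: take the lock once */
        return;
    }
    if (a->blkno > b->blkno)
    {
        ToyPage *tmp = a;               /* swap so 'a' is the lower page */

        a = b;
        b = tmp;
    }
    pthread_mutex_lock(&a->lock);       /* lower-numbered page first */
    pthread_mutex_lock(&b->lock);       /* then the higher-numbered one */
}

Because every participant acquires the pair in the same order, two backends that need the same two pages can never each hold one lock while waiting for the other.
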
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 8da26908daf..c6fe2cb0e37 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -146,7 +146,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
/*
* If there are two buffers involved and we pinned just one of them,
* it's possible that the second one became all-visible while we were
- * busy pinning the first one. If it looks like that's a possible
+ * busy pinning the first one. If it looks like that's a possible
* scenario, we'll need to make a second pass through this loop.
*/
if (buffer2 == InvalidBuffer || buffer1 == buffer2
@@ -177,7 +177,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
- * enough there). In that case, the page will be pinned and locked only once.
+ * enough there). In that case, the page will be pinned and locked only once.
*
* For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by
* locking them only after locking the corresponding heap page, and taking
@@ -198,7 +198,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
- * insertions into the same relation. This keeps a pin on the current
+ * insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@@ -252,7 +252,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
- * on, as cached in the BulkInsertState or relcache entry. If that
+ * on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
* loop around and retry multiple times. (To insure this isn't an infinite
@@ -284,7 +284,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
@@ -306,7 +306,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
* If the page-level all-visible flag is set, caller will need to
* clear both that and the corresponding visibility map bit. However,
* by the time we return, we'll have x-locked the buffer, and we don't
- * want to do any I/O while in that state. So we check the bit here
+ * want to do any I/O while in that state. So we check the bit here
* before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock.
@@ -348,7 +348,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We now have the target page (and the other buffer, if any) pinned
- * and locked. However, since our initial PageIsAllVisible checks
+ * and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now be
* out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to
@@ -391,7 +391,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
- * any) and prepare to look elsewhere. We don't care which order we
+ * any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
@@ -433,7 +433,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* XXX This does an lseek - rather expensive - but at the moment it is the
- * only way to accurately determine how many blocks are in a relation. Is
+ * only way to accurately determine how many blocks are in a relation. Is
* it worth keeping an accurate file length in shared memory someplace,
* rather than relying on the kernel to do it for us?
*/
@@ -453,7 +453,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Release the file-extension lock; it's now OK for someone else to extend
- * the relation some more. Note that we cannot release this lock before
+ * the relation some more. Note that we cannot release this lock before
* we have buffer lock on the new page, or we risk a race condition
* against vacuumlazy.c --- see comments therein.
*/
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 8f0c02d9c2c..9a8db74cb95 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -100,7 +100,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer. It's
* unlikely to be *seriously* wrong, though, since reading either pd_lower
- * or pd_upper is probably atomic. Avoiding taking a lock seems more
+ * or pd_upper is probably atomic. Avoiding taking a lock seems more
* important than sometimes getting a wrong answer in what is after all
* just a heuristic estimate.
*/
@@ -315,8 +315,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made. Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
@@ -358,7 +358,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
- * any pruning occurs. So we have to be able to reap them
+ * any pruning occurs. So we have to be able to reap them
* separately from chain-pruning. (Note that
* HeapTupleHeaderIsHotUpdated will never return true for an
* XMIN_INVALID tuple, so this code will work even when there were
@@ -544,7 +544,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/*
* If the root entry had been a normal tuple, we are deleting it, so
- * count it in the result. But changing a redirect (even to DEAD
+ * count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
@@ -633,7 +633,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void
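
Aside (illustration only, not part of this patch): the pruneheap.c comments above describe a two-phase pattern -- record every intended item-pointer change in small arrays first, then apply them all inside one critical section. A bare-bones sketch of the recording side; field names and the array cap are illustrative, not copied from the real PruneState.

#include <stdint.h>

#define TOY_MAX_ITEMS 291   /* arbitrary cap for the sketch */

typedef struct ToyPruneState
{
    int      nredirected;
    int      ndead;
    int      nunused;
    uint16_t redirected[TOY_MAX_ITEMS * 2];   /* pairs: from-offset, to-offset */
    uint16_t nowdead[TOY_MAX_ITEMS];
    uint16_t nowunused[TOY_MAX_ITEMS];
} ToyPruneState;

static void
toy_record_redirect(ToyPruneState *ps, uint16_t from, uint16_t to)
{
    ps->redirected[ps->nredirected * 2] = from;
    ps->redirected[ps->nredirected * 2 + 1] = to;
    ps->nredirected++;
}

static void
toy_record_dead(ToyPruneState *ps, uint16_t offnum)
{
    ps->nowdead[ps->ndead++] = offnum;
}

static void
toy_record_unused(ToyPruneState *ps, uint16_t offnum)
{
    ps->nowunused[ps->nunused++] = offnum;
}
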
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 951894ce5ac..76f972e79f9 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
- * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
- * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
- * is encountered. That helps to keep the memory usage down. At the end,
+ * is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
- * for deadness using OldestXmin is not exact. In such a case we might
+ * for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
@@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
- * in the whole table. Note that if we do fail halfway through a CLUSTER,
+ * in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@@ -287,7 +287,7 @@ end_heap_rewrite(RewriteState state)
}
/*
- * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+ * If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
@@ -554,7 +554,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
- * Insert a tuple to the new relation. This has to track heap_insert
+ * Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
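
Aside (illustration only, not part of this patch): the rewriteheap.c header comments above describe resolving ctid links with two tables, depending on whether A (the older version) or B (the newer version) is encountered first. A simplified sketch of that control flow; a TID is collapsed to a bare uint64, the "hash tables" are flat arrays, and the removal of matched entries that the comments mention is omitted. All toy_* names are invented.

#include <stdint.h>

typedef uint64_t ToyTid;            /* a real TID is (block, offset) */

#define TOY_NOT_FOUND  UINT64_MAX
#define TOY_CAP        256

typedef struct { ToyTid key; ToyTid val; } ToyEntry;
typedef struct { ToyEntry e[TOY_CAP]; int n; } ToyMap;

static ToyMap toy_unresolved;   /* B's old TID -> A's new TID (A seen first) */
static ToyMap toy_old_to_new;   /* B's old TID -> B's new TID (B copied already) */

static ToyTid
toy_get(ToyMap *m, ToyTid key)
{
    for (int i = 0; i < m->n; i++)
        if (m->e[i].key == key)
            return m->e[i].val;
    return TOY_NOT_FOUND;
}

static void
toy_put(ToyMap *m, ToyTid key, ToyTid val)      /* no capacity check; sketch */
{
    m->e[m->n].key = key;
    m->e[m->n].val = val;
    m->n++;
}

static void
toy_write_ctid(ToyTid new_tid, ToyTid points_to)
{
    (void) new_tid;
    (void) points_to;           /* a real rewrite would update the tuple header */
}

/* Tuple A was copied to a_new_tid; its old ctid pointed at B's old TID. */
static void
toy_resolve_forward_link(ToyTid a_new_tid, ToyTid b_old_tid)
{
    ToyTid b_new_tid = toy_get(&toy_old_to_new, b_old_tid);

    if (b_new_tid != TOY_NOT_FOUND)
        toy_write_ctid(a_new_tid, b_new_tid);           /* saw B first */
    else
        toy_put(&toy_unresolved, b_old_tid, a_new_tid); /* saw A first: wait */
}

/* Tuple B (old TID b_old_tid) has just been copied to b_new_tid. */
static void
toy_note_tuple_copied(ToyTid b_old_tid, ToyTid b_new_tid)
{
    ToyTid a_new_tid = toy_get(&toy_unresolved, b_old_tid);

    if (a_new_tid != TOY_NOT_FOUND)
        toy_write_ctid(a_new_tid, b_new_tid);           /* fix up the waiting A */
    else
        toy_put(&toy_old_to_new, b_old_tid, b_new_tid);
}
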
diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c
index 09384d8850b..884485ccccd 100644
--- a/src/backend/access/heap/syncscan.c
+++ b/src/backend/access/heap/syncscan.c
@@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
- * to keep them synchronized to reduce the overall I/O needed. The goal is
+ * to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
- * tables in progress at any time. Therefore we just keep the scan positions
+ * tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
@@ -243,7 +243,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
- * so that number is passed in rather than computing it again. The result
+ * so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
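
Aside (illustration only, not part of this patch): the syncscan.c comments above note that scan positions live in a small LRU list because only a few large sequential scans can realistically be active at once. A self-contained sketch of such a list -- the size, names, and move-to-front policy here are illustrative only.

#include <stdint.h>

#define TOY_LRU_SIZE 8          /* "only a few large scans at a time" */

typedef struct { uint32_t relid; uint32_t blkno; } ToyScanLoc;

static ToyScanLoc toy_lru[TOY_LRU_SIZE];
static int        toy_lru_used = 0;

/* Remember that the scan of 'relid' is currently at block 'blkno'. */
static void
toy_ss_report_location(uint32_t relid, uint32_t blkno)
{
    int i;

    /* If the relation is already listed, update it and move it to the front. */
    for (i = 0; i < toy_lru_used; i++)
    {
        if (toy_lru[i].relid == relid)
        {
            for (; i > 0; i--)
                toy_lru[i] = toy_lru[i - 1];
            toy_lru[0].relid = relid;
            toy_lru[0].blkno = blkno;
            return;
        }
    }

    /* Otherwise insert at the front, evicting the tail if the list is full. */
    if (toy_lru_used < TOY_LRU_SIZE)
        toy_lru_used++;
    for (i = toy_lru_used - 1; i > 0; i--)
        toy_lru[i] = toy_lru[i - 1];
    toy_lru[0].relid = relid;
    toy_lru[0].blkno = blkno;
}
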
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 219e6a635ae..55afdc8ca44 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -552,7 +552,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
- * PLAIN storage). If necessary, we'll push it out as a new
+ * PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@@ -695,7 +695,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
- * inline. But skip this if there's no toast table to push them to.
+ * inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@@ -805,7 +805,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
}
/*
- * Finally we store attributes of type 'm' externally. At this point we
+ * Finally we store attributes of type 'm' externally. At this point we
* increase the target tuple size, so that 'm' attributes aren't stored
* externally unless really necessary.
*/
@@ -1351,7 +1351,7 @@ toast_save_datum(Relation rel, Datum value,
* those versions could easily reference the same toast value.
* When we copy the second or later version of such a row,
* reusing the OID will mean we select an OID that's already
- * in the new toast table. Check for that, and if so, just
+ * in the new toast table. Check for that, and if so, just
* fall through without writing the data again.
*
* While annoying and ugly-looking, this is a good thing
@@ -1417,7 +1417,7 @@ toast_save_datum(Relation rel, Datum value,
heap_insert(toastrel, toasttup, mycid, options, NULL);
/*
- * Create the index entry. We cheat a little here by not using
+ * Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
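
Aside (illustration only, not part of this patch): the tuptoaster.c comments above describe shrinking a tuple in successive passes until its projected size fits, saving the 'm' (main) columns for last and giving them a larger size target. The sketch below keeps only that loop shape; the helper names are invented stubs, and the real code runs additional passes (compression, external-value handling) that are omitted here.

#include <stdbool.h>
#include <stddef.h>

typedef struct ToyTupleState ToyTupleState;     /* opaque in this sketch */

/* Trivial stubs; a real implementation inspects per-column storage modes. */
static size_t toy_projected_size(ToyTupleState *ts) { (void) ts; return 0; }
static bool toy_externalize_one_x_or_e(ToyTupleState *ts) { (void) ts; return false; }
static bool toy_externalize_one_m(ToyTupleState *ts) { (void) ts; return false; }

static void
toy_shrink_tuple(ToyTupleState *ts, size_t max_len, size_t relaxed_max_len)
{
    /* Push out 'x'/'e' columns while the tuple is still too big. */
    while (toy_projected_size(ts) > max_len &&
           toy_externalize_one_x_or_e(ts))
        ;

    /* Only as a last resort, and against a larger target, touch 'm' columns. */
    while (toy_projected_size(ts) > relaxed_max_len &&
           toy_externalize_one_m(ts))
        ;
}
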
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 7f40d89b9f1..f0436859957 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -27,7 +27,7 @@
* the sense that we make sure that whenever a bit is set, we know the
* condition is true, but if a bit is not set, it might or might not be true.
*
- * Clearing a visibility map bit is not separately WAL-logged. The callers
+ * Clearing a visibility map bit is not separately WAL-logged. The callers
* must make sure that whenever a bit is cleared, the bit is cleared on WAL
* replay of the updating operation as well.
*
@@ -36,9 +36,9 @@
* it may still be the case that every tuple on the page is visible to all
* transactions; we just don't know that for certain. The difficulty is that
* there are two bits which are typically set together: the PD_ALL_VISIBLE bit
- * on the page itself, and the visibility map bit. If a crash occurs after the
+ * on the page itself, and the visibility map bit. If a crash occurs after the
* visibility map page makes it to disk and before the updated heap page makes
- * it to disk, redo must set the bit on the heap page. Otherwise, the next
+ * it to disk, redo must set the bit on the heap page. Otherwise, the next
* insert, update, or delete on the heap page will fail to realize that the
* visibility map bit must be cleared, possibly causing index-only scans to
* return wrong answers.
@@ -59,10 +59,10 @@
* the buffer lock over any I/O that may be required to read in the visibility
* map page. To avoid this, we examine the heap page before locking it;
* if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
- * bit. Then, we lock the buffer. But this creates a race condition: there
+ * bit. Then, we lock the buffer. But this creates a race condition: there
* is a possibility that in the time it takes to lock the buffer, the
* PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
- * buffer, pin the visibility map page, and relock the buffer. This shouldn't
+ * buffer, pin the visibility map page, and relock the buffer. This shouldn't
* happen often, because only VACUUM currently sets visibility map bits,
* and the race will only occur if VACUUM processes a given page at almost
* exactly the same time that someone tries to further modify it.
@@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
* visibilitymap_set - set a bit on a previously pinned page
*
* recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
- * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
+ * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* one provided; in normal running, we generate a new XLOG record and set the
- * page LSN to that value. cutoff_xid is the largest xmin on the page being
+ * page LSN to that value. cutoff_xid is the largest xmin on the page being
* marked all-visible; it is needed for Hot Standby, and can be
* InvalidTransactionId if the page contains no tuples.
*
@@ -320,10 +320,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
* releasing *buf after it's done testing and setting bits.
*
* NOTE: This function is typically called without a lock on the heap page,
- * so somebody else could change the bit just after we look at it. In fact,
+ * so somebody else could change the bit just after we look at it. In fact,
* since we don't lock the visibility map page either, it's even possible that
* someone else could have changed the bit just before we look at it, but yet
- * we might see the old value. It is the caller's responsibility to deal with
+ * we might see the old value. It is the caller's responsibility to deal with
* all concurrency issues!
*/
bool
@@ -526,7 +526,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
/*
* We might not have opened the relation at the smgr level yet, or we
- * might have been forced to close it by a sinval message. The code below
+ * might have been forced to close it by a sinval message. The code below
* won't necessarily notice relation extension immediately when extend =
* false, so we rely on sinval messages to ensure that our ideas about the
* size of the map aren't too far out of date.
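
Aside (illustration only, not part of this patch): with one visibility bit per heap page, finding "the bit for heap block N" is pure arithmetic -- which map page holds it, which byte within that page, and which bit within that byte. The constants below assume 8 KB pages with a 24-byte header and are illustrative, not copied from the header file.

#include <stdint.h>

#define TOY_MAP_BYTES_PER_PAGE  8168    /* usable bytes on an 8 KB page (assumed) */
#define TOY_HEAPBLOCKS_PER_BYTE 8
#define TOY_HEAPBLOCKS_PER_PAGE (TOY_MAP_BYTES_PER_PAGE * TOY_HEAPBLOCKS_PER_BYTE)

static void
toy_vm_locate(uint32_t heap_blk,
              uint32_t *map_blk, uint32_t *map_byte, uint8_t *map_bit)
{
    *map_blk  = heap_blk / TOY_HEAPBLOCKS_PER_PAGE;
    *map_byte = (heap_blk % TOY_HEAPBLOCKS_PER_PAGE) / TOY_HEAPBLOCKS_PER_BYTE;
    *map_bit  = heap_blk % TOY_HEAPBLOCKS_PER_BYTE;
}
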
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 31a419b841e..bc9e807401d 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -44,7 +44,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
- * routine does that. (We can't do it in the AM because index_endscan
+ * routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@@ -187,7 +187,7 @@ BuildIndexValueDescription(Relation indexRelation,
* at rd_opcintype not the index tupdesc.
*
* Note: this is a bit shaky for opclasses that have pseudotype
- * input types such as ANYARRAY or RECORD. Currently, the
+ * input types such as ANYARRAY or RECORD. Currently, the
* typoutput functions associated with the pseudotypes will work
* okay, but we might have to try harder in future.
*/
@@ -413,7 +413,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
- * Currently we do not support non-index-based scans here. (In principle
+ * Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index b87815544d9..2067347a615 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -81,7 +81,7 @@
*
* Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
* to check that we don't try to scan or do retail insertions into an index
- * that is currently being rebuilt or pending rebuild. This helps to catch
+ * that is currently being rebuilt or pending rebuild. This helps to catch
* things that don't work when reindexing system catalogs. The assertion
* doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
* when calling the index AM's ambuild routine, and there is no reason for
@@ -146,7 +146,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@@ -411,7 +411,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -551,7 +551,7 @@ index_fetch_heap(IndexScanDesc scan)
/*
* If we scanned a whole HOT chain and found only dead tuples, tell index
* AM to kill its entry for that TID (this will take effect in the next
- * amgettuple call, in index_getnext_tid). We do not do this when in
+ * amgettuple call, in index_getnext_tid). We do not do this when in
* recovery because it may violate MVCC to do so. See comments in
* RelationGetIndexScan().
*/
@@ -588,7 +588,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
- * earlier member. Must still hold pin on current heap page.
+ * earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(&scan->xs_ctup.t_self) ==
@@ -758,7 +758,7 @@ index_can_return(Relation indexRelation)
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -792,7 +792,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
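
Aside (illustration only, not part of this patch): index_getprocinfo, as the comments above say, keeps lookup info for the "default" support functions cached per indexed attribute so the expensive resolution happens only once. A generic lazy-cache sketch of that idea; the types, array sizes, and the resolver stub are all invented and stand in for the relcache machinery.

#include <stddef.h>

typedef int (*ToySupportFn) (const void *a, const void *b);

#define TOY_NATTS  4
#define TOY_NPROCS 2

static ToySupportFn toy_cache[TOY_NATTS][TOY_NPROCS];

/* Stub: a real system would consult the catalogs here. */
static ToySupportFn
toy_resolve_support_fn(int attno, int procnum)
{
    (void) attno;
    (void) procnum;
    return NULL;
}

static ToySupportFn
toy_get_support_fn(int attno, int procnum)
{
    ToySupportFn fn = toy_cache[attno][procnum];

    if (fn == NULL)
    {
        fn = toy_resolve_support_fn(attno, procnum);    /* expensive, done once */
        toy_cache[attno][procnum] = fn;
    }
    return fn;
}
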
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 3b1bde12efd..11209b91037 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -25,7 +25,7 @@
* Although any negative int32 (except INT_MIN) is acceptable for reporting
* "<", and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
- * That could overflow and give the wrong answer. Also, one must not
+ * That could overflow and give the wrong answer. Also, one must not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
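
Aside (illustration only, not part of this patch): the nbtcompare.c rule quoted above -- never compute "a - b" for 32-bit or wider types, and never return INT_MIN -- is easy to see in a standalone comparator. The function name is invented; the logic is the standard safe pattern.

#include <stdint.h>

static int
toy_int64_cmp(int64_t a, int64_t b)
{
    if (a > b)
        return 1;       /* never a - b: that can overflow */
    else if (a == b)
        return 0;
    else
        return -1;      /* fixed -1, never INT_MIN, since callers may negate */
}
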
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 8bf4344cded..fa15e8f59a1 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -88,7 +88,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
* and btinsert. By here, itup is filled in, including the TID.
*
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
+ * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
* don't actually insert.
@@ -127,7 +127,7 @@ top:
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be the
* right place for the key we want to insert. In this case, we need to
- * move right in the tree. See Lehman and Yao for an excruciatingly
+ * move right in the tree. See Lehman and Yao for an excruciatingly
* precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE);
@@ -207,7 +207,7 @@ top:
* is the first tuple on the next page.
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
- * we must wait for to see if it commits a conflicting tuple. If an actual
+ * we must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just ereport().
*
* However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
@@ -289,7 +289,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* If we are doing a recheck, we expect to find the tuple we
- * are rechecking. It's not a duplicate, but we have to keep
+ * are rechecking. It's not a duplicate, but we have to keep
* scanning.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING &&
@@ -478,7 +478,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* If the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -490,7 +490,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
- * where the new tuple could be inserted. The caller should hold an
+ * where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@@ -560,7 +560,7 @@ _bt_findinsertloc(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
@@ -661,7 +661,7 @@ _bt_findinsertloc(Relation rel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* The locking interactions in this code are critical. You should
@@ -925,7 +925,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* origpage is the original page to be split. leftpage is a temporary
* buffer that receives the left-sibling data, which will be copied back
* into origpage on success. rightpage is the new page that receives the
- * right-sibling data. If we fail before reaching the critical section,
+ * right-sibling data. If we fail before reaching the critical section,
* origpage hasn't been modified and leftpage is only workspace. In
* principle we shouldn't need to worry about rightpage either, because it
* hasn't been linked into the btree page structure; but to avoid leaving
@@ -1140,7 +1140,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@@ -1359,7 +1359,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -1595,7 +1595,7 @@ _bt_checksplitloc(FindSplitData *state,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1618,7 +1618,7 @@ _bt_insert_parent(Relation rel,
* Here we have to do something Lehman and Yao don't talk about: deal with
* a root split and construction of a new root. If our stack is empty
* then we have just split a node on what had been the root level when we
- * descended the tree. If it was still the root then we perform a
+ * descended the tree. If it was still the root then we perform a
* new-root construction. If it *wasn't* the root anymore, search to find
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
@@ -1768,7 +1768,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
* order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
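
Aside (illustration only, not part of this patch): the _bt_findinsertloc comments above describe the "move right until we get tired" heuristic, with the probability of moving right set at 0.99. A one-function sketch of that coin flip; rand() stands in for the backend's own random-number source.

#include <stdbool.h>
#include <stdlib.h>

static bool
toy_keep_moving_right(void)
{
    /* Move right 99% of the time; "get tired" and split 1% of the time. */
    return rand() <= (int) (0.99 * RAND_MAX);
}
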
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index f4077533bf5..135a3eeaba1 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -12,7 +12,7 @@
* src/backend/access/nbtree/nbtpage.c
*
* NOTES
- * Postgres btree pages look like ordinary relation pages. The opaque
+ * Postgres btree pages look like ordinary relation pages. The opaque
* data at high addresses includes pointers to left and right siblings
* and flag data describing page state. The first page in a btree, page
* zero, is special -- it stores meta-information describing the tree.
@@ -55,7 +55,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
@@ -73,7 +73,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@@ -190,7 +190,7 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
- * over again. (Is that really true? But it's hardly worth trying
+ * over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
@@ -253,7 +253,7 @@ _bt_getroot(Relation rel, int access)
CacheInvalidateRelcache(rel);
/*
- * swap root write lock for read lock. There is no danger of anyone
+ * swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
@@ -321,7 +321,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -496,7 +496,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -563,7 +563,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
- * blkno == P_NEW means to get an unallocated index page. The page
+ * blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@@ -594,7 +594,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check that
- * the page is still free. (For example, an already-free page could
+ * the page is still free. (For example, an already-free page could
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
@@ -773,7 +773,7 @@ _bt_page_recyclable(Page page)
/*
* Delete item(s) from a btree page during VACUUM.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -841,7 +841,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
/*
* The target-offsets array is not in the buffer, but pretend that it
- * is. When XLogInsert stores the whole buffer, the offsets array
+ * is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
@@ -1092,7 +1092,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it, check
+ * We can never delete rightmost pages nor root pages. While at it, check
* that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
@@ -1164,7 +1164,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
- * facilities not available in recovery mode). Instead, just set
+ * facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
@@ -1199,7 +1199,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
- * if so, give up. (Right now, that should never happen, since page
+ * if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
@@ -1228,7 +1228,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
lbuf = InvalidBuffer;
/*
- * Next write-lock the target page itself. It should be okay to take just
+ * Next write-lock the target page itself. It should be okay to take just
* a write lock not a superexclusive lock, since no scans would stop on an
* empty page.
*/
@@ -1350,7 +1350,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* Check that the parent-page index items we're about to delete/overwrite
- * contain what we expect. This can fail if the index has become corrupt
+ * contain what we expect. This can fail if the index has become corrupt
* for some reason. We want to throw any error before entering the
* critical section --- otherwise it'd be a PANIC.
*
@@ -1434,7 +1434,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* we're in VACUUM and would not otherwise have an XID. Having already
* updated links to the target, ReadNewTransactionId() suffices as an
* upper bound. Any scan having retained a now-stale link is advertising
- * in its PGXACT an xmin less than or equal to the value we read here. It
+ * in its PGXACT an xmin less than or equal to the value we read here. It
* will continue to do so, holding back RecentGlobalXmin, for the duration
* of that scan.
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index eb396b4b0e7..229d276d9c1 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -151,7 +151,7 @@ btbuild(PG_FUNCTION_ARGS)
/*
* If we are reindexing a pre-existing index, it is critical to send out a
* relcache invalidation SI message to ensure all backends re-read the
- * index metapage. We expect that the caller will ensure that happens
+ * index metapage. We expect that the caller will ensure that happens
* (typically as a side effect of updating index stats, but it must happen
* even if the stats don't change!)
*/
@@ -216,7 +216,7 @@ btbuildempty(PG_FUNCTION_ARGS)
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, P_NONE, 0);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
PageSetChecksumInplace(metapage, BTREE_METAPAGE);
smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
@@ -435,7 +435,7 @@ btbeginscan(PG_FUNCTION_ARGS)
/*
* We don't know yet whether the scan will be index-only, so we do not
- * allocate the tuple workspace arrays until btrescan. However, we set up
+ * allocate the tuple workspace arrays until btrescan. However, we set up
* scan->xs_itupdesc whether we'll need it or not, since that's so cheap.
*/
so->currTuples = so->markTuples = NULL;
@@ -480,7 +480,7 @@ btrescan(PG_FUNCTION_ARGS)
/*
* Allocate tuple workspace arrays, if needed for an index-only scan and
- * not already done in a previous rescan call. To save on palloc
+ * not already done in a previous rescan call. To save on palloc
* overhead, both workspaces are allocated as one palloc block; only this
* function and btendscan know that.
*
@@ -960,7 +960,7 @@ restart:
vstate->lastBlockLocked = blkno;
/*
- * Check whether we need to recurse back to earlier pages. What we
+ * Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.
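
Aside (illustration only, not part of this patch): btrescan's comments above mention that the two tuple workspaces are allocated lazily, and as a single allocation split in half to save allocator overhead. A generic sketch of that pattern; malloc and TOY_BLCKSZ stand in for palloc and the real block size, and error handling is omitted.

#include <stdlib.h>

#define TOY_BLCKSZ 8192

typedef struct ToyScanOpaque
{
    char *currTuples;   /* workspace for the current page's tuples */
    char *markTuples;   /* workspace for the marked page's tuples */
} ToyScanOpaque;

static void
toy_alloc_workspaces_if_needed(ToyScanOpaque *so, int want_itup)
{
    if (want_itup && so->currTuples == NULL)
    {
        /* One allocation, two halves; freed together at end of scan. */
        so->currTuples = malloc(2 * TOY_BLCKSZ);
        so->markTuples = so->currTuples + TOY_BLCKSZ;
    }
}
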
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index ac98589477b..cfd13038367 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -50,7 +50,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
@@ -227,7 +227,7 @@ _bt_moveright(Relation rel,
* (or leaf keys > given scankey when nextkey is true).
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -359,7 +359,7 @@ _bt_compare(Relation rel,
/*
* The scan key is set up with the attribute number associated with each
* term in the key. It is important that, if the index is multi-key, the
- * scan contain the first k key attributes, and that they be in order. If
+ * scan contain the first k key attributes, and that they be in order. If
* you think about how multi-key ordering works, you'll understand why
* this is.
*
@@ -398,7 +398,7 @@ _bt_compare(Relation rel,
/*
* The sk_func needs to be passed the index value as left arg and
* the sk_argument as right arg (they might be of different
- * types). Since it is convenient for callers to think of
+ * types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item, we have
* to flip the sign of the comparison result. (Unless it's a DESC
* column, in which case we *don't* flip the sign.)
@@ -427,7 +427,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@@ -483,7 +483,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
- * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
+ * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@@ -698,7 +698,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
+ * scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
@@ -817,7 +817,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Find first item >= scankey, then back up one to arrive at last
- * item < scankey. (Note: this positioning strategy is only used
+ * item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
*/
@@ -866,7 +866,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTGreaterEqualStrategyNumber:
/*
- * Find first item >= scankey. (This is only used for forward
+ * Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
@@ -944,7 +944,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed. (If
+ * the last item on this page. Adjust the starting offset if needed. (If
* this results in an offset before the first item or after the last one,
* _bt_readpage will report no items found, and then we'll step to the
* next page as needed.)
@@ -1260,7 +1260,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
* than the walk-right case because of the possibility that the page
* to our left splits while we are in flight to it, plus the
* possibility that the page we were on gets deleted after we leave
- * it. See nbtree/README for details.
+ * it. See nbtree/README for details.
*/
for (;;)
{
@@ -1355,7 +1355,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
- * because half-dead pages are still in the sibling chain. Caller
+ * because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
@@ -1381,7 +1381,7 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page (there
+ * It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the deleted
* one's keyspace, so stepping left from it will take us where we
* want to be.
@@ -1425,7 +1425,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 52c5a2676ef..ec5bb4f7bfb 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -7,7 +7,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -42,11 +42,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
- * steps of the build. After completing the index build, we can just sync
+ * steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
- * just after the index build. However, it is clearly not sufficient if the
+ * just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
@@ -89,7 +89,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -160,7 +160,7 @@ _bt_spoolinit(Relation heap, Relation index, bool isunique, bool isdead)
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
- * unique index actually requires two BTSpool objects. We expect that the
+ * unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
*/
@@ -298,7 +298,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
PageSetChecksumInplace(page, blkno);
/*
- * Now write the page. There's no need for smgr to schedule an fsync for
+ * Now write the page. There's no need for smgr to schedule an fsync for
* this write; we'll do it ourselves before ending the build.
*/
if (blkno == wstate->btws_pages_written)
@@ -423,14 +423,14 @@ _bt_sortaddtup(Page page,
* A leaf page being built looks like:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp0 linp1 linp2 ... |
+ * | PageHeaderData | linp0 linp1 linp2 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
* | ^ last |
* | |
* +-------------+------------------------------------+
- * | | itemN ... |
+ * | | itemN ... |
* +-------------+------------------+-----------------+
* | ... item3 item2 item1 | "special space" |
* +--------------------------------+-----------------+
@@ -493,9 +493,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
RelationGetRelationName(wstate->index))));
/*
- * Check to see if page is "full". It's definitely full if the item won't
+ * Check to see if page is "full". It's definitely full if the item won't
* fit. Otherwise, compare to the target freespace derived from the
- * fillfactor. However, we must put at least two items on each page, so
+ * fillfactor. However, we must put at least two items on each page, so
* disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
@@ -568,7 +568,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
}
/*
- * Write out the old page. We never need to touch it again, so we can
+ * Write out the old page. We never need to touch it again, so we can
* free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
@@ -805,7 +805,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
/*
* If the index is WAL-logged, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a non-WAL-logged index we don't
+ * safe to commit the transaction. (For a non-WAL-logged index we don't
* care since the index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build. It's
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 352c77cbea2..99f9f290f4f 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
continue;
/*
- * First, deconstruct the array into elements. Anything allocated
+ * First, deconstruct the array into elements. Anything allocated
* here (including a possibly detoasted array value) is in the
* workspace context.
*/
@@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
&elem_values, &elem_nulls, &num_elems);
/*
- * Compress out any null elements. We can ignore them since we assume
+ * Compress out any null elements. We can ignore them since we assume
* all btree operators are strict.
*/
num_nonnulls = 0;
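A hedged, standalone sketch of the null-compaction idea (not the actual _bt_preprocess_array_keys loop; the int element type stands in for Datum): surviving non-null elements are shifted left in place and counted.

/*
 * Keep only the elements whose null flag is clear, preserving order, and
 * report how many survive.  Illustrative stand-in types.
 */
#include <stdbool.h>

static int
compress_out_nulls(int *elem_values, const bool *elem_nulls, int num_elems)
{
    int num_nonnulls = 0;

    for (int i = 0; i < num_elems; i++)
    {
        if (!elem_nulls[i])
            elem_values[num_nonnulls++] = elem_values[i];   /* shift survivors left */
    }
    return num_nonnulls;    /* zero survivors means the qual cannot be satisfied */
}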
@@ -517,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg)
* _bt_start_array_keys() -- Initialize array keys at start of a scan
*
* Set up the cur_elem counters and fill in the first sk_argument value for
- * each array scankey. We can't do this until we know the scan direction.
+ * each array scankey. We can't do this until we know the scan direction.
*/
void
_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
@@ -670,8 +670,8 @@ _bt_restore_array_keys(IndexScanDesc scan)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover which scan keys must be
- * satisfied to continue the scan. It also attempts to eliminate redundant
- * keys and detect contradictory keys. (If the index opfamily provides
+ * satisfied to continue the scan. It also attempts to eliminate redundant
+ * keys and detect contradictory keys. (If the index opfamily provides
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
@@ -702,7 +702,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -737,7 +737,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* Note: the reason we have to copy the preprocessed scan keys into private
* storage is that we are modifying the array based on comparisons of the
* key argument values, which could change on a rescan or after moving to
- * new elements of array keys. Therefore we can't overwrite the source data.
+ * new elements of array keys. Therefore we can't overwrite the source data.
*/
void
_bt_preprocess_keys(IndexScanDesc scan)
@@ -919,7 +919,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@@ -1017,7 +1017,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
+ * may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -1043,7 +1043,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
StrategyNumber strat;
/*
- * First, deal with cases where one or both args are NULL. This should
+ * First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
@@ -1183,7 +1183,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied. We return TRUE if the
+ * a NULL means that the qual cannot be satisfied. We return TRUE if the
* comparison value isn't NULL, or FALSE if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
@@ -1212,7 +1212,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
- * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
+ * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*
@@ -1284,7 +1284,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns
@@ -1296,7 +1296,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
@@ -1482,7 +1482,7 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
@@ -1498,8 +1498,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
@@ -1593,7 +1593,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
@@ -1609,8 +1609,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
@@ -1631,7 +1631,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
+ * But it can never match. If all the earlier row comparison
* columns are required for the scan direction, we can stop the
* scan, because there can't be another tuple that will succeed.
*/
@@ -1696,7 +1696,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Tuple fails this qual. If it's a required qual for the current
* scan direction, then we can conclude no further tuples will pass,
- * either. Note we have to look at the deciding column, not
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1722,7 +1722,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DEAD status (which is only a hint).
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@@ -1806,8 +1806,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
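A standalone model of the shared tracking area this comment describes; the struct, field names, and MAX_ACTIVE_VACUUMS bound are hypothetical stand-ins (the real code sizes the array from MaxBackends and protects it with a lock).

/*
 * One counter hands out cycle IDs; a small fixed array remembers the
 * cycle ID of each currently-active vacuum.  Names are illustrative.
 */
#include <stdint.h>

#define MAX_ACTIVE_VACUUMS 64           /* stand-in for MaxBackends */

typedef struct
{
    uint16_t cycle_ctr;                 /* last cycle ID handed out */
    int      num_vacuums;               /* entries in use below */
    struct
    {
        uint32_t relid;                 /* index being vacuumed */
        uint16_t cycleid;               /* its assigned cycle ID */
    } vacuums[MAX_ACTIVE_VACUUMS];
} VacCycleArea;

/* Assign the next cycle ID and record it; caller is assumed to hold the area's lock. */
static uint16_t
start_vacuum_cycle(VacCycleArea *area, uint32_t relid)
{
    uint16_t id = ++area->cycle_ctr;

    area->vacuums[area->num_vacuums].relid = relid;
    area->vacuums[area->num_vacuums].cycleid = id;
    area->num_vacuums++;
    return id;
}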
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 8e2b0b6459a..027c1cb5479 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -128,7 +128,7 @@ forget_matching_deletion(RelFileNode node, BlockNumber delblk)
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
- * eye on. Is it worth changing just on general principles? See also the
+ * eye on. Is it worth changing just on general principles? See also the
* notes in btree_xlog_split().
*/
static void
@@ -179,7 +179,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
pageop->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) metapg)->pd_lower =
@@ -387,7 +387,7 @@ btree_xlog_split(bool onleft, bool isroot,
/*
* Remove the items from the left page that were copied to the
- * right page. Also remove the old high key, if any. (We must
+ * right page. Also remove the old high key, if any. (We must
* remove everything before trying to insert any items, else
* we risk not having enough space.)
*/
@@ -625,7 +625,7 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* In what follows, we have to examine the previous state of the index
- * page, as well as the heap page(s) it points to. This is only valid if
+ * page, as well as the heap page(s) it points to. This is only valid if
* WAL replay has reached a consistent database state, which means that
* the preceding check is not just an optimization, but is *necessary*. We
* won't have let in any user sessions before we reach consistency.
@@ -634,9 +634,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
- * Get index page. If the DB is consistent, this should not fail, nor
+ * Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
@@ -740,7 +740,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
* If we have any conflict processing to do, it must happen before we
* update the page.
*
- * Btree delete records can conflict with standby queries. You might
+ * Btree delete records can conflict with standby queries. You might
* think that vacuum records would conflict as well, but we've handled
* that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
* cleaned by the vacuum of the heap and so we can resolve any conflicts
diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index 62d3d3a30ec..57cbe4d2f8b 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -25,7 +25,7 @@
/*
* SPPageDesc tracks all info about a page we are inserting into. In some
* situations it actually identifies a tuple, or even a specific node within
- * an inner tuple. But any of the fields can be invalid. If the buffer
+ * an inner tuple. But any of the fields can be invalid. If the buffer
* field is valid, it implies we hold pin and exclusive lock on that buffer.
* The page pointer should be valid exactly when the buffer is.
*/
@@ -249,7 +249,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
else
{
/*
- * Tuple must be inserted into existing chain. We mustn't change the
+ * Tuple must be inserted into existing chain. We mustn't change the
* chain's head address, but we don't need to chase the entire chain
* to put the tuple at the end; we can insert it second.
*
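The "insert it second" trick reads naturally as a two-pointer splice; a self-contained toy version (not the SP-GiST tuple chain itself) looks like this:

/*
 * The chain head keeps its address (so references to it stay valid), and
 * the new entry is spliced in immediately after it rather than appended
 * at the tail.
 */
typedef struct ChainItem
{
    struct ChainItem *next;
    int               payload;
} ChainItem;

static void
insert_second(ChainItem *head, ChainItem *newitem)
{
    newitem->next = head->next;   /* new item inherits the old second element */
    head->next = newitem;         /* head now chains to the new item */
}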
@@ -814,7 +814,7 @@ doPickSplit(Relation index, SpGistState *state,
* We may not actually insert new tuple because another picksplit may be
* necessary due to too large value, but we will try to allocate enough
* space to include it; and in any case it has to be included in the input
- * for the picksplit function. So don't increment nToInsert yet.
+ * for the picksplit function. So don't increment nToInsert yet.
*/
in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@@ -872,7 +872,7 @@ doPickSplit(Relation index, SpGistState *state,
/*
* Check to see if the picksplit function failed to separate the values,
* ie, it put them all into the same child node. If so, select allTheSame
- * mode and create a random split instead. See comments for
+ * mode and create a random split instead. See comments for
* checkAllTheSame as to why we need to know if the new leaf tuples could
* fit on one page.
*/
@@ -1037,7 +1037,7 @@ doPickSplit(Relation index, SpGistState *state,
&xlrec.initDest);
/*
- * Attempt to assign node groups to the two pages. We might fail to
+ * Attempt to assign node groups to the two pages. We might fail to
* do so, even if totalLeafSizes is less than the available space,
* because we can't split a group across pages.
*/
@@ -1917,7 +1917,7 @@ spgdoinsert(Relation index, SpGistState *state,
if (current.blkno == InvalidBlockNumber)
{
/*
- * Create a leaf page. If leafSize is too large to fit on a page,
+ * Create a leaf page. If leafSize is too large to fit on a page,
* we won't actually use the page yet, but it simplifies the API
* for doPickSplit to always have a leaf page at hand; so just
* quietly limit our request to a page size.
@@ -2120,7 +2120,7 @@ spgdoinsert(Relation index, SpGistState *state,
out.result.addNode.nodeLabel);
/*
- * Retry insertion into the enlarged node. We assume that
+ * Retry insertion into the enlarged node. We assume that
* we'll get a MatchNode result this time.
*/
goto process_inner_tuple;
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index 2a50d87c74b..a354cb8973f 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -163,7 +163,7 @@ spgbuildempty(PG_FUNCTION_ARGS)
page = (Page) palloc(BLCKSZ);
SpGistInitMetapage(page);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
(char *) page, true);
@@ -232,7 +232,7 @@ spginsert(PG_FUNCTION_ARGS)
/*
* We might have to repeat spgdoinsert() multiple times, if conflicts
* occur with concurrent insertions. If so, reset the insertCtx each time
- * to avoid cumulative memory consumption. That means we also have to
+ * to avoid cumulative memory consumption. That means we also have to
* redo initSpGistState(), but it's cheap enough not to matter.
*/
while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull))
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index c9d3cb686c3..eb049ede4e7 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -103,7 +103,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
* Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
*
* The point here is to eliminate null-related considerations from what the
- * opclass consistent functions need to deal with. We assume all SPGiST-
+ * opclass consistent functions need to deal with. We assume all SPGiST-
* indexable operators are strict, so any null RHS value makes the scan
* condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL
* conditions; their effect is reflected into searchNulls/searchNonNulls.
@@ -600,7 +600,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
if (so->want_itup)
{
/*
- * Reconstruct desired IndexTuple. We have to copy the datum out of
+ * Reconstruct desired IndexTuple. We have to copy the datum out of
* the temp context anyway, so we may as well create the tuple here.
*/
so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index e430d9c1ace..36fae98287c 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -26,11 +26,11 @@
* In the worst case, an inner tuple in a text radix tree could have as many
* as 256 nodes (one for each possible byte value). Each node can take 16
* bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page
- * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
+ * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
* already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
* it is always possible to get "SPGiST inner tuple size exceeds maximum"
* if there are too many distinct next-byte values at a given place in the
@@ -327,7 +327,7 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
}
/*
- * Sort by label bytes so that we can group the values into nodes. This
+ * Sort by label bytes so that we can group the values into nodes. This
* also ensures that the nodes are ordered by label value, allowing the
* use of binary search in searchChar.
*/
@@ -377,7 +377,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
/*
* Reconstruct values represented at this tuple, including parent data,
- * prefix of this tuple if any, and the node label if any. in->level
+ * prefix of this tuple if any, and the node label if any. in->level
* should be the length of the previously reconstructed value, and the
* number of bytes added here is prefixSize or prefixSize + 1.
*
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 64fd82fbf30..aed1b8d839b 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When requesting an inner page, if we get one with the wrong parity,
* we just release the buffer and try again. We will get a different page
- * because GetFreeIndexPage will have marked the page used in FSM. The page
+ * because GetFreeIndexPage will have marked the page used in FSM. The page
* is entered in our local lastUsedPages cache, so there's some hope of
* making use of it later in this session, but otherwise we rely on VACUUM
* to eventually re-enter the page in FSM, making it available for recycling.
@@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When we return a buffer to the caller, the page is *not* entered into
* the lastUsedPages cache; we expect the caller will do so after it's taken
- * whatever space it will use. This is because after the caller has used up
+ * whatever space it will use. This is because after the caller has used up
* some space, the page might have less space than whatever was cached already,
* so we'd rather not trash the old cache entry.
*/
@@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
/*
* If possible, increase the space request to include relation's
- * fillfactor. This ensures that when we add unrelated tuples to a page,
+ * fillfactor. This ensures that when we add unrelated tuples to a page,
* we try to keep 100-fillfactor% available for adding tuples that are
* related to the ones already on it. But fillfactor mustn't cause an
* error for requests that would otherwise be legal.
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 1874bdd0953..c7f1b8ec473 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -211,7 +211,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* Figure out exactly what we have to do. We do this separately from
* actually modifying the page, mainly so that we have a representation
* that can be dumped into WAL and then the replay code can do exactly
- * the same thing. The output of this step consists of six arrays
+ * the same thing. The output of this step consists of six arrays
* describing four kinds of operations, to be performed in this order:
*
* toDead[]: tuple numbers to be replaced with DEAD tuples
@@ -287,7 +287,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else
{
/*
- * Second or later live tuple. Arrange to re-chain it to the
+ * Second or later live tuple. Arrange to re-chain it to the
* previous live one, if there was a gap.
*/
if (interveningDeletable)
diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c
index 3f5556f65f7..4fe857d3ed2 100644
--- a/src/backend/access/spgist/spgxlog.c
+++ b/src/backend/access/spgist/spgxlog.c
@@ -41,7 +41,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
}
/*
- * Add a leaf tuple, or replace an existing placeholder tuple. This is used
+ * Add a leaf tuple, or replace an existing placeholder tuple. This is used
* to replay SpGistPageAddNewItem() operations. If the offset points at an
* existing tuple, it had better be a placeholder tuple.
*/
@@ -462,7 +462,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
}
/*
- * Update parent downlink. Since parent could be in either of the
+ * Update parent downlink. Since parent could be in either of the
* previous two buffers, it's a bit tricky to determine which BKP bit
* applies.
*/
@@ -799,7 +799,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
bbi++;
/*
- * Now we can release the leaf-page locks. It's okay to do this before
+ * Now we can release the leaf-page locks. It's okay to do this before
* updating the parent downlink.
*/
if (BufferIsValid(srcBuffer))
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 6a963b6116d..0add733984d 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -11,15 +11,15 @@
* log can be broken into relatively small, independent segments.
*
* XLOG interactions: this module generates an XLOG record whenever a new
- * CLOG page is initialized to zeroes. Other writes of CLOG come from
+ * CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. For synchronous
+ * on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
- * XLOG that far and satisfy the WAL rule. We don't have to worry about this
+ * XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
@@ -105,7 +105,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* in the tree of xid. In various cases nsubxids may be zero.
*
* lsn must be the WAL location of the commit record when recording an async
- * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
+ * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
@@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
* Testing during the PostgreSQL 9.2 development cycle revealed that on a
* large multi-processor system, it was possible to have more CLOG page
* requests in flight at one time than the number of CLOG buffers which existed
- * at that time, which was hardcoded to 8. Further testing revealed that
+ * at that time, which was hardcoded to 8. Further testing revealed that
* performance dropped off with more than 32 CLOG buffers, possibly because
* the linear buffer search algorithm doesn't scale well.
*
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index bacdbd64eff..8886ce4c672 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -5,7 +5,7 @@
*
* The pg_multixact manager is a pg_clog-like manager that stores an array of
* MultiXactMember for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. Each MultiXactMember is comprised of a
+ * shared-row-lock implementation. Each MultiXactMember is comprised of a
* TransactionId and a set of flag bits. The name is a bit historical:
* originally, a MultiXactId consisted of more than one TransactionId (except
* in rare corner cases), hence "multi". Nowadays, however, it's perfectly
@@ -18,7 +18,7 @@
*
* We use two SLRU areas, one for storing the offsets at which the data
* starts for each MultiXactId in the other one. This trick allows us to
- * store variable length arrays of TransactionIds. (We could alternatively
+ * store variable length arrays of TransactionIds. (We could alternatively
* use one area containing counts and TransactionIds, with valid MultiXactId
* values pointing at slots containing counts; but that way seems less robust
* since it would get completely confused if someone inquired about a bogus
@@ -38,7 +38,7 @@
*
* Like clog.c, and unlike subtrans.c, we have to preserve state across
* crashes and ensure that MXID and offset numbering increases monotonically
- * across a crash. We do this in the same way as it's done for transaction
+ * across a crash. We do this in the same way as it's done for transaction
* IDs: the WAL record is guaranteed to contain evidence of every MXID we
* could need to worry about, and we just make sure that at the end of
* replay, the next-MXID and next-offset counters are at least as large as
@@ -50,7 +50,7 @@
* The minimum value in each database is stored in pg_database, and the
* global minimum is part of pg_control. Any vacuum that is able to
* advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments. When new multixactid
+ * and uses this value to truncate older segments. When new multixactid
* values are to be created, care is taken that the counter does not
* fall within the wraparound horizon considering the global minimum value.
*
@@ -85,13 +85,13 @@
/*
- * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
+ * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* used everywhere else in Postgres.
*
* Note: because MultiXactOffsets are 32 bits and wrap around at 0xFFFFFFFF,
* MultiXact page numbering also wraps around at
* 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE, and segment numbering at
- * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need
+ * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need
* take no explicit notice of that fact in this module, except when comparing
* segment and page numbers in TruncateMultiXact (see
* MultiXactOffsetPagePrecedes).
@@ -110,7 +110,7 @@
* additional flag bits for each TransactionId. To do this without getting
* into alignment issues, we store four bytes of flags, and then the
* corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group"; groups
- * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
+ * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* per page. This wastes 12 bytes per page, but that's OK -- simplicity (and
* performance) trumps space efficiency here.
*
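The arithmetic behind "409 groups per page" can be spelled out as a small standalone sketch, assuming the default BLCKSZ of 8192; the macro names are illustrative, not the actual multixact.c ones.

#include <stdint.h>

typedef uint32_t TransactionId;

#define BLCKSZ                 8192
#define MEMBERS_PER_GROUP      4
#define FLAG_BYTES_PER_GROUP   4
#define GROUP_SIZE             (FLAG_BYTES_PER_GROUP + \
                                MEMBERS_PER_GROUP * sizeof(TransactionId))  /* 20 bytes */
#define GROUPS_PER_PAGE        (BLCKSZ / GROUP_SIZE)                        /* 8192 / 20 = 409 */
#define WASTED_PER_PAGE        (BLCKSZ - GROUPS_PER_PAGE * GROUP_SIZE)      /* 12 bytes */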
@@ -161,7 +161,7 @@ static SlruCtlData MultiXactMemberCtlData;
#define MultiXactMemberCtl (&MultiXactMemberCtlData)
/*
- * MultiXact state shared across all backends. All this state is protected
+ * MultiXact state shared across all backends. All this state is protected
* by MultiXactGenLock. (We also use MultiXactOffsetControlLock and
* MultiXactMemberControlLock to guard accesses to the two sets of SLRU
* buffers. For concurrency's sake, we avoid holding more than one of these
@@ -179,7 +179,7 @@ typedef struct MultiXactStateData
MultiXactId lastTruncationPoint;
/*
- * oldest multixact that is still on disk. Anything older than this
+ * oldest multixact that is still on disk. Anything older than this
* should not be consulted.
*/
MultiXactId oldestMultiXactId;
@@ -528,7 +528,7 @@ MultiXactIdIsRunning(MultiXactId multi)
/*
* This could be made faster by having another entry point in procarray.c,
- * walking the PGPROC array only once for all the members. But in most
+ * walking the PGPROC array only once for all the members. But in most
* cases nmembers should be small enough that it doesn't much matter.
*/
for (i = 0; i < nmembers; i++)
@@ -615,7 +615,7 @@ MultiXactIdSetOldestMember(void)
* The value to set is the oldest of nextMXact and all the valid per-backend
* OldestMemberMXactId[] entries. Because of the locking we do, we can be
* certain that no subsequent call to MultiXactIdSetOldestMember can set
- * an OldestMemberMXactId[] entry older than what we compute here. Therefore
+ * an OldestMemberMXactId[] entry older than what we compute here. Therefore
* there is no live transaction, now or later, that can be a member of any
* MultiXactId older than the OldestVisibleMXactId we compute here.
*/
@@ -751,7 +751,7 @@ MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
* heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
* an XLOG record that must follow ours. The normal LSN interlock between
* the data page and that XLOG record will ensure that our XLOG record
- * reaches disk first. If the SLRU members/offsets data reaches disk
+ * reaches disk first. If the SLRU members/offsets data reaches disk
* sooner than the XLOG record, we do not care because we'll overwrite it
* with zeroes unless the XLOG record is there too; see notes at top of
* this file.
@@ -882,7 +882,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
* GetNewMultiXactId
* Get the next MultiXactId.
*
- * Also, reserve the needed amount of space in the "members" area. The
+ * Also, reserve the needed amount of space in the "members" area. The
* starting offset of the reserved space is returned in *offset.
*
* This may generate XLOG records for expansion of the offsets and/or members
@@ -916,7 +916,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
/*----------
* Check to see if it's safe to assign another MultiXactId. This protects
- * against catastrophic data loss due to multixact wraparound. The basic
+ * against catastrophic data loss due to multixact wraparound. The basic
* rules are:
*
* If we're past multiVacLimit, start trying to force autovacuum cycles.
@@ -930,7 +930,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
{
/*
* For safety's sake, we release MultiXactGenLock while sending
- * signals, warnings, etc. This is not so much because we care about
+ * signals, warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
* possibility of deadlock while doing get_database_name(). First,
* copy all the shared values we'll need in this path.
@@ -1036,7 +1036,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
* until after file extension has succeeded!
*
* We don't care about MultiXactId wraparound here; it will be handled by
- * the next iteration. But note that nextMXact may be InvalidMultiXactId
+ * the next iteration. But note that nextMXact may be InvalidMultiXactId
* or the first value on a segment-beginning page after this routine
* exits, so anyone else looking at the variable must be prepared to deal
* with either case. Similarly, nextOffset may be zero, but we won't use
@@ -1114,16 +1114,16 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
+ * tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. This raises a
+ * seen, it implies undetected ID wraparound has occurred. This raises a
* hard error.
*
* Shared lock is enough here since we aren't modifying any global state.
- * Acquire it just long enough to grab the current counter values. We may
+ * Acquire it just long enough to grab the current counter values. We may
* need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -1151,12 +1151,12 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter as
+ * and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there is
- * no next one to look at. In this case the nextOffset value we just
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in: that
@@ -1167,11 +1167,11 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
- * have this case. We sleep for a bit and try again.
+ * have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
@@ -1694,7 +1694,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info,
/*
* Initialization of shared memory for MultiXact. We use two SLRU areas,
- * thus double memory. Also, reserve space for the shared MultiXactState
+ * thus double memory. Also, reserve space for the shared MultiXactState
* struct and the per-backend MultiXactId arrays (two of those, too).
*/
Size
@@ -1754,7 +1754,7 @@ MultiXactShmemInit(void)
/*
* This func must be called ONCE on system install. It creates the initial
- * MultiXact segments. (The MultiXacts directories are assumed to have been
+ * MultiXact segments. (The MultiXacts directories are assumed to have been
* created by initdb, and MultiXactShmemInit must have been called already.)
*/
void
@@ -1925,7 +1925,7 @@ TrimMultiXact(void)
MultiXactOffsetCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current offsets page. See notes in
+ * Zero out the remainder of the current offsets page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MultiXactIdToOffsetEntry(multi);
@@ -1955,7 +1955,7 @@ TrimMultiXact(void)
MultiXactMemberCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current members page. See notes in
+ * Zero out the remainder of the current members page. See notes in
* TrimCLOG() for motivation.
*/
flagsoff = MXOffsetToFlagsOffset(offset);
@@ -2097,7 +2097,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/*
* We'll start complaining loudly when we get within 10M multis of the
- * stop point. This is kind of arbitrary, but if you let your gas gauge
+ * stop point. This is kind of arbitrary, but if you let your gas gauge
* get down to 1% of full, would you be looking for the next gas station?
* We need to be fairly liberal about this number because there are lots
* of scenarios where most transactions are done by automatic clients that
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index af249022476..603d65fc96e 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
- * must be held to examine or modify any shared state. A process that is
+ * must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@@ -34,7 +34,7 @@
* could have happened while we didn't have the lock).
*
* As with the regular buffer manager, it is possible for another process
- * to re-dirty a page that is currently being written out. This is handled
+ * to re-dirty a page that is currently being written out. This is handled
* by re-setting the page's page_dirty flag.
*
*
@@ -96,7 +96,7 @@ typedef struct SlruFlushData *SlruFlush;
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict. The
+ * worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
@@ -481,7 +481,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
- * the write). However, we *do* attempt a fresh write even if the page
+ * the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
@@ -634,7 +634,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* In a crash-and-restart situation, it's possible for us to receive
* commands to set the commit status of transactions whose bits are in
* already-truncated segments of the commit log (see notes in
- * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
+ * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
* where the file doesn't exist, and return zeroes instead.
*/
fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@@ -964,9 +964,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If we find any EMPTY slot, just select that one. Else choose a
- * victim page to replace. We normally take the least recently used
+ * victim page to replace. We normally take the least recently used
* valid page, but we will never take the slot containing
- * latest_page_number, even if it appears least recently used. We
+ * latest_page_number, even if it appears least recently used. We
* will select a slot that is already I/O busy only if there is no
* other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is
@@ -1041,7 +1041,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If all pages (except possibly the latest one) are I/O busy, we'll
- * have to wait for an I/O to complete and then retry. In that
+ * have to wait for an I/O to complete and then retry. In that
* unhappy case, we choose to wait for the I/O on the least recently
* used slot, on the assumption that it was likely initiated first of
* all the I/Os in progress and may therefore finish first.
@@ -1193,7 +1193,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
- * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
+ * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 88c42c88695..1453418d5bd 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -5,7 +5,7 @@
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
- * nested transactions implementation. A main transaction has a parent
+ * nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
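A toy, array-backed model of the child-to-parent walk described above (pg_subtrans itself is an SLRU, not an in-memory array):

#include <stdint.h>

typedef uint32_t TransactionId;
#define InvalidTransactionId ((TransactionId) 0)

/*
 * Follow the parent mapping from a subtransaction up to its top-level
 * ancestor, which is the entry whose parent is invalid.
 */
static TransactionId
toplevel_of(const TransactionId *parent, TransactionId xid)
{
    while (parent[xid] != InvalidTransactionId)
        xid = parent[xid];      /* step from child to immediate parent */
    return xid;
}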
@@ -191,7 +191,7 @@ SUBTRANSShmemInit(void)
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
- * since slru.c would create it on first write anyway. But we may as well
+ * since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void
diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c
index e47134a65a0..0f34c17e1f6 100644
--- a/src/backend/access/transam/timeline.c
+++ b/src/backend/access/transam/timeline.c
@@ -66,7 +66,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
* Try to read a timeline's history file.
*
* If successful, return the list of component TLIs (the given TLI followed by
- * its ancestor TLIs). If we can't find the history file, assume that the
+ * its ancestor TLIs). If we can't find the history file, assume that the
* timeline has no parents, and return a list of just the specified timeline
* ID.
*/
@@ -281,7 +281,7 @@ findNewestTimeLine(TimeLineID startTLI)
* reason: human-readable explanation of why the timeline was switched
*
* Currently this is only used at the end of recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
void
@@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
/*
* Prefer link() to rename() here just to be really sure that we don't
- * overwrite an existing file. However, there shouldn't be one, so
+ * overwrite an existing file. However, there shouldn't be one, so
* rename() is an acceptable substitute except for the truly paranoid.
*/
#if HAVE_WORKING_LINK
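A standalone POSIX sketch of the link()-then-unlink() idiom being preferred here; the function and paths are hypothetical, and the point is that link() fails with EEXIST rather than silently replacing an existing target the way rename() would.

#include <stdio.h>
#include <unistd.h>

static int
install_without_overwrite(const char *tmppath, const char *finalpath)
{
    if (link(tmppath, finalpath) < 0)   /* fails with EEXIST if finalpath exists */
    {
        perror("link");
        return -1;
    }
    return unlink(tmppath);             /* drop the temporary name */
}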
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index b92a27bff36..94165f5fec8 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -145,7 +145,7 @@ TransactionIdDidCommit(TransactionId transactionId)
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
- * zeroed. Since this case should not happen under normal conditions, it
+ * zeroed. Since this case should not happen under normal conditions, it
* seems reasonable to emit a WARNING for it.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
@@ -301,7 +301,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2)
{
/*
* If either ID is a permanent XID then we can just do unsigned
- * comparison. If both are normal, do a modulo-2^32 comparison.
+ * comparison. If both are normal, do a modulo-2^32 comparison.
*/
int32 diff;
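The modulo-2^32 rule can be shown as a standalone sketch covering only "normal" XIDs (the permanent-XID special cases that transam.c handles first are omitted):

#include <stdbool.h>
#include <stdint.h>

/*
 * id1 precedes id2 if the signed 32-bit difference is negative, i.e. id1
 * lies in the half of the circular XID space "behind" id2.
 */
static bool
xid_precedes(uint32_t id1, uint32_t id2)
{
    int32_t diff = (int32_t) (id1 - id2);

    return diff < 0;
}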
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index e975f8d26d0..fbdd0abc5d0 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -442,7 +442,7 @@ LockGXact(const char *gid, Oid user)
/*
* Note: it probably would be possible to allow committing from
* another database; but at the moment NOTIFY is known not to work and
- * there may be some other issues as well. Hence disallow until
+ * there may be some other issues as well. Hence disallow until
* someone gets motivated to make it work.
*/
if (MyDatabaseId != proc->databaseId)
@@ -1030,7 +1030,7 @@ EndPrepare(GlobalTransaction gxact)
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
- * the write --- then, WAL replay should repair the inconsistency. The
+ * the write --- then, WAL replay should repair the inconsistency. The
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
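A rough sketch of the critical-section pattern this comment relies on (not the actual EndPrepare code; fd, buf, and len are assumed to be set up by the caller): within START_CRIT_SECTION/END_CRIT_SECTION an ereport(ERROR) is escalated to PANIC, so a failed write forces WAL replay to repair the state file.

START_CRIT_SECTION();

if (write(fd, buf, len) != len)
    ereport(ERROR,              /* escalated to PANIC inside the critical section */
            (errcode_for_file_access(),
             errmsg("could not write two-phase state file: %m")));

END_CRIT_SECTION();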
@@ -1068,7 +1068,7 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks
+ * Mark the prepared transaction as valid. As soon as xact.c marks
* MyPgXact as not running our XID (which it will do immediately after
* this function returns), others can commit/rollback the xact.
*
@@ -1335,7 +1335,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it can be recycled
- * properly later. It is still locked by our XID so it won't go away yet.
+ * properly later. It is still locked by our XID so it won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
*/
@@ -1539,7 +1539,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
- * to open its state file. We handle this by special-casing ENOENT
+ * to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
@@ -1620,7 +1620,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* We throw away any prepared xacts with main XID beyond nextXid --- if any
* are present, it suggests that the DBA has done a PITR recovery to an
- * earlier point in time without cleaning out pg_twophase. We dare not
+ * earlier point in time without cleaning out pg_twophase. We dare not
* try to recover such prepared xacts since they likely depend on database
* state that doesn't exist now.
*
@@ -1712,7 +1712,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
* XID, and they may force us to advance nextXid.
*
* We don't expect anyone else to modify nextXid, hence we don't
- * need to hold a lock while examining it. We still acquire the
+ * need to hold a lock while examining it. We still acquire the
* lock to modify it, though.
*/
subxids = (TransactionId *)
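The ENOENT race described in the CheckPointTwoPhase hunk reduces to a small open() pattern. A rough standalone illustration; gxact_still_valid() is a hypothetical stand-in for rechecking the shared state under the lock:

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in: recheck, under the lock, whether the GXACT exists. */
static bool
gxact_still_valid(const char *gid)
{
    (void) gid;
    return false;               /* pretend someone else already finished it */
}

/*
 * Open a two-phase state file that might be removed concurrently.
 * ENOENT is only an error if the transaction is still supposed to exist.
 */
static int
open_state_file(const char *path, const char *gid)
{
    int         fd = open(path, O_RDONLY);

    if (fd < 0)
    {
        if (errno == ENOENT && !gxact_still_valid(gid))
            return -1;          /* harmless race: it was finished and removed */
        perror(path);           /* anything else is a real failure */
    }
    return fd;
}

int
main(void)
{
    (void) open_state_file("/tmp/nonexistent-2pc-file", "demo-gid");
    return 0;
}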
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 7252ee25c88..35f63a2ff85 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -39,7 +39,7 @@ VariableCache ShmemVariableCache = NULL;
*
* Note: when this is called, we are actually already inside a valid
* transaction, since XIDs are now not allocated until the transaction
- * does something. So it is safe to do a database lookup if we want to
+ * does something. So it is safe to do a database lookup if we want to
* issue a warning about XID wrap.
*/
TransactionId
@@ -165,20 +165,20 @@ GetNewTransactionId(bool isSubXact)
/*
* Now advance the nextXid counter. This must not happen until after we
* have successfully completed ExtendCLOG() --- if that routine fails, we
- * want the next incoming transaction to try it again. We cannot assign
+ * want the next incoming transaction to try it again. We cannot assign
* more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
* We must store the new XID into the shared ProcArray before releasing
- * XidGenLock. This ensures that every active XID older than
+ * XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
* are relying on fetch/store of an xid to be atomic, else other backends
- * might see a partially-set xid here. But holding both locks at once
+ * might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
* Note that readers of PGXACT xid fields should be careful to fetch the
@@ -289,7 +289,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
/*
* We'll start complaining loudly when we get within 10M transactions of
- * the stop point. This is kind of arbitrary, but if you let your gas
+ * the stop point. This is kind of arbitrary, but if you let your gas
* gauge get down to 1% of full, would you be looking for the next gas
* station? We need to be fairly liberal about this number because there
* are lots of scenarios where most transactions are done by automatic
@@ -390,7 +390,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
* We primarily check whether oldestXidDB is valid. The cases we have in
* mind are that that database was dropped, or the field was reset to zero
* by pg_resetxlog. In either case we should force recalculation of the
- * wrap limit. Also do it if oldestXid is old enough to be forcing
+ * wrap limit. Also do it if oldestXid is old enough to be forcing
* autovacuums or other actions; this ensures we update our state as soon
* as possible once extra overhead is being incurred.
*/
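The 10M-transaction warning margin mentioned above sits in a chain of wraparound-aware limit calculations. A simplified sketch of that arithmetic, with the constants and the skip-over-special-XIDs adjustment written out; treat it as an illustration of the convention rather than the function's exact body:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;
#define MaxTransactionId         ((TransactionId) 0xFFFFFFFF)
#define FirstNormalTransactionId ((TransactionId) 3)

/*
 * The wrap limit sits half the XID space past the oldest frozen XID; we stop
 * assigning XIDs 1M short of that, and start warning 10M before the stop
 * point.  When a computed limit wraps past the special XIDs (0..2), skip them.
 */
static void
compute_xid_limits(TransactionId oldest_frozen,
                   TransactionId *warn, TransactionId *stop, TransactionId *wrap)
{
    TransactionId xidWrapLimit = oldest_frozen + (MaxTransactionId >> 1);

    if (xidWrapLimit < FirstNormalTransactionId)
        xidWrapLimit += FirstNormalTransactionId;

    TransactionId xidStopLimit = xidWrapLimit - 1000000;

    if (xidStopLimit < FirstNormalTransactionId)
        xidStopLimit -= FirstNormalTransactionId;

    TransactionId xidWarnLimit = xidStopLimit - 10000000;

    if (xidWarnLimit < FirstNormalTransactionId)
        xidWarnLimit -= FirstNormalTransactionId;

    *warn = xidWarnLimit;
    *stop = xidStopLimit;
    *wrap = xidWrapLimit;
}

int
main(void)
{
    TransactionId warn, stop, wrap;

    compute_xid_limits(4000000000u, &warn, &stop, &wrap);
    printf("warn=%u stop=%u wrap=%u\n", warn, stop, wrap);
    return 0;
}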
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 77bf9748352..914ff3a3bc0 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -599,7 +599,7 @@ SubTransactionIsActive(SubTransactionId subxid)
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
- * for read-only purposes (ie, as a snapshot validity cutoff). See
+ * for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
CommandId
@@ -686,7 +686,7 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
/*
* We always say that BootstrapTransactionId is "not my transaction ID"
- * even when it is (ie, during bootstrap). Along with the fact that
+ * even when it is (ie, during bootstrap). Along with the fact that
* transam.c always treats BootstrapTransactionId as already committed,
* this causes the tqual.c routines to see all tuples as committed, which
* is what we need during bootstrap. (Bootstrap mode only inserts tuples,
@@ -828,7 +828,7 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
- * insulate AbortTransaction from out-of-memory scenarios. Like
+ * insulate AbortTransaction from out-of-memory scenarios. Like
* ErrorContext, we set it up with slow growth rate and a nonzero minimum
* size, so that space will be reserved immediately.
*/
@@ -931,7 +931,7 @@ AtSubStart_ResourceOwner(void)
Assert(s->parent != NULL);
/*
- * Create a resource owner for the subtransaction. We make it a child of
+ * Create a resource owner for the subtransaction. We make it a child of
* the immediate parent's resource owner.
*/
s->curTransactionOwner =
@@ -951,7 +951,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionCommit(void)
@@ -996,7 +996,7 @@ RecordTransactionCommit(void)
/*
* If we didn't create XLOG entries, we're done here; otherwise we
- * should flush those entries the same as a commit record. (An
+ * should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
* assigned is a sequence advance record due to nextval() --- we want
* to flush that to disk before reporting commit.)
@@ -1013,7 +1013,7 @@ RecordTransactionCommit(void)
BufmgrCommit();
/*
- * Mark ourselves as within our "commit critical section". This
+ * Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
* pg_clog. Without this, it is possible for the checkpoint to set
* REDO after the XLOG record but fail to flush the pg_clog update to
@@ -1021,7 +1021,7 @@ RecordTransactionCommit(void)
* crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
- * RecordTransactionAbort. That's because loss of a transaction abort
+ * RecordTransactionAbort. That's because loss of a transaction abort
* is noncritical; the presumption would be that it aborted, anyway.
*
* It's safe to change the delayChkpt flag of our own backend without
@@ -1122,15 +1122,15 @@ RecordTransactionCommit(void)
/*
* Check if we want to commit asynchronously. We can allow the XLOG flush
* to happen asynchronously if synchronous_commit=off, or if the current
- * transaction has not performed any WAL-logged operation. The latter
+ * transaction has not performed any WAL-logged operation. The latter
* case can arise if the current transaction wrote only to temporary
- * and/or unlogged tables. In case of a crash, the loss of such a
+ * and/or unlogged tables. In case of a crash, the loss of such a
* transaction will be irrelevant since temp tables will be lost anyway,
* and unlogged tables will be truncated. (Given the foregoing, you might
* think that it would be unnecessary to emit the XLOG record at all in
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
- * KnownAssignedXids machinery requires tracking every XID assignment. It
+ * KnownAssignedXids machinery requires tracking every XID assignment. It
* might be OK to skip it only when wal_level < hot_standby, but for now
* we don't.)
*
@@ -1377,7 +1377,7 @@ AtSubCommit_childXids(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@@ -1394,7 +1394,7 @@ RecordTransactionAbort(bool isSubXact)
/*
* If we haven't been assigned an XID, nobody will care whether we aborted
- * or not. Hence, we're done in that case. It does not matter if we have
+ * or not. Hence, we're done in that case. It does not matter if we have
* rels to delete (note that this routine is not responsible for actually
* deleting 'em). We cannot have any child XIDs, either.
*/
@@ -1410,7 +1410,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
- * crash would be that we aborted, anyway. For the same reason, we don't
+ * crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@@ -1578,7 +1578,7 @@ AtSubAbort_childXids(void)
/*
* We keep the child-XID arrays in TopTransactionContext (see
- * AtSubCommit_childXids). This means we'd better free the array
+ * AtSubCommit_childXids). This means we'd better free the array
* explicitly at abort to avoid leakage.
*/
if (s->childXids != NULL)
@@ -1755,7 +1755,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
- * Advertise it in the proc array. We assume assignment of
+ * Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@@ -1852,7 +1852,7 @@ CommitTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -2057,7 +2057,7 @@ PrepareTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -2177,7 +2177,7 @@ PrepareTransaction(void)
XactLastRecEnd = 0;
/*
- * Let others know about no transaction in progress by me. This has to be
+ * Let others know about no transaction in progress by me. This has to be
* done *after* the prepared transaction has been marked valid, else
* someone may think it is unlocked and recyclable.
*/
@@ -2186,7 +2186,7 @@ PrepareTransaction(void)
/*
* This is all post-transaction cleanup. Note that if an error is raised
* here, it's too late to abort the transaction. This should be just
- * noncritical resource releasing. See notes in CommitTransaction.
+ * noncritical resource releasing. See notes in CommitTransaction.
*/
CallXactCallbacks(XACT_EVENT_PREPARE);
@@ -2364,7 +2364,7 @@ AbortTransaction(void)
ProcArrayEndTransaction(MyProc, latestXid);
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering. We can skip all of it if the transaction failed before
* creating a resource owner.
*/
@@ -2599,7 +2599,7 @@ CommitTransactionCommand(void)
/*
* Here we were in a perfectly good transaction block but the user
- * told us to ROLLBACK anyway. We have to abort the transaction
+ * told us to ROLLBACK anyway. We have to abort the transaction
* and then clean up.
*/
case TBLOCK_ABORT_PENDING:
@@ -2619,7 +2619,7 @@ CommitTransactionCommand(void)
/*
* We were just issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
+ * Start a subtransaction. (DefineSavepoint already did
* PushTransaction, so as to have someplace to put the SUBBEGIN
* state.)
*/
@@ -2823,7 +2823,7 @@ AbortCurrentTransaction(void)
break;
/*
- * Here, we failed while trying to COMMIT. Clean up the
+ * Here, we failed while trying to COMMIT. Clean up the
* transaction and return to idle state (we do not want to stay in
* the transaction).
*/
@@ -2885,7 +2885,7 @@ AbortCurrentTransaction(void)
/*
* If we failed while trying to create a subtransaction, clean up
- * the broken subtransaction and abort the parent. The same
+ * the broken subtransaction and abort the parent. The same
* applies if we get a failure while ending a subtransaction.
*/
case TBLOCK_SUBBEGIN:
@@ -3417,7 +3417,7 @@ UserAbortTransactionBlock(void)
break;
/*
- * We are inside a subtransaction. Mark everything up to top
+ * We are inside a subtransaction. Mark everything up to top
* level as exitable.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3551,7 +3551,7 @@ ReleaseSavepoint(List *options)
break;
/*
- * We are in a non-aborted subtransaction. This is the only valid
+ * We are in a non-aborted subtransaction. This is the only valid
* case.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3608,7 +3608,7 @@ ReleaseSavepoint(List *options)
/*
* Mark "commit pending" all subtransactions up to the target
- * subtransaction. The actual commits will happen when control gets to
+ * subtransaction. The actual commits will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -3707,7 +3707,7 @@ RollbackToSavepoint(List *options)
/*
* Mark "abort pending" all subtransactions up to the target
- * subtransaction. The actual aborts will happen when control gets to
+ * subtransaction. The actual aborts will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -4114,7 +4114,7 @@ CommitSubTransaction(void)
CommandCounterIncrement();
/*
- * Prior to 8.4 we marked subcommit in clog at this point. We now only
+ * Prior to 8.4 we marked subcommit in clog at this point. We now only
* perform that step, if required, as part of the atomic update of the
* whole transaction tree at top level commit or abort.
*/
@@ -4573,7 +4573,7 @@ TransStateAsString(TransState state)
/*
* xactGetCommittedChildren
*
- * Gets the list of committed children of the current transaction. The return
+ * Gets the list of committed children of the current transaction. The return
* value is the number of child transactions. *ptr is set to point to an
* array of TransactionIds. The array is allocated in TopTransactionContext;
* the caller should *not* pfree() it (this is a change from pre-8.4 code!).
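The asynchronous-commit rule quoted in the RecordTransactionCommit hunk is essentially a two-input decision. A tiny hedged sketch of just that decision; the parameter names are illustrative, and the real code also tracks whether an XID must be marked in pg_clog:

#include <stdbool.h>
#include <stdio.h>

/*
 * Flush the commit record synchronously only if the transaction did any
 * WAL-logged work and synchronous_commit is on; otherwise the flush can be
 * left to the WAL writer (an asynchronous commit).
 */
static bool
commit_needs_sync_flush(bool synchronous_commit, bool wrote_wal)
{
    return synchronous_commit && wrote_wal;
}

int
main(void)
{
    printf("temp-table-only xact, sync commit on: %d\n",
           commit_needs_sync_flush(true, false));   /* 0: async is fine */
    printf("normal xact, sync commit on: %d\n",
           commit_needs_sync_flush(true, true));    /* 1: must flush */
    return 0;
}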
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 9deadf9ed9e..a437d266a3b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -94,7 +94,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it. This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
- * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
+ * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
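As a quick illustration of that slop rule, a hedged sketch of the keep-or-delete decision for a recycled segment; CheckPointSegments is a GUC in the real server and the helper name is made up:

#include <stdbool.h>
#include <stdio.h>

static int  CheckPointSegments = 3;     /* stand-in for the GUC */

/*
 * Recycle an old segment as a future one only while we are within
 * 2*CheckPointSegments+1 segments ahead of the current position;
 * beyond that, deleting it is cheaper than keeping it around.
 */
static bool
recycle_as_future_segment(unsigned long candidate_segno, unsigned long cur_segno)
{
    unsigned long slop = 2 * (unsigned long) CheckPointSegments + 1;

    return candidate_segno <= cur_segno + slop;
}

int
main(void)
{
    printf("%d %d\n",
           recycle_as_future_segment(107, 100),     /* within slop: keep */
           recycle_as_future_segment(120, 100));    /* too far out: delete */
    return 0;
}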
@@ -183,7 +183,7 @@ static bool LocalHotStandbyActive = false;
* 0: unconditionally not allowed to insert XLOG
* -1: must check RecoveryInProgress(); disallow until it is false
* Most processes start with -1 and transition to 1 after seeing that recovery
- * is not in progress. But we can also force the value for special cases.
+ * is not in progress. But we can also force the value for special cases.
* The coding in XLogInsertAllowed() depends on the first two of these states
* being numerically the same as bool true and false.
*/
@@ -248,7 +248,7 @@ static bool recoveryStopAfter;
*
* expectedTLEs: a list of TimeLineHistoryEntries for recoveryTargetTLI and the timelines of
* its known parents, newest first (so recoveryTargetTLI is always the
- * first list member). Only these TLIs are expected to be seen in the WAL
+ * first list member). Only these TLIs are expected to be seen in the WAL
* segments we read, and indeed only these TLIs will be considered as
* candidate WAL files to open at all.
*
@@ -277,9 +277,9 @@ XLogRecPtr XactLastRecEnd = InvalidXLogRecPtr;
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
- * hold the Insert lock). See XLogInsert for details. We are also allowed
+ * hold the Insert lock). See XLogInsert for details. We are also allowed
* to update from XLogCtl->Insert.RedoRecPtr if we hold the info_lck;
* see GetRedoRecPtr. A freshly spawned backend obtains the value during
* InitXLOGAccess.
@@ -1428,10 +1428,10 @@ AdvanceXLInsertBuffer(bool new_segment)
* WAL records beginning in this page have removable backup blocks. This
* allows the WAL archiver to know whether it is safe to compress archived
* WAL data by transforming full-block records into the non-full-block
- * format. It is sufficient to record this at the page level because we
+ * format. It is sufficient to record this at the page level because we
* force a page switch (in fact a segment switch) when starting a backup,
* so the flag will be off before any records can be written during the
- * backup. At the end of a backup, the last page will be marked as all
+ * backup. At the end of a backup, the last page will be marked as all
* unsafe when perhaps only part is unsafe, but at worst the archiver
* would miss the opportunity to compress a few records.
*/
@@ -1711,7 +1711,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
{
/*
* Could get here without iterating above loop, in which case we might
- * have no open file or the wrong one. However, we do not need to
+ * have no open file or the wrong one. However, we do not need to
* fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN &&
@@ -1780,7 +1780,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
/*
* If the WALWriter is sleeping, we should kick it to make it come out of
- * low-power mode. Otherwise, determine whether there's a full page of
+ * low-power mode. Otherwise, determine whether there's a full page of
* WAL available to write.
*/
if (!sleeping)
@@ -2058,9 +2058,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those. Thus, if async commits
- * are not being used, we will flush complete blocks only. We can guarantee
+ * are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (When flushing complete blocks, we allow XLogWrite to write
+ * one or two. (When flushing complete blocks, we allow XLogWrite to write
* "flexibly", meaning it can stop at the end of the buffer ring; this makes a
* difference only with very high load or long wal_writer_delay, but imposes
* one extra cycle for the worst case for async commits.)
@@ -2228,7 +2228,7 @@ XLogNeedsFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted). On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock while moving file into
@@ -2295,11 +2295,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
errmsg("could not create file \"%s\": %m", tmppath)));
/*
- * Zero-fill the file. We have to do this the hard way to ensure that all
+ * Zero-fill the file. We have to do this the hard way to ensure that all
* the file space has really been allocated --- on platforms that allow
* "holes" in files, just seeking to the end doesn't allocate intermediate
* space. This way, we know that we have all the space and (after the
- * fsync below) that all the indirect blocks are down on disk. Therefore,
+ * fsync below) that all the indirect blocks are down on disk. Therefore,
* fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
* log file.
*
@@ -2391,7 +2391,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
* a different timeline)
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
@@ -2709,13 +2709,13 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source)
* the timelines listed in expectedTLEs.
*
* We expect curFileTLI on entry to be the TLI of the preceding file in
- * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
+ * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
* to go backwards; this prevents us from picking up the wrong file when a
* parent timeline extends to higher segment numbers than the child we
* want to read.
*
* If we haven't read the timeline history file yet, read it now, so that
- * we know which TLIs to scan. We don't save the list in expectedTLEs,
+ * we know which TLIs to scan. We don't save the list in expectedTLEs,
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
@@ -2779,7 +2779,7 @@ XLogFileClose(void)
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * the OS to release any cached pages. But do not do so if WAL archiving
+ * the OS to release any cached pages. But do not do so if WAL archiving
* or streaming is active, because archiver and walsender process could
* use the cache to read the WAL segment.
*/
@@ -2924,7 +2924,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr)
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that we
+ * deciding whether a segment is still needed. This ensures that we
* won't prematurely remove a segment from a parent timeline. We could
* probably be a little more proactive about removing segments of
* non-parent timelines, but that would be a whole lot more
@@ -3458,7 +3458,7 @@ rescanLatestTimeLine(void)
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control. WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -3863,7 +3863,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source)
{
/*
* If we haven't yet changed the boot_val default of -1, just let it
- * be. We'll fix it when XLOGShmemSize is called.
+ * be. We'll fix it when XLOGShmemSize is called.
*/
if (XLOGbuffers == -1)
return true;
@@ -4367,7 +4367,7 @@ readRecoveryCommandFile(void)
/*
* If user specified recovery_target_timeline, validate it or compute the
- * "latest" value. We can't do this until after we've gotten the restore
+ * "latest" value. We can't do this until after we've gotten the restore
* command and set InArchiveRecovery, because we need to fetch timeline
* history files from the archive.
*/
@@ -4926,7 +4926,7 @@ StartupXLOG(void)
ValidateXLOGDirectoryStructure();
/*
- * Clear out any old relcache cache files. This is *necessary* if we do
+ * Clear out any old relcache cache files. This is *necessary* if we do
* any WAL replay, since that would probably result in the cache files
* being out of sync with database reality. In theory we could leave them
* in place if the database had been cleanly shut down, but it seems
@@ -5805,8 +5805,8 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we are doing an archive recovery, we always assign a new ID. This
- * handles a couple of issues. If we stopped short of the end of WAL
+ * If we are doing an archive recovery, we always assign a new ID. This
+ * handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
* current last segment is problematic because it may result in trying to
@@ -5879,7 +5879,7 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in. The last block is indeed the
+ * record spans, not the one it starts in. The last block is indeed the
* one we want to use.
*/
if (EndOfLog % XLOG_BLCKSZ == 0)
@@ -5915,7 +5915,7 @@ StartupXLOG(void)
* Write.curridx must point to the *next* page (see XLogWrite()).
*
* Note: it might seem we should do AdvanceXLInsertBuffer() here, but
- * this is sufficient. The first actual attempt to insert a log
+ * this is sufficient. The first actual attempt to insert a log
* record will advance the insert state.
*/
XLogCtl->Write.curridx = NextBufIdx(0);
@@ -6094,7 +6094,7 @@ StartupXLOG(void)
XLogReportParameters();
/*
- * All done. Allow backends to write WAL. (Although the bool flag is
+ * All done. Allow backends to write WAL. (Although the bool flag is
* probably atomic in itself, we use the info_lck here to ensure that
* there are no race conditions concerning visibility of other recent
* updates to shared memory.)
@@ -6254,7 +6254,7 @@ RecoveryInProgress(void)
/*
* Initialize TimeLineID and RedoRecPtr when we discover that recovery
* is finished. InitPostgres() relies upon this behaviour to ensure
- * that InitXLOGAccess() is called at backend startup. (If you change
+ * that InitXLOGAccess() is called at backend startup. (If you change
* this, see also LocalSetXLogInsertAllowed.)
*/
if (!LocalRecoveryInProgress)
@@ -6886,7 +6886,7 @@ CreateCheckPoint(int flags)
/*
* If this isn't a shutdown or forced checkpoint, and we have not inserted
* any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
+ * checkpoint. The idea here is to avoid inserting duplicate checkpoints
* when the system is idle. That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
@@ -6991,7 +6991,7 @@ CreateCheckPoint(int flags)
* performing those groups of actions.
*
* One example is end of transaction, so we must wait for any transactions
- * that are currently in commit critical sections. If an xact inserted
+ * that are currently in commit critical sections. If an xact inserted
* its commit record into XLOG just before the REDO point, then a crash
* restart from the REDO point would not replay that record, which means
* that our flushing had better include the xact's update of pg_clog. So
@@ -7186,9 +7186,9 @@ CreateCheckPoint(int flags)
/*
* Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction. No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). During recovery, though, we mustn't do this because
+ * in subtrans.c). During recovery, though, we mustn't do this because
* StartupSUBTRANS hasn't been called yet.
*/
if (!RecoveryInProgress())
@@ -7558,9 +7558,9 @@ CreateRestartPoint(int flags)
/*
* Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction. No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). When hot standby is disabled, though, we mustn't do
+ * in subtrans.c). When hot standby is disabled, though, we mustn't do
* this because StartupSUBTRANS hasn't been called yet.
*/
if (EnableHotStandby)
@@ -7633,7 +7633,7 @@ XLogPutNextOid(Oid nextOid)
* We need not flush the NEXTOID record immediately, because any of the
* just-allocated OIDs could only reach disk as part of a tuple insert or
* update that would have its own XLOG record that must follow the NEXTOID
- * record. Therefore, the standard buffer LSN interlock applied to those
+ * record. Therefore, the standard buffer LSN interlock applied to those
* records will ensure no such OID reaches disk before the NEXTOID record
* does.
*
@@ -7969,7 +7969,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* We used to try to take the maximum of ShmemVariableCache->nextOid
* and the recorded nextOid, but that fails if the OID counter wraps
- * around. Since no OID allocation should be happening during replay
+ * around. Since no OID allocation should be happening during replay
* anyway, better to just believe the record exactly. We still take
* OidGenLock while setting the variable, just in case.
*/
@@ -8310,7 +8310,7 @@ get_sync_bit(int method)
/*
* Optimize writes by bypassing kernel cache with O_DIRECT when using
- * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
+ * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
* disabled, otherwise the archive command or walsender process will read
* the WAL soon after writing it, which is guaranteed to cause a physical
* read if we bypassed the kernel cache. We also skip the
@@ -8514,7 +8514,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* during an on-line backup even if not doing so at other times, because
* it's quite possible for the backup dump to obtain a "torn" (partially
* written) copy of a database page if it reads the page concurrently with
- * our write to the same page. This can be fixed as long as the first
+ * our write to the same page. This can be fixed as long as the first
* write to the page in the WAL sequence is a full-page write. Hence, we
* turn on forcePageWrites and then force a CHECKPOINT, to ensure there
* are no dirty pages in shared memory that might get dumped while the
@@ -8558,7 +8558,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* old timeline IDs. That would otherwise happen if you called
* pg_start_backup() right after restoring from a PITR archive: the
* first WAL segment containing the startup checkpoint has pages in
- * the beginning with the old timeline ID. That can cause trouble at
+ * the beginning with the old timeline ID. That can cause trouble at
* recovery: we won't have a history file covering the old timeline if
* pg_xlog directory was not included in the base backup and the WAL
* archive was cleared too before starting the backup.
@@ -8581,7 +8581,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
bool checkpointfpw;
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
@@ -9234,7 +9234,7 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
*
* If we see a backup_label during recovery, we assume that we are recovering
* from a backup dump file, and we therefore roll forward from the checkpoint
- * identified by the label file, NOT what pg_control says. This avoids the
+ * identified by the label file, NOT what pg_control says. This avoids the
* problem that pg_control might have been archived one or more checkpoints
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
@@ -9839,11 +9839,11 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
if (havedata)
{
/*
- * Great, streamed far enough. Open the file if it's
+ * Great, streamed far enough. Open the file if it's
* not open already. Also read the timeline history
* file if we haven't initialized timeline history
* yet; it should be streamed over and present in
- * pg_xlog by now. Use XLOG_FROM_STREAM so that
+ * pg_xlog by now. Use XLOG_FROM_STREAM so that
* source info is set correctly and XLogReceiptTime
* isn't changed.
*/
@@ -9916,9 +9916,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* in the current WAL page, previously read by XLogPageRead().
*
* 'emode' is the error mode that would be used to report a file-not-found
- * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
+ * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
* we're retrying the exact same record that we've tried previously, only
- * complain the first time to keep the noise down. However, we only do so when
+ * complain the first time to keep the noise down. However, we only do so when
* reading from pg_xlog, because we don't expect any invalid records in archive
* or in records streamed from master. Files in the archive should be complete,
* and we should never hit the end of WAL because we stop and wait for more WAL
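Several of the xlog.c hunks above touch XLogFileInit's zero-fill step, which is concrete enough to sketch on its own. A simplified standalone version, assuming POSIX I/O, with made-up constants and none of the temp-name, locking, or link()/rename() installation dance the real routine performs:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BLCKSZ  8192                    /* stand-in for XLOG_BLCKSZ */
#define SEGSZ   (16 * 1024 * 1024)      /* stand-in for the 16MB segment size */

/*
 * Write the whole segment explicitly so the filesystem allocates every block
 * (no holes), then fsync once so that later fdatasync/O_DSYNC writes need not
 * update file metadata.
 */
static int
zero_fill_segment(const char *path)
{
    char        zbuffer[BLCKSZ];
    int         fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0600);

    if (fd < 0)
        return -1;
    memset(zbuffer, 0, sizeof(zbuffer));
    for (int nbytes = 0; nbytes < SEGSZ; nbytes += BLCKSZ)
    {
        if (write(fd, zbuffer, sizeof(zbuffer)) != (ssize_t) sizeof(zbuffer))
        {
            close(fd);
            unlink(path);
            return -1;
        }
    }
    if (fsync(fd) != 0)
    {
        close(fd);
        return -1;
    }
    return close(fd);
}

int
main(void)
{
    if (zero_fill_segment("/tmp/xlogtemp.demo") != 0)
        perror("zero_fill_segment");
    return 0;
}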
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index f1b52bfe4fa..7c1a201b8ed 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -199,7 +199,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
randAccess = true;
/*
- * RecPtr is pointing to end+1 of the previous WAL record. If we're
+ * RecPtr is pointing to end+1 of the previous WAL record. If we're
* at a page boundary, no more records can fit on the current page. We
* must skip over the page header, but we can't do that until we've
* read in the page, since the header size is variable.
@@ -277,7 +277,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
/*
* If the whole record header is on this page, validate it immediately.
* Otherwise do just a basic sanity check on xl_tot_len, and validate the
- * rest of the header after reading it from the next page. The xl_tot_len
+ * rest of the header after reading it from the next page. The xl_tot_len
* check is necessary here to ensure that we enter the "Need to reassemble
* record" code path below; otherwise we might fail to apply
* ValidXLogRecordHeader at all.
@@ -572,7 +572,7 @@ err:
* Validate an XLOG record header.
*
* This is just a convenience subroutine to avoid duplicated code in
- * XLogReadRecord. It's not intended for use from anywhere else.
+ * XLogReadRecord. It's not intended for use from anywhere else.
*/
static bool
ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
@@ -661,7 +661,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
* data to read in) until we've checked the CRCs.
*
* We assume all of the record (that is, xl_tot_len bytes) has been read
- * into memory at *record. Also, ValidXLogRecordHeader() has accepted the
+ * into memory at *record. Also, ValidXLogRecordHeader() has accepted the
* record's header, which means in particular that xl_tot_len is at least
* SizeOfXlogRecord, so it is safe to fetch xl_len.
*/