aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/backend/access/brin/brin.c4
-rw-r--r--src/backend/access/brin/brin_bloom.c2
-rw-r--r--src/backend/access/brin/brin_minmax_multi.c6
-rw-r--r--src/backend/access/gist/gistbuild.c2
-rw-r--r--src/backend/access/index/genam.c2
-rw-r--r--src/backend/access/nbtree/nbtpage.c4
-rw-r--r--src/backend/catalog/pg_type.c2
-rw-r--r--src/backend/commands/analyze.c2
-rw-r--r--src/backend/executor/nodeIncrementalSort.c6
-rw-r--r--src/backend/rewrite/rewriteSearchCycle.c2
-rw-r--r--src/backend/statistics/dependencies.c4
-rw-r--r--src/backend/statistics/extended_stats.c2
-rw-r--r--src/backend/storage/ipc/procarray.c2
-rw-r--r--src/backend/tsearch/spell.c2
-rw-r--r--src/backend/utils/activity/backend_status.c4
-rw-r--r--src/backend/utils/adt/multirangetypes.c2
-rw-r--r--src/backend/utils/adt/selfuncs.c2
-rw-r--r--src/bin/pg_rewind/pg_rewind.c2
-rw-r--r--src/bin/pg_waldump/pg_waldump.c2
-rw-r--r--src/common/hmac_openssl.c2
-rw-r--r--src/common/pg_lzcompress.c2
-rw-r--r--src/interfaces/ecpg/preproc/ecpg.c2
-rw-r--r--src/port/bsearch_arg.c2
23 files changed, 31 insertions, 31 deletions
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index c320e215082..c23ea44866a 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -596,7 +596,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
* and if we're violating them. In that case we can
* terminate early, without invoking the support function.
*
- * As there may be more keys, we can only detemine
+ * As there may be more keys, we can only determine
* mismatch within this loop.
*/
if (bdesc->bd_info[attno - 1]->oi_regular_nulls &&
@@ -636,7 +636,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
/*
* Collation from the first key (has to be the same for
- * all keys for the same attribue).
+ * all keys for the same attribute).
*/
collation = keys[attno - 1][0]->sk_collation;
diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c
index 2214fb4d0cc..e83c2b82e15 100644
--- a/src/backend/access/brin/brin_bloom.c
+++ b/src/backend/access/brin/brin_bloom.c
@@ -409,7 +409,7 @@ typedef struct BloomOpaque
{
/*
* XXX At this point we only need a single proc (to compute the hash), but
- * let's keep the array just like inclusion and minman opclasses, for
+ * let's keep the array just like inclusion and minmax opclasses, for
* consistency. We may need additional procs in the future.
*/
FmgrInfo extra_procinfos[BLOOM_MAX_PROCNUMS];
diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c
index 4163abef3f0..5e4b234cc61 100644
--- a/src/backend/access/brin/brin_minmax_multi.c
+++ b/src/backend/access/brin/brin_minmax_multi.c
@@ -248,7 +248,7 @@ typedef struct DistanceValue
} DistanceValue;
-/* Cache for support and strategy procesures. */
+/* Cache for support and strategy procedures. */
static FmgrInfo *minmax_multi_get_procinfo(BrinDesc *bdesc, uint16 attno,
uint16 procnum);
@@ -1311,7 +1311,7 @@ compare_distances(const void *a, const void *b)
}
/*
- * Given an array of expanded ranges, compute distance of the gaps betwen
+ * Given an array of expanded ranges, compute distance of the gaps between
* the ranges - for ncranges there are (ncranges-1) gaps.
*
* We simply call the "distance" function to compute the (max-min) for pairs
@@ -1623,7 +1623,7 @@ ensure_free_space_in_buffer(BrinDesc *bdesc, Oid colloid,
*
* We don't simply check against range->maxvalues again. The deduplication
* might have freed very little space (e.g. just one value), forcing us to
- * do depuplication very often. In that case it's better to do compaction
+ * do deduplication very often. In that case it's better to do compaction
* and reduce more space.
*/
if (2 * range->nranges + range->nvalues <= range->maxvalues * MINMAX_BUFFER_LOAD_FACTOR)
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 36edc576a88..f46a42197c9 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -115,7 +115,7 @@ typedef struct
/*
* In sorted build, we use a stack of these structs, one for each level,
- * to hold an in-memory buffer of the righmost page at the level. When the
+ * to hold an in-memory buffer of the rightmost page at the level. When the
* page fills up, it is written out and a new page is allocated.
*/
typedef struct GistSortedBuildPageState
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 1c3e937c615..0aa26b448b7 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -633,7 +633,7 @@ systable_endscan(SysScanDesc sysscan)
* Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
- * For the moment, therefore, these functions are merely the thinest of
+ * For the moment, therefore, these functions are merely the thinnest of
* wrappers around index_beginscan/index_getnext_slot. The main reason for
* their existence is to centralize possible future support of lossy operators
* in catalog scans.
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index ef48679cc2e..706e16ae949 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -1398,7 +1398,7 @@ _bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid,
* _bt_delitems_delete. These steps must take place before each function's
* critical section begins.
*
- * updatabable and nupdatable are inputs, though note that we will use
+ * updatable and nupdatable are inputs, though note that we will use
* _bt_update_posting() to replace the original itup with a pointer to a final
* version in palloc()'d memory. Caller should free the tuples when its done.
*
@@ -1504,7 +1504,7 @@ _bt_delitems_cmp(const void *a, const void *b)
* some extra index tuples that were practically free for tableam to check in
* passing (when they actually turn out to be safe to delete). It probably
* only makes sense for the tableam to go ahead with these extra checks when
- * it is block-orientated (otherwise the checks probably won't be practically
+ * it is block-oriented (otherwise the checks probably won't be practically
* free, which we rely on). The tableam interface requires the tableam side
* to handle the problem, though, so this is okay (we as an index AM are free
* to make the simplifying assumption that all tableams must be block-based).
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 64f6c7238f7..dc9d28a32c5 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -997,7 +997,7 @@ makeMultirangeTypeName(const char *rangeTypeName, Oid typeNamespace)
* makeUniqueTypeName
* Generate a unique name for a prospective new type
*
- * Given a typeName, return a new palloc'ed name by preprending underscores
+ * Given a typeName, return a new palloc'ed name by prepending underscores
* until a non-conflicting name results.
*
* If tryOriginal, first try with zero underscores.
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index cffcd543029..8aa329a2a03 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -660,7 +660,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
{
/*
* Partitioned tables don't have storage, so we don't set any fields in
- * their pg_class entries except for relpages, which is necessary for
+ * their pg_class entries except for reltuples, which is necessary for
* auto-analyze to work properly.
*/
vac_update_relstats(onerel, -1, totalrows,
diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c
index 459c879f0bb..18f246a8233 100644
--- a/src/backend/executor/nodeIncrementalSort.c
+++ b/src/backend/executor/nodeIncrementalSort.c
@@ -661,9 +661,9 @@ ExecIncrementalSort(PlanState *pstate)
/*
* We're in full sort mode accumulating a minimum number of tuples
* and not checking for prefix key equality yet, so we can't
- * assume the group pivot tuple will reamin the same -- unless
+ * assume the group pivot tuple will remain the same -- unless
* we're using a minimum group size of 1, in which case the pivot
- * is obviously still the pviot.
+ * is obviously still the pivot.
*/
if (nTuples != minGroupSize)
ExecClearTuple(node->group_pivot);
@@ -1162,7 +1162,7 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
}
/*
- * If chgParam of subnode is not null, theni the plan will be re-scanned
+ * If chgParam of subnode is not null, then the plan will be re-scanned
* by the first ExecProcNode.
*/
if (outerPlan->chgParam == NULL)
diff --git a/src/backend/rewrite/rewriteSearchCycle.c b/src/backend/rewrite/rewriteSearchCycle.c
index 1a7d66fa6f9..2d0ac378a81 100644
--- a/src/backend/rewrite/rewriteSearchCycle.c
+++ b/src/backend/rewrite/rewriteSearchCycle.c
@@ -59,7 +59,7 @@
* SQL standard actually does it in that more complicated way), but the
* internal representation allows us to construct it this way.)
*
- * With a search caluse
+ * With a search clause
*
* SEARCH DEPTH FIRST BY col1, col2 SET sqc
*
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index cf8a6d5f68b..ba7decb6a4e 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -972,7 +972,7 @@ find_strongest_dependency(MVDependencies **dependencies, int ndependencies,
/*
* clauselist_apply_dependencies
* Apply the specified functional dependencies to a list of clauses and
- * return the estimated selecvitity of the clauses that are compatible
+ * return the estimated selectivity of the clauses that are compatible
* with any of the given dependencies.
*
* This will estimate all not-already-estimated clauses that are compatible
@@ -1450,7 +1450,7 @@ dependencies_clauselist_selectivity(PlannerInfo *root,
if (!bms_is_member(listidx, *estimatedclauses))
{
/*
- * If it's a simple column refrence, just extract the attnum. If
+ * If it's a simple column reference, just extract the attnum. If
* it's an expression, assign a negative attnum as if it was a
* system attribute.
*/
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index e54e8aa8e0f..7e11cb9d5f5 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -358,7 +358,7 @@ statext_compute_stattarget(int stattarget, int nattrs, VacAttrStats **stats)
*/
for (i = 0; i < nattrs; i++)
{
- /* keep the maximmum statistics target */
+ /* keep the maximum statistics target */
if (stats[i]->attr->attstattarget > stattarget)
stattarget = stats[i]->attr->attstattarget;
}
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index bf776286de0..5ff8cab394e 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -2131,7 +2131,7 @@ GetSnapshotDataReuse(Snapshot snapshot)
* older than this are known not running any more.
*
* And try to advance the bounds of GlobalVis{Shared,Catalog,Data,Temp}Rels
- * for the benefit of theGlobalVisTest* family of functions.
+ * for the benefit of the GlobalVisTest* family of functions.
*
* Note: this function should probably not be called with an argument that's
* not statically allocated (see xip allocation below).
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index 68160bd5925..ebc89604ac2 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -2020,7 +2020,7 @@ NISortAffixes(IspellDict *Conf)
(const unsigned char *) Affix->repl,
(ptr - 1)->len))
{
- /* leave only unique and minimals suffixes */
+ /* leave only unique and minimal suffixes */
ptr->affix = Affix->repl;
ptr->len = Affix->replen;
ptr->issuffix = issuffix;
diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c
index 6110113e56a..5c1b2c25ed2 100644
--- a/src/backend/utils/activity/backend_status.c
+++ b/src/backend/utils/activity/backend_status.c
@@ -1032,10 +1032,10 @@ pgstat_get_my_queryid(void)
if (!MyBEEntry)
return 0;
- /* There's no need for a look around pgstat_begin_read_activity /
+ /* There's no need for a lock around pgstat_begin_read_activity /
* pgstat_end_read_activity here as it's only called from
* pg_stat_get_activity which is already protected, or from the same
- * backend which mean that there won't be concurrent write.
+ * backend which means that there won't be concurrent writes.
*/
return MyBEEntry->st_queryid;
}
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index b3964ea27fd..7ba6ff98604 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -553,7 +553,7 @@ multirange_get_typcache(FunctionCallInfo fcinfo, Oid mltrngtypid)
/*
- * Estimate size occupied by serialized multirage.
+ * Estimate size occupied by serialized multirange.
*/
static Size
multirange_size_estimate(TypeCacheEntry *rangetyp, int32 range_count,
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 0963e2701cb..3d4304cce7a 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -4039,7 +4039,7 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
/*
* Process a simple Var expression, by matching it to keys
- * directly. If there's a matchine expression, we'll try
+ * directly. If there's a matching expression, we'll try
* matching it later.
*/
if (IsA(varinfo->var, Var))
diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c
index 9df08ab2b08..38e5d237551 100644
--- a/src/bin/pg_rewind/pg_rewind.c
+++ b/src/bin/pg_rewind/pg_rewind.c
@@ -605,7 +605,7 @@ perform_rewind(filemap_t *filemap, rewind_source *source,
* and the target. But if the source is a standby server, it's possible
* that the last common checkpoint is *after* the standby's restartpoint.
* That implies that the source server has applied the checkpoint record,
- * but hasn't perfomed a corresponding restartpoint yet. Make sure we
+ * but hasn't performed a corresponding restartpoint yet. Make sure we
* start at the restartpoint's redo point in that case.
*
* Use the old version of the source's control file for this. The server
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index d4d6bb25a9f..4ec273e6d28 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -323,7 +323,7 @@ WALDumpCloseSegment(XLogReaderState *state)
}
/*
- * pg_waldump's WAL page rader
+ * pg_waldump's WAL page reader
*
* timeline and startptr specifies the LSN, and reads up to endptr.
*/
diff --git a/src/common/hmac_openssl.c b/src/common/hmac_openssl.c
index b5e3065d1a9..5df06839e01 100644
--- a/src/common/hmac_openssl.c
+++ b/src/common/hmac_openssl.c
@@ -34,7 +34,7 @@
/*
* In backend, use an allocation in TopMemoryContext to count for resowner
- * cleanup handling if necesary. For versions of OpenSSL where HMAC_CTX is
+ * cleanup handling if necessary. For versions of OpenSSL where HMAC_CTX is
* known, just use palloc(). In frontend, use malloc to be able to return
* a failure status back to the caller.
*/
diff --git a/src/common/pg_lzcompress.c b/src/common/pg_lzcompress.c
index fdd527f757a..a30a2c2eb83 100644
--- a/src/common/pg_lzcompress.c
+++ b/src/common/pg_lzcompress.c
@@ -147,7 +147,7 @@
*
* For each subsequent entry in the history list, the "good_match"
* is lowered by 10%. So the compressor will be more happy with
- * short matches the farer it has to go back in the history.
+ * short matches the further it has to go back in the history.
* Another "speed against ratio" preference characteristic of
* the algorithm.
*
diff --git a/src/interfaces/ecpg/preproc/ecpg.c b/src/interfaces/ecpg/preproc/ecpg.c
index ee2fa515888..9d861b428b0 100644
--- a/src/interfaces/ecpg/preproc/ecpg.c
+++ b/src/interfaces/ecpg/preproc/ecpg.c
@@ -375,7 +375,7 @@ main(int argc, char *const argv[])
}
cur = NULL;
- /* remove old delared statements if any are still there */
+ /* remove old declared statements if any are still there */
for (list = g_declared_list; list != NULL;)
{
struct declared_list *this = list;
diff --git a/src/port/bsearch_arg.c b/src/port/bsearch_arg.c
index 0f1eaeba83b..8849bdffd25 100644
--- a/src/port/bsearch_arg.c
+++ b/src/port/bsearch_arg.c
@@ -43,7 +43,7 @@
* is odd, moving left simply involves halving lim: e.g., when lim
* is 5 we look at item 2, so we change lim to 2 so that we will
* look at items 0 & 1. If lim is even, the same applies. If lim
- * is odd, moving right again involes halving lim, this time moving
+ * is odd, moving right again involves halving lim, this time moving
* the base up one item past p: e.g., when lim is 5 we change base
* to item 3 and make lim 2 so that we will look at items 3 and 4.
* If lim is even, however, we have to shrink it by one before