diff options
Diffstat (limited to 'src/backend/access')
-rw-r--r-- | src/backend/access/brin/brin.c | 4 |
-rw-r--r-- | src/backend/access/brin/brin_bloom.c | 2 |
-rw-r--r-- | src/backend/access/brin/brin_minmax_multi.c | 6 |
-rw-r--r-- | src/backend/access/gist/gistbuild.c | 2 |
-rw-r--r-- | src/backend/access/index/genam.c | 2 |
-rw-r--r-- | src/backend/access/nbtree/nbtpage.c | 4 |
6 files changed, 10 insertions, 10 deletions
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index c320e215082..c23ea44866a 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -596,7 +596,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) * and if we're violating them. In that case we can * terminate early, without invoking the support function. * - * As there may be more keys, we can only detemine + * As there may be more keys, we can only determine * mismatch within this loop. */ if (bdesc->bd_info[attno - 1]->oi_regular_nulls && @@ -636,7 +636,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) /* * Collation from the first key (has to be the same for - * all keys for the same attribue). + * all keys for the same attribute). */ collation = keys[attno - 1][0]->sk_collation; diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c index 2214fb4d0cc..e83c2b82e15 100644 --- a/src/backend/access/brin/brin_bloom.c +++ b/src/backend/access/brin/brin_bloom.c @@ -409,7 +409,7 @@ typedef struct BloomOpaque { /* * XXX At this point we only need a single proc (to compute the hash), but - * let's keep the array just like inclusion and minman opclasses, for + * let's keep the array just like inclusion and minmax opclasses, for * consistency. We may need additional procs in the future. */ FmgrInfo extra_procinfos[BLOOM_MAX_PROCNUMS]; diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c index 4163abef3f0..5e4b234cc61 100644 --- a/src/backend/access/brin/brin_minmax_multi.c +++ b/src/backend/access/brin/brin_minmax_multi.c @@ -248,7 +248,7 @@ typedef struct DistanceValue } DistanceValue; -/* Cache for support and strategy procesures. */ +/* Cache for support and strategy procedures. 
*/ static FmgrInfo *minmax_multi_get_procinfo(BrinDesc *bdesc, uint16 attno, uint16 procnum); @@ -1311,7 +1311,7 @@ compare_distances(const void *a, const void *b) } /* - * Given an array of expanded ranges, compute distance of the gaps betwen + * Given an array of expanded ranges, compute distance of the gaps between * the ranges - for ncranges there are (ncranges-1) gaps. * * We simply call the "distance" function to compute the (max-min) for pairs @@ -1623,7 +1623,7 @@ ensure_free_space_in_buffer(BrinDesc *bdesc, Oid colloid, * * We don't simply check against range->maxvalues again. The deduplication * might have freed very little space (e.g. just one value), forcing us to - * do depuplication very often. In that case it's better to do compaction + * do deduplication very often. In that case it's better to do compaction * and reduce more space. */ if (2 * range->nranges + range->nvalues <= range->maxvalues * MINMAX_BUFFER_LOAD_FACTOR) diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 36edc576a88..f46a42197c9 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -115,7 +115,7 @@ typedef struct /* * In sorted build, we use a stack of these structs, one for each level, - * to hold an in-memory buffer of the righmost page at the level. When the + * to hold an in-memory buffer of the rightmost page at the level. When the * page fills up, it is written out and a new page is allocated. */ typedef struct GistSortedBuildPageState diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 1c3e937c615..0aa26b448b7 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -633,7 +633,7 @@ systable_endscan(SysScanDesc sysscan) * Currently we do not support non-index-based scans here. (In principle * we could do a heapscan and sort, but the uses are in places that * probably don't need to still work with corrupted catalog indexes.) 
- * For the moment, therefore, these functions are merely the thinest of + * For the moment, therefore, these functions are merely the thinnest of * wrappers around index_beginscan/index_getnext_slot. The main reason for * their existence is to centralize possible future support of lossy operators * in catalog scans. diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index ef48679cc2e..706e16ae949 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -1398,7 +1398,7 @@ _bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid, * _bt_delitems_delete. These steps must take place before each function's * critical section begins. * - * updatabable and nupdatable are inputs, though note that we will use + * updatable and nupdatable are inputs, though note that we will use * _bt_update_posting() to replace the original itup with a pointer to a final * version in palloc()'d memory. Caller should free the tuples when its done. * @@ -1504,7 +1504,7 @@ _bt_delitems_cmp(const void *a, const void *b) * some extra index tuples that were practically free for tableam to check in * passing (when they actually turn out to be safe to delete). It probably * only makes sense for the tableam to go ahead with these extra checks when - * it is block-orientated (otherwise the checks probably won't be practically + * it is block-oriented (otherwise the checks probably won't be practically * free, which we rely on). The tableam interface requires the tableam side * to handle the problem, though, so this is okay (we as an index AM are free * to make the simplifying assumption that all tableams must be block-based). |