Diffstat (limited to 'src')
-rw-r--r-- | src/backend/access/common/heaptuple.c | 2 |
-rw-r--r-- | src/backend/access/gin/ginfast.c      | 4 |
-rw-r--r-- | src/backend/access/gist/gistproc.c    | 4 |
-rw-r--r-- | src/backend/access/heap/heapam.c      | 4 |
-rw-r--r-- | src/backend/access/heap/rewriteheap.c | 4 |
-rw-r--r-- | src/backend/optimizer/path/costsize.c | 2 |
-rw-r--r-- | src/backend/utils/adt/regproc.c       | 2 |
7 files changed, 11 insertions, 11 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 009ebe7a1cb..9b1f358b387 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -807,7 +807,7 @@ heap_modify_tuple(HeapTuple tuple,
  * repl information, as appropriate.
  *
  * NOTE: it's debatable whether to use heap_deform_tuple() here or just
- * heap_getattr() only the non-replaced colums. The latter could win if
+ * heap_getattr() only the non-replaced columns. The latter could win if
  * there are many replaced columns and few non-replaced ones. However,
  * heap_deform_tuple costs only O(N) while the heap_getattr way would cost
  * O(N^2) if there are many non-replaced columns, so it seems better to
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 09c3e39bf3b..7fc55cf373b 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -881,8 +881,8 @@ ginInsertCleanup(GinState *ginstate,
 											 * locking */
 
 			/*
-			 * remove readed pages from pending list, at this point all
-			 * content of readed pages is in regular structure
+			 * remove read pages from pending list, at this point all
+			 * content of read pages is in regular structure
 			 */
 			if (shiftList(index, metabuffer, blkno, stats))
 			{
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index db0bec6e3e5..9517be82478 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -578,7 +578,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
 	 * We first consider splits where b is the lower bound of an entry.
 	 * We iterate through all entries, and for each b, calculate the
 	 * smallest possible a. Then we consider splits where a is the
-	 * uppper bound of an entry, and for each a, calculate the greatest
+	 * upper bound of an entry, and for each a, calculate the greatest
 	 * possible b.
 	 *
 	 * In the above example, the first loop would consider splits:
@@ -628,7 +628,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
 	}
 
 	/*
-	 * Iterate over upper bound of left group finding greates possible
+	 * Iterate over upper bound of left group finding greatest possible
 	 * lower bound of right group.
 	 */
 	i1 = nentries - 1;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 2e3b9d2c2b7..badbea4be51 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5311,7 +5311,7 @@ l4:
  *
  * The initial tuple is assumed to be already locked.
  *
- * This function doesn't check visibility, it just inconditionally marks the
+ * This function doesn't check visibility, it just unconditionally marks the
  * tuple(s) as locked. If any tuple in the updated chain is being deleted
  * concurrently (or updated with the key being modified), sleep until the
  * transaction doing it is finished.
@@ -5798,7 +5798,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 		/*
 		 * NB -- some of these transformations are only valid because we
 		 * know the return Xid is a tuple updater (i.e. not merely a
-		 * locker.) Also note that the only reason we don't explicitely
+		 * locker.) Also note that the only reason we don't explicitly
 		 * worry about HEAP_KEYS_UPDATED is because it lives in
 		 * t_infomask2 rather than t_infomask.
 		 */
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 951f3f1a489..7672a075c77 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -763,9 +763,9 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
  *
  * Crash-Safety: This module diverts from the usual patterns of doing WAL
  * since it cannot rely on checkpoint flushing out all buffers and thus
- * waiting for exlusive locks on buffers. Usually the XLogInsert() covering
+ * waiting for exclusive locks on buffers. Usually the XLogInsert() covering
  * buffer modifications is performed while the buffer(s) that are being
- * modified are exlusively locked guaranteeing that both the WAL record and
+ * modified are exclusively locked guaranteeing that both the WAL record and
  * the modified heap are on either side of the checkpoint. But since the
  * mapping files we log aren't in shared_buffers that interlock doesn't work.
  *
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index f1bb787949c..6946530ed1b 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -931,7 +931,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
 
 	/*
 	 * The TID qual expressions will be computed once, any other baserestrict
-	 * quals once per retrived tuple.
+	 * quals once per retrieved tuple.
 	 */
 	cost_qual_eval(&tid_qual_cost, tidquals, root);
 
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index c0314ee5322..1512e9ddc6d 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -819,7 +819,7 @@ format_operator_internal(Oid operator_oid, bool force_qualify)
 
 	/*
 	 * Would this oper be found (given the right args) by regoperatorin?
-	 * If not, or if caller explicitely requests it, we need to qualify
+	 * If not, or if caller explicitly requests it, we need to qualify
 	 * it.
 	 */
 	if (force_qualify || !OperatorIsVisible(operator_oid))