Diffstat (limited to 'src/backend')

 -rw-r--r--  src/backend/access/brin/brin_tuple.c       |  4
 -rw-r--r--  src/backend/access/nbtree/nbtree.c         |  4
 -rw-r--r--  src/backend/access/nbtree/nbtxlog.c        |  2
 -rw-r--r--  src/backend/catalog/heap.c                 |  5
 -rw-r--r--  src/backend/executor/execExpr.c            | 12
 -rw-r--r--  src/backend/libpq/be-fsstubs.c             |  2
 -rw-r--r--  src/backend/nodes/tidbitmap.c              |  2
 -rw-r--r--  src/backend/optimizer/path/equivclass.c    |  9
 -rw-r--r--  src/backend/optimizer/plan/createplan.c    |  2
 -rw-r--r--  src/backend/optimizer/util/clauses.c       |  8
 -rw-r--r--  src/backend/parser/analyze.c               |  2
 -rw-r--r--  src/backend/parser/gram.y                  |  2
 -rw-r--r--  src/backend/parser/parse_target.c          |  6
 -rw-r--r--  src/backend/postmaster/bgworker.c          |  2
 -rw-r--r--  src/backend/replication/logical/logical.c  |  2
 -rw-r--r--  src/backend/replication/slot.c             |  4
 -rw-r--r--  src/backend/rewrite/rewriteHandler.c       | 10
 -rw-r--r--  src/backend/storage/buffer/freelist.c      |  2
 -rw-r--r--  src/backend/storage/buffer/localbuf.c      |  2
 -rw-r--r--  src/backend/storage/ipc/barrier.c          |  6
 -rw-r--r--  src/backend/utils/Gen_dummy_probes.pl      |  2
 -rw-r--r--  src/backend/utils/adt/arrayfuncs.c         |  2
 -rw-r--r--  src/backend/utils/adt/date.c               |  2
 -rw-r--r--  src/backend/utils/adt/datetime.c           |  2
 -rw-r--r--  src/backend/utils/adt/oracle_compat.c      |  2
 -rw-r--r--  src/backend/utils/cache/plancache.c        |  2
 -rw-r--r--  src/backend/utils/mmgr/dsa.c               |  2
 -rw-r--r--  src/backend/utils/mmgr/freepage.c          |  2

 28 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c
index 5abb472ee45..2b3861710c3 100644
--- a/src/backend/access/brin/brin_tuple.c
+++ b/src/backend/access/brin/brin_tuple.c
@@ -1,5 +1,5 @@
/*
- * brin_tuples.c
+ * brin_tuple.c
* Method implementations for tuples in BRIN indexes.
*
* Intended usage is that code outside this file only deals with
@@ -207,7 +207,7 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
/*
* Note that we reverse the sense of null bits in this module: we
* store a 1 for a null attribute rather than a 0. So we must reverse
- * the sense of the att_isnull test in br_deconstruct_tuple as well.
+ * the sense of the att_isnull test in brin_deconstruct_tuple as well.
*/
bitP = ((bits8 *) ((char *) rettuple + SizeOfBrinTuple)) - 1;
bitmask = HIGHBIT;
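
The reversed-convention note above is easy to trip over, so here is a minimal
standalone sketch of the inverted test. brin_style_att_isnull() is a
hypothetical helper invented for illustration, not a PostgreSQL macro: heap
tuples treat a 0 bit as null, while this BRIN-style bitmap treats a 1 bit as
null.

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint8_t bits8;

    /*
     * Test a BRIN-style null bitmap, where a 1 bit marks a NULL
     * attribute.  Heap tuples use the opposite convention (att_isnull()
     * reports null for a 0 bit), hence the reversed sense noted above.
     */
    static bool
    brin_style_att_isnull(int attnum, const bits8 *nullbits)
    {
        /* the bit for attribute N lives in byte N/8, at position N%8 */
        return (nullbits[attnum >> 3] & (1 << (attnum & 0x07))) != 0;
    }
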
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 85e54ac44b8..4cfd5289ad7 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -621,8 +621,8 @@ btparallelrescan(IndexScanDesc scan)
/*
* _bt_parallel_seize() -- Begin the process of advancing the scan to a new
- * page. Other scans must wait until we call bt_parallel_release() or
- * bt_parallel_done().
+ * page. Other scans must wait until we call _bt_parallel_release()
+ * or _bt_parallel_done().
*
* The return value is true if we successfully seized the scan and false
* if we did not. The latter case occurs if no pages remain for the current
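
As a rough illustration of the seize/release protocol this comment describes
(one worker at a time advances the scan; the others wait for a release or for
the scan to finish), here is a toy pthread analogue. All names here are
invented for the sketch; the real code coordinates through a shared
BTParallelScanDesc, not a mutex/condvar pair.

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct
    {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        bool            seized;     /* some worker is choosing the next page */
        bool            done;       /* no pages remain */
        unsigned long   next_page;
    } ToyParallelScan;

    /* Returns false if the scan is finished; otherwise the caller holds
     * the "seized" token and must call scan_release() or scan_done(). */
    static bool
    scan_seize(ToyParallelScan *ps, unsigned long *page)
    {
        pthread_mutex_lock(&ps->mutex);
        while (ps->seized && !ps->done)
            pthread_cond_wait(&ps->cond, &ps->mutex);
        if (ps->done)
        {
            pthread_mutex_unlock(&ps->mutex);
            return false;
        }
        ps->seized = true;
        *page = ps->next_page;
        pthread_mutex_unlock(&ps->mutex);
        return true;
    }

    /* Publish the next page and let another worker seize the scan. */
    static void
    scan_release(ToyParallelScan *ps, unsigned long next_page)
    {
        pthread_mutex_lock(&ps->mutex);
        ps->next_page = next_page;
        ps->seized = false;
        pthread_cond_broadcast(&ps->cond);
        pthread_mutex_unlock(&ps->mutex);
    }

    /* Mark the scan finished, releasing every waiting worker. */
    static void
    scan_done(ToyParallelScan *ps)
    {
        pthread_mutex_lock(&ps->mutex);
        ps->done = true;
        ps->seized = false;
        pthread_cond_broadcast(&ps->cond);
        pthread_mutex_unlock(&ps->mutex);
    }
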
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 6532a25d3d4..3147ea47268 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -181,7 +181,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, XLogReaderState *record)
if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum,
false, false) == InvalidOffsetNumber)
- elog(PANIC, "btree_insert_redo: failed to add item");
+ elog(PANIC, "btree_xlog_insert: failed to add item");
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 86820eecfc7..3b8c8b193a7 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -1588,9 +1588,8 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
/*
* Grab an exclusive lock on the target table, which we will NOT release
* until end of transaction. (In the simple case where we are directly
- * dropping this column, AlterTableDropColumn already did this ... but
- * when cascading from a drop of some other object, we may not have any
- * lock.)
+ * dropping this column, ATExecDropColumn already did this ... but when
+ * cascading from a drop of some other object, we may not have any lock.)
*/
rel = relation_open(relid, AccessExclusiveLock);
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index e4a6c20ed02..e4e05753eee 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -1200,12 +1200,12 @@ ExecInitExprRec(Expr *node, ExprState *state,
* field's values[]/nulls[] entries as both the caseval
* source and the result address for this subexpression.
* That's okay only because (1) both FieldStore and
- * ArrayRef evaluate their arg or refexpr inputs first,
- * and (2) any such CaseTestExpr is directly the arg or
- * refexpr input. So any read of the caseval will occur
- * before there's a chance to overwrite it. Also, if
- * multiple entries in the newvals/fieldnums lists target
- * the same field, they'll effectively be applied
+ * SubscriptingRef evaluate their arg or refexpr inputs
+ * first, and (2) any such CaseTestExpr is directly the
+ * arg or refexpr input. So any read of the caseval will
+ * occur before there's a chance to overwrite it. Also,
+ * if multiple entries in the newvals/fieldnums lists
+ * target the same field, they'll effectively be applied
* left-to-right which is what we want.
*/
save_innermost_caseval = state->innermost_caseval;
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 68f83a9bfd8..97add3f257d 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -62,7 +62,7 @@
* A non-null entry is a pointer to a LargeObjectDesc allocated in the
* LO private memory context "fscxt". The cookies array itself is also
* dynamically allocated in that context. Its current allocated size is
- * cookies_len entries, of which any unused entries will be NULL.
+ * cookies_size entries, of which any unused entries will be NULL.
*/
static LargeObjectDesc **cookies = NULL;
static int cookies_size = 0;
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index 9b913feb896..bf534599969 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -934,7 +934,7 @@ tbm_extract_page_tuple(PagetableEntry *page, TBMIterateResult *output)
}
/*
- * tbm_advance_schunkbit - Advance the chunkbit
+ * tbm_advance_schunkbit - Advance the schunkbit
*/
static inline void
tbm_advance_schunkbit(PagetableEntry *chunk, int *schunkbitp)
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index b50e9ccdf14..688d9b07075 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -929,11 +929,10 @@ generate_base_implied_equalities_no_const(PlannerInfo *root,
/*
* We scan the EC members once and track the last-seen member for each
* base relation. When we see another member of the same base relation,
- * we generate "prev_mem = cur_mem". This results in the minimum number
- * of derived clauses, but it's possible that it will fail when a
- * different ordering would succeed. XXX FIXME: use a UNION-FIND
- * algorithm similar to the way we build merged ECs. (Use a list-of-lists
- * for each rel.)
+ * we generate "prev_em = cur_em". This results in the minimum number of
+ * derived clauses, but it's possible that it will fail when a different
+ * ordering would succeed. XXX FIXME: use a UNION-FIND algorithm similar
+ * to the way we build merged ECs. (Use a list-of-lists for each rel.)
*/
prev_ems = (EquivalenceMember **)
palloc0(root->simple_rel_array_size * sizeof(EquivalenceMember *));
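
The XXX FIXME points at union-find; for readers unfamiliar with it, a minimal
sketch over plain integers (the real fix would operate on EquivalenceMembers,
with a list-of-lists per relation as the comment suggests):

    #include <stdbool.h>

    /* Minimal union-find with path halving.  parent[] must be
     * initialized so that parent[i] == i for every element. */
    static int
    uf_find(int *parent, int x)
    {
        while (parent[x] != x)
        {
            parent[x] = parent[parent[x]];      /* path halving */
            x = parent[x];
        }
        return x;
    }

    /* Merge the sets containing a and b; returns true if they were
     * previously disjoint, i.e. a new "a = b" clause would be needed. */
    static bool
    uf_union(int *parent, int a, int b)
    {
        int ra = uf_find(parent, a);
        int rb = uf_find(parent, b);

        if (ra == rb)
            return false;
        parent[rb] = ra;
        return true;
    }
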
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 608d5adfed2..12fba56285d 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -3911,7 +3911,7 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
}
/*
- * create_custom_plan
+ * create_customscan_plan
*
* Transform a CustomPath into a Plan.
*/
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 2e84d6b3b4f..d78f4e64c1b 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -3409,10 +3409,10 @@ eval_const_expressions_mutator(Node *node,
{
/*
* This case could be folded into the generic handling used
- * for ArrayRef etc. But because the simplification logic is
- * so trivial, applying evaluate_expr() to perform it would be
- * a heavy overhead. BooleanTest is probably common enough to
- * justify keeping this bespoke implementation.
+ * for SubscriptingRef etc. But because the simplification
+ * logic is so trivial, applying evaluate_expr() to perform it
+ * would be a heavy overhead. BooleanTest is probably common
+ * enough to justify keeping this bespoke implementation.
*/
BooleanTest *btest = (BooleanTest *) node;
BooleanTest *newbtest;
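
The "trivial simplification logic" amounts to three-valued logic applied to a
known constant. A standalone sketch of the truth rules (TriBool is an invented
stand-in for a constant-folded Datum/isnull pair, not a planner type):

    #include <stdbool.h>

    typedef enum { TRI_FALSE, TRI_TRUE, TRI_NULL } TriBool;

    /* IS [NOT] TRUE/FALSE/UNKNOWN never return NULL, which is why the
     * folding over a constant argument is trivial. */
    static bool tri_is_true(TriBool v)        { return v == TRI_TRUE; }
    static bool tri_is_false(TriBool v)       { return v == TRI_FALSE; }
    static bool tri_is_unknown(TriBool v)     { return v == TRI_NULL; }
    static bool tri_is_not_true(TriBool v)    { return v != TRI_TRUE; }
    static bool tri_is_not_false(TriBool v)   { return v != TRI_FALSE; }
    static bool tri_is_not_unknown(TriBool v) { return v != TRI_NULL; }
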
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index b13c2461835..345a8e61977 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -2082,7 +2082,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
* Select common collation. A common collation is required for
* all set operators except UNION ALL; see SQL:2008 7.13 <query
* expression> Syntax Rule 15c. (If we fail to identify a common
- * collation for a UNION ALL column, the curCollations element
+ * collation for a UNION ALL column, the colCollations element
* will be set to InvalidOid, which may result in a runtime error
* if something at a higher query level wants to use the column's
* collation.)
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 8311b1dd467..208b4a1f28a 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -12542,7 +12542,7 @@ SimpleTypename:
* Note that ConstInterval is not included here since it must
* be pushed up higher in the rules to accommodate the postfix
* options (e.g. INTERVAL '1' YEAR). Likewise, we have to handle
- * the generic-type-name case in AExprConst to avoid premature
+ * the generic-type-name case in AexprConst to avoid premature
* reduce/reduce conflicts against function names.
*/
ConstTypename:
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index ba470366e10..b70d92b9550 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -695,9 +695,9 @@ transformAssignmentIndirection(ParseState *pstate,
/*
* Set up a substitution. We abuse CaseTestExpr for this. It's safe
* to do so because the only nodes that will be above the CaseTestExpr
- * in the finished expression will be FieldStore and ArrayRef nodes.
- * (There could be other stuff in the tree, but it will be within
- * other child fields of those node types.)
+ * in the finished expression will be FieldStore and SubscriptingRef
+ * nodes. (There could be other stuff in the tree, but it will be
+ * within other child fields of those node types.)
*/
CaseTestExpr *ctest = makeNode(CaseTestExpr);
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index f5db5a8c4ab..b66b517aca9 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -525,7 +525,7 @@ ResetBackgroundWorkerCrashTimes(void)
if (rw->rw_worker.bgw_restart_time == BGW_NEVER_RESTART)
{
/*
- * Workers marked BGW_NVER_RESTART shouldn't get relaunched after
+ * Workers marked BGW_NEVER_RESTART shouldn't get relaunched after
* the crash, so forget about them. (If we wait until after the
* crash to forget about them, and they are parallel workers,
* parallel_terminate_count will get incremented after we've
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index bbd38c06d19..9853be6d1c2 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -114,7 +114,7 @@ CheckLogicalDecodingRequirements(void)
}
/*
- * Helper function for CreateInitialDecodingContext() and
+ * Helper function for CreateInitDecodingContext() and
* CreateDecodingContext() performing common tasks.
*/
static LogicalDecodingContext *
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 55c306e4654..3861b8f583c 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -1334,7 +1334,9 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
return;
}
- /* Check CreateSlot() for the reasoning of using a crit. section. */
+ /*
+ * Check CreateSlotOnDisk() for the reasoning of using a critical section.
+ */
START_CRIT_SECTION();
fsync_fname(path, false);
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index ea40c287333..5b047d16629 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -980,11 +980,11 @@ process_matched_tle(TargetEntry *src_tle,
*
* As a further complication, the destination column might be a domain,
* resulting in each assignment containing a CoerceToDomain node over a
- * FieldStore or ArrayRef. These should have matching target domains,
- * so we strip them and reconstitute a single CoerceToDomain over the
- * combined FieldStore/ArrayRef nodes. (Notice that this has the result
- * that the domain's checks are applied only after we do all the field or
- * element updates, not after each one. This is arguably desirable.)
+ * FieldStore or SubscriptingRef. These should have matching target
+ * domains, so we strip them and reconstitute a single CoerceToDomain over
+ * the combined FieldStore/SubscriptingRef nodes. (Notice that this has the
+ * result that the domain's checks are applied only after we do all the
+ * field or element updates, not after each one. This is arguably desirable.)
*----------
*/
src_expr = (Node *) src_tle->expr;
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 06659ab2653..c8d4e6f9e42 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -220,7 +220,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state)
* If asked, we need to waken the bgwriter. Since we don't want to rely on
* a spinlock for this we force a read from shared memory once, and then
* set the latch based on that value. We need to go through that length
- * because otherwise bgprocno might be reset while/after we check because
+ * because otherwise bgwprocno might be reset while/after we check because
* the compiler might just reread from memory.
*
* This can possibly set the latch of the wrong process if the bgwriter
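
The "force a read from shared memory once" trick is the read-once idiom: copy
the shared field through a volatile-qualified lvalue into a local, so the
compiler cannot legally re-read it after the check. A generic sketch — the
struct, macro, and set_latch() are invented stand-ins, not the backend's
actual spelling:

    /* Invented stand-ins for the shared structure and SetLatch(). */
    struct shared_state { int bgwprocno; };
    extern struct shared_state *shared;
    extern void set_latch(int procno);

    /* Read-once idiom: the volatile cast forces exactly one load of the
     * shared field, so the value tested is the value used. */
    #define READ_ONCE_INT(v) (*((volatile int *) &(v)))

    void
    maybe_wake_bgwriter(void)
    {
        int procno = READ_ONCE_INT(shared->bgwprocno);

        if (procno != -1)
        {
            /* not a strict interlock: another backend may race us,
             * which is harmless -- setting a latch twice is cheap */
            shared->bgwprocno = -1;
            set_latch(procno);
        }
    }
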
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index c462ea82a92..391b6d6e16f 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -537,7 +537,7 @@ GetLocalBufferStorage(void)
/*
* CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
*
- * This is just like CheckBufferLeaks(), but for local buffers.
+ * This is just like CheckForBufferLeaks(), but for local buffers.
*/
static void
CheckForLocalBufferLeaks(void)
diff --git a/src/backend/storage/ipc/barrier.c b/src/backend/storage/ipc/barrier.c
index 69ed034e528..83cbe33107c 100644
--- a/src/backend/storage/ipc/barrier.c
+++ b/src/backend/storage/ipc/barrier.c
@@ -226,9 +226,9 @@ BarrierAttach(Barrier *barrier)
}
/*
- * Detach from a barrier. This may release other waiters from BarrierWait and
- * advance the phase if they were only waiting for this backend. Return true
- * if this participant was the last to detach.
+ * Detach from a barrier. This may release other waiters from
+ * BarrierArriveAndWait() and advance the phase if they were only waiting for
+ * this backend. Return true if this participant was the last to detach.
*/
bool
BarrierDetach(Barrier *barrier)
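
To make the detach semantics concrete, here is a toy dynamic barrier built on
pthreads with the property the comment describes: detaching advances the phase
when every remaining participant has already arrived. This is an illustration
only, not PostgreSQL's Barrier implementation.

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct
    {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        int             participants;   /* currently attached */
        int             arrived;        /* arrived in the current phase */
        int             phase;
    } ToyBarrier;

    static void
    toy_barrier_advance(ToyBarrier *b)  /* caller holds mutex */
    {
        b->arrived = 0;
        b->phase++;
        pthread_cond_broadcast(&b->cond);
    }

    static void
    toy_barrier_attach(ToyBarrier *b)
    {
        pthread_mutex_lock(&b->mutex);
        b->participants++;
        pthread_mutex_unlock(&b->mutex);
    }

    /* Block until every attached participant has arrived. */
    static void
    toy_barrier_wait(ToyBarrier *b)
    {
        pthread_mutex_lock(&b->mutex);
        if (++b->arrived == b->participants)
            toy_barrier_advance(b);
        else
        {
            int phase = b->phase;

            while (phase == b->phase)
                pthread_cond_wait(&b->cond, &b->mutex);
        }
        pthread_mutex_unlock(&b->mutex);
    }

    /* Returns true if this was the last attached participant.  If all
     * remaining participants have already arrived, detaching releases
     * them and advances the phase -- the behavior described above. */
    static bool
    toy_barrier_detach(ToyBarrier *b)
    {
        bool last;

        pthread_mutex_lock(&b->mutex);
        b->participants--;
        last = (b->participants == 0);
        if (!last && b->arrived == b->participants)
            toy_barrier_advance(b);
        pthread_mutex_unlock(&b->mutex);
        return last;
    }
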
diff --git a/src/backend/utils/Gen_dummy_probes.pl b/src/backend/utils/Gen_dummy_probes.pl
index a662775cc12..a4b58ad69f5 100644
--- a/src/backend/utils/Gen_dummy_probes.pl
+++ b/src/backend/utils/Gen_dummy_probes.pl
@@ -14,7 +14,7 @@
#
#-------------------------------------------------------------------------
-# turn off perlcritic for autogened code
+# turn off perlcritic for autogenerated code
## no critic
$0 =~ s/^.*?(\w+)[\.\w+]*$/$1/;
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index f9f621e7591..8fcdf829229 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -1322,7 +1322,7 @@ array_recv(PG_FUNCTION_ARGS)
lBound[i] = pq_getmsgint(buf, 4);
/*
- * Check overflow of upper bound. (ArrayNItems() below checks that
+ * Check overflow of upper bound. (ArrayGetNItems() below checks that
* dim[i] >= 0)
*/
if (dim[i] != 0)
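
The hazard being guarded against is that the upper bound lBound[i] + dim[i] - 1
can exceed INT_MAX even when both inputs are valid ints. A standalone version
of that test, rearranged so the check itself cannot overflow:

    #include <limits.h>
    #include <stdbool.h>

    /* Returns true if a dimension with lower bound lb and dim elements
     * would have an upper bound (lb + dim - 1) exceeding INT_MAX.
     * dim == 0 has no upper bound to check, matching the code above. */
    static bool
    upper_bound_overflows(int lb, int dim)
    {
        return dim != 0 && lb > INT_MAX - (dim - 1);
    }
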
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index e440a4fedd7..4b1afb10f92 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1374,7 +1374,7 @@ time_scale(PG_FUNCTION_ARGS)
/* AdjustTimeForTypmod()
* Force the precision of the time value to a specified value.
- * Uses *exactly* the same code as in AdjustTimestampForTypemod()
+ * Uses *exactly* the same code as in AdjustTimestampForTypmod()
* but we make a separate copy because those types do not
* have a fundamental tie together but rather a coincidence of
* implementation. - thomas
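
Both routines round a microsecond count to the typmod's fractional-digit
precision. A standalone sketch of that arithmetic, assuming 6 maximum
fractional digits (integer timestamps); the backend drives the same result
from precomputed scale tables rather than a loop:

    #include <stdint.h>

    /* Round usec (a time in microseconds) to "precision" fractional
     * digits, 0..6, rounding half away from zero. */
    static int64_t
    round_time_to_precision(int64_t usec, int precision)
    {
        int64_t scale = 1;

        for (int i = precision; i < 6; i++)
            scale *= 10;

        if (usec >= 0)
            return ((usec + scale / 2) / scale) * scale;
        return -((-usec + scale / 2) / scale) * scale;
    }
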
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index e9add385ba5..54ea69f7f16 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -3029,7 +3029,7 @@ DecodeSpecial(int field, char *lowtoken, int *val)
}
-/* ClearPgTM
+/* ClearPgTm
*
* Zero out a pg_tm and associated fsec_t
*/
diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c
index f78f5cda53e..0fdfee58253 100644
--- a/src/backend/utils/adt/oracle_compat.c
+++ b/src/backend/utils/adt/oracle_compat.c
@@ -527,7 +527,7 @@ dotrim(const char *string, int stringlen,
*
* Syntax:
*
- * bytea byteatrim(byta string, bytea set)
+ * bytea byteatrim(bytea string, bytea set)
*
* Purpose:
*
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 4c114439cf9..abc30628920 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -504,7 +504,7 @@ DropCachedPlan(CachedPlanSource *plansource)
plansource->is_saved = false;
}
- /* Decrement generic CachePlan's refcount and drop if no longer needed */
+ /* Decrement generic CachedPlan's refcount and drop if no longer needed */
ReleaseGenericPlan(plansource);
/* Mark it no longer valid */
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index 4b826cdaa50..900cd8357ca 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -2258,7 +2258,7 @@ check_for_freed_segments(dsa_area *area)
}
/*
- * Workhorse for check_for_free_segments(), and also used directly in path
+ * Workhorse for check_for_freed_segments(), and also used directly in path
* where the area lock is already held. This should be called after acquiring
* the lock but before looking up any segment by index number, to make sure we
* unmap any stale segments that might have previously had the same index as a
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 0c9e98973ab..9a1ae13ab1d 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -231,7 +231,7 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
/*
* FreePageManagerGetInternal may have set contiguous_pages_dirty.
- * Recompute contigous_pages if so.
+ * Recompute contiguous_pages if so.
*/
FreePageManagerUpdateLargest(fpm);