Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/common/bufmask.c       2
-rw-r--r--  src/backend/access/gin/gindatapage.c      2
-rw-r--r--  src/backend/access/heap/rewriteheap.c     4
-rw-r--r--  src/backend/access/spgist/spgvacuum.c     2
-rw-r--r--  src/backend/access/transam/xact.c         2
-rw-r--r--  src/backend/access/transam/xlog.c        15
-rw-r--r--  src/backend/catalog/pg_aggregate.c        2
-rw-r--r--  src/backend/commands/dbcommands.c         2
-rw-r--r--  src/backend/commands/operatorcmds.c       3
-rw-r--r--  src/backend/libpq/auth.c                 11
-rw-r--r--  src/backend/postmaster/bgwriter.c         8
-rw-r--r--  src/backend/replication/walsender.c       4
-rw-r--r--  src/backend/storage/ipc/procarray.c       2
-rw-r--r--  src/backend/storage/ipc/standby.c         2
-rw-r--r--  src/backend/storage/lmgr/lwlock.c         2
-rw-r--r--  src/backend/storage/smgr/md.c             2
-rw-r--r--  src/backend/tcop/pquery.c                 4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c        2
-rw-r--r--  src/backend/utils/adt/like_match.c        3
-rw-r--r--  src/backend/utils/mmgr/aset.c             2
20 files changed, 36 insertions, 40 deletions
diff --git a/src/backend/access/common/bufmask.c b/src/backend/access/common/bufmask.c
index ee1c6f234a2..bcd9bd007b7 100644
--- a/src/backend/access/common/bufmask.c
+++ b/src/backend/access/common/bufmask.c
@@ -20,7 +20,7 @@
#include "access/bufmask.h"
/*
- * mask_page_lsn
+ * mask_page_lsn_and_checksum
*
* In consistency checks, the LSN of the two pages compared will likely be
* different because of concurrent operations when the WAL is generated and
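The renamed mask_page_lsn_and_checksum blanks the two fields that may legitimately differ between a primary page and its replayed twin before the byte-compare. A minimal standalone C sketch of that idea; the MiniPage layout and all names below are invented, not PostgreSQL's PageHeaderData:

#include <stdint.h>
#include <string.h>

typedef struct MiniPage
{
    uint64_t lsn;               /* last WAL position that touched the page */
    uint16_t checksum;          /* covers the whole page, LSN included */
    char     data[8192 - sizeof(uint64_t) - sizeof(uint16_t)];
} MiniPage;

/* Blank the fields that may legitimately differ between primary and replay. */
static void
mask_page_lsn_and_checksum_sketch(MiniPage *page)
{
    page->lsn = 0;
    page->checksum = 0;
}

static int
pages_equivalent(MiniPage *a, MiniPage *b)
{
    mask_page_lsn_and_checksum_sketch(a);
    mask_page_lsn_and_checksum_sketch(b);
    return memcmp(a, b, sizeof(MiniPage)) == 0;
}

int
main(void)
{
    static MiniPage a, b;       /* zero-filled, identical payloads */

    a.lsn = 0x12345;            /* the pages differ only in masked fields */
    b.checksum = 42;
    return pages_equivalent(&a, &b) ? 0 : 1;
}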
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 21de8ed714d..e8c34d6b1f6 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -1371,7 +1371,7 @@ disassembleLeaf(Page page)
if (GinPageIsCompressed(page))
{
/*
- * Create a leafSegment entry for each segment.
+ * Create a leafSegmentInfo entry for each segment.
*/
seg = GinDataLeafPageGetPostingList(page);
segbegin = (Pointer) seg;
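disassembleLeaf's loop walks the packed posting-list segments on the page and builds one leafSegmentInfo entry per segment. A standalone sketch of that walk, with an invented on-page layout (a one-byte length prefix per segment) standing in for GIN's real compressed format:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* One entry per on-page segment, analogous to leafSegmentInfo. */
typedef struct SegInfo
{
    const uint8_t *payload;     /* points into the page buffer */
    int            len;
    struct SegInfo *next;
} SegInfo;

static SegInfo *
disassemble(const uint8_t *page, size_t pagelen)
{
    SegInfo  *head = NULL;
    SegInfo **tail = &head;
    const uint8_t *p = page;
    const uint8_t *end = page + pagelen;

    while (p < end)
    {
        int len = *p++;         /* invented format: 1-byte length prefix */

        if (p + len > end)
            break;              /* truncated trailing segment: stop */

        SegInfo *info = malloc(sizeof(SegInfo));

        if (info == NULL)
            break;
        info->payload = p;
        info->len = len;
        info->next = NULL;
        *tail = info;           /* append, preserving on-page order */
        tail = &info->next;
        p += len;
    }
    return head;
}

int
main(void)
{
    /* two segments: 3 payload bytes, then 4 */
    uint8_t page[] = {3, 'a', 'b', 'c', 4, 'd', 'e', 'f', 'g'};

    for (SegInfo *s = disassemble(page, sizeof(page)); s; s = s->next)
        printf("segment of %d bytes\n", s->len);
    return 0;
}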
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 72a448ad316..a17508a82fb 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -237,7 +237,7 @@ static void logical_end_heap_rewrite(RewriteState state);
* new_heap      new, locked heap relation to insert tuples to
* oldest_xmin   xid used by the caller to determine which tuples are dead
* freeze_xid    xid before which tuples will be frozen
- * min_multi     multixact before which multis will be removed
+ * cutoff_multi  multixact before which multis will be removed
* use_wal       should the inserts to the new heap be WAL-logged?
*
* Returns an opaque RewriteState, allocated in current memory context,
@@ -787,7 +787,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
* Instead we simply write the mapping files out to disk, *before* the
* XLogInsert() is performed. That guarantees that either the XLogInsert() is
* inserted after the checkpoint's redo pointer or that the checkpoint (via
- * LogicalRewriteHeapCheckpoint()) has flushed the (partial) mapping file to
+ * CheckPointLogicalRewriteHeap()) has flushed the (partial) mapping file to
* disk. That leaves the tail end that has not yet been flushed open to
* corruption, which is solved by including the current offset in the
* xl_heap_rewrite_mapping records and truncating the mapping file to it
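The ordering being described: the mapping data reaches the file before the WAL record that mentions it exists, and the record carries the file offset so replay can truncate any tail that never made it to disk. An illustrative-only sketch with invented names (log_mapping, MappingWalRecord); in rewriteheap.c durability comes from the next checkpoint via CheckPointLogicalRewriteHeap rather than an immediate fsync:

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

typedef struct MappingWalRecord
{
    off_t flushed_to;           /* mapping-file offset the record vouches for */
} MappingWalRecord;

static int
log_mapping(int mapfd, const void *buf, size_t len)
{
    MappingWalRecord rec;

    /* 1. Write the mapping data *before* the WAL record exists. */
    if (write(mapfd, buf, len) != (ssize_t) len)
        return -1;

    /* 2. Only then emit the record, stamped with the current offset. */
    rec.flushed_to = lseek(mapfd, 0, SEEK_CUR);
    printf("XLogInsert(xl_heap_rewrite_mapping, offset=%lld)\n",
           (long long) rec.flushed_to);
    return 0;
}

int
main(void)
{
    int fd = open("mapping.sketch", O_CREAT | O_WRONLY | O_TRUNC, 0600);

    if (fd < 0 || log_mapping(fd, "old-tid -> new-tid\n", 19) != 0)
        return 1;
    close(fd);
    return 0;
}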
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 2b1662a267d..478d4c0d612 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -842,7 +842,7 @@ spgvacuumscan(spgBulkDeleteState *bds)
}
}
- /* Propagate local lastUsedPage cache to metablock */
+ /* Propagate local lastUsedPages cache to metablock */
SpGistUpdateMetaPage(index);
/*
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index d7930c077de..1bbaeeebf4d 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1988,7 +1988,7 @@ StartTransaction(void)
/*
* Advertise it in the proc array. We assume assignment of
- * LocalTransactionID is atomic, and the backendId should be set already.
+ * localTransactionId is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
MyProc->lxid = vxid.localTransactionId;
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index da3d2509860..f5535238573 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -1796,11 +1796,11 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
do
{
/*
- * See if this insertion is in progress. LWLockWait will wait for
- * the lock to be released, or for the 'value' to be set by a
- * LWLockUpdateVar call. When a lock is initially acquired, its
- * value is 0 (InvalidXLogRecPtr), which means that we don't know
- * where it's inserting yet. We will have to wait for it. If
+ * See if this insertion is in progress. LWLockWaitForVar will
+ * wait for the lock to be released, or for the 'value' to be set
+ * by a LWLockUpdateVar call. When a lock is initially acquired,
+ * its value is 0 (InvalidXLogRecPtr), which means that we don't
+ * know where it's inserting yet. We will have to wait for it. If
* it's a small insertion, the record will most likely fit on the
* same page and the inserter will release the lock without ever
* calling LWLockUpdateVar. But if it has to sleep, it will
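A pthread-based analogue of the LWLockWaitForVar/LWLockUpdateVar handshake this comment describes; it is not lwlock.c's implementation (which uses atomics and wait queues), and all names are invented. The waiter sleeps until either the holder advertises a position at or past the point it cares about, or the lock is released; 0 plays the role of InvalidXLogRecPtr:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct InsertLock
{
    pthread_mutex_t mutex;
    pthread_cond_t  changed;    /* signalled on update *and* on release */
    int             held;
    uint64_t        value;      /* 0 plays the role of InvalidXLogRecPtr */
} InsertLock;

/* Waiter: sleep until the lock is free or its value reaches 'upto'. */
static uint64_t
wait_for_var(InsertLock *lk, uint64_t upto)
{
    uint64_t result;

    pthread_mutex_lock(&lk->mutex);
    while (lk->held && (lk->value == 0 || lk->value < upto))
        pthread_cond_wait(&lk->changed, &lk->mutex);
    result = lk->value;
    pthread_mutex_unlock(&lk->mutex);
    return result;
}

/* Holder: advertise how far it has gotten, waking any waiters. */
static void
update_var(InsertLock *lk, uint64_t value)
{
    pthread_mutex_lock(&lk->mutex);
    lk->value = value;
    pthread_cond_broadcast(&lk->changed);
    pthread_mutex_unlock(&lk->mutex);
}

/* Holder: release; a small insertion may release without ever updating. */
static void
release_lock(InsertLock *lk)
{
    pthread_mutex_lock(&lk->mutex);
    lk->held = 0;
    pthread_cond_broadcast(&lk->changed);
    pthread_mutex_unlock(&lk->mutex);
}

static InsertLock g_lock = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, 0
};

static void *
holder(void *arg)
{
    (void) arg;
    update_var(&g_lock, 1000);  /* "I am inserting at position 1000" */
    release_lock(&g_lock);
    return NULL;
}

int
main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, holder, NULL);
    printf("saw insert position %llu\n",
           (unsigned long long) wait_for_var(&g_lock, 500));
    pthread_join(t, NULL);
    return 0;
}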
@@ -6024,7 +6024,10 @@ recoveryApplyDelay(XLogReaderState *record)
TimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime,
&secs, &microsecs);
- /* NB: We're ignoring waits below min_apply_delay's resolution. */
+ /*
+ * NB: We're ignoring waits below recovery_min_apply_delay's
+ * resolution.
+ */
if (secs <= 0 && microsecs / 1000 <= 0)
break;
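The check above splits the remaining delay into whole seconds plus leftover microseconds and treats anything under one whole millisecond as already elapsed. A standalone sketch of that arithmetic, with timestamps reduced to plain microsecond counts (delay_remaining is an invented name):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Would recoveryApplyDelay-style code still wait, given these timestamps? */
static bool
delay_remaining(int64_t now_us, int64_t until_us)
{
    int64_t diff = until_us - now_us;
    long    secs = (long) (diff / 1000000);
    int     microsecs = (int) (diff % 1000000);

    /* ignore waits below recovery_min_apply_delay's millisecond resolution */
    return !(secs <= 0 && microsecs / 1000 <= 0);
}

int
main(void)
{
    /* 900us left: under one millisecond, so treated as already elapsed */
    printf("%s\n", delay_remaining(0, 900) ? "wait" : "done");
    /* 1.5ms left: still worth sleeping on */
    printf("%s\n", delay_remaining(0, 1500) ? "wait" : "done");
    return 0;
}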
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 7cab039dedc..201242e7965 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -733,7 +733,7 @@ AggregateCreate(const char *aggName,
* Create dependencies for the aggregate (above and beyond those already
* made by ProcedureCreate). Note: we don't need an explicit dependency
* on aggTransType since we depend on it indirectly through transfn.
- * Likewise for aggmTransType using the mtransfunc, if it exists.
+ * Likewise for aggmTransType using the mtransfn, if it exists.
*
* If we're replacing an existing definition, ProcedureCreate deleted all
* our existing dependencies, so we have to do the same things here either
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index fc1e1564a61..95881a85509 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -1854,7 +1854,7 @@ get_db_info(const char *name, LOCKMODE lockmode,
/* limit of frozen XIDs */
if (dbFrozenXidP)
*dbFrozenXidP = dbform->datfrozenxid;
- /* minimum MultixactId */
+ /* minimum MultiXactId */
if (dbMinMultiP)
*dbMinMultiP = dbform->datminmxid;
/* default tablespace for this database */
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 17f54410a00..d733aa4826b 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -27,9 +27,6 @@
* "create operator":
* operators
*
- * Most of the parse-tree manipulation routines are defined in
- * commands/manip.c.
- *
*-------------------------------------------------------------------------
*/
#include "postgres.h"
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 9358219aa60..e17fd9a317f 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -1796,14 +1796,9 @@ interpret_ident_response(const char *ident_response,
/*
- * Talk to the ident server on host "remote_ip_addr" and find out who
- * owns the tcp connection from his port "remote_port" to port
- * "local_port_addr" on host "local_ip_addr". Return the user name the
- * ident server gives as "*ident_user".
- *
- * IP addresses and port numbers are in network byte order.
- *
- * But iff we're unable to get the information from ident, return false.
+ * Talk to the ident server on "remote_addr" and find out who
+ * owns the tcp connection to "local_addr".
+ * If the username is successfully retrieved, check the usermap.
*
* XXX: Using WaitLatchOrSocket() and doing a CHECK_FOR_INTERRUPTS() if the
* latch was set would improve the responsiveness to timeouts/cancellations.
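For reference, the exchange the rewritten comment summarizes is the RFC 1413 ident protocol: connect to port 113 on the remote host and ask who owns the TCP connection named by the port pair. A rough standalone sketch, not PostgreSQL's ident_inet() (which additionally handles timeouts, IPv6, and the usermap check):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define IDENT_PORT 113

/*
 * Ask the remote host's ident service who owns the TCP connection
 * <remote_port, local_port>.  Returns 0 and fills 'response' on success.
 */
static int
ident_query(struct in_addr remote, int remote_port, int local_port,
            char *response, size_t len)
{
    struct sockaddr_in addr;
    char    query[64];
    ssize_t n;
    int     sock = socket(AF_INET, SOCK_STREAM, 0);

    if (sock < 0)
        return -1;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(IDENT_PORT);
    addr.sin_addr = remote;
    if (connect(sock, (struct sockaddr *) &addr, sizeof(addr)) < 0)
    {
        close(sock);
        return -1;
    }

    /* "<port on queried host> , <port on querying host>" CR LF */
    snprintf(query, sizeof(query), "%d, %d\r\n", remote_port, local_port);
    if (write(sock, query, strlen(query)) < 0)
    {
        close(sock);
        return -1;
    }

    /* a reply looks like "5432, 43210 : USERID : UNIX : alice" */
    n = read(sock, response, len - 1);
    close(sock);
    if (n <= 0)
        return -1;
    response[n] = '\0';
    return 0;
}

int
main(void)
{
    struct in_addr remote;
    char    resp[128];

    inet_pton(AF_INET, "127.0.0.1", &remote);
    if (ident_query(remote, 5432, 43210, resp, sizeof(resp)) == 0)
        printf("ident says: %s", resp);
    else
        printf("no ident response\n");
    return 0;
}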
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index e6b6c549de5..8ec16a3fb8d 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -291,10 +291,10 @@ BackgroundWriterMain(void)
* significantly bigger than BgWriterDelay, so we don't complicate the
* overall timeout handling but just assume we're going to get called
* often enough even if hibernation mode is active. It's not that
- * important that log_snap_interval_ms is met strictly. To make sure
- * we're not waking the disk up unnecessarily on an idle system we
- * check whether there has been any WAL inserted since the last time
- * we've logged a running xacts.
+ * important that LOG_SNAPSHOT_INTERVAL_MS is met strictly. To make
+ * sure we're not waking the disk up unnecessarily on an idle system
+ * we check whether there has been any WAL inserted since the last
+ * time we've logged a running xacts.
*
* We do this logging in the bgwriter as it is the only process that
* is run regularly and returns to its mainloop all the time. E.g.
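A sketch of the idle-system test the rewritten comment describes, with invented names and types: a running-xacts snapshot is only logged when the interval has (loosely) elapsed and WAL has actually advanced since the last snapshot, so an idle system's disk is left alone:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t WalPosSketch;  /* stand-in for XLogRecPtr */

static WalPosSketch last_snapshot_lsn = 0;

static bool
should_log_running_xacts(WalPosSketch insert_lsn,
                         int64_t now_ms, int64_t last_log_ms,
                         int64_t interval_ms)
{
    /* interval not (even loosely) elapsed yet */
    if (now_ms - last_log_ms < interval_ms)
        return false;

    /* elapsed, but no WAL inserted since the last snapshot: stay quiet */
    if (insert_lsn == last_snapshot_lsn)
        return false;

    last_snapshot_lsn = insert_lsn;
    return true;
}

int
main(void)
{
    /* WAL advanced and 20s passed with a 15s interval: log (prints 1) */
    printf("%d\n", should_log_running_xacts(100, 20000, 0, 15000));
    /* another 20s, but the insert position never moved: skip (prints 0) */
    printf("%d\n", should_log_running_xacts(100, 40000, 20000, 15000));
    return 0;
}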
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index e7a59b0a921..e172dad07f4 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -2259,7 +2259,7 @@ WalSndLoop(WalSndSendDataCallback send_data)
WL_SOCKET_READABLE;
/*
- * Use fresh timestamp, not last_processed, to reduce the chance
+ * Use fresh timestamp, not last_processing, to reduce the chance
* of reaching wal_sender_timeout before sending a keepalive.
*/
sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
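The point of passing a freshly taken timestamp: the sleep is bounded by when the next keepalive falls due, measured from "now". A hedged sketch of that calculation (the halving mirrors how keepalives are scheduled at half wal_sender_timeout; everything else WalSndComputeSleeptime considers is omitted, and the names are invented):

#include <stdio.h>

/*
 * Sketch only: milliseconds until the next wakeup, given a *fresh* 'now'.
 * Using a stale timestamp such as last_processing would overshoot the
 * keepalive deadline and risk hitting wal_sender_timeout first.
 */
static long
compute_sleeptime_ms(long now_ms, long last_reply_ms, long timeout_ms)
{
    long keepalive_due = last_reply_ms + timeout_ms / 2;

    return keepalive_due > now_ms ? keepalive_due - now_ms : 0;
}

int
main(void)
{
    /* last reply at t=0, 60s timeout, now t=10s: sleep at most 20s more */
    printf("%ld ms\n", compute_sleeptime_ms(10000, 0, 60000));
    return 0;
}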
@@ -2666,7 +2666,7 @@ XLogSendPhysical(void)
* very close together here so that we'll get a later position if it is
* still moving.
*
- * Because LagTrackerWriter ignores samples when the LSN hasn't advanced,
+ * Because LagTrackerWrite ignores samples when the LSN hasn't advanced,
* this gives us a cheap approximation for the WAL flush time for this
* LSN.
*
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index ae6780011b8..fadab62950d 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -3169,7 +3169,7 @@ DisplayXidCache(void)
*
* When we throw away subXIDs from KnownAssignedXids, we need to keep track of
* that, similarly to tracking overflow of a PGPROC's subxids array. We do
- * that by remembering the lastOverflowedXID, ie the last thrown-away subXID.
+ * that by remembering the lastOverflowedXid, ie the last thrown-away subXID.
* As long as that is within the range of interesting XIDs, we have to assume
* that subXIDs are missing from snapshots. (Note that subXID overflow occurs
* on primary when 65th subXID arrives, whereas on standby it occurs when 64th
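The invariant being documented, as a sketch with invented names: once subXIDs have been thrown away, any snapshot whose xmin does not yet exceed lastOverflowedXid must be treated as suboverflowed, because a discarded subXID could still be running. Plain integer comparisons stand in for the wraparound-aware TransactionIdPrecedesOrEquals used by the real code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;

static Xid lastOverflowedXid = 0;   /* highest subXID thrown away so far */

static void
discard_subxids_up_to(Xid highest_discarded)
{
    if (highest_discarded > lastOverflowedXid)
        lastOverflowedXid = highest_discarded;
}

static bool
snapshot_suboverflowed(Xid snapshot_xmin)
{
    /* discarded subXIDs might still be running as of this xmin */
    return snapshot_xmin <= lastOverflowedXid;
}

int
main(void)
{
    discard_subxids_up_to(4242);
    printf("xmin 4000 -> suboverflowed: %d\n", snapshot_suboverflowed(4000));
    printf("xmin 5000 -> suboverflowed: %d\n", snapshot_suboverflowed(5000));
    return 0;
}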
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 25b7e314afc..01ddffec400 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -99,7 +99,7 @@ InitRecoveryTransactionEnvironment(void)
* Lock a virtual transaction id for Startup process.
*
* We need to do GetNextLocalTransactionId() because
- * SharedInvalBackendInit() leaves localTransactionid invalid and the lock
+ * SharedInvalBackendInit() leaves localTransactionId invalid and the lock
* manager doesn't like that at all.
*
* Note that we don't need to run XactLockTableInsert() because nobody
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index bc1aa88322b..c77d47c01c6 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -232,7 +232,7 @@ LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
static void init_lwlock_stats(void);
static void print_lwlock_stats(int code, Datum arg);
-static lwlock_stats * get_lwlock_stats_entry(LWLock *lockid);
+static lwlock_stats * get_lwlock_stats_entry(LWLock *lock);
static void
init_lwlock_stats(void)
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 52136ad5580..07f3c93d3fe 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -731,7 +731,7 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
* mdnblocks() -- Get the number of blocks stored in a relation.
*
* Important side effect: all active segments of the relation are opened
- * and added to the mdfd_seg_fds array. If this routine has not been
+ * and added to the md_seg_fds array. If this routine has not been
* called, then only segments up to the last one actually touched
* are present in the array.
*/
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 1ed2838ad45..9e48adc53ce 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -1023,8 +1023,8 @@ FillPortalStore(Portal portal, bool isTopLevel)
/*
* Run the portal to completion just as for the default
- * MULTI_QUERY case, but send the primary query's output to the
- * tuplestore. Auxiliary query outputs are discarded. Set the
+ * PORTAL_MULTI_QUERY case, but send the primary query's output to
+ * the tuplestore. Auxiliary query outputs are discarded. Set the
* portal's holdSnapshot to the snapshot used (or a copy of it).
*/
PortalRunMulti(portal, isTopLevel, true,
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 8fcdf829229..ec8fbb9a7da 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -139,7 +139,7 @@ static void array_insert_slice(ArrayType *destArray, ArrayType *origArray,
int *st, int *endp,
int typlen, bool typbyval, char typalign);
static int array_cmp(FunctionCallInfo fcinfo);
-static ArrayType *create_array_envelope(int ndims, int *dimv, int *lbv, int nbytes,
+static ArrayType *create_array_envelope(int ndims, int *dimv, int *lbsv, int nbytes,
Oid elmtype, int dataoffset);
static ArrayType *array_fill_internal(ArrayType *dims, ArrayType *lbs,
Datum value, bool isnull, Oid elmtype,
diff --git a/src/backend/utils/adt/like_match.c b/src/backend/utils/adt/like_match.c
index 9055a938132..5b322559aa4 100644
--- a/src/backend/utils/adt/like_match.c
+++ b/src/backend/utils/adt/like_match.c
@@ -27,7 +27,8 @@
/*
* Originally written by Rich $alz, mirror!rs, Wed Nov 26 19:03:17 EST 1986.
* Rich $alz is now <rsalz@bbn.com>.
- * Special thanks to Lars Mathiesen <thorinn@diku.dk> for the LABORT code.
+ * Special thanks to Lars Mathiesen <thorinn@diku.dk> for the
+ * LIKE_ABORT code.
*
* This code was shamelessly stolen from the "pql" code by myself and
* slightly modified :)
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 6e4a3434394..6b63d6f85d0 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -915,7 +915,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* We could be asking for pretty big blocks here, so cope if malloc
- * fails. But give up if there's less than a meg or so available...
+ * fails. But give up if there's less than 1 MB or so available...
*/
while (block == NULL && blksize > 1024 * 1024)
{
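The loop this comment sits on keeps halving the request on malloc failure and gives up once it drops to roughly 1 MB. A runnable standalone sketch of the same fallback; alloc_block_with_fallback is an invented name, and AllocSetAlloc's surrounding bookkeeping is omitted:

#include <stdio.h>
#include <stdlib.h>

static void *
alloc_block_with_fallback(size_t blksize, size_t required)
{
    void *block = malloc(blksize);

    /* cope with failure of big requests, but stop near the 1 MB floor */
    while (block == NULL && blksize > 1024 * 1024)
    {
        blksize >>= 1;          /* retry with half the size */
        if (blksize < required) /* never drop below what the caller needs */
            break;
        block = malloc(blksize);
    }
    return block;               /* NULL if even ~1 MB was unavailable */
}

int
main(void)
{
    void *b = alloc_block_with_fallback((size_t) 64 * 1024 * 1024, 4096);

    printf(b ? "got a block\n" : "out of memory\n");
    free(b);
    return 0;
}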