author     Andres Freund <andres@anarazel.de>   2015-04-26 18:42:31 +0200
committer  Andres Freund <andres@anarazel.de>   2015-04-26 18:42:31 +0200
commit     6aab1f45acaa4cf90e62357ebdf5e6a38829204e (patch)
tree       ded75b929485ab4331a292150b0823ed2d825fb1 /src/backend
parent     9fe1d9ac6820cff354a5402114b96dae1221eba3 (diff)
Fix various typos and grammar errors in comments.
Author: Dmitriy Olshevskiy
Discussion: 553D00A6.4090205@bk.ru
Diffstat (limited to 'src/backend')
 src/backend/access/brin/brin_tuple.c        | 2 +-
 src/backend/access/nbtree/nbtree.c          | 2 +-
 src/backend/access/transam/twophase.c       | 2 +-
 src/backend/catalog/objectaddress.c         | 4 ++--
 src/backend/commands/indexcmds.c            | 2 +-
 src/backend/executor/nodeModifyTable.c      | 2 +-
 src/backend/postmaster/bgworker.c           | 2 +-
 src/backend/replication/logical/snapbuild.c | 2 +-
 src/backend/storage/lmgr/lwlock.c           | 4 ++--
 src/backend/utils/cache/inval.c             | 2 +-
 10 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c
index 93f00f6a8fe..08fa998a525 100644
--- a/src/backend/access/brin/brin_tuple.c
+++ b/src/backend/access/brin/brin_tuple.c
@@ -304,7 +304,7 @@ brin_free_tuple(BrinTuple *tuple)
}
/*
- * Create an palloc'd copy of a BrinTuple.
+ * Create a palloc'd copy of a BrinTuple.
*/
BrinTuple *
brin_copy_tuple(BrinTuple *tuple, Size len)
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 9a6dcdd4b1d..c2d52faa960 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -40,7 +40,7 @@ typedef struct
BTSpool *spool;
/*
- * spool2 is needed only when the index is an unique index. Dead tuples
+ * spool2 is needed only when the index is a unique index. Dead tuples
* are put into spool2 instead of spool in order to avoid uniqueness
* check.
*/
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 3ac339bebfc..d9a3fabb8f8 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -291,7 +291,7 @@ AtAbort_Twophase(void)
}
/*
- * This is called after we have finished transfering state to the prepared
+ * This is called after we have finished transferring state to the prepared
* PGXACT entry.
*/
void
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 5e1bda4ed2c..10f0396561b 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -100,7 +100,7 @@ typedef struct
AclObjectKind acl_kind; /* ACL_KIND_* of this object type */
bool is_nsp_name_unique; /* can the nsp/name combination (or
* name alone, if there's no
- * namespace) be considered an unique
+ * namespace) be considered a unique
* identifier for an object of this
* class? */
} ObjectPropertyType;
@@ -3241,7 +3241,7 @@ pg_identify_object(PG_FUNCTION_ARGS)
/*
* We only return the object name if it can be used (together with
- * the schema name, if any) as an unique identifier.
+ * the schema name, if any) as a unique identifier.
*/
if (get_object_namensp_unique(address.classId))
{
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 99acd4a6a2c..351d48ece62 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -1051,7 +1051,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
*/
/*
- * A expression using mutable functions is probably wrong,
+ * An expression using mutable functions is probably wrong,
* since if you aren't going to get the same result for the
* same data every time, it's not clear what the index entries
* mean at all.
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 06ec82e2461..31666edfa8a 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -304,7 +304,7 @@ ExecInsert(TupleTableSlot *slot,
* inserting the record into the heap and all indexes.
*
* ExecWithCheckOptions will elog(ERROR) if a violation is found, so
- * the tuple will never be seen, if it violates the the WITH CHECK
+ * the tuple will never be seen, if it violates the WITH CHECK
* OPTION.
*
* ExecWithCheckOptions() will skip any WCOs which are not of
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 99f4b65ea65..d4939415f07 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -130,7 +130,7 @@ BackgroundWorkerShmemInit(void)
/*
* Copy contents of worker list into shared memory. Record the shared
* memory slot assigned to each worker. This ensures a 1-to-1
- * correspondence betwen the postmaster's private list and the array
+ * correspondence between the postmaster's private list and the array
* in shared memory.
*/
slist_foreach(siter, &BackgroundWorkerList)
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 9b40bc8eca5..c878f629854 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -1597,7 +1597,7 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn)
/*
* We may overwrite the work from some other backend, but that's ok, our
- * snapshot is valid as well, we'll just have done some superflous work.
+ * snapshot is valid as well, we'll just have done some superfluous work.
*/
if (rename(tmppath, path) != 0)
{
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 5813e71da02..1acd2f090b5 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -848,7 +848,7 @@ LWLockDequeueSelf(LWLock *lock)
/*
* Somebody else dequeued us and has or will wake us up. Deal with the
- * superflous absorption of a wakeup.
+ * superfluous absorption of a wakeup.
*/
/*
@@ -1183,7 +1183,7 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
{
/*
* Wait until awakened. Like in LWLockAcquire, be prepared for bogus
- * wakups, because we share the semaphore with ProcWaitForSignal.
+ * wakeups, because we share the semaphore with ProcWaitForSignal.
*/
LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 8826a5d50b2..1907a874588 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -516,7 +516,7 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId)
/*
* RegisterSnapshotInvalidation
*
- * Register a invalidation event for MVCC scans against a given catalog.
+ * Register an invalidation event for MVCC scans against a given catalog.
* Only needed for catalogs that don't have catcaches.
*/
static void