Diffstat (limited to 'src/backend/access')
 src/backend/access/common/heaptuple.c  |  12
 src/backend/access/common/tupdesc.c    |   6
 src/backend/access/gist/gistget.c      |   6
 src/backend/access/gist/gistutil.c     |  16
 src/backend/access/gist/gistvacuum.c   |   3
 src/backend/access/hash/hashovfl.c     |   8
 src/backend/access/hash/hashpage.c     |  16
 src/backend/access/hash/hashutil.c     |  23
 src/backend/access/heap/heapam.c       |  32
 src/backend/access/heap/hio.c          |  12
 src/backend/access/heap/tuptoaster.c   |  12
 src/backend/access/index/genam.c       |   6
 src/backend/access/nbtree/nbtinsert.c  |  38
 src/backend/access/nbtree/nbtpage.c    |  51
 src/backend/access/nbtree/nbtree.c     |  10
 src/backend/access/nbtree/nbtsearch.c  |  39
 src/backend/access/nbtree/nbtsort.c    |   8
 src/backend/access/nbtree/nbtutils.c   |   6
 src/backend/access/transam/multixact.c |  83
 src/backend/access/transam/slru.c      |  26
 src/backend/access/transam/subtrans.c  |  12
 src/backend/access/transam/transam.c   |   8
 src/backend/access/transam/twophase.c  |  24
 src/backend/access/transam/varsup.c    |  10
 src/backend/access/transam/xact.c      |  57
 src/backend/access/transam/xlog.c      | 114
 26 files changed, 324 insertions(+), 314 deletions(-)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 69634b5763f..f6683acd3d3 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.103 2005/11/20 19:49:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.104 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -512,11 +512,11 @@ nocachegetattr(HeapTuple tuple,
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
- * Note - This loop is a little tricky. For each non-null attribute, we
- * have to first account for alignment padding before the attr, then
- * advance over the attr based on its length. Nulls have no storage
- * and no alignment padding either. We can use/set attcacheoff until
- * we pass either a null or a var-width attribute.
+ * Note - This loop is a little tricky. For each non-null attribute,
+ * we have to first account for alignment padding before the attr,
+ * then advance over the attr based on its length. Nulls have no
+ * storage and no alignment padding either. We can use/set
+ * attcacheoff until we pass either a null or a var-width attribute.
*/
for (i = 0; i < attnum; i++)
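
[The loop the rewrapped nocachegetattr comment describes is subtle enough to merit a standalone illustration. Below is a minimal sketch of the walk it performs: pad for alignment, then advance, caching offsets only while they are provably fixed. The Attr struct and att_align helper are simplified stand-ins, not the real PostgreSQL definitions.]

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for a pg_attribute row (hypothetical fields). */
typedef struct
{
    int  attlen;      /* > 0: fixed width; -1: varlena (variable width) */
    int  attalign;    /* required alignment in bytes (power of 2) */
    long attcacheoff; /* cached offset, or -1 if not cacheable */
} Attr;

/* Round 'off' up to the next multiple of 'align'. */
static long
att_align(long off, int align)
{
    return (off + align - 1) & ~((long) align - 1);
}

/* Walk attributes 0..attnum-1, mimicking the nocachegetattr loop:
 * account for alignment padding, then advance over the attr; stop
 * caching at the first null or var-width attribute, since offsets
 * beyond that point depend on the actual data. */
static long
walk_attrs(Attr *atts, bool *isnull, int attnum)
{
    long off = 0;
    bool cacheable = true;

    for (int i = 0; i < attnum; i++)
    {
        if (isnull[i])
        {
            cacheable = false;  /* nulls have no storage and no padding */
            continue;
        }
        off = att_align(off, atts[i].attalign);
        if (cacheable)
            atts[i].attcacheoff = off;
        if (atts[i].attlen < 0)
        {
            cacheable = false;  /* var-width: later offsets aren't fixed */
            off += 8;           /* pretend this varlena occupies 8 bytes */
        }
        else
            off += atts[i].attlen;
    }
    return off;
}

int
main(void)
{
    Attr atts[3] = {{4, 4, -1}, {-1, 4, -1}, {8, 8, -1}};
    bool isnull[3] = {false, false, false};

    printf("end offset: %ld\n", walk_attrs(atts, isnull, 3));   /* 24 */
    printf("cached: %ld %ld %ld\n",
           atts[0].attcacheoff, atts[1].attcacheoff, atts[2].attcacheoff);
    return 0;
}

[Running this prints cached offsets 0 and 4 for the first two attributes and -1 for the third: the fixed-width attribute after the varlena can no longer be located without walking the data.]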
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index cfa455beec9..64cb5c7c2bb 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.113 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -49,8 +49,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
* Allocate enough memory for the tuple descriptor, including the
* attribute rows, and set up the attribute row pointers.
*
- * Note: we assume that sizeof(struct tupleDesc) is a multiple of the struct
- * pointer alignment requirement, and hence we don't need to insert
+ * Note: we assume that sizeof(struct tupleDesc) is a multiple of the
+ * struct pointer alignment requirement, and hence we don't need to insert
* alignment padding between the struct and the array of attribute row
* pointers.
*/
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index bc8f9e0c075..5e4cf7fcc22 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.53 2005/11/06 22:39:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.54 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -396,8 +396,8 @@ gistindex_keytest(IndexTuple tuple,
* are the index datum (as a GISTENTRY*), the comparison datum, and
* the comparison operator's strategy number and subtype from pg_amop.
*
- * (Presently there's no need to pass the subtype since it'll always be
- * zero, but might as well pass it for possible future use.)
+ * (Presently there's no need to pass the subtype since it'll always
+ * be zero, but might as well pass it for possible future use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index faf261b2af5..63cf056fc54 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.8 2005/11/06 22:39:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.9 2005/11/22 18:17:05 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -877,16 +877,17 @@ gistcheckpage(Relation rel, Buffer buf)
Page page = BufferGetPage(buf);
/*
- * ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
- * which means it either contains a reasonably sane page header or is
- * all-zero. We have to defend against the all-zero case, however.
+ * ReadBuffer verifies that every newly-read page passes
+ * PageHeaderIsValid, which means it either contains a reasonably sane
+ * page header or is all-zero. We have to defend against the all-zero
+ * case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("index \"%s\" contains unexpected zero page at block %u",
- RelationGetRelationName(rel),
- BufferGetBlockNumber(buf)),
+ errmsg("index \"%s\" contains unexpected zero page at block %u",
+ RelationGetRelationName(rel),
+ BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
/*
@@ -925,6 +926,7 @@ gistNewBuffer(Relation r)
break; /* nothing left in FSM */
buffer = ReadBuffer(r, blkno);
+
/*
* We have to guard against the possibility that someone else already
* recycled this page; the buffer may be locked if so.
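
[The all-zero-page defense that gistcheckpage's rewrapped comment describes rests on one observation: an initialized page always has a nonzero pd_upper, so a zeroed page is detectable from the header alone. A toy version of the check, with a deliberately simplified page header rather than PostgreSQL's real PageHeaderData:]

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define BLCKSZ 8192

/* Only the fields needed for the check; the real struct has more. */
typedef struct
{
    uint16_t pd_lower;   /* offset to start of free space */
    uint16_t pd_upper;   /* offset to end of free space; 0 when zeroed */
    uint16_t pd_special; /* offset to start of special space */
} PageHeader;

/* A freshly-zeroed page has pd_upper == 0, which no initialized page
 * can, since free space always ends at a nonzero offset. This is the
 * test PostgreSQL's PageIsNew macro performs. */
static bool
page_is_new(const void *page)
{
    return ((const PageHeader *) page)->pd_upper == 0;
}

int
main(void)
{
    char page[BLCKSZ];

    memset(page, 0, sizeof(page));  /* simulate reading an all-zero page */
    if (page_is_new(page))
        fprintf(stderr, "index contains unexpected zero page\n");
    return 0;
}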
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 60725e5e05b..31c560a83ad 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.10 2005/11/06 22:39:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.11 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,6 +65,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
lencompleted = 16;
buffer = ReadBuffer(gv->index, blkno);
+
/*
* This is only used during VACUUM FULL, so we need not bother to lock
* individual index pages
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index aa433bc70a0..c40973c7710 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.48 2005/11/06 19:29:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.49 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -488,9 +488,9 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
* It is okay to write-lock the new bitmap page while holding metapage
* write lock, because no one else could be contending for the new page.
*
- * There is some loss of concurrency in possibly doing I/O for the new page
- * while holding the metapage lock, but this path is taken so seldom that
- * it's not worth worrying about.
+ * There is some loss of concurrency in possibly doing I/O for the new
+ * page while holding the metapage lock, but this path is taken so seldom
+ * that it's not worth worrying about.
*/
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
pg = BufferGetPage(buf);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index ab0bf935fb6..2739bc21089 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.53 2005/11/06 19:29:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.54 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -402,8 +402,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
- * Ideally we would lock the new bucket too before proceeding, but if we are
- * about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
+ * Ideally we would lock the new bucket too before proceeding, but if we
+ * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
* correct yet. For simplicity we update the metapage first and then
* lock. This should be okay because no one else should be trying to lock
* the new bucket yet...
@@ -422,11 +422,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Okay to proceed with split. Update the metapage bucket mapping info.
*
- * Since we are scribbling on the metapage data right in the shared buffer,
- * any failure in this next little bit leaves us with a big problem: the
- * metapage is effectively corrupt but could get written back to disk. We
- * don't really expect any failure, but just to be sure, establish a
- * critical section.
+ * Since we are scribbling on the metapage data right in the shared
+ * buffer, any failure in this next little bit leaves us with a big
+ * problem: the metapage is effectively corrupt but could get written back
+ * to disk. We don't really expect any failure, but just to be sure,
+ * establish a critical section.
*/
START_CRIT_SECTION();
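
[START_CRIT_SECTION at the end of this hashpage.c hunk is what makes the "metapage is effectively corrupt" worry survivable: inside a critical section, any error is escalated to PANIC so that WAL replay repairs the damage, instead of a normal rollback leaving half-updated shared state behind. A stripped-down sketch of the mechanism, assuming a toy error reporter in place of PostgreSQL's elog machinery:]

#include <stdio.h>
#include <stdlib.h>

/* A nesting counter that the error handler consults. PostgreSQL's real
 * macros adjust a global CritSectionCount that the elog code checks;
 * this is a simplification of that idea. */
static int crit_section_count = 0;

#define START_CRIT_SECTION()  (crit_section_count++)
#define END_CRIT_SECTION()    (crit_section_count--)

/* Hypothetical error reporter: inside a critical section, a would-be
 * recoverable error becomes a hard abort, forcing crash recovery. */
static void
report_error(const char *msg)
{
    if (crit_section_count > 0)
    {
        fprintf(stderr, "PANIC: %s\n", msg);
        abort();
    }
    fprintf(stderr, "ERROR: %s\n", msg);
    /* ...normal transaction abort would unwind from here... */
}

int
main(void)
{
    report_error("outside: handled as a plain ERROR");

    START_CRIT_SECTION();
    /* ...scribble on the shared metapage here; any error now PANICs... */
    END_CRIT_SECTION();
    return 0;
}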
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 34d395c04ea..3cd573e3684 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.43 2005/11/06 19:29:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.44 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -115,23 +115,24 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
Page page = BufferGetPage(buf);
/*
- * ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
- * which means it either contains a reasonably sane page header or is
- * all-zero. We have to defend against the all-zero case, however.
+ * ReadBuffer verifies that every newly-read page passes
+ * PageHeaderIsValid, which means it either contains a reasonably sane
+ * page header or is all-zero. We have to defend against the all-zero
+ * case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("index \"%s\" contains unexpected zero page at block %u",
- RelationGetRelationName(rel),
- BufferGetBlockNumber(buf)),
+ errmsg("index \"%s\" contains unexpected zero page at block %u",
+ RelationGetRelationName(rel),
+ BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
/*
* Additionally check that the special area looks sane.
*/
if (((PageHeader) (page))->pd_special !=
- (BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData))))
+ (BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData))))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" contains corrupted page at block %u",
@@ -146,9 +147,9 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
if ((opaque->hasho_flag & flags) == 0)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("index \"%s\" contains corrupted page at block %u",
- RelationGetRelationName(rel),
- BufferGetBlockNumber(buf)),
+ errmsg("index \"%s\" contains corrupted page at block %u",
+ RelationGetRelationName(rel),
+ BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
}
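
[The pd_special sanity test in the hashutil.c hunk above depends on MAXALIGN, which rounds a size up to the platform's maximum alignment so that the special space lands at a predictable offset from the end of the block. A minimal sketch, with MAXIMUM_ALIGNOF fixed at 8 for illustration (it is platform-dependent in PostgreSQL) and a stand-in opaque struct:]

#include <stdio.h>
#include <stdint.h>

#define BLCKSZ           8192
#define MAXIMUM_ALIGNOF  8

/* Round LEN up to the next multiple of MAXIMUM_ALIGNOF. */
#define MAXALIGN(LEN) \
    (((uintptr_t) (LEN) + (MAXIMUM_ALIGNOF - 1)) & \
     ~((uintptr_t) (MAXIMUM_ALIGNOF - 1)))

/* Stand-in for the per-page opaque data a hash page keeps in its
 * special space (field layout simplified). */
typedef struct
{
    uint32_t hasho_prevblkno;
    uint32_t hasho_nextblkno;
    uint32_t hasho_bucket;
    uint16_t hasho_flag;
    uint16_t hasho_page_id;
} HashPageOpaqueData;

int
main(void)
{
    /* On a sane hash page, pd_special points at the maxaligned special
     * area at the very end of the block, exactly as the ereport check
     * in _hash_checkpage expects. */
    size_t expected = BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData));

    printf("sizeof opaque = %zu, maxaligned = %zu, expected pd_special = %zu\n",
           sizeof(HashPageOpaqueData),
           (size_t) MAXALIGN(sizeof(HashPageOpaqueData)),
           expected);
    return 0;
}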
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 3b4de80b7b8..8505e11437b 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.202 2005/11/20 19:49:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.203 2005/11/22 18:17:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1080,7 +1080,7 @@ heap_get_latest_tid(Relation relation,
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -1127,8 +1127,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* If the new tuple is too big for storage or contains already toasted
* out-of-line attributes from some other relation, invoke the toaster.
*
- * Note: below this point, heaptup is the data we actually intend to
- * store into the relation; tup is the caller's original untoasted data.
+ * Note: below this point, heaptup is the data we actually intend to store
+ * into the relation; tup is the caller's original untoasted data.
*/
if (HeapTupleHasExternal(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
@@ -1215,8 +1215,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
- * buffer, because the heaptup data structure is all in local memory,
- * not in the shared buffer.
+ * buffer, because the heaptup data structure is all in local memory, not
+ * in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, heaptup);
@@ -1323,8 +1323,8 @@ l1:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@@ -1567,7 +1567,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
*
* On success, the header fields of *newtup are updated to match the new
* stored tuple; in particular, newtup->t_self is set to the TID where the
- * new tuple was inserted. However, any TOAST changes in the new tuple's
+ * new tuple was inserted. However, any TOAST changes in the new tuple's
* data are not reflected into *newtup.
*
* In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
@@ -1638,8 +1638,8 @@ l2:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@@ -1771,8 +1771,8 @@ l2:
* show that it's already being updated, else other processes may try to
* update it themselves.
*
- * We need to invoke the toaster if there are already any out-of-line toasted
- * values present, or if the new tuple is over-threshold.
+ * We need to invoke the toaster if there are already any out-of-line
+ * toasted values present, or if the new tuple is over-threshold.
*/
newtupsize = MAXALIGN(newtup->t_len);
@@ -1875,7 +1875,7 @@ l2:
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
if (!already_marked)
{
@@ -2111,8 +2111,8 @@ l3:
* LockTuple will release us when we are next-in-line for the tuple.
* We must do this even if we are share-locking.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 800ee4a805b..d66c43c3021 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.59 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -296,11 +296,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Remember the new page as our target for future insertions.
*
- * XXX should we enter the new page into the free space map immediately, or
- * just keep it for this backend's exclusive use in the short run (until
- * VACUUM sees it)? Seems to depend on whether you expect the current
- * backend to make more insertions or not, which is probably a good bet
- * most of the time. So for now, don't add it to FSM yet.
+ * XXX should we enter the new page into the free space map immediately,
+ * or just keep it for this backend's exclusive use in the short run
+ * (until VACUUM sees it)? Seems to depend on whether you expect the
+ * current backend to make more insertions or not, which is probably a
+ * good bet most of the time. So for now, don't add it to FSM yet.
*/
relation->rd_targblock = BufferGetBlockNumber(buffer);
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index c18ea54282c..1b762597cbd 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.55 2005/11/20 19:49:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.56 2005/11/22 18:17:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1074,8 +1074,8 @@ toast_save_datum(Relation rel, Datum value)
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
- * Note also that there had better not be any user-created index on the
- * TOAST table, since we don't bother to update anything else.
+ * Note also that there had better not be any user-created index on
+ * the TOAST table, since we don't bother to update anything else.
*/
index_insert(toastidx, t_values, t_isnull,
&(toasttup->t_self),
@@ -1213,9 +1213,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
- * Note that because the index is actually on (valueid, chunkidx) we will see
- * the chunks in chunkidx order, even though we didn't explicitly ask for
- * it.
+ * Note that because the index is actually on (valueid, chunkidx) we will
+ * see the chunks in chunkidx order, even though we didn't explicitly ask
+ * for it.
*/
nextidx = 0;
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 5563b9d5975..d32a9f9db9e 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.50 2005/11/20 19:49:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.51 2005/11/22 18:17:06 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -202,8 +202,8 @@ systable_beginscan(Relation heapRelation,
/*
* Change attribute numbers to be index column numbers.
*
- * This code could be generalized to search for the index key numbers to
- * substitute, but for now there's no need.
+ * This code could be generalized to search for the index key numbers
+ * to substitute, but for now there's no need.
*/
for (i = 0; i < nkeys; i++)
{
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 3ef40a5cb6c..6a66a356c6d 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.128 2005/11/06 19:29:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.129 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,8 +104,8 @@ top:
* If we're not allowing duplicates, make sure the key isn't already in
* the index.
*
- * NOTE: obviously, _bt_check_unique can only detect keys that are already in
- * the index; so it cannot defend against concurrent insertions of the
+ * NOTE: obviously, _bt_check_unique can only detect keys that are already
+ * in the index; so it cannot defend against concurrent insertions of the
* same key. We protect against that by means of holding a write lock on
* the target page. Any other would-be inserter of the same key must
* acquire a write lock on the same target page, so only one would-be
@@ -114,8 +114,8 @@ top:
* our insertion, so no later inserter can fail to see our insertion.
* (This requires some care in _bt_insertonpg.)
*
- * If we must wait for another xact, we release the lock while waiting, and
- * then must start over completely.
+ * If we must wait for another xact, we release the lock while waiting,
+ * and then must start over completely.
*/
if (index_is_unique)
{
@@ -193,8 +193,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/*
* We can skip items that are marked killed.
*
- * Formerly, we applied _bt_isequal() before checking the kill flag,
- * so as to fall out of the item loop as soon as possible.
+ * Formerly, we applied _bt_isequal() before checking the kill
+ * flag, so as to fall out of the item loop as soon as possible.
* However, in the presence of heavy update activity an index may
* contain many killed items with the same key; running
* _bt_isequal() on each killed item gets expensive. Furthermore
@@ -431,11 +431,11 @@ _bt_insertonpg(Relation rel,
/*
* step right to next non-dead page
*
- * must write-lock that page before releasing write lock on current
- * page; else someone else's _bt_check_unique scan could fail to
- * see our insertion. write locks on intermediate dead pages
- * won't do because we don't know when they will get de-linked
- * from the tree.
+ * must write-lock that page before releasing write lock on
+ * current page; else someone else's _bt_check_unique scan could
+ * fail to see our insertion. write locks on intermediate dead
+ * pages won't do because we don't know when they will get
+ * de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
@@ -471,9 +471,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
- * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, so
- * this comparison is correct even though we appear to be accounting only
- * for the item and not for its line pointer.
+ * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
+ * so this comparison is correct even though we appear to be accounting
+ * only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@@ -1158,10 +1158,10 @@ _bt_insert_parent(Relation rel,
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
*
- * If we have to search for the parent level, we do so by re-descending from
- * the root. This is not super-efficient, but it's rare enough not to
- * matter. (This path is also taken when called from WAL recovery --- we
- * have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending
+ * from the root. This is not super-efficient, but it's rare enough not
+ * to matter. (This path is also taken when called from WAL recovery ---
+ * we have no stack in that case.)
*/
if (is_root)
{
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 7433c778104..c356dc082f0 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.89 2005/11/06 19:29:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.90 2005/11/22 18:17:06 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -412,16 +412,17 @@ _bt_checkpage(Relation rel, Buffer buf)
Page page = BufferGetPage(buf);
/*
- * ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
- * which means it either contains a reasonably sane page header or is
- * all-zero. We have to defend against the all-zero case, however.
+ * ReadBuffer verifies that every newly-read page passes
+ * PageHeaderIsValid, which means it either contains a reasonably sane
+ * page header or is all-zero. We have to defend against the all-zero
+ * case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("index \"%s\" contains unexpected zero page at block %u",
- RelationGetRelationName(rel),
- BufferGetBlockNumber(buf)),
+ errmsg("index \"%s\" contains unexpected zero page at block %u",
+ RelationGetRelationName(rel),
+ BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
/*
@@ -440,7 +441,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
- * blkno == P_NEW means to get an unallocated index page. The page
+ * blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@@ -475,21 +476,21 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
- * In fact, it's worse than that: we can't even assume that it's safe to
- * take a lock on the reported page. If somebody else has a lock on
- * it, or even worse our own caller does, we could deadlock. (The
+ * In fact, it's worse than that: we can't even assume that it's safe
+ * to take a lock on the reported page. If somebody else has a lock
+ * on it, or even worse our own caller does, we could deadlock. (The
* own-caller scenario is actually not improbable. Consider an index
* on a serial or timestamp column. Nearly all splits will be at the
* rightmost page, so it's entirely likely that _bt_split will call us
- * while holding a lock on the page most recently acquired from FSM.
- * A VACUUM running concurrently with the previous split could well
- * have placed that page back in FSM.)
+ * while holding a lock on the page most recently acquired from FSM. A
+ * VACUUM running concurrently with the previous split could well have
+ * placed that page back in FSM.)
*
- * To get around that, we ask for only a conditional lock on the reported
- * page. If we fail, then someone else is using the page, and we may
- * reasonably assume it's not free. (If we happen to be wrong, the
- * worst consequence is the page will be lost to use till the next
- * VACUUM, which is no big problem.)
+ * To get around that, we ask for only a conditional lock on the
+ * reported page. If we fail, then someone else is using the page,
+ * and we may reasonably assume it's not free. (If we happen to be
+ * wrong, the worst consequence is the page will be lost to use till
+ * the next VACUUM, which is no big problem.)
*/
for (;;)
{
@@ -839,12 +840,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
* We have to lock the pages we need to modify in the standard order:
* moving right, then up. Else we will deadlock against other writers.
*
- * So, we need to find and write-lock the current left sibling of the target
- * page. The sibling that was current a moment ago could have split, so
- * we may have to move right. This search could fail if either the
- * sibling or the target page was deleted by someone else meanwhile; if
- * so, give up. (Right now, that should never happen, since page deletion
- * is only done in VACUUM and there shouldn't be multiple VACUUMs
+ * So, we need to find and write-lock the current left sibling of the
+ * target page. The sibling that was current a moment ago could have
+ * split, so we may have to move right. This search could fail if either
+ * the sibling or the target page was deleted by someone else meanwhile;
+ * if so, give up. (Right now, that should never happen, since page
+ * deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
if (leftsib != P_NONE)
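
[The conditional-lock trick in the _bt_getbuf hunk above, try the lock and on failure assume the page is busy rather than wait, can be sketched with a pthread mutex; pthread_mutex_trylock stands in here for PostgreSQL's ConditionalLockBuffer.]

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

/* One mutex standing in for the content lock on a candidate free page. */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Conditional acquisition: never blocks. On failure we simply assume
 * someone else is using the page, just as the comment describes. The
 * worst case of a wrong guess is a page left unused until the next
 * VACUUM, which is far cheaper than risking a deadlock. */
static bool
try_claim_free_page(void)
{
    if (pthread_mutex_trylock(&page_lock) != 0)
        return false;           /* contended: treat as not actually free */
    /* ...re-verify the page really is free/deleted, then reuse it... */
    pthread_mutex_unlock(&page_lock);
    return true;
}

int
main(void)
{
    printf(try_claim_free_page() ? "claimed page\n" : "page busy, skipping\n");
    return 0;
}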
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 9ddc326bd2f..70aca882e6c 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.133 2005/11/06 19:29:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.134 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -307,8 +307,8 @@ btgettuple(PG_FUNCTION_ARGS)
* Save heap TID to use it in _bt_restscan. Then release the read lock on
* the buffer so that we aren't blocking other backends.
*
- * NOTE: we do keep the pin on the buffer! This is essential to ensure that
- * someone else doesn't delete the index entry we are stopped on.
+ * NOTE: we do keep the pin on the buffer! This is essential to ensure
+ * that someone else doesn't delete the index entry we are stopped on.
*/
if (res)
{
@@ -774,8 +774,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* We can't use _bt_getbuf() here because it always applies
- * _bt_checkpage(), which will barf on an all-zero page.
- * We want to recycle all-zero pages, not fail.
+ * _bt_checkpage(), which will barf on an all-zero page. We want to
+ * recycle all-zero pages, not fail.
*/
buf = ReadBuffer(rel, blkno);
LockBuffer(buf, BT_READ);
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index e487b498820..9c42797b95c 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.96 2005/10/18 01:06:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.97 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -164,10 +164,11 @@ _bt_moveright(Relation rel,
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
- * The page could even have split more than once, so scan as far as needed.
+ * The page could even have split more than once, so scan as far as
+ * needed.
*
- * We also have to move right if we followed a link that brought us to a dead
- * page.
+ * We also have to move right if we followed a link that brought us to a
+ * dead page.
*/
cmpval = nextkey ? 0 : 1;
@@ -255,8 +256,8 @@ _bt_binsrch(Relation rel,
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
- * For nextkey=true (cmpval=0), the loop invariant is: all slots before 'low'
- * are <= scan key, all slots at or after 'high' are > scan key.
+ * For nextkey=true (cmpval=0), the loop invariant is: all slots before
+ * 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
@@ -282,8 +283,8 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
- * On a leaf page, we always return the first key >= scan key (resp. > scan
- * key), which could be the last slot + 1.
+ * On a leaf page, we always return the first key >= scan key (resp. >
+ * scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
@@ -350,8 +351,8 @@ _bt_compare(Relation rel,
* you think about how multi-key ordering works, you'll understand why
* this is.
*
- * We don't test for violation of this condition here, however. The initial
- * setup for the index scan had better have gotten it right (see
+ * We don't test for violation of this condition here, however. The
+ * initial setup for the index scan had better have gotten it right (see
* _bt_first).
*/
@@ -692,9 +693,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* where we need to start the scan, and set flag variables to control the
* code below.
*
- * If nextkey = false, _bt_search and _bt_binsrch will locate the first item
- * >= scan key. If nextkey = true, they will locate the first item > scan
- * key.
+ * If nextkey = false, _bt_search and _bt_binsrch will locate the first
+ * item >= scan key. If nextkey = true, they will locate the first item >
+ * scan key.
*
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
@@ -819,9 +820,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* than or equal to the scan key and we know that everything on later
* pages is greater than scan key.
*
- * The actually desired starting point is either this item or the prior one,
- * or in the end-of-page case it's the first item on the next page or the
- * last item on this page. We apply _bt_step if needed to get to the
+ * The actually desired starting point is either this item or the prior
+ * one, or in the end-of-page case it's the first item on the next page or
+ * the last item on this page. We apply _bt_step if needed to get to the
* right place.
*
* If _bt_step fails (meaning we fell off the end of the index in one
@@ -1044,9 +1045,9 @@ _bt_walk_left(Relation rel, Buffer buf)
* the original page got deleted and isn't in the sibling chain at all
* anymore, not that its left sibling got split more than four times.
*
- * Note that it is correct to test P_ISDELETED not P_IGNORE here, because
- * half-dead pages are still in the sibling chain. Caller must reject
- * half-dead pages if wanted.
+ * Note that it is correct to test P_ISDELETED not P_IGNORE here,
+ * because half-dead pages are still in the sibling chain. Caller
+ * must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 6ee5d42b63a..aa1bb0028df 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.96 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -487,9 +487,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
* the per-page available space. Note that at this point, btisz doesn't
* include the ItemId.
*
- * NOTE: similar code appears in _bt_insertonpg() to defend against oversize
- * items being inserted into an already-existing index. But during
- * creation of an index, we don't go through there.
+ * NOTE: similar code appears in _bt_insertonpg() to defend against
+ * oversize items being inserted into an already-existing index. But
+ * during creation of an index, we don't go through there.
*/
if (btisz > BTMaxItemSize(npage))
ereport(ERROR,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 7d60c98f38d..f4c2243943b 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.65 2005/10/18 01:06:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.66 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -549,8 +549,8 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
* able to conclude no further tuples will pass, either. We have
* to look at the scan direction and the qual type.
*
- * Note: the only case in which we would keep going after failing a
- * required qual is if there are partially-redundant quals that
+ * Note: the only case in which we would keep going after failing
+ * a required qual is if there are partially-redundant quals that
* _bt_preprocess_keys() was unable to eliminate. For example,
* given "x > 4 AND x > 10" where both are cross-type comparisons
* and so not removable, we might start the scan at the x = 4
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index bb532bfe6a6..798d9dc61b6 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.12 2005/11/05 21:19:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.13 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -129,22 +129,23 @@ typedef struct MultiXactStateData
* member of a MultiXact, and that MultiXact would have to be created
* during or after the lock acquisition.)
*
- * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's current
- * transaction(s) think is potentially live, or InvalidMultiXactId when
- * not in a transaction or not in a transaction that's paid any attention
- * to MultiXacts yet. This is computed when first needed in a given
- * transaction, and cleared at transaction end. We can compute it as the
- * minimum of the valid OldestMemberMXactId[] entries at the time we
- * compute it (using nextMXact if none are valid). Each backend is
+ * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's
+ * current transaction(s) think is potentially live, or InvalidMultiXactId
+ * when not in a transaction or not in a transaction that's paid any
+ * attention to MultiXacts yet. This is computed when first needed in a
+ * given transaction, and cleared at transaction end. We can compute it
+ * as the minimum of the valid OldestMemberMXactId[] entries at the time
+ * we compute it (using nextMXact if none are valid). Each backend is
* required not to attempt to access any SLRU data for MultiXactIds older
* than its own OldestVisibleMXactId[] setting; this is necessary because
* the checkpointer could truncate away such data at any instant.
*
- * The checkpointer can compute the safe truncation point as the oldest valid
- * value among all the OldestMemberMXactId[] and OldestVisibleMXactId[]
- * entries, or nextMXact if none are valid. Clearly, it is not possible
- * for any later-computed OldestVisibleMXactId value to be older than
- * this, and so there is no risk of truncating data that is still needed.
+ * The checkpointer can compute the safe truncation point as the oldest
+ * valid value among all the OldestMemberMXactId[] and
+ * OldestVisibleMXactId[] entries, or nextMXact if none are valid.
+ * Clearly, it is not possible for any later-computed OldestVisibleMXactId
+ * value to be older than this, and so there is no risk of truncating data
+ * that is still needed.
*/
MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */
} MultiXactStateData;
@@ -631,8 +632,8 @@ CreateMultiXactId(int nxids, TransactionId *xids)
}
/*
- * Assign the MXID and offsets range to use, and make sure there is
- * space in the OFFSETs and MEMBERs files. NB: this routine does
+ * Assign the MXID and offsets range to use, and make sure there is space
+ * in the OFFSETs and MEMBERs files. NB: this routine does
* START_CRIT_SECTION().
*/
multi = GetNewMultiXactId(nxids, &offset);
@@ -788,9 +789,9 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactOffset(result);
/*
- * Reserve the members space, similarly to above. Also, be
- * careful not to return zero as the starting offset for any multixact.
- * See GetMultiXactIdMembers() for motivation.
+ * Reserve the members space, similarly to above. Also, be careful not to
+ * return zero as the starting offset for any multixact. See
+ * GetMultiXactIdMembers() for motivation.
*/
nextOffset = MultiXactState->nextOffset;
if (nextOffset == 0)
@@ -804,8 +805,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactMember(nextOffset, nxids);
/*
- * Critical section from here until caller has written the data into
- * the just-reserved SLRU space; we don't want to error out with a partly
+ * Critical section from here until caller has written the data into the
+ * just-reserved SLRU space; we don't want to error out with a partly
* written MultiXact structure. (In particular, failing to write our
* start offset after advancing nextMXact would effectively corrupt the
* previous MultiXact.)
@@ -819,8 +820,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
- * be prepared to deal with that. Similarly, nextOffset may be zero,
- * but we won't use that as the actual start offset of the next multixact.
+ * be prepared to deal with that. Similarly, nextOffset may be zero, but
+ * we won't use that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
@@ -881,7 +882,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* SLRU data if we did try to examine it.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. We just
+ * seen, it implies undetected ID wraparound has occurred. We just
* silently assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
@@ -897,7 +898,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Acquire the shared lock just long enough to grab the current counter
- * values. We may need both nextMXact and nextOffset; see below.
+ * values. We may need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -915,27 +916,27 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter
- * as the difference between this multixact's starting offset and the
- * next one's. However, there are some corner cases to worry about:
+ * and the number of members in the multixact. We determine the latter as
+ * the difference between this multixact's starting offset and the next
+ * one's. However, there are some corner cases to worry about:
*
- * 1. This multixact may be the latest one created, in which case there
- * is no next one to look at. In this case the nextOffset value we just
+ * 1. This multixact may be the latest one created, in which case there is
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
- * 2. The next multixact may still be in process of being filled in:
- * that is, another process may have done GetNewMultiXactId but not yet
- * written the offset entry for that ID. In that scenario, it is
- * guaranteed that the offset entry for that multixact exists (because
- * GetNewMultiXactId won't release MultiXactGenLock until it does)
- * but contains zero (because we are careful to pre-zero offset pages).
- * Because GetNewMultiXactId will never return zero as the starting offset
- * for a multixact, when we read zero as the next multixact's offset, we
- * know we have this case. We sleep for a bit and try again.
+ * 2. The next multixact may still be in process of being filled in: that
+ * is, another process may have done GetNewMultiXactId but not yet written
+ * the offset entry for that ID. In that scenario, it is guaranteed that
+ * the offset entry for that multixact exists (because GetNewMultiXactId
+ * won't release MultiXactGenLock until it does) but contains zero
+ * (because we are careful to pre-zero offset pages). Because
+ * GetNewMultiXactId will never return zero as the starting offset for a
+ * multixact, when we read zero as the next multixact's offset, we know we
+ * have this case. We sleep for a bit and try again.
*
- * 3. Because GetNewMultiXactId increments offset zero to offset one
- * to handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * 3. Because GetNewMultiXactId increments offset zero to offset one to
+ * handle case #2, there is an ambiguity near the point of offset
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
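
[The "never return zero as a starting offset" rule this multixact.c comment keeps invoking takes only a few lines to demonstrate. A sketch of the reservation step, assuming a bare wrapping 32-bit offset counter in place of the real shared-memory state:]

#include <stdint.h>
#include <stdio.h>

typedef uint32_t MultiXactOffset;

static MultiXactOffset nextOffset = 0;

/* Reserve 'nxids' member slots and return the starting offset.
 * Offset zero is reserved as a sentinel: a pre-zeroed, not-yet-written
 * offset entry reads as zero, so a real multixact must never start
 * there, or readers could not tell "still being filled in" from data. */
static MultiXactOffset
reserve_members(int nxids)
{
    MultiXactOffset start;

    if (nextOffset == 0)        /* skip the sentinel, also after wraparound */
        nextOffset = 1;
    start = nextOffset;
    nextOffset += nxids;        /* unsigned arithmetic: wraps harmlessly */
    return start;
}

int
main(void)
{
    printf("first reservation starts at %u\n", reserve_members(3)); /* 1 */
    printf("next reservation starts at %u\n", reserve_members(2));  /* 4 */
    return 0;
}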
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 23b022fa218..6997ce1fc52 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
- * must be held to examine or modify any shared state. A process that is
+ * must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@@ -37,7 +37,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.30 2005/11/05 21:19:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.31 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -236,13 +236,14 @@ SimpleLruWaitIO(SlruCtl ctl, int slotno)
LWLockAcquire(shared->buffer_locks[slotno], LW_SHARED);
LWLockRelease(shared->buffer_locks[slotno]);
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
+
/*
* If the slot is still in an io-in-progress state, then either someone
* already started a new I/O on the slot, or a previous I/O failed and
- * neglected to reset the page state. That shouldn't happen, really,
- * but it seems worth a few extra cycles to check and recover from it.
- * We can cheaply test for failure by seeing if the buffer lock is still
- * held (we assume that transaction abort would release the lock).
+ * neglected to reset the page state. That shouldn't happen, really, but
+ * it seems worth a few extra cycles to check and recover from it. We can
+ * cheaply test for failure by seeing if the buffer lock is still held (we
+ * assume that transaction abort would release the lock).
*/
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
@@ -252,7 +253,8 @@ SimpleLruWaitIO(SlruCtl ctl, int slotno)
/* indeed, the I/O must have failed */
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
shared->page_status[slotno] = SLRU_PAGE_EMPTY;
- else /* write_in_progress */
+ else
+ /* write_in_progress */
{
shared->page_status[slotno] = SLRU_PAGE_VALID;
shared->page_dirty[slotno] = true;
@@ -375,8 +377,8 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
}
/*
- * Do nothing if page is not dirty, or if buffer no longer contains
- * the same page we were called for.
+ * Do nothing if page is not dirty, or if buffer no longer contains the
+ * same page we were called for.
*/
if (!shared->page_dirty[slotno] ||
shared->page_status[slotno] != SLRU_PAGE_VALID ||
@@ -384,8 +386,8 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
return;
/*
- * Mark the slot write-busy, and clear the dirtybit. After this point,
- * a transaction status update on this page will mark it dirty again.
+ * Mark the slot write-busy, and clear the dirtybit. After this point, a
+ * transaction status update on this page will mark it dirty again.
*/
shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
shared->page_dirty[slotno] = false;
@@ -902,7 +904,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
- * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
+ * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/
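
[SimpleLruWaitIO's waiting idiom, visible at the top of the hunk above (acquire the per-buffer lock in shared mode, then release it immediately), works because whichever process is doing I/O holds that lock for the duration of the I/O. A pthread sketch of the same idiom:]

#include <pthread.h>
#include <stdio.h>

/* Per-buffer lock, held by the process doing I/O on the buffer. */
static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire and immediately release: returning from the acquire means
 * any in-progress I/O has finished (or failed). The caller must then
 * retake the control lock and re-examine page_status, since the state
 * may have changed while we waited. */
static void
wait_for_io(void)
{
    pthread_mutex_lock(&buffer_lock);
    pthread_mutex_unlock(&buffer_lock);
    /* ...reacquire control lock and recheck page_status here... */
}

int
main(void)
{
    wait_for_io();
    puts("I/O (if any) has completed");
    return 0;
}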
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 319b17d9458..e81ec6f62d5 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.12 2005/11/05 21:19:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.13 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -261,8 +261,8 @@ ShutdownSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do it
- * merely as a debugging aid.
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
@@ -276,9 +276,9 @@ CheckPointSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do it
- * merely to improve the odds that writing of dirty pages is done by the
- * checkpoint process and not by backends.
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely to improve the odds that writing of dirty pages is done by
+ * the checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);
}
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 59852520521..d929ca5f734 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.67 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -174,9 +174,9 @@ TransactionIdDidCommit(TransactionId transactionId)
* pg_subtrans; instead assume that the parent crashed without cleaning up
* its children.
*
- * Originally we Assert'ed that the result of SubTransGetParent was not zero.
- * However with the introduction of prepared transactions, there can be a
- * window just after database startup where we do not have complete
+ * Originally we Assert'ed that the result of SubTransGetParent was not
+ * zero. However with the introduction of prepared transactions, there can
+ * be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
* zeroed. Since this case should not happen under normal conditions, it
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 5423060653d..ffdee8388b3 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.16 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.17 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -851,10 +851,10 @@ EndPrepare(GlobalTransaction gxact)
/*
* Create the 2PC state file.
*
- * Note: because we use BasicOpenFile(), we are responsible for ensuring the
- * FD gets closed in any error exit path. Once we get into the critical
- * section, though, it doesn't matter since any failure causes PANIC
- * anyway.
+ * Note: because we use BasicOpenFile(), we are responsible for ensuring
+ * the FD gets closed in any error exit path. Once we get into the
+ * critical section, though, it doesn't matter since any failure causes
+ * PANIC anyway.
*/
TwoPhaseFilePath(path, xid);
@@ -911,8 +911,8 @@ EndPrepare(GlobalTransaction gxact)
* The state file isn't valid yet, because we haven't written the correct
* CRC yet. Before we do that, insert entry in WAL and flush it to disk.
*
- * Between the time we have written the WAL entry and the time we write out
- * the correct state file CRC, we have an inconsistency: the xact is
+ * Between the time we have written the WAL entry and the time we write
+ * out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
* the write --- then, WAL replay should repair the inconsistency. The
@@ -1344,11 +1344,11 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
* it just long enough to make a list of the XIDs that require fsyncing,
* and then do the I/O afterwards.
*
- * This approach creates a race condition: someone else could delete a GXACT
- * between the time we release TwoPhaseStateLock and the time we try to
- * open its state file. We handle this by special-casing ENOENT failures:
- * if we see that, we verify that the GXACT is no longer valid, and if so
- * ignore the failure.
+ * This approach creates a race condition: someone else could delete a
+ * GXACT between the time we release TwoPhaseStateLock and the time we try
+ * to open its state file. We handle this by special-casing ENOENT
+ * failures: if we see that, we verify that the GXACT is no longer valid,
+ * and if so ignore the failure.
*/
if (max_prepared_xacts <= 0)
return; /* nothing to do */
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 874a9736c70..e65d63a013e 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.68 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.69 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,8 +56,8 @@ GetNewTransactionId(bool isSubXact)
* (which gives an escape hatch to the DBA who ignored all those
* warnings).
*
- * Test is coded to fall out as fast as possible during normal operation, ie,
- * when the warn limit is set and we haven't violated it.
+ * Test is coded to fall out as fast as possible during normal operation,
+ * ie, when the warn limit is set and we haven't violated it.
*/
if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit) &&
TransactionIdIsValid(ShmemVariableCache->xidWarnLimit))
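[Editor's note: "coded to fall out as fast as possible" means ordering the conjunction so the common case fails the first test. A tiny sketch of the idea, using a modulo-2^32 circular comparison in the spirit of TransactionIdFollowsOrEquals (the real comparators have more special cases); values are illustrative:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t xid_t;

/* Circular "follows or equals", roughly like the xid comparators. */
static bool
follows_or_equals(xid_t a, xid_t b)
{
    return (int32_t) (a - b) >= 0;
}

int
main(void)
{
    xid_t xid = 1000, warn_limit = 2000000000u;

    /*
     * Normal operation: xid is far below warn_limit, so the first test
     * is false and we skip the validity check and the warning entirely.
     */
    if (follows_or_equals(xid, warn_limit) && warn_limit != 0)
        puts("WARNING: approaching xid wraparound");
    else
        puts("fast path: nothing to do");
    return 0;
}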
@@ -268,8 +268,8 @@ GetNewObjectId(void)
* right after a wrap occurs, so as to avoid a possibly large number of
* iterations in GetNewOid.) Note we are relying on unsigned comparison.
*
- * During initdb, we start the OID generator at FirstBootstrapObjectId, so we
- * only enforce wrapping to that point when in bootstrap or standalone
+ * During initdb, we start the OID generator at FirstBootstrapObjectId, so
+ * we only enforce wrapping to that point when in bootstrap or standalone
* mode. The first time through this routine after normal postmaster
* start, the counter will be forced up to FirstNormalObjectId. This
* mechanism leaves the OIDs between FirstBootstrapObjectId and
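[Editor's note: "relying on unsigned comparison" is the crux of this hunk — after the counter wraps past 2^32 it becomes a small unsigned value, so a single less-than test both detects the wrap and handles first use. A sketch; the boundary constant is illustrative of the scheme, not a definitive value:]

#include <stdint.h>
#include <stdio.h>

#define FIRST_NORMAL_OID 16384u   /* illustrative boundary in this scheme */

static uint32_t next_oid = 0;     /* pretend the counter just wrapped */

static uint32_t
get_new_oid(void)
{
    /* Unsigned compare: true right after a wrap (and at first use). */
    if (next_oid < FIRST_NORMAL_OID)
        next_oid = FIRST_NORMAL_OID;
    return next_oid++;
}

int
main(void)
{
    printf("%u\n", get_new_oid());   /* prints 16384, not 0 */
    return 0;
}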
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index ea19e075640..323a50ba636 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.216 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -750,8 +750,8 @@ RecordTransactionCommit(void)
* XLOG record generated by nextval will hit the disk before we report
* the transaction committed.
*
- * Note: if we generated a commit record above, MyXactMadeXLogEntry will
- * certainly be set now.
+ * Note: if we generated a commit record above, MyXactMadeXLogEntry
+ * will certainly be set now.
*/
if (MyXactMadeXLogEntry)
{
@@ -762,8 +762,8 @@ RecordTransactionCommit(void)
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if there are
- * fewer than CommitSiblings other backends with active
+ * We do not sleep if enableFsync is not turned on, nor if there
+ * are fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
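[Editor's note: the guarded sleep described above is easy to sketch — delay before the flush only when fsync is on and enough concurrent transactions might piggyback on one physical sync. CountActiveBackends() is a hypothetical stand-in, and usleep() stands in for the backend's own microsecond sleep:]

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int  commit_delay    = 100;   /* microseconds; 0 disables */
static int  commit_siblings = 5;
static bool enable_fsync    = true;

/* Hypothetical stand-in for counting other active transactions. */
static int CountActiveBackends(void) { return 7; }

static void
flush_commit_record(void)
{
    /*
     * Sleep only when it can pay off: fsync enabled and enough siblings
     * that one physical sync may cover several commits.  (select()-based
     * delays bottom out around 10msec on many Unixen, hence usleep.)
     */
    if (commit_delay > 0 && enable_fsync &&
        CountActiveBackends() >= commit_siblings)
        usleep((useconds_t) commit_delay);

    puts("XLogFlush(commit record)");  /* stand-in for the real flush */
}

int
main(void)
{
    flush_commit_record();
    return 0;
}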
@@ -993,10 +993,10 @@ RecordTransactionAbort(void)
* nowhere in permanent storage, so no one else will ever care if it
* committed.)
*
- * We do not flush XLOG to disk unless deleting files, since the default
- * assumption after a crash would be that we aborted, anyway. For the
- * same reason, we don't need to worry about interlocking against
- * checkpoint start.
+ * We do not flush XLOG to disk unless deleting files, since the
+ * default assumption after a crash would be that we aborted, anyway.
+ * For the same reason, we don't need to worry about interlocking
+ * against checkpoint start.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
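[Editor's note: the abort path's weaker durability rule also fits in a few lines — write the abort record, but force it to disk only when files are about to be deleted, since "crashed implies aborted" is the default assumption anyway. Function names are illustrative:]

#include <stdio.h>

static void xlog_insert_abort_record(void) { puts("abort record inserted"); }
static void xlog_flush(void)               { puts("XLOG flushed to disk"); }

static void
record_transaction_abort(int nrels)
{
    xlog_insert_abort_record();

    /*
     * Only file deletion must be durably preceded by the abort record;
     * otherwise a crash simply leaves us looking aborted, which is fine.
     */
    if (nrels > 0)
        xlog_flush();
}

int
main(void)
{
    record_transaction_abort(0);   /* no rels: record written, not flushed */
    record_transaction_abort(2);   /* deleting files: flush first */
    return 0;
}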
@@ -1042,8 +1042,8 @@ RecordTransactionAbort(void)
* Mark the transaction aborted in clog. This is not absolutely
* necessary but we may as well do it while we are here.
*
- * The ordering here isn't critical but it seems best to mark the parent
- * first. This assures an atomic transition of all the
+ * The ordering here isn't critical but it seems best to mark the
+ * parent first. This assures an atomic transition of all the
* subtransactions to aborted state from the point of view of
* concurrent TransactionIdDidAbort calls.
*/
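[Editor's note: "mark the parent first" is about what concurrent readers can observe — once the parent reads as aborted, the whole tree is treated as aborted, so children flipping afterwards never shows up as a half-aborted state. A toy sketch with a flat array standing in for clog:]

#include <stdio.h>

enum { IN_PROGRESS, ABORTED };
static int clog[16];               /* toy commit log, indexed by xid */

static void
mark_tree_aborted(int parent, const int *subxids, int nsub)
{
    clog[parent] = ABORTED;        /* parent first: readers now see the   */
    for (int i = 0; i < nsub; i++) /* whole tree as aborted in one step   */
        clog[subxids[i]] = ABORTED;
}

int
main(void)
{
    int subs[] = {4, 5};

    mark_tree_aborted(3, subs, 2);
    printf("xid 3: %s\n", clog[3] == ABORTED ? "aborted" : "in progress");
    return 0;
}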
@@ -1520,11 +1520,11 @@ CommitTransaction(void)
* it's too late to abort the transaction. This should be just
* noncritical resource releasing.
*
- * The ordering of operations is not entirely random. The idea is: release
- * resources visible to other backends (eg, files, buffer pins); then
- * release locks; then release backend-local resources. We want to release
- * locks at the point where any backend waiting for us will see our
- * transaction as being fully cleaned up.
+ * The ordering of operations is not entirely random. The idea is:
+ * release resources visible to other backends (eg, files, buffer pins);
+ * then release locks; then release backend-local resources. We want to
+ * release locks at the point where any backend waiting for us will see
+ * our transaction as being fully cleaned up.
*
* Resources that can be associated with individual queries are handled by
* the ResourceOwner mechanism. The other calls here are for backend-wide
@@ -1630,9 +1630,9 @@ PrepareTransaction(void)
* Do pre-commit processing (most of this stuff requires database access,
* and in fact could still cause an error...)
*
- * It is possible for PrepareHoldablePortals to invoke functions that queue
- * deferred triggers, and it's also possible that triggers create holdable
- * cursors. So we have to loop until there's nothing left to do.
+ * It is possible for PrepareHoldablePortals to invoke functions that
+ * queue deferred triggers, and it's also possible that triggers create
+ * holdable cursors. So we have to loop until there's nothing left to do.
*/
for (;;)
{
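[Editor's note: the loop this comment motivates is a fixpoint iteration — each pass can create more work for the other step, so repeat until a full pass changes nothing. A minimal self-contained sketch; fire_deferred_triggers and convert_holdable_portals are hypothetical stand-ins for the two mutually generating steps:]

#include <stdbool.h>
#include <stdio.h>

static int pending_triggers = 2;
static int open_portals     = 1;

/* Hypothetical steps: each may generate work for the other. */
static bool
fire_deferred_triggers(void)
{
    if (pending_triggers == 0)
        return false;
    pending_triggers--;
    open_portals++;                /* a trigger opened a holdable cursor */
    return true;
}

static bool
convert_holdable_portals(void)
{
    if (open_portals == 0)
        return false;
    open_portals--;
    return true;
}

int
main(void)
{
    /* Loop until neither step finds anything left to do. */
    for (;;)
    {
        bool t = fire_deferred_triggers();
        bool p = convert_holdable_portals();

        if (!t && !p)
            break;
    }
    puts("quiescent: safe to prepare");
    return 0;
}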
@@ -1715,9 +1715,9 @@ PrepareTransaction(void)
/*
* Here is where we really truly prepare.
*
- * We have to record transaction prepares even if we didn't make any updates,
- * because the transaction manager might get confused if we lose a global
- * transaction.
+ * We have to record transaction prepares even if we didn't make any
+ * updates, because the transaction manager might get confused if we lose
+ * a global transaction.
*/
EndPrepare(gxact);
@@ -1868,10 +1868,11 @@ AbortTransaction(void)
* s->currentUser, since it may not be set yet; instead rely on internal
* state of miscinit.c.
*
- * (Note: it is not necessary to restore session authorization here because
- * that can only be changed via GUC, and GUC will take care of rolling it
- * back if need be. However, an error within a SECURITY DEFINER function
- * could send control here with the wrong current userid.)
+ * (Note: it is not necessary to restore session authorization here
+ * because that can only be changed via GUC, and GUC will take care of
+ * rolling it back if need be. However, an error within a SECURITY
+ * DEFINER function could send control here with the wrong current
+ * userid.)
*/
AtAbort_UserId();
@@ -2353,8 +2354,8 @@ AbortCurrentTransaction(void)
/*
* Here, we are already in an aborted transaction state and are
- * waiting for a ROLLBACK, but for some reason we failed again!
- * So we just remain in the abort state.
+ * waiting for a ROLLBACK, but for some reason we failed again! So
+ * we just remain in the abort state.
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 66db5d9dd26..54d2f2cc788 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.222 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.223 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -571,11 +571,11 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* the whole record in the order "rdata, then backup blocks, then record
* header".
*
- * We may have to loop back to here if a race condition is detected below. We
- * could prevent the race by doing all this work while holding the insert
- * lock, but it seems better to avoid doing CRC calculations while holding
- * the lock. This means we have to be careful about modifying the rdata
- * chain until we know we aren't going to loop back again. The only
+ * We may have to loop back to here if a race condition is detected below.
+ * We could prevent the race by doing all this work while holding the
+ * insert lock, but it seems better to avoid doing CRC calculations while
+ * holding the lock. This means we have to be careful about modifying the
+ * rdata chain until we know we aren't going to loop back again. The only
* change we allow ourselves to make earlier is to set rdt->data = NULL in
* chain items we have decided we will have to back up the whole buffer
* for. This is OK because we will certainly decide the same thing again
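[Editor's note: the retry protocol described in this comment — do the expensive CRC work unlocked, then revalidate under the lock and loop back if the world changed — looks like this in miniature. compute_crc, the epoch counter, and the lock stubs are illustrative stand-ins, not the real WAL-insert machinery:]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t shared_epoch = 0;   /* stands in for RedoRecPtr */

static void acquire_lock(void) { /* stand-in for LWLockAcquire */ }
static void release_lock(void) { /* stand-in for LWLockRelease */ }

static uint32_t
compute_crc(const void *buf, size_t len)
{
    (void) buf;
    return (uint32_t) len;          /* placeholder for the real CRC */
}

static void
xlog_insert(const void *rec, size_t len)
{
    uint64_t epoch_seen;
    uint32_t crc;

begin:
    epoch_seen = shared_epoch;      /* unlocked snapshot */
    crc = compute_crc(rec, len);    /* expensive work, done unlocked */

    acquire_lock();
    if (shared_epoch != epoch_seen)
    {
        /* Race detected: our CRC may cover stale decisions; redo it. */
        release_lock();
        goto begin;
    }
    printf("inserted record, crc=%u\n", crc);
    release_lock();
}

int
main(void)
{
    const char rec[] = "payload";

    xlog_insert(rec, sizeof(rec));
    return 0;
}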
@@ -763,9 +763,9 @@ begin:;
* now irrevocably changed the input rdata chain. At the exit of this
* loop, write_len includes the backup block data.
*
- * Also set the appropriate info bits to show which buffers were backed up.
- * The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct buffer
- * value (ignoring InvalidBuffer) appearing in the rdata chain.
+ * Also set the appropriate info bits to show which buffers were backed
+ * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
+ * buffer value (ignoring InvalidBuffer) appearing in the rdata chain.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
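[Editor's note: the i'th-bit bookkeeping is mechanical — one info bit per distinct buffer that got a full-page image. A sketch mirroring the shape of the XLR_SET_BKP_BLOCK scheme; the constants here are illustrative:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_BKP_BLOCKS 3
/* i'th bit, counting down from 0x08 -- in the style of XLR_SET_BKP_BLOCK */
#define SET_BKP_BLOCK(i) (0x08 >> (i))

int
main(void)
{
    bool    backed_up[MAX_BKP_BLOCKS] = {true, false, true};
    uint8_t info = 0;

    for (int i = 0; i < MAX_BKP_BLOCKS; i++)
        if (backed_up[i])
            info |= SET_BKP_BLOCK(i);    /* block i carries a full image */

    printf("info bits: 0x%02x\n", info); /* 0x08 | 0x02 = 0x0a */
    return 0;
}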
@@ -1666,20 +1666,20 @@ XLogFlush(XLogRecPtr record)
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
*
- * Formerly we treated this as a PANIC condition, but that hurts the system's
- * robustness rather than helping it: we do not want to take down the
- * whole system due to corruption on one data page. In particular, if the
- * bad page is encountered again during recovery then we would be unable
- * to restart the database at all! (This scenario has actually happened
- * in the field several times with 7.1 releases. Note that we cannot get
- * here while InRedo is true, but if the bad page is brought in and marked
- * dirty during recovery then CreateCheckPoint will try to flush it at the
- * end of recovery.)
+ * Formerly we treated this as a PANIC condition, but that hurts the
+ * system's robustness rather than helping it: we do not want to take down
+ * the whole system due to corruption on one data page. In particular, if
+ * the bad page is encountered again during recovery then we would be
+ * unable to restart the database at all! (This scenario has actually
+ * happened in the field several times with 7.1 releases. Note that we
+ * cannot get here while InRedo is true, but if the bad page is brought in
+ * and marked dirty during recovery then CreateCheckPoint will try to
+ * flush it at the end of recovery.)
*
- * The current approach is to ERROR under normal conditions, but only WARNING
- * during recovery, so that the system can be brought up even if there's a
- * corrupt LSN. Note that for calls from xact.c, the ERROR will be
- * promoted to PANIC since xact.c calls this routine inside a critical
+ * The current approach is to ERROR under normal conditions, but only
+ * WARNING during recovery, so that the system can be brought up even if
+ * there's a corrupt LSN. Note that for calls from xact.c, the ERROR will
+ * be promoted to PANIC since xact.c calls this routine inside a critical
* section. However, calls from bufmgr.c are not within critical sections
* and so we will not force a restart for a bad LSN on a data page.
*/
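[Editor's note: the policy in this paragraph reduces to choosing a log level by context — error out during normal operation, but degrade to a warning while in recovery so startup can proceed past one bad LSN. A sketch; in_recovery and the stderr/exit pair are stand-ins for the backend's InRecovery flag and elog levels:]

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool in_recovery = true;

static void
flush_request_past_eof(unsigned long req, unsigned long eof)
{
    /*
     * ERROR normally (callers inside critical sections promote it to
     * PANIC); WARNING during recovery so one corrupt LSN cannot keep
     * the whole system down.
     */
    if (in_recovery)
        fprintf(stderr, "WARNING: flush request %lx past end of WAL %lx\n",
                req, eof);
    else
    {
        fprintf(stderr, "ERROR: flush request %lx past end of WAL %lx\n",
                req, eof);
        exit(1);                /* stand-in for longjmp-style ERROR */
    }
}

int
main(void)
{
    flush_request_past_eof(0xdeadbeefUL, 0x1000UL);
    return 0;
}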
@@ -2152,14 +2152,14 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* preserved correctly when we copied to archive. Our aim is robustness,
* so we elect not to do this.
*
- * If we cannot obtain the log file from the archive, however, we will try to
- * use the XLOGDIR file if it exists. This is so that we can make use of
- * log segments that weren't yet transferred to the archive.
+ * If we cannot obtain the log file from the archive, however, we will try
+ * to use the XLOGDIR file if it exists. This is so that we can make use
+ * of log segments that weren't yet transferred to the archive.
*
- * Notice that we don't actually overwrite any files when we copy back from
- * archive because the recoveryRestoreCommand may inadvertently restore
- * inappropriate xlogs, or they may be corrupt, so we may wish to fallback
- * to the segments remaining in current XLOGDIR later. The
+ * Notice that we don't actually overwrite any files when we copy back
+ * from archive because the recoveryRestoreCommand may inadvertently
+ * restore inappropriate xlogs, or they may be corrupt, so we may wish to
+ * fallback to the segments remaining in current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
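[Editor's note: the archive-first, local-second policy sketches cleanly — run the restore command into a fixed scratch name (so repeated restores cannot fill the disk), and fall back to the segment already in the WAL directory when the archive cannot supply it. run_restore_command is hypothetical; the paths follow the XLOGDIR layout of this era:]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical: invoke recoveryRestoreCommand; false if archive lacks it. */
static bool
run_restore_command(const char *segname, const char *dest)
{
    (void) segname; (void) dest;
    return false;               /* pretend the archive doesn't have it */
}

static void
restore_segment(const char *segname, char *path, size_t pathlen)
{
    /* Always restore to the same scratch name; never overwrite pg_xlog. */
    if (run_restore_command(segname, "pg_xlog/RECOVERYXLOG"))
    {
        snprintf(path, pathlen, "pg_xlog/RECOVERYXLOG");
        return;
    }
    /* Fall back to a segment not yet shipped to the archive. */
    snprintf(path, pathlen, "pg_xlog/%s", segname);
}

int
main(void)
{
    char path[64];

    restore_segment("000000010000000000000003", path, sizeof(path));
    printf("reading WAL from %s\n", path);
    return 0;
}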
@@ -2246,11 +2246,11 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* command apparently succeeded, but let's make sure the file is
* really there now and has the correct size.
*
- * XXX I made wrong-size a fatal error to ensure the DBA would notice it,
- * but is that too strong? We could try to plow ahead with a local
- * copy of the file ... but the problem is that there probably isn't
- * one, and we'd incorrectly conclude we've reached the end of WAL and
- * we're done recovering ...
+ * XXX I made wrong-size a fatal error to ensure the DBA would notice
+ * it, but is that too strong? We could try to plow ahead with a
+ * local copy of the file ... but the problem is that there probably
+ * isn't one, and we'd incorrectly conclude we've reached the end of
+ * WAL and we're done recovering ...
*/
if (stat(xlogpath, &stat_buf) == 0)
{
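[Editor's note: the "really there now and has the correct size" step is a plain stat() check; treating the wrong size as fatal is the deliberately strict choice the XXX comment debates. A sketch with an illustrative segment size:]

#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

#define XLOG_SEG_SIZE (16 * 1024 * 1024)   /* illustrative 16MB segment */

static void
verify_restored_segment(const char *path)
{
    struct stat st;

    if (stat(path, &st) != 0)
    {
        fprintf(stderr, "restore command succeeded but %s is missing\n",
                path);
        exit(1);
    }
    if (st.st_size != XLOG_SEG_SIZE)
    {
        /* Fatal, so the DBA notices a mangled archive copy immediately. */
        fprintf(stderr, "%s: wrong size %lld, expected %d\n",
                path, (long long) st.st_size, XLOG_SEG_SIZE);
        exit(1);
    }
}

int
main(int argc, char **argv)
{
    verify_restored_segment(argc > 1 ? argv[1] : "/tmp/RECOVERYXLOG");
    return 0;
}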
@@ -3533,8 +3533,8 @@ ReadControlFile(void)
/*
* Do compatibility checking immediately. We do this here for 2 reasons:
*
- * (1) if the database isn't compatible with the backend executable, we want
- * to abort before we can possibly do any damage;
+ * (1) if the database isn't compatible with the backend executable, we
+ * want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file for
@@ -4148,9 +4148,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
* descriptive of what our current database state is, because that is what
* we replayed from.
*
- * Note that if we are establishing a new timeline, ThisTimeLineID is already
- * set to the new value, and so we will create a new file instead of
- * overwriting any existing file.
+ * Note that if we are establishing a new timeline, ThisTimeLineID is
+ * already set to the new value, and so we will create a new file instead
+ * of overwriting any existing file.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
@@ -4341,8 +4341,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
- * Note: in most control paths, *ControlFile is already valid and we need not
- * do ReadControlFile() here, but might as well do it to be sure.
+ * Note: in most control paths, *ControlFile is already valid and we need
+ * not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@@ -4766,14 +4766,14 @@ StartupXLOG(void)
/*
* Perform a new checkpoint to update our recovery activity to disk.
*
- * Note that we write a shutdown checkpoint rather than an on-line one.
- * This is not particularly critical, but since we may be assigning a
- * new TLI, using a shutdown checkpoint allows us to have the rule
- * that TLI only changes in shutdown checkpoints, which allows some
- * extra error checking in xlog_redo.
+ * Note that we write a shutdown checkpoint rather than an on-line
+ * one. This is not particularly critical, but since we may be
+ * assigning a new TLI, using a shutdown checkpoint allows us to have
+ * the rule that TLI only changes in shutdown checkpoints, which
+ * allows some extra error checking in xlog_redo.
*
- * In case we had to use the secondary checkpoint, make sure that it will
- * still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it
+ * will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -5106,10 +5106,10 @@ CreateCheckPoint(bool shutdown, bool force)
* (Perhaps it'd make even more sense to checkpoint only when the previous
* checkpoint record is in a different xlog page?)
*
- * We have to make two tests to determine that nothing has happened since the
- * start of the last checkpoint: current insertion point must match the
- * end of the last checkpoint record, and its redo pointer must point to
- * itself.
+ * We have to make two tests to determine that nothing has happened since
+ * the start of the last checkpoint: current insertion point must match
+ * the end of the last checkpoint record, and its redo pointer must point
+ * to itself.
*/
if (!shutdown && !force)
{
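[Editor's note: the "two tests" reduce to comparing three WAL positions — skip the checkpoint only if the current insert point sits exactly at the end of the previous checkpoint record and that record's redo pointer points at itself. A sketch using flat 64-bit positions in place of XLogRecPtr:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t wal_ptr;          /* flat stand-in for XLogRecPtr */

static bool
nothing_logged_since_last_ckpt(wal_ptr cur_insert,
                               wal_ptr last_ckpt_start,
                               wal_ptr last_ckpt_end,
                               wal_ptr last_ckpt_redo)
{
    /* Test 1: nothing inserted after the old checkpoint record... */
    /* Test 2: ...and that checkpoint's redo pointer pointed to itself. */
    return cur_insert == last_ckpt_end && last_ckpt_redo == last_ckpt_start;
}

int
main(void)
{
    bool skip = nothing_logged_since_last_ckpt(0x2000, 0x1f80,
                                               0x2000, 0x1f80);

    printf("%s\n", skip ? "skip checkpoint" : "do checkpoint");
    return 0;
}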
@@ -5198,11 +5198,11 @@ CreateCheckPoint(bool shutdown, bool force)
* Having constructed the checkpoint record, ensure all shmem disk buffers
* and commit-log buffers are flushed to disk.
*
- * This I/O could fail for various reasons. If so, we will fail to complete
- * the checkpoint, but there is no reason to force a system panic.
- * Accordingly, exit critical section while doing it. (If we are doing a
- * shutdown checkpoint, we probably *should* panic --- but that will
- * happen anyway because we'll still be inside the critical section
+ * This I/O could fail for various reasons. If so, we will fail to
+ * complete the checkpoint, but there is no reason to force a system
+ * panic. Accordingly, exit critical section while doing it. (If we are
+ * doing a shutdown checkpoint, we probably *should* panic --- but that
+ * will happen anyway because we'll still be inside the critical section
* established by ShutdownXLOG.)
*/
END_CRIT_SECTION();
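[Editor's note: the exit/re-enter dance around the flush keeps a recoverable I/O failure from being promoted to PANIC. A minimal sketch of the bracketing; flush_all_buffers stands in for the shmem and commit-log flushing, and the macros for the real critical-section calls:]

#include <stdbool.h>
#include <stdio.h>

static int crit_depth = 0;

#define START_CRIT_SECTION() (crit_depth++)
#define END_CRIT_SECTION()   (crit_depth--)

/* Stand-in for flushing shmem and clog buffers; may fail benignly. */
static bool flush_all_buffers(void) { return true; }

int
main(void)
{
    START_CRIT_SECTION();       /* checkpoint record is being built */

    /*
     * The flush can fail for mundane reasons (disk full, EIO); don't let
     * that PANIC us -- leave the critical section around the I/O.
     */
    END_CRIT_SECTION();
    if (!flush_all_buffers())
    {
        fprintf(stderr, "checkpoint failed; no panic needed\n");
        return 1;
    }
    START_CRIT_SECTION();       /* back in, to finish the checkpoint */

    puts("checkpoint record written");
    END_CRIT_SECTION();
    return 0;
}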