author     Bruce Momjian <bruce@momjian.us>    2005-11-22 18:23:31 +0000
committer  Bruce Momjian <bruce@momjian.us>    2005-11-22 18:23:31 +0000
commit     bef7764835725e5d8468da1c139e9020be689b95 (patch)
tree       71075b16ab6ed5152b31757e5dd65cd2b9383ba0 /src/backend/access
parent     c8de36352fe72ae2265eb53a6e1bf334e4f24888 (diff)
Re-run pgindent, fixing a problem where comment lines after a blank
comment line were output as too long, and update typedefs for /lib
directory.  Also fix case where identifiers were used as variable names
in the backend, but as typedefs in ecpg (favor the backend for indenting).

Backpatch to 8.1.X.
Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c      12
-rw-r--r--  src/backend/access/common/tupdesc.c          6
-rw-r--r--  src/backend/access/gist/gistget.c            6
-rw-r--r--  src/backend/access/hash/hashovfl.c           8
-rw-r--r--  src/backend/access/hash/hashpage.c          16
-rw-r--r--  src/backend/access/heap/heapam.c            32
-rw-r--r--  src/backend/access/heap/hio.c               12
-rw-r--r--  src/backend/access/heap/tuptoaster.c        12
-rw-r--r--  src/backend/access/index/genam.c             6
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c       38
-rw-r--r--  src/backend/access/nbtree/nbtpage.c         36
-rw-r--r--  src/backend/access/nbtree/nbtree.c          10
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c       39
-rw-r--r--  src/backend/access/nbtree/nbtsort.c          8
-rw-r--r--  src/backend/access/nbtree/nbtutils.c         6
-rw-r--r--  src/backend/access/rtree/rtree.c            32
-rw-r--r--  src/backend/access/transam/multixact.c      83
-rw-r--r--  src/backend/access/transam/slru.c           16
-rw-r--r--  src/backend/access/transam/subtrans.c       12
-rw-r--r--  src/backend/access/transam/transam.c         8
-rw-r--r--  src/backend/access/transam/twophase.c       24
-rw-r--r--  src/backend/access/transam/varsup.c         10
-rw-r--r--  src/backend/access/transam/xact.c           57
-rw-r--r--  src/backend/access/transam/xlog.c          114
24 files changed, 303 insertions, 300 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 5551f744bb2..bda7a67172c 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.102 2005/10/19 22:30:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.102.2.1 2005/11/22 18:23:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -512,11 +512,11 @@ nocachegetattr(HeapTuple tuple,
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
- * Note - This loop is a little tricky. For each non-null attribute, we
- * have to first account for alignment padding before the attr, then
- * advance over the attr based on its length. Nulls have no storage
- * and no alignment padding either. We can use/set attcacheoff until
- * we pass either a null or a var-width attribute.
+ * Note - This loop is a little tricky. For each non-null attribute,
+ * we have to first account for alignment padding before the attr,
+ * then advance over the attr based on its length. Nulls have no
+ * storage and no alignment padding either. We can use/set
+ * attcacheoff until we pass either a null or a var-width attribute.
*/
for (i = 0; i < attnum; i++)
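
[Editorial note: the reflowed nocachegetattr() comment above describes the
"account for alignment padding, then advance" walk over a tuple's attributes.
The following is a minimal illustrative sketch of that pattern only; it is
not the backend's code.  attr_desc, att_align() and the toy varlena layout
(a 4-byte length word followed by the payload) are simplified, hypothetical
stand-ins for the real attribute metadata.]

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct attr_desc
{
	int			attlen;			/* fixed width in bytes, or -1 for variable */
	int			attalign;		/* required alignment, a power of two */
} attr_desc;

static size_t
att_align(size_t off, int alignto)
{
	/* round off up to the next multiple of alignto */
	return (off + (size_t) alignto - 1) & ~((size_t) alignto - 1);
}

/* Byte offset of attribute 'attnum' (0-based) within the data area. */
static size_t
attr_offset(const char *data, const attr_desc *atts, const bool *isnull,
			int attnum)
{
	size_t		off = 0;
	int			i;

	for (i = 0; i < attnum; i++)
	{
		if (isnull[i])
			continue;			/* nulls use no storage and no padding */
		off = att_align(off, atts[i].attalign);
		if (atts[i].attlen > 0)
			off += (size_t) atts[i].attlen;		/* fixed-width attribute */
		else
		{
			int			len;

			memcpy(&len, data + off, sizeof(len));	/* toy varlena length word */
			off += sizeof(len) + (size_t) len;
		}
	}
	return att_align(off, atts[attnum].attalign);
}

[As the comment notes, a cached offset (attcacheoff) is only usable up to the
first null or variable-width attribute, since everything before that point
has a fixed, alignment-determined position.]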
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index cfa455beec9..d6b63bd79bd 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112.2.1 2005/11/22 18:23:03 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -49,8 +49,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
* Allocate enough memory for the tuple descriptor, including the
* attribute rows, and set up the attribute row pointers.
*
- * Note: we assume that sizeof(struct tupleDesc) is a multiple of the struct
- * pointer alignment requirement, and hence we don't need to insert
+ * Note: we assume that sizeof(struct tupleDesc) is a multiple of the
+ * struct pointer alignment requirement, and hence we don't need to insert
* alignment padding between the struct and the array of attribute row
* pointers.
*/
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 5ae48bd66e3..1e02ec082f4 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.52 2005/10/06 02:29:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.52.2.1 2005/11/22 18:23:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -392,8 +392,8 @@ gistindex_keytest(IndexTuple tuple,
* are the index datum (as a GISTENTRY*), the comparison datum, and
* the comparison operator's strategy number and subtype from pg_amop.
*
- * (Presently there's no need to pass the subtype since it'll always be
- * zero, but might as well pass it for possible future use.)
+ * (Presently there's no need to pass the subtype since it'll always
+ * be zero, but might as well pass it for possible future use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 7289d9a0b35..6fadfb20c0a 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.47 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.47.2.1 2005/11/22 18:23:03 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -488,9 +488,9 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
* It is okay to write-lock the new bitmap page while holding metapage
* write lock, because no one else could be contending for the new page.
*
- * There is some loss of concurrency in possibly doing I/O for the new page
- * while holding the metapage lock, but this path is taken so seldom that
- * it's not worth worrying about.
+ * There is some loss of concurrency in possibly doing I/O for the new
+ * page while holding the metapage lock, but this path is taken so seldom
+ * that it's not worth worrying about.
*/
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
pg = BufferGetPage(buf);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index b40c20b480b..a7da7609d79 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.52 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.52.2.1 2005/11/22 18:23:03 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -400,8 +400,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
- * Ideally we would lock the new bucket too before proceeding, but if we are
- * about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
+ * Ideally we would lock the new bucket too before proceeding, but if we
+ * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
* correct yet. For simplicity we update the metapage first and then
* lock. This should be okay because no one else should be trying to lock
* the new bucket yet...
@@ -420,11 +420,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Okay to proceed with split. Update the metapage bucket mapping info.
*
- * Since we are scribbling on the metapage data right in the shared buffer,
- * any failure in this next little bit leaves us with a big problem: the
- * metapage is effectively corrupt but could get written back to disk. We
- * don't really expect any failure, but just to be sure, establish a
- * critical section.
+ * Since we are scribbling on the metapage data right in the shared
+ * buffer, any failure in this next little bit leaves us with a big
+ * problem: the metapage is effectively corrupt but could get written back
+ * to disk. We don't really expect any failure, but just to be sure,
+ * establish a critical section.
*/
START_CRIT_SECTION();
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index c6d300fe482..4e9c0ff8bba 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.200.2.1 2005/11/20 18:38:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.200.2.2 2005/11/22 18:23:03 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1089,7 +1089,7 @@ heap_get_latest_tid(Relation relation,
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -1136,8 +1136,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* If the new tuple is too big for storage or contains already toasted
* out-of-line attributes from some other relation, invoke the toaster.
*
- * Note: below this point, heaptup is the data we actually intend to
- * store into the relation; tup is the caller's original untoasted data.
+ * Note: below this point, heaptup is the data we actually intend to store
+ * into the relation; tup is the caller's original untoasted data.
*/
if (HeapTupleHasExternal(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
@@ -1224,8 +1224,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
- * buffer, because the heaptup data structure is all in local memory,
- * not in the shared buffer.
+ * buffer, because the heaptup data structure is all in local memory, not
+ * in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, heaptup);
@@ -1333,8 +1333,8 @@ l1:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@@ -1577,7 +1577,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
*
* On success, the header fields of *newtup are updated to match the new
* stored tuple; in particular, newtup->t_self is set to the TID where the
- * new tuple was inserted. However, any TOAST changes in the new tuple's
+ * new tuple was inserted. However, any TOAST changes in the new tuple's
* data are not reflected into *newtup.
*
* In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
@@ -1649,8 +1649,8 @@ l2:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@@ -1782,8 +1782,8 @@ l2:
* show that it's already being updated, else other processes may try to
* update it themselves.
*
- * We need to invoke the toaster if there are already any out-of-line toasted
- * values present, or if the new tuple is over-threshold.
+ * We need to invoke the toaster if there are already any out-of-line
+ * toasted values present, or if the new tuple is over-threshold.
*/
newtupsize = MAXALIGN(newtup->t_len);
@@ -1886,7 +1886,7 @@ l2:
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
if (!already_marked)
{
@@ -2123,8 +2123,8 @@ l3:
* LockTuple will release us when we are next-in-line for the tuple.
* We must do this even if we are share-locking.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 800ee4a805b..440c94bf56b 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -296,11 +296,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Remember the new page as our target for future insertions.
*
- * XXX should we enter the new page into the free space map immediately, or
- * just keep it for this backend's exclusive use in the short run (until
- * VACUUM sees it)? Seems to depend on whether you expect the current
- * backend to make more insertions or not, which is probably a good bet
- * most of the time. So for now, don't add it to FSM yet.
+ * XXX should we enter the new page into the free space map immediately,
+ * or just keep it for this backend's exclusive use in the short run
+ * (until VACUUM sees it)? Seems to depend on whether you expect the
+ * current backend to make more insertions or not, which is probably a
+ * good bet most of the time. So for now, don't add it to FSM yet.
*/
relation->rd_targblock = BufferGetBlockNumber(buffer);
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index de5396a5150..2292f696512 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.53.2.1 2005/11/20 18:38:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.53.2.2 2005/11/22 18:23:04 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1075,8 +1075,8 @@ toast_save_datum(Relation rel, Datum value)
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
- * Note also that there had better not be any user-created index on the
- * TOAST table, since we don't bother to update anything else.
+ * Note also that there had better not be any user-created index on
+ * the TOAST table, since we don't bother to update anything else.
*/
index_insert(toastidx, t_values, t_isnull,
&(toasttup->t_self),
@@ -1214,9 +1214,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
- * Note that because the index is actually on (valueid, chunkidx) we will see
- * the chunks in chunkidx order, even though we didn't explicitly ask for
- * it.
+ * Note that because the index is actually on (valueid, chunkidx) we will
+ * see the chunks in chunkidx order, even though we didn't explicitly ask
+ * for it.
*/
nextidx = 0;
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index ed604f9c5dc..7303bd2e604 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.49 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.49.2.1 2005/11/22 18:23:04 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -203,8 +203,8 @@ systable_beginscan(Relation heapRelation,
/*
* Change attribute numbers to be index column numbers.
*
- * This code could be generalized to search for the index key numbers to
- * substitute, but for now there's no need.
+ * This code could be generalized to search for the index key numbers
+ * to substitute, but for now there's no need.
*/
for (i = 0; i < nkeys; i++)
{
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 33c7612aac5..669459bac76 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.127 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.127.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,8 +104,8 @@ top:
* If we're not allowing duplicates, make sure the key isn't already in
* the index.
*
- * NOTE: obviously, _bt_check_unique can only detect keys that are already in
- * the index; so it cannot defend against concurrent insertions of the
+ * NOTE: obviously, _bt_check_unique can only detect keys that are already
+ * in the index; so it cannot defend against concurrent insertions of the
* same key. We protect against that by means of holding a write lock on
* the target page. Any other would-be inserter of the same key must
* acquire a write lock on the same target page, so only one would-be
@@ -114,8 +114,8 @@ top:
* our insertion, so no later inserter can fail to see our insertion.
* (This requires some care in _bt_insertonpg.)
*
- * If we must wait for another xact, we release the lock while waiting, and
- * then must start over completely.
+ * If we must wait for another xact, we release the lock while waiting,
+ * and then must start over completely.
*/
if (index_is_unique)
{
@@ -193,8 +193,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/*
* We can skip items that are marked killed.
*
- * Formerly, we applied _bt_isequal() before checking the kill flag,
- * so as to fall out of the item loop as soon as possible.
+ * Formerly, we applied _bt_isequal() before checking the kill
+ * flag, so as to fall out of the item loop as soon as possible.
* However, in the presence of heavy update activity an index may
* contain many killed items with the same key; running
* _bt_isequal() on each killed item gets expensive. Furthermore
@@ -431,11 +431,11 @@ _bt_insertonpg(Relation rel,
/*
* step right to next non-dead page
*
- * must write-lock that page before releasing write lock on current
- * page; else someone else's _bt_check_unique scan could fail to
- * see our insertion. write locks on intermediate dead pages
- * won't do because we don't know when they will get de-linked
- * from the tree.
+ * must write-lock that page before releasing write lock on
+ * current page; else someone else's _bt_check_unique scan could
+ * fail to see our insertion. write locks on intermediate dead
+ * pages won't do because we don't know when they will get
+ * de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
@@ -471,9 +471,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
- * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, so
- * this comparison is correct even though we appear to be accounting only
- * for the item and not for its line pointer.
+ * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
+ * so this comparison is correct even though we appear to be accounting
+ * only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@@ -1158,10 +1158,10 @@ _bt_insert_parent(Relation rel,
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
*
- * If we have to search for the parent level, we do so by re-descending from
- * the root. This is not super-efficient, but it's rare enough not to
- * matter. (This path is also taken when called from WAL recovery --- we
- * have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending
+ * from the root. This is not super-efficient, but it's rare enough not
+ * to matter. (This path is also taken when called from WAL recovery ---
+ * we have no stack in that case.)
*/
if (is_root)
{
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 927860030c8..8464d5478f6 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.88 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.88.2.1 2005/11/22 18:23:04 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -440,21 +440,21 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
- * In fact, it's worse than that: we can't even assume that it's safe to
- * take a lock on the reported page. If somebody else has a lock on
- * it, or even worse our own caller does, we could deadlock. (The
+ * In fact, it's worse than that: we can't even assume that it's safe
+ * to take a lock on the reported page. If somebody else has a lock
+ * on it, or even worse our own caller does, we could deadlock. (The
* own-caller scenario is actually not improbable. Consider an index
* on a serial or timestamp column. Nearly all splits will be at the
* rightmost page, so it's entirely likely that _bt_split will call us
- * while holding a lock on the page most recently acquired from FSM.
- * A VACUUM running concurrently with the previous split could well
- * have placed that page back in FSM.)
+ * while holding a lock on the page most recently acquired from FSM. A
+ * VACUUM running concurrently with the previous split could well have
+ * placed that page back in FSM.)
*
- * To get around that, we ask for only a conditional lock on the reported
- * page. If we fail, then someone else is using the page, and we may
- * reasonably assume it's not free. (If we happen to be wrong, the
- * worst consequence is the page will be lost to use till the next
- * VACUUM, which is no big problem.)
+ * To get around that, we ask for only a conditional lock on the
+ * reported page. If we fail, then someone else is using the page,
+ * and we may reasonably assume it's not free. (If we happen to be
+ * wrong, the worst consequence is the page will be lost to use till
+ * the next VACUUM, which is no big problem.)
*/
for (;;)
{
@@ -803,12 +803,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
* We have to lock the pages we need to modify in the standard order:
* moving right, then up. Else we will deadlock against other writers.
*
- * So, we need to find and write-lock the current left sibling of the target
- * page. The sibling that was current a moment ago could have split, so
- * we may have to move right. This search could fail if either the
- * sibling or the target page was deleted by someone else meanwhile; if
- * so, give up. (Right now, that should never happen, since page deletion
- * is only done in VACUUM and there shouldn't be multiple VACUUMs
+ * So, we need to find and write-lock the current left sibling of the
+ * target page. The sibling that was current a moment ago could have
+ * split, so we may have to move right. This search could fail if either
+ * the sibling or the target page was deleted by someone else meanwhile;
+ * if so, give up. (Right now, that should never happen, since page
+ * deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
if (leftsib != P_NONE)
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 10e2fe6190d..8612554ca57 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.132 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.132.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -307,8 +307,8 @@ btgettuple(PG_FUNCTION_ARGS)
* Save heap TID to use it in _bt_restscan. Then release the read lock on
* the buffer so that we aren't blocking other backends.
*
- * NOTE: we do keep the pin on the buffer! This is essential to ensure that
- * someone else doesn't delete the index entry we are stopped on.
+ * NOTE: we do keep the pin on the buffer! This is essential to ensure
+ * that someone else doesn't delete the index entry we are stopped on.
*/
if (res)
{
@@ -734,8 +734,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* buffer and it will be fully initialized before we can examine it. (See
* also vacuumlazy.c, which has the same issue.)
*
- * We can skip locking for new or temp relations, however, since no one else
- * could be accessing them.
+ * We can skip locking for new or temp relations, however, since no one
+ * else could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(rel);
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index e487b498820..5f795073f0b 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.96 2005/10/18 01:06:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.96.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -164,10 +164,11 @@ _bt_moveright(Relation rel,
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
- * The page could even have split more than once, so scan as far as needed.
+ * The page could even have split more than once, so scan as far as
+ * needed.
*
- * We also have to move right if we followed a link that brought us to a dead
- * page.
+ * We also have to move right if we followed a link that brought us to a
+ * dead page.
*/
cmpval = nextkey ? 0 : 1;
@@ -255,8 +256,8 @@ _bt_binsrch(Relation rel,
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
- * For nextkey=true (cmpval=0), the loop invariant is: all slots before 'low'
- * are <= scan key, all slots at or after 'high' are > scan key.
+ * For nextkey=true (cmpval=0), the loop invariant is: all slots before
+ * 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
@@ -282,8 +283,8 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
- * On a leaf page, we always return the first key >= scan key (resp. > scan
- * key), which could be the last slot + 1.
+ * On a leaf page, we always return the first key >= scan key (resp. >
+ * scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
@@ -350,8 +351,8 @@ _bt_compare(Relation rel,
* you think about how multi-key ordering works, you'll understand why
* this is.
*
- * We don't test for violation of this condition here, however. The initial
- * setup for the index scan had better have gotten it right (see
+ * We don't test for violation of this condition here, however. The
+ * initial setup for the index scan had better have gotten it right (see
* _bt_first).
*/
@@ -692,9 +693,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* where we need to start the scan, and set flag variables to control the
* code below.
*
- * If nextkey = false, _bt_search and _bt_binsrch will locate the first item
- * >= scan key. If nextkey = true, they will locate the first item > scan
- * key.
+ * If nextkey = false, _bt_search and _bt_binsrch will locate the first
+ * item >= scan key. If nextkey = true, they will locate the first item >
+ * scan key.
*
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
@@ -819,9 +820,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* than or equal to the scan key and we know that everything on later
* pages is greater than scan key.
*
- * The actually desired starting point is either this item or the prior one,
- * or in the end-of-page case it's the first item on the next page or the
- * last item on this page. We apply _bt_step if needed to get to the
+ * The actually desired starting point is either this item or the prior
+ * one, or in the end-of-page case it's the first item on the next page or
+ * the last item on this page. We apply _bt_step if needed to get to the
* right place.
*
* If _bt_step fails (meaning we fell off the end of the index in one
@@ -1044,9 +1045,9 @@ _bt_walk_left(Relation rel, Buffer buf)
* the original page got deleted and isn't in the sibling chain at all
* anymore, not that its left sibling got split more than four times.
*
- * Note that it is correct to test P_ISDELETED not P_IGNORE here, because
- * half-dead pages are still in the sibling chain. Caller must reject
- * half-dead pages if wanted.
+ * Note that it is correct to test P_ISDELETED not P_IGNORE here,
+ * because half-dead pages are still in the sibling chain. Caller
+ * must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
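
[Editorial note: the _bt_binsrch hunk above states the loop invariant for the
two search modes.  Below is a hedged, self-contained sketch of that invariant
using plain int keys in place of index tuples; it is not the backend's code,
only the cmpval = nextkey ? 0 : 1 decision structure the comment describes.]

#include <stdbool.h>

/*
 * Return the first slot whose key is >= scankey (nextkey = false), or the
 * first slot whose key is > scankey (nextkey = true).  May return nkeys,
 * i.e. "past the last slot".
 */
static int
binsrch_first(const int *keys, int nkeys, int scankey, bool nextkey)
{
	int			low = 0;
	int			high = nkeys;	/* one past the last slot */
	int			cmpval = nextkey ? 0 : 1;	/* comparison result that keeps us moving right */

	while (high > low)
	{
		int			mid = low + (high - low) / 2;
		int			cmp = (scankey > keys[mid]) - (scankey < keys[mid]);

		if (cmp >= cmpval)
			low = mid + 1;		/* all slots before 'low' fail the test */
		else
			high = mid;			/* all slots at or after 'high' pass it */
	}
	return low;
}

[With nextkey = false, slots before 'low' are < scankey and slots at or after
'high' are >= scankey; with nextkey = true, the boundaries are <= and >,
matching the invariant spelled out in the comment.]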
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 6ee5d42b63a..8bfa8130a23 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -487,9 +487,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
* the per-page available space. Note that at this point, btisz doesn't
* include the ItemId.
*
- * NOTE: similar code appears in _bt_insertonpg() to defend against oversize
- * items being inserted into an already-existing index. But during
- * creation of an index, we don't go through there.
+ * NOTE: similar code appears in _bt_insertonpg() to defend against
+ * oversize items being inserted into an already-existing index. But
+ * during creation of an index, we don't go through there.
*/
if (btisz > BTMaxItemSize(npage))
ereport(ERROR,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 7d60c98f38d..27ec83a0f0c 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.65 2005/10/18 01:06:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.65.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -549,8 +549,8 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
* able to conclude no further tuples will pass, either. We have
* to look at the scan direction and the qual type.
*
- * Note: the only case in which we would keep going after failing a
- * required qual is if there are partially-redundant quals that
+ * Note: the only case in which we would keep going after failing
+ * a required qual is if there are partially-redundant quals that
* _bt_preprocess_keys() was unable to eliminate. For example,
* given "x > 4 AND x > 10" where both are cross-type comparisons
* and so not removable, we might start the scan at the x = 4
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index d684101d261..652daa38203 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.92 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.92.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -527,9 +527,9 @@ rtdosplit(Relation r,
* the left page, we expect it to get smaller. This happens in the
* internal insertion routine.
*
- * 3) Insert a pointer to the right page in the parent. This may cause the
- * parent to split. If it does, we need to repeat steps one and two for
- * each split node in the tree.
+ * 3) Insert a pointer to the right page in the parent. This may cause
+ * the parent to split. If it does, we need to repeat steps one and two
+ * for each split node in the tree.
*/
/* adjust active scans */
@@ -834,8 +834,8 @@ rtpicksplit(Relation r,
/*
* Now split up the regions between the two seeds.
*
- * The cost_vector array will contain hints for determining where each tuple
- * should go. Each record in the array will contain a boolean,
+ * The cost_vector array will contain hints for determining where each
+ * tuple should go. Each record in the array will contain a boolean,
* choose_left, that indicates which node the tuple prefers to be on, and
* the absolute difference in cost between putting the tuple in its
* favored node and in the other node.
@@ -848,9 +848,9 @@ rtpicksplit(Relation r,
* First, build the cost_vector array. The new index tuple will also be
* handled in this loop, and represented in the array, with i==newitemoff.
*
- * In the case of variable size tuples it is possible that we only have the
- * two seeds and no other tuples, in which case we don't do any of this
- * cost_vector stuff.
+ * In the case of variable size tuples it is possible that we only have
+ * the two seeds and no other tuples, in which case we don't do any of
+ * this cost_vector stuff.
*/
/* to keep compiler quiet */
@@ -966,11 +966,11 @@ rtpicksplit(Relation r,
* need not worry about any other problem than failing to fit the new
* item.)
*
- * Guttman's algorithm actually has two factors to consider (in order):
- * 1. if one node has so many tuples already assigned to it that the
- * other needs all the rest in order to satisfy the condition that
- * neither node has fewer than m tuples, then that is decisive; 2.
- * otherwise, choose the page that shows the smaller enlargement of
+ * Guttman's algorithm actually has two factors to consider (in
+ * order): 1. if one node has so many tuples already assigned to it
+ * that the other needs all the rest in order to satisfy the condition
+ * that neither node has fewer than m tuples, then that is decisive;
+ * 2. otherwise, choose the page that shows the smaller enlargement of
* its union area.
*
* I have chosen m = M/2, where M is the maximum number of tuples on a
@@ -979,8 +979,8 @@ rtpicksplit(Relation r,
* tuple on a page, if it is really big. But even with variable size
* tuples we still try to get m as close as possible to M/2.)
*
- * The question of which page shows the smaller enlargement of its union
- * area has already been answered, and the answer stored in the
+ * The question of which page shows the smaller enlargement of its
+ * union area has already been answered, and the answer stored in the
* choose_left field of the SPLITCOST record.
*/
left_feasible = (left_avail_space >= item_1_sz &&
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index af254da173d..f55f2c2c2fa 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.11 2005/10/28 19:00:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.11.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -129,22 +129,23 @@ typedef struct MultiXactStateData
* member of a MultiXact, and that MultiXact would have to be created
* during or after the lock acquisition.)
*
- * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's current
- * transaction(s) think is potentially live, or InvalidMultiXactId when
- * not in a transaction or not in a transaction that's paid any attention
- * to MultiXacts yet. This is computed when first needed in a given
- * transaction, and cleared at transaction end. We can compute it as the
- * minimum of the valid OldestMemberMXactId[] entries at the time we
- * compute it (using nextMXact if none are valid). Each backend is
+ * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's
+ * current transaction(s) think is potentially live, or InvalidMultiXactId
+ * when not in a transaction or not in a transaction that's paid any
+ * attention to MultiXacts yet. This is computed when first needed in a
+ * given transaction, and cleared at transaction end. We can compute it
+ * as the minimum of the valid OldestMemberMXactId[] entries at the time
+ * we compute it (using nextMXact if none are valid). Each backend is
* required not to attempt to access any SLRU data for MultiXactIds older
* than its own OldestVisibleMXactId[] setting; this is necessary because
* the checkpointer could truncate away such data at any instant.
*
- * The checkpointer can compute the safe truncation point as the oldest valid
- * value among all the OldestMemberMXactId[] and OldestVisibleMXactId[]
- * entries, or nextMXact if none are valid. Clearly, it is not possible
- * for any later-computed OldestVisibleMXactId value to be older than
- * this, and so there is no risk of truncating data that is still needed.
+ * The checkpointer can compute the safe truncation point as the oldest
+ * valid value among all the OldestMemberMXactId[] and
+ * OldestVisibleMXactId[] entries, or nextMXact if none are valid.
+ * Clearly, it is not possible for any later-computed OldestVisibleMXactId
+ * value to be older than this, and so there is no risk of truncating data
+ * that is still needed.
*/
MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */
} MultiXactStateData;
@@ -631,8 +632,8 @@ CreateMultiXactId(int nxids, TransactionId *xids)
}
/*
- * Assign the MXID and offsets range to use, and make sure there is
- * space in the OFFSETs and MEMBERs files. NB: this routine does
+ * Assign the MXID and offsets range to use, and make sure there is space
+ * in the OFFSETs and MEMBERs files. NB: this routine does
* START_CRIT_SECTION().
*/
multi = GetNewMultiXactId(nxids, &offset);
@@ -788,9 +789,9 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactOffset(result);
/*
- * Reserve the members space, similarly to above. Also, be
- * careful not to return zero as the starting offset for any multixact.
- * See GetMultiXactIdMembers() for motivation.
+ * Reserve the members space, similarly to above. Also, be careful not to
+ * return zero as the starting offset for any multixact. See
+ * GetMultiXactIdMembers() for motivation.
*/
nextOffset = MultiXactState->nextOffset;
if (nextOffset == 0)
@@ -804,8 +805,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactMember(nextOffset, nxids);
/*
- * Critical section from here until caller has written the data into
- * the just-reserved SLRU space; we don't want to error out with a partly
+ * Critical section from here until caller has written the data into the
+ * just-reserved SLRU space; we don't want to error out with a partly
* written MultiXact structure. (In particular, failing to write our
* start offset after advancing nextMXact would effectively corrupt the
* previous MultiXact.)
@@ -819,8 +820,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
- * be prepared to deal with that. Similarly, nextOffset may be zero,
- * but we won't use that as the actual start offset of the next multixact.
+ * be prepared to deal with that. Similarly, nextOffset may be zero, but
+ * we won't use that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
@@ -881,7 +882,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* SLRU data if we did try to examine it.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. We just
+ * seen, it implies undetected ID wraparound has occurred. We just
* silently assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
@@ -897,7 +898,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Acquire the shared lock just long enough to grab the current counter
- * values. We may need both nextMXact and nextOffset; see below.
+ * values. We may need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -915,27 +916,27 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter
- * as the difference between this multixact's starting offset and the
- * next one's. However, there are some corner cases to worry about:
+ * and the number of members in the multixact. We determine the latter as
+ * the difference between this multixact's starting offset and the next
+ * one's. However, there are some corner cases to worry about:
*
- * 1. This multixact may be the latest one created, in which case there
- * is no next one to look at. In this case the nextOffset value we just
+ * 1. This multixact may be the latest one created, in which case there is
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
- * 2. The next multixact may still be in process of being filled in:
- * that is, another process may have done GetNewMultiXactId but not yet
- * written the offset entry for that ID. In that scenario, it is
- * guaranteed that the offset entry for that multixact exists (because
- * GetNewMultiXactId won't release MultiXactGenLock until it does)
- * but contains zero (because we are careful to pre-zero offset pages).
- * Because GetNewMultiXactId will never return zero as the starting offset
- * for a multixact, when we read zero as the next multixact's offset, we
- * know we have this case. We sleep for a bit and try again.
+ * 2. The next multixact may still be in process of being filled in: that
+ * is, another process may have done GetNewMultiXactId but not yet written
+ * the offset entry for that ID. In that scenario, it is guaranteed that
+ * the offset entry for that multixact exists (because GetNewMultiXactId
+ * won't release MultiXactGenLock until it does) but contains zero
+ * (because we are careful to pre-zero offset pages). Because
+ * GetNewMultiXactId will never return zero as the starting offset for a
+ * multixact, when we read zero as the next multixact's offset, we know we
+ * have this case. We sleep for a bit and try again.
*
- * 3. Because GetNewMultiXactId increments offset zero to offset one
- * to handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * 3. Because GetNewMultiXactId increments offset zero to offset one to
+ * handle case #2, there is an ambiguity near the point of offset
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
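
[Editorial note: the GetMultiXactIdMembers comment above determines the member
count as the difference between this multixact's starting offset and the next
one's, with special handling for the latest multixact and for a next entry
that is still zero.  The sketch below captures only that decision structure.
read_offset(), nextMXact and nextOffset are hypothetical stand-ins for the
SLRU lookup and shared counters; the wraparound ambiguity (case 3) is left
out, and this is not the real backend code.]

typedef unsigned int MultiXactId;
typedef unsigned int MultiXactOffset;

extern MultiXactOffset read_offset(MultiXactId multi);	/* assumed SLRU lookup */

static int
member_count(MultiXactId multi, MultiXactId nextMXact,
			 MultiXactOffset nextOffset)
{
	MultiXactOffset start = read_offset(multi);
	MultiXactOffset end;

	if (multi + 1 == nextMXact)
	{
		/* Case 1: latest multixact; the saved nextOffset is the endpoint. */
		end = nextOffset;
	}
	else
	{
		end = read_offset(multi + 1);
		if (end == 0)
		{
			/*
			 * Case 2: the next multixact's offset entry hasn't been written
			 * yet (offset pages are pre-zeroed and zero is never a valid
			 * start offset), so the caller should sleep and retry.
			 */
			return -1;			/* "try again later" */
		}
	}
	return (int) (end - start);
}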
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index b8273d84f59..041c6cf84e0 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.29 2005/11/03 00:23:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.29.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -278,7 +278,7 @@ SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid)
SlruRecentlyUsed(shared, slotno);
/*
- * We must grab the per-buffer lock to do I/O. To avoid deadlock,
+ * We must grab the per-buffer lock to do I/O. To avoid deadlock,
* must release ControlLock while waiting for per-buffer lock.
* Fortunately, most of the time the per-buffer lock shouldn't be
* already held, so we can do this:
@@ -352,10 +352,10 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
pageno = shared->page_number[slotno];
/*
- * We must grab the per-buffer lock to do I/O. To avoid deadlock,
- * must release ControlLock while waiting for per-buffer lock.
- * Fortunately, most of the time the per-buffer lock shouldn't be
- * already held, so we can do this:
+ * We must grab the per-buffer lock to do I/O. To avoid deadlock, must
+ * release ControlLock while waiting for per-buffer lock. Fortunately,
+ * most of the time the per-buffer lock shouldn't be already held, so we
+ * can do this:
*/
if (!LWLockConditionalAcquire(shared->buffer_locks[slotno],
LW_EXCLUSIVE))
@@ -754,8 +754,8 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* We need to do I/O. Normal case is that we have to write it out,
* but it's possible in the worst case to have selected a read-busy
- * page. In that case we just wait for someone else to complete
- * the I/O, which we can do by waiting for the per-buffer lock.
+ * page. In that case we just wait for someone else to complete the
+ * I/O, which we can do by waiting for the per-buffer lock.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
{
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 7671eb6a45e..3c111e62c66 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.11 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.11.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -261,8 +261,8 @@ ShutdownSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do it
- * merely as a debugging aid.
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
@@ -276,9 +276,9 @@ CheckPointSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do it
- * merely to improve the odds that writing of dirty pages is done by the
- * checkpoint process and not by backends.
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely to improve the odds that writing of dirty pages is done by
+ * the checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);
}
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 59852520521..ed6c4bb608c 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66.2.1 2005/11/22 18:23:05 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -174,9 +174,9 @@ TransactionIdDidCommit(TransactionId transactionId)
* pg_subtrans; instead assume that the parent crashed without cleaning up
* its children.
*
- * Originally we Assert'ed that the result of SubTransGetParent was not zero.
- * However with the introduction of prepared transactions, there can be a
- * window just after database startup where we do not have complete
+ * Originally we Assert'ed that the result of SubTransGetParent was not
+ * zero. However with the introduction of prepared transactions, there can
+ * be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
* zeroed. Since this case should not happen under normal conditions, it
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 5423060653d..ca44e19a8ef 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.16 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.16.2.1 2005/11/22 18:23:05 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -851,10 +851,10 @@ EndPrepare(GlobalTransaction gxact)
/*
* Create the 2PC state file.
*
- * Note: because we use BasicOpenFile(), we are responsible for ensuring the
- * FD gets closed in any error exit path. Once we get into the critical
- * section, though, it doesn't matter since any failure causes PANIC
- * anyway.
+ * Note: because we use BasicOpenFile(), we are responsible for ensuring
+ * the FD gets closed in any error exit path. Once we get into the
+ * critical section, though, it doesn't matter since any failure causes
+ * PANIC anyway.
*/
TwoPhaseFilePath(path, xid);
@@ -911,8 +911,8 @@ EndPrepare(GlobalTransaction gxact)
* The state file isn't valid yet, because we haven't written the correct
* CRC yet. Before we do that, insert entry in WAL and flush it to disk.
*
- * Between the time we have written the WAL entry and the time we write out
- * the correct state file CRC, we have an inconsistency: the xact is
+ * Between the time we have written the WAL entry and the time we write
+ * out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
* the write --- then, WAL replay should repair the inconsistency. The
@@ -1344,11 +1344,11 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
* it just long enough to make a list of the XIDs that require fsyncing,
* and then do the I/O afterwards.
*
- * This approach creates a race condition: someone else could delete a GXACT
- * between the time we release TwoPhaseStateLock and the time we try to
- * open its state file. We handle this by special-casing ENOENT failures:
- * if we see that, we verify that the GXACT is no longer valid, and if so
- * ignore the failure.
+ * This approach creates a race condition: someone else could delete a
+ * GXACT between the time we release TwoPhaseStateLock and the time we try
+ * to open its state file. We handle this by special-casing ENOENT
+ * failures: if we see that, we verify that the GXACT is no longer valid,
+ * and if so ignore the failure.
*/
if (max_prepared_xacts <= 0)
return; /* nothing to do */
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 874a9736c70..21c0e069219 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.68 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.68.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,8 +56,8 @@ GetNewTransactionId(bool isSubXact)
* (which gives an escape hatch to the DBA who ignored all those
* warnings).
*
- * Test is coded to fall out as fast as possible during normal operation, ie,
- * when the warn limit is set and we haven't violated it.
+ * Test is coded to fall out as fast as possible during normal operation,
+ * ie, when the warn limit is set and we haven't violated it.
*/
if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit) &&
TransactionIdIsValid(ShmemVariableCache->xidWarnLimit))
@@ -268,8 +268,8 @@ GetNewObjectId(void)
* right after a wrap occurs, so as to avoid a possibly large number of
* iterations in GetNewOid.) Note we are relying on unsigned comparison.
*
- * During initdb, we start the OID generator at FirstBootstrapObjectId, so we
- * only enforce wrapping to that point when in bootstrap or standalone
+ * During initdb, we start the OID generator at FirstBootstrapObjectId, so
+ * we only enforce wrapping to that point when in bootstrap or standalone
* mode. The first time through this routine after normal postmaster
* start, the counter will be forced up to FirstNormalObjectId. This
* mechanism leaves the OIDs between FirstBootstrapObjectId and
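
[Editorial note: the GetNewObjectId comment above relies on an unsigned
comparison to enforce the wrap point.  The sketch below illustrates that one
test; the constants match the names the comment uses, but the function itself
is a simplified stand-in, not the backend's implementation.]

typedef unsigned int Oid;

#define FirstBootstrapObjectId	10000u
#define FirstNormalObjectId		16384u

static Oid
next_oid(Oid counter, int bootstrap_mode)
{
	Oid			floor_oid = bootstrap_mode ? FirstBootstrapObjectId
										   : FirstNormalObjectId;

	/*
	 * Unsigned comparison: once the 32-bit counter wraps past 0xFFFFFFFF it
	 * restarts near zero, so "counter < floor_oid" catches both the wrap
	 * and the reserved low range in a single test.
	 */
	if (counter < floor_oid)
		counter = floor_oid;
	return counter;
}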
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index ea19e075640..a1bac34e168 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -750,8 +750,8 @@ RecordTransactionCommit(void)
* XLOG record generated by nextval will hit the disk before we report
* the transaction committed.
*
- * Note: if we generated a commit record above, MyXactMadeXLogEntry will
- * certainly be set now.
+ * Note: if we generated a commit record above, MyXactMadeXLogEntry
+ * will certainly be set now.
*/
if (MyXactMadeXLogEntry)
{
@@ -762,8 +762,8 @@ RecordTransactionCommit(void)
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if there are
- * fewer than CommitSiblings other backends with active
+ * We do not sleep if enableFsync is not turned on, nor if there
+ * are fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
@@ -993,10 +993,10 @@ RecordTransactionAbort(void)
* nowhere in permanent storage, so no one else will ever care if it
* committed.)
*
- * We do not flush XLOG to disk unless deleting files, since the default
- * assumption after a crash would be that we aborted, anyway. For the
- * same reason, we don't need to worry about interlocking against
- * checkpoint start.
+ * We do not flush XLOG to disk unless deleting files, since the
+ * default assumption after a crash would be that we aborted, anyway.
+ * For the same reason, we don't need to worry about interlocking
+ * against checkpoint start.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
@@ -1042,8 +1042,8 @@ RecordTransactionAbort(void)
* Mark the transaction aborted in clog. This is not absolutely
* necessary but we may as well do it while we are here.
*
- * The ordering here isn't critical but it seems best to mark the parent
- * first. This assures an atomic transition of all the
+ * The ordering here isn't critical but it seems best to mark the
+ * parent first. This assures an atomic transition of all the
* subtransactions to aborted state from the point of view of
* concurrent TransactionIdDidAbort calls.
*/
@@ -1520,11 +1520,11 @@ CommitTransaction(void)
* it's too late to abort the transaction. This should be just
* noncritical resource releasing.
*
- * The ordering of operations is not entirely random. The idea is: release
- * resources visible to other backends (eg, files, buffer pins); then
- * release locks; then release backend-local resources. We want to release
- * locks at the point where any backend waiting for us will see our
- * transaction as being fully cleaned up.
+ * The ordering of operations is not entirely random. The idea is:
+ * release resources visible to other backends (eg, files, buffer pins);
+ * then release locks; then release backend-local resources. We want to
+ * release locks at the point where any backend waiting for us will see
+ * our transaction as being fully cleaned up.
*
* Resources that can be associated with individual queries are handled by
* the ResourceOwner mechanism. The other calls here are for backend-wide
@@ -1630,9 +1630,9 @@ PrepareTransaction(void)
* Do pre-commit processing (most of this stuff requires database access,
* and in fact could still cause an error...)
*
- * It is possible for PrepareHoldablePortals to invoke functions that queue
- * deferred triggers, and it's also possible that triggers create holdable
- * cursors. So we have to loop until there's nothing left to do.
+ * It is possible for PrepareHoldablePortals to invoke functions that
+ * queue deferred triggers, and it's also possible that triggers create
+ * holdable cursors. So we have to loop until there's nothing left to do.
*/
for (;;)
{
@@ -1715,9 +1715,9 @@ PrepareTransaction(void)
/*
* Here is where we really truly prepare.
*
- * We have to record transaction prepares even if we didn't make any updates,
- * because the transaction manager might get confused if we lose a global
- * transaction.
+ * We have to record transaction prepares even if we didn't make any
+ * updates, because the transaction manager might get confused if we lose
+ * a global transaction.
*/
EndPrepare(gxact);
@@ -1868,10 +1868,11 @@ AbortTransaction(void)
* s->currentUser, since it may not be set yet; instead rely on internal
* state of miscinit.c.
*
- * (Note: it is not necessary to restore session authorization here because
- * that can only be changed via GUC, and GUC will take care of rolling it
- * back if need be. However, an error within a SECURITY DEFINER function
- * could send control here with the wrong current userid.)
+ * (Note: it is not necessary to restore session authorization here
+ * because that can only be changed via GUC, and GUC will take care of
+ * rolling it back if need be. However, an error within a SECURITY
+ * DEFINER function could send control here with the wrong current
+ * userid.)
*/
AtAbort_UserId();
@@ -2353,8 +2354,8 @@ AbortCurrentTransaction(void)
/*
* Here, we are already in an aborted transaction state and are
- * waiting for a ROLLBACK, but for some reason we failed again!
- * So we just remain in the abort state.
+ * waiting for a ROLLBACK, but for some reason we failed again! So
+ * we just remain in the abort state.
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 66db5d9dd26..5722540b0c2 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.222 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.222.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -571,11 +571,11 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* the whole record in the order "rdata, then backup blocks, then record
* header".
*
- * We may have to loop back to here if a race condition is detected below. We
- * could prevent the race by doing all this work while holding the insert
- * lock, but it seems better to avoid doing CRC calculations while holding
- * the lock. This means we have to be careful about modifying the rdata
- * chain until we know we aren't going to loop back again. The only
+ * We may have to loop back to here if a race condition is detected below.
+ * We could prevent the race by doing all this work while holding the
+ * insert lock, but it seems better to avoid doing CRC calculations while
+ * holding the lock. This means we have to be careful about modifying the
+ * rdata chain until we know we aren't going to loop back again. The only
* change we allow ourselves to make earlier is to set rdt->data = NULL in
* chain items we have decided we will have to back up the whole buffer
* for. This is OK because we will certainly decide the same thing again
@@ -763,9 +763,9 @@ begin:;
* now irrevocably changed the input rdata chain. At the exit of this
* loop, write_len includes the backup block data.
*
- * Also set the appropriate info bits to show which buffers were backed up.
- * The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct buffer
- * value (ignoring InvalidBuffer) appearing in the rdata chain.
+ * Also set the appropriate info bits to show which buffers were backed
+ * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
+ * buffer value (ignoring InvalidBuffer) appearing in the rdata chain.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -1666,20 +1666,20 @@ XLogFlush(XLogRecPtr record)
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
*
- * Formerly we treated this as a PANIC condition, but that hurts the system's
- * robustness rather than helping it: we do not want to take down the
- * whole system due to corruption on one data page. In particular, if the
- * bad page is encountered again during recovery then we would be unable
- * to restart the database at all! (This scenario has actually happened
- * in the field several times with 7.1 releases. Note that we cannot get
- * here while InRedo is true, but if the bad page is brought in and marked
- * dirty during recovery then CreateCheckPoint will try to flush it at the
- * end of recovery.)
+ * Formerly we treated this as a PANIC condition, but that hurts the
+ * system's robustness rather than helping it: we do not want to take down
+ * the whole system due to corruption on one data page. In particular, if
+ * the bad page is encountered again during recovery then we would be
+ * unable to restart the database at all! (This scenario has actually
+ * happened in the field several times with 7.1 releases. Note that we
+ * cannot get here while InRedo is true, but if the bad page is brought in
+ * and marked dirty during recovery then CreateCheckPoint will try to
+ * flush it at the end of recovery.)
*
- * The current approach is to ERROR under normal conditions, but only WARNING
- * during recovery, so that the system can be brought up even if there's a
- * corrupt LSN. Note that for calls from xact.c, the ERROR will be
- * promoted to PANIC since xact.c calls this routine inside a critical
+ * The current approach is to ERROR under normal conditions, but only
+ * WARNING during recovery, so that the system can be brought up even if
+ * there's a corrupt LSN. Note that for calls from xact.c, the ERROR will
+ * be promoted to PANIC since xact.c calls this routine inside a critical
* section. However, calls from bufmgr.c are not within critical sections
* and so we will not force a restart for a bad LSN on a data page.
*/
@@ -2152,14 +2152,14 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* preserved correctly when we copied to archive. Our aim is robustness,
* so we elect not to do this.
*
- * If we cannot obtain the log file from the archive, however, we will try to
- * use the XLOGDIR file if it exists. This is so that we can make use of
- * log segments that weren't yet transferred to the archive.
+ * If we cannot obtain the log file from the archive, however, we will try
+ * to use the XLOGDIR file if it exists. This is so that we can make use
+ * of log segments that weren't yet transferred to the archive.
*
- * Notice that we don't actually overwrite any files when we copy back from
- * archive because the recoveryRestoreCommand may inadvertently restore
- * inappropriate xlogs, or they may be corrupt, so we may wish to fallback
- * to the segments remaining in current XLOGDIR later. The
+ * Notice that we don't actually overwrite any files when we copy back
+ * from archive because the recoveryRestoreCommand may inadvertently
+ * restore inappropriate xlogs, or they may be corrupt, so we may wish to
+ * fallback to the segments remaining in current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
@@ -2246,11 +2246,11 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* command apparently succeeded, but let's make sure the file is
* really there now and has the correct size.
*
- * XXX I made wrong-size a fatal error to ensure the DBA would notice it,
- * but is that too strong? We could try to plow ahead with a local
- * copy of the file ... but the problem is that there probably isn't
- * one, and we'd incorrectly conclude we've reached the end of WAL and
- * we're done recovering ...
+ * XXX I made wrong-size a fatal error to ensure the DBA would notice
+ * it, but is that too strong? We could try to plow ahead with a
+ * local copy of the file ... but the problem is that there probably
+ * isn't one, and we'd incorrectly conclude we've reached the end of
+ * WAL and we're done recovering ...
*/
if (stat(xlogpath, &stat_buf) == 0)
{
@@ -3533,8 +3533,8 @@ ReadControlFile(void)
/*
* Do compatibility checking immediately. We do this here for 2 reasons:
*
- * (1) if the database isn't compatible with the backend executable, we want
- * to abort before we can possibly do any damage;
+ * (1) if the database isn't compatible with the backend executable, we
+ * want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file for
@@ -4148,9 +4148,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
* descriptive of what our current database state is, because that is what
* we replayed from.
*
- * Note that if we are establishing a new timeline, ThisTimeLineID is already
- * set to the new value, and so we will create a new file instead of
- * overwriting any existing file.
+ * Note that if we are establishing a new timeline, ThisTimeLineID is
+ * already set to the new value, and so we will create a new file instead
+ * of overwriting any existing file.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
@@ -4341,8 +4341,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
- * Note: in most control paths, *ControlFile is already valid and we need not
- * do ReadControlFile() here, but might as well do it to be sure.
+ * Note: in most control paths, *ControlFile is already valid and we need
+ * not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@@ -4766,14 +4766,14 @@ StartupXLOG(void)
/*
* Perform a new checkpoint to update our recovery activity to disk.
*
- * Note that we write a shutdown checkpoint rather than an on-line one.
- * This is not particularly critical, but since we may be assigning a
- * new TLI, using a shutdown checkpoint allows us to have the rule
- * that TLI only changes in shutdown checkpoints, which allows some
- * extra error checking in xlog_redo.
+ * Note that we write a shutdown checkpoint rather than an on-line
+ * one. This is not particularly critical, but since we may be
+ * assigning a new TLI, using a shutdown checkpoint allows us to have
+ * the rule that TLI only changes in shutdown checkpoints, which
+ * allows some extra error checking in xlog_redo.
*
- * In case we had to use the secondary checkpoint, make sure that it will
- * still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it
+ * will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -5106,10 +5106,10 @@ CreateCheckPoint(bool shutdown, bool force)
* (Perhaps it'd make even more sense to checkpoint only when the previous
* checkpoint record is in a different xlog page?)
*
- * We have to make two tests to determine that nothing has happened since the
- * start of the last checkpoint: current insertion point must match the
- * end of the last checkpoint record, and its redo pointer must point to
- * itself.
+ * We have to make two tests to determine that nothing has happened since
+ * the start of the last checkpoint: current insertion point must match
+ * the end of the last checkpoint record, and its redo pointer must point
+ * to itself.
*/
if (!shutdown && !force)
{
@@ -5198,11 +5198,11 @@ CreateCheckPoint(bool shutdown, bool force)
* Having constructed the checkpoint record, ensure all shmem disk buffers
* and commit-log buffers are flushed to disk.
*
- * This I/O could fail for various reasons. If so, we will fail to complete
- * the checkpoint, but there is no reason to force a system panic.
- * Accordingly, exit critical section while doing it. (If we are doing a
- * shutdown checkpoint, we probably *should* panic --- but that will
- * happen anyway because we'll still be inside the critical section
+ * This I/O could fail for various reasons. If so, we will fail to
+ * complete the checkpoint, but there is no reason to force a system
+ * panic. Accordingly, exit critical section while doing it. (If we are
+ * doing a shutdown checkpoint, we probably *should* panic --- but that
+ * will happen anyway because we'll still be inside the critical section
* established by ShutdownXLOG.)
*/
END_CRIT_SECTION();