author    Bruce Momjian <bruce@momjian.us>  2007-11-15 21:14:46 +0000
committer Bruce Momjian <bruce@momjian.us>  2007-11-15 21:14:46 +0000
commit    fdf5a5efb7b28c13085fe7313658de8d7b9914f6 (patch)
tree      a75cf1422fa1eef4e801cf502b148d8ce1b5dfe7 /src/backend/access
parent    3adc760fb92eab1a8720337a8bf9b66486609eb3 (diff)
pgindent run for 8.3.
Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c      |  92
-rw-r--r--  src/backend/access/common/indextuple.c     |  24
-rw-r--r--  src/backend/access/common/reloptions.c     |   6
-rw-r--r--  src/backend/access/gin/ginarrayproc.c      |  13
-rw-r--r--  src/backend/access/gin/ginbtree.c          |   6
-rw-r--r--  src/backend/access/gin/gindatapage.c       |  20
-rw-r--r--  src/backend/access/gin/ginentrypage.c      |  18
-rw-r--r--  src/backend/access/gin/ginget.c            |  54
-rw-r--r--  src/backend/access/gin/ginscan.c           |  12
-rw-r--r--  src/backend/access/gin/ginutil.c           |   8
-rw-r--r--  src/backend/access/gin/ginvacuum.c         |  35
-rw-r--r--  src/backend/access/gin/ginxlog.c           |  11
-rw-r--r--  src/backend/access/gist/gist.c             |   6
-rw-r--r--  src/backend/access/gist/gistget.c          |  21
-rw-r--r--  src/backend/access/gist/gistproc.c         |  26
-rw-r--r--  src/backend/access/gist/gistvacuum.c       |   4
-rw-r--r--  src/backend/access/hash/hash.c             |   4
-rw-r--r--  src/backend/access/hash/hashfunc.c         |   6
-rw-r--r--  src/backend/access/hash/hashovfl.c         |  12
-rw-r--r--  src/backend/access/hash/hashpage.c         |  48
-rw-r--r--  src/backend/access/heap/heapam.c           | 213
-rw-r--r--  src/backend/access/heap/pruneheap.c        | 218
-rw-r--r--  src/backend/access/heap/rewriteheap.c      | 203
-rw-r--r--  src/backend/access/heap/syncscan.c         |  58
-rw-r--r--  src/backend/access/heap/tuptoaster.c       |  69
-rw-r--r--  src/backend/access/index/indexam.c         |  45
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c      | 130
-rw-r--r--  src/backend/access/nbtree/nbtpage.c        |  56
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c      |  10
-rw-r--r--  src/backend/access/nbtree/nbtutils.c       |  49
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c        |  41
-rw-r--r--  src/backend/access/transam/clog.c          |  18
-rw-r--r--  src/backend/access/transam/multixact.c     |   8
-rw-r--r--  src/backend/access/transam/transam.c       |  14
-rw-r--r--  src/backend/access/transam/twophase.c      |  48
-rw-r--r--  src/backend/access/transam/twophase_rmgr.c |   4
-rw-r--r--  src/backend/access/transam/varsup.c        |  26
-rw-r--r--  src/backend/access/transam/xact.c          | 156
-rw-r--r--  src/backend/access/transam/xlog.c          | 150
39 files changed, 990 insertions, 952 deletions
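
Every hunk below is mechanical reformatting from the pgindent run: block comments are reflowed to the project's 79-column limit (with two spaces after a sentence-ending period), multi-variable declarations are split one per line, and spacing inside parentheses, after keywords, and around casts is normalized. A small before/after sketch of the typical transformations (hypothetical lines, in the style of the changes below):

    /* before pgindent */
    Size len, data_len;
    if ( *nentries == 0 && PG_NARGS() == 3 )
        switch( PG_GETARG_UINT16(2) ) /* StrategyNumber */

    /* after pgindent */
    Size        len,
                data_len;
    if (*nentries == 0 && PG_NARGS() == 3)
        switch (PG_GETARG_UINT16(2))        /* StrategyNumber */
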
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index a6dab8da121..eb8b136cbd9 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -28,7 +28,7 @@
* without explicitly invoking the toaster.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -57,7 +57,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.118 2007/11/07 12:24:23 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.119 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,19 +99,19 @@ heap_compute_data_size(TupleDesc tupleDesc,
for (i = 0; i < numberOfAttributes; i++)
{
- Datum val;
+ Datum val;
if (isnull[i])
continue;
val = values[i];
- if (ATT_IS_PACKABLE(att[i]) &&
+ if (ATT_IS_PACKABLE(att[i]) &&
VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
{
/*
- * we're anticipating converting to a short varlena header,
- * so adjust length and don't count any alignment
+ * we're anticipating converting to a short varlena header, so
+ * adjust length and don't count any alignment
*/
data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
}
@@ -147,19 +147,19 @@ ComputeDataSize(TupleDesc tupleDesc,
for (i = 0; i < numberOfAttributes; i++)
{
- Datum val;
+ Datum val;
if (nulls[i] != ' ')
continue;
val = values[i];
- if (ATT_IS_PACKABLE(att[i]) &&
+ if (ATT_IS_PACKABLE(att[i]) &&
VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
{
/*
- * we're anticipating converting to a short varlena header,
- * so adjust length and don't count any alignment
+ * we're anticipating converting to a short varlena header, so
+ * adjust length and don't count any alignment
*/
data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
}
@@ -195,6 +195,7 @@ heap_fill_tuple(TupleDesc tupleDesc,
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
+
#ifdef USE_ASSERT_CHECKING
char *start = data;
#endif
@@ -238,8 +239,8 @@ heap_fill_tuple(TupleDesc tupleDesc,
}
/*
- * XXX we use the att_align macros on the pointer value itself,
- * not on an offset. This is a bit of a hack.
+ * XXX we use the att_align macros on the pointer value itself, not on
+ * an offset. This is a bit of a hack.
*/
if (att[i]->attbyval)
@@ -327,6 +328,7 @@ DataFill(TupleDesc tupleDesc,
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
+
#ifdef USE_ASSERT_CHECKING
char *start = data;
#endif
@@ -370,8 +372,8 @@ DataFill(TupleDesc tupleDesc,
}
/*
- * XXX we use the att_align macros on the pointer value itself,
- * not on an offset. This is a bit of a hack.
+ * XXX we use the att_align macros on the pointer value itself, not on
+ * an offset. This is a bit of a hack.
*/
if (att[i]->attbyval)
@@ -611,8 +613,8 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize
- * the cached offsets for these attrs.
+ * target. If there aren't any, it's safe to cheaply initialize the
+ * cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
{
@@ -673,8 +675,8 @@ nocachegetattr(HeapTuple tuple,
int i;
/*
- * Now we know that we have to walk the tuple CAREFULLY. But we
- * still might be able to cache some offsets for next time.
+ * Now we know that we have to walk the tuple CAREFULLY. But we still
+ * might be able to cache some offsets for next time.
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
@@ -683,12 +685,12 @@ nocachegetattr(HeapTuple tuple,
* attcacheoff until we reach either a null or a var-width attribute.
*/
off = 0;
- for (i = 0; ; i++) /* loop exit is at "break" */
+ for (i = 0;; i++) /* loop exit is at "break" */
{
if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
{
usecache = false;
- continue; /* this cannot be the target att */
+ continue; /* this cannot be the target att */
}
/* If we know the next offset, we can skip the rest */
@@ -697,10 +699,10 @@ nocachegetattr(HeapTuple tuple,
else if (att[i]->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be
+ * no pad bytes in any case: then the offset will be valid for
+ * either an aligned or unaligned value.
*/
if (usecache &&
off == att_align_nominal(off, att[i]->attalign))
@@ -771,11 +773,12 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
break;
case MinCommandIdAttributeNumber:
case MaxCommandIdAttributeNumber:
+
/*
- * cmin and cmax are now both aliases for the same field,
- * which can in fact also be a combo command id. XXX perhaps we
- * should return the "real" cmin or cmax if possible, that is
- * if we are inside the originating transaction?
+ * cmin and cmax are now both aliases for the same field, which
+ * can in fact also be a combo command id. XXX perhaps we should
+ * return the "real" cmin or cmax if possible, that is if we are
+ * inside the originating transaction?
*/
result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
break;
@@ -855,7 +858,8 @@ heap_form_tuple(TupleDesc tupleDescriptor,
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
- Size len, data_len;
+ Size len,
+ data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
@@ -965,7 +969,8 @@ heap_formtuple(TupleDesc tupleDescriptor,
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
- Size len, data_len;
+ Size len,
+ data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
@@ -1263,10 +1268,10 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
else if (thisatt->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be no
+ * pad bytes in any case: then the offset will be valid for either
+ * an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@@ -1375,10 +1380,10 @@ heap_deformtuple(HeapTuple tuple,
else if (thisatt->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be no
+ * pad bytes in any case: then the offset will be valid for either
+ * an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@@ -1484,10 +1489,10 @@ slot_deform_tuple(TupleTableSlot *slot, int natts)
else if (thisatt->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be no
+ * pad bytes in any case: then the offset will be valid for either
+ * an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@@ -1791,7 +1796,8 @@ heap_form_minimal_tuple(TupleDesc tupleDescriptor,
bool *isnull)
{
MinimalTuple tuple; /* return tuple */
- Size len, data_len;
+ Size len,
+ data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
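
The heap_compute_data_size()/ComputeDataSize() hunks above size each datum anticipating 8.3's packed varlena headers: a value with a 4-byte header that is small enough for a 1-byte header is counted at its converted size and without alignment padding. A standalone sketch of that size rule, assuming the varatt constants (VARHDRSZ = 4, VARHDRSZ_SHORT = 1, VARATT_SHORT_MAX = 0x7F); the function names here are illustrative, not PostgreSQL's:

    #include <stdbool.h>
    #include <stddef.h>

    #define VARHDRSZ         4      /* normal 4-byte varlena header */
    #define VARHDRSZ_SHORT   1      /* packed 1-byte header */
    #define VARATT_SHORT_MAX 0x7F   /* max total size with a 1-byte header */

    /* can an uncompressed value of 'total_size' bytes be packed? */
    static bool
    can_make_short(size_t total_size)
    {
        return total_size - VARHDRSZ + VARHDRSZ_SHORT <= VARATT_SHORT_MAX;
    }

    /* bytes the value occupies once packed: payload plus 1-byte header */
    static size_t
    converted_short_size(size_t total_size)
    {
        return total_size - VARHDRSZ + VARHDRSZ_SHORT;
    }

A 100-byte value (96 payload bytes) therefore counts as 97 bytes and, having a short header, needs no alignment padding at all.
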
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 5412ca0cf3d..892363b3a99 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.83 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.84 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -77,7 +77,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
{
untoasted_values[i] =
PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
- DatumGetPointer(values[i])));
+ DatumGetPointer(values[i])));
untoasted_free[i] = true;
}
@@ -309,8 +309,8 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize
- * the cached offsets for these attrs.
+ * target. If there aren't any, it's safe to cheaply initialize the
+ * cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
{
@@ -371,8 +371,8 @@ nocache_index_getattr(IndexTuple tup,
int i;
/*
- * Now we know that we have to walk the tuple CAREFULLY. But we
- * still might be able to cache some offsets for next time.
+ * Now we know that we have to walk the tuple CAREFULLY. But we still
+ * might be able to cache some offsets for next time.
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
@@ -381,12 +381,12 @@ nocache_index_getattr(IndexTuple tup,
* attcacheoff until we reach either a null or a var-width attribute.
*/
off = 0;
- for (i = 0; ; i++) /* loop exit is at "break" */
+ for (i = 0;; i++) /* loop exit is at "break" */
{
if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
{
usecache = false;
- continue; /* this cannot be the target att */
+ continue; /* this cannot be the target att */
}
/* If we know the next offset, we can skip the rest */
@@ -395,10 +395,10 @@ nocache_index_getattr(IndexTuple tup,
else if (att[i]->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be
+ * no pad bytes in any case: then the offset will be valid for
+ * either an aligned or unaligned value.
*/
if (usecache &&
off == att_align_nominal(off, att[i]->attalign))
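
Both nocachegetattr() and nocache_index_getattr() apply the caching rule the reflowed comments describe: a varlena attribute's offset may be stored in attcacheoff only if the running offset is already aligned, since a cached offset must stay valid whether or not pad bytes precede the stored value. A minimal model of the alignment test, using the same rounding rule as PostgreSQL's TYPEALIGN macro (a sketch, not the real att_align_nominal):

    #include <stdbool.h>
    #include <stdint.h>

    /* round LEN up to the next multiple of ALIGNVAL (a power of two) */
    #define TYPEALIGN(ALIGNVAL, LEN) \
        (((uintptr_t) (LEN) + ((ALIGNVAL) - 1)) & ~((uintptr_t) ((ALIGNVAL) - 1)))

    /* the offset is cacheable only if aligning it would add no pad bytes */
    static bool
    offset_is_cacheable(uintptr_t off, int attalign)
    {
        return off == TYPEALIGN(attalign, off);
    }
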
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 9f40fc59d3f..7e4afd70bd5 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.5 2007/06/03 22:16:02 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.6 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -206,8 +206,8 @@ parseRelOptions(Datum options, int numkeywords, const char *const * keywords,
if (values[j] && validate)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" specified more than once",
- keywords[j])));
+ errmsg("parameter \"%s\" specified more than once",
+ keywords[j])));
value_len = text_len - kw_len - 1;
value = (char *) palloc(value_len + 1);
memcpy(value, text_str + kw_len + 1, value_len);
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index d608bedb605..430b72a92b2 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.10 2007/08/21 01:11:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -60,17 +60,18 @@ ginarrayextract(PG_FUNCTION_ARGS)
elmlen, elmbyval, elmalign,
&entries, NULL, (int *) nentries);
- if ( *nentries == 0 && PG_NARGS() == 3 )
+ if (*nentries == 0 && PG_NARGS() == 3)
{
- switch( PG_GETARG_UINT16(2) ) /* StrategyNumber */
+ switch (PG_GETARG_UINT16(2)) /* StrategyNumber */
{
case GinOverlapStrategy:
- *nentries = -1; /* nobody can be found */
- break;
+ *nentries = -1; /* nobody can be found */
+ break;
case GinContainsStrategy:
case GinContainedStrategy:
case GinEqualStrategy:
- default: /* require fullscan: GIN can't find void arrays */
+ default: /* require fullscan: GIN can't find void
+ * arrays */
break;
}
}
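
This hunk and the ginscan.c hunk later in the diff implement one contract: ginarrayextract() sets *nentries = -1 for an empty overlap query, meaning no row can possibly match, while leaving it at 0 marks a key GIN cannot serve without a full scan. A self-contained sketch of the convention, under the assumption (from the ginscan.c hunk) that -1 sets isVoidRes and that the "void query" error fires only when no key yields entries:

    /* model of the extractQuery nentries signal:
     *   nentries == -1  ->  provably empty result (isVoidRes)
     *   nentries ==  0  ->  key would need a full scan GIN cannot do
     *   nentries  >  0  ->  normal entry list
     */
    typedef enum { SCAN_VOID, SCAN_ERROR_VOID_QUERY, SCAN_NORMAL } ScanPlan;

    static ScanPlan
    plan_for_key(int nentries)
    {
        if (nentries < 0)
            return SCAN_VOID;               /* return zero rows immediately */
        if (nentries == 0)
            return SCAN_ERROR_VOID_QUERY;   /* rejected when all keys are void */
        return SCAN_NORMAL;
    }
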
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 1a711e93c64..a89c384dfc3 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.9 2007/06/05 12:47:49 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.10 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -317,8 +317,8 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack)
Page newlpage;
/*
- * newlpage is a pointer to memory page, it doesn't associate
- * with buffer, stack->buffer should be untouched
+ * newlpage is a pointer to memory page, it doesn't associate with
+ * buffer, stack->buffer should be untouched
*/
newlpage = btree->splitPage(btree, stack->buffer, rbuffer, stack->off, &rdata);
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index d9242c667a6..eb6ccfc0b40 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.7 2007/06/04 15:56:28 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.8 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -358,7 +358,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
static XLogRecData rdata[3];
int sizeofitem = GinSizeOfItem(page);
static ginxlogInsert data;
- int cnt=0;
+ int cnt = 0;
*prdata = rdata;
Assert(GinPageIsData(page));
@@ -373,14 +373,14 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
data.isData = TRUE;
data.isLeaf = GinPageIsLeaf(page) ? TRUE : FALSE;
- /*
- * Prevent full page write if child's split occurs. That is needed
- * to remove incomplete splits while replaying WAL
- *
- * data.updateBlkno contains new block number (of newly created right page)
- * for recently splited page.
+ /*
+ * Prevent full page write if child's split occurs. That is needed to
+ * remove incomplete splits while replaying WAL
+ *
+ * data.updateBlkno contains new block number (of newly created right
+ * page) for recently splited page.
*/
- if ( data.updateBlkno == InvalidBlockNumber )
+ if (data.updateBlkno == InvalidBlockNumber)
{
rdata[0].buffer = buf;
rdata[0].buffer_std = FALSE;
@@ -393,7 +393,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
rdata[cnt].buffer = InvalidBuffer;
rdata[cnt].data = (char *) &data;
rdata[cnt].len = sizeof(ginxlogInsert);
- rdata[cnt].next = &rdata[cnt+1];
+ rdata[cnt].next = &rdata[cnt + 1];
cnt++;
rdata[cnt].buffer = InvalidBuffer;
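
dataPlaceToPage() assembles its WAL record as a linked chain of XLogRecData fragments; the buffer-backed fragment is included only when no incomplete child split forces a full-page write. A minimal sketch of the chaining pattern, assuming the 8.3 XLogRecData layout (data, len, buffer, buffer_std, next) and the XLogInsert(rmid, info, rdata) entry point; the payload names are placeholders:

    static XLogRecData rdata[2];
    XLogRecPtr  recptr;

    /* fragment 0: record header, never replaced by a full-page image */
    rdata[0].buffer = InvalidBuffer;
    rdata[0].data = (char *) &data;         /* e.g. a ginxlogInsert struct */
    rdata[0].len = sizeof(data);
    rdata[0].next = &rdata[1];

    /* fragment 1: page payload, tied to the buffer so recovery may use
     * a full-page image instead of replaying it */
    rdata[1].buffer = buf;
    rdata[1].buffer_std = FALSE;
    rdata[1].data = (char *) payload;       /* hypothetical payload pointer */
    rdata[1].len = payloadlen;
    rdata[1].next = NULL;                   /* terminates the chain */

    recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_INSERT, rdata);
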
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 2c335aea0cd..134c5f99dd0 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.10 2007/10/29 13:49:21 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -354,7 +354,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
static XLogRecData rdata[3];
OffsetNumber placed;
static ginxlogInsert data;
- int cnt=0;
+ int cnt = 0;
*prdata = rdata;
data.updateBlkno = entryPreparePage(btree, page, off);
@@ -372,14 +372,14 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
data.isData = false;
data.isLeaf = GinPageIsLeaf(page) ? TRUE : FALSE;
- /*
- * Prevent full page write if child's split occurs. That is needed
- * to remove incomplete splits while replaying WAL
+ /*
+ * Prevent full page write if child's split occurs. That is needed to
+ * remove incomplete splits while replaying WAL
*
- * data.updateBlkno contains new block number (of newly created right page)
- * for recently splited page.
+ * data.updateBlkno contains new block number (of newly created right
+ * page) for recently splited page.
*/
- if ( data.updateBlkno == InvalidBlockNumber )
+ if (data.updateBlkno == InvalidBlockNumber)
{
rdata[0].buffer = buf;
rdata[0].buffer_std = TRUE;
@@ -392,7 +392,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
rdata[cnt].buffer = InvalidBuffer;
rdata[cnt].data = (char *) &data;
rdata[cnt].len = sizeof(ginxlogInsert);
- rdata[cnt].next = &rdata[cnt+1];
+ rdata[cnt].next = &rdata[cnt + 1];
cnt++;
rdata[cnt].buffer = InvalidBuffer;
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 66949f964c8..b964f036a08 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.8 2007/06/04 15:56:28 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.9 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -23,29 +23,29 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
int res;
- if ( GinPageGetOpaque(page)->flags & GIN_DELETED )
+ if (GinPageGetOpaque(page)->flags & GIN_DELETED)
/* page was deleted by concurrent vacuum */
return false;
- if ( *off > maxoff || *off == InvalidOffsetNumber )
+ if (*off > maxoff || *off == InvalidOffsetNumber)
res = -1;
else
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
- if ( res == 0 )
+ if (res == 0)
{
/* page isn't changed */
- return true;
- }
- else if ( res > 0 )
+ return true;
+ }
+ else if (res > 0)
{
- /*
- * some items was added before our position, look further to find
- * it or first greater
+ /*
+ * some items was added before our position, look further to find it
+ * or first greater
*/
-
+
(*off)++;
- for (; *off <= maxoff; (*off)++)
+ for (; *off <= maxoff; (*off)++)
{
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
@@ -53,7 +53,7 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
return true;
if (res < 0)
- {
+ {
(*off)--;
return true;
}
@@ -61,20 +61,20 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
}
else
{
- /*
- * some items was deleted before our position, look from begining
- * to find it or first greater
+ /*
+ * some items was deleted before our position, look from begining to
+ * find it or first greater
*/
- for(*off = FirstOffsetNumber; *off<= maxoff; (*off)++)
+ for (*off = FirstOffsetNumber; *off <= maxoff; (*off)++)
{
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
- if ( res == 0 )
+ if (res == 0)
return true;
if (res < 0)
- {
+ {
(*off)--;
return true;
}
@@ -174,7 +174,7 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry, bool firs
page = BufferGetPage(entry->buffer);
/* try to find curItem in current buffer */
- if ( findItemInPage(page, &entry->curItem, &entry->offset) )
+ if (findItemInPage(page, &entry->curItem, &entry->offset))
return;
/* walk to right */
@@ -186,13 +186,13 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry, bool firs
page = BufferGetPage(entry->buffer);
entry->offset = InvalidOffsetNumber;
- if ( findItemInPage(page, &entry->curItem, &entry->offset) )
+ if (findItemInPage(page, &entry->curItem, &entry->offset))
return;
}
/*
- * curItem and any greated items was deleted by concurrent vacuum,
- * so we finished scan with currrent entry
+ * curItem and any greated items was deleted by concurrent vacuum, so
+ * we finished scan with currrent entry
*/
}
}
@@ -221,10 +221,10 @@ startScanKey(Relation index, GinState *ginstate, GinScanKey key)
if (GinFuzzySearchLimit > 0)
{
/*
- * If all of keys more than threshold we will try to reduce result,
- * we hope (and only hope, for intersection operation of array our
- * supposition isn't true), that total result will not more than
- * minimal predictNumberResult.
+ * If all of keys more than threshold we will try to reduce
+ * result, we hope (and only hope, for intersection operation of
+ * array our supposition isn't true), that total result will not
+ * more than minimal predictNumberResult.
*/
for (i = 0; i < key->nentries; i++)
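
findItemInPage() re-finds the scan's current item after concurrent vacuum may have shifted it: equal at the remembered offset means the page is unchanged; greater means items were inserted before us, so the scan continues rightward from the old offset; smaller means items were deleted, so it rescans from the first offset, in each case stopping at the item or at the last entry below it. The same re-find logic over a plain sorted array (a simplified model, not the GIN data-page layout):

    #include <stdbool.h>

    /* re-locate 'item' in sorted items[1..maxoff]; on success *off points
     * at item itself or at the last element smaller than it */
    static bool
    refind(const int *items, int maxoff, int item, int *off)
    {
        int         res,
                    i;

        if (*off < 1 || *off > maxoff)
            res = -1;               /* remembered offset is no longer valid */
        else
            res = (item > items[*off]) - (item < items[*off]);

        if (res == 0)
            return true;            /* page unchanged at our position */

        /* insertions before us: scan right; deletions: rescan from start */
        for (i = (res > 0) ? *off + 1 : 1; i <= maxoff; i++)
        {
            if (item == items[i])
            {
                *off = i;
                return true;
            }
            if (item < items[i])
            {
                *off = i - 1;
                return true;
            }
        }
        return false;               /* item and everything after it is gone */
    }
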
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index 2eb1ba95b4b..2e40f8b8d8c 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.10 2007/05/27 03:50:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -164,13 +164,13 @@ newScanKey(IndexScanDesc scan)
UInt16GetDatum(scankey[i].sk_strategy)
)
);
- if ( nEntryValues < 0 )
+ if (nEntryValues < 0)
{
/*
- * extractQueryFn signals that nothing will be found,
- * so we can just set isVoidRes flag...
+ * extractQueryFn signals that nothing will be found, so we can
+ * just set isVoidRes flag...
*/
- so->isVoidRes = true;
+ so->isVoidRes = true;
break;
}
if (entryValues == NULL || nEntryValues == 0)
@@ -187,7 +187,7 @@ newScanKey(IndexScanDesc scan)
if (so->nkeys == 0 && !so->isVoidRes)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("GIN index does not support search with void query")));
+ errmsg("GIN index does not support search with void query")));
pgstat_count_index_scan(scan->indexRelation);
}
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index e704e8051eb..488a58beb5e 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.10 2007/01/31 15:09:45 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -126,17 +126,17 @@ compareEntries(GinState *ginstate, Datum a, Datum b)
&ginstate->compareFn,
a, b
)
- );
+ );
}
typedef struct
{
FmgrInfo *cmpDatumFunc;
bool *needUnique;
-} cmpEntriesData;
+} cmpEntriesData;
static int
-cmpEntries(const Datum *a, const Datum *b, cmpEntriesData *arg)
+cmpEntries(const Datum *a, const Datum *b, cmpEntriesData * arg)
{
int res = DatumGetInt32(FunctionCall2(arg->cmpDatumFunc,
*a, *b));
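
cmpEntries() is a qsort_arg-style comparator: alongside the two elements it receives a context struct carrying the FmgrInfo used to compare datums and a flag recording whether any duplicates were seen, so a later pass knows it must unique-ify. The shape of the pattern in plain C (a sketch; ints stand in for Datums):

    #include <stdbool.h>

    typedef struct
    {
        bool       *needUnique;     /* set when two keys compare equal */
    } cmp_context;

    /* comparator in qsort_arg style: context rides in the last parameter */
    static int
    cmp_entries(const int *a, const int *b, cmp_context *ctx)
    {
        int         res = (*a > *b) - (*a < *b);

        if (res == 0)
            *ctx->needUnique = true;    /* a dedup pass will be required */
        return res;
    }
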
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 1f26869d646..9c0482a8903 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.17 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.18 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,7 @@ typedef struct
IndexBulkDeleteCallback callback;
void *callback_state;
GinState ginstate;
- BufferAccessStrategy strategy;
+ BufferAccessStrategy strategy;
} GinVacuumState;
@@ -160,14 +160,14 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
/*
* We should be sure that we don't concurrent with inserts, insert process
* never release root page until end (but it can unlock it and lock
- * again). New scan can't start but previously started
- * ones work concurrently.
+ * again). New scan can't start but previously started ones work
+ * concurrently.
*/
- if ( isRoot )
+ if (isRoot)
LockBufferForCleanup(buffer);
else
- LockBuffer(buffer, GIN_EXCLUSIVE);
+ LockBuffer(buffer, GIN_EXCLUSIVE);
Assert(GinPageIsData(page));
@@ -240,8 +240,8 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
Buffer dBuffer = ReadBufferWithStrategy(gvs->index, deleteBlkno, gvs->strategy);
- Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ?
- InvalidBuffer : ReadBufferWithStrategy(gvs->index, leftBlkno, gvs->strategy);
+ Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ?
+ InvalidBuffer : ReadBufferWithStrategy(gvs->index, leftBlkno, gvs->strategy);
Buffer pBuffer = ReadBufferWithStrategy(gvs->index, parentBlkno, gvs->strategy);
Page page,
parentPage;
@@ -268,17 +268,20 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
parentPage = BufferGetPage(pBuffer);
#ifdef USE_ASSERT_CHECKING
- do {
- PostingItem *tod=(PostingItem *) GinDataPageGetItem(parentPage, myoff);
- Assert( PostingItemGetBlockNumber(tod) == deleteBlkno );
- } while(0);
+ do
+ {
+ PostingItem *tod = (PostingItem *) GinDataPageGetItem(parentPage, myoff);
+
+ Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
+ } while (0);
#endif
PageDeletePostingItem(parentPage, myoff);
page = BufferGetPage(dBuffer);
+
/*
- * we shouldn't change rightlink field to save
- * workability of running search scan
+ * we shouldn't change rightlink field to save workability of running
+ * search scan
*/
GinPageGetOpaque(page)->flags = GIN_DELETED;
@@ -363,8 +366,8 @@ typedef struct DataPageDeleteStack
struct DataPageDeleteStack *child;
struct DataPageDeleteStack *parent;
- BlockNumber blkno; /* current block number */
- BlockNumber leftBlkno; /* rightest non-deleted page on left */
+ BlockNumber blkno; /* current block number */
+ BlockNumber leftBlkno; /* rightest non-deleted page on left */
bool isRoot;
} DataPageDeleteStack;
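
The assertion block reformatted in ginDeletePage() uses the do { ... } while (0) idiom: the variable needed only for the Assert stays scoped to the block, and in a non-assert build the whole statement compiles away. The general shape (a sketch; check_value() and expected are hypothetical):

    #ifdef USE_ASSERT_CHECKING
        do
        {
            int         actual = check_value();     /* hypothetical helper */

            Assert(actual == expected);
        } while (0);
    #endif
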
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index 79fbb496b54..7649e4c9003 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.10 2007/10/29 19:26:57 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -135,7 +135,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
Assert(data->isDelete == FALSE);
Assert(GinPageIsData(page));
- if ( ! XLByteLE(lsn, PageGetLSN(page)) )
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
if (data->isLeaf)
{
@@ -170,6 +170,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
{
PostingItem *pitem = (PostingItem *) (XLogRecGetData(record) + sizeof(ginxlogInsert));
+
forgetIncompleteSplit(data->node, PostingItemGetBlockNumber(pitem), data->updateBlkno);
}
@@ -180,7 +181,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
Assert(!GinPageIsData(page));
- if ( ! XLByteLE(lsn, PageGetLSN(page)) )
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
if (data->updateBlkno != InvalidBlockNumber)
{
@@ -202,7 +203,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), data->offset, false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in %u/%u/%u",
- data->node.spcNode, data->node.dbNode, data->node.relNode);
+ data->node.spcNode, data->node.dbNode, data->node.relNode);
}
if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
@@ -212,7 +213,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
}
}
- if ( ! XLByteLE(lsn, PageGetLSN(page)) )
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
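
Every branch of ginRedoInsert() is wrapped in if (!XLByteLE(lsn, PageGetLSN(page))): when the page's LSN already covers this WAL record, the change reached disk before the crash and must not be applied twice, which is what makes redo idempotent. The standard shape of the guard (a sketch of the pattern, with the page mutation elided):

    if (!XLByteLE(lsn, PageGetLSN(page)))
    {
        /* ... re-apply the logged change to the page ... */

        PageSetLSN(page, lsn);              /* mark the page as caught up */
        PageSetTLI(page, ThisTimeLineID);
        MarkBufferDirty(buffer);
    }
    UnlockReleaseBuffer(buffer);
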
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 0c1b94d7d38..770c2023bd7 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.147 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.148 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -360,8 +360,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
/*
- * fill page, we can do it because all these pages are new
- * (ie not linked in tree or masked by temp page
+ * fill page, we can do it because all these pages are new (ie not
+ * linked in tree or masked by temp page
*/
data = (char *) (ptr->list);
for (i = 0; i < ptr->block.num; i++)
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index ba7a8ab959f..cb1919ac6e0 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.67 2007/09/12 22:10:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.68 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,13 +383,12 @@ gistindex_keytest(IndexTuple tuple,
/*
* On non-leaf page we can't conclude that child hasn't NULL
* values because of assumption in GiST: uinon (VAL, NULL) is VAL
- * But if on non-leaf page key IS NULL then all childs
- * has NULL.
+ * But if on non-leaf page key IS NULL then all childs has NULL.
*/
- Assert( key->sk_flags & SK_SEARCHNULL );
+ Assert(key->sk_flags & SK_SEARCHNULL);
- if ( GistPageIsLeaf(p) && !isNull )
+ if (GistPageIsLeaf(p) && !isNull)
return false;
}
else if (isNull)
@@ -404,12 +403,14 @@ gistindex_keytest(IndexTuple tuple,
FALSE, isNull);
/*
- * Call the Consistent function to evaluate the test. The arguments
- * are the index datum (as a GISTENTRY*), the comparison datum, and
- * the comparison operator's strategy number and subtype from pg_amop.
+ * Call the Consistent function to evaluate the test. The
+ * arguments are the index datum (as a GISTENTRY*), the comparison
+ * datum, and the comparison operator's strategy number and
+ * subtype from pg_amop.
*
- * (Presently there's no need to pass the subtype since it'll always
- * be zero, but might as well pass it for possible future use.)
+ * (Presently there's no need to pass the subtype since it'll
+ * always be zero, but might as well pass it for possible future
+ * use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 590be9133fb..e461b5923d9 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.11 2007/09/07 17:04:26 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.12 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -394,20 +394,22 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
ADDLIST(listT, unionT, posT, i);
}
-#define LIMIT_RATIO 0.1
+#define LIMIT_RATIO 0.1
#define _IS_BADRATIO(x,y) ( (y) == 0 || (float)(x)/(float)(y) < LIMIT_RATIO )
#define IS_BADRATIO(x,y) ( _IS_BADRATIO((x),(y)) || _IS_BADRATIO((y),(x)) )
/* bad disposition, try to split by centers of boxes */
- if ( IS_BADRATIO(posR, posL) && IS_BADRATIO(posT, posB) )
+ if (IS_BADRATIO(posR, posL) && IS_BADRATIO(posT, posB))
{
- double avgCenterX=0.0, avgCenterY=0.0;
- double CenterX, CenterY;
+ double avgCenterX = 0.0,
+ avgCenterY = 0.0;
+ double CenterX,
+ CenterY;
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
cur = DatumGetBoxP(entryvec->vector[i].key);
- avgCenterX += ((double)cur->high.x + (double)cur->low.x)/2.0;
- avgCenterY += ((double)cur->high.y + (double)cur->low.y)/2.0;
+ avgCenterX += ((double) cur->high.x + (double) cur->low.x) / 2.0;
+ avgCenterY += ((double) cur->high.y + (double) cur->low.y) / 2.0;
}
avgCenterX /= maxoff;
@@ -417,11 +419,11 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
cur = DatumGetBoxP(entryvec->vector[i].key);
-
- CenterX = ((double)cur->high.x + (double)cur->low.x)/2.0;
- CenterY = ((double)cur->high.y + (double)cur->low.y)/2.0;
- if (CenterX < avgCenterX)
+ CenterX = ((double) cur->high.x + (double) cur->low.x) / 2.0;
+ CenterY = ((double) cur->high.y + (double) cur->low.y) / 2.0;
+
+ if (CenterX < avgCenterX)
ADDLIST(listL, unionL, posL, i);
else if (CenterX == avgCenterX)
{
@@ -442,7 +444,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
else
ADDLIST(listB, unionB, posB, i);
}
- else
+ else
ADDLIST(listT, unionT, posT, i);
}
}
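
When the edge-based split gives a lopsided ratio (IS_BADRATIO on both axes), gist_box_picksplit() retries by box centers: it averages all the boxes' center coordinates and assigns each box to a side according to where its own center falls relative to the mean. The core arithmetic, mirroring the reindented code (a sketch; box, nboxes, and the add_to_* helpers are stand-ins):

    double      avgCenterX = 0.0;
    double      centerX;
    int         i;

    /* mean of the boxes' X centers */
    for (i = 0; i < nboxes; i++)
        avgCenterX += ((double) box[i].high.x + (double) box[i].low.x) / 2.0;
    avgCenterX /= nboxes;

    /* partition by which side of the mean each center lies on */
    for (i = 0; i < nboxes; i++)
    {
        centerX = ((double) box[i].high.x + (double) box[i].low.x) / 2.0;
        if (centerX < avgCenterX)
            add_to_left(i);
        else
            add_to_right(i);
    }
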
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 212995e7c57..dace2c89067 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.32 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.33 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@ typedef struct
Relation index;
MemoryContext opCtx;
GistBulkDeleteResult *result;
- BufferAccessStrategy strategy;
+ BufferAccessStrategy strategy;
} GistVacuum;
typedef struct
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index d3f54c934be..5933b02e8eb 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.96 2007/09/12 22:10:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.97 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -548,7 +548,7 @@ loop_top:
vacuum_delay_point();
buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
- LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
+ LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
info->strategy);
page = BufferGetPage(buf);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 71fe34c8a20..4d1b1ed45cc 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.53 2007/09/21 22:52:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.54 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -103,8 +103,8 @@ hashfloat4(PG_FUNCTION_ARGS)
* To support cross-type hashing of float8 and float4, we want to return
* the same hash value hashfloat8 would produce for an equal float8 value.
* So, widen the value to float8 and hash that. (We must do this rather
- * than have hashfloat8 try to narrow its value to float4; that could
- * fail on overflow.)
+ * than have hashfloat8 try to narrow its value to float4; that could fail
+ * on overflow.)
*/
key8 = key;
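
hashfloat4() does what the reflowed comment says: the float4 key is widened to float8 before hashing, so an equal value hashes identically whether it arrives as float4 or float8; narrowing inside hashfloat8() instead could fail on overflow. A sketch (hash_bytes() is a hypothetical stand-in for PostgreSQL's hash_any()):

    #include <stdint.h>

    extern uint32_t hash_bytes(const unsigned char *k, int keylen); /* hypothetical */

    static uint32_t
    hash_float4(float key)
    {
        double      key8 = key;     /* exact: every float4 is representable */

        return hash_bytes((unsigned char *) &key8, sizeof(key8));
    }
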
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index e4ea24a62d1..c510c6e65b7 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.60 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.61 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -156,7 +156,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@@ -402,9 +402,9 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
bucket = ovflopaque->hasho_bucket;
/*
- * Zero the page for debugging's sake; then write and release it.
- * (Note: if we failed to zero the page here, we'd have problems
- * with the Assert in _hash_pageinit() when the page is reused.)
+ * Zero the page for debugging's sake; then write and release it. (Note:
+ * if we failed to zero the page here, we'd have problems with the Assert
+ * in _hash_pageinit() when the page is reused.)
*/
MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf));
_hash_wrtbuf(rel, ovflbuf);
@@ -420,7 +420,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
Buffer prevbuf = _hash_getbuf_with_strategy(rel,
prevblkno,
HASH_WRITE,
- LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
+ LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
bstrategy);
Page prevpage = BufferGetPage(prevbuf);
HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 807dbed8a8c..07f27001a89 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.70 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.71 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -37,7 +37,7 @@
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
- uint32 nblocks);
+ uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
Bucket obucket, Bucket nbucket,
BlockNumber start_oblkno,
@@ -138,7 +138,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@@ -184,7 +184,7 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno)
{
- BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
+ BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
Buffer buf;
if (blkno == P_NEW)
@@ -354,10 +354,10 @@ _hash_metapinit(Relation rel)
ffactor = 10;
/*
- * We initialize the metapage, the first two bucket pages, and the
- * first bitmap page in sequence, using _hash_getnewbuf to cause
- * smgrextend() calls to occur. This ensures that the smgr level
- * has the right idea of the physical index length.
+ * We initialize the metapage, the first two bucket pages, and the first
+ * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
+ * calls to occur. This ensures that the smgr level has the right idea of
+ * the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
pg = BufferGetPage(metabuf);
@@ -501,15 +501,16 @@ _hash_expandtable(Relation rel, Buffer metabuf)
goto fail;
/*
- * Can't split anymore if maxbucket has reached its maximum possible value.
+ * Can't split anymore if maxbucket has reached its maximum possible
+ * value.
*
* Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
* the calculation maxbucket+1 mustn't overflow). Currently we restrict
* to half that because of overflow looping in _hash_log2() and
* insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and
- * hence _hash_alloc_buckets() would fail, but if we supported buckets
- * smaller than a disk block then this would be an independent constraint.
+ * index with 2^32 buckets would certainly overflow BlockNumber and hence
+ * _hash_alloc_buckets() would fail, but if we supported buckets smaller
+ * than a disk block then this would be an independent constraint.
*/
if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
goto fail;
@@ -536,10 +537,10 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Likewise lock the new bucket (should never fail).
*
- * Note: it is safe to compute the new bucket's blkno here, even though
- * we may still need to update the BUCKET_TO_BLKNO mapping. This is
- * because the current value of hashm_spares[hashm_ovflpoint] correctly
- * shows where we are going to put a new splitpoint's worth of buckets.
+ * Note: it is safe to compute the new bucket's blkno here, even though we
+ * may still need to update the BUCKET_TO_BLKNO mapping. This is because
+ * the current value of hashm_spares[hashm_ovflpoint] correctly shows
+ * where we are going to put a new splitpoint's worth of buckets.
*/
start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
@@ -557,11 +558,12 @@ _hash_expandtable(Relation rel, Buffer metabuf)
if (spare_ndx > metap->hashm_ovflpoint)
{
Assert(spare_ndx == metap->hashm_ovflpoint + 1);
+
/*
- * The number of buckets in the new splitpoint is equal to the
- * total number already in existence, i.e. new_bucket. Currently
- * this maps one-to-one to blocks required, but someday we may need
- * a more complicated calculation here.
+ * The number of buckets in the new splitpoint is equal to the total
+ * number already in existence, i.e. new_bucket. Currently this maps
+ * one-to-one to blocks required, but someday we may need a more
+ * complicated calculation here.
*/
if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
{
@@ -673,14 +675,14 @@ fail:
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
- BlockNumber lastblock;
+ BlockNumber lastblock;
char zerobuf[BLCKSZ];
lastblock = firstblock + nblocks - 1;
/*
- * Check for overflow in block number calculation; if so, we cannot
- * extend the index anymore.
+ * Check for overflow in block number calculation; if so, we cannot extend
+ * the index anymore.
*/
if (lastblock < firstblock || lastblock == InvalidBlockNumber)
return false;
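
_hash_alloc_buckets() must notice wraparound when computing the last block of the allocation: BlockNumber is an unsigned 32-bit type, so firstblock + nblocks - 1 can silently wrap, and a result that went backwards (or landed on InvalidBlockNumber, 0xFFFFFFFF) means the index cannot be extended. A standalone sketch of the check:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t BlockNumber;
    #define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

    static bool
    block_range_ok(BlockNumber firstblock, uint32_t nblocks)
    {
        BlockNumber lastblock = firstblock + nblocks - 1;

        /* unsigned wraparound shows up as lastblock < firstblock */
        return !(lastblock < firstblock || lastblock == InvalidBlockNumber);
    }
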
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 052393fc6b9..20027592b50 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.244 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.245 2007/11/15 21:14:32 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -60,9 +60,9 @@
static HeapScanDesc heap_beginscan_internal(Relation relation,
- Snapshot snapshot,
- int nkeys, ScanKey key,
- bool is_bitmapscan);
+ Snapshot snapshot,
+ int nkeys, ScanKey key,
+ bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
@@ -85,18 +85,18 @@ initscan(HeapScanDesc scan, ScanKey key)
* Determine the number of blocks we have to scan.
*
* It is sufficient to do this once at scan start, since any tuples added
- * while the scan is in progress will be invisible to my snapshot
- * anyway. (That is not true when using a non-MVCC snapshot. However,
- * we couldn't guarantee to return tuples added after scan start anyway,
- * since they might go into pages we already scanned. To guarantee
- * consistent results for a non-MVCC snapshot, the caller must hold some
- * higher-level lock that ensures the interesting tuple(s) won't change.)
+ * while the scan is in progress will be invisible to my snapshot anyway.
+ * (That is not true when using a non-MVCC snapshot. However, we couldn't
+ * guarantee to return tuples added after scan start anyway, since they
+ * might go into pages we already scanned. To guarantee consistent
+ * results for a non-MVCC snapshot, the caller must hold some higher-level
+ * lock that ensures the interesting tuple(s) won't change.)
*/
scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
*
@@ -140,8 +140,8 @@ initscan(HeapScanDesc scan, ScanKey key)
memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
/*
- * Currently, we don't have a stats counter for bitmap heap scans
- * (but the underlying bitmap index scans will be counted).
+ * Currently, we don't have a stats counter for bitmap heap scans (but the
+ * underlying bitmap index scans will be counted).
*/
if (!scan->rs_bitmapscan)
pgstat_count_heap_scan(scan->rs_rd);
@@ -283,7 +283,7 @@ heapgettup(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
- page = scan->rs_startblock; /* first page */
+ page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
lineoff = FirstOffsetNumber; /* first offnum */
scan->rs_inited = true;
@@ -317,6 +317,7 @@ heapgettup(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
+
/*
* Disable reporting to syncscan logic in a backwards scan; it's
* not very likely anyone else is doing the same thing at the same
@@ -459,9 +460,9 @@ heapgettup(HeapScanDesc scan,
finished = (page == scan->rs_startblock);
/*
- * Report our new scan position for synchronization purposes.
- * We don't do that when moving backwards, however. That would
- * just mess up any other forward-moving scanners.
+ * Report our new scan position for synchronization purposes. We
+ * don't do that when moving backwards, however. That would just
+ * mess up any other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the
* final state of the position hint is back at the start of the
@@ -554,7 +555,7 @@ heapgettup_pagemode(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
- page = scan->rs_startblock; /* first page */
+ page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
lineindex = 0;
scan->rs_inited = true;
@@ -585,6 +586,7 @@ heapgettup_pagemode(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
+
/*
* Disable reporting to syncscan logic in a backwards scan; it's
* not very likely anyone else is doing the same thing at the same
@@ -719,9 +721,9 @@ heapgettup_pagemode(HeapScanDesc scan,
finished = (page == scan->rs_startblock);
/*
- * Report our new scan position for synchronization purposes.
- * We don't do that when moving backwards, however. That would
- * just mess up any other forward-moving scanners.
+ * Report our new scan position for synchronization purposes. We
+ * don't do that when moving backwards, however. That would just
+ * mess up any other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the
* final state of the position hint is back at the start of the
@@ -1057,7 +1059,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
* heap_beginscan - begin relation scan
*
* heap_beginscan_bm is an alternative entry point for setting up a HeapScanDesc
- * for a bitmap heap scan. Although that scan technology is really quite
+ * for a bitmap heap scan. Although that scan technology is really quite
* unlike a standard seqscan, there is just enough commonality to make it
* worth using the same data structure.
* ----------------
@@ -1423,10 +1425,10 @@ bool
heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
bool *all_dead)
{
- Page dp = (Page) BufferGetPage(buffer);
+ Page dp = (Page) BufferGetPage(buffer);
TransactionId prev_xmax = InvalidTransactionId;
OffsetNumber offnum;
- bool at_chain_start;
+ bool at_chain_start;
if (all_dead)
*all_dead = true;
@@ -1438,7 +1440,7 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
/* Scan through possible multiple members of HOT-chain */
for (;;)
{
- ItemId lp;
+ ItemId lp;
HeapTupleData heapTuple;
/* check for bogus TID */
@@ -1472,7 +1474,8 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
break;
/*
- * The xmin should match the previous xmax value, else chain is broken.
+ * The xmin should match the previous xmax value, else chain is
+ * broken.
*/
if (TransactionIdIsValid(prev_xmax) &&
!TransactionIdEquals(prev_xmax,
@@ -1499,8 +1502,8 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
*all_dead = false;
/*
- * Check to see if HOT chain continues past this tuple; if so
- * fetch the next offnum and loop around.
+ * Check to see if HOT chain continues past this tuple; if so fetch
+ * the next offnum and loop around.
*/
if (HeapTupleIsHotUpdated(&heapTuple))
{
@@ -1511,7 +1514,7 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
prev_xmax = HeapTupleHeaderGetXmax(heapTuple.t_data);
}
else
- break; /* end of chain */
+ break; /* end of chain */
}
return false;
@@ -1528,8 +1531,8 @@ bool
heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
bool *all_dead)
{
- bool result;
- Buffer buffer;
+ bool result;
+ Buffer buffer;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1665,7 +1668,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously. Hence callers should look
* only at XMAX_INVALID.
@@ -2069,7 +2072,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@@ -2252,15 +2255,15 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
- * wasted effort if we fail to update or have to put the new tuple on
- * a different page. But we must compute the list before obtaining
- * buffer lock --- in the worst case, if we are doing an update on one
- * of the relevant system catalogs, we could deadlock if we try to
- * fetch the list later. In any case, the relcache caches the data
- * so this is usually pretty cheap.
+ * wasted effort if we fail to update or have to put the new tuple on a
+ * different page. But we must compute the list before obtaining buffer
+ * lock --- in the worst case, if we are doing an update on one of the
+ * relevant system catalogs, we could deadlock if we try to fetch the list
+ * later. In any case, the relcache caches the data so this is usually
+ * pretty cheap.
*
- * Note that we get a copy here, so we need not worry about relcache
- * flush happening midway through.
+ * Note that we get a copy here, so we need not worry about relcache flush
+ * happening midway through.
*/
hot_attrs = RelationGetIndexAttrBitmap(relation);
@@ -2555,7 +2558,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
- * to do a HOT update. Check if any of the index columns have been
+ * to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
@@ -2573,14 +2576,14 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
- * XXX Should we set hint on newbuf as well? If the transaction
- * aborts, there would be a prunable tuple in the newbuf; but for now
- * we choose not to optimize for aborts. Note that heap_xlog_update
- * must be kept in sync if this decision changes.
+ * XXX Should we set hint on newbuf as well? If the transaction aborts,
+ * there would be a prunable tuple in the newbuf; but for now we choose
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * sync if this decision changes.
*/
PageSetPrunable(dp, xid);
@@ -2695,22 +2698,24 @@ static bool
heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
HeapTuple tup1, HeapTuple tup2)
{
- Datum value1, value2;
- bool isnull1, isnull2;
+ Datum value1,
+ value2;
+ bool isnull1,
+ isnull2;
Form_pg_attribute att;
/*
* If it's a whole-tuple reference, say "not equal". It's not really
- * worth supporting this case, since it could only succeed after a
- * no-op update, which is hardly a case worth optimizing for.
+ * worth supporting this case, since it could only succeed after a no-op
+ * update, which is hardly a case worth optimizing for.
*/
if (attrnum == 0)
return false;
/*
- * Likewise, automatically say "not equal" for any system attribute
- * other than OID and tableOID; we cannot expect these to be consistent
- * in a HOT chain, or even to be set correctly yet in the new tuple.
+ * Likewise, automatically say "not equal" for any system attribute other
+ * than OID and tableOID; we cannot expect these to be consistent in a HOT
+ * chain, or even to be set correctly yet in the new tuple.
*/
if (attrnum < 0)
{
@@ -2720,17 +2725,17 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
}
/*
- * Extract the corresponding values. XXX this is pretty inefficient
- * if there are many indexed columns. Should HeapSatisfiesHOTUpdate
- * do a single heap_deform_tuple call on each tuple, instead? But
- * that doesn't work for system columns ...
+ * Extract the corresponding values. XXX this is pretty inefficient if
+ * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
+ * single heap_deform_tuple call on each tuple, instead? But that doesn't
+ * work for system columns ...
*/
value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
/*
- * If one value is NULL and other is not, then they are certainly
- * not equal
+	 * If one value is NULL and the other is not, then they are certainly not
+ * equal
*/
if (isnull1 != isnull2)
return false;
@@ -2744,7 +2749,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
- * same logical value. But we should be OK as long as there are no false
+ * same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@@ -2758,7 +2763,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
else
{
Assert(attrnum <= tupdesc->natts);
- att = tupdesc->attrs[attrnum - 1];
+ att = tupdesc->attrs[attrnum - 1];
return datumIsEqual(value1, value2, att->attbyval, att->attlen);
}
}
@@ -2779,7 +2784,7 @@ static bool
HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
HeapTuple oldtup, HeapTuple newtup)
{
- int attrnum;
+ int attrnum;
while ((attrnum = bms_first_member(hot_attrs)) >= 0)
{
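
One subtlety worth keeping in mind when reading this loop: bms_first_member() is destructive, deleting each member as it returns it, which is fine here because the caller passes a throwaway bitmap. A caller that needed the set again afterwards would iterate over a copy, e.g. (sketch; the offset matches how these bitmaps are built):

    Bitmapset  *tmp = bms_copy(hot_attrs);  /* bms_first_member eats the set */
    int         attrnum;

    while ((attrnum = bms_first_member(tmp)) >= 0)
    {
        /* members are stored offset by FirstLowInvalidHeapAttributeNumber */
        attrnum += FirstLowInvalidHeapAttributeNumber;
        /* ... examine attribute attrnum of the old and new tuples ... */
    }
    bms_free(tmp);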
@@ -3094,15 +3099,15 @@ l3:
}
/*
- * We might already hold the desired lock (or stronger), possibly under
- * a different subtransaction of the current top transaction. If so,
- * there is no need to change state or issue a WAL record. We already
- * handled the case where this is true for xmax being a MultiXactId,
- * so now check for cases where it is a plain TransactionId.
+ * We might already hold the desired lock (or stronger), possibly under a
+ * different subtransaction of the current top transaction. If so, there
+ * is no need to change state or issue a WAL record. We already handled
+ * the case where this is true for xmax being a MultiXactId, so now check
+ * for cases where it is a plain TransactionId.
*
* Note in particular that this covers the case where we already hold
- * exclusive lock on the tuple and the caller only wants shared lock.
- * It would certainly not do to give up the exclusive lock.
+ * exclusive lock on the tuple and the caller only wants shared lock. It
+ * would certainly not do to give up the exclusive lock.
*/
xmax = HeapTupleHeaderGetXmax(tuple->t_data);
old_infomask = tuple->t_data->t_infomask;
@@ -3179,8 +3184,8 @@ l3:
{
/*
* If the XMAX is a valid TransactionId, then we need to
- * create a new MultiXactId that includes both the old
- * locker and our own TransactionId.
+ * create a new MultiXactId that includes both the old locker
+ * and our own TransactionId.
*/
xid = MultiXactIdCreate(xmax, xid);
new_infomask |= HEAP_XMAX_IS_MULTI;
@@ -3214,8 +3219,8 @@ l3:
/*
* Store transaction information of xact locking the tuple.
*
- * Note: Cmax is meaningless in this context, so don't set it; this
- * avoids possibly generating a useless combo CID.
+ * Note: Cmax is meaningless in this context, so don't set it; this avoids
+ * possibly generating a useless combo CID.
*/
tuple->t_data->t_infomask = new_infomask;
HeapTupleHeaderClearHotUpdated(tuple->t_data);
@@ -3425,6 +3430,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
buf = InvalidBuffer;
}
HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
+
/*
* Might as well fix the hint bits too; usually XMIN_COMMITTED will
* already be set here, but there's a small chance not.
@@ -3437,9 +3443,9 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* When we release shared lock, it's possible for someone else to change
* xmax before we get the lock back, so repeat the check after acquiring
- * exclusive lock. (We don't need this pushup for xmin, because only
- * VACUUM could be interested in changing an existing tuple's xmin,
- * and there's only one VACUUM allowed on a table at a time.)
+ * exclusive lock. (We don't need this pushup for xmin, because only
+ * VACUUM could be interested in changing an existing tuple's xmin, and
+ * there's only one VACUUM allowed on a table at a time.)
*/
recheck_xmax:
if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
@@ -3454,13 +3460,14 @@ recheck_xmax:
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
buf = InvalidBuffer;
- goto recheck_xmax; /* see comment above */
+ goto recheck_xmax; /* see comment above */
}
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
+
/*
- * The tuple might be marked either XMAX_INVALID or
- * XMAX_COMMITTED + LOCKED. Normalize to INVALID just to be
- * sure no one gets confused.
+ * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
+ * + LOCKED. Normalize to INVALID just to be sure no one gets
+ * confused.
*/
tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
tuple->t_infomask |= HEAP_XMAX_INVALID;
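
The goto above is an instance of a general drop-lock-and-recheck idiom: the test ran under a shared lock, the fix needs an exclusive lock, and the state can change in the unlocked window. Schematically (condition_needs_fixup and apply_fixup are placeholders, not real functions):

    recheck:
    if (condition_needs_fixup(tuple))           /* hypothetical predicate */
    {
        if (only_share_locked)
        {
            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
            LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
            only_share_locked = false;
            goto recheck;           /* state may have changed meanwhile */
        }
        apply_fixup(tuple);                     /* hypothetical mutation */
    }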
@@ -3506,8 +3513,9 @@ recheck_xvac:
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
buf = InvalidBuffer;
- goto recheck_xvac; /* see comment above */
+ goto recheck_xvac; /* see comment above */
}
+
/*
* If a MOVED_OFF tuple is not dead, the xvac transaction must
* have failed; whereas a non-dead MOVED_IN tuple must mean the
@@ -3517,9 +3525,10 @@ recheck_xvac:
HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
else
HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
+
/*
- * Might as well fix the hint bits too; usually XMIN_COMMITTED will
- * already be set here, but there's a small chance not.
+ * Might as well fix the hint bits too; usually XMIN_COMMITTED
+ * will already be set here, but there's a small chance not.
*/
Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
tuple->t_infomask |= HEAP_XMIN_COMMITTED;
@@ -3632,8 +3641,8 @@ log_heap_clean(Relation reln, Buffer buffer,
/*
* The OffsetNumber arrays are not actually in the buffer, but we pretend
* that they are. When XLogInsert stores the whole buffer, the offset
- * arrays need not be stored too. Note that even if all three arrays
- * are empty, we want to expose the buffer as a candidate for whole-page
+ * arrays need not be stored too. Note that even if all three arrays are
+ * empty, we want to expose the buffer as a candidate for whole-page
* storage, since this record type implies a defragmentation operation
* even if no item pointers changed state.
*/
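
The "pretend" trick described here hinges on the 8.3-era XLogRecData chain: an rdata entry with a buffer attached is skipped whenever XLogInsert decides to store a full-page image of that buffer. A minimal sketch, with xlrec, offsets, and noffsets standing in for the real locals:

    XLogRecData rdata[2];
    XLogRecPtr  recptr;

    rdata[0].data = (char *) &xlrec;    /* fixed-size record header */
    rdata[0].len = SizeOfHeapClean;
    rdata[0].buffer = InvalidBuffer;    /* always written to WAL */
    rdata[0].next = &(rdata[1]);

    rdata[1].data = (char *) offsets;   /* the "pretend" page data */
    rdata[1].len = noffsets * sizeof(OffsetNumber);
    rdata[1].buffer = buffer;           /* omitted if page is logged whole */
    rdata[1].buffer_std = true;
    rdata[1].next = NULL;

    recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEAN, rdata);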
@@ -3686,7 +3695,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-freeze operation. Caller must already
+ * Perform XLogInsert for a heap-freeze operation. Caller must already
* have modified the buffer and marked it dirty.
*/
XLogRecPtr
@@ -3711,9 +3720,9 @@ log_heap_freeze(Relation reln, Buffer buffer,
rdata[0].next = &(rdata[1]);
/*
- * The tuple-offsets array is not actually in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets array
- * need not be stored too.
+ * The tuple-offsets array is not actually in the buffer, but pretend that
+ * it is. When XLogInsert stores the whole buffer, the offsets array need
+ * not be stored too.
*/
if (offcnt > 0)
{
@@ -3853,7 +3862,7 @@ log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
* for writing the page to disk after calling this routine.
*
* Note: all current callers build pages in private memory and write them
- * directly to smgr, rather than using bufmgr. Therefore there is no need
+ * directly to smgr, rather than using bufmgr. Therefore there is no need
* to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
* the critical section.
*
@@ -3905,9 +3914,9 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
Page page;
OffsetNumber *offnum;
OffsetNumber *end;
- int nredirected;
- int ndead;
- int i;
+ int nredirected;
+ int ndead;
+ int i;
if (record->xl_info & XLR_BKP_BLOCK_1)
return;
@@ -3934,12 +3943,12 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
{
OffsetNumber fromoff = *offnum++;
OffsetNumber tooff = *offnum++;
- ItemId fromlp = PageGetItemId(page, fromoff);
+ ItemId fromlp = PageGetItemId(page, fromoff);
if (clean_move)
{
/* Physically move the "to" item to the "from" slot */
- ItemId tolp = PageGetItemId(page, tooff);
+ ItemId tolp = PageGetItemId(page, tooff);
HeapTupleHeader htup;
*fromlp = *tolp;
@@ -3962,7 +3971,7 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
for (i = 0; i < ndead; i++)
{
OffsetNumber off = *offnum++;
- ItemId lp = PageGetItemId(page, off);
+ ItemId lp = PageGetItemId(page, off);
ItemIdSetDead(lp);
}
@@ -3971,14 +3980,14 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
while (offnum < end)
{
OffsetNumber off = *offnum++;
- ItemId lp = PageGetItemId(page, off);
+ ItemId lp = PageGetItemId(page, off);
ItemIdSetUnused(lp);
}
/*
- * Finally, repair any fragmentation, and update the page's hint bit
- * about whether it has free pointers.
+ * Finally, repair any fragmentation, and update the page's hint bit about
+ * whether it has free pointers.
*/
PageRepairFragmentation(page);
@@ -4617,7 +4626,7 @@ heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
xl_heap_update *xlrec = (xl_heap_update *) rec;
- if (xl_info & XLOG_HEAP_INIT_PAGE) /* can this case happen? */
+ if (xl_info & XLOG_HEAP_INIT_PAGE) /* can this case happen? */
appendStringInfo(buf, "hot_update(init): ");
else
appendStringInfo(buf, "hot_update: ");
@@ -4724,7 +4733,7 @@ heap_sync(Relation rel)
/* toast heap, if any */
if (OidIsValid(rel->rd_rel->reltoastrelid))
{
- Relation toastrel;
+ Relation toastrel;
toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
FlushRelationBuffers(toastrel);
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 9723241547f..067b23f24cc 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.3 2007/10/24 13:05:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.4 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,21 +22,21 @@
/* Local functions */
-static int heap_prune_chain(Relation relation, Buffer buffer,
- OffsetNumber rootoffnum,
- TransactionId OldestXmin,
- OffsetNumber *redirected, int *nredirected,
- OffsetNumber *nowdead, int *ndead,
- OffsetNumber *nowunused, int *nunused,
- bool redirect_move);
+static int heap_prune_chain(Relation relation, Buffer buffer,
+ OffsetNumber rootoffnum,
+ TransactionId OldestXmin,
+ OffsetNumber *redirected, int *nredirected,
+ OffsetNumber *nowdead, int *ndead,
+ OffsetNumber *nowunused, int *nunused,
+ bool redirect_move);
static void heap_prune_record_redirect(OffsetNumber *redirected,
- int *nredirected,
- OffsetNumber offnum,
- OffsetNumber rdoffnum);
+ int *nredirected,
+ OffsetNumber offnum,
+ OffsetNumber rdoffnum);
static void heap_prune_record_dead(OffsetNumber *nowdead, int *ndead,
- OffsetNumber offnum);
+ OffsetNumber offnum);
static void heap_prune_record_unused(OffsetNumber *nowunused, int *nunused,
- OffsetNumber offnum);
+ OffsetNumber offnum);
/*
@@ -70,16 +70,16 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
return;
/*
- * We prune when a previous UPDATE failed to find enough space on the
- * page for a new tuple version, or when free space falls below the
- * relation's fill-factor target (but not less than 10%).
+ * We prune when a previous UPDATE failed to find enough space on the page
+ * for a new tuple version, or when free space falls below the relation's
+ * fill-factor target (but not less than 10%).
*
- * Checking free space here is questionable since we aren't holding
- * any lock on the buffer; in the worst case we could get a bogus
- * answer. It's unlikely to be *seriously* wrong, though, since
- * reading either pd_lower or pd_upper is probably atomic. Avoiding
- * taking a lock seems better than sometimes getting a wrong answer
- * in what is after all just a heuristic estimate.
+ * Checking free space here is questionable since we aren't holding any
+ * lock on the buffer; in the worst case we could get a bogus answer.
+ * It's unlikely to be *seriously* wrong, though, since reading either
+ * pd_lower or pd_upper is probably atomic. Avoiding taking a lock seems
+ * better than sometimes getting a wrong answer in what is after all just
+ * a heuristic estimate.
*/
minfree = RelationGetTargetPageFreeSpace(relation,
HEAP_DEFAULT_FILLFACTOR);
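
Concretely, the heuristic just puts a floor under the fill-factor target. A sketch of the computation, matching the shape of the surrounding code:

    Size    minfree;

    /* target free space per fillfactor, but never less than 10% of a page */
    minfree = RelationGetTargetPageFreeSpace(relation,
                                             HEAP_DEFAULT_FILLFACTOR);
    minfree = Max(minfree, BLCKSZ / 10);

    if (PageIsFull(dp) || PageGetHeapFreeSpace((Page) dp) < minfree)
    {
        /* worth taking the buffer lock and re-running the test */
    }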
@@ -93,9 +93,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
/*
* Now that we have buffer lock, get accurate information about the
- * page's free space, and recheck the heuristic about whether to prune.
- * (We needn't recheck PageIsPrunable, since no one else could have
- * pruned while we hold pin.)
+ * page's free space, and recheck the heuristic about whether to
+ * prune. (We needn't recheck PageIsPrunable, since no one else could
+ * have pruned while we hold pin.)
*/
if (PageIsFull(dp) || PageGetHeapFreeSpace((Page) dp) < minfree)
{
@@ -119,7 +119,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*
* If redirect_move is set, we remove redirecting line pointers by
* updating the root line pointer to point directly to the first non-dead
- * tuple in the chain. NOTE: eliminating the redirect changes the first
+ * tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple's effective CTID, and is therefore unsafe except within VACUUM FULL.
* The only reason we support this capability at all is that by using it,
* VACUUM FULL need not cope with LP_REDIRECT items at all; which seems a
@@ -136,18 +136,18 @@ int
heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
bool redirect_move, bool report_stats)
{
- int ndeleted = 0;
- Page page = BufferGetPage(buffer);
- OffsetNumber offnum,
- maxoff;
- OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
- OffsetNumber nowdead[MaxHeapTuplesPerPage];
- OffsetNumber nowunused[MaxHeapTuplesPerPage];
- int nredirected = 0;
- int ndead = 0;
- int nunused = 0;
- bool page_was_full = false;
- TransactionId save_prune_xid;
+ int ndeleted = 0;
+ Page page = BufferGetPage(buffer);
+ OffsetNumber offnum,
+ maxoff;
+ OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
+ OffsetNumber nowdead[MaxHeapTuplesPerPage];
+ OffsetNumber nowunused[MaxHeapTuplesPerPage];
+ int nredirected = 0;
+ int ndead = 0;
+ int nunused = 0;
+ bool page_was_full = false;
+ TransactionId save_prune_xid;
START_CRIT_SECTION();
@@ -159,7 +159,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
save_prune_xid = ((PageHeader) page)->pd_prune_xid;
PageClearPrunable(page);
- /*
+ /*
* Also clear the "page is full" flag if it is set, since there's no point
* in repeating the prune/defrag process until something else happens to
* the page.
@@ -176,7 +176,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemid = PageGetItemId(page, offnum);
+ ItemId itemid = PageGetItemId(page, offnum);
/* Nothing to do if slot is empty or already dead */
if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
@@ -233,9 +233,9 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
END_CRIT_SECTION();
/*
- * If requested, report the number of tuples reclaimed to pgstats.
- * This is ndeleted minus ndead, because we don't want to count a now-DEAD
- * root item as a deletion for this purpose.
+ * If requested, report the number of tuples reclaimed to pgstats. This is
+ * ndeleted minus ndead, because we don't want to count a now-DEAD root
+ * item as a deletion for this purpose.
*/
if (report_stats && ndeleted > ndead)
pgstat_update_heap_dead_tuples(relation, ndeleted - ndead);
@@ -243,19 +243,17 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
/*
* XXX Should we update the FSM information of this page ?
*
- * There are two schools of thought here. We may not want to update
- * FSM information so that the page is not used for unrelated
- * UPDATEs/INSERTs and any free space in this page will remain
- * available for further UPDATEs in *this* page, thus improving
- * chances for doing HOT updates.
+ * There are two schools of thought here. We may not want to update FSM
+ * information so that the page is not used for unrelated UPDATEs/INSERTs
+ * and any free space in this page will remain available for further
+ * UPDATEs in *this* page, thus improving chances for doing HOT updates.
*
- * But for a large table and where a page does not receive further
- * UPDATEs for a long time, we might waste this space by not
- * updating the FSM information. The relation may get extended and
- * fragmented further.
+	 * But for a large table where a page does not receive further UPDATEs
+ * for a long time, we might waste this space by not updating the FSM
+ * information. The relation may get extended and fragmented further.
*
- * One possibility is to leave "fillfactor" worth of space in this
- * page and update FSM with the remaining space.
+ * One possibility is to leave "fillfactor" worth of space in this page
+ * and update FSM with the remaining space.
*
* In any case, the current FSM implementation doesn't accept
* one-page-at-a-time updates, so this is all academic for now.
@@ -298,17 +296,17 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
OffsetNumber *nowunused, int *nunused,
bool redirect_move)
{
- int ndeleted = 0;
- Page dp = (Page) BufferGetPage(buffer);
- TransactionId priorXmax = InvalidTransactionId;
- ItemId rootlp;
- HeapTupleHeader htup;
- OffsetNumber latestdead = InvalidOffsetNumber,
- maxoff = PageGetMaxOffsetNumber(dp),
- offnum;
- OffsetNumber chainitems[MaxHeapTuplesPerPage];
- int nchain = 0,
- i;
+ int ndeleted = 0;
+ Page dp = (Page) BufferGetPage(buffer);
+ TransactionId priorXmax = InvalidTransactionId;
+ ItemId rootlp;
+ HeapTupleHeader htup;
+ OffsetNumber latestdead = InvalidOffsetNumber,
+ maxoff = PageGetMaxOffsetNumber(dp),
+ offnum;
+ OffsetNumber chainitems[MaxHeapTuplesPerPage];
+ int nchain = 0,
+ i;
rootlp = PageGetItemId(dp, rootoffnum);
@@ -321,14 +319,14 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
if (HeapTupleHeaderIsHeapOnly(htup))
{
/*
- * If the tuple is DEAD and doesn't chain to anything else, mark it
- * unused immediately. (If it does chain, we can only remove it as
- * part of pruning its chain.)
+ * If the tuple is DEAD and doesn't chain to anything else, mark
+ * it unused immediately. (If it does chain, we can only remove
+ * it as part of pruning its chain.)
*
* We need this primarily to handle aborted HOT updates, that is,
- * XMIN_INVALID heap-only tuples. Those might not be linked to
- * by any chain, since the parent tuple might be re-updated before
- * any pruning occurs. So we have to be able to reap them
+ * XMIN_INVALID heap-only tuples. Those might not be linked to by
+ * any chain, since the parent tuple might be re-updated before
+ * any pruning occurs. So we have to be able to reap them
* separately from chain-pruning.
*
* Note that we might first arrive at a dead heap-only tuple
@@ -354,9 +352,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/* while not end of the chain */
for (;;)
{
- ItemId lp;
- bool tupdead,
- recent_dead;
+ ItemId lp;
+ bool tupdead,
+ recent_dead;
/* Some sanity checks */
if (offnum < FirstOffsetNumber || offnum > maxoff)
@@ -368,9 +366,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
break;
/*
- * If we are looking at the redirected root line pointer,
- * jump to the first normal tuple in the chain. If we find
- * a redirect somewhere else, stop --- it must not be same chain.
+ * If we are looking at the redirected root line pointer, jump to the
+ * first normal tuple in the chain. If we find a redirect somewhere
+ * else, stop --- it must not be same chain.
*/
if (ItemIdIsRedirected(lp))
{
@@ -382,9 +380,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
}
/*
- * Likewise, a dead item pointer can't be part of the chain.
- * (We already eliminated the case of dead root tuple outside
- * this function.)
+ * Likewise, a dead item pointer can't be part of the chain. (We
+ * already eliminated the case of dead root tuple outside this
+ * function.)
*/
if (ItemIdIsDead(lp))
break;
@@ -417,6 +415,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
case HEAPTUPLE_RECENTLY_DEAD:
recent_dead = true;
+
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
@@ -425,6 +424,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
+
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
@@ -434,11 +434,12 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
case HEAPTUPLE_LIVE:
case HEAPTUPLE_INSERT_IN_PROGRESS:
+
/*
* If we wanted to optimize for aborts, we might consider
* marking the page prunable when we see INSERT_IN_PROGRESS.
- * But we don't. See related decisions about when to mark
- * the page prunable in heapam.c.
+ * But we don't. See related decisions about when to mark the
+ * page prunable in heapam.c.
*/
break;
@@ -486,12 +487,12 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* Mark as unused each intermediate item that we are able to remove
* from the chain.
*
- * When the previous item is the last dead tuple seen, we are at
- * the right candidate for redirection.
+ * When the previous item is the last dead tuple seen, we are at the
+ * right candidate for redirection.
*/
for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
{
- ItemId lp = PageGetItemId(dp, chainitems[i]);
+ ItemId lp = PageGetItemId(dp, chainitems[i]);
ItemIdSetUnused(lp);
heap_prune_record_unused(nowunused, nunused, chainitems[i]);
@@ -499,17 +500,17 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
}
/*
- * If the root entry had been a normal tuple, we are deleting it,
- * so count it in the result. But changing a redirect (even to
- * DEAD state) doesn't count.
+ * If the root entry had been a normal tuple, we are deleting it, so
+ * count it in the result. But changing a redirect (even to DEAD
+ * state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
ndeleted++;
/*
* If the DEAD tuple is at the end of the chain, the entire chain is
- * dead and the root line pointer can be marked dead. Otherwise
- * just redirect the root to the correct chain member.
+ * dead and the root line pointer can be marked dead. Otherwise just
+ * redirect the root to the correct chain member.
*/
if (i >= nchain)
{
@@ -528,25 +529,25 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
{
/*
* We found a redirect item that doesn't point to a valid follow-on
- * item. This can happen if the loop in heap_page_prune caused us
- * to visit the dead successor of a redirect item before visiting
- * the redirect item. We can clean up by setting the redirect item
- * to DEAD state.
+ * item. This can happen if the loop in heap_page_prune caused us to
+ * visit the dead successor of a redirect item before visiting the
+ * redirect item. We can clean up by setting the redirect item to
+ * DEAD state.
*/
ItemIdSetDead(rootlp);
heap_prune_record_dead(nowdead, ndead, rootoffnum);
}
/*
- * If requested, eliminate LP_REDIRECT items by moving tuples. Note that
+ * If requested, eliminate LP_REDIRECT items by moving tuples. Note that
* if the root item is LP_REDIRECT and doesn't point to a valid follow-on
* item, we already killed it above.
*/
if (redirect_move && ItemIdIsRedirected(rootlp))
{
OffsetNumber firstoffnum = ItemIdGetRedirect(rootlp);
- ItemId firstlp = PageGetItemId(dp, firstoffnum);
- HeapTupleData firsttup;
+ ItemId firstlp = PageGetItemId(dp, firstoffnum);
+ HeapTupleData firsttup;
Assert(ItemIdIsNormal(firstlp));
/* Set up firsttup to reference the tuple at its existing CTID */
@@ -558,15 +559,15 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
firsttup.t_tableOid = RelationGetRelid(relation);
/*
- * Mark the tuple for invalidation. Needed because we're changing
- * its CTID.
+ * Mark the tuple for invalidation. Needed because we're changing its
+ * CTID.
*/
CacheInvalidateHeapTuple(relation, &firsttup);
/*
- * Change heap-only status of the tuple because after the line
- * pointer manipulation, it's no longer a heap-only tuple, but is
- * directly pointed to by index entries.
+ * Change heap-only status of the tuple because after the line pointer
+ * manipulation, it's no longer a heap-only tuple, but is directly
+ * pointed to by index entries.
*/
Assert(HeapTupleIsHeapOnly(&firsttup));
HeapTupleClearHeapOnly(&firsttup);
@@ -594,7 +595,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/* Record newly-redirected item pointer */
static void
heap_prune_record_redirect(OffsetNumber *redirected, int *nredirected,
- OffsetNumber offnum, OffsetNumber rdoffnum)
+ OffsetNumber offnum, OffsetNumber rdoffnum)
{
Assert(*nredirected < MaxHeapTuplesPerPage);
redirected[*nredirected * 2] = offnum;
@@ -641,17 +642,18 @@ heap_prune_record_unused(OffsetNumber *nowunused, int *nunused,
void
heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
{
- OffsetNumber offnum, maxoff;
+ OffsetNumber offnum,
+ maxoff;
MemSet(root_offsets, 0, MaxHeapTuplesPerPage * sizeof(OffsetNumber));
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
{
- ItemId lp = PageGetItemId(page, offnum);
- HeapTupleHeader htup;
- OffsetNumber nextoffnum;
- TransactionId priorXmax;
+ ItemId lp = PageGetItemId(page, offnum);
+ HeapTupleHeader htup;
+ OffsetNumber nextoffnum;
+ TransactionId priorXmax;
/* skip unused and dead items */
if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index e8c5eec50ac..20c5938ff27 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
- * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@@ -18,13 +18,13 @@
* begin_heap_rewrite
* while (fetch next tuple)
* {
- * if (tuple is dead)
- * rewrite_heap_dead_tuple
- * else
- * {
- * // do any transformations here if required
- * rewrite_heap_tuple
- * }
+ * if (tuple is dead)
+ * rewrite_heap_dead_tuple
+ * else
+ * {
+ * // do any transformations here if required
+ * rewrite_heap_tuple
+ * }
* }
* end_heap_rewrite
*
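
Filled out as compilable-looking C, the protocol reads roughly as follows (a sketch assuming the 8.3-era signatures; get_next_old_tuple(), tuple_is_dead(), and transform() are placeholders for caller-supplied logic):

    RewriteState rwstate;
    HeapTuple    tuple;

    rwstate = begin_heap_rewrite(new_heap, oldest_xmin, freeze_xid, use_wal);

    while ((tuple = get_next_old_tuple()) != NULL)  /* hypothetical fetch */
    {
        if (tuple_is_dead(tuple))       /* hypothetical; cf.
                                         * HeapTupleSatisfiesVacuum */
            rewrite_heap_dead_tuple(rwstate, tuple);
        else
            rewrite_heap_tuple(rwstate, tuple, transform(tuple));
    }

    end_heap_rewrite(rwstate);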
@@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
- * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
- * is encountered. That helps to keep the memory usage down. At the end,
+ * is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
- * for deadness using OldestXmin is not exact. In such a case we might
+ * for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
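
Both tables are keyed by (xmin, TID), as the structures below show. A short sketch of the lookup side, mirroring the code later in this file (note the memset: the struct is hashed raw, so any padding must be zeroed):

    TidHashKey  hashkey;
    bool        found;

    memset(&hashkey, 0, sizeof(hashkey));       /* zero padding for hashing */
    hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
    hashkey.tid = old_tuple->t_data->t_ctid;    /* B tuple this A points at */

    (void) hash_search(state->rs_unresolved_tups, &hashkey,
                       HASH_FIND, &found);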
@@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
- * in the whole table. Note that if we do fail halfway through a CLUSTER,
+ * in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@@ -96,7 +96,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.7 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.8 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -116,20 +116,20 @@
*/
typedef struct RewriteStateData
{
- Relation rs_new_rel; /* destination heap */
- Page rs_buffer; /* page currently being built */
- BlockNumber rs_blockno; /* block where page will go */
- bool rs_buffer_valid; /* T if any tuples in buffer */
- bool rs_use_wal; /* must we WAL-log inserts? */
- TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
+ Relation rs_new_rel; /* destination heap */
+ Page rs_buffer; /* page currently being built */
+ BlockNumber rs_blockno; /* block where page will go */
+ bool rs_buffer_valid; /* T if any tuples in buffer */
+ bool rs_use_wal; /* must we WAL-log inserts? */
+ TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
* determine tuple visibility */
- TransactionId rs_freeze_xid; /* Xid that will be used as freeze
- * cutoff point */
- MemoryContext rs_cxt; /* for hash tables and entries and
- * tuples in them */
- HTAB *rs_unresolved_tups; /* unmatched A tuples */
- HTAB *rs_old_new_tid_map; /* unmatched B tuples */
-} RewriteStateData;
+ TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
+ * point */
+ MemoryContext rs_cxt; /* for hash tables and entries and tuples in
+ * them */
+ HTAB *rs_unresolved_tups; /* unmatched A tuples */
+ HTAB *rs_old_new_tid_map; /* unmatched B tuples */
+} RewriteStateData;
/*
* The lookup keys for the hash tables are tuple TID and xmin (we must check
@@ -139,27 +139,27 @@ typedef struct RewriteStateData
*/
typedef struct
{
- TransactionId xmin; /* tuple xmin */
+ TransactionId xmin; /* tuple xmin */
ItemPointerData tid; /* tuple location in old heap */
-} TidHashKey;
+} TidHashKey;
/*
* Entry structures for the hash tables
*/
typedef struct
{
- TidHashKey key; /* expected xmin/old location of B tuple */
+ TidHashKey key; /* expected xmin/old location of B tuple */
ItemPointerData old_tid; /* A's location in the old heap */
- HeapTuple tuple; /* A's tuple contents */
-} UnresolvedTupData;
+ HeapTuple tuple; /* A's tuple contents */
+} UnresolvedTupData;
typedef UnresolvedTupData *UnresolvedTup;
typedef struct
{
- TidHashKey key; /* actual xmin/old location of B tuple */
+ TidHashKey key; /* actual xmin/old location of B tuple */
ItemPointerData new_tid; /* where we put it in the new heap */
-} OldToNewMappingData;
+} OldToNewMappingData;
typedef OldToNewMappingData *OldToNewMapping;
@@ -189,8 +189,8 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
HASHCTL hash_ctl;
/*
- * To ease cleanup, make a separate context that will contain
- * the RewriteState struct itself plus all subsidiary data.
+ * To ease cleanup, make a separate context that will contain the
+ * RewriteState struct itself plus all subsidiary data.
*/
rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
"Table rewrite",
@@ -221,7 +221,7 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
state->rs_unresolved_tups =
hash_create("Rewrite / Unresolved ctids",
- 128, /* arbitrary initial size */
+ 128, /* arbitrary initial size */
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
@@ -229,7 +229,7 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
state->rs_old_new_tid_map =
hash_create("Rewrite / Old to new tid map",
- 128, /* arbitrary initial size */
+ 128, /* arbitrary initial size */
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
@@ -250,8 +250,8 @@ end_heap_rewrite(RewriteState state)
UnresolvedTup unresolved;
/*
- * Write any remaining tuples in the UnresolvedTups table. If we have
- * any left, they should in fact be dead, but let's err on the safe side.
+ * Write any remaining tuples in the UnresolvedTups table. If we have any
+ * left, they should in fact be dead, but let's err on the safe side.
*
* XXX this really is a waste of code no?
*/
@@ -276,15 +276,15 @@ end_heap_rewrite(RewriteState state)
}
/*
- * If the rel isn't temp, must fsync before commit. We use heap_sync
- * to ensure that the toast table gets fsync'd too.
+ * If the rel isn't temp, must fsync before commit. We use heap_sync to
+ * ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
- * obvious that we have to do it even if we did WAL-log the pages.
- * The reason is the same as in tablecmds.c's copy_relation_data():
- * we're writing data that's not in shared buffers, and so a CHECKPOINT
- * occurring during the rewriteheap operation won't have fsync'd data
- * we wrote before the checkpoint.
+ * obvious that we have to do it even if we did WAL-log the pages. The
+ * reason is the same as in tablecmds.c's copy_relation_data(): we're
+ * writing data that's not in shared buffers, and so a CHECKPOINT
+ * occurring during the rewriteheap operation won't have fsync'd data we
+ * wrote before the checkpoint.
*/
if (!state->rs_new_rel->rd_istemp)
heap_sync(state->rs_new_rel);
@@ -310,17 +310,17 @@ rewrite_heap_tuple(RewriteState state,
{
MemoryContext old_cxt;
ItemPointerData old_tid;
- TidHashKey hashkey;
- bool found;
- bool free_new;
+ TidHashKey hashkey;
+ bool found;
+ bool free_new;
old_cxt = MemoryContextSwitchTo(state->rs_cxt);
/*
* Copy the original tuple's visibility information into new_tuple.
*
- * XXX we might later need to copy some t_infomask2 bits, too?
- * Right now, we intentionally clear the HOT status bits.
+ * XXX we might later need to copy some t_infomask2 bits, too? Right now,
+ * we intentionally clear the HOT status bits.
*/
memcpy(&new_tuple->t_data->t_choice.t_heap,
&old_tuple->t_data->t_choice.t_heap,
@@ -335,16 +335,16 @@ rewrite_heap_tuple(RewriteState state,
* While we have our hands on the tuple, we may as well freeze any
* very-old xmin or xmax, so that future VACUUM effort can be saved.
*
- * Note we abuse heap_freeze_tuple() a bit here, since it's expecting
- * to be given a pointer to a tuple in a disk buffer. It happens
- * though that we can get the right things to happen by passing
- * InvalidBuffer for the buffer.
+ * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
+ * be given a pointer to a tuple in a disk buffer. It happens though that
+ * we can get the right things to happen by passing InvalidBuffer for the
+ * buffer.
*/
heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);
/*
- * Invalid ctid means that ctid should point to the tuple itself.
- * We'll override it later if the tuple is part of an update chain.
+ * Invalid ctid means that ctid should point to the tuple itself. We'll
+ * override it later if the tuple is part of an update chain.
*/
ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);
@@ -369,9 +369,9 @@ rewrite_heap_tuple(RewriteState state,
if (mapping != NULL)
{
/*
- * We've already copied the tuple that t_ctid points to, so we
- * can set the ctid of this tuple to point to the new location,
- * and insert it right away.
+ * We've already copied the tuple that t_ctid points to, so we can
+ * set the ctid of this tuple to point to the new location, and
+ * insert it right away.
*/
new_tuple->t_data->t_ctid = mapping->new_tid;
@@ -405,10 +405,10 @@ rewrite_heap_tuple(RewriteState state,
}
/*
- * Now we will write the tuple, and then check to see if it is the
- * B tuple in any new or known pair. When we resolve a known pair,
- * we will be able to write that pair's A tuple, and then we have to
- * check if it resolves some other pair. Hence, we need a loop here.
+ * Now we will write the tuple, and then check to see if it is the B tuple
+ * in any new or known pair. When we resolve a known pair, we will be
+ * able to write that pair's A tuple, and then we have to check if it
+ * resolves some other pair. Hence, we need a loop here.
*/
old_tid = old_tuple->t_self;
free_new = false;
@@ -422,13 +422,12 @@ rewrite_heap_tuple(RewriteState state,
new_tid = new_tuple->t_self;
/*
- * If the tuple is the updated version of a row, and the prior
- * version wouldn't be DEAD yet, then we need to either resolve
- * the prior version (if it's waiting in rs_unresolved_tups),
- * or make an entry in rs_old_new_tid_map (so we can resolve it
- * when we do see it). The previous tuple's xmax would equal this
- * one's xmin, so it's RECENTLY_DEAD if and only if the xmin is
- * not before OldestXmin.
+ * If the tuple is the updated version of a row, and the prior version
+ * wouldn't be DEAD yet, then we need to either resolve the prior
+ * version (if it's waiting in rs_unresolved_tups), or make an entry
+ * in rs_old_new_tid_map (so we can resolve it when we do see it).
+ * The previous tuple's xmax would equal this one's xmin, so it's
+ * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
*/
if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
@@ -449,9 +448,9 @@ rewrite_heap_tuple(RewriteState state,
if (unresolved != NULL)
{
/*
- * We have seen and memorized the previous tuple already.
- * Now that we know where we inserted the tuple its t_ctid
- * points to, fix its t_ctid and insert it to the new heap.
+ * We have seen and memorized the previous tuple already. Now
+ * that we know where we inserted the tuple its t_ctid points
+	 * to, fix its t_ctid and insert it into the new heap.
*/
if (free_new)
heap_freetuple(new_tuple);
@@ -461,8 +460,8 @@ rewrite_heap_tuple(RewriteState state,
new_tuple->t_data->t_ctid = new_tid;
/*
- * We don't need the hash entry anymore, but don't free
- * its tuple just yet.
+ * We don't need the hash entry anymore, but don't free its
+ * tuple just yet.
*/
hash_search(state->rs_unresolved_tups, &hashkey,
HASH_REMOVE, &found);
@@ -474,8 +473,8 @@ rewrite_heap_tuple(RewriteState state,
else
{
/*
- * Remember the new tid of this tuple. We'll use it to set
- * the ctid when we find the previous tuple in the chain.
+ * Remember the new tid of this tuple. We'll use it to set the
+ * ctid when we find the previous tuple in the chain.
*/
OldToNewMapping mapping;
@@ -506,22 +505,22 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
/*
* If we have already seen an earlier tuple in the update chain that
- * points to this tuple, let's forget about that earlier tuple. It's
- * in fact dead as well, our simple xmax < OldestXmin test in
- * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It
- * happens when xmin of a tuple is greater than xmax, which sounds
+	 * points to this tuple, let's forget about that earlier tuple. It's in
+	 * fact dead as well; our simple xmax < OldestXmin test in
+ * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
+ * when xmin of a tuple is greater than xmax, which sounds
* counter-intuitive but is perfectly valid.
*
- * We don't bother to try to detect the situation the other way
- * round, when we encounter the dead tuple first and then the
- * recently dead one that points to it. If that happens, we'll
- * have some unmatched entries in the UnresolvedTups hash table
- * at the end. That can happen anyway, because a vacuum might
- * have removed the dead tuple in the chain before us.
+ * We don't bother to try to detect the situation the other way round,
+ * when we encounter the dead tuple first and then the recently dead one
+ * that points to it. If that happens, we'll have some unmatched entries
+ * in the UnresolvedTups hash table at the end. That can happen anyway,
+ * because a vacuum might have removed the dead tuple in the chain before
+ * us.
*/
UnresolvedTup unresolved;
- TidHashKey hashkey;
- bool found;
+ TidHashKey hashkey;
+ bool found;
memset(&hashkey, 0, sizeof(hashkey));
hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
@@ -541,7 +540,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
- * Insert a tuple to the new relation. This has to track heap_insert
+ * Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
@@ -551,11 +550,12 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
- Page page = state->rs_buffer;
- Size pageFreeSpace, saveFreeSpace;
- Size len;
- OffsetNumber newoff;
- HeapTuple heaptup;
+ Page page = state->rs_buffer;
+ Size pageFreeSpace,
+ saveFreeSpace;
+ Size len;
+ OffsetNumber newoff;
+ HeapTuple heaptup;
/*
* If the new tuple is too big for storage or contains already toasted
@@ -610,7 +610,8 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
/*
* Now write the page. We say isTemp = true even if it's not a
* temp table, because there's no need for smgr to schedule an
- * fsync for this write; we'll do it ourselves in end_heap_rewrite.
+ * fsync for this write; we'll do it ourselves in
+ * end_heap_rewrite.
*/
RelationOpenSmgr(state->rs_new_rel);
smgrextend(state->rs_new_rel->rd_smgr, state->rs_blockno,
@@ -638,12 +639,12 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);
/*
- * Insert the correct position into CTID of the stored tuple, too,
- * if the caller didn't supply a valid CTID.
+ * Insert the correct position into CTID of the stored tuple, too, if the
+ * caller didn't supply a valid CTID.
*/
- if(!ItemPointerIsValid(&tup->t_data->t_ctid))
+ if (!ItemPointerIsValid(&tup->t_data->t_ctid))
{
- ItemId newitemid;
+ ItemId newitemid;
HeapTupleHeader onpage_tup;
newitemid = PageGetItemId(page, newoff);
diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c
index 795efccc090..7b0653c9baa 100644
--- a/src/backend/access/heap/syncscan.c
+++ b/src/backend/access/heap/syncscan.c
@@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
- * to keep them synchronized to reduce the overall I/O needed. The goal is
+ * to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
- * tables in progress at any time. Therefore we just keep the scan positions
+ * tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
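
From the caller's side the module boils down to two entry points, used roughly like this (sketch; block processing and the empty-table case elided):

    BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
    BlockNumber start = ss_get_location(rel, nblocks);
    BlockNumber blkno = start;

    do
    {
        /* ... read and process block blkno ... */
        ss_report_location(rel, blkno);
        blkno = (blkno + 1) % nblocks;  /* wrap so the whole table is read */
    } while (blkno != start);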
@@ -40,7 +40,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/syncscan.c,v 1.1 2007/06/08 18:23:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/syncscan.c,v 1.2 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,7 @@
/* GUC variables */
#ifdef TRACE_SYNCSCAN
-bool trace_syncscan = false;
+bool trace_syncscan = false;
#endif
@@ -89,21 +89,21 @@ typedef struct ss_scan_location_t
{
RelFileNode relfilenode; /* identity of a relation */
BlockNumber location; /* last-reported location in the relation */
-} ss_scan_location_t;
+} ss_scan_location_t;
typedef struct ss_lru_item_t
{
- struct ss_lru_item_t *prev;
- struct ss_lru_item_t *next;
- ss_scan_location_t location;
-} ss_lru_item_t;
+ struct ss_lru_item_t *prev;
+ struct ss_lru_item_t *next;
+ ss_scan_location_t location;
+} ss_lru_item_t;
typedef struct ss_scan_locations_t
{
- ss_lru_item_t *head;
- ss_lru_item_t *tail;
- ss_lru_item_t items[1]; /* SYNC_SCAN_NELEM items */
-} ss_scan_locations_t;
+ ss_lru_item_t *head;
+ ss_lru_item_t *tail;
+ ss_lru_item_t items[1]; /* SYNC_SCAN_NELEM items */
+} ss_scan_locations_t;
#define SizeOfScanLocations(N) offsetof(ss_scan_locations_t, items[N])
@@ -112,7 +112,7 @@ static ss_scan_locations_t *scan_locations;
/* prototypes for internal functions */
static BlockNumber ss_search(RelFileNode relfilenode,
- BlockNumber location, bool set);
+ BlockNumber location, bool set);
/*
@@ -130,8 +130,8 @@ SyncScanShmemSize(void)
void
SyncScanShmemInit(void)
{
- int i;
- bool found;
+ int i;
+ bool found;
scan_locations = (ss_scan_locations_t *)
ShmemInitStruct("Sync Scan Locations List",
@@ -186,20 +186,20 @@ SyncScanShmemInit(void)
static BlockNumber
ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
{
- ss_lru_item_t *item;
+ ss_lru_item_t *item;
item = scan_locations->head;
for (;;)
{
- bool match;
+ bool match;
match = RelFileNodeEquals(item->location.relfilenode, relfilenode);
if (match || item->next == NULL)
{
/*
- * If we reached the end of list and no match was found,
- * take over the last entry
+			 * If we reached the end of the list and no match was found, take over
+ * the last entry
*/
if (!match)
{
@@ -242,7 +242,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
- * so that number is passed in rather than computing it again. The result
+ * so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
@@ -257,8 +257,8 @@ ss_get_location(Relation rel, BlockNumber relnblocks)
/*
* If the location is not a valid block number for this scan, start at 0.
*
- * This can happen if for instance a VACUUM truncated the table
- * since the location was saved.
+	 * This can happen if, for instance, a VACUUM truncated the table since the
+ * location was saved.
*/
if (startloc >= relnblocks)
startloc = 0;
@@ -294,12 +294,12 @@ ss_report_location(Relation rel, BlockNumber location)
#endif
/*
- * To reduce lock contention, only report scan progress every N pages.
- * For the same reason, don't block if the lock isn't immediately
- * available. Missing a few updates isn't critical, it just means that a
- * new scan that wants to join the pack will start a little bit behind the
- * head of the scan. Hopefully the pages are still in OS cache and the
- * scan catches up quickly.
+ * To reduce lock contention, only report scan progress every N pages. For
+ * the same reason, don't block if the lock isn't immediately available.
+ * Missing a few updates isn't critical, it just means that a new scan
+ * that wants to join the pack will start a little bit behind the head of
+ * the scan. Hopefully the pages are still in OS cache and the scan
+ * catches up quickly.
*/
if ((location % SYNC_SCAN_REPORT_INTERVAL) == 0)
{
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 4f62b1f8598..0a8873f9945 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.78 2007/10/11 18:19:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.79 2007/11/15 21:14:32 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -72,9 +72,9 @@ do { \
static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value,
- bool use_wal, bool use_fsm);
-static struct varlena *toast_fetch_datum(struct varlena *attr);
-static struct varlena *toast_fetch_datum_slice(struct varlena *attr,
+ bool use_wal, bool use_fsm);
+static struct varlena *toast_fetch_datum(struct varlena * attr);
+static struct varlena *toast_fetch_datum_slice(struct varlena * attr,
int32 sliceoffset, int32 length);
@@ -90,9 +90,9 @@ static struct varlena *toast_fetch_datum_slice(struct varlena *attr,
----------
*/
struct varlena *
-heap_tuple_fetch_attr(struct varlena *attr)
+heap_tuple_fetch_attr(struct varlena * attr)
{
- struct varlena *result;
+ struct varlena *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -121,7 +121,7 @@ heap_tuple_fetch_attr(struct varlena *attr)
* ----------
*/
struct varlena *
-heap_tuple_untoast_attr(struct varlena *attr)
+heap_tuple_untoast_attr(struct varlena * attr)
{
if (VARATT_IS_EXTERNAL(attr))
{
@@ -156,8 +156,8 @@ heap_tuple_untoast_attr(struct varlena *attr)
/*
* This is a short-header varlena --- convert to 4-byte header format
*/
- Size data_size = VARSIZE_SHORT(attr) - VARHDRSZ_SHORT;
- Size new_size = data_size + VARHDRSZ;
+ Size data_size = VARSIZE_SHORT(attr) - VARHDRSZ_SHORT;
+ Size new_size = data_size + VARHDRSZ;
struct varlena *new_attr;
new_attr = (struct varlena *) palloc(new_size);
@@ -178,12 +178,12 @@ heap_tuple_untoast_attr(struct varlena *attr)
* ----------
*/
struct varlena *
-heap_tuple_untoast_attr_slice(struct varlena *attr,
+heap_tuple_untoast_attr_slice(struct varlena * attr,
int32 sliceoffset, int32 slicelength)
{
struct varlena *preslice;
struct varlena *result;
- char *attrdata;
+ char *attrdata;
int32 attrsize;
if (VARATT_IS_EXTERNAL(attr))
@@ -205,7 +205,7 @@ heap_tuple_untoast_attr_slice(struct varlena *attr,
if (VARATT_IS_COMPRESSED(preslice))
{
PGLZ_Header *tmp = (PGLZ_Header *) preslice;
- Size size = PGLZ_RAW_SIZE(tmp) + VARHDRSZ;
+ Size size = PGLZ_RAW_SIZE(tmp) + VARHDRSZ;
preslice = (struct varlena *) palloc(size);
SET_VARSIZE(preslice, size);
@@ -300,7 +300,7 @@ toast_raw_datum_size(Datum value)
Size
toast_datum_size(Datum value)
{
- struct varlena *attr = (struct varlena *) DatumGetPointer(value);
+ struct varlena *attr = (struct varlena *) DatumGetPointer(value);
Size result;
if (VARATT_IS_EXTERNAL(attr))
@@ -469,8 +469,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
for (i = 0; i < numAttrs; i++)
{
- struct varlena *old_value;
- struct varlena *new_value;
+ struct varlena *old_value;
+ struct varlena *new_value;
if (oldtup != NULL)
{
@@ -488,7 +488,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
VARATT_IS_EXTERNAL(old_value))
{
if (toast_isnull[i] || !VARATT_IS_EXTERNAL(new_value) ||
- memcmp((char *) old_value, (char *) new_value,
+ memcmp((char *) old_value, (char *) new_value,
VARSIZE_EXTERNAL(old_value)) != 0)
{
/*
@@ -543,7 +543,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
- * PLAIN storage). If necessary, we'll push it out as a new
+ * PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@@ -656,7 +656,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
- * inline. But skip this if there's no toast table to push them to.
+ * inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@@ -956,7 +956,7 @@ toast_flatten_tuple_attribute(Datum value,
has_nulls = true;
else if (att[i]->attlen == -1)
{
- struct varlena *new_value;
+ struct varlena *new_value;
new_value = (struct varlena *) DatumGetPointer(toast_values[i]);
if (VARATT_IS_EXTERNAL(new_value) ||
@@ -1046,7 +1046,8 @@ toast_compress_datum(Datum value)
Assert(!VARATT_IS_COMPRESSED(value));
/*
- * No point in wasting a palloc cycle if value is too short for compression
+ * No point in wasting a palloc cycle if value is too short for
+ * compression
*/
if (valsize < PGLZ_strategy_default->min_input_size)
return PointerGetDatum(NULL);
@@ -1110,8 +1111,8 @@ toast_save_datum(Relation rel, Datum value,
/*
* Get the data pointer and length, and compute va_rawsize and va_extsize.
*
- * va_rawsize is the size of the equivalent fully uncompressed datum,
- * so we have to adjust for short headers.
+ * va_rawsize is the size of the equivalent fully uncompressed datum, so
+ * we have to adjust for short headers.
*
* va_extsize is the actual size of the data payload in the toast records.
*/
@@ -1119,7 +1120,7 @@ toast_save_datum(Relation rel, Datum value,
{
data_p = VARDATA_SHORT(value);
data_todo = VARSIZE_SHORT(value) - VARHDRSZ_SHORT;
- toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
+ toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
toast_pointer.va_extsize = data_todo;
}
else if (VARATT_IS_COMPRESSED(value))
@@ -1283,7 +1284,7 @@ toast_delete_datum(Relation rel, Datum value)
* ----------
*/
static struct varlena *
-toast_fetch_datum(struct varlena *attr)
+toast_fetch_datum(struct varlena * attr)
{
Relation toastrel;
Relation toastidx;
@@ -1299,7 +1300,7 @@ toast_fetch_datum(struct varlena *attr)
int32 numchunks;
Pointer chunk;
bool isnull;
- char *chunkdata;
+ char *chunkdata;
int32 chunksize;
/* Must copy to access aligned fields */
@@ -1365,7 +1366,7 @@ toast_fetch_datum(struct varlena *attr)
{
/* should never happen */
elog(ERROR, "found toasted toast chunk");
- chunksize = 0; /* keep compiler quiet */
+ chunksize = 0; /* keep compiler quiet */
chunkdata = NULL;
}
@@ -1384,12 +1385,12 @@ toast_fetch_datum(struct varlena *attr)
residx, numchunks,
toast_pointer.va_valueid);
}
- else if (residx == numchunks-1)
+ else if (residx == numchunks - 1)
{
if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != ressize)
elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u",
chunksize,
- (int) (ressize - residx*TOAST_MAX_CHUNK_SIZE),
+ (int) (ressize - residx * TOAST_MAX_CHUNK_SIZE),
residx,
toast_pointer.va_valueid);
}
@@ -1397,7 +1398,7 @@ toast_fetch_datum(struct varlena *attr)
elog(ERROR, "unexpected chunk number %d for toast value %u (out of range %d..%d)",
residx,
toast_pointer.va_valueid,
- 0, numchunks-1);
+ 0, numchunks - 1);
/*
* Copy the data into proper place in our result
@@ -1435,7 +1436,7 @@ toast_fetch_datum(struct varlena *attr)
* ----------
*/
static struct varlena *
-toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
+toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length)
{
Relation toastrel;
Relation toastidx;
@@ -1457,7 +1458,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
int totalchunks;
Pointer chunk;
bool isnull;
- char *chunkdata;
+ char *chunkdata;
int32 chunksize;
int32 chcpystrt;
int32 chcpyend;
@@ -1574,7 +1575,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
{
/* should never happen */
elog(ERROR, "found toasted toast chunk");
- chunksize = 0; /* keep compiler quiet */
+ chunksize = 0; /* keep compiler quiet */
chunkdata = NULL;
}
@@ -1593,7 +1594,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
residx, totalchunks,
toast_pointer.va_valueid);
}
- else if (residx == totalchunks-1)
+ else if (residx == totalchunks - 1)
{
if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != attrsize)
elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u when fetching slice",
@@ -1606,7 +1607,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
elog(ERROR, "unexpected chunk number %d for toast value %u (out of range %d..%d)",
residx,
toast_pointer.va_valueid,
- 0, totalchunks-1);
+ 0, totalchunks - 1);
/*
* Copy the data into proper place in our result
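[Note on the chunk checks rewrapped above: they boil down to simple arithmetic. Every chunk of a toasted value is TOAST_MAX_CHUNK_SIZE bytes except the last, which holds ressize - residx * TOAST_MAX_CHUNK_SIZE. A standalone sketch of that invariant follows; this is not PostgreSQL code, and the chunk-size constant is assumed, since the real one is derived from the block size.]

#include <stdio.h>

#define TOAST_MAX_CHUNK_SIZE 1994   /* assumed; the real value is computed from BLCKSZ */

/* Expected size of chunk 'residx' of a toast value of total size 'ressize'. */
static int
expected_chunk_size(int ressize, int numchunks, int residx)
{
    if (residx < numchunks - 1)
        return TOAST_MAX_CHUNK_SIZE;    /* all but the last chunk are full */
    return ressize - residx * TOAST_MAX_CHUNK_SIZE; /* last chunk holds the remainder */
}

int
main(void)
{
    int ressize = 5000;
    int numchunks = (ressize + TOAST_MAX_CHUNK_SIZE - 1) / TOAST_MAX_CHUNK_SIZE;

    for (int residx = 0; residx < numchunks; residx++)
        printf("chunk %d: %d bytes\n", residx,
               expected_chunk_size(ressize, numchunks, residx));
    return 0;
}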
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index fd727ca68c8..5f1092db054 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.99 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.100 2007/11/15 21:14:32 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -379,7 +379,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -413,7 +413,7 @@ HeapTuple
index_getnext(IndexScanDesc scan, ScanDirection direction)
{
HeapTuple heapTuple = &scan->xs_ctup;
- ItemPointer tid = &heapTuple->t_self;
+ ItemPointer tid = &heapTuple->t_self;
FmgrInfo *procedure;
SCAN_CHECKS;
@@ -429,14 +429,14 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
for (;;)
{
OffsetNumber offnum;
- bool at_chain_start;
- Page dp;
+ bool at_chain_start;
+ Page dp;
if (scan->xs_next_hot != InvalidOffsetNumber)
{
/*
- * We are resuming scan of a HOT chain after having returned
- * an earlier member. Must still hold pin on current heap page.
+ * We are resuming scan of a HOT chain after having returned an
+ * earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@@ -506,7 +506,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/* Scan through possible multiple members of HOT-chain */
for (;;)
{
- ItemId lp;
+ ItemId lp;
ItemPointer ctid;
/* check for bogus TID */
@@ -532,8 +532,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
/*
- * We must initialize all of *heapTuple (ie, scan->xs_ctup)
- * since it is returned to the executor on success.
+ * We must initialize all of *heapTuple (ie, scan->xs_ctup) since
+ * it is returned to the executor on success.
*/
heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
heapTuple->t_len = ItemIdGetLength(lp);
@@ -544,20 +544,21 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* Shouldn't see a HEAP_ONLY tuple at chain start. (This test
* should be unnecessary, since the chain root can't be removed
- * while we have pin on the index entry, but let's make it anyway.)
+ * while we have pin on the index entry, but let's make it
+ * anyway.)
*/
if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
break;
/*
* The xmin should match the previous xmax value, else chain is
- * broken. (Note: this test is not optional because it protects
- * us against the case where the prior chain member's xmax
- * aborted since we looked at it.)
+ * broken. (Note: this test is not optional because it protects
+ * us against the case where the prior chain member's xmax aborted
+ * since we looked at it.)
*/
if (TransactionIdIsValid(scan->xs_prev_xmax) &&
!TransactionIdEquals(scan->xs_prev_xmax,
- HeapTupleHeaderGetXmin(heapTuple->t_data)))
+ HeapTupleHeaderGetXmin(heapTuple->t_data)))
break;
/* If it's visible per the snapshot, we must return it */
@@ -565,10 +566,10 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
scan->xs_cbuf))
{
/*
- * If the snapshot is MVCC, we know that it could accept
- * at most one member of the HOT chain, so we can skip
- * examining any more members. Otherwise, check for
- * continuation of the HOT-chain, and set state for next time.
+ * If the snapshot is MVCC, we know that it could accept at
+ * most one member of the HOT chain, so we can skip examining
+ * any more members. Otherwise, check for continuation of the
+ * HOT-chain, and set state for next time.
*/
if (IsMVCCSnapshot(scan->xs_snapshot))
scan->xs_next_hot = InvalidOffsetNumber;
@@ -615,7 +616,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
else
break; /* end of chain */
- } /* loop over a single HOT chain */
+ } /* loop over a single HOT chain */
LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);
@@ -788,7 +789,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -822,7 +823,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
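[Note on the indexam.c hunks above: the rewrapped comments describe HOT-chain traversal, where each chain member's xmin must equal the previous member's xmax, or the chain is considered broken (the prior member's updater may have aborted). A toy walk over a simplified chain, with invented types, showing just that linkage check:]

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;

typedef struct TupleStub
{
    TransactionId xmin;         /* inserting transaction */
    TransactionId xmax;         /* updating transaction, 0 if none */
    int           next;         /* index of next chain member, -1 at end */
} TupleStub;

/* Walk a HOT chain, stopping if the xmin/xmax linkage is broken. */
static void
walk_chain(const TupleStub *tuples, int start)
{
    TransactionId prev_xmax = 0;    /* 0 = invalid: no check for the root */

    for (int i = start; i >= 0; i = tuples[i].next)
    {
        if (prev_xmax != 0 && prev_xmax != tuples[i].xmin)
        {
            printf("tuple %d: chain broken (xmin %u != prev xmax %u)\n",
                   i, tuples[i].xmin, prev_xmax);
            return;
        }
        printf("tuple %d: xmin %u, xmax %u\n", i, tuples[i].xmin, tuples[i].xmax);
        prev_xmax = tuples[i].xmax;
    }
}

int
main(void)
{
    TupleStub chain[] = {
        {100, 101, 1},          /* root, updated by xact 101 */
        {101, 102, 2},          /* HOT update by 101 */
        {999, 0, -1},           /* xmin doesn't match: treat as recycled */
    };

    walk_chain(chain, 0);
    return 0;
}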
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 5f7ecbe16da..413767ffeec 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.160 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.161 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@ typedef struct
OffsetNumber newitemoff; /* where the new item is to be inserted */
int leftspace; /* space available for items on left page */
int rightspace; /* space available for items on right page */
- int olddataitemstotal; /* space taken by old items */
+ int olddataitemstotal; /* space taken by old items */
bool have_split; /* found a valid split? */
@@ -222,7 +222,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
if (!ItemIdIsDead(curitemid))
{
ItemPointerData htid;
- bool all_dead;
+ bool all_dead;
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
@@ -239,8 +239,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* We check the whole HOT-chain to see if there is any tuple
- * that satisfies SnapshotDirty. This is necessary because
- * we have just a single index entry for the entire chain.
+ * that satisfies SnapshotDirty. This is necessary because we
+ * have just a single index entry for the entire chain.
*/
if (heap_hot_search(&htid, heapRel, &SnapshotDirty, &all_dead))
{
@@ -267,15 +267,16 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* is itself now committed dead --- if so, don't complain.
* This is a waste of time in normal scenarios but we must
* do it to support CREATE INDEX CONCURRENTLY.
- *
+ *
* We must follow HOT-chains here because during
* concurrent index build, we insert the root TID though
* the actual tuple may be somewhere in the HOT-chain.
- * While following the chain we might not stop at the exact
- * tuple which triggered the insert, but that's OK because
- * if we find a live tuple anywhere in this chain, we have
- * a unique key conflict. The other live tuple is not part
- * of this chain because it had a different index entry.
+ * While following the chain we might not stop at the
+ * exact tuple which triggered the insert, but that's OK
+ * because if we find a live tuple anywhere in this chain,
+ * we have a unique key conflict. The other live tuple is
+ * not part of this chain because it had a different index
+ * entry.
*/
htid = itup->t_tid;
if (heap_hot_search(&htid, heapRel, SnapshotSelf, NULL))
@@ -293,8 +294,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
ereport(ERROR,
(errcode(ERRCODE_UNIQUE_VIOLATION),
- errmsg("duplicate key value violates unique constraint \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("duplicate key value violates unique constraint \"%s\"",
+ RelationGetRelationName(rel))));
}
else if (all_dead)
{
@@ -372,7 +373,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* On entry, *buf and *offsetptr point to the first legal position
* where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
- * InvalidOffsetNumber, in which case the function will search the right
+ * InvalidOffsetNumber, in which case the function will search the right
* location within the page if needed. On exit, they point to the chosen
* insert location. If findinsertloc decided to move right, the lock and
* pin on the original page will be released and the new page returned to
@@ -389,11 +390,12 @@ _bt_findinsertloc(Relation rel,
ScanKey scankey,
IndexTuple newtup)
{
- Buffer buf = *bufptr;
- Page page = BufferGetPage(buf);
- Size itemsz;
+ Buffer buf = *bufptr;
+ Page page = BufferGetPage(buf);
+ Size itemsz;
BTPageOpaque lpageop;
- bool movedright, vacuumed;
+ bool movedright,
+ vacuumed;
OffsetNumber newitemoff;
OffsetNumber firstlegaloff = *offsetptr;
@@ -447,19 +449,21 @@ _bt_findinsertloc(Relation rel,
Buffer rbuf;
/*
- * before considering moving right, see if we can obtain enough
- * space by erasing LP_DEAD items
+ * before considering moving right, see if we can obtain enough space
+ * by erasing LP_DEAD items
*/
if (P_ISLEAF(lpageop) && P_HAS_GARBAGE(lpageop))
{
_bt_vacuum_one_page(rel, buf);
- /* remember that we vacuumed this page, because that makes
- * the hint supplied by the caller invalid */
+ /*
+ * remember that we vacuumed this page, because that makes the
+ * hint supplied by the caller invalid
+ */
vacuumed = true;
if (PageGetFreeSpace(page) >= itemsz)
- break; /* OK, now we have enough space */
+ break; /* OK, now we have enough space */
}
/*
@@ -473,11 +477,10 @@ _bt_findinsertloc(Relation rel,
/*
* step right to next non-dead page
*
- * must write-lock that page before releasing write lock on
- * current page; else someone else's _bt_check_unique scan could
- * fail to see our insertion. write locks on intermediate dead
- * pages won't do because we don't know when they will get
- * de-linked from the tree.
+ * must write-lock that page before releasing write lock on current
+ * page; else someone else's _bt_check_unique scan could fail to see
+ * our insertion. write locks on intermediate dead pages won't do
+ * because we don't know when they will get de-linked from the tree.
*/
rbuf = InvalidBuffer;
@@ -501,17 +504,16 @@ _bt_findinsertloc(Relation rel,
}
/*
- * Now we are on the right page, so find the insert position. If we
- * moved right at all, we know we should insert at the start of the
- * page. If we didn't move right, we can use the firstlegaloff hint
- * if the caller supplied one, unless we vacuumed the page which
- * might have moved tuples around making the hint invalid. If we
- * didn't move right or can't use the hint, find the position
- * by searching.
+ * Now we are on the right page, so find the insert position. If we moved
+ * right at all, we know we should insert at the start of the page. If we
+ * didn't move right, we can use the firstlegaloff hint if the caller
+ * supplied one, unless we vacuumed the page which might have moved tuples
+ * around making the hint invalid. If we didn't move right or can't use
+ * the hint, find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
- else if(firstlegaloff != InvalidOffsetNumber && !vacuumed)
+ else if (firstlegaloff != InvalidOffsetNumber && !vacuumed)
newitemoff = firstlegaloff;
else
newitemoff = _bt_binsrch(rel, buf, keysz, scankey, false);
@@ -982,8 +984,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* the data by reinserting it into a new left page. (XXX the latter
* comment is probably obsolete.)
*
- * We need to do this before writing the WAL record, so that XLogInsert can
- * WAL log an image of the page if necessary.
+ * We need to do this before writing the WAL record, so that XLogInsert
+ * can WAL log an image of the page if necessary.
*/
PageRestoreTempPage(leftpage, origpage);
@@ -1033,10 +1035,10 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* Log the new item and its offset, if it was inserted on the left
* page. (If it was put on the right page, we don't need to explicitly
* WAL log it because it's included with all the other items on the
- * right page.) Show the new item as belonging to the left page buffer,
- * so that it is not stored if XLogInsert decides it needs a full-page
- * image of the left page. We store the offset anyway, though, to
- * support archive compression of these records.
+ * right page.) Show the new item as belonging to the left page
+ * buffer, so that it is not stored if XLogInsert decides it needs a
+ * full-page image of the left page. We store the offset anyway,
+ * though, to support archive compression of these records.
*/
if (newitemonleft)
{
@@ -1052,31 +1054,31 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = (char *) newitem;
lastrdata->len = MAXALIGN(newitemsz);
- lastrdata->buffer = buf; /* backup block 1 */
+ lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
else
{
/*
- * Although we don't need to WAL-log the new item, we still
- * need XLogInsert to consider storing a full-page image of the
- * left page, so make an empty entry referencing that buffer.
- * This also ensures that the left page is always backup block 1.
+ * Although we don't need to WAL-log the new item, we still need
+ * XLogInsert to consider storing a full-page image of the left
+ * page, so make an empty entry referencing that buffer. This also
+ * ensures that the left page is always backup block 1.
*/
lastrdata->next = lastrdata + 1;
lastrdata++;
lastrdata->data = NULL;
lastrdata->len = 0;
- lastrdata->buffer = buf; /* backup block 1 */
+ lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
/*
* Log the contents of the right page in the format understood by
* _bt_restore_page(). We set lastrdata->buffer to InvalidBuffer,
- * because we're going to recreate the whole page anyway, so it
- * should never be stored by XLogInsert.
+ * because we're going to recreate the whole page anyway, so it should
+ * never be stored by XLogInsert.
*
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
@@ -1101,7 +1103,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = NULL;
lastrdata->len = 0;
- lastrdata->buffer = sbuf; /* backup block 2 */
+ lastrdata->buffer = sbuf; /* backup block 2 */
lastrdata->buffer_std = true;
}
@@ -1275,9 +1277,10 @@ _bt_findsplitloc(Relation rel,
olddataitemstoleft += itemsz;
}
- /* If the new item goes as the last item, check for splitting so that
- * all the old items go to the left page and the new item goes to the
- * right page.
+ /*
+ * If the new item goes as the last item, check for splitting so that all
+ * the old items go to the left page and the new item goes to the right
+ * page.
*/
if (newitemoff > maxoff && !goodenoughfound)
_bt_checksplitloc(&state, newitemoff, false, olddataitemstotal, 0);
@@ -1314,16 +1317,16 @@ _bt_checksplitloc(FindSplitData *state,
int olddataitemstoleft,
Size firstoldonrightsz)
{
- int leftfree,
- rightfree;
- Size firstrightitemsz;
- bool newitemisfirstonright;
+ int leftfree,
+ rightfree;
+ Size firstrightitemsz;
+ bool newitemisfirstonright;
/* Is the new item going to be the first item on the right page? */
newitemisfirstonright = (firstoldonright == state->newitemoff
&& !newitemonleft);
- if(newitemisfirstonright)
+ if (newitemisfirstonright)
firstrightitemsz = state->newitemsz;
else
firstrightitemsz = firstoldonrightsz;
@@ -1334,9 +1337,8 @@ _bt_checksplitloc(FindSplitData *state,
(state->olddataitemstotal - olddataitemstoleft);
/*
- * The first item on the right page becomes the high key of the
- * left page; therefore it counts against left space as well as right
- * space.
+ * The first item on the right page becomes the high key of the left page;
+ * therefore it counts against left space as well as right space.
*/
leftfree -= firstrightitemsz;
@@ -1875,8 +1877,8 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Scan over all items to see which ones need to be deleted
- * according to LP_DEAD flags.
+ * Scan over all items to see which ones need to be deleted according to
+ * LP_DEAD flags.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
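[Note on the nbtinsert.c hunks above: the split-location comments include the rule that the first item on the right page also becomes the left page's high key, so its size counts against both halves. A minimal sketch of that accounting, with invented names and sizes:]

#include <stdio.h>

/* Charge the would-be first right item against both halves, per the rule
 * that it also becomes the left page's high key. */
static void
split_free_space(int pagesize, int leftbytes, int rightbytes,
                 int firstrightitemsz, int *leftfree, int *rightfree)
{
    *leftfree = pagesize - leftbytes;
    *rightfree = pagesize - rightbytes;
    *leftfree -= firstrightitemsz;  /* becomes high key of the left page */
}

int
main(void)
{
    int leftfree, rightfree;

    split_free_space(8192, 4000, 4200, 32, &leftfree, &rightfree);
    printf("left free %d, right free %d\n", leftfree, rightfree);
    return 0;
}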
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index f62e4b3c5ee..8eee5a74cc7 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.103 2007/09/12 22:10:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.104 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -751,8 +751,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
/*
* In recovery mode, assume the deletion being replayed is valid. We
- * can't always check it because we won't have a full search stack,
- * and we should complain if there's a problem, anyway.
+ * can't always check it because we won't have a full search stack, and we
+ * should complain if there's a problem, anyway.
*/
if (InRecovery)
return true;
@@ -781,8 +781,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
{
/*
* It's only child, so safe if parent would itself be removable.
- * We have to check the parent itself, and then recurse to
- * test the conditions at the parent's parent.
+ * We have to check the parent itself, and then recurse to test
+ * the conditions at the parent's parent.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque))
{
@@ -887,18 +887,18 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
/*
- * To avoid deadlocks, we'd better drop the target page lock before
- * going further.
+ * To avoid deadlocks, we'd better drop the target page lock before going
+ * further.
*/
_bt_relbuf(rel, buf);
/*
- * We need an approximate pointer to the page's parent page. We use
- * the standard search mechanism to search for the page's high key; this
- * will give us a link to either the current parent or someplace to its
- * left (if there are multiple equal high keys). In recursion cases,
- * the caller already generated a search stack and we can just re-use
- * that work.
+ * We need an approximate pointer to the page's parent page. We use the
+ * standard search mechanism to search for the page's high key; this will
+ * give us a link to either the current parent or someplace to its left
+ * (if there are multiple equal high keys). In recursion cases, the
+ * caller already generated a search stack and we can just re-use that
+ * work.
*/
if (stack == NULL)
{
@@ -933,11 +933,11 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
- * facilities not available in recovery mode). Instead, just
- * set up a dummy stack pointing to the left end of the parent
- * tree level, from which _bt_getstackbuf will walk right to the
- * parent page. Painful, but we don't care too much about
- * performance in this scenario.
+ * facilities not available in recovery mode). Instead, just set
+ * up a dummy stack pointing to the left end of the parent tree
+ * level, from which _bt_getstackbuf will walk right to the parent
+ * page. Painful, but we don't care too much about performance in
+ * this scenario.
*/
pbuf = _bt_get_endpoint(rel, targetlevel + 1, false);
stack = (BTStack) palloc(sizeof(BTStackData));
@@ -951,10 +951,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* We cannot delete a page that is the rightmost child of its immediate
- * parent, unless it is the only child --- in which case the parent has
- * to be deleted too, and the same condition applies recursively to it.
- * We have to check this condition all the way up before trying to delete.
- * We don't need to re-test when deleting a non-leaf page, though.
+ * parent, unless it is the only child --- in which case the parent has to
+ * be deleted too, and the same condition applies recursively to it. We
+ * have to check this condition all the way up before trying to delete. We
+ * don't need to re-test when deleting a non-leaf page, though.
*/
if (targetlevel == 0 &&
!_bt_parent_deletion_safe(rel, target, stack))
@@ -1072,8 +1072,8 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
* might be possible to push the fast root even further down, but the odds
* of doing so are slim, and the locking considerations daunting.)
*
- * We don't support handling this in the case where the parent is
- * becoming half-dead, even though it theoretically could occur.
+ * We don't support handling this in the case where the parent is becoming
+ * half-dead, even though it theoretically could occur.
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@@ -1287,10 +1287,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
_bt_relbuf(rel, lbuf);
/*
- * If parent became half dead, recurse to delete it. Otherwise, if
- * right sibling is empty and is now the last child of the parent, recurse
- * to try to delete it. (These cases cannot apply at the same time,
- * though the second case might itself recurse to the first.)
+ * If parent became half dead, recurse to delete it. Otherwise, if right
+ * sibling is empty and is now the last child of the parent, recurse to
+ * try to delete it. (These cases cannot apply at the same time, though
+ * the second case might itself recurse to the first.)
*
* When recursing to parent, we hold the lock on the target page until
* done. This delays any insertions into the keyspace that was just
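[Note on the rewrapped comments above: they restate the page-deletion rule, namely that a rightmost child may be deleted only if it is its parent's only child, and then the same test recurses upward. A toy rendering over stub nodes (structures invented for illustration):]

#include <stdio.h>
#include <stdbool.h>

typedef struct NodeStub
{
    bool rightmost;             /* is this page the rightmost child? */
    bool only_child;            /* is it the parent's only child? */
    struct NodeStub *parent;    /* NULL at the root */
} NodeStub;

/* Mirror of the recursive safety rule: deletable unless it is a
 * rightmost-but-not-only child somewhere up the chain. */
static bool
deletion_safe(const NodeStub *page)
{
    if (page->parent == NULL)
        return false;           /* never delete the root this way */
    if (!page->rightmost)
        return true;
    if (!page->only_child)
        return false;
    return deletion_safe(page->parent);
}

int
main(void)
{
    NodeStub root = {true, true, NULL};
    NodeStub mid  = {true, true, &root};
    NodeStub leaf = {true, false, &mid};    /* rightmost but not only child */

    printf("leaf deletable: %s\n", deletion_safe(&leaf) ? "yes" : "no");
    return 0;
}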
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index b947d770aa2..7b71f544f86 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.113 2007/05/27 03:50:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.114 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -637,17 +637,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
+ * scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
- * did use has to be treated as just a ">=" or "<=" condition,
- * and so we'd better adjust strat_total accordingly.
+ * did use has to be treated as just a ">=" or "<=" condition, and
+ * so we'd better adjust strat_total accordingly.
*/
if (i == keysCount - 1)
{
bool used_all_subkeys = false;
Assert(!(subkey->sk_flags & SK_ROW_END));
- for(;;)
+ for (;;)
{
subkey++;
Assert(subkey->sk_flags & SK_ROW_MEMBER);
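[Note on the row-comparison comment above: it rests on a property of lexicographic order, namely that any row satisfying (a,b) > (x,y) necessarily has a >= x, which is why all but the last usable row member can serve as ">=" positioning keys. A small demonstration over two integer columns:]

#include <stdio.h>
#include <stdbool.h>

/* Lexicographic "row > row" over two int columns. */
static bool
row_gt(int a1, int a2, int b1, int b2)
{
    if (a1 != b1)
        return a1 > b1;
    return a2 > b2;
}

int
main(void)
{
    /* Every row with (a,b) > (5,7) has a >= 5, so the leading member
     * can be used as a ">=" positioning key even for a ">" row qual. */
    printf("(5,8) > (5,7): %d\n", row_gt(5, 8, 5, 7));
    printf("(4,9) > (5,7): %d\n", row_gt(4, 9, 5, 7));
    return 0;
}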
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 6d85695c3d3..a1b0125f787 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.86 2007/09/12 22:10:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.87 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -205,7 +205,7 @@ _bt_freestack(BTStack stack)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -265,13 +265,13 @@ _bt_preprocess_keys(IndexScanDesc scan)
{
/*
* We treat all btree operators as strict (even if they're not so
- * marked in pg_proc). This means that it is impossible for an
- * operator condition with a NULL comparison constant to succeed,
- * and we can reject it right away.
+ * marked in pg_proc). This means that it is impossible for an
+ * operator condition with a NULL comparison constant to succeed, and
+ * we can reject it right away.
*
* However, we now also support "x IS NULL" clauses as search
- * conditions, so in that case keep going. The planner has not
- * filled in any particular strategy in this case, so set it to
+ * conditions, so in that case keep going. The planner has not filled
+ * in any particular strategy in this case, so set it to
* BTEqualStrategyNumber --- we can treat IS NULL as an equality
* operator for purposes of search strategy.
*/
@@ -303,8 +303,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Initialize for processing of keys for attr 1.
*
- * xform[i] points to the currently best scan key of strategy type i+1;
- * it is NULL if we haven't yet found such a key for this attr.
+ * xform[i] points to the currently best scan key of strategy type i+1; it
+ * is NULL if we haven't yet found such a key for this attr.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
@@ -464,6 +464,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
memcpy(outkey, cur, sizeof(ScanKeyData));
if (numberOfEqualCols == attno - 1)
_bt_mark_scankey_required(outkey);
+
/*
* We don't support RowCompare using equality; such a qual would
* mess up the numberOfEqualCols tracking.
@@ -514,9 +515,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
else
{
/*
- * We can't determine which key is more restrictive. Keep
- * the previous one in xform[j] and push this one directly
- * to the output array.
+ * We can't determine which key is more restrictive. Keep the
+ * previous one in xform[j] and push this one directly to the
+ * output array.
*/
ScanKey outkey = &outkeys[new_numberOfKeys++];
@@ -542,7 +543,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
+ * may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -608,8 +609,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
* indexscan initiated by syscache lookup will use cross-data-type
* operators.)
*
- * If the sk_strategy was flipped by _bt_mark_scankey_with_indoption,
- * we have to un-flip it to get the correct opfamily member.
+ * If the sk_strategy was flipped by _bt_mark_scankey_with_indoption, we
+ * have to un-flip it to get the correct opfamily member.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_DESC)
@@ -654,7 +655,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
static void
_bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
{
- int addflags;
+ int addflags;
addflags = indoption[skey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;
if ((addflags & SK_BT_DESC) && !(skey->sk_flags & SK_BT_DESC))
@@ -874,8 +875,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual is
- * one of the "must match" subset. On a forward scan,
+ * index attr. On a backward scan, we can stop if this qual
+ * is one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQBKWD) &&
@@ -887,8 +888,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQFWD) &&
@@ -978,7 +979,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
- * one of the "must match" subset. On a forward scan,
+ * one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQBKWD) &&
@@ -991,7 +992,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1264,8 +1265,8 @@ _bt_start_vacuum(Relation rel)
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/*
- * Assign the next cycle ID, being careful to avoid zero as well as
- * the reserved high values.
+ * Assign the next cycle ID, being careful to avoid zero as well as the
+ * reserved high values.
*/
result = ++(btvacinfo->cycle_ctr);
if (result == 0 || result > MAX_BT_CYCLE_ID)
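[Note on the _bt_preprocess_keys comments above: per strategy, only the most restrictive key is kept when two keys can be compared. A reduced sketch for one integer attribute, mimicking the xform[] slots; the strategy set and data are invented, and the real code also handles cross-type keys it cannot compare by pushing them straight to the output:]

#include <stdio.h>

typedef enum { STRAT_LT, STRAT_EQ, STRAT_GT, NUM_STRATS } Strategy;

typedef struct { Strategy strat; int arg; } Key;

int
main(void)
{
    Key input[] = {{STRAT_GT, 3}, {STRAT_GT, 7}, {STRAT_LT, 100}, {STRAT_LT, 42}};
    Key *best[NUM_STRATS] = {NULL, NULL, NULL};     /* like xform[] */

    for (int i = 0; i < 4; i++)
    {
        Key *k = &input[i];
        Key **slot = &best[k->strat];

        if (*slot == NULL)
            *slot = k;
        else if (k->strat == STRAT_GT && k->arg > (*slot)->arg)
            *slot = k;          /* larger lower bound is tighter */
        else if (k->strat == STRAT_LT && k->arg < (*slot)->arg)
            *slot = k;          /* smaller upper bound is tighter */
    }

    for (int s = 0; s < NUM_STRATS; s++)
        if (best[s])
            printf("strategy %d: keep arg %d\n", s, best[s]->arg);
    return 0;
}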
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 499129c48f1..79aae66201f 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.46 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.47 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ typedef struct bt_incomplete_action
BlockNumber rightblk; /* right half of split */
/* these fields are for a delete: */
BlockNumber delblk; /* parent block to be deleted */
-} bt_incomplete_action;
+} bt_incomplete_action;
static List *incomplete_actions;
@@ -271,8 +271,8 @@ btree_xlog_split(bool onleft, bool isroot,
char *datapos;
int datalen;
OffsetNumber newitemoff = 0;
- Item newitem = NULL;
- Size newitemsz = 0;
+ Item newitem = NULL;
+ Size newitemsz = 0;
reln = XLogOpenRelation(xlrec->node);
@@ -343,15 +343,15 @@ btree_xlog_split(bool onleft, bool isroot,
* Reconstruct left (original) sibling if needed. Note that this code
* ensures that the items remaining on the left page are in the correct
* item number order, but it does not reproduce the physical order they
- * would have had. Is this worth changing? See also _bt_restore_page().
+ * would have had. Is this worth changing? See also _bt_restore_page().
*/
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
- Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
+ Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
if (BufferIsValid(lbuf))
{
- Page lpage = (Page) BufferGetPage(lbuf);
+ Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
if (!XLByteLE(lsn, PageGetLSN(lpage)))
@@ -359,19 +359,20 @@ btree_xlog_split(bool onleft, bool isroot,
OffsetNumber off;
OffsetNumber maxoff = PageGetMaxOffsetNumber(lpage);
OffsetNumber deletable[MaxOffsetNumber];
- int ndeletable = 0;
- ItemId hiItemId;
- Item hiItem;
+ int ndeletable = 0;
+ ItemId hiItemId;
+ Item hiItem;
/*
- * Remove the items from the left page that were copied to
- * the right page. Also remove the old high key, if any.
- * (We must remove everything before trying to insert any
- * items, else we risk not having enough space.)
+ * Remove the items from the left page that were copied to the
+ * right page. Also remove the old high key, if any. (We must
+ * remove everything before trying to insert any items, else
+ * we risk not having enough space.)
*/
if (!P_RIGHTMOST(lopaque))
{
deletable[ndeletable++] = P_HIKEY;
+
/*
* newitemoff is given to us relative to the original
* page's item numbering, so adjust it for this deletion.
@@ -421,11 +422,11 @@ btree_xlog_split(bool onleft, bool isroot,
/* Fix left-link of the page to the right of the new right sibling */
if (xlrec->rnext != P_NONE && !(record->xl_info & XLR_BKP_BLOCK_2))
{
- Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
+ Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
if (BufferIsValid(buffer))
{
- Page page = (Page) BufferGetPage(buffer);
+ Page page = (Page) BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -795,7 +796,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -807,7 +808,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -819,7 +820,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -831,7 +832,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 419c8656065..72be0e855a7 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -14,19 +14,19 @@
* CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. For synchronous
+ * on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
- * XLOG that far and satisfy the WAL rule. We don't have to worry about this
+ * XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.44 2007/09/05 18:10:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.45 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,8 +60,8 @@
#define TransactionIdToBIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_BYTE)
/* We store the latest async LSN for each group of transactions */
-#define CLOG_XACTS_PER_LSN_GROUP 32 /* keep this a power of 2 */
-#define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)
+#define CLOG_XACTS_PER_LSN_GROUP 32 /* keep this a power of 2 */
+#define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)
#define GetLSNIndex(slotno, xid) ((slotno) * CLOG_LSNS_PER_PAGE + \
((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_LSN_GROUP)
@@ -85,7 +85,7 @@ static void WriteTruncateXlogRec(int pageno);
* Record the final state of a transaction in the commit log.
*
* lsn must be the WAL location of the commit record when recording an async
- * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
+ * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
@@ -159,7 +159,7 @@ TransactionIdSetStatus(TransactionId xid, XidStatus status, XLogRecPtr lsn)
* an LSN that is late enough to be able to guarantee that if we flush up to
* that LSN then we will have flushed the transaction's commit record to disk.
* The result is not necessarily the exact LSN of the transaction's commit
- * record! For example, for long-past transactions (those whose clog pages
+ * record! For example, for long-past transactions (those whose clog pages
* already migrated to disk), we'll return InvalidXLogRecPtr. Also, because
* we group transactions on the same clog page to conserve storage, we might
* return the LSN of a later transaction that falls into the same group.
@@ -486,8 +486,8 @@ clog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&pageno, XLogRecGetData(record), sizeof(int));
/*
- * During XLOG replay, latest_page_number isn't set up yet; insert
- * a suitable value to bypass the sanity test in SimpleLruTruncate.
+ * During XLOG replay, latest_page_number isn't set up yet; insert a
+ * suitable value to bypass the sanity test in SimpleLruTruncate.
*/
ClogCtl->shared->latest_page_number = pageno;
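[Note on the LSN-group macros shown in the clog.c hunk: they map each transaction to one stored LSN per group of 32 XIDs on its page. The same macros made standalone; BLCKSZ and the two-bits-per-transaction packing are assumed here, as they are defined outside this diff:]

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;

#define BLCKSZ 8192                     /* assumed standard page size */
#define CLOG_XACTS_PER_BYTE 4           /* two status bits per xact (assumed) */
#define CLOG_XACTS_PER_PAGE (BLCKSZ * CLOG_XACTS_PER_BYTE)
#define CLOG_XACTS_PER_LSN_GROUP 32     /* keep this a power of 2 */
#define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)

#define GetLSNIndex(slotno, xid) ((slotno) * CLOG_LSNS_PER_PAGE + \
    ((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_LSN_GROUP)

int
main(void)
{
    TransactionId xid = 100000;
    int slotno = 3;                     /* which SLRU buffer holds the page */

    printf("xid %u -> lsn slot %d\n", xid, (int) GetLSNIndex(slotno, xid));
    return 0;
}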
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index b34fa9be785..61a59961d71 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.25 2007/09/05 18:10:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.26 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -380,9 +380,9 @@ MultiXactIdIsRunning(MultiXactId multi)
}
/*
- * Checking for myself is cheap compared to looking in shared memory,
- * so first do the equivalent of MultiXactIdIsCurrent(). This is not
- * needed for correctness, it's just a fast path.
+ * Checking for myself is cheap compared to looking in shared memory, so
+ * first do the equivalent of MultiXactIdIsCurrent(). This is not needed
+ * for correctness, it's just a fast path.
*/
for (i = 0; i < nmembers; i++)
{
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index e53b05e04d5..db0f79c47c4 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.71 2007/09/08 20:31:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.72 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -440,14 +440,14 @@ TransactionId
TransactionIdLatest(TransactionId mainxid,
int nxids, const TransactionId *xids)
{
- TransactionId result;
+ TransactionId result;
/*
- * In practice it is highly likely that the xids[] array is sorted, and
- * so we could save some cycles by just taking the last child XID, but
- * this probably isn't so performance-critical that it's worth depending
- * on that assumption. But just to show we're not totally stupid, scan
- * the array back-to-front to avoid useless assignments.
+ * In practice it is highly likely that the xids[] array is sorted, and so
+ * we could save some cycles by just taking the last child XID, but this
+ * probably isn't so performance-critical that it's worth depending on
+ * that assumption. But just to show we're not totally stupid, scan the
+ * array back-to-front to avoid useless assignments.
*/
result = mainxid;
while (--nxids >= 0)
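[Note on the rewrapped comment above: TransactionIdLatest scans back-to-front so that, when xids[] is sorted, the last element is already the latest and later iterations assign nothing. A standalone version, using the circular (modulo 2^32) "follows" test that applies to normal XIDs:]

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;

/* Circular comparison: valid across 32-bit XID wraparound for normal XIDs. */
static int
xid_follows(TransactionId a, TransactionId b)
{
    return (int32_t) (a - b) > 0;
}

static TransactionId
latest_xid(TransactionId mainxid, int nxids, const TransactionId *xids)
{
    TransactionId result = mainxid;

    /* Back-to-front: in the common sorted case the last child is the
     * latest, so the loop rarely assigns anything after the first test. */
    while (--nxids >= 0)
    {
        if (xid_follows(xids[nxids], result))
            result = xids[nxids];
    }
    return result;
}

int
main(void)
{
    TransactionId children[] = {1001, 1002, 1005};

    printf("latest: %u\n", latest_xid(1000, 3, children));
    return 0;
}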
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 6ce9d1b5864..2888adbc374 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.37 2007/10/24 20:55:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.38 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -397,15 +397,15 @@ LockGXact(const char *gid, Oid user)
errhint("Must be superuser or the user that prepared the transaction.")));
/*
- * Note: it probably would be possible to allow committing from another
- * database; but at the moment NOTIFY is known not to work and there
- * may be some other issues as well. Hence disallow until someone
- * gets motivated to make it work.
+ * Note: it probably would be possible to allow committing from
+ * another database; but at the moment NOTIFY is known not to work and
+ * there may be some other issues as well. Hence disallow until
+ * someone gets motivated to make it work.
*/
if (MyDatabaseId != gxact->proc.databaseId)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("prepared transaction belongs to another database"),
+ errmsg("prepared transaction belongs to another database"),
errhint("Connect to the database where the transaction was prepared to finish it.")));
/* OK for me to lock it */
@@ -937,11 +937,11 @@ EndPrepare(GlobalTransaction gxact)
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
- * We have to set inCommit here, too; otherwise a checkpoint
- * starting immediately after the WAL record is inserted could complete
- * without fsync'ing our state file. (This is essentially the same kind
- * of race condition as the COMMIT-to-clog-write case that
- * RecordTransactionCommit uses inCommit for; see notes there.)
+ * We have to set inCommit here, too; otherwise a checkpoint starting
+ * immediately after the WAL record is inserted could complete without
+ * fsync'ing our state file. (This is essentially the same kind of race
+ * condition as the COMMIT-to-clog-write case that RecordTransactionCommit
+ * uses inCommit for; see notes there.)
*
* We save the PREPARE record's location in the gxact for later use by
* CheckPointTwoPhase.
@@ -985,8 +985,8 @@ EndPrepare(GlobalTransaction gxact)
MarkAsPrepared(gxact);
/*
- * Now we can mark ourselves as out of the commit critical section:
- * a checkpoint starting after this will certainly see the gxact as a
+ * Now we can mark ourselves as out of the commit critical section: a
+ * checkpoint starting after this will certainly see the gxact as a
* candidate for fsyncing.
*/
MyProc->inCommit = false;
@@ -1272,8 +1272,8 @@ RemoveTwoPhaseFile(TransactionId xid, bool giveWarning)
if (errno != ENOENT || giveWarning)
ereport(WARNING,
(errcode_for_file_access(),
- errmsg("could not remove two-phase state file \"%s\": %m",
- path)));
+ errmsg("could not remove two-phase state file \"%s\": %m",
+ path)));
}
/*
@@ -1500,8 +1500,8 @@ PrescanPreparedTransactions(void)
if (buf == NULL)
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file \"%s\"",
- clde->d_name)));
+ (errmsg("removing corrupt two-phase state file \"%s\"",
+ clde->d_name)));
RemoveTwoPhaseFile(xid, true);
continue;
}
@@ -1511,8 +1511,8 @@ PrescanPreparedTransactions(void)
if (!TransactionIdEquals(hdr->xid, xid))
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file \"%s\"",
- clde->d_name)));
+ (errmsg("removing corrupt two-phase state file \"%s\"",
+ clde->d_name)));
RemoveTwoPhaseFile(xid, true);
pfree(buf);
continue;
@@ -1599,8 +1599,8 @@ RecoverPreparedTransactions(void)
if (buf == NULL)
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file \"%s\"",
- clde->d_name)));
+ (errmsg("removing corrupt two-phase state file \"%s\"",
+ clde->d_name)));
RemoveTwoPhaseFile(xid, true);
continue;
}
@@ -1711,9 +1711,9 @@ RecordTransactionCommitPrepared(TransactionId xid,
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT_PREPARED, rdata);
/*
- * We don't currently try to sleep before flush here ... nor is there
- * any support for async commit of a prepared xact (the very idea is
- * probably a contradiction)
+ * We don't currently try to sleep before flush here ... nor is there any
+ * support for async commit of a prepared xact (the very idea is probably
+ * a contradiction)
*/
/* Flush XLOG to disk */
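[Note on the EndPrepare hunks above: the point of the rewrapped comments is ordering. inCommit is raised before the PREPARE WAL record is inserted and stays raised until the state file is durable, so a checkpoint starting inside that window cannot complete without seeing the state file. A schematic of the bracketing only; the functions are stubs, not real WAL or fsync calls:]

#include <stdio.h>
#include <stdbool.h>

static void write_wal_record(void) { printf("WAL record inserted\n"); }
static void fsync_state_file(void) { printf("state file fsynced\n"); }

static volatile bool inCommit = false;  /* a checkpoint waits while this is set */

int
main(void)
{
    inCommit = true;            /* open the commit critical section */
    write_wal_record();
    fsync_state_file();
    inCommit = false;           /* now a checkpoint may proceed past us */
    printf("commit critical section closed\n");
    return 0;
}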
diff --git a/src/backend/access/transam/twophase_rmgr.c b/src/backend/access/transam/twophase_rmgr.c
index 9c2f14a1a38..84d1e9caefc 100644
--- a/src/backend/access/transam/twophase_rmgr.c
+++ b/src/backend/access/transam/twophase_rmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.5 2007/05/27 03:50:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.6 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@ const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
lock_twophase_postcommit, /* Lock */
inval_twophase_postcommit, /* Inval */
flatfile_twophase_postcommit, /* flat file update */
- notify_twophase_postcommit, /* notify/listen */
+ notify_twophase_postcommit, /* notify/listen */
pgstat_twophase_postcommit /* pgstat */
};
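[Note on the table above: it is a dispatch array indexed by two-phase resource-manager id, one post-commit callback per slot. The pattern in miniature, with invented callback names:]

#include <stdio.h>

typedef void (*TwoPhaseCallback) (int xid);

static void lock_cb(int xid)  { printf("lock postcommit for %d\n", xid); }
static void inval_cb(int xid) { printf("inval postcommit for %d\n", xid); }

/* Dispatch table in the style of twophase_postcommit_callbacks[]. */
static const TwoPhaseCallback postcommit[] = {
    lock_cb,
    inval_cb,
};

int
main(void)
{
    for (int rmid = 0; rmid < 2; rmid++)
        postcommit[rmid](42);
    return 0;
}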
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 14332c6ab26..d7a5183d4cd 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.79 2007/09/08 20:31:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.80 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,9 +73,9 @@ GetNewTransactionId(bool isSubXact)
TransactionIdIsValid(ShmemVariableCache->xidVacLimit))
{
/*
- * To avoid swamping the postmaster with signals, we issue the
- * autovac request only once per 64K transaction starts. This
- * still gives plenty of chances before we get into real trouble.
+ * To avoid swamping the postmaster with signals, we issue the autovac
+ * request only once per 64K transaction starts. This still gives
+ * plenty of chances before we get into real trouble.
*/
if (IsUnderPostmaster && (xid % 65536) == 0)
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
@@ -119,9 +119,9 @@ GetNewTransactionId(bool isSubXact)
/*
* We must store the new XID into the shared ProcArray before releasing
- * XidGenLock. This ensures that every active XID older than
- * latestCompletedXid is present in the ProcArray, which is essential
- * for correct OldestXmin tracking; see src/backend/access/transam/README.
+ * XidGenLock. This ensures that every active XID older than
+ * latestCompletedXid is present in the ProcArray, which is essential for
+ * correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
@@ -249,18 +249,18 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
xidWarnLimit -= FirstNormalTransactionId;
/*
- * We'll start trying to force autovacuums when oldest_datfrozenxid
- * gets to be more than autovacuum_freeze_max_age transactions old.
+ * We'll start trying to force autovacuums when oldest_datfrozenxid gets
+ * to be more than autovacuum_freeze_max_age transactions old.
*
- * Note: guc.c ensures that autovacuum_freeze_max_age is in a sane
- * range, so that xidVacLimit will be well before xidWarnLimit.
+ * Note: guc.c ensures that autovacuum_freeze_max_age is in a sane range,
+ * so that xidVacLimit will be well before xidWarnLimit.
*
* Note: autovacuum_freeze_max_age is a PGC_POSTMASTER parameter so that
* we don't have to worry about dealing with on-the-fly changes in its
* value. It doesn't look practical to update shared state from a GUC
* assign hook (too many processes would try to execute the hook,
- * resulting in race conditions as well as crashes of those not
- * connected to shared memory). Perhaps this can be improved someday.
+ * resulting in race conditions as well as crashes of those not connected
+ * to shared memory). Perhaps this can be improved someday.
*/
xidVacLimit = oldest_datfrozenxid + autovacuum_freeze_max_age;
if (xidVacLimit < FirstNormalTransactionId)
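[Note on the varsup.c hunk above: it is cut just after the xidVacLimit computation. The pattern this file uses for such limits bumps any value that lands among the reserved XIDs (0..2) past FirstNormalTransactionId; the bump line itself is outside the shown diff, so treat it as an assumption. A sketch of that wraparound-aware advance:]

#include <stdio.h>
#include <stdint.h>

typedef uint32_t TransactionId;

#define FirstNormalTransactionId ((TransactionId) 3)    /* 0..2 are reserved */

/* Advance an XID by 'delta', skipping the reserved special XIDs when the
 * 32-bit counter wraps (pattern assumed from the limit computations here). */
static TransactionId
advance_xid(TransactionId xid, uint32_t delta)
{
    TransactionId result = xid + delta;

    if (result < FirstNormalTransactionId)
        result += FirstNormalTransactionId;
    return result;
}

int
main(void)
{
    /* Near wraparound: the raw sum lands on reserved XID 2, so it is
     * bumped to 5, a normal XID. */
    printf("%u\n", advance_xid(0xFFFFFFF0u, 0x12u));
    return 0;
}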
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index b7ab9585865..04804c38711 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.252 2007/11/10 14:36:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.253 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -274,8 +274,8 @@ IsTransactionState(void)
TransactionState s = CurrentTransactionState;
/*
- * TRANS_DEFAULT and TRANS_ABORT are obviously unsafe states. However,
- * we also reject the startup/shutdown states TRANS_START, TRANS_COMMIT,
+ * TRANS_DEFAULT and TRANS_ABORT are obviously unsafe states. However, we
+ * also reject the startup/shutdown states TRANS_START, TRANS_COMMIT,
* TRANS_PREPARE since it might be too soon or too late within those
* transition states to do anything interesting. Hence, the only "valid"
* state is TRANS_INPROGRESS.
@@ -372,7 +372,7 @@ GetCurrentTransactionIdIfAny(void)
static void
AssignTransactionId(TransactionState s)
{
- bool isSubXact = (s->parent != NULL);
+ bool isSubXact = (s->parent != NULL);
ResourceOwner currentOwner;
/* Assert that caller didn't screw up */
@@ -400,9 +400,9 @@ AssignTransactionId(TransactionState s)
SubTransSetParent(s->transactionId, s->parent->transactionId);
/*
- * Acquire lock on the transaction XID. (We assume this cannot block.)
- * We have to ensure that the lock is assigned to the transaction's
- * own ResourceOwner.
+ * Acquire lock on the transaction XID. (We assume this cannot block.) We
+ * have to ensure that the lock is assigned to the transaction's own
+ * ResourceOwner.
*/
currentOwner = CurrentResourceOwner;
PG_TRY();
@@ -626,9 +626,9 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
- * insulate AbortTransaction from out-of-memory scenarios. Like
- * ErrorContext, we set it up with slow growth rate and a nonzero
- * minimum size, so that space will be reserved immediately.
+ * insulate AbortTransaction from out-of-memory scenarios. Like
+ * ErrorContext, we set it up with slow growth rate and a nonzero minimum
+ * size, so that space will be reserved immediately.
*/
if (TransactionAbortContext == NULL)
TransactionAbortContext =
@@ -749,7 +749,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*
* This is exported only to support an ugly hack in VACUUM FULL.
*/
@@ -757,7 +757,7 @@ TransactionId
RecordTransactionCommit(void)
{
TransactionId xid = GetTopTransactionIdIfAny();
- bool markXidCommitted = TransactionIdIsValid(xid);
+ bool markXidCommitted = TransactionIdIsValid(xid);
TransactionId latestXid = InvalidTransactionId;
int nrels;
RelFileNode *rels;
@@ -770,29 +770,29 @@ RecordTransactionCommit(void)
nchildren = xactGetCommittedChildren(&children);
/*
- * If we haven't been assigned an XID yet, we neither can, nor do we
- * want to write a COMMIT record.
+ * If we haven't been assigned an XID yet, we neither can, nor do we want
+ * to write a COMMIT record.
*/
if (!markXidCommitted)
{
/*
* We expect that every smgrscheduleunlink is followed by a catalog
- * update, and hence XID assignment, so we shouldn't get here with
- * any pending deletes. Use a real test not just an Assert to check
- * this, since it's a bit fragile.
+ * update, and hence XID assignment, so we shouldn't get here with any
+ * pending deletes. Use a real test not just an Assert to check this,
+ * since it's a bit fragile.
*/
if (nrels != 0)
elog(ERROR, "cannot commit a transaction that deleted files but has no xid");
/* Can't have child XIDs either; AssignTransactionId enforces this */
Assert(nchildren == 0);
-
+
/*
* If we didn't create XLOG entries, we're done here; otherwise we
- * should flush those entries the same as a commit record. (An
+ * should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
- * assigned is a sequence advance record due to nextval() --- we
- * want to flush that to disk before reporting commit.)
+ * assigned is a sequence advance record due to nextval() --- we want
+ * to flush that to disk before reporting commit.)
*/
if (XactLastRecEnd.xrecoff == 0)
goto cleanup;
@@ -802,30 +802,29 @@ RecordTransactionCommit(void)
/*
* Begin commit critical section and insert the commit XLOG record.
*/
- XLogRecData rdata[3];
- int lastrdata = 0;
- xl_xact_commit xlrec;
+ XLogRecData rdata[3];
+ int lastrdata = 0;
+ xl_xact_commit xlrec;
/* Tell bufmgr and smgr to prepare for commit */
BufmgrCommit();
/*
- * Mark ourselves as within our "commit critical section". This
+ * Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
- * pg_clog. Without this, it is possible for the checkpoint to
- * set REDO after the XLOG record but fail to flush the pg_clog
- * update to disk, leading to loss of the transaction commit if
- * the system crashes a little later.
+ * pg_clog. Without this, it is possible for the checkpoint to set
+ * REDO after the XLOG record but fail to flush the pg_clog update to
+ * disk, leading to loss of the transaction commit if the system
+ * crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
- * RecordTransactionAbort. That's because loss of a transaction
- * abort is noncritical; the presumption would be that it aborted,
- * anyway.
+ * RecordTransactionAbort. That's because loss of a transaction abort
+ * is noncritical; the presumption would be that it aborted, anyway.
*
- * It's safe to change the inCommit flag of our own backend
- * without holding the ProcArrayLock, since we're the only one
- * modifying it. This makes checkpoint's determination of which
- * xacts are inCommit a bit fuzzy, but it doesn't matter.
+ * It's safe to change the inCommit flag of our own backend without
+ * holding the ProcArrayLock, since we're the only one modifying it.
+ * This makes checkpoint's determination of which xacts are inCommit a
+ * bit fuzzy, but it doesn't matter.
*/
START_CRIT_SECTION();
MyProc->inCommit = true;
@@ -864,7 +863,7 @@ RecordTransactionCommit(void)
* Check if we want to commit asynchronously. If the user has set
* synchronous_commit = off, and we're not doing cleanup of any non-temp
* rels nor committing any command that wanted to force sync commit, then
- * we can defer flushing XLOG. (We must not allow asynchronous commit if
+ * we can defer flushing XLOG. (We must not allow asynchronous commit if
* there are any non-temp tables to be deleted, because we might delete
* the files before the COMMIT record is flushed to disk. We do allow
* asynchronous commit if all to-be-deleted tables are temporary though,
@@ -875,15 +874,14 @@ RecordTransactionCommit(void)
/*
* Synchronous commit case.
*
- * Sleep before flush! So we can flush more than one commit
- * records per single fsync. (The idea is some other backend
- * may do the XLogFlush while we're sleeping. This needs work
- * still, because on most Unixen, the minimum select() delay
- * is 10msec or more, which is way too long.)
+	 * Sleep before flush! So we can flush more than one commit record
+ * per single fsync. (The idea is some other backend may do the
+ * XLogFlush while we're sleeping. This needs work still, because on
+ * most Unixen, the minimum select() delay is 10msec or more, which is
+ * way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if
- * there are fewer than CommitSiblings other backends with
- * active transactions.
+ * We do not sleep if enableFsync is not turned on, nor if there are
+ * fewer than CommitSiblings other backends with active transactions.
*/
if (CommitDelay > 0 && enableFsync &&
CountActiveBackends() >= CommitSiblings)
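
The group-commit window this comment describes resolves to a short sleep followed by the flush; a sketch of the statements around the condition shown in the hunk (pg_usleep and XLogFlush are the standard helpers; the exact sequence is an assumption about code just outside the hunk):

    if (CommitDelay > 0 && enableFsync &&
        CountActiveBackends() >= CommitSiblings)
        pg_usleep(CommitDelay);

    XLogFlush(XactLastRecEnd);
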
@@ -906,15 +904,15 @@ RecordTransactionCommit(void)
/*
* Asynchronous commit case.
*
- * Report the latest async commit LSN, so that
- * the WAL writer knows to flush this commit.
+ * Report the latest async commit LSN, so that the WAL writer knows to
+ * flush this commit.
*/
XLogSetAsyncCommitLSN(XactLastRecEnd);
/*
- * We must not immediately update the CLOG, since we didn't
- * flush the XLOG. Instead, we store the LSN up to which
- * the XLOG must be flushed before the CLOG may be updated.
+ * We must not immediately update the CLOG, since we didn't flush the
+ * XLOG. Instead, we store the LSN up to which the XLOG must be
+ * flushed before the CLOG may be updated.
*/
if (markXidCommitted)
{
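
Inside this brace, the deferred-CLOG idea in the comment comes down to recording the commit together with the LSN that must be flushed first; a sketch assuming the async-commit transam helpers of this release (the two function names are my recollection and should be treated as assumptions):

    if (markXidCommitted)
    {
        /* Mark committed in CLOG, tagged with the required flush LSN. */
        TransactionIdAsyncCommit(xid, XactLastRecEnd);
        TransactionIdAsyncCommitTree(nchildren, children, XactLastRecEnd);
    }
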
@@ -925,8 +923,8 @@ RecordTransactionCommit(void)
}
/*
- * If we entered a commit critical section, leave it now, and
- * let checkpoints proceed.
+ * If we entered a commit critical section, leave it now, and let
+ * checkpoints proceed.
*/
if (markXidCommitted)
{
@@ -1068,11 +1066,11 @@ RecordSubTransactionCommit(void)
* We do not log the subcommit in XLOG; it doesn't matter until the
* top-level transaction commits.
*
- * We must mark the subtransaction subcommitted in the CLOG if
- * it had a valid XID assigned. If it did not, nobody else will
- * ever know about the existence of this subxact. We don't
- * have to deal with deletions scheduled for on-commit here, since
- * they'll be reassigned to our parent (who might still abort).
+ * We must mark the subtransaction subcommitted in the CLOG if it had a
+ * valid XID assigned. If it did not, nobody else will ever know about
+ * the existence of this subxact. We don't have to deal with deletions
+ * scheduled for on-commit here, since they'll be reassigned to our parent
+ * (who might still abort).
*/
if (TransactionIdIsValid(xid))
{
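
Within this branch, marking the subcommit is a single transam call; a sketch, assuming the TransactionIdSubCommit helper of this era:

    if (TransactionIdIsValid(xid))
    {
        START_CRIT_SECTION();
        TransactionIdSubCommit(xid);    /* mark subcommitted in pg_clog */
        END_CRIT_SECTION();
    }
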
@@ -1095,7 +1093,7 @@ RecordSubTransactionCommit(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@@ -1106,15 +1104,15 @@ RecordTransactionAbort(bool isSubXact)
RelFileNode *rels;
int nchildren;
TransactionId *children;
- XLogRecData rdata[3];
- int lastrdata = 0;
- xl_xact_abort xlrec;
+ XLogRecData rdata[3];
+ int lastrdata = 0;
+ xl_xact_abort xlrec;
/*
- * If we haven't been assigned an XID, nobody will care whether we
- * aborted or not. Hence, we're done in that case. It does not matter
- * if we have rels to delete (note that this routine is not responsible
- * for actually deleting 'em). We cannot have any child XIDs, either.
+ * If we haven't been assigned an XID, nobody will care whether we aborted
+ * or not. Hence, we're done in that case. It does not matter if we have
+ * rels to delete (note that this routine is not responsible for actually
+ * deleting 'em). We cannot have any child XIDs, either.
*/
if (!TransactionIdIsValid(xid))
{
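
The no-XID early exit this comment motivates looks roughly like the following; the XactLastRecEnd reset is an assumption about the lines just past the hunk:

    if (!TransactionIdIsValid(xid))
    {
        /* Reset XactLastRecEnd until the next transaction writes XLOG. */
        if (!isSubXact)
            XactLastRecEnd.xrecoff = 0;
        return InvalidTransactionId;
    }
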
@@ -1128,7 +1126,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
- * crash would be that we aborted, anyway. For the same reason, we don't
+ * crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@@ -1189,10 +1187,10 @@ RecordTransactionAbort(bool isSubXact)
* having flushed the ABORT record to disk, because in event of a crash
* we'd be assumed to have aborted anyway.
*
- * The ordering here isn't critical but it seems best to mark the
- * parent first. This assures an atomic transition of all the
- * subtransactions to aborted state from the point of view of
- * concurrent TransactionIdDidAbort calls.
+ * The ordering here isn't critical but it seems best to mark the parent
+ * first. This assures an atomic transition of all the subtransactions to
+ * aborted state from the point of view of concurrent
+ * TransactionIdDidAbort calls.
*/
TransactionIdAbort(xid);
TransactionIdAbortTree(nchildren, children);
@@ -1231,9 +1229,9 @@ static void
AtAbort_Memory(void)
{
/*
- * Switch into TransactionAbortContext, which should have some free
- * space even if nothing else does. We'll work in this context until
- * we've finished cleaning up.
+ * Switch into TransactionAbortContext, which should have some free space
+ * even if nothing else does. We'll work in this context until we've
+ * finished cleaning up.
*
* It is barely possible to get here when we've not been able to create
* TransactionAbortContext yet; if so use TopMemoryContext.
@@ -1438,7 +1436,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
- * Advertise it in the proc array. We assume assignment of
+ * Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@@ -1449,8 +1447,8 @@ StartTransaction(void)
/*
* set transaction_timestamp() (a/k/a now()). We want this to be the same
* as the first command's statement_timestamp(), so don't do a fresh
- * GetCurrentTimestamp() call (which'd be expensive anyway). Also,
- * mark xactStopTimestamp as unset.
+ * GetCurrentTimestamp() call (which'd be expensive anyway). Also, mark
+ * xactStopTimestamp as unset.
*/
xactStartTimestamp = stmtStartTimestamp;
xactStopTimestamp = 0;
@@ -1576,8 +1574,8 @@ CommitTransaction(void)
PG_TRACE1(transaction__commit, MyProc->lxid);
/*
- * Let others know about no transaction in progress by me. Note that
- * this must be done _before_ releasing locks we hold and _after_
+ * Let others know about no transaction in progress by me. Note that this
+ * must be done _before_ releasing locks we hold and _after_
* RecordTransactionCommit.
*/
ProcArrayEndTransaction(MyProc, latestXid);
@@ -2503,7 +2501,7 @@ AbortCurrentTransaction(void)
* inside a function or multi-query querystring. (We will always fail if
* this is false, but it's convenient to centralize the check here instead of
* making callers do it.)
- * stmtType: statement type name, for error messages.
+ * stmtType: statement type name, for error messages.
*/
void
PreventTransactionChain(bool isTopLevel, const char *stmtType)
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 36adc20848e..3218c134e52 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.287 2007/11/15 20:36:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.288 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -80,7 +80,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it. This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
- * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
+ * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
@@ -287,7 +287,7 @@ typedef struct XLogCtlData
XLogwrtResult LogwrtResult;
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
- XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
+ XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
/* Protected by WALWriteLock: */
XLogCtlWrite Write;
@@ -737,8 +737,8 @@ begin:;
* full-block records into the non-full-block format.
*
* Note: we could just set the flag whenever !forcePageWrites, but
- * defining it like this leaves the info bit free for some potential
- * other use in records without any backup blocks.
+ * defining it like this leaves the info bit free for some potential other
+ * use in records without any backup blocks.
*/
if ((info & XLR_BKP_BLOCK_MASK) && !Insert->forcePageWrites)
info |= XLR_BKP_REMOVABLE;
@@ -1345,10 +1345,10 @@ static bool
XLogCheckpointNeeded(void)
{
/*
- * A straight computation of segment number could overflow 32
- * bits. Rather than assuming we have working 64-bit
- * arithmetic, we compare the highest-order bits separately,
- * and force a checkpoint immediately when they change.
+ * A straight computation of segment number could overflow 32 bits.
+ * Rather than assuming we have working 64-bit arithmetic, we compare the
+ * highest-order bits separately, and force a checkpoint immediately when
+ * they change.
*/
uint32 old_segno,
new_segno;
@@ -1361,7 +1361,7 @@ XLogCheckpointNeeded(void)
new_segno = (openLogId % XLogSegSize) * XLogSegsPerFile + openLogSeg;
new_highbits = openLogId / XLogSegSize;
if (new_highbits != old_highbits ||
- new_segno >= old_segno + (uint32) (CheckPointSegments-1))
+ new_segno >= old_segno + (uint32) (CheckPointSegments - 1))
return true;
return false;
}
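
As a self-contained illustration of the overflow-avoidance trick this comment describes, here is a minimal sketch in plain C; the names are mine, not PostgreSQL's:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Has (new_hi, new_lo) advanced at least 'threshold' units past
     * (old_hi, old_lo)?  Comparing the high-order words separately means
     * no 64-bit arithmetic is needed and nothing can overflow.
     */
    static bool
    advanced_by(uint32_t old_hi, uint32_t old_lo,
                uint32_t new_hi, uint32_t new_lo,
                uint32_t threshold)
    {
        if (new_hi != old_hi)
            return true;        /* high word moved: trigger immediately */
        return new_lo >= old_lo + threshold;
    }
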
@@ -1558,9 +1558,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
/*
* Signal bgwriter to start a checkpoint if we've consumed too
* much xlog since the last one. For speed, we first check
- * using the local copy of RedoRecPtr, which might be
- * out of date; if it looks like a checkpoint is needed,
- * forcibly update RedoRecPtr and recheck.
+ * using the local copy of RedoRecPtr, which might be out of
+ * date; if it looks like a checkpoint is needed, forcibly
+ * update RedoRecPtr and recheck.
*/
if (IsUnderPostmaster &&
XLogCheckpointNeeded())
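
The "forcibly update and recheck" step the comment promises completes the condition like this (a sketch; the checkpoint-request flag name is an assumption about this release):

    if (IsUnderPostmaster &&
        XLogCheckpointNeeded())
    {
        (void) GetRedoRecPtr();     /* refresh the local RedoRecPtr copy */
        if (XLogCheckpointNeeded())
            RequestCheckpoint(CHECKPOINT_CAUSE_XLOG);
    }
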
@@ -1779,9 +1779,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those. Thus, if async commits
- * are not being used, we will flush complete blocks only. We can guarantee
+ * are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
+ * one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
* at the end of the buffer ring; this makes a difference only with very high
* load or long wal_writer_delay, but imposes one extra cycle for the worst
* case for async commits.)
@@ -1861,6 +1861,7 @@ void
XLogAsyncCommitFlush(void)
{
XLogRecPtr WriteRqstPtr;
+
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@@ -2252,7 +2253,7 @@ InstallXLogFileSegment(uint32 *log, uint32 *seg, char *tmppath,
LWLockRelease(ControlFileLock);
return false;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
ereport(ERROR,
(errcode_for_file_access(),
@@ -2432,8 +2433,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
int rc;
bool signaled;
struct stat stat_buf;
- uint32 restartLog;
- uint32 restartSeg;
+ uint32 restartLog;
+ uint32 restartSeg;
/*
* When doing archive recovery, we always prefer an archived log file even
@@ -2511,8 +2512,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
sp++;
XLByteToSeg(ControlFile->checkPointCopy.redo,
restartLog, restartSeg);
- XLogFileName(lastRestartPointFname,
- ControlFile->checkPointCopy.ThisTimeLineID,
+ XLogFileName(lastRestartPointFname,
+ ControlFile->checkPointCopy.ThisTimeLineID,
restartLog, restartSeg);
StrNCpy(dp, lastRestartPointFname, endp - dp);
dp += strlen(dp);
@@ -2594,17 +2595,17 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* incorrectly. We have to assume the former.
*
* However, if the failure was due to any sort of signal, it's best to
- * punt and abort recovery. (If we "return false" here, upper levels
- * will assume that recovery is complete and start up the database!)
- * It's essential to abort on child SIGINT and SIGQUIT, because per spec
+ * punt and abort recovery. (If we "return false" here, upper levels will
+ * assume that recovery is complete and start up the database!) It's
+ * essential to abort on child SIGINT and SIGQUIT, because per spec
* system() ignores SIGINT and SIGQUIT while waiting; if we see one of
* those it's a good bet we should have gotten it too. Aborting on other
* signals such as SIGTERM seems a good idea as well.
*
- * Per the Single Unix Spec, shells report exit status > 128 when
- * a called command died on a signal. Also, 126 and 127 are used to
- * report problems such as an unfindable command; treat those as fatal
- * errors too.
+ * Per the Single Unix Spec, shells report exit status > 128 when a called
+ * command died on a signal. Also, 126 and 127 are used to report
+ * problems such as an unfindable command; treat those as fatal errors
+ * too.
*/
signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
@@ -3981,8 +3982,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d,"
- " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.",
- ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE),
+ " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.",
+ ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE),
errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
@@ -4430,7 +4431,7 @@ readRecoveryCommandFile(void)
*/
recoveryTargetTime =
DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
- CStringGetDatum(tok2),
+ CStringGetDatum(tok2),
ObjectIdGetDatum(InvalidOid),
Int32GetDatum(-1)));
ereport(LOG,
@@ -4629,7 +4630,7 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
{
bool stopsHere;
uint8 record_info;
- TimestampTz recordXtime;
+ TimestampTz recordXtime;
/* We only consider stopping at COMMIT or ABORT records */
if (record->xl_rmid != RM_XACT_ID)
@@ -4781,11 +4782,11 @@ StartupXLOG(void)
(errmsg("database system was interrupted while in recovery at log time %s",
str_time(ControlFile->checkPointCopy.time)),
errhint("If this has occurred more than once some data might be corrupted"
- " and you might need to choose an earlier recovery target.")));
+ " and you might need to choose an earlier recovery target.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
- (errmsg("database system was interrupted; last known up at %s",
- str_time(ControlFile->time))));
+ (errmsg("database system was interrupted; last known up at %s",
+ str_time(ControlFile->time))));
/* This is just to allow attaching to startup process with a debugger */
#ifdef XLOG_REPLAY_DELAY
@@ -4879,9 +4880,9 @@ StartupXLOG(void)
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
- (errmsg("redo record is at %X/%X; shutdown %s",
- checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
- wasShutdown ? "TRUE" : "FALSE")));
+ (errmsg("redo record is at %X/%X; shutdown %s",
+ checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
+ wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg("next transaction ID: %u/%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid,
@@ -4920,7 +4921,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
- (errmsg("invalid redo record in shutdown checkpoint")));
+ (errmsg("invalid redo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@@ -5045,7 +5046,7 @@ StartupXLOG(void)
*/
if (recoveryStopsHere(record, &recoveryApply))
{
- reachedStopPoint = true; /* see below */
+ reachedStopPoint = true; /* see below */
recoveryContinue = false;
if (!recoveryApply)
break;
@@ -5087,8 +5088,8 @@ StartupXLOG(void)
ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
if (recoveryLastXTime)
ereport(LOG,
- (errmsg("last completed transaction was at log time %s",
- timestamptz_to_str(recoveryLastXTime))));
+ (errmsg("last completed transaction was at log time %s",
+ timestamptz_to_str(recoveryLastXTime))));
InRedo = false;
}
else
@@ -5116,7 +5117,7 @@ StartupXLOG(void)
if (reachedStopPoint) /* stopped because of stop request */
ereport(FATAL,
(errmsg("requested recovery stop point is before end time of backup dump")));
- else /* ran off end of WAL */
+ else /* ran off end of WAL */
ereport(FATAL,
(errmsg("WAL ends before end time of backup dump")));
}
@@ -5124,12 +5125,12 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we are doing an archive recovery, we always assign a new ID. This
- * handles a couple of issues. If we stopped short of the end of WAL
+ * If we are doing an archive recovery, we always assign a new ID. This
+ * handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
- * current last segment is problematic because it may result in trying
- * to overwrite an already-archived copy of that segment, and we encourage
+ * current last segment is problematic because it may result in trying to
+ * overwrite an already-archived copy of that segment, and we encourage
* DBAs to make their archive_commands reject that. We can dodge the
* problem by making the new active segment have a new timeline ID.
*
@@ -5472,7 +5473,7 @@ GetInsertRecPtr(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->LogwrtRqst.Write;
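
The hunk cuts off mid-function; the spinlocked read it belongs to completes in the usual pattern (a sketch, not a quote of this commit):

    SpinLockAcquire(&xlogctl->info_lck);
    recptr = xlogctl->LogwrtRqst.Write;
    SpinLockRelease(&xlogctl->info_lck);

    return recptr;
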
@@ -5576,8 +5577,12 @@ LogCheckpointStart(int flags)
static void
LogCheckpointEnd(void)
{
- long write_secs, sync_secs, total_secs;
- int write_usecs, sync_usecs, total_usecs;
+ long write_secs,
+ sync_secs,
+ total_secs;
+ int write_usecs,
+ sync_usecs,
+ total_usecs;
CheckpointStats.ckpt_end_t = GetCurrentTimestamp();
@@ -5601,9 +5606,9 @@ LogCheckpointEnd(void)
CheckpointStats.ckpt_segs_added,
CheckpointStats.ckpt_segs_removed,
CheckpointStats.ckpt_segs_recycled,
- write_secs, write_usecs/1000,
- sync_secs, sync_usecs/1000,
- total_secs, total_usecs/1000);
+ write_secs, write_usecs / 1000,
+ sync_secs, sync_usecs / 1000,
+ total_secs, total_usecs / 1000);
}
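
For the variables re-wrapped above, the elapsed times come from pairwise timestamp subtraction; a sketch using TimestampDifference (the CheckpointStats field names are my recollection of this era's xlog.c and should be read as assumptions):

    TimestampDifference(CheckpointStats.ckpt_write_t,
                        CheckpointStats.ckpt_sync_t,
                        &write_secs, &write_usecs);
    TimestampDifference(CheckpointStats.ckpt_sync_t,
                        CheckpointStats.ckpt_sync_end_t,
                        &sync_secs, &sync_usecs);
    TimestampDifference(CheckpointStats.ckpt_start_t,
                        CheckpointStats.ckpt_end_t,
                        &total_secs, &total_usecs);
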
/*
@@ -5665,9 +5670,9 @@ CreateCheckPoint(int flags)
}
/*
- * Let smgr prepare for checkpoint; this has to happen before we
- * determine the REDO pointer. Note that smgr must not do anything
- * that'd have to be undone if we decide no checkpoint is needed.
+ * Let smgr prepare for checkpoint; this has to happen before we determine
+ * the REDO pointer. Note that smgr must not do anything that'd have to
+ * be undone if we decide no checkpoint is needed.
*/
smgrpreckpt();
@@ -5761,8 +5766,8 @@ CreateCheckPoint(int flags)
LWLockRelease(WALInsertLock);
/*
- * If enabled, log checkpoint start. We postpone this until now
- * so as not to log anything if we decided to skip the checkpoint.
+ * If enabled, log checkpoint start. We postpone this until now so as not
+ * to log anything if we decided to skip the checkpoint.
*/
if (log_checkpoints)
LogCheckpointStart(flags);
@@ -5782,11 +5787,11 @@ CreateCheckPoint(int flags)
* checkpoint take a bit longer than to hold locks longer than necessary.
* (In fact, the whole reason we have this issue is that xact.c does
* commit record XLOG insertion and clog update as two separate steps
- * protected by different locks, but again that seems best on grounds
- * of minimizing lock contention.)
+ * protected by different locks, but again that seems best on grounds of
+ * minimizing lock contention.)
*
- * A transaction that has not yet set inCommit when we look cannot be
- * at risk, since he's not inserted his commit record yet; and one that's
+ * A transaction that has not yet set inCommit when we look cannot be at
+ * risk, since he's not inserted his commit record yet; and one that's
* already cleared it is not at risk either, since he's done fixing clog
* and we will correctly flush the update below. So we cannot miss any
* xacts we need to wait for.
@@ -5794,8 +5799,9 @@ CreateCheckPoint(int flags)
nInCommit = GetTransactionsInCommit(&inCommitXids);
if (nInCommit > 0)
{
- do {
- pg_usleep(10000L); /* wait for 10 msec */
+ do
+ {
+ pg_usleep(10000L); /* wait for 10 msec */
} while (HaveTransactionsInCommit(inCommitXids, nInCommit));
}
pfree(inCommitXids);
@@ -5946,7 +5952,7 @@ CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
CheckPointCLOG();
CheckPointSUBTRANS();
CheckPointMultiXact();
- CheckPointBuffers(flags); /* performs all required fsyncs */
+ CheckPointBuffers(flags); /* performs all required fsyncs */
/* We deliberately delay 2PC checkpointing as long as possible */
CheckPointTwoPhase(checkPointRedo);
}
@@ -6046,14 +6052,14 @@ XLogPutNextOid(Oid nextOid)
* does.
*
* Note, however, that the above statement only covers state "within" the
- * database. When we use a generated OID as a file or directory name,
- * we are in a sense violating the basic WAL rule, because that filesystem
+ * database. When we use a generated OID as a file or directory name, we
+ * are in a sense violating the basic WAL rule, because that filesystem
* change may reach disk before the NEXTOID WAL record does. The impact
- * of this is that if a database crash occurs immediately afterward,
- * we might after restart re-generate the same OID and find that it
- * conflicts with the leftover file or directory. But since for safety's
- * sake we always loop until finding a nonconflicting filename, this poses
- * no real problem in practice. See pgsql-hackers discussion 27-Sep-2006.
+ * of this is that if a database crash occurs immediately afterward, we
+ * might after restart re-generate the same OID and find that it conflicts
+ * with the leftover file or directory. But since for safety's sake we
+ * always loop until finding a nonconflicting filename, this poses no real
+ * problem in practice. See pgsql-hackers discussion 27-Sep-2006.
*/
}
@@ -6673,7 +6679,7 @@ pg_switch_xlog(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to switch transaction log files"))));
+ (errmsg("must be superuser to switch transaction log files"))));
switchpoint = RequestXLogSwitch();