author     Bruce Momjian <bruce@momjian.us>  2001-03-22 04:01:46 +0000
committer  Bruce Momjian <bruce@momjian.us>  2001-03-22 04:01:46 +0000
commit     9e1552607a9dc6bc23e43d46770a9063ade4f3f0 (patch)
tree       6a230d81917ebc004e40cd46c48f2aa27eec153e /src/backend/access
parent     6cf8707b828b14b5c2336076ce358b18b67829d6 (diff)
pgindent run. Make it all clean.
Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c   |  12
-rw-r--r--  src/backend/access/common/indextuple.c  |  28
-rw-r--r--  src/backend/access/common/printtup.c    |  27
-rw-r--r--  src/backend/access/common/tupdesc.c     |  18
-rw-r--r--  src/backend/access/gist/gist.c          | 698
-rw-r--r--  src/backend/access/gist/gistget.c       |   4
-rw-r--r--  src/backend/access/gist/gistscan.c      |  14
-rw-r--r--  src/backend/access/hash/hash.c          |  57
-rw-r--r--  src/backend/access/hash/hashfunc.c      |  14
-rw-r--r--  src/backend/access/heap/heapam.c        | 384
-rw-r--r--  src/backend/access/heap/hio.c           |  12
-rw-r--r--  src/backend/access/heap/tuptoaster.c    | 472
-rw-r--r--  src/backend/access/index/istrat.c       |  16
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c  |  11
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c   | 606
-rw-r--r--  src/backend/access/nbtree/nbtpage.c     |  60
-rw-r--r--  src/backend/access/nbtree/nbtree.c      | 271
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c   |  93
-rw-r--r--  src/backend/access/nbtree/nbtsort.c     | 103
-rw-r--r--  src/backend/access/nbtree/nbtutils.c    |  49
-rw-r--r--  src/backend/access/rtree/rtget.c        |   6
-rw-r--r--  src/backend/access/rtree/rtproc.c       |  19
-rw-r--r--  src/backend/access/rtree/rtree.c        | 111
-rw-r--r--  src/backend/access/rtree/rtscan.c       |  14
-rw-r--r--  src/backend/access/transam/rmgr.c       |  34
-rw-r--r--  src/backend/access/transam/transam.c    |   6
-rw-r--r--  src/backend/access/transam/transsup.c   |   6
-rw-r--r--  src/backend/access/transam/varsup.c     |  31
-rw-r--r--  src/backend/access/transam/xact.c       |  98
-rw-r--r--  src/backend/access/transam/xid.c        |   7
-rw-r--r--  src/backend/access/transam/xlog.c       | 643
-rw-r--r--  src/backend/access/transam/xlogutils.c  | 143
32 files changed, 2145 insertions(+), 1922 deletions(-)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 03d180e36fe..9bb08054943 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.69 2001/01/24 19:42:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -306,8 +306,8 @@ nocachegetattr(HeapTuple tuple,
int j;
/*
- * In for(), we test <= and not < because we want to see
- * if we can go past it in initializing offsets.
+ * In for(), we test <= and not < because we want to see if we
+ * can go past it in initializing offsets.
*/
for (j = 0; j <= attnum; j++)
{
@@ -321,9 +321,9 @@ nocachegetattr(HeapTuple tuple,
}
/*
- * If slow is false, and we got here, we know that we have a tuple with
- * no nulls or varlenas before the target attribute. If possible, we
- * also want to initialize the remainder of the attribute cached
+ * If slow is false, and we got here, we know that we have a tuple
+ * with no nulls or varlenas before the target attribute. If possible,
+ * we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index e503d9b888d..da8129f307f 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.52 2001/02/22 21:48:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,9 +45,11 @@ index_formtuple(TupleDesc tupleDescriptor,
bool hasnull = false;
uint16 tupmask = 0;
int numberOfAttributes = tupleDescriptor->natts;
+
#ifdef TOAST_INDEX_HACK
Datum untoasted_value[INDEX_MAX_KEYS];
bool untoasted_free[INDEX_MAX_KEYS];
+
#endif
if (numberOfAttributes > INDEX_MAX_KEYS)
@@ -57,7 +59,7 @@ index_formtuple(TupleDesc tupleDescriptor,
#ifdef TOAST_INDEX_HACK
for (i = 0; i < numberOfAttributes; i++)
{
- Form_pg_attribute att = tupleDescriptor->attrs[i];
+ Form_pg_attribute att = tupleDescriptor->attrs[i];
untoasted_value[i] = value[i];
untoasted_free[i] = false;
@@ -73,20 +75,20 @@ index_formtuple(TupleDesc tupleDescriptor,
if (VARATT_IS_EXTERNAL(value[i]))
{
untoasted_value[i] = PointerGetDatum(
- heap_tuple_fetch_attr(
- (varattrib *) DatumGetPointer(value[i])));
+ heap_tuple_fetch_attr(
+ (varattrib *) DatumGetPointer(value[i])));
untoasted_free[i] = true;
}
/*
- * If value is above size target, and is of a compressible datatype,
- * try to compress it in-line.
+ * If value is above size target, and is of a compressible
+ * datatype, try to compress it in-line.
*/
if (VARATT_SIZE(untoasted_value[i]) > TOAST_INDEX_TARGET &&
!VARATT_IS_EXTENDED(untoasted_value[i]) &&
(att->attstorage == 'x' || att->attstorage == 'm'))
{
- Datum cvalue = toast_compress_datum(untoasted_value[i]);
+ Datum cvalue = toast_compress_datum(untoasted_value[i]);
if (DatumGetPointer(cvalue) != NULL)
{
@@ -146,8 +148,8 @@ index_formtuple(TupleDesc tupleDescriptor,
/*
* We do this because DataFill wants to initialize a "tupmask" which
* is used for HeapTuples, but we want an indextuple infomask. The
- * only relevant info is the "has variable attributes" field.
- * We have already set the hasnull bit above.
+ * only relevant info is the "has variable attributes" field. We have
+ * already set the hasnull bit above.
*/
if (tupmask & HEAP_HASVARLENA)
@@ -315,9 +317,9 @@ nocache_index_getattr(IndexTuple tup,
}
/*
- * If slow is false, and we got here, we know that we have a tuple with
- * no nulls or varlenas before the target attribute. If possible, we
- * also want to initialize the remainder of the attribute cached
+ * If slow is false, and we got here, we know that we have a tuple
+ * with no nulls or varlenas before the target attribute. If possible,
+ * we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
@@ -391,9 +393,7 @@ nocache_index_getattr(IndexTuple tup,
usecache = false;
}
else
- {
off += att[i]->attlen;
- }
}
off = att_align(off, att[attnum]->attlen, att[attnum]->attalign);
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 4f47ef0d451..d44bfe973e0 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.57 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ getTypeOutputInfo(Oid type, Oid *typOutput, Oid *typElem,
*typOutput = pt->typoutput;
*typElem = pt->typelem;
- *typIsVarlena = (! pt->typbyval) && (pt->typlen == -1);
+ *typIsVarlena = (!pt->typbyval) && (pt->typlen == -1);
ReleaseSysCache(typeTuple);
return OidIsValid(*typOutput);
}
@@ -200,9 +200,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
if (OidIsValid(thisState->typoutput))
{
+
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to
+ * avoid memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -210,9 +211,9 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
attr = origattr;
outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
- attr,
- ObjectIdGetDatum(thisState->typelem),
- Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
+ attr,
+ ObjectIdGetDatum(thisState->typelem),
+ Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
pq_sendcountedtext(&buf, outputstr, strlen(outputstr));
@@ -308,9 +309,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena))
{
+
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to
+ * avoid memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -318,9 +320,9 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
attr = origattr;
value = DatumGetCString(OidFunctionCall3(typoutput,
- attr,
- ObjectIdGetDatum(typelem),
- Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
+ attr,
+ ObjectIdGetDatum(typelem),
+ Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -405,6 +407,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
/* send # of bytes, and opaque data */
if (thisState->typisvarlena)
{
+
/*
* If we have a toasted datum, must detoast before sending.
*/
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 86bc1a56f82..e07c6296d15 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.71 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -242,9 +242,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We do not need to check every single field here, and in fact
* some fields such as attdispersion probably shouldn't be
- * compared. We can also disregard attnum (it was used to
- * place the row in the attrs array) and everything derived
- * from the column datatype.
+ * compared. We can also disregard attnum (it was used to place
+ * the row in the attrs array) and everything derived from the
+ * column datatype.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@@ -276,8 +276,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We can't assume that the items are always read from the
- * system catalogs in the same order; so use the adnum field to
- * identify the matching item to compare.
+ * system catalogs in the same order; so use the adnum field
+ * to identify the matching item to compare.
*/
for (j = 0; j < n; defval2++, j++)
{
@@ -298,9 +298,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
ConstrCheck *check2 = constr2->check;
/*
- * Similarly, don't assume that the checks are always read
- * in the same order; match them up by name and contents.
- * (The name *should* be unique, but...)
+ * Similarly, don't assume that the checks are always read in
+ * the same order; match them up by name and contents. (The
+ * name *should* be unique, but...)
*/
for (j = 0; j < n; check2++, j++)
{
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 9e3f935bd67..1c5577b88a0 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.71 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.72 2001/03/22 03:59:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,61 +25,62 @@
#include "access/xlogutils.h"
-/* result's status */
+/* result's status */
#define INSERTED 0x01
#define SPLITED 0x02
/* non-export function prototypes */
-static void gistdoinsert(Relation r,
- IndexTuple itup,
- InsertIndexResult *res,
- GISTSTATE *GISTstate);
-static int gistlayerinsert( Relation r, BlockNumber blkno,
- IndexTuple **itup,
- int *len,
- InsertIndexResult *res,
- GISTSTATE *giststate );
-static OffsetNumber gistwritebuffer( Relation r,
- Page page,
- IndexTuple *itup,
- int len,
- OffsetNumber off,
- GISTSTATE *giststate );
-static int gistnospace( Page page,
- IndexTuple *itvec, int len );
-static IndexTuple * gistreadbuffer( Relation r,
- Buffer buffer, int *len );
-static IndexTuple * gistjoinvector(
- IndexTuple *itvec, int *len,
- IndexTuple *additvec, int addlen );
-static IndexTuple gistunion( Relation r, IndexTuple *itvec,
- int len, GISTSTATE *giststate );
-static IndexTuple gistgetadjusted( Relation r,
- IndexTuple oldtup,
- IndexTuple addtup,
- GISTSTATE *giststate );
-static IndexTuple * gistSplit(Relation r,
- Buffer buffer,
- IndexTuple *itup,
- int *len,
- GISTSTATE *giststate,
- InsertIndexResult *res);
-static void gistnewroot(GISTSTATE *giststate, Relation r,
+static void gistdoinsert(Relation r,
+ IndexTuple itup,
+ InsertIndexResult *res,
+ GISTSTATE *GISTstate);
+static int gistlayerinsert(Relation r, BlockNumber blkno,
+ IndexTuple **itup,
+ int *len,
+ InsertIndexResult *res,
+ GISTSTATE *giststate);
+static OffsetNumber gistwritebuffer(Relation r,
+ Page page,
+ IndexTuple *itup,
+ int len,
+ OffsetNumber off,
+ GISTSTATE *giststate);
+static int gistnospace(Page page,
+ IndexTuple *itvec, int len);
+static IndexTuple *gistreadbuffer(Relation r,
+ Buffer buffer, int *len);
+static IndexTuple *gistjoinvector(
+ IndexTuple *itvec, int *len,
+ IndexTuple *additvec, int addlen);
+static IndexTuple gistunion(Relation r, IndexTuple *itvec,
+ int len, GISTSTATE *giststate);
+static IndexTuple gistgetadjusted(Relation r,
+ IndexTuple oldtup,
+ IndexTuple addtup,
+ GISTSTATE *giststate);
+static IndexTuple *gistSplit(Relation r,
+ Buffer buffer,
+ IndexTuple *itup,
+ int *len,
+ GISTSTATE *giststate,
+ InsertIndexResult *res);
+static void gistnewroot(GISTSTATE *giststate, Relation r,
IndexTuple *itup, int len);
static void GISTInitBuffer(Buffer b, uint32 f);
-static OffsetNumber gistchoose(Relation r, Page p,
- IndexTuple it,
- GISTSTATE *giststate);
-static IndexTuple gist_tuple_replacekey(Relation r,
- GISTENTRY entry, IndexTuple t);
-static void gistcentryinit(GISTSTATE *giststate,
- GISTENTRY *e, char *pr,
- Relation r, Page pg,
- OffsetNumber o, int b, bool l);
+static OffsetNumber gistchoose(Relation r, Page p,
+ IndexTuple it,
+ GISTSTATE *giststate);
+static IndexTuple gist_tuple_replacekey(Relation r,
+ GISTENTRY entry, IndexTuple t);
+static void gistcentryinit(GISTSTATE *giststate,
+ GISTENTRY *e, char *pr,
+ Relation r, Page pg,
+ OffsetNumber o, int b, bool l);
#undef GISTDEBUG
#ifdef GISTDEBUG
static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff);
+
#endif
/*
@@ -88,12 +89,14 @@ static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber c
Datum
gistbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -105,9 +108,11 @@ gistbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
GISTSTATE giststate;
@@ -181,6 +186,7 @@ gistbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -262,9 +268,7 @@ gistbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -297,7 +301,7 @@ gistbuild(PG_FUNCTION_ARGS)
}
#ifdef GISTDEBUG
-gist_dumptree(index, 0, GISTP_ROOT, 0);
+ gist_dumptree(index, 0, GISTP_ROOT, 0);
#endif
PG_RETURN_VOID();
@@ -312,12 +316,14 @@ gist_dumptree(index, 0, GISTP_ROOT, 0);
Datum
gistinsert(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
IndexTuple itup;
@@ -380,7 +386,7 @@ gistPageAddItem(GISTSTATE *giststate,
{
GISTENTRY tmpcentry;
IndexTuple itup = (IndexTuple) item;
- OffsetNumber retval;
+ OffsetNumber retval;
/*
* recompress the item given that we now know the exact page and
@@ -394,7 +400,7 @@ gistPageAddItem(GISTSTATE *giststate,
offsetNumber, dentry->bytes, FALSE);
*newtup = gist_tuple_replacekey(r, tmpcentry, itup);
retval = PageAddItem(page, (Item) *newtup, IndexTupleSize(*newtup),
- offsetNumber, flags);
+ offsetNumber, flags);
if (retval == InvalidOffsetNumber)
elog(ERROR, "gist: failed to add index item to %s",
RelationGetRelationName(r));
@@ -405,189 +411,213 @@ gistPageAddItem(GISTSTATE *giststate,
return (retval);
}
-static void
-gistdoinsert( Relation r,
- IndexTuple itup,
- InsertIndexResult *res,
- GISTSTATE *giststate ) {
+static void
+gistdoinsert(Relation r,
+ IndexTuple itup,
+ InsertIndexResult *res,
+ GISTSTATE *giststate)
+{
IndexTuple *instup;
- int i,ret,len = 1;
+ int i,
+ ret,
+ len = 1;
+
+ instup = (IndexTuple *) palloc(sizeof(IndexTuple));
+ instup[0] = (IndexTuple) palloc(IndexTupleSize(itup));
+ memcpy(instup[0], itup, IndexTupleSize(itup));
- instup = ( IndexTuple* ) palloc( sizeof(IndexTuple) );
- instup[0] = ( IndexTuple ) palloc( IndexTupleSize( itup ) );
- memcpy( instup[0], itup, IndexTupleSize( itup ) );
-
ret = gistlayerinsert(r, GISTP_ROOT, &instup, &len, res, giststate);
- if ( ret & SPLITED )
- gistnewroot( giststate, r, instup, len );
+ if (ret & SPLITED)
+ gistnewroot(giststate, r, instup, len);
- for(i=0;i<len;i++)
- pfree( instup[i] );
- pfree( instup );
+ for (i = 0; i < len; i++)
+ pfree(instup[i]);
+ pfree(instup);
}
static int
-gistlayerinsert( Relation r, BlockNumber blkno,
- IndexTuple **itup, /* in - out, has compressed entry */
- int *len , /* in - out */
- InsertIndexResult *res, /* out */
- GISTSTATE *giststate ) {
- Buffer buffer;
- Page page;
- OffsetNumber child;
- int ret;
+gistlayerinsert(Relation r, BlockNumber blkno,
+ IndexTuple **itup, /* in - out, has compressed entry */
+ int *len, /* in - out */
+ InsertIndexResult *res, /* out */
+ GISTSTATE *giststate)
+{
+ Buffer buffer;
+ Page page;
+ OffsetNumber child;
+ int ret;
GISTPageOpaque opaque;
buffer = ReadBuffer(r, blkno);
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
- if (!(opaque->flags & F_LEAF)) {
+ if (!(opaque->flags & F_LEAF))
+ {
/* internal page, so we must walk on tree */
/* len IS equial 1 */
- ItemId iid;
+ ItemId iid;
BlockNumber nblkno;
ItemPointerData oldtid;
- IndexTuple oldtup;
-
- child = gistchoose( r, page, *(*itup), giststate );
+ IndexTuple oldtup;
+
+ child = gistchoose(r, page, *(*itup), giststate);
iid = PageGetItemId(page, child);
oldtup = (IndexTuple) PageGetItem(page, iid);
nblkno = ItemPointerGetBlockNumber(&(oldtup->t_tid));
- /*
- * After this call:
- * 1. if child page was splited, then itup contains
- * keys for each page
- * 2. if child page wasn't splited, then itup contains
- * additional for adjustement of current key
+ /*
+ * After this call: 1. if child page was splited, then itup
+ * contains keys for each page 2. if child page wasn't splited,
+ * then itup contains additional for adjustement of current key
*/
- ret = gistlayerinsert( r, nblkno, itup, len, res, giststate );
+ ret = gistlayerinsert(r, nblkno, itup, len, res, giststate);
/* nothing inserted in child */
- if ( ! (ret & INSERTED) ) {
+ if (!(ret & INSERTED))
+ {
ReleaseBuffer(buffer);
- return 0x00;
+ return 0x00;
}
- /* child does not splited */
- if ( ! (ret & SPLITED) ) {
- IndexTuple newtup = gistgetadjusted( r, oldtup, (*itup)[0], giststate );
- if ( ! newtup ) {
+ /* child does not splited */
+ if (!(ret & SPLITED))
+ {
+ IndexTuple newtup = gistgetadjusted(r, oldtup, (*itup)[0], giststate);
+
+ if (!newtup)
+ {
/* not need to update key */
ReleaseBuffer(buffer);
return 0x00;
}
- pfree( (*itup)[0] ); /* !!! */
+ pfree((*itup)[0]); /* !!! */
(*itup)[0] = newtup;
}
- /* key is modified, so old version must be deleted */
+ /* key is modified, so old version must be deleted */
ItemPointerSet(&oldtid, blkno, child);
DirectFunctionCall2(gistdelete,
- PointerGetDatum(r),
- PointerGetDatum(&oldtid));
+ PointerGetDatum(r),
+ PointerGetDatum(&oldtid));
}
- ret = INSERTED;
+ ret = INSERTED;
- if ( gistnospace(page, (*itup), *len) ) {
+ if (gistnospace(page, (*itup), *len))
+ {
/* no space for insertion */
IndexTuple *itvec;
- int tlen;
+ int tlen;
ret |= SPLITED;
- itvec = gistreadbuffer( r, buffer, &tlen );
- itvec = gistjoinvector( itvec, &tlen, (*itup), *len );
- pfree( (*itup) );
- (*itup) = gistSplit( r, buffer, itvec, &tlen, giststate,
- (opaque->flags & F_LEAF) ? res : NULL ); /*res only for inserting in leaf*/
- ReleaseBuffer( buffer );
- pfree( itvec );
- *len = tlen; /* now tlen >= 2 */
- } else {
+ itvec = gistreadbuffer(r, buffer, &tlen);
+ itvec = gistjoinvector(itvec, &tlen, (*itup), *len);
+ pfree((*itup));
+ (*itup) = gistSplit(r, buffer, itvec, &tlen, giststate,
+ (opaque->flags & F_LEAF) ? res : NULL); /* res only for
+ * inserting in leaf */
+ ReleaseBuffer(buffer);
+ pfree(itvec);
+ *len = tlen; /* now tlen >= 2 */
+ }
+ else
+ {
/* enogth space */
- OffsetNumber off, l;
+ OffsetNumber off,
+ l;
- off = ( PageIsEmpty(page) ) ?
- FirstOffsetNumber
+ off = (PageIsEmpty(page)) ?
+ FirstOffsetNumber
:
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
- l = gistwritebuffer( r, page, (*itup), *len, off, giststate );
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ l = gistwritebuffer(r, page, (*itup), *len, off, giststate);
WriteBuffer(buffer);
- /* set res if insert into leaf page, in
- this case, len = 1 always */
- if ( res && (opaque->flags & F_LEAF) )
+ /*
+ * set res if insert into leaf page, in this case, len = 1 always
+ */
+ if (res && (opaque->flags & F_LEAF))
ItemPointerSet(&((*res)->pointerData), blkno, l);
- if ( *len > 1 ) { /* previos insert ret & SPLITED != 0 */
- int i;
- /* child was splited, so we must form union
- * for insertion in parent */
- IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
- for(i=0; i<*len; i++)
- pfree( (*itup)[i] );
+ if (*len > 1)
+ { /* previos insert ret & SPLITED != 0 */
+ int i;
+
+ /*
+ * child was splited, so we must form union for insertion in
+ * parent
+ */
+ IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
+
+ for (i = 0; i < *len; i++)
+ pfree((*itup)[i]);
(*itup)[0] = newtup;
*len = 1;
}
}
-
- return ret;
-}
-/*
+ return ret;
+}
+
+/*
* Write itup vector to page, has no control of free space
*/
static OffsetNumber
-gistwritebuffer( Relation r, Page page, IndexTuple *itup,
- int len, OffsetNumber off, GISTSTATE *giststate) {
+gistwritebuffer(Relation r, Page page, IndexTuple *itup,
+ int len, OffsetNumber off, GISTSTATE *giststate)
+{
OffsetNumber l = InvalidOffsetNumber;
- int i;
- GISTENTRY tmpdentry;
- IndexTuple newtup;
-
- for(i=0; i<len; i++) {
- l = gistPageAddItem(giststate, r, page,
- (Item) itup[i], IndexTupleSize(itup[i]),
- off, LP_USED, &tmpdentry, &newtup);
- off = OffsetNumberNext( off );
+ int i;
+ GISTENTRY tmpdentry;
+ IndexTuple newtup;
+
+ for (i = 0; i < len; i++)
+ {
+ l = gistPageAddItem(giststate, r, page,
+ (Item) itup[i], IndexTupleSize(itup[i]),
+ off, LP_USED, &tmpdentry, &newtup);
+ off = OffsetNumberNext(off);
if (tmpdentry.pred != (((char *) itup[i]) + sizeof(IndexTupleData)) && tmpdentry.pred)
pfree(tmpdentry.pred);
if (itup[i] != newtup)
pfree(newtup);
}
- return l;
+ return l;
}
/*
* Check space for itup vector on page
*/
-static int
-gistnospace( Page page, IndexTuple *itvec, int len ) {
- int size = 0;
- int i;
- for(i=0; i<len; i++)
- size += IndexTupleSize( itvec[i] )+4; /* ??? */
+static int
+gistnospace(Page page, IndexTuple *itvec, int len)
+{
+ int size = 0;
+ int i;
- return (PageGetFreeSpace(page) < size);
-}
+ for (i = 0; i < len; i++)
+ size += IndexTupleSize(itvec[i]) + 4; /* ??? */
+
+ return (PageGetFreeSpace(page) < size);
+}
/*
* Read buffer into itup vector
*/
static IndexTuple *
-gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
- OffsetNumber i, maxoff;
- IndexTuple *itvec;
- Page p = (Page) BufferGetPage(buffer);
+gistreadbuffer(Relation r, Buffer buffer, int *len /* out */ )
+{
+ OffsetNumber i,
+ maxoff;
+ IndexTuple *itvec;
+ Page p = (Page) BufferGetPage(buffer);
- *len=0;
+ *len = 0;
maxoff = PageGetMaxOffsetNumber(p);
- itvec = palloc( sizeof(IndexTuple) * maxoff );
- for(i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
- itvec[ (*len)++ ] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
+ itvec = palloc(sizeof(IndexTuple) * maxoff);
+ for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
+ itvec[(*len)++] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
return itvec;
}
@@ -596,9 +626,10 @@ gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
* join two vectors into one
*/
static IndexTuple *
-gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen ) {
- itvec = (IndexTuple*) repalloc( (void*)itvec, sizeof(IndexTuple) * ( (*len) + addlen ) );
- memmove( &itvec[*len], additvec, sizeof(IndexTuple) * addlen );
+gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
+{
+ itvec = (IndexTuple *) repalloc((void *) itvec, sizeof(IndexTuple) * ((*len) + addlen));
+ memmove(&itvec[*len], additvec, sizeof(IndexTuple) * addlen);
*len += addlen;
return itvec;
}
@@ -607,115 +638,124 @@ gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen )
* return union of itup vector
*/
static IndexTuple
-gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
- bytea *evec;
- char *datum;
- int datumsize, i;
- GISTENTRY centry;
- char isnull;
- IndexTuple newtup;
+gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
+{
+ bytea *evec;
+ char *datum;
+ int datumsize,
+ i;
+ GISTENTRY centry;
+ char isnull;
+ IndexTuple newtup;
evec = (bytea *) palloc(len * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = len * sizeof(GISTENTRY) + VARHDRSZ;
- for ( i = 0 ; i< len ; i++ )
+ for (i = 0; i < len; i++)
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[i],
- (char*) itvec[i] + sizeof(IndexTupleData),
- (Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
- IndexTupleSize((IndexTuple)itvec[i]) - sizeof(IndexTupleData), FALSE);
+ (char *) itvec[i] + sizeof(IndexTupleData),
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ IndexTupleSize((IndexTuple) itvec[i]) - sizeof(IndexTupleData), FALSE);
datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn,
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize)));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize)));
+
+ for (i = 0; i < len; i++)
+ if (((GISTENTRY *) VARDATA(evec))[i].pred &&
+ ((GISTENTRY *) VARDATA(evec))[i].pred !=
+ ((char *) (itvec[i]) + sizeof(IndexTupleData)))
+ pfree(((GISTENTRY *) VARDATA(evec))[i].pred);
- for ( i = 0 ; i< len ; i++ )
- if ( ((GISTENTRY *) VARDATA(evec))[i].pred &&
- ((GISTENTRY *) VARDATA(evec))[i].pred !=
- ((char*)( itvec[i] )+ sizeof(IndexTupleData)) )
- pfree( ((GISTENTRY *) VARDATA(evec))[i].pred );
-
- pfree( evec );
+ pfree(evec);
- gistcentryinit(giststate, &centry, datum,
- (Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
- datumsize, FALSE);
+ gistcentryinit(giststate, &centry, datum,
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
- newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
+ newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
if (centry.pred != datum)
- pfree( datum );
+ pfree(datum);
return newtup;
-}
+}
/*
* Forms union of oldtup and addtup, if union == oldtup then return NULL
*/
static IndexTuple
-gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate ) {
- bytea *evec;
- char *datum;
- int datumsize;
- bool result;
- char isnull;
- GISTENTRY centry, *ev0p, *ev1p;
- IndexTuple newtup = NULL;
-
+gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
+{
+ bytea *evec;
+ char *datum;
+ int datumsize;
+ bool result;
+ char isnull;
+ GISTENTRY centry,
+ *ev0p,
+ *ev1p;
+ IndexTuple newtup = NULL;
+
evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[0],
- (char*) oldtup + sizeof(IndexTupleData), (Relation) NULL,
- (Page) NULL, (OffsetNumber) 0,
- IndexTupleSize((IndexTuple)oldtup) - sizeof(IndexTupleData), FALSE);
+ (char *) oldtup + sizeof(IndexTupleData), (Relation) NULL,
+ (Page) NULL, (OffsetNumber) 0,
+ IndexTupleSize((IndexTuple) oldtup) - sizeof(IndexTupleData), FALSE);
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[1],
- (char*) addtup + sizeof(IndexTupleData), (Relation) NULL,
- (Page) NULL, (OffsetNumber) 0,
- IndexTupleSize((IndexTuple)addtup) - sizeof(IndexTupleData), FALSE);
+ (char *) addtup + sizeof(IndexTupleData), (Relation) NULL,
+ (Page) NULL, (OffsetNumber) 0,
+ IndexTupleSize((IndexTuple) addtup) - sizeof(IndexTupleData), FALSE);
ev1p = &((GISTENTRY *) VARDATA(evec))[1];
datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn,
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize)));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize)));
- if ( ! ( ev0p->pred && ev1p->pred ) ) {
- result = ( ev0p->pred == NULL && ev1p->pred == NULL );
- } else {
+ if (!(ev0p->pred && ev1p->pred))
+ result = (ev0p->pred == NULL && ev1p->pred == NULL);
+ else
+ {
FunctionCall3(&giststate->equalFn,
- PointerGetDatum(ev0p->pred),
- PointerGetDatum(datum),
- PointerGetDatum(&result));
+ PointerGetDatum(ev0p->pred),
+ PointerGetDatum(datum),
+ PointerGetDatum(&result));
}
- if ( result ) {
+ if (result)
+ {
/* not need to update key */
- pfree( datum );
- } else {
+ pfree(datum);
+ }
+ else
+ {
gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page,
- ev0p->offset, datumsize, FALSE);
+ ev0p->offset, datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
- newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
- newtup->t_tid = oldtup->t_tid;
+ newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
+ newtup->t_tid = oldtup->t_tid;
if (centry.pred != datum)
- pfree( datum );
+ pfree(datum);
}
- if ( ev0p->pred &&
- ev0p->pred != (char*) oldtup + sizeof(IndexTupleData) )
- pfree( ev0p->pred );
- if ( ev1p->pred &&
- ev1p->pred != (char*) addtup + sizeof(IndexTupleData) )
- pfree( ev1p->pred );
- pfree( evec );
+ if (ev0p->pred &&
+ ev0p->pred != (char *) oldtup + sizeof(IndexTupleData))
+ pfree(ev0p->pred);
+ if (ev1p->pred &&
+ ev1p->pred != (char *) addtup + sizeof(IndexTupleData))
+ pfree(ev1p->pred);
+ pfree(evec);
- return newtup;
+ return newtup;
}
-
+
/*
* gistSplit -- split a page in the tree.
*/
@@ -728,19 +768,27 @@ gistSplit(Relation r,
InsertIndexResult *res)
{
Page p;
- Buffer leftbuf, rightbuf;
- Page left, right;
- OffsetNumber *spl_left, *spl_right;
- IndexTuple *lvectup, *rvectup, *newtup;
- int leftoff, rightoff;
- BlockNumber lbknum, rbknum;
+ Buffer leftbuf,
+ rightbuf;
+ Page left,
+ right;
+ OffsetNumber *spl_left,
+ *spl_right;
+ IndexTuple *lvectup,
+ *rvectup,
+ *newtup;
+ int leftoff,
+ rightoff;
+ BlockNumber lbknum,
+ rbknum;
GISTPageOpaque opaque;
- char isnull;
+ char isnull;
GIST_SPLITVEC v;
bytea *entryvec;
bool *decompvec;
GISTENTRY tmpentry;
- int i, nlen;
+ int i,
+ nlen;
p = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(p);
@@ -773,17 +821,17 @@ gistSplit(Relation r,
right = (Page) BufferGetPage(rightbuf);
/* generate the item array */
- entryvec = (bytea *) palloc(VARHDRSZ + (*len+1) * sizeof(GISTENTRY));
- decompvec = (bool *) palloc(VARHDRSZ + (*len+1) * sizeof(bool));
- VARATT_SIZEP(entryvec) = (*len+1) * sizeof(GISTENTRY) + VARHDRSZ;
+ entryvec = (bytea *) palloc(VARHDRSZ + (*len + 1) * sizeof(GISTENTRY));
+ decompvec = (bool *) palloc(VARHDRSZ + (*len + 1) * sizeof(bool));
+ VARATT_SIZEP(entryvec) = (*len + 1) * sizeof(GISTENTRY) + VARHDRSZ;
for (i = 1; i <= *len; i++)
{
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(entryvec))[i],
- (((char *) itup[i-1]) + sizeof(IndexTupleData)),
+ (((char *) itup[i - 1]) + sizeof(IndexTupleData)),
r, p, i,
- IndexTupleSize(itup[i-1]) - sizeof(IndexTupleData), FALSE);
+ IndexTupleSize(itup[i - 1]) - sizeof(IndexTupleData), FALSE);
if ((char *) (((GISTENTRY *) VARDATA(entryvec))[i].pred)
- == (((char *) itup[i-1]) + sizeof(IndexTupleData)))
+ == (((char *) itup[i - 1]) + sizeof(IndexTupleData)))
decompvec[i] = FALSE;
else
decompvec[i] = TRUE;
@@ -791,8 +839,8 @@ gistSplit(Relation r,
/* now let the user-defined picksplit function set up the split vector */
FunctionCall2(&giststate->picksplitFn,
- PointerGetDatum(entryvec),
- PointerGetDatum(&v));
+ PointerGetDatum(entryvec),
+ PointerGetDatum(&v));
/* clean up the entry vector: its preds need to be deleted, too */
for (i = 1; i <= *len; i++)
@@ -801,35 +849,43 @@ gistSplit(Relation r,
pfree(entryvec);
pfree(decompvec);
- spl_left = v.spl_left; spl_right = v.spl_right;
-
+ spl_left = v.spl_left;
+ spl_right = v.spl_right;
+
/* form left and right vector */
- lvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nleft );
- rvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nright );
+ lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nleft);
+ rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nright);
leftoff = rightoff = 0;
- for( i=1; i <= *len; i++ ) {
- if (i == *(spl_left) || ( i==*len && *(spl_left) != FirstOffsetNumber ) ) {
- lvectup[ leftoff++ ] = itup[ i-1 ];
+ for (i = 1; i <= *len; i++)
+ {
+ if (i == *(spl_left) || (i == *len && *(spl_left) != FirstOffsetNumber))
+ {
+ lvectup[leftoff++] = itup[i - 1];
spl_left++;
- } else {
- rvectup[ rightoff++ ] = itup[ i-1 ];
+ }
+ else
+ {
+ rvectup[rightoff++] = itup[i - 1];
spl_right++;
}
}
/* write on disk (may be need another split) */
- if ( gistnospace(right, rvectup, v.spl_nright) ) {
+ if (gistnospace(right, rvectup, v.spl_nright))
+ {
nlen = v.spl_nright;
- newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
- ( res && rvectup[ nlen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
- ReleaseBuffer( rightbuf );
- } else {
+ newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
+ (res && rvectup[nlen - 1] == itup[*len - 1]) ? res : NULL);
+ ReleaseBuffer(rightbuf);
+ }
+ else
+ {
OffsetNumber l;
-
- l = gistwritebuffer( r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate );
+
+ l = gistwritebuffer(r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate);
WriteBuffer(rightbuf);
- if ( res )
+ if (res)
ItemPointerSet(&((*res)->pointerData), rbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_rdatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@@ -839,32 +895,35 @@ gistSplit(Relation r,
v.spl_rdatum = tmpentry.pred;
nlen = 1;
- newtup = (IndexTuple*) palloc( sizeof(IndexTuple) * 1);
- isnull = ( v.spl_rdatum ) ? ' ' : 'n';
+ newtup = (IndexTuple *) palloc(sizeof(IndexTuple) * 1);
+ isnull = (v.spl_rdatum) ? ' ' : 'n';
newtup[0] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_rdatum), &isnull);
ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1);
}
- if ( gistnospace(left, lvectup, v.spl_nleft) ) {
- int llen = v.spl_nleft;
+ if (gistnospace(left, lvectup, v.spl_nleft))
+ {
+ int llen = v.spl_nleft;
IndexTuple *lntup;
- lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
- ( res && lvectup[ llen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
- ReleaseBuffer( leftbuf );
+ lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
+ (res && lvectup[llen - 1] == itup[*len - 1]) ? res : NULL);
+ ReleaseBuffer(leftbuf);
- newtup = gistjoinvector( newtup, &nlen, lntup, llen );
- pfree( lntup );
- } else {
+ newtup = gistjoinvector(newtup, &nlen, lntup, llen);
+ pfree(lntup);
+ }
+ else
+ {
OffsetNumber l;
-
- l = gistwritebuffer( r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate );
- if ( BufferGetBlockNumber(buffer) != GISTP_ROOT)
+
+ l = gistwritebuffer(r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate);
+ if (BufferGetBlockNumber(buffer) != GISTP_ROOT)
PageRestoreTempPage(left, p);
WriteBuffer(leftbuf);
- if ( res )
+ if (res)
ItemPointerSet(&((*res)->pointerData), lbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_ldatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@@ -874,10 +933,10 @@ gistSplit(Relation r,
v.spl_ldatum = tmpentry.pred;
nlen += 1;
- newtup = (IndexTuple*) repalloc( (void*)newtup, sizeof(IndexTuple) * nlen);
- isnull = ( v.spl_ldatum ) ? ' ' : 'n';
- newtup[nlen-1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
- ItemPointerSet(&(newtup[nlen-1]->t_tid), lbknum, 1);
+ newtup = (IndexTuple *) repalloc((void *) newtup, sizeof(IndexTuple) * nlen);
+ isnull = (v.spl_ldatum) ? ' ' : 'n';
+ newtup[nlen - 1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
+ ItemPointerSet(&(newtup[nlen - 1]->t_tid), lbknum, 1);
}
@@ -885,10 +944,10 @@ gistSplit(Relation r,
gistadjscans(r, GISTOP_SPLIT, BufferGetBlockNumber(buffer), FirstOffsetNumber);
/* !!! pfree */
- pfree( rvectup );
- pfree( lvectup );
- pfree( v.spl_left );
- pfree( v.spl_right );
+ pfree(rvectup);
+ pfree(lvectup);
+ pfree(v.spl_left);
+ pfree(v.spl_right);
*len = nlen;
return newtup;
@@ -903,8 +962,8 @@ gistnewroot(GISTSTATE *giststate, Relation r, IndexTuple *itup, int len)
b = ReadBuffer(r, GISTP_ROOT);
GISTInitBuffer(b, 0);
p = BufferGetPage(b);
-
- gistwritebuffer( r, p, itup, len, FirstOffsetNumber, giststate );
+
+ gistwritebuffer(r, p, itup, len, FirstOffsetNumber, giststate);
WriteBuffer(b);
}
@@ -1000,8 +1059,8 @@ gistfreestack(GISTSTACK *s)
Datum
gistdelete(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
@@ -1101,7 +1160,7 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
char *datum = (((char *) t) + sizeof(IndexTupleData));
/* if new entry fits in index tuple, copy it in */
- if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0 )
+ if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0)
{
memcpy(datum, entry.pred, entry.bytes);
/* clear out old size */
@@ -1116,9 +1175,9 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
/* generate a new index tuple for the compressed entry */
TupleDesc tupDesc = r->rd_att;
IndexTuple newtup;
- char isnull;
+ char isnull;
- isnull = ( entry.pred ) ? ' ' : 'n';
+ isnull = (entry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple(tupDesc,
(Datum *) &(entry.pred),
&isnull);
@@ -1181,38 +1240,40 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
Page page;
GISTPageOpaque opaque;
IndexTuple which;
- ItemId iid;
- OffsetNumber i,maxoff;
- BlockNumber cblk;
- char *pred;
+ ItemId iid;
+ OffsetNumber i,
+ maxoff;
+ BlockNumber cblk;
+ char *pred;
- pred = (char*) palloc( sizeof(char)*level+1 );
+ pred = (char *) palloc(sizeof(char) * level + 1);
MemSet(pred, '\t', level);
- pred[level]='\0';
+ pred[level] = '\0';
buffer = ReadBuffer(r, blk);
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
-
- maxoff = PageGetMaxOffsetNumber( page );
-
- elog(NOTICE,"%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, ( opaque->flags & F_LEAF ) ? "LEAF" : "INTE", (int)blk, (int)maxoff, PageGetFreeSpace(page));
-
- for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
+
+ maxoff = PageGetMaxOffsetNumber(page);
+
+ elog(NOTICE, "%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, (opaque->flags & F_LEAF) ? "LEAF" : "INTE", (int) blk, (int) maxoff, PageGetFreeSpace(page));
+
+ for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
+ {
iid = PageGetItemId(page, i);
which = (IndexTuple) PageGetItem(page, iid);
cblk = ItemPointerGetBlockNumber(&(which->t_tid));
-#ifdef PRINTTUPLE
- elog(NOTICE,"%s Tuple. blk: %d size: %d", pred, (int)cblk, IndexTupleSize( which ) );
-#endif
-
- if ( ! ( opaque->flags & F_LEAF ) ) {
- gist_dumptree( r, level+1, cblk, i );
- }
+#ifdef PRINTTUPLE
+ elog(NOTICE, "%s Tuple. blk: %d size: %d", pred, (int) cblk, IndexTupleSize(which));
+#endif
+
+ if (!(opaque->flags & F_LEAF))
+ gist_dumptree(r, level + 1, cblk, i);
}
ReleaseBuffer(buffer);
pfree(pred);
}
+
#endif /* defined GISTDEBUG */
void
@@ -1220,15 +1281,14 @@ gist_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_redo: unimplemented");
}
-
+
void
gist_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_undo: unimplemented");
}
-
+
void
-gist_desc(char *buf, uint8 xl_info, char* rec)
+gist_desc(char *buf, uint8 xl_info, char *rec)
{
}
-
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index f7b49430d07..8f3b5dd475c 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -32,8 +32,8 @@ static bool gistindex_keytest(IndexTuple tuple, TupleDesc tupdesc,
Datum
gistgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/* if we have it cached in the scan desc, just return the value */
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index d37a8c07763..ba21fee3c33 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -72,9 +72,9 @@ gistbeginscan(PG_FUNCTION_ARGS)
Datum
gistrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- bool fromEnd = PG_GETARG_BOOL(1);
- ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ bool fromEnd = PG_GETARG_BOOL(1);
+ ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
GISTScanOpaque p;
int i;
@@ -160,7 +160,7 @@ gistrescan(PG_FUNCTION_ARGS)
Datum
gistmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
GISTScanOpaque p;
GISTSTACK *o,
*n,
@@ -196,7 +196,7 @@ gistmarkpos(PG_FUNCTION_ARGS)
Datum
gistrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
GISTScanOpaque p;
GISTSTACK *o,
*n,
@@ -232,8 +232,8 @@ gistrestrpos(PG_FUNCTION_ARGS)
Datum
gistendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- GISTScanOpaque p;
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ GISTScanOpaque p;
p = (GISTScanOpaque) s->opaque;
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 44a8b225e8f..aa76ba232a0 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.49 2001/02/22 21:48:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.50 2001/03/22 03:59:12 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -41,12 +41,14 @@ bool BuildingHash = false;
Datum
hashbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -59,9 +61,11 @@ hashbuild(PG_FUNCTION_ARGS)
nitups;
HashItem hitem;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -117,6 +121,7 @@ hashbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -191,9 +196,7 @@ hashbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -241,12 +244,14 @@ hashbuild(PG_FUNCTION_ARGS)
Datum
hashinsert(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
HashItem hitem;
@@ -276,8 +281,8 @@ hashinsert(PG_FUNCTION_ARGS)
Datum
hashgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/*
@@ -326,11 +331,13 @@ hashbeginscan(PG_FUNCTION_ARGS)
Datum
hashrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
- bool fromEnd = PG_GETARG_BOOL(1);
+ bool fromEnd = PG_GETARG_BOOL(1);
+
#endif
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
HashScanOpaque so;
@@ -367,7 +374,7 @@ hashrescan(PG_FUNCTION_ARGS)
Datum
hashendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -405,7 +412,7 @@ hashendscan(PG_FUNCTION_ARGS)
Datum
hashmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -437,7 +444,7 @@ hashmarkpos(PG_FUNCTION_ARGS)
Datum
hashrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -468,8 +475,8 @@ hashrestrpos(PG_FUNCTION_ARGS)
Datum
hashdelete(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
/* adjust any active scans that will be affected by this deletion */
_hash_adjscans(rel, tid);
@@ -491,8 +498,8 @@ hash_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "hash_undo: unimplemented");
}
-
+
void
-hash_desc(char *buf, uint8 xl_info, char* rec)
+hash_desc(char *buf, uint8 xl_info, char *rec)
{
}
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 30defc1a57b..4cb157c702c 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.29 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.30 2001/03/22 03:59:13 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -25,32 +25,32 @@
Datum
hashchar(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_CHAR(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_CHAR(0)));
}
Datum
hashint2(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT16(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_INT16(0)));
}
Datum
hashint4(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ PG_GETARG_UINT32(0));
+ PG_RETURN_UINT32(~PG_GETARG_UINT32(0));
}
Datum
hashint8(PG_FUNCTION_ARGS)
{
/* we just use the low 32 bits... */
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT64(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_INT64(0)));
}
Datum
hashoid(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_OID(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_OID(0)));
}
Datum
@@ -93,7 +93,7 @@ hashint2vector(PG_FUNCTION_ARGS)
Datum
hashname(PG_FUNCTION_ARGS)
{
- char *key = NameStr(* PG_GETARG_NAME(0));
+ char *key = NameStr(*PG_GETARG_NAME(0));
return hash_any((char *) key, NAMEDATALEN);
}
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9748daa194d..b55717744c1 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,14 +8,14 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.110 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
* heapgettup - fetch next heap tuple from a scan
* heap_open - open a heap relation by relationId
* heap_openr - open a heap relation by name
- * heap_open[r]_nofail - same, but return NULL on failure instead of elog
+ * heap_open[r]_nofail - same, but return NULL on failure instead of elog
* heap_close - close a heap relation
* heap_beginscan - begin relation scan
* heap_rescan - restart a relation scan
@@ -88,16 +88,16 @@
#include "access/xlogutils.h"
-XLogRecPtr log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup);
-XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
- char *unused, int unlen);
+XLogRecPtr log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup);
+XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
+ char *unused, int unlen);
/* comments are in heap_update */
-static xl_heaptid _locked_tuple_;
+static xl_heaptid _locked_tuple_;
static void _heap_unlock_tuple(void *data);
-static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
- ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
+static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
+ ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
/* ----------------------------------------------------------------
@@ -249,7 +249,7 @@ heapgettup(Relation relation,
OffsetNumber lineoff;
int linesleft;
ItemPointer tid = (tuple->t_data == NULL) ?
- (ItemPointer) NULL : &(tuple->t_self);
+ (ItemPointer) NULL : &(tuple->t_self);
/* ----------------
* increment access statistics
@@ -286,7 +286,7 @@ heapgettup(Relation relation,
if (!ItemPointerIsValid(tid))
Assert(!PointerIsValid(tid));
-
+
tuple->t_tableOid = relation->rd_id;
/* ----------------
@@ -538,9 +538,9 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
(
(tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
(
- fetchatt((tupleDesc)->attrs[(attnum) - 1],
- (char *) (tup)->t_data + (tup)->t_data->t_hoff +
- (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
+ fetchatt((tupleDesc)->attrs[(attnum) - 1],
+ (char *) (tup)->t_data + (tup)->t_data->t_hoff +
+ (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
)
:
nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
@@ -564,7 +564,8 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
)
);
}
-#endif /* defined(DISABLE_COMPLEX_MACRO)*/
+
+#endif /* defined(DISABLE_COMPLEX_MACRO) */
/* ----------------------------------------------------------------
@@ -791,8 +792,8 @@ heap_beginscan(Relation relation,
scan->rs_nkeys = (short) nkeys;
/*
- * we do this here instead of in initscan() because heap_rescan
- * also calls initscan() and we don't want to allocate memory again
+ * we do this here instead of in initscan() because heap_rescan also
+ * calls initscan() and we don't want to allocate memory again
*/
if (nkeys)
scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
@@ -1316,7 +1317,7 @@ heap_get_latest_tid(Relation relation,
Oid
heap_insert(Relation relation, HeapTuple tup)
{
- Buffer buffer;
+ Buffer buffer;
/* increment access statistics */
IncrHeapAccessStat(local_insert);
@@ -1350,7 +1351,7 @@ heap_insert(Relation relation, HeapTuple tup)
* toasted attributes from some other relation, invoke the toaster.
* ----------
*/
- if (HeapTupleHasExtended(tup) ||
+ if (HeapTupleHasExtended(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
heap_tuple_toast_attrs(relation, tup, NULL);
#endif
@@ -1364,17 +1365,17 @@ heap_insert(Relation relation, HeapTuple tup)
/* XLOG stuff */
{
- xl_heap_insert xlrec;
- xl_heap_header xlhdr;
- XLogRecPtr recptr;
- XLogRecData rdata[3];
- Page page = BufferGetPage(buffer);
- uint8 info = XLOG_HEAP_INSERT;
+ xl_heap_insert xlrec;
+ xl_heap_header xlhdr;
+ XLogRecPtr recptr;
+ XLogRecData rdata[3];
+ Page page = BufferGetPage(buffer);
+ uint8 info = XLOG_HEAP_INSERT;
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tup->t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapInsert;
rdata[0].next = &(rdata[1]);
@@ -1383,12 +1384,12 @@ heap_insert(Relation relation, HeapTuple tup)
xlhdr.t_hoff = tup->t_data->t_hoff;
xlhdr.mask = tup->t_data->t_infomask;
rdata[1].buffer = buffer;
- rdata[1].data = (char*)&xlhdr;
+ rdata[1].data = (char *) &xlhdr;
rdata[1].len = SizeOfHeapHeader;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = buffer;
- rdata[2].data = (char*) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
+ rdata[2].data = (char *) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[2].len = tup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[2].next = NULL;
@@ -1411,10 +1412,10 @@ heap_insert(Relation relation, HeapTuple tup)
WriteBuffer(buffer);
/*
- * If tuple is cachable, mark it for rollback from the caches
- * in case we abort. Note it is OK to do this after WriteBuffer
- * releases the buffer, because the "tup" data structure is all
- * in local memory, not in the shared buffer.
+ * If tuple is cachable, mark it for rollback from the caches in case
+ * we abort. Note it is OK to do this after WriteBuffer releases the
+ * buffer, because the "tup" data structure is all in local memory,
+ * not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, tup);
@@ -1513,14 +1514,14 @@ l1:
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
/* XLOG stuff */
{
- xl_heap_delete xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_heap_delete xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tp.t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapDelete;
rdata[0].next = &(rdata[1]);
@@ -1551,9 +1552,10 @@ l1:
#endif
/*
- * Mark tuple for invalidation from system caches at next command boundary.
- * We have to do this before WriteBuffer because we need to look at the
- * contents of the tuple, so we need to hold our refcount on the buffer.
+ * Mark tuple for invalidation from system caches at next command
+ * boundary. We have to do this before WriteBuffer because we need to
+ * look at the contents of the tuple, so we need to hold our refcount
+ * on the buffer.
*/
RelationInvalidateHeapTuple(relation, &tp);
@@ -1567,7 +1569,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via elog().
*/
void
@@ -1636,6 +1638,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
oldtup.t_len = ItemIdGetLength(lp);
oldtup.t_self = *otid;
+
/*
* Note: beyond this point, use oldtup not otid to refer to old tuple.
* otid may very well point at newtup->t_self, which we will overwrite
@@ -1701,23 +1704,24 @@ l2:
/*
* If the toaster needs to be activated, OR if the new tuple will not
- * fit on the same page as the old, then we need to release the context
- * lock (but not the pin!) on the old tuple's buffer while we are off
- * doing TOAST and/or table-file-extension work. We must mark the old
- * tuple to show that it's already being updated, else other processes
- * may try to update it themselves. To avoid second XLOG log record,
- * we use xact mgr hook to unlock old tuple without reading log if xact
- * will abort before update is logged. In the event of crash prio logging,
- * TQUAL routines will see HEAP_XMAX_UNLOGGED flag...
+ * fit on the same page as the old, then we need to release the
+ * context lock (but not the pin!) on the old tuple's buffer while we
+ * are off doing TOAST and/or table-file-extension work. We must mark
+ * the old tuple to show that it's already being updated, else other
+ * processes may try to update it themselves. To avoid a second XLOG
+ * record, we use an xact mgr hook to unlock the old tuple without
+ * reading the log if the xact aborts before the update is logged. In
+ * the event of a crash prior to logging, TQUAL routines will see the
+ * HEAP_XMAX_UNLOGGED flag...
*
- * NOTE: this trick is useless currently but saved for future
- * when we'll implement UNDO and will re-use transaction IDs
- * after postmaster startup.
+ * NOTE: this trick is useless currently but saved for future when we'll
+ * implement UNDO and will re-use transaction IDs after postmaster
+ * startup.
*
* We need to invoke the toaster if there are already any toasted values
* present, or if the new tuple is over-threshold.
*/
- need_toast = (HeapTupleHasExtended(&oldtup) ||
+ need_toast = (HeapTupleHasExtended(&oldtup) ||
HeapTupleHasExtended(newtup) ||
(MAXALIGN(newtup->t_len) > TOAST_TUPLE_THRESHOLD));
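The need_toast computation above invokes the toaster whenever either tuple already carries toasted attributes or the new tuple's aligned length exceeds the threshold. A standalone sketch of that predicate; the threshold value and the 8-byte alignment below are illustrative stand-ins for TOAST_TUPLE_THRESHOLD and MAXALIGN:

/*
 * Sketch: the decision of whether the toaster has any work to do.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAXALIGN(x)		(((x) + 7) & ~((size_t) 7))
#define DEMO_TOAST_THRESHOLD	2048	/* illustrative threshold */

static bool
demo_need_toast(bool old_has_ext, bool new_has_ext, size_t new_len)
{
	return old_has_ext ||
		new_has_ext ||
		DEMO_MAXALIGN(new_len) > DEMO_TOAST_THRESHOLD;
}

int
main(void)
{
	printf("%d\n", demo_need_toast(false, false, 100));	/* 0: small */
	printf("%d\n", demo_need_toast(false, false, 3000));	/* 1: too big */
	printf("%d\n", demo_need_toast(true, false, 100));		/* 1: has toast */
	return 0;
}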
@@ -1726,7 +1730,7 @@ l2:
{
_locked_tuple_.node = relation->rd_node;
_locked_tuple_.tid = oldtup.t_self;
- XactPushRollback(_heap_unlock_tuple, (void*) &_locked_tuple_);
+ XactPushRollback(_heap_unlock_tuple, (void *) &_locked_tuple_);
TransactionIdStore(GetCurrentTransactionId(),
&(oldtup.t_data->t_xmax));
@@ -1762,7 +1766,7 @@ l2:
/* NO ELOG(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */
if (already_marked)
{
@@ -1784,7 +1788,7 @@ l2:
/* XLOG stuff */
{
- XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
+ XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
newbuf, newtup, false);
if (newbuf != buffer)
@@ -1814,10 +1818,10 @@ l2:
WriteBuffer(buffer);
/*
- * If new tuple is cachable, mark it for rollback from the caches
- * in case we abort. Note it is OK to do this after WriteBuffer
- * releases the buffer, because the "newtup" data structure is all
- * in local memory, not in the shared buffer.
+ * If new tuple is cachable, mark it for rollback from the caches in
+ * case we abort. Note it is OK to do this after WriteBuffer releases
+ * the buffer, because the "newtup" data structure is all in local
+ * memory, not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, newtup);
@@ -1829,7 +1833,7 @@ l2:
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via elog().
*/
void
@@ -2129,14 +2133,14 @@ heap_restrpos(HeapScanDesc scan)
XLogRecPtr
log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
{
- xl_heap_clean xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[3];
+ xl_heap_clean xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[3];
xlrec.node = reln->rd_node;
xlrec.block = BufferGetBlockNumber(buffer);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapClean;
rdata[0].next = &(rdata[1]);
@@ -2157,27 +2161,27 @@ log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CLEAN, rdata);
- return(recptr);
+ return (recptr);
}
static XLogRecPtr
-log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
+log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
Buffer newbuf, HeapTuple newtup, bool move)
{
- char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
- xl_heap_update xlrec;
- xl_heap_header *xlhdr = (xl_heap_header*) tbuf;
- int hsize = SizeOfHeapHeader;
- XLogRecPtr recptr;
- XLogRecData rdata[4];
- Page page = BufferGetPage(newbuf);
- uint8 info = (move) ? XLOG_HEAP_MOVE : XLOG_HEAP_UPDATE;
+ char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
+ xl_heap_update xlrec;
+ xl_heap_header *xlhdr = (xl_heap_header *) tbuf;
+ int hsize = SizeOfHeapHeader;
+ XLogRecPtr recptr;
+ XLogRecData rdata[4];
+ Page page = BufferGetPage(newbuf);
+ uint8 info = (move) ? XLOG_HEAP_MOVE : XLOG_HEAP_UPDATE;
xlrec.target.node = reln->rd_node;
xlrec.target.tid = from;
xlrec.newtid = newtup->t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapUpdate;
rdata[0].next = &(rdata[1]);
@@ -2190,9 +2194,9 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
xlhdr->t_natts = newtup->t_data->t_natts;
xlhdr->t_hoff = newtup->t_data->t_hoff;
xlhdr->mask = newtup->t_data->t_infomask;
- if (move) /* remember xmin & xmax */
+ if (move) /* remember xmin & xmax */
{
- TransactionId xmax;
+ TransactionId xmax;
if (newtup->t_data->t_infomask & HEAP_XMAX_INVALID ||
newtup->t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
@@ -2200,17 +2204,17 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
else
xmax = newtup->t_data->t_xmax;
memcpy(tbuf + hsize, &xmax, sizeof(TransactionId));
- memcpy(tbuf + hsize + sizeof(TransactionId),
- &(newtup->t_data->t_xmin), sizeof(TransactionId));
+ memcpy(tbuf + hsize + sizeof(TransactionId),
+ &(newtup->t_data->t_xmin), sizeof(TransactionId));
hsize += (2 * sizeof(TransactionId));
}
rdata[2].buffer = newbuf;
- rdata[2].data = (char*)xlhdr;
+ rdata[2].data = (char *) xlhdr;
rdata[2].len = hsize;
rdata[2].next = &(rdata[3]);
rdata[3].buffer = newbuf;
- rdata[3].data = (char*) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
+ rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[3].next = NULL;
@@ -2224,23 +2228,23 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
recptr = XLogInsert(RM_HEAP_ID, info, rdata);
- return(recptr);
+ return (recptr);
}
XLogRecPtr
-log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup)
+log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup)
{
- return(log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
+ return (log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
}
static void
heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_clean *xlrec = (xl_heap_clean*) XLogRecGetData(record);
- Relation reln;
- Buffer buffer;
- Page page;
+ xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
+ Relation reln;
+ Buffer buffer;
+ Page page;
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2266,15 +2270,15 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfHeapClean)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- char *unend;
- ItemId lp;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ char *unend;
+ ItemId lp;
- memcpy(unbuf, (char*)xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
+ memcpy(unbuf, (char *) xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
unend = unbuf + (record->xl_len - SizeOfHeapClean);
- while((char*)unused < unend)
+ while ((char *) unused < unend)
{
lp = ((PageHeader) page)->pd_linp + *unused;
lp->lp_flags &= ~LP_USED;
@@ -2289,13 +2293,13 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_delete *xlrec = (xl_heap_delete*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId lp = NULL;
- HeapTupleHeader htup;
+ xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2303,7 +2307,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
+ buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "heap_delete_%sdo: no block", (redo) ? "re" : "un");
@@ -2320,7 +2324,8 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
return;
}
}
- else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_delete_undo: bad page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@@ -2337,7 +2342,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_xmax = record->xl_xid;
htup->t_cmax = FirstCommandId;
htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
- HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+ HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
@@ -2350,12 +2355,12 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_insert *xlrec = (xl_heap_insert*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- HeapTupleHeader htup;
+ xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ HeapTupleHeader htup;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2363,7 +2368,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer((redo) ? true : false, reln,
+ buffer = XLogReadBuffer((redo) ? true : false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
return;
@@ -2375,9 +2380,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (redo)
{
- char tbuf[MaxTupleSize];
- xl_heap_header xlhdr;
- uint32 newlen;
+ char tbuf[MaxTupleSize];
+ xl_heap_header xlhdr;
+ uint32 newlen;
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
@@ -2396,9 +2401,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_insert_redo: invalid max offset number");
newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
- memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
- memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
- (char*)xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
+ memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
+ memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
+ (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@@ -2408,19 +2413,20 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_cmin = FirstCommandId;
htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
-
- offnum = PageAddItem(page, (Item)htup, newlen, offnum,
- LP_USED | OverwritePageMode);
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum,
+ LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_insert_redo: failed to add tuple");
PageSetLSN(page, lsn);
- PageSetSUI(page, ThisStartUpID); /* prev sui */
+ PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
return;
}
/* undo insert */
- if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_insert_undo: bad page LSN");
elog(STOP, "heap_insert_undo: unimplemented");
@@ -2432,16 +2438,16 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
{
- xl_heap_update *xlrec = (xl_heap_update*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- bool samepage =
- (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
- Page page;
- OffsetNumber offnum;
- ItemId lp = NULL;
- HeapTupleHeader htup;
+ xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ bool samepage =
+ (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
if (!RelationIsValid(reln))
return;
@@ -2451,7 +2457,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
/* Deal with old tuple version */
- buffer = XLogReadBuffer(false, reln,
+ buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "heap_update_%sdo: no block", (redo) ? "re" : "un");
@@ -2470,7 +2476,8 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
goto newt;
}
}
- else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_update_undo: bad old tuple page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@@ -2487,7 +2494,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
if (move)
{
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
- htup->t_infomask &=
+ htup->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
htup->t_infomask |= HEAP_MOVED_OFF;
}
@@ -2496,7 +2503,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
htup->t_xmax = record->xl_xid;
htup->t_cmax = FirstCommandId;
htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
- HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+ HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
}
if (samepage)
goto newsame;
@@ -2514,11 +2521,11 @@ newt:;
if (redo &&
((record->xl_info & XLR_BKP_BLOCK_2) ||
- ((record->xl_info & XLR_BKP_BLOCK_1) && samepage)))
+ ((record->xl_info & XLR_BKP_BLOCK_1) && samepage)))
return;
- buffer = XLogReadBuffer((redo) ? true : false, reln,
- ItemPointerGetBlockNumber(&(xlrec->newtid)));
+ buffer = XLogReadBuffer((redo) ? true : false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->newtid)));
if (!BufferIsValid(buffer))
return;
@@ -2531,10 +2538,10 @@ newsame:;
if (redo)
{
- char tbuf[MaxTupleSize];
- xl_heap_header xlhdr;
- int hsize;
- uint32 newlen;
+ char tbuf[MaxTupleSize];
+ xl_heap_header xlhdr;
+ int hsize;
+ uint32 newlen;
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
@@ -2557,9 +2564,9 @@ newsame:;
hsize += (2 * sizeof(TransactionId));
newlen = record->xl_len - hsize;
- memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
- memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
- (char*)xlrec + hsize, newlen);
+ memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
+ memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
+ (char *) xlrec + hsize, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@@ -2568,13 +2575,13 @@ newsame:;
if (move)
{
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
- memcpy(&(htup->t_xmax), (char*)xlrec + hsize, sizeof(TransactionId));
- memcpy(&(htup->t_xmin),
- (char*)xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
+ memcpy(&(htup->t_xmax), (char *) xlrec + hsize, sizeof(TransactionId));
+ memcpy(&(htup->t_xmin),
+ (char *) xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
htup->t_infomask = xlhdr.mask;
- htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
- HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
+ htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
+ HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
htup->t_infomask |= HEAP_MOVED_IN;
}
else
@@ -2584,19 +2591,20 @@ newsame:;
htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
}
-
- offnum = PageAddItem(page, (Item)htup, newlen, offnum,
- LP_USED | OverwritePageMode);
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum,
+ LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_update_redo: failed to add tuple");
PageSetLSN(page, lsn);
- PageSetSUI(page, ThisStartUpID); /* prev sui */
+ PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
return;
}
/* undo */
- if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_update_undo: bad new tuple page LSN");
elog(STOP, "heap_update_undo: unimplemented");
@@ -2606,19 +2614,19 @@ newsame:;
static void
_heap_unlock_tuple(void *data)
{
- xl_heaptid *xltid = (xl_heaptid*) data;
- Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId lp;
- HeapTupleHeader htup;
+ xl_heaptid *xltid = (xl_heaptid *) data;
+ Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp;
+ HeapTupleHeader htup;
if (!RelationIsValid(reln))
elog(STOP, "_heap_unlock_tuple: can't open relation");
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xltid->tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xltid->tid)));
if (!BufferIsValid(buffer))
elog(STOP, "_heap_unlock_tuple: can't read buffer");
@@ -2636,8 +2644,8 @@ _heap_unlock_tuple(void *data)
htup = (HeapTupleHeader) PageGetItem(page, lp);
- if (htup->t_xmax != GetCurrentTransactionId() ||
- htup->t_cmax != GetCurrentCommandId())
+ if (htup->t_xmax != GetCurrentTransactionId() ||
+ htup->t_cmax != GetCurrentCommandId())
elog(STOP, "_heap_unlock_tuple: invalid xmax/cmax in rollback");
htup->t_infomask &= ~HEAP_XMAX_UNLOGGED;
htup->t_infomask |= HEAP_XMAX_INVALID;
@@ -2645,9 +2653,10 @@ _heap_unlock_tuple(void *data)
return;
}
-void heap_redo(XLogRecPtr lsn, XLogRecord *record)
+void
+heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
@@ -2664,9 +2673,10 @@ void heap_redo(XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_redo: unknown op code %u", info);
}
-void heap_undo(XLogRecPtr lsn, XLogRecord *record)
+void
+heap_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
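The two hunks above give heap_redo and heap_undo proper function-definition layout; both mask the record's info byte down to an opcode and route it to the per-operation replay routine. A standalone sketch of that dispatch pattern; the mask and opcode values below are illustrative stand-ins, not the real XLOG_HEAP_* constants:

/*
 * Sketch: resource-manager style dispatch on the opcode bits of a WAL
 * record's info byte; the low bits remain free for per-record flags.
 */
#include <stdio.h>

#define DEMO_OPMASK		0x70	/* stand-in for XLOG_HEAP_OPMASK */
#define DEMO_OP_INSERT	0x00
#define DEMO_OP_DELETE	0x10
#define DEMO_OP_UPDATE	0x20

static void
demo_redo(unsigned char xl_info)
{
	unsigned char info = xl_info & DEMO_OPMASK;

	if (info == DEMO_OP_INSERT)
		printf("redo insert\n");
	else if (info == DEMO_OP_DELETE)
		printf("redo delete\n");
	else if (info == DEMO_OP_UPDATE)
		printf("redo update\n");
	else
		printf("unknown op code %u\n", info);
}

int
main(void)
{
	demo_redo(DEMO_OP_DELETE | 0x01);	/* low bits carry flags only */
	return 0;
}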
@@ -2687,46 +2697,50 @@ static void
out_target(char *buf, xl_heaptid *target)
{
sprintf(buf + strlen(buf), "node %u/%u; tid %u/%u",
- target->node.tblNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.tblNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
-
+
void
-heap_desc(char *buf, uint8 xl_info, char* rec)
+heap_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
{
- xl_heap_insert *xlrec = (xl_heap_insert*) rec;
+ xl_heap_insert *xlrec = (xl_heap_insert *) rec;
+
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_DELETE)
{
- xl_heap_delete *xlrec = (xl_heap_delete*) rec;
+ xl_heap_delete *xlrec = (xl_heap_delete *) rec;
+
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_UPDATE || info == XLOG_HEAP_MOVE)
{
- xl_heap_update *xlrec = (xl_heap_update*) rec;
+ xl_heap_update *xlrec = (xl_heap_update *) rec;
+
if (info == XLOG_HEAP_UPDATE)
strcat(buf, "update: ");
else
strcat(buf, "move: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; new %u/%u",
- ItemPointerGetBlockNumber(&(xlrec->newtid)),
- ItemPointerGetOffsetNumber(&(xlrec->newtid)));
+ ItemPointerGetBlockNumber(&(xlrec->newtid)),
+ ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
else if (info == XLOG_HEAP_CLEAN)
{
- xl_heap_clean *xlrec = (xl_heap_clean*) rec;
+ xl_heap_clean *xlrec = (xl_heap_clean *) rec;
+
sprintf(buf + strlen(buf), "clean: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
+ xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
}
else
strcat(buf, "UNKNOWN");
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 64e7262e868..94dedbf87b9 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.35 2001/01/24 19:42:48 momjian Exp $
+ * $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
#include "access/hio.h"
/*
- * RelationPutHeapTuple - place tuple at specified page
+ * RelationPutHeapTuple - place tuple at specified page
*
* !!! ELOG(ERROR) IS DISALLOWED HERE !!!
*
@@ -69,7 +69,7 @@ RelationPutHeapTuple(Relation relation,
*
* Returns (locked) buffer with free space >= given len.
*
- * Note that we use LockPage to lock relation for extension. We can
+ * Note that we use LockPage to lock relation for extension. We can
* do this as long as in all other places we use page-level locking
* for indices only. Alternatively, we could define pseudo-table as
* we do for transactions with XactLockTable.
@@ -92,7 +92,7 @@ RelationGetBufferForTuple(Relation relation, Size len)
*/
if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %lu, max size %ld",
- (unsigned long)len, MaxTupleSize);
+ (unsigned long) len, MaxTupleSize);
if (!relation->rd_myxactonly)
LockPage(relation, 0, ExclusiveLock);
@@ -140,13 +140,13 @@ RelationGetBufferForTuple(Relation relation, Size len)
{
/* We should not get here given the test at the top */
elog(STOP, "Tuple is too big: size %lu",
- (unsigned long)len);
+ (unsigned long) len);
}
}
if (!relation->rd_myxactonly)
UnlockPage(relation, 0, ExclusiveLock);
- return(buffer);
+ return (buffer);
}
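As the comment in RelationGetBufferForTuple above notes, page 0 of the relation doubles as a lock tag so that only one backend extends the file at a time, while ordinary page-level locks stay reserved for indices. A standalone sketch of that extension-lock pattern; a pthread mutex stands in for LockPage/UnlockPage, and the block counter for the relation's physical length:

/*
 * Sketch: serialize relation extension behind a single lock while
 * leaving ordinary page access unaffected.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t extension_lock = PTHREAD_MUTEX_INITIALIZER;
static int	nblocks = 10;		/* stand-in for the relation's length */

static int
demo_extend_relation(void)
{
	int			new_block;

	pthread_mutex_lock(&extension_lock);	/* LockPage(rel, 0, ...) */
	new_block = nblocks++;					/* add one page at the end */
	pthread_mutex_unlock(&extension_lock);	/* UnlockPage(rel, 0, ...) */
	return new_block;
}

int
main(void)
{
	printf("extended to block %d\n", demo_extend_relation());
	return 0;
}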
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index a3cf6ae7116..d0e60681e77 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.17 2001/02/15 20:57:01 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -41,12 +41,12 @@
#undef TOAST_DEBUG
-static void toast_delete(Relation rel, HeapTuple oldtup);
-static void toast_delete_datum(Relation rel, Datum value);
-static void toast_insert_or_update(Relation rel, HeapTuple newtup,
- HeapTuple oldtup);
-static Datum toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value);
-static varattrib *toast_fetch_datum(varattrib *attr);
+static void toast_delete(Relation rel, HeapTuple oldtup);
+static void toast_delete_datum(Relation rel, Datum value);
+static void toast_insert_or_update(Relation rel, HeapTuple newtup,
+ HeapTuple oldtup);
+static Datum toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value);
+static varattrib *toast_fetch_datum(varattrib *attr);
/* ----------
@@ -70,14 +70,14 @@ heap_tuple_toast_attrs(Relation rel, HeapTuple newtup, HeapTuple oldtup)
/* ----------
* heap_tuple_fetch_attr -
*
- * Public entry point to get back a toasted value
+ * Public entry point to get back a toasted value from
* external storage (possibly still in compressed format).
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_fetch_attr(varattrib *attr)
{
- varattrib *result;
+ varattrib *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -94,7 +94,7 @@ heap_tuple_fetch_attr(varattrib *attr)
* ----------
*/
result = attr;
- }
+ }
return result;
}
@@ -107,10 +107,10 @@ heap_tuple_fetch_attr(varattrib *attr)
* or external storage.
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_untoast_attr(varattrib *attr)
{
- varattrib *result;
+ varattrib *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -121,14 +121,14 @@ heap_tuple_untoast_attr(varattrib *attr)
* Fetch it from the toast heap and decompress.
* ----------
*/
- varattrib *tmp;
+ varattrib *tmp;
tmp = toast_fetch_datum(attr);
- result = (varattrib *)palloc(attr->va_content.va_external.va_rawsize
- + VARHDRSZ);
+ result = (varattrib *) palloc(attr->va_content.va_external.va_rawsize
+ + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_external.va_rawsize
- + VARHDRSZ;
- pglz_decompress((PGLZ_Header *)tmp, VARATT_DATA(result));
+ + VARHDRSZ;
+ pglz_decompress((PGLZ_Header *) tmp, VARATT_DATA(result));
pfree(tmp);
}
@@ -147,11 +147,11 @@ heap_tuple_untoast_attr(varattrib *attr)
* This is a compressed value inside of the main tuple
* ----------
*/
- result = (varattrib *)palloc(attr->va_content.va_compressed.va_rawsize
- + VARHDRSZ);
+ result = (varattrib *) palloc(attr->va_content.va_compressed.va_rawsize
+ + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_compressed.va_rawsize
- + VARHDRSZ;
- pglz_decompress((PGLZ_Header *)attr, VARATT_DATA(result));
+ + VARHDRSZ;
+ pglz_decompress((PGLZ_Header *) attr, VARATT_DATA(result));
}
else
/* ----------
@@ -173,21 +173,21 @@ heap_tuple_untoast_attr(varattrib *attr)
static void
toast_delete(Relation rel, HeapTuple oldtup)
{
- TupleDesc tupleDesc;
- Form_pg_attribute *att;
- int numAttrs;
- int i;
- Datum value;
- bool isnull;
+ TupleDesc tupleDesc;
+ Form_pg_attribute *att;
+ int numAttrs;
+ int i;
+ Datum value;
+ bool isnull;
/* ----------
* Get the tuple descriptor, the number of attributes, and the
* attribute descriptors.
* ----------
*/
- tupleDesc = rel->rd_att;
- numAttrs = tupleDesc->natts;
- att = tupleDesc->attrs;
+ tupleDesc = rel->rd_att;
+ numAttrs = tupleDesc->natts;
+ att = tupleDesc->attrs;
/* ----------
* Check for external stored attributes and delete them
@@ -216,35 +216,35 @@ toast_delete(Relation rel, HeapTuple oldtup)
static void
toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
{
- TupleDesc tupleDesc;
- Form_pg_attribute *att;
- int numAttrs;
- int i;
- bool old_isnull;
- bool new_isnull;
-
- bool need_change = false;
- bool need_free = false;
- bool need_delold = false;
- bool has_nulls = false;
-
- Size maxDataLen;
-
- char toast_action[MaxHeapAttributeNumber];
- char toast_nulls[MaxHeapAttributeNumber];
- Datum toast_values[MaxHeapAttributeNumber];
- int32 toast_sizes[MaxHeapAttributeNumber];
- bool toast_free[MaxHeapAttributeNumber];
- bool toast_delold[MaxHeapAttributeNumber];
+ TupleDesc tupleDesc;
+ Form_pg_attribute *att;
+ int numAttrs;
+ int i;
+ bool old_isnull;
+ bool new_isnull;
+
+ bool need_change = false;
+ bool need_free = false;
+ bool need_delold = false;
+ bool has_nulls = false;
+
+ Size maxDataLen;
+
+ char toast_action[MaxHeapAttributeNumber];
+ char toast_nulls[MaxHeapAttributeNumber];
+ Datum toast_values[MaxHeapAttributeNumber];
+ int32 toast_sizes[MaxHeapAttributeNumber];
+ bool toast_free[MaxHeapAttributeNumber];
+ bool toast_delold[MaxHeapAttributeNumber];
/* ----------
* Get the tuple descriptor, the number of attributes, the
* attribute descriptors, and the location of the tuple values.
* ----------
*/
- tupleDesc = rel->rd_att;
- numAttrs = tupleDesc->natts;
- att = tupleDesc->attrs;
+ tupleDesc = rel->rd_att;
+ numAttrs = tupleDesc->natts;
+ att = tupleDesc->attrs;
/* ----------
* Then collect information about the values given
@@ -255,14 +255,14 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* 'x' incompressible, but OK to move off
* ----------
*/
- memset(toast_action, ' ', numAttrs * sizeof(char));
- memset(toast_nulls, ' ', numAttrs * sizeof(char));
- memset(toast_free, 0, numAttrs * sizeof(bool));
- memset(toast_delold, 0, numAttrs * sizeof(bool));
+ memset(toast_action, ' ', numAttrs * sizeof(char));
+ memset(toast_nulls, ' ', numAttrs * sizeof(char));
+ memset(toast_free, 0, numAttrs * sizeof(bool));
+ memset(toast_delold, 0, numAttrs * sizeof(bool));
for (i = 0; i < numAttrs; i++)
{
- varattrib *old_value;
- varattrib *new_value;
+ varattrib *old_value;
+ varattrib *new_value;
if (oldtup != NULL)
{
@@ -270,25 +270,25 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For UPDATE get the old and new values of this attribute
* ----------
*/
- old_value = (varattrib *)DatumGetPointer(
- heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
- toast_values[i] =
- heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
- new_value = (varattrib *)DatumGetPointer(toast_values[i]);
+ old_value = (varattrib *) DatumGetPointer(
+ heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
+ toast_values[i] =
+ heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
+ new_value = (varattrib *) DatumGetPointer(toast_values[i]);
/* ----------
* If the old value is an external stored one, check if it
* has changed so we have to delete it later.
* ----------
*/
- if (!old_isnull && att[i]->attlen == -1 &&
- VARATT_IS_EXTERNAL(old_value))
+ if (!old_isnull && att[i]->attlen == -1 &&
+ VARATT_IS_EXTERNAL(old_value))
{
if (new_isnull || !VARATT_IS_EXTERNAL(new_value) ||
- old_value->va_content.va_external.va_rowid !=
- new_value->va_content.va_external.va_rowid ||
- old_value->va_content.va_external.va_attno !=
- new_value->va_content.va_external.va_attno)
+ old_value->va_content.va_external.va_rowid !=
+ new_value->va_content.va_external.va_rowid ||
+ old_value->va_content.va_external.va_attno !=
+ new_value->va_content.va_external.va_attno)
{
/* ----------
* The old external store value isn't needed any
@@ -318,8 +318,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For INSERT simply get the new value
* ----------
*/
- toast_values[i] =
- heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
+ toast_values[i] =
+ heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
}
/* ----------
@@ -356,7 +356,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i])))
{
toast_values[i] = PointerGetDatum(heap_tuple_untoast_attr(
- (varattrib *)DatumGetPointer(toast_values[i])));
+ (varattrib *) DatumGetPointer(toast_values[i])));
toast_free[i] = true;
need_change = true;
need_free = true;
@@ -366,7 +366,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Remember the size of this attribute
* ----------
*/
- toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
+ toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
}
else
{
@@ -375,7 +375,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
toast_action[i] = 'p';
- toast_sizes[i] = att[i]->attlen;
+ toast_sizes[i] = att[i]->attlen;
}
}
@@ -384,7 +384,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
*
* 1: Inline compress attributes with attstorage 'x'
* 2: Store attributes with attstorage 'x' or 'e' external
- * 3: Inline compress attributes with attstorage 'm'
+ * 3: Inline compress attributes with attstorage 'm'
* 4: Store attributes with attstorage 'm' external
* ----------
*/
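The four passes listed above share one shape: while the tuple is still over the limit, pick the biggest remaining candidate attribute and shrink it, by inline compression in passes 1 and 3 and by moving it to external storage in passes 2 and 4. A standalone sketch of that loop skeleton; plain integer sizes stand in for the real datum handling, and halving stands in for pglz compression or external storage:

/*
 * Sketch: repeatedly shrink the biggest attribute until the tuple
 * fits.  Each real pass also filters candidates by attstorage.
 */
#include <stdio.h>

#define NATTRS 3

static int
demo_tuple_size(const int *sizes)
{
	int			total = 0,
				i;

	for (i = 0; i < NATTRS; i++)
		total += sizes[i];
	return total;
}

int
main(void)
{
	int			sizes[NATTRS] = {400, 1500, 300};
	int			max_len = 1000;

	while (demo_tuple_size(sizes) > max_len)
	{
		int			biggest = 0,
					i;

		for (i = 1; i < NATTRS; i++)	/* find the biggest attribute */
			if (sizes[i] > sizes[biggest])
				biggest = i;

		sizes[biggest] /= 2;	/* stand-in for compress/store external */
		printf("shrank attr %d, tuple now %d bytes\n",
			   biggest, demo_tuple_size(sizes));
	}
	return 0;
}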
@@ -398,12 +398,12 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen)
+ maxDataLen)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
- Datum new_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
+ Datum new_value;
/* ----------
* Search for the biggest yet uncompressed internal attribute
@@ -420,7 +420,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -431,24 +431,28 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Attempt to compress it inline
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- new_value = toast_compress_datum(old_value);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ new_value = toast_compress_datum(old_value);
if (DatumGetPointer(new_value) != NULL)
{
/* successful compression */
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_values[i] = new_value;
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
- need_change = true;
- need_free = true;
+ toast_values[i] = new_value;
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ need_change = true;
+ need_free = true;
}
else
{
- /* incompressible data, ignore on subsequent compression passes */
+
+ /*
+ * incompressible data, ignore on subsequent compression
+ * passes
+ */
toast_action[i] = 'x';
}
}
@@ -459,11 +463,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
+ maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
/* ----------
* Search for the biggest yet inlined attribute with
@@ -481,7 +485,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -492,21 +496,21 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Store this external
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- toast_action[i] = 'p';
- toast_values[i] = toast_save_datum(rel,
- newtup->t_data->t_oid,
- i + 1,
- toast_values[i]);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ toast_action[i] = 'p';
+ toast_values[i] = toast_save_datum(rel,
+ newtup->t_data->t_oid,
+ i + 1,
+ toast_values[i]);
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
need_change = true;
- need_free = true;
+ need_free = true;
}
/* ----------
@@ -515,12 +519,12 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen)
+ maxDataLen)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
- Datum new_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
+ Datum new_value;
/* ----------
* Search for the biggest yet uncompressed internal attribute
@@ -537,7 +541,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -548,24 +552,28 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Attempt to compress it inline
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- new_value = toast_compress_datum(old_value);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ new_value = toast_compress_datum(old_value);
if (DatumGetPointer(new_value) != NULL)
{
/* successful compression */
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_values[i] = new_value;
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
- need_change = true;
- need_free = true;
+ toast_values[i] = new_value;
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ need_change = true;
+ need_free = true;
}
else
{
- /* incompressible data, ignore on subsequent compression passes */
+
+ /*
+ * incompressible data, ignore on subsequent compression
+ * passes
+ */
toast_action[i] = 'x';
}
}
@@ -575,11 +583,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
+ maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
/* ----------
* Search for the biggest yet inlined attribute with
@@ -597,7 +605,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -608,21 +616,21 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Store this external
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- toast_action[i] = 'p';
- toast_values[i] = toast_save_datum(rel,
- newtup->t_data->t_oid,
- i + 1,
- toast_values[i]);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ toast_action[i] = 'p';
+ toast_values[i] = toast_save_datum(rel,
+ newtup->t_data->t_oid,
+ i + 1,
+ toast_values[i]);
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
need_change = true;
- need_free = true;
+ need_free = true;
}
/* ----------
@@ -632,10 +640,10 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
*/
if (need_change)
{
- char *new_data;
- int32 new_len;
- MemoryContext oldcxt;
- HeapTupleHeader olddata;
+ char *new_data;
+ int32 new_len;
+ MemoryContext oldcxt;
+ HeapTupleHeader olddata;
/* ----------
* Calculate the new size of the tuple
@@ -662,24 +670,24 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff);
- newtup->t_data = (HeapTupleHeader)new_data;
+ newtup->t_data = (HeapTupleHeader) new_data;
newtup->t_len = new_len;
- DataFill((char *)(MAXALIGN((long)new_data +
- offsetof(HeapTupleHeaderData, t_bits) +
- ((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
- tupleDesc,
- toast_values,
- toast_nulls,
- &(newtup->t_data->t_infomask),
- has_nulls ? newtup->t_data->t_bits : NULL);
+ DataFill((char *) (MAXALIGN((long) new_data +
+ offsetof(HeapTupleHeaderData, t_bits) +
+ ((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
+ tupleDesc,
+ toast_values,
+ toast_nulls,
+ &(newtup->t_data->t_infomask),
+ has_nulls ? newtup->t_data->t_bits : NULL);
/* ----------
* In case we modified a previously modified tuple again,
* free the memory from the previous run
* ----------
*/
- if ((char *)olddata != ((char *)newtup + HEAPTUPLESIZE))
+ if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata);
/* ----------
@@ -723,7 +731,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
Datum
toast_compress_datum(Datum value)
{
- varattrib *tmp;
+ varattrib *tmp;
tmp = (varattrib *) palloc(sizeof(PGLZ_Header) + VARATT_SIZE(value));
pglz_compress(VARATT_DATA(value), VARATT_SIZE(value) - VARHDRSZ,
@@ -754,45 +762,45 @@ toast_compress_datum(Datum value)
static Datum
toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
{
- Relation toastrel;
- Relation toastidx;
- HeapTuple toasttup;
- InsertIndexResult idxres;
- TupleDesc toasttupDesc;
- Datum t_values[3];
- char t_nulls[3];
- varattrib *result;
- char chunk_data[VARHDRSZ + TOAST_MAX_CHUNK_SIZE];
- int32 chunk_size;
- int32 chunk_seq = 0;
- char *data_p;
- int32 data_todo;
+ Relation toastrel;
+ Relation toastidx;
+ HeapTuple toasttup;
+ InsertIndexResult idxres;
+ TupleDesc toasttupDesc;
+ Datum t_values[3];
+ char t_nulls[3];
+ varattrib *result;
+ char chunk_data[VARHDRSZ + TOAST_MAX_CHUNK_SIZE];
+ int32 chunk_size;
+ int32 chunk_seq = 0;
+ char *data_p;
+ int32 data_todo;
/* ----------
* Create the varattrib reference
* ----------
*/
- result = (varattrib *)palloc(sizeof(varattrib));
+ result = (varattrib *) palloc(sizeof(varattrib));
- result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
+ result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
if (VARATT_IS_COMPRESSED(value))
{
result->va_header |= VARATT_FLAG_COMPRESSED;
- result->va_content.va_external.va_rawsize =
- ((varattrib *)value)->va_content.va_compressed.va_rawsize;
+ result->va_content.va_external.va_rawsize =
+ ((varattrib *) value)->va_content.va_compressed.va_rawsize;
}
else
result->va_content.va_external.va_rawsize = VARATT_SIZE(value);
-
- result->va_content.va_external.va_extsize =
- VARATT_SIZE(value) - VARHDRSZ;
- result->va_content.va_external.va_valueid = newoid();
- result->va_content.va_external.va_toastrelid =
- rel->rd_rel->reltoastrelid;
- result->va_content.va_external.va_toastidxid =
- rel->rd_rel->reltoastidxid;
- result->va_content.va_external.va_rowid = mainoid;
- result->va_content.va_external.va_attno = attno;
+
+ result->va_content.va_external.va_extsize =
+ VARATT_SIZE(value) - VARHDRSZ;
+ result->va_content.va_external.va_valueid = newoid();
+ result->va_content.va_external.va_toastrelid =
+ rel->rd_rel->reltoastrelid;
+ result->va_content.va_external.va_toastidxid =
+ rel->rd_rel->reltoastidxid;
+ result->va_content.va_external.va_rowid = mainoid;
+ result->va_content.va_external.va_attno = attno;
/* ----------
* Initialize constant parts of the tuple data
@@ -808,8 +816,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
* Get the data to process
* ----------
*/
- data_p = VARATT_DATA(value);
- data_todo = VARATT_SIZE(value) - VARHDRSZ;
+ data_p = VARATT_DATA(value);
+ data_todo = VARATT_SIZE(value) - VARHDRSZ;
/* ----------
* Open the toast relation
@@ -818,9 +826,9 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(rel->rd_rel->reltoastidxid);
-
+
/* ----------
- * Split up the item into chunks
+ * Split up the item into chunks
* ----------
*/
while (data_todo > 0)
@@ -848,8 +856,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
*/
heap_insert(toastrel, toasttup);
idxres = index_insert(toastidx, t_values, t_nulls,
- &(toasttup->t_self),
- toastrel);
+ &(toasttup->t_self),
+ toastrel);
if (idxres == NULL)
elog(ERROR, "Failed to insert index entry for TOAST tuple");
@@ -888,14 +896,14 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
static void
toast_delete_datum(Relation rel, Datum value)
{
- register varattrib *attr = (varattrib *)value;
- Relation toastrel;
- Relation toastidx;
- ScanKeyData toastkey;
- IndexScanDesc toastscan;
- HeapTupleData toasttup;
- RetrieveIndexResult indexRes;
- Buffer buffer;
+ register varattrib *attr = (varattrib *) value;
+ Relation toastrel;
+ Relation toastidx;
+ ScanKeyData toastkey;
+ IndexScanDesc toastscan;
+ HeapTupleData toasttup;
+ RetrieveIndexResult indexRes;
+ Buffer buffer;
if (!VARATT_IS_EXTERNAL(attr))
return;
@@ -904,8 +912,8 @@ toast_delete_datum(Relation rel, Datum value)
* Open the toast relation and its index
* ----------
*/
- toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
- RowExclusiveLock);
+ toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
+ RowExclusiveLock);
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
/* ----------
@@ -913,10 +921,10 @@ toast_delete_datum(Relation rel, Datum value)
* ----------
*/
ScanKeyEntryInitialize(&toastkey,
- (bits16) 0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ (bits16) 0,
+ (AttrNumber) 1,
+ (RegProcedure) F_OIDEQ,
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/* ----------
* Read the chunks by index
@@ -961,36 +969,36 @@ toast_delete_datum(Relation rel, Datum value)
static varattrib *
toast_fetch_datum(varattrib *attr)
{
- Relation toastrel;
- Relation toastidx;
- ScanKeyData toastkey;
- IndexScanDesc toastscan;
- HeapTupleData toasttup;
- HeapTuple ttup;
- TupleDesc toasttupDesc;
- RetrieveIndexResult indexRes;
- Buffer buffer;
-
- varattrib *result;
- int32 ressize;
- int32 residx;
- int numchunks;
- Pointer chunk;
- bool isnull;
- int32 chunksize;
-
- char *chunks_found;
- char *chunks_expected;
+ Relation toastrel;
+ Relation toastidx;
+ ScanKeyData toastkey;
+ IndexScanDesc toastscan;
+ HeapTupleData toasttup;
+ HeapTuple ttup;
+ TupleDesc toasttupDesc;
+ RetrieveIndexResult indexRes;
+ Buffer buffer;
+
+ varattrib *result;
+ int32 ressize;
+ int32 residx;
+ int numchunks;
+ Pointer chunk;
+ bool isnull;
+ int32 chunksize;
+
+ char *chunks_found;
+ char *chunks_expected;
ressize = attr->va_content.va_external.va_extsize;
- numchunks = ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
+ numchunks = ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
- chunks_found = palloc(numchunks);
+ chunks_found = palloc(numchunks);
chunks_expected = palloc(numchunks);
- memset(chunks_found, 0, numchunks);
+ memset(chunks_found, 0, numchunks);
memset(chunks_expected, 1, numchunks);
- result = (varattrib *)palloc(ressize + VARHDRSZ);
+ result = (varattrib *) palloc(ressize + VARHDRSZ);
VARATT_SIZEP(result) = ressize + VARHDRSZ;
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
@@ -999,8 +1007,8 @@ toast_fetch_datum(varattrib *attr)
* Open the toast relation and its index
* ----------
*/
- toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
- AccessShareLock);
+ toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
+ AccessShareLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
@@ -1009,10 +1017,10 @@ toast_fetch_datum(varattrib *attr)
* ----------
*/
ScanKeyEntryInitialize(&toastkey,
- (bits16) 0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ (bits16) 0,
+ (AttrNumber) 1,
+ (RegProcedure) F_OIDEQ,
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/* ----------
* Read the chunks by index
@@ -1049,7 +1057,7 @@ toast_fetch_datum(varattrib *attr)
elog(ERROR, "unexpected chunk number %d for toast value %d",
residx,
attr->va_content.va_external.va_valueid);
- if (residx < numchunks-1)
+ if (residx < numchunks - 1)
{
if (chunksize != TOAST_MAX_CHUNK_SIZE)
elog(ERROR, "unexpected chunk size %d in chunk %d for toast value %d",
@@ -1072,7 +1080,7 @@ toast_fetch_datum(varattrib *attr)
* Copy the data into proper place in our result
* ----------
*/
- memcpy(((char *)VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
+ memcpy(((char *) VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
VARATT_DATA(chunk),
chunksize);
@@ -1085,7 +1093,7 @@ toast_fetch_datum(varattrib *attr)
*/
if (memcmp(chunks_found, chunks_expected, numchunks) != 0)
elog(ERROR, "not all toast chunks found for value %d",
- attr->va_content.va_external.va_valueid);
+ attr->va_content.va_external.va_valueid);
pfree(chunks_expected);
pfree(chunks_found);
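toast_fetch_datum above derives the chunk count from the external size with ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1, requires every chunk but the last to be full-sized, and uses a found-bitmap compared against an all-ones expected map to catch missing chunks. A standalone sketch of that bookkeeping; the chunk size below is an illustrative stand-in:

/*
 * Sketch: chunk-count arithmetic and completeness checking for a
 * value split into fixed-size chunks.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_CHUNK_SIZE 2000	/* stand-in for TOAST_MAX_CHUNK_SIZE */

int
main(void)
{
	int			ressize = 4500;	/* external value size */
	int			numchunks = ((ressize - 1) / DEMO_CHUNK_SIZE) + 1;
	char		found[8];
	int			residx;

	memset(found, 0, sizeof(found));

	/* expected size of each chunk: full except possibly the last */
	for (residx = 0; residx < numchunks; residx++)
	{
		int			expected = (residx < numchunks - 1) ?
		DEMO_CHUNK_SIZE : ressize - residx * DEMO_CHUNK_SIZE;

		printf("chunk %d: %d bytes\n", residx, expected);	/* 2000,2000,500 */
		found[residx] = 1;
	}

	/* completeness check, as the memcmp against chunks_expected does */
	for (residx = 0; residx < numchunks; residx++)
		if (!found[residx])
			printf("missing chunk %d!\n", residx);
	return 0;
}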
diff --git a/src/backend/access/index/istrat.c b/src/backend/access/index/istrat.c
index 1cc2c42c01c..3b016392068 100644
--- a/src/backend/access/index/istrat.c
+++ b/src/backend/access/index/istrat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.48 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.49 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -239,8 +239,8 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE:
- result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
- left, right));
+ result = !DatumGetBool(FunctionCall2(&entry->sk_func,
+ left, right));
break;
case SK_COMMUTE:
@@ -249,8 +249,8 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE | SK_COMMUTE:
- result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
- right, left));
+ result = !DatumGetBool(FunctionCall2(&entry->sk_func,
+ right, left));
break;
default:
@@ -263,6 +263,7 @@ StrategyTermEvaluate(StrategyTerm term,
return result;
}
+
#endif
/* ----------------
@@ -465,6 +466,7 @@ RelationInvokeStrategy(Relation relation,
}
+
#endif
/* ----------------
@@ -519,7 +521,7 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
if (!RegProcedureIsValid(entry->sk_procedure))
elog(ERROR,
- "OperatorRelationFillScanKeyEntry: no procedure for operator %u",
+ "OperatorRelationFillScanKeyEntry: no procedure for operator %u",
operatorObjectId);
fmgr_info(entry->sk_procedure, &entry->sk_func);
@@ -597,9 +599,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
}
if (cachesearch)
- {
ReleaseSysCache(tuple);
- }
else
{
heap_endscan(scan);
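StrategyTermEvaluate above has four evaluation modes for a catalogued operator: as-is, negated, commuted (arguments swapped), or both. A standalone sketch of those cases; the flag values are illustrative, not the real SK_* bits, and a plain less-than function stands in for the fmgr operator call:

/*
 * Sketch: evaluate a scan-key operator under negate/commute flags.
 */
#include <stdbool.h>
#include <stdio.h>

#define SK_NEGATE	0x1			/* illustrative flag values */
#define SK_COMMUTE	0x2

static bool
demo_lt(int a, int b)
{
	return a < b;				/* stand-in for the catalogued operator */
}

static bool
demo_evaluate(unsigned flags, int left, int right)
{
	switch (flags & (SK_NEGATE | SK_COMMUTE))
	{
		case 0:
			return demo_lt(left, right);
		case SK_NEGATE:
			return !demo_lt(left, right);
		case SK_COMMUTE:
			return demo_lt(right, left);
		case SK_NEGATE | SK_COMMUTE:
			return !demo_lt(right, left);
	}
	return false;				/* not reached */
}

int
main(void)
{
	/* negate+commute of less-than yields <=, so 3 <= 5 prints 1 */
	printf("%d\n", demo_evaluate(SK_NEGATE | SK_COMMUTE, 3, 5));
	return 0;
}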
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 435a7f72dde..fc85906d9b2 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.40 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.41 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
*
@@ -150,8 +150,8 @@ btoidvectorcmp(PG_FUNCTION_ARGS)
Datum
btabstimecmp(PG_FUNCTION_ARGS)
{
- AbsoluteTime a = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime b = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime a = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime b = PG_GETARG_ABSOLUTETIME(1);
if (AbsoluteTimeIsBefore(a, b))
PG_RETURN_INT32(-1);
@@ -236,9 +236,10 @@ bttextcmp(PG_FUNCTION_ARGS)
if (res == 0 && VARSIZE(a) != VARSIZE(b))
{
+
/*
- * The two strings are the same in the first len bytes,
- * and they are of different lengths.
+ * The two strings are the same in the first len bytes, and they
+ * are of different lengths.
*/
if (VARSIZE(a) < VARSIZE(b))
res = -1;
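The bttextcmp hunk above covers the tie-break: when two strings agree on their common prefix, the shorter one must sort first so the comparison defines a total order. A standalone sketch of that comparator; plain C byte strings with explicit lengths stand in for PostgreSQL's varlena text, and memcmp for the locale-aware comparison:

/*
 * Sketch: compare the common prefix first; on a tie, the shorter
 * value sorts first.
 */
#include <stdio.h>
#include <string.h>

static int
demo_textcmp(const char *a, size_t alen, const char *b, size_t blen)
{
	size_t		len = (alen < blen) ? alen : blen;
	int			res = memcmp(a, b, len);

	if (res == 0 && alen != blen)
		return (alen < blen) ? -1 : 1;	/* equal prefix: shorter first */
	return res;
}

int
main(void)
{
	printf("%d\n", demo_textcmp("abc", 3, "abcd", 4));	/* -1 */
	printf("%d\n", demo_textcmp("abd", 3, "abcd", 4));	/* > 0 */
	return 0;
}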
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 325e585e3cc..f2112de6777 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.81 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.82 2001/03/22 03:59:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,23 +23,23 @@
typedef struct
{
/* context data for _bt_checksplitloc */
- Size newitemsz; /* size of new item to be inserted */
- bool non_leaf; /* T if splitting an internal node */
+ Size newitemsz; /* size of new item to be inserted */
+ bool non_leaf; /* T if splitting an internal node */
- bool have_split; /* found a valid split? */
+ bool have_split; /* found a valid split? */
/* these fields valid only if have_split is true */
- bool newitemonleft; /* new item on left or right of best split */
+ bool newitemonleft; /* new item on left or right of best split */
OffsetNumber firstright; /* best split point */
- int best_delta; /* best size delta so far */
+ int best_delta; /* best size delta so far */
} FindSplitData;
extern bool FixBTree;
-Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
+Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
static void _bt_fixtree(Relation rel, BlockNumber blkno);
-static void _bt_fixbranch(Relation rel, BlockNumber lblkno,
- BlockNumber rblkno, BTStack true_stack);
+static void _bt_fixbranch(Relation rel, BlockNumber lblkno,
+ BlockNumber rblkno, BTStack true_stack);
static void _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit);
static void _bt_fixup(Relation rel, Buffer buf);
static OffsetNumber _bt_getoff(Page page, BlockNumber blkno);
@@ -47,34 +47,34 @@ static OffsetNumber _bt_getoff(Page page, BlockNumber blkno);
static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
static TransactionId _bt_check_unique(Relation rel, BTItem btitem,
- Relation heapRel, Buffer buf,
- ScanKey itup_scankey);
+ Relation heapRel, Buffer buf,
+ ScanKey itup_scankey);
static InsertIndexResult _bt_insertonpg(Relation rel, Buffer buf,
- BTStack stack,
- int keysz, ScanKey scankey,
- BTItem btitem,
- OffsetNumber afteritem);
-static void _bt_insertuple(Relation rel, Buffer buf,
- Size itemsz, BTItem btitem, OffsetNumber newitemoff);
+ BTStack stack,
+ int keysz, ScanKey scankey,
+ BTItem btitem,
+ OffsetNumber afteritem);
+static void _bt_insertuple(Relation rel, Buffer buf,
+ Size itemsz, BTItem btitem, OffsetNumber newitemoff);
static Buffer _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
- OffsetNumber newitemoff, Size newitemsz,
- BTItem newitem, bool newitemonleft,
- OffsetNumber *itup_off, BlockNumber *itup_blkno);
+ OffsetNumber newitemoff, Size newitemsz,
+ BTItem newitem, bool newitemonleft,
+ OffsetNumber *itup_off, BlockNumber *itup_blkno);
static OffsetNumber _bt_findsplitloc(Relation rel, Page page,
- OffsetNumber newitemoff,
- Size newitemsz,
- bool *newitemonleft);
+ OffsetNumber newitemoff,
+ Size newitemsz,
+ bool *newitemonleft);
static void _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
- int leftfree, int rightfree,
- bool newitemonleft, Size firstrightitemsz);
+ int leftfree, int rightfree,
+ bool newitemonleft, Size firstrightitemsz);
static Buffer _bt_getstackbuf(Relation rel, BTStack stack, int access);
static void _bt_pgaddtup(Relation rel, Page page,
- Size itemsize, BTItem btitem,
- OffsetNumber itup_off, const char *where);
+ Size itemsize, BTItem btitem,
+ OffsetNumber itup_off, const char *where);
static bool _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum,
- int keysz, ScanKey scankey);
+ int keysz, ScanKey scankey);
-static Relation _xlheapRel; /* temporary hack */
+static Relation _xlheapRel; /* temporary hack */
/*
* _bt_doinsert() -- Handle insertion of a single btitem in the tree.
@@ -114,8 +114,8 @@ top:
buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE);
/*
- * If we're not allowing duplicates, make sure the key isn't
- * already in the index. XXX this belongs somewhere else, likely
+ * If we're not allowing duplicates, make sure the key isn't already
+ * in the index. XXX this belongs somewhere else, likely
*/
if (index_is_unique)
{
@@ -134,7 +134,7 @@ top:
}
}
- _xlheapRel = heapRel; /* temporary hack */
+ _xlheapRel = heapRel; /* temporary hack */
/* do the insertion */
res = _bt_insertonpg(rel, buf, stack, natts, itup_scankey, btitem, 0);
@@ -150,7 +150,7 @@ top:
* _bt_check_unique() -- Check for violation of unique index constraint
*
* Returns NullTransactionId if there is no conflict, else an xact ID we
- * must wait for to see if it commits a conflicting tuple. If an actual
+ * must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just elog().
*/
static TransactionId
@@ -171,8 +171,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
maxoff = PageGetMaxOffsetNumber(page);
/*
- * Find first item >= proposed new item. Note we could also get
- * a pointer to end-of-page here.
+ * Find first item >= proposed new item. Note we could also get a
+ * pointer to end-of-page here.
*/
offset = _bt_binsrch(rel, buf, natts, itup_scankey);
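
The rewrapped comments above restate _bt_check_unique()'s contract: it either elog()s on a definite duplicate or returns an xact ID the caller must wait out before retrying. A minimal sketch of the caller-side loop this implies (condensed from _bt_doinsert; the top: label and the exact calls follow the surrounding code, but treat the framing as illustrative):

    xwait = _bt_check_unique(rel, btitem, heapRel, buf, itup_scankey);
    if (TransactionIdIsValid(xwait))
    {
        /* conflicting insert in progress: give up our lock, ... */
        _bt_relbuf(rel, buf, BT_WRITE);
        XactLockTableWait(xwait);   /* ...sleep until that xact ends... */
        goto top;                   /* ...and search for the key again */
    }
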
@@ -187,24 +187,24 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
BlockNumber nblkno;
/*
- * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
- * how we handling NULLs - and so we must not use _bt_compare
- * in real comparison, but only for ordering/finding items on
- * pages. - vadim 03/24/97
+			 * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this is how
+			 * we handle NULLs - and so we must not use _bt_compare for real
+			 * comparison, but only for ordering/finding items on pages. -
+ * vadim 03/24/97
*
- * make sure the offset points to an actual key
- * before trying to compare it...
+ * make sure the offset points to an actual key before trying to
+ * compare it...
*/
if (offset <= maxoff)
{
- if (! _bt_isequal(itupdesc, page, offset, natts, itup_scankey))
+ if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
/*
- * Have to check is inserted heap tuple deleted one (i.e.
- * just moved to another place by vacuum)! We only need to
- * do this once, but don't want to do it at all unless
- * we see equal tuples, so as not to slow down unequal case.
+				 * Have to check whether the inserted heap tuple is a deleted
+				 * one (i.e. just moved to another place by vacuum)! We only
+				 * need to do this once, but don't want to do it at all unless
+				 * we see equal tuples, so as not to slow down the unequal case.
*/
if (chtup)
{
@@ -220,11 +220,11 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
cbti = (BTItem) PageGetItem(page, PageGetItemId(page, offset));
htup.t_self = cbti->bti_itup.t_tid;
heap_fetch(heapRel, SnapshotDirty, &htup, &buffer);
- if (htup.t_data != NULL) /* it is a duplicate */
+ if (htup.t_data != NULL) /* it is a duplicate */
{
TransactionId xwait =
- (TransactionIdIsValid(SnapshotDirty->xmin)) ?
- SnapshotDirty->xmin : SnapshotDirty->xmax;
+ (TransactionIdIsValid(SnapshotDirty->xmin)) ?
+ SnapshotDirty->xmin : SnapshotDirty->xmax;
/*
* If this tuple is being updated by other transaction
@@ -238,6 +238,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/* Tell _bt_doinsert to wait... */
return xwait;
}
+
/*
* Otherwise we have a definite conflict.
*/
@@ -304,7 +305,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
* NOTE: if the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -358,16 +359,14 @@ _bt_insertonpg(Relation rel,
*/
if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
- (unsigned long)itemsz,
- (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
+ (unsigned long) itemsz,
+ (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
/*
* Determine exactly where new item will go.
*/
if (afteritem > 0)
- {
newitemoff = afteritem + 1;
- }
else
{
/*----------
@@ -383,12 +382,12 @@ _bt_insertonpg(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
*/
- bool movedright = false;
+ bool movedright = false;
while (PageGetFreeSpace(page) < itemsz &&
!P_RIGHTMOST(lpageop) &&
@@ -396,7 +395,7 @@ _bt_insertonpg(Relation rel,
random() > (MAX_RANDOM_VALUE / 100))
{
/* step right one page */
- BlockNumber rblkno = lpageop->btpo_next;
+ BlockNumber rblkno = lpageop->btpo_next;
_bt_relbuf(rel, buf, BT_WRITE);
buf = _bt_getbuf(rel, rblkno, BT_WRITE);
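
The reindented block comment above describes the "get tired" heuristic: keep stepping right past full pages of equal keys, but stop with probability 1/100 at each step. The test in isolation, as a self-contained sketch (MAX_RANDOM_VALUE is assumed to be 2^31 - 1, matching the headers of this era):

    #include <stdlib.h>

    #define MAX_RANDOM_VALUE 0x7FFFFFFF

    /* true ~99% of the time: keep moving right to the next page */
    static int
    keep_moving_right(void)
    {
        return random() > (MAX_RANDOM_VALUE / 100);
    }

With a 0.99 continue probability the walk stops after about 100 extra pages in expectation, while still breaking the O(N^2) pile-up at the left end of a long run of equal keys.
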
@@ -404,10 +403,11 @@ _bt_insertonpg(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
movedright = true;
}
+
/*
- * Now we are on the right page, so find the insert position.
- * If we moved right at all, we know we should insert at the
- * start of the page, else must find the position by searching.
+ * Now we are on the right page, so find the insert position. If
+ * we moved right at all, we know we should insert at the start of
+ * the page, else must find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
@@ -418,9 +418,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
- * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its
- * result, so this comparison is correct even though we appear to
- * be accounting only for the item and not for its line pointer.
+ * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
+ * so this comparison is correct even though we appear to be
+ * accounting only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
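
The note above deserves spelling out, since the comparison looks one line pointer short. A hedged model of what PageGetFreeSpace() computes (field names mirror PageHeaderData; this is an illustration, not the bufpage.c source):

    static int
    model_free_space(PageHeader phdr)
    {
        int space = (int) phdr->pd_upper - (int) phdr->pd_lower;

        /* one line pointer is pre-deducted for the prospective item */
        space -= sizeof(ItemIdData);
        return (space < 0) ? 0 : space;
    }

So PageGetFreeSpace(page) < itemsz really asks whether the item plus its line pointer fit.
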
@@ -468,7 +468,7 @@ _bt_insertonpg(Relation rel,
if (is_root)
{
- Buffer rootbuf;
+ Buffer rootbuf;
Assert(stack == (BTStack) NULL);
/* create a new root node and release the split buffers */
@@ -481,7 +481,7 @@ _bt_insertonpg(Relation rel,
{
InsertIndexResult newres;
BTItem new_item;
- BTStackData fakestack;
+ BTStackData fakestack;
BTItem ritem;
Buffer pbuf;
@@ -489,10 +489,11 @@ _bt_insertonpg(Relation rel,
if (stack == (BTStack) NULL)
{
elog(DEBUG, "btree: concurrent ROOT page split");
+
/*
* If root page splitter failed to create new root page
- * then old root' btpo_parent still points to metapage.
- * We have to fix root page in this case.
+				 * then the old root's btpo_parent still points to the
+				 * metapage. We have to fix the root page in this case.
*/
if (BTreeInvalidParent(lpageop))
{
@@ -531,9 +532,9 @@ _bt_insertonpg(Relation rel,
* item! We want to find parent pointing to where we are,
* right ? - vadim 05/27/97
*
- * Interestingly, this means we didn't *really* need to stack
- * the parent key at all; all we really care about is the
- * saved block and offset as a starting point for our search...
+ * Interestingly, this means we didn't *really* need to stack the
+ * parent key at all; all we really care about is the saved
+ * block and offset as a starting point for our search...
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@@ -583,25 +584,26 @@ formres:;
}
static void
-_bt_insertuple(Relation rel, Buffer buf,
- Size itemsz, BTItem btitem, OffsetNumber newitemoff)
+_bt_insertuple(Relation rel, Buffer buf,
+ Size itemsz, BTItem btitem, OffsetNumber newitemoff)
{
- Page page = BufferGetPage(buf);
- BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+ Page page = BufferGetPage(buf);
+ BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
START_CRIT_SECTION();
_bt_pgaddtup(rel, page, itemsz, btitem, newitemoff, "page");
/* XLOG stuff */
{
- xl_btree_insert xlrec;
- uint8 flag = XLOG_BTREE_INSERT;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
- BTItemData truncitem;
- xlrec.target.node = rel->rd_node;
+ xl_btree_insert xlrec;
+ uint8 flag = XLOG_BTREE_INSERT;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
+ BTItemData truncitem;
+
+ xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), BufferGetBlockNumber(buf), newitemoff);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeInsert;
rdata[0].next = &(rdata[1]);
@@ -610,14 +612,14 @@ _bt_insertuple(Relation rel, Buffer buf,
{
truncitem = *btitem;
truncitem.bti_itup.t_info = sizeof(BTItemData);
- rdata[1].data = (char*)&truncitem;
+ rdata[1].data = (char *) &truncitem;
rdata[1].len = sizeof(BTItemData);
}
else
{
- rdata[1].data = (char*)btitem;
- rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
- (sizeof(BTItemData) - sizeof(IndexTupleData));
+ rdata[1].data = (char *) btitem;
+ rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
}
rdata[1].buffer = buf;
rdata[1].next = NULL;
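
Beyond the cast cleanups, this hunk shows the WAL idiom the whole file relies on: a chain of XLogRecData entries, each naming one span of record payload, linked through ->next and handed to XLogInsert(). Condensed from the code above (rdata[1].len is simplified to itemsz here):

    rdata[0].buffer = InvalidBuffer;   /* record header: always logged */
    rdata[0].data = (char *) &xlrec;
    rdata[0].len = SizeOfBtreeInsert;
    rdata[0].next = &(rdata[1]);

    rdata[1].buffer = buf;             /* tied to the data page, so it can
                                        * be superseded by a backup block */
    rdata[1].data = (char *) btitem;
    rdata[1].len = itemsz;
    rdata[1].next = NULL;

    recptr = XLogInsert(RM_BTREE_ID, flag, rdata);
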
@@ -700,8 +702,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/*
* If the page we're splitting is not the rightmost page at its level
- * in the tree, then the first entry on the page is the high key
- * for the page. We need to copy that to the right half. Otherwise
+ * in the tree, then the first entry on the page is the high key for
+ * the page. We need to copy that to the right half. Otherwise
* (meaning the rightmost page case), all the items on the right half
* will be user data.
*/
@@ -779,13 +781,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
if (i < firstright)
{
_bt_pgaddtup(rel, leftpage, itemsz, item, leftoff,
- "left sibling");
+ "left sibling");
leftoff = OffsetNumberNext(leftoff);
}
else
{
_bt_pgaddtup(rel, rightpage, itemsz, item, rightoff,
- "right sibling");
+ "right sibling");
rightoff = OffsetNumberNext(rightoff);
}
}
@@ -812,11 +814,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
- * We have to grab the right sibling (if any) and fix the prev
- * pointer there. We are guaranteed that this is deadlock-free
- * since no other writer will be holding a lock on that page
- * and trying to move left, and all readers release locks on a page
- * before trying to fetch its neighbors.
+ * We have to grab the right sibling (if any) and fix the prev pointer
+ * there. We are guaranteed that this is deadlock-free since no other
+ * writer will be holding a lock on that page and trying to move left,
+ * and all readers release locks on a page before trying to fetch its
+ * neighbors.
*/
if (!P_RIGHTMOST(ropaque))
@@ -834,12 +836,12 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*/
START_CRIT_SECTION();
{
- xl_btree_split xlrec;
- int flag = (newitemonleft) ?
- XLOG_BTREE_SPLEFT : XLOG_BTREE_SPLIT;
- BlockNumber blkno;
- XLogRecPtr recptr;
- XLogRecData rdata[4];
+ xl_btree_split xlrec;
+ int flag = (newitemonleft) ?
+ XLOG_BTREE_SPLEFT : XLOG_BTREE_SPLIT;
+ BlockNumber blkno;
+ XLogRecPtr recptr;
+ XLogRecData rdata[4];
xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), *itup_blkno, *itup_off);
@@ -856,31 +858,33 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
BlockIdSet(&(xlrec.parentblk), lopaque->btpo_parent);
BlockIdSet(&(xlrec.leftblk), lopaque->btpo_prev);
BlockIdSet(&(xlrec.rightblk), ropaque->btpo_next);
- /*
- * Dirrect access to page is not good but faster - we should
+
+ /*
+	 * Direct access to the page is not good but faster - we should
* implement some new func in page API.
*/
- xlrec.leftlen = ((PageHeader)leftpage)->pd_special -
- ((PageHeader)leftpage)->pd_upper;
+ xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
+ ((PageHeader) leftpage)->pd_upper;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeSplit;
rdata[0].next = &(rdata[1]);
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)leftpage + ((PageHeader)leftpage)->pd_upper;
+ rdata[1].data = (char *) leftpage + ((PageHeader) leftpage)->pd_upper;
rdata[1].len = xlrec.leftlen;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = InvalidBuffer;
- rdata[2].data = (char*)rightpage + ((PageHeader)rightpage)->pd_upper;
- rdata[2].len = ((PageHeader)rightpage)->pd_special -
- ((PageHeader)rightpage)->pd_upper;
+ rdata[2].data = (char *) rightpage + ((PageHeader) rightpage)->pd_upper;
+ rdata[2].len = ((PageHeader) rightpage)->pd_special -
+ ((PageHeader) rightpage)->pd_upper;
rdata[2].next = NULL;
if (!P_RIGHTMOST(ropaque))
{
- BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
+ BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
+
sopaque->btpo_prev = BufferGetBlockNumber(rbuf);
rdata[2].next = &(rdata[3]);
@@ -942,7 +946,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -968,23 +972,23 @@ _bt_findsplitloc(Relation rel,
/* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
newitemsz += sizeof(ItemIdData);
state.newitemsz = newitemsz;
- state.non_leaf = ! P_ISLEAF(opaque);
+ state.non_leaf = !P_ISLEAF(opaque);
state.have_split = false;
/* Total free space available on a btree page, after fixed overhead */
leftspace = rightspace =
PageGetPageSize(page) - sizeof(PageHeaderData) -
MAXALIGN(sizeof(BTPageOpaqueData))
- + sizeof(ItemIdData);
+ +sizeof(ItemIdData);
/*
- * Finding the best possible split would require checking all the possible
- * split points, because of the high-key and left-key special cases.
- * That's probably more work than it's worth; instead, stop as soon as
- * we find a "good-enough" split, where good-enough is defined as an
- * imbalance in free space of no more than pagesize/16 (arbitrary...)
- * This should let us stop near the middle on most pages, instead of
- * plowing to the end.
+ * Finding the best possible split would require checking all the
+ * possible split points, because of the high-key and left-key special
+ * cases. That's probably more work than it's worth; instead, stop as
+ * soon as we find a "good-enough" split, where good-enough is defined
+ * as an imbalance in free space of no more than pagesize/16
+ * (arbitrary...) This should let us stop near the middle on most
+ * pages, instead of plowing to the end.
*/
goodenough = leftspace / 16;
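
The rewrapped comment sets the policy: do not score every possible split point, just take the first one whose free-space imbalance is within pagesize/16. The acceptance test, as a sketch (the helper name is invented for illustration):

    static int
    split_is_good_enough(int leftfree, int rightfree, int goodenough)
    {
        int delta = leftfree - rightfree;

        if (delta < 0)
            delta = -delta;
        /* both halves must fit, and the imbalance must be small */
        return (leftfree >= 0 && rightfree >= 0 && delta <= goodenough);
    }
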
@@ -1024,6 +1028,7 @@ _bt_findsplitloc(Relation rel,
*/
leftfree = leftspace - dataitemstoleft - (int) itemsz;
rightfree = rightspace - (dataitemtotal - dataitemstoleft);
+
/*
* Will the new item go to left or right of split?
*/
@@ -1051,10 +1056,10 @@ _bt_findsplitloc(Relation rel,
}
/*
- * I believe it is not possible to fail to find a feasible split,
- * but just in case ...
+ * I believe it is not possible to fail to find a feasible split, but
+ * just in case ...
*/
- if (! state.have_split)
+ if (!state.have_split)
elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
RelationGetRelationName(rel));
@@ -1071,6 +1076,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
int leftfree, int rightfree,
bool newitemonleft, Size firstrightitemsz)
{
+
/*
* Account for the new item on whichever side it is to be put.
*/
@@ -1078,19 +1084,21 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
leftfree -= (int) state->newitemsz;
else
rightfree -= (int) state->newitemsz;
+
/*
- * If we are not on the leaf level, we will be able to discard the
- * key data from the first item that winds up on the right page.
+ * If we are not on the leaf level, we will be able to discard the key
+ * data from the first item that winds up on the right page.
*/
if (state->non_leaf)
rightfree += (int) firstrightitemsz -
(int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));
+
/*
* If feasible split point, remember best delta.
*/
if (leftfree >= 0 && rightfree >= 0)
{
- int delta = leftfree - rightfree;
+ int delta = leftfree - rightfree;
if (delta < 0)
delta = -delta;
@@ -1134,10 +1142,11 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
maxoff = PageGetMaxOffsetNumber(page);
start = stack->bts_offset;
+
/*
- * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
- * case of concurrent ROOT page split. Also, watch out for
- * possibility that page has a high key now when it didn't before.
+ * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the case of
+ * concurrent ROOT page split. Also, watch out for possibility that
+ * page has a high key now when it didn't before.
*/
if (start < P_FIRSTDATAKEY(opaque))
start = P_FIRSTDATAKEY(opaque);
@@ -1159,11 +1168,15 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
return buf;
}
}
- /* by here, the item we're looking for moved right at least one page */
+
+ /*
+ * by here, the item we're looking for moved right at least one
+ * page
+ */
if (P_RIGHTMOST(opaque))
{
_bt_relbuf(rel, buf, access);
- return(InvalidBuffer);
+ return (InvalidBuffer);
}
blkno = opaque->btpo_next;
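
The reflowed comment covers the recovery case where the parent item migrated right (concurrent splits, or a high key appearing). The loop's shape, in outline only (not runnable as-is):

    for (;;)
    {
        /* scan items on the current parent page for our child pointer */
        if (found)
            return buf;                 /* still locked with 'access' */
        if (P_RIGHTMOST(opaque))
        {
            _bt_relbuf(rel, buf, access);
            return InvalidBuffer;       /* fell off the end of the level */
        }
        blkno = opaque->btpo_next;      /* step to the right sibling */
        /* release this page, read blkno, and loop */
    }
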
@@ -1190,27 +1203,27 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
*
* On entry, lbuf (the old root) and rbuf (its new peer) are write-
* locked. On exit, a new root page exists with entries for the
- * two new children, metapage is updated and unlocked/unpinned.
- * The new root buffer is returned to caller which has to unlock/unpin
- * lbuf, rbuf & rootbuf.
+ * two new children, metapage is updated and unlocked/unpinned.
+ * The new root buffer is returned to caller which has to unlock/unpin
+ * lbuf, rbuf & rootbuf.
*/
static Buffer
_bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
{
- Buffer rootbuf;
- Page lpage,
- rpage,
- rootpage;
- BlockNumber lbkno,
- rbkno;
- BlockNumber rootblknum;
- BTPageOpaque rootopaque;
- ItemId itemid;
- BTItem item;
- Size itemsz;
- BTItem new_item;
- Buffer metabuf;
- Page metapg;
+ Buffer rootbuf;
+ Page lpage,
+ rpage,
+ rootpage;
+ BlockNumber lbkno,
+ rbkno;
+ BlockNumber rootblknum;
+ BTPageOpaque rootopaque;
+ ItemId itemid;
+ BTItem item;
+ Size itemsz;
+ BTItem new_item;
+ Buffer metabuf;
+ Page metapg;
BTMetaPageData *metad;
/* get a new root page */
@@ -1236,9 +1249,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rpage = BufferGetPage(rbuf);
/*
- * Make sure pages in old root level have valid parent links --- we will
- * need this in _bt_insertonpg() if a concurrent root split happens (see
- * README).
+ * Make sure pages in old root level have valid parent links --- we
+ * will need this in _bt_insertonpg() if a concurrent root split
+ * happens (see README).
*/
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
@@ -1264,8 +1277,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
pfree(new_item);
/*
- * Create downlink item for right page. The key for it is obtained from
- * the "high key" position in the left page.
+ * Create downlink item for right page. The key for it is obtained
+ * from the "high key" position in the left page.
*/
itemid = PageGetItemId(lpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid);
@@ -1285,26 +1298,26 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* XLOG stuff */
{
- xl_btree_newroot xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_btree_newroot xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.node = rel->rd_node;
xlrec.level = metad->btm_level;
BlockIdSet(&(xlrec.rootblk), rootblknum);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeNewroot;
rdata[0].next = &(rdata[1]);
- /*
- * Dirrect access to page is not good but faster - we should
+ /*
+		 * Direct access to the page is not good but faster - we should
* implement some new func in page API.
*/
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)rootpage + ((PageHeader) rootpage)->pd_upper;
- rdata[1].len = ((PageHeader)rootpage)->pd_special -
- ((PageHeader)rootpage)->pd_upper;
+ rdata[1].data = (char *) rootpage + ((PageHeader) rootpage)->pd_upper;
+ rdata[1].len = ((PageHeader) rootpage)->pd_special -
+ ((PageHeader) rootpage)->pd_upper;
rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, rdata);
@@ -1325,7 +1338,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* write and let go of metapage buffer */
_bt_wrtbuf(rel, metabuf);
- return(rootbuf);
+ return (rootbuf);
}
/*
@@ -1339,24 +1352,31 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
Buffer
_bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
{
- Buffer rootbuf;
- BlockNumber rootblk;
- Page rootpage;
- XLogRecPtr rootLSN;
- Page oldrootpage = BufferGetPage(oldrootbuf);
- BTPageOpaque oldrootopaque = (BTPageOpaque)
- PageGetSpecialPointer(oldrootpage);
- Buffer buf, leftbuf, rightbuf;
- Page page, leftpage, rightpage;
- BTPageOpaque opaque, leftopaque, rightopaque;
- OffsetNumber newitemoff;
- BTItem btitem, ritem;
- Size itemsz;
-
- if (! P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
+ Buffer rootbuf;
+ BlockNumber rootblk;
+ Page rootpage;
+ XLogRecPtr rootLSN;
+ Page oldrootpage = BufferGetPage(oldrootbuf);
+ BTPageOpaque oldrootopaque = (BTPageOpaque)
+ PageGetSpecialPointer(oldrootpage);
+ Buffer buf,
+ leftbuf,
+ rightbuf;
+ Page page,
+ leftpage,
+ rightpage;
+ BTPageOpaque opaque,
+ leftopaque,
+ rightopaque;
+ OffsetNumber newitemoff;
+ BTItem btitem,
+ ritem;
+ Size itemsz;
+
+ if (!P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
elog(ERROR, "bt_fixroot: not valid old root page");
- /* Read right neighbor and create new root page*/
+ /* Read right neighbor and create new root page */
leftbuf = _bt_getbuf(rel, oldrootopaque->btpo_next, BT_WRITE);
leftpage = BufferGetPage(leftbuf);
leftopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage);
@@ -1377,26 +1397,26 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
*
* If concurrent process will split one of pages on this level then it
* will see either btpo_parent == metablock or btpo_parent == rootblk.
- * In first case it will give up its locks and walk to the leftmost page
- * (oldrootbuf) in _bt_fixup() - ie it will wait for us and let us
- * continue. In second case it will try to lock rootbuf keeping its locks
- * on buffers we already passed, also waiting for us. If we'll have to
- * unlock rootbuf (split it) and that process will have to split page
- * of new level we created (level of rootbuf) then it will wait while
- * we create upper level. Etc.
+	 * In the first case it will give up its locks and walk to the leftmost
+	 * page (oldrootbuf) in _bt_fixup() - i.e. it will wait for us and let
+	 * us continue. In the second case it will try to lock rootbuf while
+	 * keeping its locks on the buffers we already passed, also waiting for
+	 * us. If we have to unlock rootbuf (split it) and that process has to
+	 * split a page of the new level we created (the level of rootbuf),
+	 * then it will wait while we create the upper level. And so on.
*/
- while(! P_RIGHTMOST(leftopaque))
+ while (!P_RIGHTMOST(leftopaque))
{
rightbuf = _bt_getbuf(rel, leftopaque->btpo_next, BT_WRITE);
rightpage = BufferGetPage(rightbuf);
rightopaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
/*
- * Update LSN & StartUpID of child page buffer to ensure that
- * it will be written on disk after flushing log record for new
- * root creation. Unfortunately, for the moment (?) we do not
- * log this operation and so possibly break our rule to log entire
- * page content on first after checkpoint modification.
+		 * Update the LSN & StartUpID of the child page buffer to ensure
+		 * that it will be written to disk after flushing the log record
+		 * for new root creation. Unfortunately, for the moment (?) we do
+		 * not log this operation, and so we possibly break our rule to log
+		 * the entire page content on the first modification after a
+		 * checkpoint.
*/
HOLD_INTERRUPTS();
rightopaque->btpo_parent = rootblk;
@@ -1416,17 +1436,17 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (PageGetFreeSpace(page) < itemsz)
{
- Buffer newbuf;
- OffsetNumber firstright;
- OffsetNumber itup_off;
- BlockNumber itup_blkno;
- bool newitemonleft;
+ Buffer newbuf;
+ OffsetNumber firstright;
+ OffsetNumber itup_off;
+ BlockNumber itup_blkno;
+ bool newitemonleft;
firstright = _bt_findsplitloc(rel, page,
- newitemoff, itemsz, &newitemonleft);
+ newitemoff, itemsz, &newitemonleft);
newbuf = _bt_split(rel, buf, firstright,
- newitemoff, itemsz, btitem, newitemonleft,
- &itup_off, &itup_blkno);
+ newitemoff, itemsz, btitem, newitemonleft,
+ &itup_off, &itup_blkno);
/* Keep lock on new "root" buffer ! */
if (buf != rootbuf)
_bt_relbuf(rel, buf, BT_WRITE);
@@ -1450,10 +1470,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
/*
* Here we hold locks on old root buffer, new root buffer we've
- * created with _bt_newroot() - rootbuf, - and buf we've used
- * for last insert ops - buf. If rootbuf != buf then we have to
- * create at least one more level. And if "release" is TRUE
- * then we give up oldrootbuf.
+ * created with _bt_newroot() - rootbuf, - and buf we've used for last
+ * insert ops - buf. If rootbuf != buf then we have to create at least
+ * one more level. And if "release" is TRUE then we give up
+ * oldrootbuf.
*/
if (release)
_bt_wrtbuf(rel, oldrootbuf);
@@ -1461,10 +1481,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (rootbuf != buf)
{
_bt_wrtbuf(rel, buf);
- return(_bt_fixroot(rel, rootbuf, true));
+ return (_bt_fixroot(rel, rootbuf, true));
}
- return(rootbuf);
+ return (rootbuf);
}
/*
@@ -1474,17 +1494,17 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
static void
_bt_fixtree(Relation rel, BlockNumber blkno)
{
- Buffer buf;
- Page page;
- BTPageOpaque opaque;
- BlockNumber pblkno;
+ Buffer buf;
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber pblkno;
- for ( ; ; )
+ for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- if (! P_LEFTMOST(opaque) || P_ISLEAF(opaque))
+ if (!P_LEFTMOST(opaque) || P_ISLEAF(opaque))
elog(ERROR, "bt_fixtree[%s]: invalid start page (need to recreate index)", RelationGetRelationName(rel));
pblkno = opaque->btpo_parent;
@@ -1534,25 +1554,26 @@ _bt_fixtree(Relation rel, BlockNumber blkno)
static void
_bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
{
- BlockNumber blkno = BufferGetBlockNumber(buf);
- Page page;
- BTPageOpaque opaque;
- BlockNumber cblkno[3];
- OffsetNumber coff[3];
- Buffer cbuf[3];
- Page cpage[3];
- BTPageOpaque copaque[3];
- BTItem btitem;
- int cidx, i;
- bool goodbye = false;
- char tbuf[BLCKSZ];
+ BlockNumber blkno = BufferGetBlockNumber(buf);
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber cblkno[3];
+ OffsetNumber coff[3];
+ Buffer cbuf[3];
+ Page cpage[3];
+ BTPageOpaque copaque[3];
+ BTItem btitem;
+ int cidx,
+ i;
+ bool goodbye = false;
+ char tbuf[BLCKSZ];
page = BufferGetPage(buf);
/* copy page to temp storage */
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_READ);
- page = (Page)tbuf;
+ page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Initialize first child data */
@@ -1564,20 +1585,21 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
cbuf[0] = _bt_getbuf(rel, cblkno[0], BT_READ);
cpage[0] = BufferGetPage(cbuf[0]);
copaque[0] = (BTPageOpaque) PageGetSpecialPointer(cpage[0]);
- if (P_LEFTMOST(opaque) && ! P_LEFTMOST(copaque[0]))
+ if (P_LEFTMOST(opaque) && !P_LEFTMOST(copaque[0]))
elog(ERROR, "bt_fixlevel[%s]: non-leftmost child page of leftmost parent (need to recreate index)", RelationGetRelationName(rel));
/* caller should take care and avoid this */
if (P_RIGHTMOST(copaque[0]))
elog(ERROR, "bt_fixlevel[%s]: invalid start child (need to recreate index)", RelationGetRelationName(rel));
- for ( ; ; )
+ for (;;)
{
+
/*
- * Read up to 2 more child pages and look for pointers
- * to them in *saved* parent page
+ * Read up to 2 more child pages and look for pointers to them in
+ * *saved* parent page
*/
coff[1] = coff[2] = InvalidOffsetNumber;
- for (cidx = 0; cidx < 2; )
+ for (cidx = 0; cidx < 2;)
{
cidx++;
cblkno[cidx] = (copaque[cidx - 1])->btpo_next;
@@ -1609,20 +1631,20 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (coff[1] == InvalidOffsetNumber ||
(cidx == 2 && coff[2] == InvalidOffsetNumber))
{
- Buffer newbuf;
- Page newpage;
- BTPageOpaque newopaque;
- BTItem ritem;
- Size itemsz;
- OffsetNumber newitemoff;
- BlockNumber parblk[3];
- BTStackData stack;
+ Buffer newbuf;
+ Page newpage;
+ BTPageOpaque newopaque;
+ BTItem ritem;
+ Size itemsz;
+ OffsetNumber newitemoff;
+ BlockNumber parblk[3];
+ BTStackData stack;
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
stack.bts_offset = InvalidOffsetNumber;
ItemPointerSet(&(stack.bts_btitem.bti_itup.t_tid),
- cblkno[0], P_HIKEY);
+ cblkno[0], P_HIKEY);
buf = _bt_getstackbuf(rel, &stack, BT_WRITE);
if (buf == InvalidBuffer)
@@ -1644,19 +1666,19 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (coff[i] != InvalidOffsetNumber)
{
if (parblk[i] == parblk[i - 1] &&
- coff[i] != coff[i - 1] + 1)
+ coff[i] != coff[i - 1] + 1)
elog(ERROR, "bt_fixlevel[%s]: invalid item order(2) (need to recreate index)", RelationGetRelationName(rel));
continue;
}
/* Have to check next page ? */
- if ((! P_RIGHTMOST(opaque)) &&
- coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
+ if ((!P_RIGHTMOST(opaque)) &&
+ coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
{
newbuf = _bt_getbuf(rel, opaque->btpo_next, BT_WRITE);
newpage = BufferGetPage(newbuf);
newopaque = (BTPageOpaque) PageGetSpecialPointer(newpage);
coff[i] = _bt_getoff(newpage, cblkno[i]);
- if (coff[i] != InvalidOffsetNumber) /* found ! */
+ if (coff[i] != InvalidOffsetNumber) /* found ! */
{
if (coff[i] != P_FIRSTDATAKEY(newopaque))
elog(ERROR, "bt_fixlevel[%s]: invalid item order(3) (need to recreate index)", RelationGetRelationName(rel));
@@ -1673,7 +1695,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
}
/* insert pointer */
ritem = (BTItem) PageGetItem(cpage[i - 1],
- PageGetItemId(cpage[i - 1], P_HIKEY));
+ PageGetItemId(cpage[i - 1], P_HIKEY));
btitem = _bt_formitem(&(ritem->bti_itup));
ItemPointerSet(&(btitem->bti_itup.t_tid), cblkno[i], P_HIKEY);
itemsz = IndexTupleDSize(btitem->bti_itup)
@@ -1684,16 +1706,16 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (PageGetFreeSpace(page) < itemsz)
{
- OffsetNumber firstright;
- OffsetNumber itup_off;
- BlockNumber itup_blkno;
- bool newitemonleft;
+ OffsetNumber firstright;
+ OffsetNumber itup_off;
+ BlockNumber itup_blkno;
+ bool newitemonleft;
firstright = _bt_findsplitloc(rel, page,
- newitemoff, itemsz, &newitemonleft);
+ newitemoff, itemsz, &newitemonleft);
newbuf = _bt_split(rel, buf, firstright,
- newitemoff, itemsz, btitem, newitemonleft,
- &itup_off, &itup_blkno);
+ newitemoff, itemsz, btitem, newitemonleft,
+ &itup_off, &itup_blkno);
/* what buffer we need in ? */
if (newitemonleft)
_bt_relbuf(rel, newbuf, BT_WRITE);
@@ -1720,7 +1742,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
/* copy page with pointer to cblkno[cidx] to temp storage */
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_WRITE);
- page = (Page)tbuf;
+ page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
}
@@ -1760,18 +1782,19 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
* but it doesn't guarantee full consistency of tree.)
*/
static void
-_bt_fixbranch(Relation rel, BlockNumber lblkno,
- BlockNumber rblkno, BTStack true_stack)
+_bt_fixbranch(Relation rel, BlockNumber lblkno,
+ BlockNumber rblkno, BTStack true_stack)
{
- BlockNumber blkno = true_stack->bts_blkno;
- BTStackData stack;
- BTPageOpaque opaque;
- Buffer buf, rbuf;
- Page page;
- OffsetNumber offnum;
+ BlockNumber blkno = true_stack->bts_blkno;
+ BTStackData stack;
+ BTPageOpaque opaque;
+ Buffer buf,
+ rbuf;
+ Page page;
+ OffsetNumber offnum;
true_stack = true_stack->bts_parent;
- for ( ; ; )
+ for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -1779,8 +1802,8 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
_bt_fixlevel(rel, buf, rblkno);
/*
- * Here parent level should have pointers for both
- * lblkno and rblkno and we have to find them.
+ * Here parent level should have pointers for both lblkno and
+ * rblkno and we have to find them.
*/
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
@@ -1792,7 +1815,7 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
page = BufferGetPage(buf);
offnum = _bt_getoff(page, rblkno);
- if (offnum != InvalidOffsetNumber) /* right pointer found */
+ if (offnum != InvalidOffsetNumber) /* right pointer found */
{
if (offnum <= stack.bts_offset)
elog(ERROR, "bt_fixbranch[%s]: invalid item order (need to recreate index)", RelationGetRelationName(rel));
@@ -1829,10 +1852,10 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
}
/*
- * Well, we are on the level that was root or unexistent when
- * we started traversing tree down. If btpo_parent is updated
- * then we'll use it to continue, else we'll fix/restore upper
- * levels entirely.
+		 * Well, we are on the level that was the root, or nonexistent,
+		 * when we started traversing the tree down. If btpo_parent is
+		 * updated then we'll use it to continue, else we'll fix/restore
+		 * the upper levels entirely.
*/
if (!BTreeInvalidParent(opaque))
{
@@ -1874,18 +1897,18 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
static void
_bt_fixup(Relation rel, Buffer buf)
{
- Page page;
- BTPageOpaque opaque;
- BlockNumber blkno;
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber blkno;
- for ( ; ; )
+ for (;;)
{
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * If someone else already created parent pages
- * then it's time for _bt_fixtree() to check upper
- * levels and fix them, if required.
+ * If someone else already created parent pages then it's time for
+ * _bt_fixtree() to check upper levels and fix them, if required.
*/
if (!BTreeInvalidParent(opaque))
{
@@ -1904,13 +1927,12 @@ _bt_fixup(Relation rel, Buffer buf)
}
/*
- * Ok, we are on the leftmost page, it's write locked
- * by us and its btpo_parent points to meta page - time
- * for _bt_fixroot().
+ * Ok, we are on the leftmost page, it's write locked by us and its
+ * btpo_parent points to meta page - time for _bt_fixroot().
*/
elog(NOTICE, "bt_fixup[%s]: fixing root page", RelationGetRelationName(rel));
- buf = _bt_fixroot(rel, buf, true);
- _bt_relbuf(rel, buf, BT_WRITE);
+ buf = _bt_fixroot(rel, buf, true);
+ _bt_relbuf(rel, buf, BT_WRITE);
return;
}
@@ -1918,23 +1940,23 @@ _bt_fixup(Relation rel, Buffer buf)
static OffsetNumber
_bt_getoff(Page page, BlockNumber blkno)
{
- BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
- OffsetNumber offnum = P_FIRSTDATAKEY(opaque);
- BlockNumber curblkno;
- ItemId itemid;
- BTItem item;
-
- for ( ; offnum <= maxoff; offnum++)
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
+ OffsetNumber offnum = P_FIRSTDATAKEY(opaque);
+ BlockNumber curblkno;
+ ItemId itemid;
+ BTItem item;
+
+ for (; offnum <= maxoff; offnum++)
{
itemid = PageGetItemId(page, offnum);
item = (BTItem) PageGetItem(page, itemid);
curblkno = ItemPointerGetBlockNumber(&(item->bti_itup.t_tid));
if (curblkno == blkno)
- return(offnum);
+ return (offnum);
}
- return(InvalidOffsetNumber);
+ return (InvalidOffsetNumber);
}
/*
@@ -1961,9 +1983,9 @@ _bt_pgaddtup(Relation rel,
const char *where)
{
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- BTItemData truncitem;
+ BTItemData truncitem;
- if (! P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
+ if (!P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
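
A detail that recurs through the nbtinsert.c hunks above (in _bt_insertuple's WAL record and again here in _bt_pgaddtup) is truncation of the first data key on non-leaf pages: that key is never compared against, so only the fixed-size item header needs to survive. The move in isolation, assuming the usual nbtree declarations:

    BTItemData truncitem;

    memcpy(&truncitem, btitem, sizeof(BTItemData));
    truncitem.bti_itup.t_info = sizeof(BTItemData);  /* header only */
    /* store (or log) truncitem in place of the full-width item */
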
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 4c854fe7913..460d6c834c1 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.50 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.51 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -28,7 +28,7 @@
#include "miscadmin.h"
#include "storage/lmgr.h"
-extern bool FixBTree; /* comments in nbtree.c */
+extern bool FixBTree; /* comments in nbtree.c */
extern Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
/*
@@ -100,7 +100,7 @@ _bt_metapinit(Relation rel)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
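
Given the contract restated above - BT_READ yields InvalidBuffer on an empty index, BT_WRITE creates the root, and either way the result carries only a read lock - a typical caller looks roughly like this (a hedged sketch, not quoted from the scan code):

    Buffer rootbuf = _bt_getroot(rel, BT_READ);

    if (!BufferIsValid(rootbuf))
        return (RetrieveIndexResult) NULL;   /* empty index */

    /* note: read-locked even if we had passed BT_WRITE */
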
@@ -178,20 +178,20 @@ _bt_getroot(Relation rel, int access)
/* XLOG stuff */
{
- xl_btree_newroot xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata;
+ xl_btree_newroot xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata;
xlrec.node = rel->rd_node;
xlrec.level = 1;
BlockIdSet(&(xlrec.rootblk), rootblkno);
rdata.buffer = InvalidBuffer;
- rdata.data = (char*)&xlrec;
+ rdata.data = (char *) &xlrec;
rdata.len = SizeOfBtreeNewroot;
rdata.next = NULL;
recptr = XLogInsert(RM_BTREE_ID,
- XLOG_BTREE_NEWROOT|XLOG_BTREE_LEAF, &rdata);
+ XLOG_BTREE_NEWROOT | XLOG_BTREE_LEAF, &rdata);
PageSetLSN(rootpage, recptr);
PageSetSUI(rootpage, ThisStartUpID);
@@ -212,6 +212,7 @@ _bt_getroot(Relation rel, int access)
}
else
{
+
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
@@ -232,30 +233,31 @@ _bt_getroot(Relation rel, int access)
/*
* Race condition: If the root page split between the time we looked
* at the metadata page and got the root buffer, then we got the wrong
- * buffer. Release it and try again.
+ * buffer. Release it and try again.
*/
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
- if (! P_ISROOT(rootopaque))
+ if (!P_ISROOT(rootopaque))
{
+
/*
- * It happened, but if root page splitter failed to create
- * new root page then we'll go in loop trying to call
- * _bt_getroot again and again.
+		 * It happened, but if the root page splitter failed to create a
+		 * new root page then we'll loop, calling _bt_getroot again and
+		 * again.
*/
if (FixBTree)
{
- Buffer newrootbuf;
+ Buffer newrootbuf;
-check_parent:;
- if (BTreeInvalidParent(rootopaque)) /* unupdated! */
+ check_parent:;
+ if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_WRITE);
/* handle concurrent fix of root page */
- if (BTreeInvalidParent(rootopaque)) /* unupdated! */
+ if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{
elog(NOTICE, "bt_getroot[%s]: fixing root page", RelationGetRelationName(rel));
newrootbuf = _bt_fixroot(rel, rootbuf, true);
@@ -266,20 +268,22 @@ check_parent:;
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* New root might be splitted while changing lock */
if (P_ISROOT(rootopaque))
- return(rootbuf);
+ return (rootbuf);
/* rootbuf is read locked */
goto check_parent;
}
- else /* someone else already fixed root */
+ else
+/* someone else already fixed root */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
}
}
+
/*
- * Ok, here we have old root page with btpo_parent pointing
- * to upper level - check parent page because of there is
- * good chance that parent is root page.
+			 * Ok, here we have the old root page with btpo_parent pointing
+			 * to an upper level - check the parent page because there is a
+			 * good chance that the parent is the root page.
*/
newrootbuf = _bt_getbuf(rel, rootopaque->btpo_parent, BT_READ);
_bt_relbuf(rel, rootbuf, BT_READ);
@@ -287,7 +291,7 @@ check_parent:;
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
if (P_ISROOT(rootopaque))
- return(rootbuf);
+ return (rootbuf);
/* no luck -:( */
}
@@ -366,7 +370,7 @@ _bt_relbuf(Relation rel, Buffer buf, int access)
* and a pin on the buffer.
*
* NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here, the real I/O happens later. Since we can't persuade the
+ * dirty here, the real I/O happens later. Since we can't persuade the
* Unix kernel to schedule disk writes in a particular order, there's not
* much point in worrying about this. The most we can say is that all the
* writes will occur before commit.
@@ -468,14 +472,14 @@ _bt_pagedel(Relation rel, ItemPointer tid)
PageIndexTupleDelete(page, offno);
/* XLOG stuff */
{
- xl_btree_delete xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_btree_delete xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.target.node = rel->rd_node;
xlrec.target.tid = *tid;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeDelete;
rdata[0].next = &(rdata[1]);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index f02dfcbd128..97d99da4fde 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.78 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.79 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,8 @@
bool BuildingBtree = false; /* see comment in btbuild() */
bool FastBuild = true; /* use sort/build instead */
- /* of insertion build */
+
+ /* of insertion build */
/*
@@ -52,12 +53,14 @@ static void _bt_restscan(IndexScanDesc scan);
Datum
btbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -69,9 +72,11 @@ btbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -79,15 +84,16 @@ btbuild(PG_FUNCTION_ARGS)
BTItem btitem;
bool usefast;
Snapshot snapshot;
- TransactionId XmaxRecent;
+ TransactionId XmaxRecent;
+
/*
- * spool2 is needed only when the index is an unique index.
- * Dead tuples are put into spool2 instead of spool in
- * order to avoid uniqueness check.
+	 * spool2 is needed only when the index is a unique index. Dead
+	 * tuples are put into spool2 instead of spool in order to avoid
+	 * the uniqueness check.
*/
- BTSpool *spool2 = NULL;
+ BTSpool *spool2 = NULL;
bool tupleIsAlive;
- int dead_count;
+ int dead_count;
/* note that this is a new btree */
BuildingBtree = true;
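
The new comment explains the two-spool build: uniqueness is enforced while merging spool, so tuples already known dead are diverted into spool2, which was created with the unique check disabled. The per-tuple decision, condensed from the scan loop below:

    if (tupleIsAlive || !spool2)
        _bt_spool(btitem, spool);       /* participates in unique check */
    else
    {
        dead_count++;
        _bt_spool(btitem, spool2);      /* dead: bypasses the check */
    }
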
@@ -103,7 +109,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifdef BTREE_BUILD_STATS
if (Show_btree_build_stats)
ResetUsage();
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/* initialize the btree index metadata page (if this is a new index) */
if (oldPred == NULL)
@@ -155,10 +161,10 @@ btbuild(PG_FUNCTION_ARGS)
if (usefast)
{
spool = _bt_spoolinit(index, indexInfo->ii_Unique);
+
/*
- * Different from spool,the uniqueness isn't checked
- * for spool2.
- */
+		 * Unlike spool, uniqueness isn't checked for spool2.
+ */
if (indexInfo->ii_Unique)
spool2 = _bt_spoolinit(index, false);
}
@@ -187,12 +193,13 @@ btbuild(PG_FUNCTION_ARGS)
}
else
tupleIsAlive = true;
-
+
MemoryContextReset(econtext->ecxt_per_tuple_memory);
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -253,8 +260,7 @@ btbuild(PG_FUNCTION_ARGS)
* btree pages - NULLs are greater than NOT_NULLs, and NULL = NULL is TRUE.
* Sure, it's just a rule for placing/finding items and no more -
* keytest will return FALSE for a = 5 for items having 'a' isNULL.
- * Look at _bt_compare for how it works.
- * - vadim 03/23/97
+ * Look at _bt_compare for how it works. - vadim 03/23/97
*
* if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
*/
@@ -271,7 +277,8 @@ btbuild(PG_FUNCTION_ARGS)
{
if (tupleIsAlive || !spool2)
_bt_spool(btitem, spool);
- else /* dead tuples are put into spool2 */
+ else
+/* dead tuples are put into spool2 */
{
dead_count++;
_bt_spool(btitem, spool2);
@@ -288,7 +295,7 @@ btbuild(PG_FUNCTION_ARGS)
/* okay, all heap tuples are indexed */
heap_endscan(hscan);
- if (spool2 && !dead_count) /* spool2 was found to be unnecessary */
+ if (spool2 && !dead_count) /* spool2 was found to be unnecessary */
{
_bt_spooldestroy(spool2);
spool2 = NULL;
@@ -296,9 +303,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -322,7 +327,7 @@ btbuild(PG_FUNCTION_ARGS)
ShowUsage();
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/*
* Since we just counted the tuples in the heap, we update its stats
@@ -368,11 +373,11 @@ btbuild(PG_FUNCTION_ARGS)
Datum
btinsert(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
InsertIndexResult res;
BTItem btitem;
IndexTuple itup;
@@ -396,8 +401,8 @@ btinsert(PG_FUNCTION_ARGS)
Datum
btgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/*
@@ -408,10 +413,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
+
/*
* Restore scan position using heap TID returned by previous call
- * to btgettuple(). _bt_restscan() re-grabs the read lock on
- * the buffer, too.
+ * to btgettuple(). _bt_restscan() re-grabs the read lock on the
+ * buffer, too.
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@@ -421,8 +427,8 @@ btgettuple(PG_FUNCTION_ARGS)
/*
* Save heap TID to use it in _bt_restscan. Then release the read
- * lock on the buffer so that we aren't blocking other backends.
- * NOTE: we do keep the pin on the buffer!
+ * lock on the buffer so that we aren't blocking other backends. NOTE:
+ * we do keep the pin on the buffer!
*/
if (res)
{
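
The reflowed comment pairs with a two-step idiom: stash the heap TID so a later _bt_restscan() can re-find the position, then drop the read lock while keeping the pin. Condensed from this function (the curHeapIptr field name follows the code of this era, but treat it as an assumption here):

    BTScanOpaque so = (BTScanOpaque) scan->opaque;

    if (res)
    {
        so->curHeapIptr = res->heap_iptr;
        LockBuffer(so->btso_curbuf, BUFFER_LOCK_UNLOCK);  /* pin kept */
    }
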
@@ -461,11 +467,13 @@ btbeginscan(PG_FUNCTION_ARGS)
Datum
btrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
- bool fromEnd = PG_GETARG_BOOL(1);
+ bool fromEnd = PG_GETARG_BOOL(1);
+
#endif
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
BTScanOpaque so;
@@ -540,7 +548,7 @@ btmovescan(IndexScanDesc scan, Datum v)
Datum
btendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -578,7 +586,7 @@ btendscan(PG_FUNCTION_ARGS)
Datum
btmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -610,7 +618,7 @@ btmarkpos(PG_FUNCTION_ARGS)
Datum
btrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -640,8 +648,8 @@ btrestrpos(PG_FUNCTION_ARGS)
Datum
btdelete(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
/* adjust any active scans that will be affected by this deletion */
_bt_adjscans(rel, tid);
@@ -671,8 +679,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
- * Get back the read lock we were holding on the buffer.
- * (We still have a reference-count pin on it, though.)
+ * Get back the read lock we were holding on the buffer. (We still
+ * have a reference-count pin on it, though.)
*/
LockBuffer(buf, BT_READ);
@@ -689,13 +697,13 @@ _bt_restscan(IndexScanDesc scan)
if (!ItemPointerIsValid(&target))
{
ItemPointerSetOffsetNumber(current,
- OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
+ OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
return;
}
/*
- * The item we were on may have moved right due to insertions.
- * Find it again.
+ * The item we were on may have moved right due to insertions. Find it
+ * again.
*/
for (;;)
{
@@ -717,7 +725,8 @@ _bt_restscan(IndexScanDesc scan)
}
/*
- * By here, the item we're looking for moved right at least one page
+ * By here, the item we're looking for moved right at least one
+ * page
*/
if (P_RIGHTMOST(opaque))
elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!"
@@ -742,14 +751,14 @@ _bt_restore_page(Page page, char *from, int len)
Size itemsz;
char *end = from + len;
- for ( ; from < end; )
+ for (; from < end;)
{
memcpy(&btdata, from, sizeof(BTItemData));
itemsz = IndexTupleDSize(btdata.bti_itup) +
- (sizeof(BTItemData) - sizeof(IndexTupleData));
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item) from, itemsz,
- FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
+ FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "_bt_restore_page: can't add item to page");
from += itemsz;
}
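
_bt_restore_page(), shown whole above, is the workhorse of split redo: the WAL payload is cut at xlrec->leftlen and each half repopulates one sibling. Condensed from btree_xlog_split() further down:

    char *payload = (char *) xlrec + SizeOfBtreeSplit;

    _bt_restore_page(leftpage, payload, xlrec->leftlen);
    _bt_restore_page(rightpage,
                     payload + xlrec->leftlen,
                     record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
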
@@ -758,20 +767,20 @@ _bt_restore_page(Page page, char *from, int len)
static void
btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_delete *xlrec;
- Relation reln;
- Buffer buffer;
- Page page;
+ xl_btree_delete *xlrec;
+ Relation reln;
+ Buffer buffer;
+ Page page;
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return;
- xlrec = (xl_btree_delete*) XLogRecGetData(record);
+ xlrec = (xl_btree_delete *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "btree_delete_redo: block unfound");
page = (Page) BufferGetPage(buffer);
@@ -796,21 +805,21 @@ btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_insert *xlrec;
- Relation reln;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
+ xl_btree_insert *xlrec;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
- xlrec = (xl_btree_insert*) XLogRecGetData(record);
+ xlrec = (xl_btree_insert *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
@@ -825,11 +834,11 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
UnlockAndReleaseBuffer(buffer);
return;
}
- if (PageAddItem(page, (Item)((char*)xlrec + SizeOfBtreeInsert),
- record->xl_len - SizeOfBtreeInsert,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
- LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree_insert_redo: failed to add item");
+ if (PageAddItem(page, (Item) ((char *) xlrec + SizeOfBtreeInsert),
+ record->xl_len - SizeOfBtreeInsert,
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ LP_USED) == InvalidOffsetNumber)
+ elog(STOP, "btree_insert_redo: failed to add item");
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -840,7 +849,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_insert_undo: bad page LSN");
- if (! P_ISLEAF(pageop))
+ if (!P_ISLEAF(pageop))
{
UnlockAndReleaseBuffer(buffer);
return;
@@ -855,14 +864,14 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_split *xlrec = (xl_btree_split*) XLogRecGetData(record);
- Relation reln;
- BlockNumber blkno;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
- char *op = (redo) ? "redo" : "undo";
- bool isleaf = (record->xl_info & XLOG_BTREE_LEAF);
+ xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
+ Relation reln;
+ BlockNumber blkno;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
+ char *op = (redo) ? "redo" : "undo";
+ bool isleaf = (record->xl_info & XLOG_BTREE_LEAF);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
@@ -870,7 +879,7 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
/* Left (original) sibling */
blkno = (onleft) ? ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
- BlockIdGetBlockNumber(&(xlrec->otherblk));
+ BlockIdGetBlockNumber(&(xlrec->otherblk));
buffer = XLogReadBuffer(false, reln, blkno);
if (!BufferIsValid(buffer))
elog(STOP, "btree_split_%s: lost left sibling", op);
@@ -892,13 +901,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
pageop->btpo_next = ItemPointerGetBlockNumber(&(xlrec->target.tid));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
- _bt_restore_page(page, (char*)xlrec + SizeOfBtreeSplit, xlrec->leftlen);
+ _bt_restore_page(page, (char *) xlrec + SizeOfBtreeSplit, xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
- else /* undo */
+ else
+/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad left sibling LSN");
@@ -906,8 +916,8 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
}
/* Right (new) sibling */
- blkno = (onleft) ? BlockIdGetBlockNumber(&(xlrec->otherblk)) :
- ItemPointerGetBlockNumber(&(xlrec->target.tid));
+ blkno = (onleft) ? BlockIdGetBlockNumber(&(xlrec->otherblk)) :
+ ItemPointerGetBlockNumber(&(xlrec->target.tid));
buffer = XLogReadBuffer((redo) ? true : false, reln, blkno);
if (!BufferIsValid(buffer))
elog(STOP, "btree_split_%s: lost right sibling", op);
@@ -922,21 +932,22 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
if (redo)
{
pageop->btpo_parent = BlockIdGetBlockNumber(&(xlrec->parentblk));
- pageop->btpo_prev = (onleft) ?
- ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
- BlockIdGetBlockNumber(&(xlrec->otherblk));
+ pageop->btpo_prev = (onleft) ?
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
+ BlockIdGetBlockNumber(&(xlrec->otherblk));
pageop->btpo_next = BlockIdGetBlockNumber(&(xlrec->rightblk));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
_bt_restore_page(page,
- (char*)xlrec + SizeOfBtreeSplit + xlrec->leftlen,
- record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
+ (char *) xlrec + SizeOfBtreeSplit + xlrec->leftlen,
+ record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
- else /* undo */
+ else
+/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad right sibling LSN");
@@ -965,9 +976,9 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
return;
}
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
- pageop->btpo_prev = (onleft) ?
- BlockIdGetBlockNumber(&(xlrec->otherblk)) :
- ItemPointerGetBlockNumber(&(xlrec->target.tid));
+ pageop->btpo_prev = (onleft) ?
+ BlockIdGetBlockNumber(&(xlrec->otherblk)) :
+ ItemPointerGetBlockNumber(&(xlrec->target.tid));
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -977,14 +988,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_newroot *xlrec = (xl_btree_newroot*) XLogRecGetData(record);
- Relation reln;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
- Buffer metabuf;
- Page metapg;
- BTMetaPageData md;
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) XLogRecGetData(record);
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
+ Buffer metabuf;
+ Page metapg;
+ BTMetaPageData md;
if (!redo)
return;
@@ -1011,8 +1022,8 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfBtreeNewroot)
_bt_restore_page(page,
- (char*)xlrec + SizeOfBtreeNewroot,
- record->xl_len - SizeOfBtreeNewroot);
+ (char *) xlrec + SizeOfBtreeNewroot,
+ record->xl_len - SizeOfBtreeNewroot);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -1037,7 +1048,7 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
void
btree_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_DELETE)
@@ -1045,9 +1056,9 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(true, lsn, record);
else if (info == XLOG_BTREE_SPLIT)
- btree_xlog_split(true, false, lsn, record); /* new item on the right */
+ btree_xlog_split(true, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT)
- btree_xlog_split(true, true, lsn, record); /* new item on the left */
+ btree_xlog_split(true, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(true, lsn, record);
else
@@ -1057,7 +1068,7 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
void
btree_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_DELETE)
@@ -1065,9 +1076,9 @@ btree_undo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(false, lsn, record);
else if (info == XLOG_BTREE_SPLIT)
- btree_xlog_split(false, false, lsn, record);/* new item on the right */
+ btree_xlog_split(false, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT)
- btree_xlog_split(false, true, lsn, record); /* new item on the left */
+ btree_xlog_split(false, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(false, lsn, record);
else
@@ -1078,45 +1089,49 @@ static void
out_target(char *buf, xl_btreetid *target)
{
sprintf(buf + strlen(buf), "node %u/%u; tid %u/%u",
- target->node.tblNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.tblNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
-
+
void
-btree_desc(char *buf, uint8 xl_info, char* rec)
+btree_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_INSERT)
{
- xl_btree_insert *xlrec = (xl_btree_insert*) rec;
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_DELETE)
{
- xl_btree_delete *xlrec = (xl_btree_delete*) rec;
+ xl_btree_delete *xlrec = (xl_btree_delete *) rec;
+
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_SPLIT || info == XLOG_BTREE_SPLEFT)
{
- xl_btree_split *xlrec = (xl_btree_split*) rec;
- sprintf(buf + strlen(buf), "split(%s): ",
- (info == XLOG_BTREE_SPLIT) ? "right" : "left");
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
+
+ sprintf(buf + strlen(buf), "split(%s): ",
+ (info == XLOG_BTREE_SPLIT) ? "right" : "left");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- BlockIdGetBlockNumber(&xlrec->otherblk),
- BlockIdGetBlockNumber(&xlrec->rightblk));
+ BlockIdGetBlockNumber(&xlrec->otherblk),
+ BlockIdGetBlockNumber(&xlrec->rightblk));
}
else if (info == XLOG_BTREE_NEWROOT)
{
- xl_btree_newroot *xlrec = (xl_btree_newroot*) rec;
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
+
sprintf(buf + strlen(buf), "root: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- BlockIdGetBlockNumber(&xlrec->rootblk));
+ xlrec->node.tblNode, xlrec->node.relNode,
+ BlockIdGetBlockNumber(&xlrec->rootblk));
}
else
strcat(buf, "UNKNOWN");
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 6f41ab9c847..d8b8e0682a0 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.63 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,20 +32,20 @@ static RetrieveIndexResult _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
_bt_search(Relation rel, int keysz, ScanKey scankey,
Buffer *bufP, int access)
{
- BTStack stack_in = NULL;
+ BTStack stack_in = NULL;
/* Get the root page to start with */
*bufP = _bt_getroot(rel, access);
/* If index is empty and access = BT_READ, no root page is created. */
- if (! BufferIsValid(*bufP))
+ if (!BufferIsValid(*bufP))
return (BTStack) NULL;
/* Loop iterates once per level descended in the tree */
@@ -79,13 +79,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
- * We need to save the bit image of the index entry we chose in the
- * parent page on a stack. In case we split the tree, we'll use this
- * bit image to figure out what our real parent page is, in case the
- * parent splits while we're working lower in the tree. See the paper
- * by Lehman and Yao for how this is detected and handled. (We use the
- * child link to disambiguate duplicate keys in the index -- Lehman
- * and Yao disallow duplicate keys.)
+ * We need to save the bit image of the index entry we chose in
+ * the parent page on a stack. In case we split the tree, we'll
+ * use this bit image to figure out what our real parent page is,
+ * in case the parent splits while we're working lower in the
+ * tree. See the paper by Lehman and Yao for how this is detected
+ * and handled. (We use the child link to disambiguate duplicate
+ * keys in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@@ -98,9 +98,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getbuf(rel, blkno, BT_READ);
/*
- * Race -- the page we just grabbed may have split since we read its
- * pointer in the parent. If it has, we may need to move right to its
- * new sibling. Do that.
+ * Race -- the page we just grabbed may have split since we read
+ * its pointer in the parent. If it has, we may need to move
+ * right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
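The race this hunk documents is resolved by _bt_moveright(): keep following right-links until the scan key no longer exceeds the page's high key. A standalone sketch of that loop over a toy page array, where hikey is an exclusive upper bound and next is -1 on the rightmost page (a hypothetical model, not the on-disk layout):

#include <stdio.h>

typedef struct
{
	int		hikey;			/* keys on this page are < hikey */
	int		next;			/* right sibling, or -1 if rightmost */
} ToyPage;

/* Step right past any pages the key has outgrown, as _bt_moveright()
 * does after a concurrent split moved keys to a new right sibling. */
static int
moveright(ToyPage *pages, int blkno, int key)
{
	while (pages[blkno].next != -1 && key >= pages[blkno].hikey)
		blkno = pages[blkno].next;
	return blkno;
}

int
main(void)
{
	/* page 0 split; keys >= 50 now live on page 1 */
	ToyPage pages[2] = {{50, 1}, {100, -1}};

	printf("key 70 is on page %d\n", moveright(pages, 0, 70));	/* 1 */
	return 0;
}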
@@ -127,7 +127,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*
* On entry, we have the buffer pinned and a lock of the proper type.
* If we move right, we release the buffer and lock and acquire the
- * same on the right sibling. Return value is the buffer we stop at.
+ * same on the right sibling. Return value is the buffer we stop at.
*/
Buffer
_bt_moveright(Relation rel,
@@ -153,7 +153,7 @@ _bt_moveright(Relation rel,
_bt_compare(rel, keysz, scankey, page, P_HIKEY) > 0)
{
/* step right one page */
- BlockNumber rblkno = opaque->btpo_next;
+ BlockNumber rblkno = opaque->btpo_next;
_bt_relbuf(rel, buf, access);
buf = _bt_getbuf(rel, rblkno, access);
@@ -184,7 +184,7 @@ _bt_moveright(Relation rel,
* find all leaf keys >= given scankey.
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -299,7 +299,7 @@ _bt_compare(Relation rel,
* Force result ">" if target item is first data item on an internal
* page --- see NOTE above.
*/
- if (! P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
+ if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
return 1;
btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum));
@@ -327,7 +327,7 @@ _bt_compare(Relation rel,
datum = index_getattr(itup, entry->sk_attno, itupdesc, &isNull);
/* see comments about NULLs handling in btbuild */
- if (entry->sk_flags & SK_ISNULL) /* key is NULL */
+ if (entry->sk_flags & SK_ISNULL) /* key is NULL */
{
if (isNull)
result = 0; /* NULL "=" NULL */
@@ -458,10 +458,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_orderkeys(rel, so);
/*
- * Quit now if _bt_orderkeys() discovered that the scan keys can
- * never be satisfied (eg, x == 1 AND x > 2).
+ * Quit now if _bt_orderkeys() discovered that the scan keys can never
+ * be satisfied (eg, x == 1 AND x > 2).
*/
- if (! so->qual_ok)
+ if (!so->qual_ok)
return (RetrieveIndexResult) NULL;
/*
@@ -484,17 +484,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
strat = _bt_getstrat(rel, attno,
so->keyData[i].sk_procedure);
+
/*
* Can we use this key as a starting boundary for this attr?
*
- * We can use multiple keys if they look like, say, = >= =
- * but we have to stop after accepting a > or < boundary.
+ * We can use multiple keys if they look like, say, = >= = but we
+ * have to stop after accepting a > or < boundary.
*/
if (strat == strat_total ||
strat == BTEqualStrategyNumber)
- {
nKeyIs[keysCount++] = i;
- }
else if (ScanDirectionIsBackward(dir) &&
(strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber))
@@ -536,7 +535,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
for (i = 0; i < keysCount; i++)
{
j = nKeyIs[i];
- /* _bt_orderkeys disallows it, but it's place to add some code later */
+
+ /*
+ * _bt_orderkeys disallows it, but it's a place to add some code
+ * later
+ */
if (so->keyData[j].sk_flags & SK_ISNULL)
{
pfree(nKeyIs);
@@ -562,7 +565,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* don't need to keep the stack around... */
_bt_freestack(stack);
- if (! BufferIsValid(buf))
+ if (!BufferIsValid(buf))
{
/* Only get here if index is completely empty */
ItemPointerSetInvalid(current);
@@ -601,6 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
switch (strat_total)
{
case BTLessStrategyNumber:
+
/*
* Back up one to arrive at last item < scankey
*/
@@ -612,6 +616,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTLessEqualStrategyNumber:
+
/*
* We need to find the last item <= scankey, so step forward
* till we find one > scankey, then step back one.
@@ -645,9 +650,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTEqualStrategyNumber:
+
/*
- * Make sure we are on the first equal item; might have to step
- * forward if currently at end of page.
+ * Make sure we are on the first equal item; might have to
+ * step forward if currently at end of page.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@@ -661,7 +667,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
result = _bt_compare(rel, keysCount, scankeys, page, offnum);
if (result != 0)
- goto nomatches; /* no equal items! */
+ goto nomatches; /* no equal items! */
+
/*
* If a backward scan was specified, need to start with last
* equal item not first one.
@@ -685,6 +692,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterEqualStrategyNumber:
+
/*
* We want the first item >= scankey, which is where we are...
* unless we're not anywhere at all...
@@ -700,9 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterStrategyNumber:
+
/*
- * We want the first item > scankey, so make sure we are on
- * an item and then step over any equal items.
+ * We want the first item > scankey, so make sure we are on an
+ * item and then step over any equal items.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@@ -850,11 +859,12 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
*bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
* If the adjacent page just split, then we have to walk
- * right to find the block that's now adjacent to where
- * we were. Because pages only split right, we don't have
- * to worry about this failing to terminate.
+ * right to find the block that's now adjacent to where we
+ * were. Because pages only split right, we don't have to
+ * worry about this failing to terminate.
*/
while (opaque->btpo_next != obknum)
{
@@ -912,12 +922,12 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
/*
* Scan down to the leftmost or rightmost leaf page. This is a
- * simplified version of _bt_search(). We don't maintain a stack
+ * simplified version of _bt_search(). We don't maintain a stack
* since we know we won't need it.
*/
buf = _bt_getroot(rel, BT_READ);
- if (! BufferIsValid(buf))
+ if (!BufferIsValid(buf))
{
/* empty index... */
ItemPointerSetInvalid(current);
@@ -981,7 +991,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page);
- if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */
+ if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty
+ * page */
start = P_FIRSTDATAKEY(opaque);
}
else
@@ -995,8 +1006,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so->btso_curbuf = buf;
/*
- * Left/rightmost page could be empty due to deletions,
- * if so step till we find a nonempty page.
+ * Left/rightmost page could be empty due to deletions, if so step
+ * till we find a nonempty page.
*/
if (start > maxoff)
{
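Every strategy case in _bt_first() above reduces to "locate the first item >= scankey, then adjust by strategy". A standalone sketch over a sorted array (not the backend API) showing the >= starting point and the extra step-over that > needs:

#include <stdio.h>

/* Binary search for the first index with a[i] >= key, the common
 * starting point that the BTGreaterEqual case uses directly. */
static int
first_ge(const int *a, int n, int key)
{
	int		lo = 0,
			hi = n;

	while (lo < hi)
	{
		int		mid = (lo + hi) / 2;

		if (a[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

int
main(void)
{
	int		a[] = {1, 3, 3, 5, 8};
	int		ge = first_ge(a, 5, 3);	/* ">= 3" starts at index 1 */
	int		gt = ge;

	while (gt < 5 && a[gt] == 3)
		gt++;						/* "> 3" steps over equal items */
	printf("start >=3 at %d, >3 at %d\n", ge, gt);	/* 1 and 3 */
	return 0;
}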
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index e9224a485af..2aca6bf7cfc 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -6,7 +6,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -35,7 +35,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.59 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -73,11 +73,13 @@ typedef struct BTPageState
{
Buffer btps_buf; /* current buffer & page */
Page btps_page;
- BTItem btps_minkey; /* copy of minimum key (first item) on page */
+ BTItem btps_minkey; /* copy of minimum key (first item) on
+ * page */
OffsetNumber btps_lastoff; /* last item offset loaded */
int btps_level; /* tree level (0 = leaf) */
- Size btps_full; /* "full" if less than this much free space */
- struct BTPageState *btps_next; /* link to parent level, if any */
+ Size btps_full; /* "full" if less than this much free
+ * space */
+ struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState;
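BTPageState above is the heart of the bottom-up build: one state per active level, linked to its parent, with the parent level created the first time a child-level page fills. A deliberately tiny standalone model of that recursion (capacity, types, and the key posted upward are all simplified; the real code posts the finished page's minimum key):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_CAP 3				/* toy page capacity */

typedef struct ToyLevel
{
	int		nitems;				/* items on this level's current page */
	struct ToyLevel *parent;	/* next level up, created on demand */
} ToyLevel;

static void
add_item(ToyLevel *lv, int key)
{
	if (lv->nitems == PAGE_CAP)
	{
		if (lv->parent == NULL)
			lv->parent = calloc(1, sizeof(ToyLevel));	/* new level */
		add_item(lv->parent, key);	/* post an entry to the parent */
		lv->nitems = 0;				/* start a fresh page */
	}
	lv->nitems++;
}

int
main(void)
{
	ToyLevel	leaf = {0, NULL};

	for (int k = 1; k <= 10; k++)
		add_item(&leaf, k);
	printf("parent level exists: %d\n", leaf.parent != NULL);	/* 1 */
	return 0;
}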
@@ -92,7 +94,7 @@ static void _bt_blnewpage(Relation index, Buffer *buf, Page *page, int flags);
static BTPageState *_bt_pagestate(Relation index, int flags, int level);
static void _bt_slideleft(Relation index, Buffer buf, Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
- BTItem btitem, OffsetNumber itup_off);
+ BTItem btitem, OffsetNumber itup_off);
static void _bt_buildadd(Relation index, BTPageState *state, BTItem bti);
static void _bt_uppershutdown(Relation index, BTPageState *state);
static void _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2);
@@ -162,7 +164,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
ShowUsage();
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
tuplesort_performsort(btspool->sortstate);
if (btspool2)
@@ -269,9 +271,9 @@ _bt_sortaddtup(Page page,
OffsetNumber itup_off)
{
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- BTItemData truncitem;
+ BTItemData truncitem;
- if (! P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
+ if (!P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
@@ -290,7 +292,7 @@ _bt_sortaddtup(Page page,
* We must be careful to observe the page layout conventions of nbtsearch.c:
* - rightmost pages start data items at P_HIKEY instead of at P_FIRSTKEY.
* - on non-leaf pages, the key portion of the first item need not be
- * stored, we should store only the link.
+ * stored, we should store only the link.
*
* A leaf page being built looks like:
*
@@ -347,11 +349,12 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
*/
if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
- (unsigned long)btisz,
- (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
+ (unsigned long) btisz,
+ (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
if (pgspc < btisz || pgspc < state->btps_full)
{
+
/*
* Item won't fit on this page, or we feel the page is full enough
* already. Finish off the page and write it out.
@@ -388,9 +391,9 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/*
- * Link the old buffer into its parent, using its minimum key.
- * If we don't have a parent, we have to create one;
- * this adds a new btree level.
+ * Link the old buffer into its parent, using its minimum key. If
+ * we don't have a parent, we have to create one; this adds a new
+ * btree level.
*/
if (state->btps_next == (BTPageState *) NULL)
{
@@ -405,8 +408,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* Save a copy of the minimum key for the new page. We have to
- * copy it off the old page, not the new one, in case we are
- * not at leaf level.
+ * copy it off the old page, not the new one, in case we are not
+ * at leaf level.
*/
state->btps_minkey = _bt_formitem(&(obti->bti_itup));
@@ -414,13 +417,13 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
* Set the sibling links for both pages, and parent links too.
*
* It's not necessary to set the parent link at all, because it's
- * only used for handling concurrent root splits, but we may as well
- * do it as a debugging aid. Note we set new page's link as well
- * as old's, because if the new page turns out to be the last of
- * the level, _bt_uppershutdown won't change it. The links may be
- * out of date by the time the build finishes, but that's OK; they
- * need only point to a left-sibling of the true parent. See the
- * README file for more info.
+ * only used for handling concurrent root splits, but we may as
+ * well do it as a debugging aid. Note we set new page's link as
+ * well as old's, because if the new page turns out to be the last
+ * of the level, _bt_uppershutdown won't change it. The links may
+ * be out of date by the time the build finishes, but that's OK;
+ * they need only point to a left-sibling of the true parent. See
+ * the README file for more info.
*/
{
BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage);
@@ -434,7 +437,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
}
/*
- * Write out the old page. We never want to see it again, so we
+ * Write out the old page. We never want to see it again, so we
* can give up our lock (if we had one; most likely BuildingBtree
* is set, so we aren't locking).
*/
@@ -449,8 +452,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* If the new item is the first for its page, stash a copy for later.
* Note this will only happen for the first item on a level; on later
- * pages, the first item for a page is copied from the prior page
- * in the code above.
+ * pages, the first item for a page is copied from the prior page in
+ * the code above.
*/
if (last_off == P_HIKEY)
{
@@ -493,8 +496,8 @@ _bt_uppershutdown(Relation index, BTPageState *state)
*
* If we're at the top, it's the root, so attach it to the metapage.
* Otherwise, add an entry for it to its parent using its minimum
- * key. This may cause the last page of the parent level to split,
- * but that's not a problem -- we haven't gotten to it yet.
+ * key. This may cause the last page of the parent level to
+ * split, but that's not a problem -- we haven't gotten to it yet.
*/
if (s->btps_next == (BTPageState *) NULL)
{
@@ -513,7 +516,7 @@ _bt_uppershutdown(Relation index, BTPageState *state)
/*
* This is the rightmost page, so the ItemId array needs to be
- * slid back one slot. Then we can dump out the page.
+ * slid back one slot. Then we can dump out the page.
*/
_bt_slideleft(index, s->btps_buf, s->btps_page);
_bt_wrtbuf(index, s->btps_buf);
@@ -529,22 +532,29 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
{
BTPageState *state = NULL;
bool merge = (btspool2 != NULL);
- BTItem bti, bti2 = NULL;
- bool should_free, should_free2, load1;
+ BTItem bti,
+ bti2 = NULL;
+ bool should_free,
+ should_free2,
+ load1;
TupleDesc tupdes = RelationGetDescr(index);
- int i, keysz = RelationGetNumberOfAttributes(index);
+ int i,
+ keysz = RelationGetNumberOfAttributes(index);
ScanKey indexScanKey = NULL;
if (merge)
{
+
/*
- * Another BTSpool for dead tuples exists.
- * Now we have to merge btspool and btspool2.
- */
- ScanKey entry;
- Datum attrDatum1, attrDatum2;
- bool isFirstNull, isSecondNull;
- int32 compare;
+ * Another BTSpool for dead tuples exists. Now we have to merge
+ * btspool and btspool2.
+ */
+ ScanKey entry;
+ Datum attrDatum1,
+ attrDatum2;
+ bool isFirstNull,
+ isSecondNull;
+ int32 compare;
/* the preparation of merge */
bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free);
@@ -552,7 +562,7 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
indexScanKey = _bt_mkscankey_nodata(index);
for (;;)
{
- load1 = true; /* load BTSpool next ? */
+ load1 = true; /* load BTSpool next ? */
if (NULL == bti2)
{
if (NULL == bti)
@@ -564,8 +574,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
for (i = 1; i <= keysz; i++)
{
entry = indexScanKey + i - 1;
- attrDatum1 = index_getattr((IndexTuple)bti, i, tupdes, &isFirstNull);
- attrDatum2 = index_getattr((IndexTuple)bti2, i, tupdes, &isSecondNull);
+ attrDatum1 = index_getattr((IndexTuple) bti, i, tupdes, &isFirstNull);
+ attrDatum2 = index_getattr((IndexTuple) bti2, i, tupdes, &isSecondNull);
if (isFirstNull)
{
if (!isSecondNull)
@@ -586,7 +596,7 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
else if (compare < 0)
break;
- }
+ }
}
}
else
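The loop above decides, comparison by comparison, whether the next tuple comes from btspool or btspool2 (the load1 flag). The same shape in a standalone two-array merge, with ints standing in for index tuples:

#include <stdio.h>

int
main(void)
{
	int		a[] = {1, 4, 6};
	int		b[] = {2, 3, 9};
	int		ia = 0,
			ib = 0;

	while (ia < 3 || ib < 3)
	{
		int		load1 = 1;		/* take from a next? */

		if (ia == 3)
			load1 = 0;			/* a is exhausted */
		else if (ib < 3 && b[ib] < a[ia])
			load1 = 0;			/* b sorts first */

		printf("%d ", load1 ? a[ia++] : b[ib++]);
	}
	printf("\n");				/* prints: 1 2 3 4 6 9 */
	return 0;
}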
@@ -613,7 +623,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
_bt_freeskey(indexScanKey);
}
- else /* merge is unnecessary */
+ else
+/* merge is unnecessary */
{
while (bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free), bti != (BTItem) NULL)
{
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 507205f2be7..2a37147d68e 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.42 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.43 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ _bt_freestack(BTStack stack)
* Construct a BTItem from a plain IndexTuple.
*
* This is now useless code, since a BTItem *is* an index tuple with
- * no extra stuff. We hang onto it for the moment to preserve the
+ * no extra stuff. We hang onto it for the moment to preserve the
* notational distinction, in case we want to add some extra stuff
* again someday.
*/
@@ -165,7 +165,7 @@ _bt_formitem(IndexTuple itup)
* are "x = 1 AND y < 4 AND z < 5", then _bt_checkkeys will reject a tuple
* (1,2,7), but we must continue the scan in case there are tuples (1,3,z).
* But once we reach tuples like (1,4,z) we can stop scanning because no
- * later tuples could match. This is reflected by setting
+ * later tuples could match. This is reflected by setting
* so->numberOfRequiredKeys to the number of leading keys that must be
* matched to continue the scan. numberOfRequiredKeys is equal to the
* number of leading "=" keys plus the key(s) for the first non "="
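The required-keys rule stated here is short when written out: count the leading "=" keys, then include the key on the first inequality-bounded attribute. A standalone sketch, assuming one key per attribute for brevity:

#include <stdio.h>

/* Count the scan keys whose failure lets the scan stop early:
 * leading equality keys plus the first inequality key. */
static int
required_keys(const char *strats[], int n)
{
	int		i = 0;

	while (i < n && strats[i][0] == '=')
		i++;					/* leading "=" keys */
	if (i < n)
		i++;					/* first non-"=" key is required too */
	return i;
}

int
main(void)
{
	/* x = 1 AND y < 4 AND z < 5, as in the example above */
	const char *quals[] = {"=", "<", "<"};

	printf("required: %d of 3\n", required_keys(quals, 3));	/* 2 */
	return 0;
}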
@@ -178,7 +178,7 @@ _bt_formitem(IndexTuple itup)
*
* XXX this routine is one of many places that fail to handle SK_COMMUTE
* scankeys properly. Currently, the planner is careful never to generate
- * any indexquals that would require SK_COMMUTE to be set. Someday we ought
+ * any indexquals that would require SK_COMMUTE to be set. Someday we ought
* to try to fix this, though it's not real critical as long as indexable
* operators all have commutators...
*
@@ -191,7 +191,7 @@ _bt_formitem(IndexTuple itup)
void
_bt_orderkeys(Relation relation, BTScanOpaque so)
{
- ScanKeyData xform[BTMaxStrategyNumber];
+ ScanKeyData xform[BTMaxStrategyNumber];
bool init[BTMaxStrategyNumber];
uint16 numberOfKeys = so->numberOfKeys;
ScanKey key;
@@ -240,14 +240,14 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
/*
* Initialize for processing of keys for attr 1.
*
- * xform[i] holds a copy of the current scan key of strategy type i+1,
- * if any; init[i] is TRUE if we have found such a key for this attr.
+ * xform[i] holds a copy of the current scan key of strategy type i+1, if
+ * any; init[i] is TRUE if we have found such a key for this attr.
*/
attno = 1;
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
BTMaxStrategyNumber,
attno);
- MemSet(xform, 0, sizeof(xform)); /* not really necessary */
+ MemSet(xform, 0, sizeof(xform)); /* not really necessary */
MemSet(init, 0, sizeof(init));
/*
@@ -255,7 +255,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* pass to handle after-last-key processing. Actual exit from the
* loop is at the "break" statement below.
*/
- for (i = 0; ; cur++, i++)
+ for (i = 0;; cur++, i++)
{
if (i < numberOfKeys)
{
@@ -263,7 +263,9 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (cur->sk_flags & SK_ISNULL)
{
so->qual_ok = false;
- /* Quit processing so we don't try to invoke comparison
+
+ /*
+ * Quit processing so we don't try to invoke comparison
* routines on NULLs.
*/
return;
@@ -271,8 +273,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
/*
- * If we are at the end of the keys for a particular attr,
- * finish up processing and emit the cleaned-up keys.
+ * If we are at the end of the keys for a particular attr, finish
+ * up processing and emit the cleaned-up keys.
*/
if (i == numberOfKeys || cur->sk_attno != attno)
{
@@ -296,7 +298,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
eq = &xform[BTEqualStrategyNumber - 1];
for (j = BTMaxStrategyNumber; --j >= 0;)
{
- if (! init[j] ||
+ if (!init[j] ||
j == (BTEqualStrategyNumber - 1))
continue;
chk = &xform[j];
@@ -313,6 +315,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
else
{
+
/*
* No "=" for this key, so we're done with required keys
*/
@@ -355,8 +358,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* Emit the cleaned-up keys back into the key[] array in the
* correct order. Note we are overwriting our input here!
* It's OK because (a) xform[] is a physical copy of the keys
- * we want, (b) we cannot emit more keys than we input, so
- * we won't overwrite as-yet-unprocessed keys.
+ * we want, (b) we cannot emit more keys than we input, so we
+ * won't overwrite as-yet-unprocessed keys.
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
{
@@ -383,7 +386,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
BTMaxStrategyNumber,
attno);
- MemSet(xform, 0, sizeof(xform)); /* not really necessary */
+ MemSet(xform, 0, sizeof(xform)); /* not really necessary */
MemSet(init, 0, sizeof(init));
}
@@ -409,7 +412,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (DatumGetBool(test))
xform[j].sk_argument = cur->sk_argument;
else if (j == (BTEqualStrategyNumber - 1))
- so->qual_ok = false; /* key == a && key == b, but a != b */
+ so->qual_ok = false; /* key == a && key == b, but a !=
+ * b */
}
else
{
@@ -473,16 +477,18 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (isNull)
{
+
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual
- * is one of the "must match" subset. On a backward scan,
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if (keysok < so->numberOfRequiredKeys &&
ScanDirectionIsForward(dir))
*continuescan = false;
+
/*
* In any case, this indextuple doesn't match the qual.
*/
@@ -498,9 +504,10 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE))
{
+
/*
- * Tuple fails this qual. If it's a required qual, then
- * we can conclude no further tuples will pass, either.
+ * Tuple fails this qual. If it's a required qual, then we
+ * can conclude no further tuples will pass, either.
*/
if (keysok < so->numberOfRequiredKeys)
*continuescan = false;
diff --git a/src/backend/access/rtree/rtget.c b/src/backend/access/rtree/rtget.c
index df0f5e9c80e..c8fa6b18d68 100644
--- a/src/backend/access/rtree/rtget.c
+++ b/src/backend/access/rtree/rtget.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.24 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.25 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,8 +30,8 @@ static ItemPointer rtheapptr(Relation r, ItemPointer itemp);
Datum
rtgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/* if we have it cached in the scan desc, just return the value */
diff --git a/src/backend/access/rtree/rtproc.c b/src/backend/access/rtree/rtproc.c
index fd610caebe1..400be10ccb3 100644
--- a/src/backend/access/rtree/rtproc.c
+++ b/src/backend/access/rtree/rtproc.c
@@ -6,7 +6,7 @@
* NOTE: for largely-historical reasons, the intersection functions should
* return a NULL pointer (*not* an SQL null value) to indicate "no
* intersection". The size functions must be prepared to accept such
- * a pointer and return 0. This convention means that only pass-by-reference
+ * a pointer and return 0. This convention means that only pass-by-reference
* data types can be used as the output of the union and intersection
* routines, but that's not a big problem.
*
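The NULL-pointer convention spelled out in this comment means every rtree size function must tolerate a NULL input. A standalone sketch with a toy box type (compare rt_box_size in the hunks below):

#include <stdio.h>

typedef struct
{
	double	xl, yl, xh, yh;		/* toy stand-in for BOX */
} ToyBox;

/* NB: size is an output argument, and a NULL box (no intersection)
 * or a degenerate box must yield size 0. */
static void
toy_box_size(const ToyBox *a, float *size)
{
	if (a == NULL || a->xh <= a->xl || a->yh <= a->yl)
		*size = 0.0f;
	else
		*size = (float) ((a->xh - a->xl) * (a->yh - a->yl));
}

int
main(void)
{
	float	sz;

	toy_box_size(NULL, &sz);	/* the "no intersection" case */
	printf("size = %g\n", sz);	/* 0 */
	return 0;
}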
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.31 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.32 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,6 +70,7 @@ Datum
rt_box_size(PG_FUNCTION_ARGS)
{
BOX *a = PG_GETARG_BOX_P(0);
+
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
@@ -98,8 +99,8 @@ rt_bigbox_size(PG_FUNCTION_ARGS)
Datum
rt_poly_union(PG_FUNCTION_ARGS)
{
- POLYGON *a = PG_GETARG_POLYGON_P(0);
- POLYGON *b = PG_GETARG_POLYGON_P(1);
+ POLYGON *a = PG_GETARG_POLYGON_P(0);
+ POLYGON *b = PG_GETARG_POLYGON_P(1);
POLYGON *p;
p = (POLYGON *) palloc(sizeof(POLYGON));
@@ -122,8 +123,8 @@ rt_poly_union(PG_FUNCTION_ARGS)
Datum
rt_poly_inter(PG_FUNCTION_ARGS)
{
- POLYGON *a = PG_GETARG_POLYGON_P(0);
- POLYGON *b = PG_GETARG_POLYGON_P(1);
+ POLYGON *a = PG_GETARG_POLYGON_P(0);
+ POLYGON *b = PG_GETARG_POLYGON_P(1);
POLYGON *p;
p = (POLYGON *) palloc(sizeof(POLYGON));
@@ -155,13 +156,15 @@ Datum
rt_poly_size(PG_FUNCTION_ARGS)
{
Pointer aptr = PG_GETARG_POINTER(0);
+
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
- POLYGON *a;
+ POLYGON *a;
double xdim,
ydim;
- /* Can't just use GETARG because of possibility that input is NULL;
+ /*
+ * Can't just use GETARG because of possibility that input is NULL;
* since POLYGON is toastable, GETARG will try to inspect its value
*/
if (aptr == NULL)
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index 45382d5ef3c..3752a59e99a 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.60 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.61 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,12 +68,12 @@ static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup,
static void rttighten(Relation r, RTSTACK *stk, Datum datum, int att_size,
RTSTATE *rtstate);
static InsertIndexResult rtdosplit(Relation r, Buffer buffer, RTSTACK *stack,
- IndexTuple itup, RTSTATE *rtstate);
+ IndexTuple itup, RTSTATE *rtstate);
static void rtintinsert(Relation r, RTSTACK *stk, IndexTuple ltup,
IndexTuple rtup, RTSTATE *rtstate);
static void rtnewroot(Relation r, IndexTuple lt, IndexTuple rt);
static void rtpicksplit(Relation r, Page page, SPLITVEC *v, IndexTuple itup,
- RTSTATE *rtstate);
+ RTSTATE *rtstate);
static void RTInitBuffer(Buffer b, uint32 f);
static OffsetNumber choose(Relation r, Page p, IndexTuple it,
RTSTATE *rtstate);
@@ -84,12 +84,14 @@ static void initRtstate(RTSTATE *rtstate, Relation index);
Datum
rtbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -101,9 +103,11 @@ rtbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -171,6 +175,7 @@ rtbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -232,9 +237,7 @@ rtbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -278,12 +281,14 @@ rtbuild(PG_FUNCTION_ARGS)
Datum
rtinsert(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
IndexTuple itup;
@@ -412,7 +417,7 @@ rttighten(Relation r,
p = BufferGetPage(b);
oldud = IndexTupleGetDatum(PageGetItem(p,
- PageGetItemId(p, stk->rts_child)));
+ PageGetItemId(p, stk->rts_child)));
FunctionCall2(&rtstate->sizeFn, oldud,
PointerGetDatum(&old_size));
@@ -564,7 +569,7 @@ rtdosplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/* now insert the new index tuple */
- if (*spl_left == maxoff+1)
+ if (*spl_left == maxoff + 1)
{
if (PageAddItem(left, (Item) itup, IndexTupleSize(itup),
leftoff, LP_USED) == InvalidOffsetNumber)
@@ -576,7 +581,7 @@ rtdosplit(Relation r,
}
else
{
- Assert(*spl_right == maxoff+1);
+ Assert(*spl_right == maxoff + 1);
if (PageAddItem(right, (Item) itup, IndexTupleSize(itup),
rightoff, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "rtdosplit: failed to add index item to %s",
@@ -665,10 +670,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/*
- * This is a hack. Right now, we force rtree internal keys to be constant
- * size. To fix this, need delete the old key and add both left and
- * right for the two new pages. The insertion of left may force a
- * split if the new left key is bigger than the old key.
+ * This is a hack. Right now, we force rtree internal keys to be
+ * constant size. To fix this, we need to delete the old key and add both
+ * left and right for the two new pages. The insertion of left may
+ * force a split if the new left key is bigger than the old key.
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
@@ -734,7 +739,7 @@ rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
* We return two vectors of index item numbers, one for the items to be
* put on the left page, one for the items to be put on the right page.
* In addition, the item to be added (itup) is listed in the appropriate
- * vector. It is represented by item number N+1 (N = # of items on page).
+ * vector. It is represented by item number N+1 (N = # of items on page).
*
* Both vectors appear in sequence order with a terminating sentinel value
* of InvalidOffsetNumber.
@@ -747,9 +752,9 @@ rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
*
* We must also deal with a consideration not found in Guttman's algorithm:
* variable-length data. In particular, the incoming item might be
- * large enough that not just any split will work. In the worst case,
+ * large enough that not just any split will work. In the worst case,
* our "split" may have to be the new item on one page and all the existing
- * items on the other. Short of that, we have to take care that we do not
+ * items on the other. Short of that, we have to take care that we do not
* make a split that leaves both pages too full for the new item.
*/
static void
@@ -794,9 +799,10 @@ rtpicksplit(Relation r,
right_avail_space;
/*
- * First, make sure the new item is not so large that we can't possibly
- * fit it on a page, even by itself. (It's sufficient to make this test
- * here, since any oversize tuple must lead to a page split attempt.)
+ * First, make sure the new item is not so large that we can't
+ * possibly fit it on a page, even by itself. (It's sufficient to
+ * make this test here, since any oversize tuple must lead to a page
+ * split attempt.)
*/
newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace)
@@ -804,7 +810,8 @@ rtpicksplit(Relation r,
(unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
maxoff = PageGetMaxOffsetNumber(page);
- newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */
+ newitemoff = OffsetNumberNext(maxoff); /* phony index for new
+ * item */
/* Make arrays big enough for worst case, including sentinel */
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@@ -827,8 +834,8 @@ rtpicksplit(Relation r,
item_2_sz = IndexTupleTotalSize(item_2);
/*
- * Ignore seed pairs that don't leave room for the new item
- * on either split page.
+ * Ignore seed pairs that don't leave room for the new item on
+ * either split page.
*/
if (newitemsz + item_1_sz > RTPageAvailSpace &&
newitemsz + item_2_sz > RTPageAvailSpace)
@@ -841,8 +848,10 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_union));
inter_d = FunctionCall2(&rtstate->interFn,
datum_alpha, datum_beta);
- /* The interFn may return a NULL pointer (not an SQL null!)
- * to indicate no intersection. sizeFn must cope with this.
+
+ /*
+ * The interFn may return a NULL pointer (not an SQL null!) to
+ * indicate no intersection. sizeFn must cope with this.
*/
FunctionCall2(&rtstate->sizeFn, inter_d,
PointerGetDatum(&size_inter));
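The loop around this hunk (not fully shown) scores every pair of items by the difference of the union and intersection sizes it just computed, keeping the worst-scoring pair as the two split seeds; this is Guttman's quadratic PickSeeds as rtree applies it. A standalone one-dimensional sketch of that scoring:

#include <stdio.h>

typedef struct
{
	double	lo, hi;				/* 1-D stand-in for a rectangle */
} Iv;

static double
iv_size(Iv v)
{
	return (v.hi > v.lo) ? v.hi - v.lo : 0.0;	/* empty => 0 */
}

static Iv
iv_union(Iv a, Iv b)
{
	Iv		u = {a.lo < b.lo ? a.lo : b.lo, a.hi > b.hi ? a.hi : b.hi};

	return u;
}

static Iv
iv_inter(Iv a, Iv b)
{
	Iv		x = {a.lo > b.lo ? a.lo : b.lo, a.hi < b.hi ? a.hi : b.hi};

	return x;
}

int
main(void)
{
	Iv		items[3] = {{0, 1}, {0.5, 2}, {10, 11}};
	int		seed_1 = -1,
			seed_2 = -1;
	double	worst = -1.0;

	for (int i = 0; i < 3; i++)
		for (int j = i + 1; j < 3; j++)
		{
			double	waste;

			waste = iv_size(iv_union(items[i], items[j])) -
				iv_size(iv_inter(items[i], items[j]));
			if (waste > worst)
			{
				worst = waste;
				seed_1 = i;
				seed_2 = j;
			}
		}
	printf("seeds: %d and %d\n", seed_1, seed_2);	/* 0 and 2 */
	return 0;
}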
@@ -869,6 +878,7 @@ rtpicksplit(Relation r,
if (firsttime)
{
+
/*
* There is no possible split except to put the new item on its
* own page. Since we still have to compute the union rectangles,
@@ -916,14 +926,14 @@ rtpicksplit(Relation r,
for (i = FirstOffsetNumber; i <= newitemoff; i = OffsetNumberNext(i))
{
- bool left_feasible,
- right_feasible,
- choose_left;
+ bool left_feasible,
+ right_feasible,
+ choose_left;
/*
* If we've already decided where to place this item, just put it
- * on the correct list. Otherwise, we need to figure out which page
- * needs the least enlargement in order to store the item.
+ * on the correct list. Otherwise, we need to figure out which
+ * page needs the least enlargement in order to store the item.
*/
if (i == seed_1)
@@ -961,12 +971,13 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_beta));
/*
- * We prefer the page that shows smaller enlargement of its union area
- * (Guttman's algorithm), but we must take care that at least one page
- * will still have room for the new item after this one is added.
+ * We prefer the page that shows smaller enlargement of its union
+ * area (Guttman's algorithm), but we must take care that at least
+ * one page will still have room for the new item after this one
+ * is added.
*
- * (We know that all the old items together can fit on one page,
- * so we need not worry about any other problem than failing to fit
+ * (We know that all the old items together can fit on one page, so
+ * we need not worry about any other problem than failing to fit
* the new item.)
*/
left_feasible = (left_avail_space >= item_1_sz &&
@@ -987,7 +998,7 @@ rtpicksplit(Relation r,
else
{
elog(ERROR, "rtpicksplit: failed to find a workable page split");
- choose_left = false; /* keep compiler quiet */
+ choose_left = false;/* keep compiler quiet */
}
if (choose_left)
@@ -1012,7 +1023,7 @@ rtpicksplit(Relation r,
}
}
- *left = *right = InvalidOffsetNumber; /* add ending sentinels */
+ *left = *right = InvalidOffsetNumber; /* add ending sentinels */
v->spl_ldatum = datum_l;
v->spl_rdatum = datum_r;
@@ -1096,8 +1107,8 @@ freestack(RTSTACK *s)
Datum
rtdelete(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
@@ -1203,14 +1214,14 @@ rtree_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "rtree_redo: unimplemented");
}
-
+
void
rtree_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "rtree_undo: unimplemented");
}
-
+
void
-rtree_desc(char *buf, uint8 xl_info, char* rec)
+rtree_desc(char *buf, uint8 xl_info, char *rec)
{
}
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 605d51b5d33..f3e6d52fe67 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.35 2001/01/24 19:42:50 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.36 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,9 +75,9 @@ rtbeginscan(PG_FUNCTION_ARGS)
Datum
rtrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- bool fromEnd = PG_GETARG_BOOL(1);
- ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ bool fromEnd = PG_GETARG_BOOL(1);
+ ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
RTreeScanOpaque p;
RegProcedure internal_proc;
int i;
@@ -162,7 +162,7 @@ rtrescan(PG_FUNCTION_ARGS)
Datum
rtmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
RTSTACK *o,
*n,
@@ -198,7 +198,7 @@ rtmarkpos(PG_FUNCTION_ARGS)
Datum
rtrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
RTSTACK *o,
*n,
@@ -234,7 +234,7 @@ rtrestrpos(PG_FUNCTION_ARGS)
Datum
rtendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
p = (RTreeScanOpaque) s->opaque;
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index b25db74da8c..625b0db3202 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -9,21 +9,21 @@
#include "storage/smgr.h"
#include "commands/sequence.h"
-RmgrData RmgrTable[] = {
-{"XLOG", xlog_redo, xlog_undo, xlog_desc},
-{"Transaction", xact_redo, xact_undo, xact_desc},
-{"Storage", smgr_redo, smgr_undo, smgr_desc},
-{"Reserved 3", NULL, NULL, NULL},
-{"Reserved 4", NULL, NULL, NULL},
-{"Reserved 5", NULL, NULL, NULL},
-{"Reserved 6", NULL, NULL, NULL},
-{"Reserved 7", NULL, NULL, NULL},
-{"Reserved 8", NULL, NULL, NULL},
-{"Reserved 9", NULL, NULL, NULL},
-{"Heap", heap_redo, heap_undo, heap_desc},
-{"Btree", btree_redo, btree_undo, btree_desc},
-{"Hash", hash_redo, hash_undo, hash_desc},
-{"Rtree", rtree_redo, rtree_undo, rtree_desc},
-{"Gist", gist_redo, gist_undo, gist_desc},
-{"Sequence", seq_redo, seq_undo, seq_desc}
+RmgrData RmgrTable[] = {
+ {"XLOG", xlog_redo, xlog_undo, xlog_desc},
+ {"Transaction", xact_redo, xact_undo, xact_desc},
+ {"Storage", smgr_redo, smgr_undo, smgr_desc},
+ {"Reserved 3", NULL, NULL, NULL},
+ {"Reserved 4", NULL, NULL, NULL},
+ {"Reserved 5", NULL, NULL, NULL},
+ {"Reserved 6", NULL, NULL, NULL},
+ {"Reserved 7", NULL, NULL, NULL},
+ {"Reserved 8", NULL, NULL, NULL},
+ {"Reserved 9", NULL, NULL, NULL},
+ {"Heap", heap_redo, heap_undo, heap_desc},
+ {"Btree", btree_redo, btree_undo, btree_desc},
+ {"Hash", hash_redo, hash_undo, hash_desc},
+ {"Rtree", rtree_redo, rtree_undo, rtree_desc},
+ {"Gist", gist_redo, gist_undo, gist_desc},
+ {"Sequence", seq_redo, seq_undo, seq_desc}
};
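RmgrTable is consumed during recovery by indexing it with a record's resource-manager id and calling the matching routine. A standalone sketch of that dispatch pattern with stub types (not the real xlog structures):

#include <stdio.h>

typedef struct
{
	int		rmid;				/* resource manager id of the record */
} RecordStub;

typedef void (*RedoFnStub) (RecordStub *rec);

static void
heap_redo_stub(RecordStub *rec)
{
	(void) rec;
	printf("heap redo\n");
}

static void
btree_redo_stub(RecordStub *rec)
{
	(void) rec;
	printf("btree redo\n");
}

/* slot 0 plays "Heap", slot 1 plays "Btree" in this toy table */
static RedoFnStub redo_table[] = {heap_redo_stub, btree_redo_stub};

int
main(void)
{
	RecordStub	rec = {1};

	redo_table[rec.rmid] (&rec);	/* dispatch by rmid: "btree redo" */
	return 0;
}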
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 64289057926..29e72e84175 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.41 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -427,8 +427,8 @@ InitializeTransactionLog(void)
TransactionLogUpdate(AmiTransactionId, XID_COMMIT);
TransactionIdStore(AmiTransactionId, &cachedTestXid);
cachedTestXidStatus = XID_COMMIT;
- Assert(!IsUnderPostmaster &&
- ShmemVariableCache->nextXid <= FirstTransactionId);
+ Assert(!IsUnderPostmaster &&
+ ShmemVariableCache->nextXid <= FirstTransactionId);
ShmemVariableCache->nextXid = FirstTransactionId;
}
else if (RecoveryCheckingEnabled())
diff --git a/src/backend/access/transam/transsup.c b/src/backend/access/transam/transsup.c
index e4ff7979cf9..c433506eae6 100644
--- a/src/backend/access/transam/transsup.c
+++ b/src/backend/access/transam/transsup.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.28 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains support functions for the high
@@ -186,7 +186,7 @@ TransBlockGetXidStatus(Block tblock,
bits8 bit2;
BitIndex offset;
- tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
+ tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where
@@ -229,7 +229,7 @@ TransBlockSetXidStatus(Block tblock,
Index index;
BitIndex offset;
- tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
+ tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where
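Both routines pack four two-bit commit states per byte, after stepping the block pointer past the XLogRecPtr header as the hunks above show. A standalone sketch of the byte/bit arithmetic (the real code's bit ordering may differ):

#include <stdio.h>

/* Fetch the 2-bit status of the xid'th entry in a status block. */
static unsigned
get_xid_status(const unsigned char *block, unsigned xid)
{
	unsigned	byteno = xid / 4;		/* four 2-bit entries per byte */
	unsigned	shift = (xid % 4) * 2;	/* bit offset inside the byte */

	return (block[byteno] >> shift) & 0x3;
}

int
main(void)
{
	unsigned char block[16] = {0};

	block[1] = 0x1 << 4;		/* mark entry 6 with status 1 */
	printf("status of entry 6: %u\n", get_xid_status(block, 6));	/* 1 */
	return 0;
}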
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index d6097b2567c..34c607eab9f 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.37 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.38 2001/03/22 03:59:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,8 +23,8 @@
#define VAR_OID_PREFETCH 8192
/* Spinlocks for serializing generation of XIDs and OIDs, respectively */
-SPINLOCK XidGenLockId;
-SPINLOCK OidGenLockId;
+SPINLOCK XidGenLockId;
+SPINLOCK OidGenLockId;
/* pointer to "variable cache" in shared memory (set up by shmem.c) */
VariableCache ShmemVariableCache = NULL;
@@ -32,9 +32,10 @@ VariableCache ShmemVariableCache = NULL;
void
GetNewTransactionId(TransactionId *xid)
{
+
/*
- * During bootstrap initialization, we return the special
- * bootstrap transaction id.
+ * During bootstrap initialization, we return the special bootstrap
+ * transaction id.
*/
if (AMI_OVERRIDE)
{
@@ -60,9 +61,10 @@ GetNewTransactionId(TransactionId *xid)
void
ReadNewTransactionId(TransactionId *xid)
{
+
/*
- * During bootstrap initialization, we return the special
- * bootstrap transaction id.
+ * During bootstrap initialization, we return the special bootstrap
+ * transaction id.
*/
if (AMI_OVERRIDE)
{
@@ -80,7 +82,7 @@ ReadNewTransactionId(TransactionId *xid)
* ----------------------------------------------------------------
*/
-static Oid lastSeenOid = InvalidOid;
+static Oid lastSeenOid = InvalidOid;
void
GetNewObjectId(Oid *oid_return)
@@ -119,10 +121,10 @@ CheckMaxObjectId(Oid assigned_oid)
}
/* If we are in the logged oid range, just bump nextOid up */
- if (assigned_oid <= ShmemVariableCache->nextOid +
- ShmemVariableCache->oidCount - 1)
+ if (assigned_oid <= ShmemVariableCache->nextOid +
+ ShmemVariableCache->oidCount - 1)
{
- ShmemVariableCache->oidCount -=
+ ShmemVariableCache->oidCount -=
assigned_oid - ShmemVariableCache->nextOid + 1;
ShmemVariableCache->nextOid = assigned_oid + 1;
SpinRelease(OidGenLockId);
@@ -130,10 +132,9 @@ CheckMaxObjectId(Oid assigned_oid)
}
/*
- * We have exceeded the logged oid range.
- * We should lock the database and kill all other backends
- * but we are loading oid's that we can not guarantee are unique
- * anyway, so we must rely on the user.
+ * We have exceeded the logged oid range. We should lock the database
+ * and kill all other backends, but we are loading OIDs that we cannot
+ * guarantee are unique anyway, so we must rely on the user.
*/
XLogPutNextOid(assigned_oid + VAR_OID_PREFETCH);
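CheckMaxObjectId keeps externally assigned OIDs consistent with the logged high-water mark: an OID inside the prefetched range merely shrinks the remaining count, and only exhausting the range forces a new XLogPutNextOid record. A standalone sketch of that accounting, with the spinlock and WAL call omitted:

#include <stdio.h>

#define OID_PREFETCH 8192

static unsigned nextOid = 1000;
static unsigned oidCount = OID_PREFETCH;

static void
check_max_oid(unsigned assigned)
{
	if (assigned < nextOid)
		return;					/* already below the high-water mark */

	if (assigned <= nextOid + oidCount - 1)
	{
		/* inside the logged range: just consume part of it */
		oidCount -= assigned - nextOid + 1;
		nextOid = assigned + 1;
		return;
	}

	/* range exhausted: the real code logs a new mark here */
	nextOid = assigned + 1;
	oidCount = OID_PREFETCH;
}

int
main(void)
{
	check_max_oid(1500);
	printf("nextOid=%u oidCount=%u\n", nextOid, oidCount);	/* 1501 7691 */
	return 0;
}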
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 1331c8e9834..6a8e6c0639f 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.99 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -222,9 +222,10 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
int CommitDelay = 0; /* precommit delay in microseconds */
-int CommitSiblings = 5; /* number of concurrent xacts needed to sleep */
+int CommitSiblings = 5; /* number of concurrent xacts needed to
+ * sleep */
-static void (*_RollbackFunc)(void*) = NULL;
+static void (*_RollbackFunc) (void *) = NULL;
static void *_RollbackData = NULL;
/* ----------------
@@ -666,39 +667,40 @@ RecordTransactionCommit()
if (MyLastRecPtr.xrecoff != 0)
{
- XLogRecData rdata;
- xl_xact_commit xlrec;
- XLogRecPtr recptr;
+ XLogRecData rdata;
+ xl_xact_commit xlrec;
+ XLogRecPtr recptr;
BufmgrCommit();
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&xlrec);
+ rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactCommit;
rdata.next = NULL;
START_CRIT_SECTION();
+
/*
* SHOULD SAVE ARRAY OF RELFILENODE-s TO DROP
*/
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata);
- /*
- * Sleep before commit! So we can flush more than one
- * commit records per single fsync. (The idea is some other
- * backend may do the XLogFlush while we're sleeping. This
- * needs work still, because on most Unixen, the minimum
- * select() delay is 10msec or more, which is way too long.)
+ /*
+ * Sleep before commit! So we can flush more than one commit
+ * records per single fsync. (The idea is some other backend may
+ * do the XLogFlush while we're sleeping. This needs work still,
+ * because on most Unixen, the minimum select() delay is 10msec or
+ * more, which is way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if there
- * are fewer than CommitSiblings other backends with active
+ * We do not sleep if enableFsync is not turned on, nor if there are
+ * fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
CountActiveBackends() >= CommitSiblings)
{
- struct timeval delay;
+ struct timeval delay;
delay.tv_sec = 0;
delay.tv_usec = CommitDelay;
@@ -812,13 +814,13 @@ RecordTransactionAbort(void)
*/
if (MyLastRecPtr.xrecoff != 0 && !TransactionIdDidCommit(xid))
{
- XLogRecData rdata;
- xl_xact_abort xlrec;
- XLogRecPtr recptr;
+ XLogRecData rdata;
+ xl_xact_abort xlrec;
+ XLogRecPtr recptr;
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&xlrec);
+ rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactAbort;
rdata.next = NULL;
@@ -879,7 +881,7 @@ AtAbort_Memory(void)
{
/* ----------------
* Make sure we are in a valid context (not a child of
- * TransactionCommandContext...). Note that it is possible
+ * TransactionCommandContext...). Note that it is possible
* for this code to be called when we aren't in a transaction
* at all; go directly to TopMemoryContext in that case.
* ----------------
@@ -896,9 +898,7 @@ AtAbort_Memory(void)
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
}
else
- {
MemoryContextSwitchTo(TopMemoryContext);
- }
}
@@ -1021,6 +1021,7 @@ CurrentXactInProgress(void)
{
return CurrentTransactionState->state == TRANS_INPROGRESS;
}
+
#endif
/* --------------------------------
@@ -1106,7 +1107,7 @@ CommitTransaction(void)
AtCommit_Memory();
AtEOXact_Files();
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* done with commit processing, set current transaction
@@ -1143,15 +1144,16 @@ AbortTransaction(void)
/*
* Release any spinlocks or buffer context locks we might be holding
- * as quickly as possible. (Real locks, however, must be held till
- * we finish aborting.) Releasing spinlocks is critical since we
- * might try to grab them again while cleaning up!
+ * as quickly as possible. (Real locks, however, must be held till we
+ * finish aborting.) Releasing spinlocks is critical since we might
+ * try to grab them again while cleaning up!
*/
ProcReleaseSpins(NULL);
UnlockBuffers();
+
/*
- * Also clean up any open wait for lock, since the lock manager
- * will choke if we try to wait for another lock before doing this.
+ * Also clean up any open wait for lock, since the lock manager will
+ * choke if we try to wait for another lock before doing this.
*/
LockWaitCancel();
@@ -1203,7 +1205,7 @@ AbortTransaction(void)
AtEOXact_Files();
AtAbort_Locks();
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* State remains TRANS_ABORT until CleanupTransaction().
@@ -1327,8 +1329,8 @@ StartTransactionCommand(void)
}
/*
- * We must switch to TransactionCommandContext before returning.
- * This is already done if we called StartTransaction, otherwise not.
+ * We must switch to TransactionCommandContext before returning. This
+ * is already done if we called StartTransaction, otherwise not.
*/
Assert(TransactionCommandContext != NULL);
MemoryContextSwitchTo(TransactionCommandContext);
@@ -1757,7 +1759,7 @@ IsTransactionBlock(void)
void
xact_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
@@ -1765,9 +1767,7 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
/* SHOULD REMOVE FILES OF ALL DROPPED RELATIONS */
}
else if (info == XLOG_XACT_ABORT)
- {
TransactionIdAbort(record->xl_xid);
- }
else
elog(STOP, "xact_redo: unknown op code %u", info);
}
@@ -1775,43 +1775,43 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
void
xact_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
- if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
+ if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
elog(STOP, "xact_undo: can't undo committed xaction");
else if (info != XLOG_XACT_ABORT)
elog(STOP, "xact_undo: unknown op code %u", info);
}
-
+
void
-xact_desc(char *buf, uint8 xl_info, char* rec)
+xact_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
- xl_xact_commit *xlrec = (xl_xact_commit*) rec;
- struct tm *tm = localtime(&xlrec->xtime);
+ xl_xact_commit *xlrec = (xl_xact_commit *) rec;
+ struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "commit: %04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
}
else if (info == XLOG_XACT_ABORT)
{
- xl_xact_abort *xlrec = (xl_xact_abort*) rec;
- struct tm *tm = localtime(&xlrec->xtime);
+ xl_xact_abort *xlrec = (xl_xact_abort *) rec;
+ struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "abort: %04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
}
else
strcat(buf, "UNKNOWN");
}
void
-XactPushRollback(void (*func) (void *), void* data)
+ XactPushRollback(void (*func) (void *), void *data)
{
#ifdef XLOG_II
if (_RollbackFunc != NULL)
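
[Editor's note: the commit-delay sleep that the RecordTransactionCommit hunk describes is nothing more than a select() with no file descriptors, which is the portable way to sleep for less than a second. Here is that call by itself, outside any transaction machinery; the 5000-microsecond value is an arbitrary demo setting.]

    #include <stdio.h>
    #include <sys/select.h>

    int
    main(void)
    {
        int CommitDelay = 5000;     /* precommit delay in usec (demo value) */
        struct timeval delay;

        delay.tv_sec = 0;
        delay.tv_usec = CommitDelay;

        /*
         * A zero-fd select() is the sub-second sleep; as the comment above
         * warns, many Unixen round this up to 10msec or more.
         */
        (void) select(0, NULL, NULL, NULL, &delay);
        printf("slept about %d usec before flushing the commit record\n",
               CommitDelay);
        return 0;
    }
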
diff --git a/src/backend/access/transam/xid.c b/src/backend/access/transam/xid.c
index 6ee28d1a2b0..624d6da850c 100644
--- a/src/backend/access/transam/xid.c
+++ b/src/backend/access/transam/xid.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: xid.c,v 1.29 2001/01/24 19:42:51 momjian Exp $
+ * $Id: xid.c,v 1.30 2001/03/22 03:59:18 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING
@@ -26,8 +26,8 @@
/*
* TransactionId is typedef'd as uint32, so...
*/
-#define PG_GETARG_TRANSACTIONID(n) PG_GETARG_UINT32(n)
-#define PG_RETURN_TRANSACTIONID(x) PG_RETURN_UINT32(x)
+#define PG_GETARG_TRANSACTIONID(n) PG_GETARG_UINT32(n)
+#define PG_RETURN_TRANSACTIONID(x) PG_RETURN_UINT32(x)
extern TransactionId NullTransactionId;
@@ -49,6 +49,7 @@ Datum
xidout(PG_FUNCTION_ARGS)
{
TransactionId transactionId = PG_GETARG_TRANSACTIONID(0);
+
/* maximum 32 bit unsigned integer representation takes 10 chars */
char *representation = palloc(11);
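
[Editor's note: the palloc(11) sizing in xidout follows directly from the comment: 2^32-1 prints as ten digits plus the terminating NUL. A quick standalone check, with snprintf standing in for the backend's formatting:]

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int transactionId = 4294967295u;   /* largest 32-bit value */
        char representation[11];                    /* 10 digits + NUL */

        snprintf(representation, sizeof(representation), "%u", transactionId);
        printf("%s\n", representation);             /* prints all ten digits */
        return 0;
    }
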
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 9994025dd69..59d783264bb 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.62 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.63 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,57 +45,60 @@
/*
* This chunk of hackery attempts to determine which file sync methods
* are available on the current platform, and to choose an appropriate
- * default method. We assume that fsync() is always available, and that
+ * default method. We assume that fsync() is always available, and that
* configure determined whether fdatasync() is.
*/
#define SYNC_METHOD_FSYNC 0
#define SYNC_METHOD_FDATASYNC 1
-#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and O_DSYNC */
+#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and
+ * O_DSYNC */
#if defined(O_SYNC)
-# define OPEN_SYNC_FLAG O_SYNC
+#define OPEN_SYNC_FLAG O_SYNC
#else
-# if defined(O_FSYNC)
-# define OPEN_SYNC_FLAG O_FSYNC
-# endif
+#if defined(O_FSYNC)
+#define OPEN_SYNC_FLAG O_FSYNC
+#endif
#endif
#if defined(OPEN_SYNC_FLAG)
-# if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
-# define OPEN_DATASYNC_FLAG O_DSYNC
-# endif
+#if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
+#define OPEN_DATASYNC_FLAG O_DSYNC
+#endif
#endif
#if defined(OPEN_DATASYNC_FLAG)
-# define DEFAULT_SYNC_METHOD_STR "open_datasync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
-# define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
+#define DEFAULT_SYNC_METHOD_STR "open_datasync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
+#define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
#else
-# if defined(HAVE_FDATASYNC)
-# define DEFAULT_SYNC_METHOD_STR "fdatasync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
-# define DEFAULT_SYNC_FLAGBIT 0
-# else
-# define DEFAULT_SYNC_METHOD_STR "fsync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
-# define DEFAULT_SYNC_FLAGBIT 0
-# endif
+#if defined(HAVE_FDATASYNC)
+#define DEFAULT_SYNC_METHOD_STR "fdatasync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
+#define DEFAULT_SYNC_FLAGBIT 0
+#else
+#define DEFAULT_SYNC_METHOD_STR "fsync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
+#define DEFAULT_SYNC_FLAGBIT 0
+#endif
#endif
/* Max time to wait to acquire XLog activity locks */
-#define XLOG_LOCK_TIMEOUT (5*60*1000000) /* 5 minutes */
+#define XLOG_LOCK_TIMEOUT (5*60*1000000) /* 5 minutes */
/* Max time to wait to acquire checkpoint lock */
-#define CHECKPOINT_LOCK_TIMEOUT (20*60*1000000) /* 20 minutes */
+#define CHECKPOINT_LOCK_TIMEOUT (20*60*1000000) /* 20 minutes */
/* User-settable parameters */
int CheckPointSegments = 3;
int XLOGbuffers = 8;
-int XLOGfiles = 0; /* how many files to pre-allocate during ckpt */
+int XLOGfiles = 0; /* how many files to pre-allocate during
+ * ckpt */
int XLOG_DEBUG = 0;
char *XLOG_sync_method = NULL;
const char XLOG_sync_method_default[] = DEFAULT_SYNC_METHOD_STR;
-char XLOG_archive_dir[MAXPGPATH]; /* null string means delete 'em */
+char XLOG_archive_dir[MAXPGPATH]; /* null string means
+ * delete 'em */
/* these are derived from XLOG_sync_method by assign_xlog_sync_method */
static int sync_method = DEFAULT_SYNC_METHOD;
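
[Editor's note: the three SYNC_METHOD_* values defined above boil down to one switch at fsync time; the real issue_xlog_fsync later in this file dispatches the same way on the static sync_method. A self-contained sketch of that dispatch, under the assumption that the HAVE_FDATASYNC guard mirrors the configure test; the demo passes the fd explicitly and the filename is made up.]

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define SYNC_METHOD_FSYNC      0
    #define SYNC_METHOD_FDATASYNC  1
    #define SYNC_METHOD_OPEN       2   /* O_SYNC/O_DSYNC: write() already synced */

    static int sync_method = SYNC_METHOD_FSYNC;

    static void
    demo_issue_fsync(int fd)
    {
        switch (sync_method)
        {
            case SYNC_METHOD_FSYNC:
                if (fsync(fd) != 0)
                    perror("fsync");
                break;
    #ifdef HAVE_FDATASYNC
            case SYNC_METHOD_FDATASYNC:
                if (fdatasync(fd) != 0)
                    perror("fdatasync");
                break;
    #endif
            case SYNC_METHOD_OPEN:
                break;          /* the open flag forced it at write() time */
        }
    }

    int
    main(void)
    {
        int fd = open("xlogtemp.demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0)
        {
            perror("open");
            return 1;
        }
        (void) write(fd, "x", 1);
        demo_issue_fsync(fd);
        close(fd);
        unlink("xlogtemp.demo");
        return 0;
    }
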
@@ -135,7 +138,7 @@ static XLogRecPtr ProcLastRecPtr = {0, 0};
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
* hold the Insert spinlock). See XLogInsert for details.
*/
@@ -164,12 +167,12 @@ SPINLOCK ControlFileLockId;
*
* XLogCtl->LogwrtResult and XLogCtl->Write.LogwrtResult are both "always
* right", since both are updated by a write or flush operation before
- * it releases logwrt_lck. The point of keeping XLogCtl->Write.LogwrtResult
+ * it releases logwrt_lck. The point of keeping XLogCtl->Write.LogwrtResult
* is that it can be examined/modified by code that already holds logwrt_lck
* without needing to grab info_lck as well.
*
* XLogCtl->Insert.LogwrtResult may lag behind the reality of the other two,
- * but is updated when convenient. Again, it exists for the convenience of
+ * but is updated when convenient. Again, it exists for the convenience of
* code that is already holding insert_lck but not the other locks.
*
* The unshared LogwrtResult may lag behind any or all of these, and again
@@ -187,25 +190,25 @@ typedef struct XLogwrtRqst
{
XLogRecPtr Write; /* last byte + 1 to write out */
XLogRecPtr Flush; /* last byte + 1 to flush */
-} XLogwrtRqst;
+} XLogwrtRqst;
typedef struct XLogwrtResult
{
XLogRecPtr Write; /* last byte + 1 written out */
XLogRecPtr Flush; /* last byte + 1 flushed */
-} XLogwrtResult;
+} XLogwrtResult;
/*
* Shared state data for XLogInsert.
*/
typedef struct XLogCtlInsert
{
- XLogwrtResult LogwrtResult; /* a recent value of LogwrtResult */
- XLogRecPtr PrevRecord; /* start of previously-inserted record */
- uint16 curridx; /* current block index in cache */
- XLogPageHeader currpage; /* points to header of block in cache */
- char *currpos; /* current insertion point in cache */
- XLogRecPtr RedoRecPtr; /* current redo point for insertions */
+ XLogwrtResult LogwrtResult; /* a recent value of LogwrtResult */
+ XLogRecPtr PrevRecord; /* start of previously-inserted record */
+ uint16 curridx; /* current block index in cache */
+ XLogPageHeader currpage; /* points to header of block in cache */
+ char *currpos; /* current insertion point in cache */
+ XLogRecPtr RedoRecPtr; /* current redo point for insertions */
} XLogCtlInsert;
/*
@@ -213,8 +216,8 @@ typedef struct XLogCtlInsert
*/
typedef struct XLogCtlWrite
{
- XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
- uint16 curridx; /* cache index of next block to write */
+ XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
+ uint16 curridx; /* cache index of next block to write */
} XLogCtlWrite;
/*
@@ -223,30 +226,31 @@ typedef struct XLogCtlWrite
typedef struct XLogCtlData
{
/* Protected by insert_lck: */
- XLogCtlInsert Insert;
+ XLogCtlInsert Insert;
/* Protected by info_lck: */
- XLogwrtRqst LogwrtRqst;
- XLogwrtResult LogwrtResult;
+ XLogwrtRqst LogwrtRqst;
+ XLogwrtResult LogwrtResult;
/* Protected by logwrt_lck: */
- XLogCtlWrite Write;
+ XLogCtlWrite Write;
+
/*
* These values do not change after startup, although the pointed-to
- * pages and xlblocks values certainly do. Permission to read/write
+ * pages and xlblocks values certainly do. Permission to read/write
* the pages and xlblocks values depends on insert_lck and logwrt_lck.
*/
- char *pages; /* buffers for unwritten XLOG pages */
- XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
- uint32 XLogCacheByte; /* # bytes in xlog buffers */
- uint32 XLogCacheBlck; /* highest allocated xlog buffer index */
- StartUpID ThisStartUpID;
+ char *pages; /* buffers for unwritten XLOG pages */
+ XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
+ uint32 XLogCacheByte; /* # bytes in xlog buffers */
+ uint32 XLogCacheBlck; /* highest allocated xlog buffer index */
+ StartUpID ThisStartUpID;
/* This value is not protected by *any* spinlock... */
- XLogRecPtr RedoRecPtr; /* see SetRedoRecPtr/GetRedoRecPtr */
+ XLogRecPtr RedoRecPtr; /* see SetRedoRecPtr/GetRedoRecPtr */
- slock_t insert_lck; /* XLogInsert lock */
- slock_t info_lck; /* locks shared LogwrtRqst/LogwrtResult */
- slock_t logwrt_lck; /* XLogWrite/XLogFlush lock */
- slock_t chkp_lck; /* checkpoint lock */
+ slock_t insert_lck; /* XLogInsert lock */
+ slock_t info_lck; /* locks shared LogwrtRqst/LogwrtResult */
+ slock_t logwrt_lck; /* XLogWrite/XLogFlush lock */
+ slock_t chkp_lck; /* checkpoint lock */
} XLogCtlData;
static XLogCtlData *XLogCtl = NULL;
@@ -271,7 +275,7 @@ static ControlFileData *ControlFile = NULL;
( \
(recptr).xlogid = XLogCtl->xlblocks[curridx].xlogid, \
(recptr).xrecoff = \
- XLogCtl->xlblocks[curridx].xrecoff - INSERT_FREESPACE(Insert) \
+ XLogCtl->xlblocks[curridx].xrecoff - INSERT_FREESPACE(Insert) \
)
@@ -303,7 +307,7 @@ static ControlFileData *ControlFile = NULL;
* Compute ID and segment from an XLogRecPtr.
*
* For XLByteToSeg, do the computation at face value. For XLByteToPrevSeg,
- * a boundary byte is taken to be in the previous segment. This is suitable
+ * a boundary byte is taken to be in the previous segment. This is suitable
* for deciding which segment to write given a pointer to a record end,
* for example.
*/
@@ -354,8 +358,8 @@ static ControlFileData *ControlFile = NULL;
/* File path names */
-static char XLogDir[MAXPGPATH];
-static char ControlFilePath[MAXPGPATH];
+static char XLogDir[MAXPGPATH];
+static char ControlFilePath[MAXPGPATH];
/*
* Private, possibly out-of-date copy of shared LogwrtResult.
@@ -384,8 +388,10 @@ static int readFile = -1;
static uint32 readId = 0;
static uint32 readSeg = 0;
static uint32 readOff = 0;
+
/* Buffer for currently read page (BLCKSZ bytes) */
static char *readBuf = NULL;
+
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr;
static XLogRecPtr EndRecPtr;
@@ -397,16 +403,16 @@ static bool InRedo = false;
static bool AdvanceXLInsertBuffer(void);
static void XLogWrite(XLogwrtRqst WriteRqst);
-static int XLogFileInit(uint32 log, uint32 seg,
- bool *use_existent, bool use_lock);
+static int XLogFileInit(uint32 log, uint32 seg,
+ bool *use_existent, bool use_lock);
static int XLogFileOpen(uint32 log, uint32 seg, bool econt);
static void PreallocXlogFiles(XLogRecPtr endptr);
static void MoveOfflineLogs(uint32 log, uint32 seg);
static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer);
static bool ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI);
static XLogRecord *ReadCheckpointRecord(XLogRecPtr RecPtr,
- const char *whichChkpt,
- char *buffer);
+ const char *whichChkpt,
+ char *buffer);
static void WriteControlFile(void);
static void ReadControlFile(void);
static char *str_time(time_t tnow);
@@ -432,44 +438,44 @@ static void issue_xlog_fsync(void);
XLogRecPtr
XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
{
- XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogRecord *record;
+ XLogCtlInsert *Insert = &XLogCtl->Insert;
+ XLogRecord *record;
XLogContRecord *contrecord;
- XLogRecPtr RecPtr;
- XLogRecPtr WriteRqst;
- uint32 freespace;
- uint16 curridx;
- XLogRecData *rdt;
- Buffer dtbuf[XLR_MAX_BKP_BLOCKS];
- bool dtbuf_bkp[XLR_MAX_BKP_BLOCKS];
- BkpBlock dtbuf_xlg[XLR_MAX_BKP_BLOCKS];
- XLogRecPtr dtbuf_lsn[XLR_MAX_BKP_BLOCKS];
- XLogRecData dtbuf_rdt[2 * XLR_MAX_BKP_BLOCKS];
- crc64 rdata_crc;
- uint32 len,
- write_len;
- unsigned i;
- bool do_logwrt;
- bool updrqst;
- bool no_tran = (rmid == RM_XLOG_ID) ? true : false;
+ XLogRecPtr RecPtr;
+ XLogRecPtr WriteRqst;
+ uint32 freespace;
+ uint16 curridx;
+ XLogRecData *rdt;
+ Buffer dtbuf[XLR_MAX_BKP_BLOCKS];
+ bool dtbuf_bkp[XLR_MAX_BKP_BLOCKS];
+ BkpBlock dtbuf_xlg[XLR_MAX_BKP_BLOCKS];
+ XLogRecPtr dtbuf_lsn[XLR_MAX_BKP_BLOCKS];
+ XLogRecData dtbuf_rdt[2 * XLR_MAX_BKP_BLOCKS];
+ crc64 rdata_crc;
+ uint32 len,
+ write_len;
+ unsigned i;
+ bool do_logwrt;
+ bool updrqst;
+ bool no_tran = (rmid == RM_XLOG_ID) ? true : false;
if (info & XLR_INFO_MASK)
{
if ((info & XLR_INFO_MASK) != XLOG_NO_TRAN)
- elog(STOP, "XLogInsert: invalid info mask %02X",
+ elog(STOP, "XLogInsert: invalid info mask %02X",
(info & XLR_INFO_MASK));
no_tran = true;
info &= ~XLR_INFO_MASK;
}
/*
- * In bootstrap mode, we don't actually log anything but XLOG resources;
- * return a phony record pointer.
+ * In bootstrap mode, we don't actually log anything but XLOG
+ * resources; return a phony record pointer.
*/
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
RecPtr.xlogid = 0;
- RecPtr.xrecoff = SizeOfXLogPHD; /* start of 1st checkpoint record */
+ RecPtr.xrecoff = SizeOfXLogPHD; /* start of 1st checkpoint record */
return (RecPtr);
}
@@ -479,16 +485,17 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* header isn't added into the CRC yet since we don't know the final
* length or info bits quite yet.
*
- * We may have to loop back to here if a race condition is detected below.
- * We could prevent the race by doing all this work while holding the
- * insert spinlock, but it seems better to avoid doing CRC calculations
- * while holding the lock. This means we have to be careful about
- * modifying the rdata list until we know we aren't going to loop back
- * again. The only change we allow ourselves to make earlier is to set
- * rdt->data = NULL in list items we have decided we will have to back
- * up the whole buffer for. This is OK because we will certainly decide
- * the same thing again for those items if we do it over; doing it here
- * saves an extra pass over the list later.
+ * We may have to loop back to here if a race condition is detected
+ * below. We could prevent the race by doing all this work while
+ * holding the insert spinlock, but it seems better to avoid doing CRC
+ * calculations while holding the lock. This means we have to be
+ * careful about modifying the rdata list until we know we aren't
+ * going to loop back again. The only change we allow ourselves to
+ * make earlier is to set rdt->data = NULL in list items we have
+ * decided we will have to back up the whole buffer for. This is OK
+ * because we will certainly decide the same thing again for those
+ * items if we do it over; doing it here saves an extra pass over the
+ * list later.
*/
begin:;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -499,7 +506,7 @@ begin:;
INIT_CRC64(rdata_crc);
len = 0;
- for (rdt = rdata; ; )
+ for (rdt = rdata;;)
{
if (rdt->buffer == InvalidBuffer)
{
@@ -528,13 +535,14 @@ begin:;
{
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
+
/*
* XXX We assume page LSN is first data on page
*/
- dtbuf_lsn[i] = *((XLogRecPtr*)BufferGetBlock(rdt->buffer));
+ dtbuf_lsn[i] = *((XLogRecPtr *) BufferGetBlock(rdt->buffer));
if (XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
- crc64 dtcrc;
+ crc64 dtcrc;
dtbuf_bkp[i] = true;
rdt->data = NULL;
@@ -545,7 +553,7 @@ begin:;
dtbuf_xlg[i].node = BufferGetFileNode(dtbuf[i]);
dtbuf_xlg[i].block = BufferGetBlockNumber(dtbuf[i]);
COMP_CRC64(dtcrc,
- (char*) &(dtbuf_xlg[i]) + sizeof(crc64),
+ (char *) &(dtbuf_xlg[i]) + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(dtcrc);
dtbuf_xlg[i].crc = dtcrc;
@@ -571,7 +579,7 @@ begin:;
/*
* NOTE: the test for len == 0 here is somewhat fishy, since in theory
* all of the rmgr data might have been suppressed in favor of backup
- * blocks. Currently, all callers of XLogInsert provide at least some
+ * blocks. Currently, all callers of XLogInsert provide at least some
* not-in-a-buffer data and so len == 0 should never happen, but that
* may not be true forever. If you need to remove the len == 0 check,
* also remove the check for xl_len == 0 in ReadRecord, below.
@@ -589,16 +597,16 @@ begin:;
/* try to update LogwrtResult while waiting for insert lock */
if (!TAS(&(XLogCtl->info_lck)))
{
- XLogwrtRqst LogwrtRqst;
+ XLogwrtRqst LogwrtRqst;
LogwrtRqst = XLogCtl->LogwrtRqst;
LogwrtResult = XLogCtl->LogwrtResult;
S_UNLOCK(&(XLogCtl->info_lck));
/*
- * If cache is half filled then try to acquire logwrt lock
- * and do LOGWRT work, but only once per XLogInsert call.
- * Ignore any fractional blocks in performing this check.
+ * If cache is half filled then try to acquire logwrt lock and
+ * do LOGWRT work, but only once per XLogInsert call. Ignore
+ * any fractional blocks in performing this check.
*/
LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ;
if (do_logwrt &&
@@ -625,8 +633,9 @@ begin:;
/*
* Check to see if my RedoRecPtr is out of date. If so, may have to
- * go back and recompute everything. This can only happen just after a
- * checkpoint, so it's better to be slow in this case and fast otherwise.
+ * go back and recompute everything. This can only happen just after
+ * a checkpoint, so it's better to be slow in this case and fast
+ * otherwise.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
@@ -640,9 +649,10 @@ begin:;
if (dtbuf_bkp[i] == false &&
XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
+
/*
- * Oops, this buffer now needs to be backed up, but we didn't
- * think so above. Start over.
+ * Oops, this buffer now needs to be backed up, but we
+ * didn't think so above. Start over.
*/
S_UNLOCK(&(XLogCtl->insert_lck));
END_CRIT_SECTION();
@@ -658,8 +668,9 @@ begin:;
* this loop, write_len includes the backup block data.
*
* Also set the appropriate info bits to show which buffers were backed
- * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
- * buffer value (ignoring InvalidBuffer) appearing in the rdata list.
+ * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th
+ * distinct buffer value (ignoring InvalidBuffer) appearing in the
+ * rdata list.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -671,13 +682,13 @@ begin:;
rdt->next = &(dtbuf_rdt[2 * i]);
- dtbuf_rdt[2 * i].data = (char*) &(dtbuf_xlg[i]);
+ dtbuf_rdt[2 * i].data = (char *) &(dtbuf_xlg[i]);
dtbuf_rdt[2 * i].len = sizeof(BkpBlock);
write_len += sizeof(BkpBlock);
rdt = dtbuf_rdt[2 * i].next = &(dtbuf_rdt[2 * i + 1]);
- dtbuf_rdt[2 * i + 1].data = (char*) BufferGetBlock(dtbuf[i]);
+ dtbuf_rdt[2 * i + 1].data = (char *) BufferGetBlock(dtbuf[i]);
dtbuf_rdt[2 * i + 1].len = BLCKSZ;
write_len += BLCKSZ;
dtbuf_rdt[2 * i + 1].next = NULL;
@@ -711,7 +722,7 @@ begin:;
record->xl_rmid = rmid;
/* Now we can finish computing the main CRC */
- COMP_CRC64(rdata_crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(rdata_crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(rdata_crc);
record->xl_crc = rdata_crc;
@@ -729,7 +740,7 @@ begin:;
if (XLOG_DEBUG)
{
- char buf[8192];
+ char buf[8192];
sprintf(buf, "INSERT @ %u/%u: ", RecPtr.xlogid, RecPtr.xrecoff);
xlog_outrec(buf, record);
@@ -791,18 +802,19 @@ begin:;
/* Ensure next record will be properly aligned */
Insert->currpos = (char *) Insert->currpage +
- MAXALIGN(Insert->currpos - (char *) Insert->currpage);
+ MAXALIGN(Insert->currpos - (char *) Insert->currpage);
freespace = INSERT_FREESPACE(Insert);
/*
- * The recptr I return is the beginning of the *next* record.
- * This will be stored as LSN for changed data pages...
+ * The recptr I return is the beginning of the *next* record. This
+ * will be stored as LSN for changed data pages...
*/
INSERT_RECPTR(RecPtr, Insert, curridx);
/* Need to update shared LogwrtRqst if some block was filled up */
if (freespace < SizeOfXLogRecord)
- updrqst = true; /* curridx is filled and available for writing out */
+ updrqst = true; /* curridx is filled and available for
+ * writing out */
else
curridx = PrevBufIdx(curridx);
WriteRqst = XLogCtl->xlblocks[curridx];
@@ -850,9 +862,9 @@ AdvanceXLInsertBuffer(void)
LogwrtResult = Insert->LogwrtResult;
/*
- * Get ending-offset of the buffer page we need to replace (this may be
- * zero if the buffer hasn't been used yet). Fall through if it's already
- * written out.
+ * Get ending-offset of the buffer page we need to replace (this may
+ * be zero if the buffer hasn't been used yet). Fall through if it's
+ * already written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
@@ -870,7 +882,7 @@ AdvanceXLInsertBuffer(void)
{
if (XLByteLT(XLogCtl->LogwrtRqst.Write, FinishedPageRqstPtr))
XLogCtl->LogwrtRqst.Write = FinishedPageRqstPtr;
- update_needed = false; /* Did the shared-request update */
+ update_needed = false; /* Did the shared-request update */
LogwrtResult = XLogCtl->LogwrtResult;
S_UNLOCK(&(XLogCtl->info_lck));
@@ -883,8 +895,8 @@ AdvanceXLInsertBuffer(void)
}
/*
- * LogwrtResult lock is busy or we know the page is still dirty.
- * Try to acquire logwrt lock and write full blocks.
+ * LogwrtResult lock is busy or we know the page is still
+ * dirty. Try to acquire logwrt lock and write full blocks.
*/
if (!TAS(&(XLogCtl->logwrt_lck)))
{
@@ -896,9 +908,10 @@ AdvanceXLInsertBuffer(void)
Insert->LogwrtResult = LogwrtResult;
break;
}
+
/*
- * Have to write buffers while holding insert lock.
- * This is not good, so only write as much as we absolutely
+ * Have to write buffers while holding insert lock. This
+ * is not good, so only write as much as we absolutely
* must.
*/
WriteRqst.Write = OldPageRqstPtr;
@@ -933,14 +946,15 @@ AdvanceXLInsertBuffer(void)
}
Insert->curridx = nextidx;
Insert->currpage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ);
- Insert->currpos = ((char*) Insert->currpage) + SizeOfXLogPHD;
+ Insert->currpos = ((char *) Insert->currpage) + SizeOfXLogPHD;
+
/*
- * Be sure to re-zero the buffer so that bytes beyond what we've written
- * will look like zeroes and not valid XLOG records...
+ * Be sure to re-zero the buffer so that bytes beyond what we've
+ * written will look like zeroes and not valid XLOG records...
*/
- MemSet((char*) Insert->currpage, 0, BLCKSZ);
+ MemSet((char *) Insert->currpage, 0, BLCKSZ);
Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC;
- /* Insert->currpage->xlp_info = 0; */ /* done by memset */
+ /* Insert->currpage->xlp_info = 0; *//* done by memset */
Insert->currpage->xlp_sui = ThisStartUpID;
return update_needed;
@@ -959,11 +973,15 @@ XLogWrite(XLogwrtRqst WriteRqst)
bool ispartialpage;
bool use_existent;
- /* Update local LogwrtResult (caller probably did this already, but...) */
+ /*
+ * Update local LogwrtResult (caller probably did this already,
+ * but...)
+ */
LogwrtResult = Write->LogwrtResult;
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{
+
/*
* Make sure we're not ahead of the insert process. This could
* happen if we're passed a bogus WriteRqst.Write that is past the
@@ -979,6 +997,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
+
/*
* Switch to new logfile segment.
*/
@@ -1011,11 +1030,12 @@ XLogWrite(XLogwrtRqst WriteRqst)
ControlFile->logSeg = openLogSeg + 1;
ControlFile->time = time(NULL);
UpdateControlFile();
+
/*
- * Signal postmaster to start a checkpoint if it's been too
- * long since the last one. (We look at local copy of
- * RedoRecPtr which might be a little out of date, but should
- * be close enough for this purpose.)
+ * Signal postmaster to start a checkpoint if it's been
+ * too long since the last one. (We look at local copy of
+ * RedoRecPtr which might be a little out of date, but
+ * should be close enough for this purpose.)
*/
if (IsUnderPostmaster &&
(openLogId != RedoRecPtr.xlogid ||
@@ -1056,14 +1076,14 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* If we just wrote the whole last page of a logfile segment,
* fsync the segment immediately. This avoids having to go back
- * and re-open prior segments when an fsync request comes along later.
- * Doing it here ensures that one and only one backend will perform
- * this fsync.
+ * and re-open prior segments when an fsync request comes along
+ * later. Doing it here ensures that one and only one backend will
+ * perform this fsync.
*/
if (openLogOff >= XLogSegSize && !ispartialpage)
{
issue_xlog_fsync();
- LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */
+ LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */
}
if (ispartialpage)
@@ -1081,15 +1101,16 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{
+
/*
- * Could get here without iterating above loop, in which case
- * we might have no open file or the wrong one. However, we do
- * not need to fsync more than one file.
+ * Could get here without iterating above loop, in which case we
+ * might have no open file or the wrong one. However, we do not
+ * need to fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN)
{
if (openLogFile >= 0 &&
- !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
+ !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
if (close(openLogFile) != 0)
elog(STOP, "close(logfile %u seg %u) failed: %m",
@@ -1110,8 +1131,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* Update shared-memory status
*
- * We make sure that the shared 'request' values do not fall behind
- * the 'result' values. This is not absolutely essential, but it saves
+ * We make sure that the shared 'request' values do not fall behind the
+ * 'result' values. This is not absolutely essential, but it saves
* some code in a couple of places.
*/
S_LOCK(&(XLogCtl->info_lck));
@@ -1163,8 +1184,9 @@ XLogFlush(XLogRecPtr record)
* Since fsync is usually a horribly expensive operation, we try to
* piggyback as much data as we can on each fsync: if we see any more
* data entered into the xlog buffer, we'll write and fsync that too,
- * so that the final value of LogwrtResult.Flush is as large as possible.
- * This gives us some chance of avoiding another fsync immediately after.
+ * so that the final value of LogwrtResult.Flush is as large as
+ * possible. This gives us some chance of avoiding another fsync
+ * immediately after.
*/
/* initialize to given target; may increase below */
@@ -1192,9 +1214,7 @@ XLogFlush(XLogRecPtr record)
uint32 freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord) /* buffer is full */
- {
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
- }
else
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
@@ -1232,7 +1252,7 @@ XLogFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted). On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock spinlock while moving file into
@@ -1257,7 +1277,8 @@ XLogFileInit(uint32 log, uint32 seg,
XLogFileName(path, log, seg);
/*
- * Try to use existent file (checkpoint maker may have created it already)
+ * Try to use existent file (checkpoint maker may have created it
+ * already)
*/
if (*use_existent)
{
@@ -1270,14 +1291,14 @@ XLogFileInit(uint32 log, uint32 seg,
log, seg);
}
else
- return(fd);
+ return (fd);
}
/*
- * Initialize an empty (all zeroes) segment. NOTE: it is possible that
- * another process is doing the same thing. If so, we will end up
- * pre-creating an extra log segment. That seems OK, and better than
- * holding the spinlock throughout this lengthy process.
+ * Initialize an empty (all zeroes) segment. NOTE: it is possible
+ * that another process is doing the same thing. If so, we will end
+ * up pre-creating an extra log segment. That seems OK, and better
+ * than holding the spinlock throughout this lengthy process.
*/
snprintf(tmppath, MAXPGPATH, "%s%cxlogtemp.%d",
XLogDir, SEP_CHAR, (int) getpid());
@@ -1291,10 +1312,10 @@ XLogFileInit(uint32 log, uint32 seg,
elog(STOP, "InitCreate(%s) failed: %m", tmppath);
/*
- * Zero-fill the file. We have to do this the hard way to ensure that
+ * Zero-fill the file. We have to do this the hard way to ensure that
* all the file space has really been allocated --- on platforms that
* allow "holes" in files, just seeking to the end doesn't allocate
- * intermediate space. This way, we know that we have all the space
+ * intermediate space. This way, we know that we have all the space
* and (after the fsync below) that all the indirect blocks are down
* on disk. Therefore, fdatasync(2) or O_DSYNC will be sufficient to
* sync future writes to the log file.
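
[Editor's note: the zero-fill comment above is the whole trick: write real zeroes instead of seeking, so no filesystem holes remain, then fsync once so the indirect blocks reach disk too; that is what lets fdatasync()/O_DSYNC suffice for later WAL writes. A standalone sketch under those assumptions; the 64KB size is a demo stand-in for a real segment.]

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define DEMO_SEG_SIZE (64 * 1024)   /* stand-in for the real segment size */

    int
    main(void)
    {
        char        zbuffer[8192];
        int         fd,
                    nbytes;

        fd = open("xlogtemp.demo", O_RDWR | O_CREAT | O_EXCL, 0600);
        if (fd < 0)
        {
            perror("open");
            return 1;
        }

        memset(zbuffer, 0, sizeof(zbuffer));
        for (nbytes = 0; nbytes < DEMO_SEG_SIZE; nbytes += sizeof(zbuffer))
        {
            if (write(fd, zbuffer, sizeof(zbuffer)) != (int) sizeof(zbuffer))
            {
                /* as in the next hunk: delete the file to release the space */
                perror("write");
                close(fd);
                unlink("xlogtemp.demo");
                return 1;
            }
        }
        if (fsync(fd) != 0)
            perror("fsync");
        close(fd);
        unlink("xlogtemp.demo");
        return 0;
    }
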
@@ -1304,9 +1325,12 @@ XLogFileInit(uint32 log, uint32 seg,
{
if ((int) write(fd, zbuffer, sizeof(zbuffer)) != (int) sizeof(zbuffer))
{
- int save_errno = errno;
+ int save_errno = errno;
- /* If we fail to make the file, delete it to release disk space */
+ /*
+ * If we fail to make the file, delete it to release disk
+ * space
+ */
unlink(tmppath);
errno = save_errno;
@@ -1336,10 +1360,8 @@ XLogFileInit(uint32 log, uint32 seg,
targseg = seg;
strcpy(targpath, path);
- if (! *use_existent)
- {
+ if (!*use_existent)
unlink(targpath);
- }
else
{
while ((fd = BasicOpenFile(targpath, O_RDWR | PG_BINARY,
@@ -1451,10 +1473,10 @@ PreallocXlogFiles(XLogRecPtr endptr)
static void
MoveOfflineLogs(uint32 log, uint32 seg)
{
- DIR *xldir;
- struct dirent *xlde;
- char lastoff[32];
- char path[MAXPGPATH];
+ DIR *xldir;
+ struct dirent *xlde;
+ char lastoff[32];
+ char path[MAXPGPATH];
Assert(XLOG_archive_dir[0] == 0); /* ! implemented yet */
@@ -1471,9 +1493,9 @@ MoveOfflineLogs(uint32 log, uint32 seg)
strspn(xlde->d_name, "0123456789ABCDEF") == 16 &&
strcmp(xlde->d_name, lastoff) <= 0)
{
- elog(LOG, "MoveOfflineLogs: %s %s", (XLOG_archive_dir[0]) ?
+ elog(LOG, "MoveOfflineLogs: %s %s", (XLOG_archive_dir[0]) ?
"archive" : "remove", xlde->d_name);
- sprintf(path, "%s%c%s", XLogDir, SEP_CHAR, xlde->d_name);
+ sprintf(path, "%s%c%s", XLogDir, SEP_CHAR, xlde->d_name);
if (XLOG_archive_dir[0] == 0)
unlink(path);
}
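
[Editor's note: the filename filter in MoveOfflineLogs is worth seeing on its own. A log segment name is exactly sixteen uppercase hex digits, and anything that sorts at or below the cutoff name is old enough to archive or remove. A harmless sketch that only prints what it would do, scanning the current directory rather than XLogDir, with an assumed cutoff:]

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *lastoff = "0000000100000003";   /* assumed cutoff name */
        DIR        *xldir = opendir(".");           /* demo dir, not XLogDir */
        struct dirent *xlde;

        if (xldir == NULL)
        {
            perror("opendir");
            return 1;
        }
        while ((xlde = readdir(xldir)) != NULL)
        {
            /* same test as the hunk above: 16 hex chars, at or below cutoff */
            if (strlen(xlde->d_name) == 16 &&
                strspn(xlde->d_name, "0123456789ABCDEF") == 16 &&
                strcmp(xlde->d_name, lastoff) <= 0)
                printf("would archive/remove %s\n", xlde->d_name);
        }
        closedir(xldir);
        return 0;
    }
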
@@ -1499,13 +1521,13 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
char *blk;
int i;
- blk = (char*)XLogRecGetData(record) + record->xl_len;
+ blk = (char *) XLogRecGetData(record) + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
continue;
- memcpy((char*)&bkpb, blk, sizeof(BkpBlock));
+ memcpy((char *) &bkpb, blk, sizeof(BkpBlock));
blk += sizeof(BkpBlock);
reln = XLogOpenRelation(true, record->xl_rmid, bkpb.node);
@@ -1516,7 +1538,7 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
- memcpy((char*)page, blk, BLCKSZ);
+ memcpy((char *) page, blk, BLCKSZ);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
@@ -1546,7 +1568,7 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
/* Check CRC of rmgr data and record header */
INIT_CRC64(crc);
COMP_CRC64(crc, XLogRecGetData(record), len);
- COMP_CRC64(crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
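
[Editor's note: one pattern recurs in every COMP_CRC64 call touched by this diff: the CRC lives in the first field of the record, so the computation starts sizeof(crc64) bytes in and covers everything after it. A toy illustration of that layout; the additive checksum below is emphatically not the real CRC-64 polynomial, just a stand-in to show which bytes are covered.]

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef uint64_t crc64;     /* toy stand-in, not the backend's crc64 */

    #define INIT_CRC64(c)   ((c) = 0)
    #define COMP_CRC64(c, p, n) \
        do { \
            const unsigned char *_p = (const unsigned char *) (p); \
            for (size_t _i = 0; _i < (size_t) (n); _i++) \
                (c) += _p[_i]; \
        } while (0)
    #define FIN_CRC64(c)    ((void) 0)

    typedef struct DemoRecord
    {
        crc64       crc;        /* must stay first: it is skipped below */
        uint32_t    len;
        char        data[16];
    } DemoRecord;

    int
    main(void)
    {
        DemoRecord  rec;
        crc64       crc;

        memset(&rec, 0, sizeof(rec));
        rec.len = 5;
        memcpy(rec.data, "hello", 5);

        /* same shape as the hunks above: start just past the crc field */
        INIT_CRC64(crc);
        COMP_CRC64(crc, (char *) &rec + sizeof(crc64),
                   sizeof(DemoRecord) - sizeof(crc64));
        FIN_CRC64(crc);
        rec.crc = crc;

        printf("stored checksum %llu\n", (unsigned long long) rec.crc);
        return 0;
    }
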
@@ -1554,11 +1576,11 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
{
elog(emode, "ReadRecord: bad rmgr data CRC in record at %u/%u",
recptr.xlogid, recptr.xrecoff);
- return(false);
+ return (false);
}
/* Check CRCs of backup blocks, if any */
- blk = (char*)XLogRecGetData(record) + len;
+ blk = (char *) XLogRecGetData(record) + len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
@@ -1569,18 +1591,19 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
COMP_CRC64(crc, blk + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(crc);
- memcpy((char*)&cbuf, blk, sizeof(crc64)); /* don't assume alignment */
+ memcpy((char *) &cbuf, blk, sizeof(crc64)); /* don't assume
+ * alignment */
if (!EQ_CRC64(cbuf, crc))
{
elog(emode, "ReadRecord: bad bkp block %d CRC in record at %u/%u",
i + 1, recptr.xlogid, recptr.xrecoff);
- return(false);
+ return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
}
- return(true);
+ return (true);
}
/*
@@ -1609,13 +1632,14 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
if (readBuf == NULL)
{
+
/*
* First time through, permanently allocate readBuf. We do it
* this way, rather than just making a static array, for two
- * reasons: (1) no need to waste the storage in most instantiations
- * of the backend; (2) a static char array isn't guaranteed to
- * have any particular alignment, whereas malloc() will provide
- * MAXALIGN'd storage.
+ * reasons: (1) no need to waste the storage in most
+ * instantiations of the backend; (2) a static char array isn't
+ * guaranteed to have any particular alignment, whereas malloc()
+ * will provide MAXALIGN'd storage.
*/
readBuf = (char *) malloc(BLCKSZ);
Assert(readBuf != NULL);
@@ -1656,7 +1680,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
readFile = XLogFileOpen(readId, readSeg, (emode == LOG));
if (readFile < 0)
goto next_record_is_invalid;
- readOff = (uint32) (-1); /* force read to occur below */
+ readOff = (uint32) (-1);/* force read to occur below */
}
targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@@ -1688,9 +1712,10 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
got_record:;
+
/*
- * Currently, xl_len == 0 must be bad data, but that might not be
- * true forever. See note in XLogInsert.
+ * Currently, xl_len == 0 must be bad data, but that might not be true
+ * forever. See note in XLogInsert.
*/
if (record->xl_len == 0)
{
@@ -1698,8 +1723,10 @@ got_record:;
RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
+
/*
- * Compute total length of record including any appended backup blocks.
+ * Compute total length of record including any appended backup
+ * blocks.
*/
total_len = SizeOfXLogRecord + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -1708,6 +1735,7 @@ got_record:;
continue;
total_len += sizeof(BkpBlock) + BLCKSZ;
}
+
/*
* Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea
@@ -1731,7 +1759,7 @@ got_record:;
{
/* Need to reassemble record */
XLogContRecord *contrecord;
- uint32 gotlen = len;
+ uint32 gotlen = len;
memcpy(buffer, record, len);
record = (XLogRecord *) buffer;
@@ -1764,7 +1792,7 @@ got_record:;
goto next_record_is_invalid;
}
contrecord = (XLogContRecord *) ((char *) readBuf + SizeOfXLogPHD);
- if (contrecord->xl_rem_len == 0 ||
+ if (contrecord->xl_rem_len == 0 ||
total_len != (contrecord->xl_rem_len + gotlen))
{
elog(emode, "ReadRecord: invalid cont-record len %u in logfile %u seg %u off %u",
@@ -1774,7 +1802,7 @@ got_record:;
len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord;
if (contrecord->xl_rem_len > len)
{
- memcpy(buffer, (char *)contrecord + SizeOfXLogContRecord, len);
+ memcpy(buffer, (char *) contrecord + SizeOfXLogContRecord, len);
gotlen += len;
buffer += len;
continue;
@@ -1788,12 +1816,12 @@ got_record:;
if (BLCKSZ - SizeOfXLogRecord >= SizeOfXLogPHD +
SizeOfXLogContRecord + MAXALIGN(contrecord->xl_rem_len))
{
- nextRecord = (XLogRecord *) ((char *) contrecord +
+ nextRecord = (XLogRecord *) ((char *) contrecord +
SizeOfXLogContRecord + MAXALIGN(contrecord->xl_rem_len));
}
EndRecPtr.xlogid = readId;
EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff +
- SizeOfXLogPHD + SizeOfXLogContRecord +
+ SizeOfXLogPHD + SizeOfXLogContRecord +
MAXALIGN(contrecord->xl_rem_len);
ReadRecPtr = *RecPtr;
return record;
@@ -1822,7 +1850,7 @@ next_record_is_invalid:;
* Check whether the xlog header of a page just read in looks valid.
*
* This is just a convenience subroutine to avoid duplicated code in
- * ReadRecord. It's not intended for use from anywhere else.
+ * ReadRecord. It's not intended for use from anywhere else.
*/
static bool
ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
@@ -1839,14 +1867,16 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_info, readId, readSeg, readOff);
return false;
}
+
/*
- * We disbelieve a SUI less than the previous page's SUI, or more
- * than a few counts greater. In theory as many as 512 shutdown
- * checkpoint records could appear on a 32K-sized xlog page, so
- * that's the most differential there could legitimately be.
+ * We disbelieve a SUI less than the previous page's SUI, or more than
+ * a few counts greater. In theory as many as 512 shutdown checkpoint
+ * records could appear on a 32K-sized xlog page, so that's the most
+ * differential there could legitimately be.
*
* Note this check can only be applied when we are reading the next page
- * in sequence, so ReadRecord passes a flag indicating whether to check.
+ * in sequence, so ReadRecord passes a flag indicating whether to
+ * check.
*/
if (checkSUI)
{
@@ -1866,7 +1896,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control. WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -1890,9 +1920,11 @@ static void
WriteControlFile(void)
{
int fd;
- char buffer[BLCKSZ]; /* need not be aligned */
+ char buffer[BLCKSZ]; /* need not be aligned */
+
#ifdef USE_LOCALE
char *localeptr;
+
#endif
/*
@@ -1911,16 +1943,17 @@ WriteControlFile(void)
if (!localeptr)
elog(STOP, "Invalid LC_CTYPE setting");
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
+
/*
* Issue warning notice if initdb'ing in a locale that will not permit
- * LIKE index optimization. This is not a clean place to do it, but
- * I don't see a better place either...
+ * LIKE index optimization. This is not a clean place to do it, but I
+ * don't see a better place either...
*/
if (!locale_is_like_safe())
elog(NOTICE, "Initializing database with %s collation order."
"\n\tThis locale setting will prevent use of index optimization for"
"\n\tLIKE and regexp searches. If you are concerned about speed of"
- "\n\tsuch queries, you may wish to set LC_COLLATE to \"C\" and"
+ "\n\tsuch queries, you may wish to set LC_COLLATE to \"C\" and"
"\n\tre-initdb. For more information see the Administrator's Guide.",
ControlFile->lc_collate);
#else
@@ -1930,17 +1963,17 @@ WriteControlFile(void)
/* Contents are protected with a CRC */
INIT_CRC64(ControlFile->crc);
- COMP_CRC64(ControlFile->crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(ControlFile->crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
/*
- * We write out BLCKSZ bytes into pg_control, zero-padding the
- * excess over sizeof(ControlFileData). This reduces the odds
- * of premature-EOF errors when reading pg_control. We'll still
- * fail when we check the contents of the file, but hopefully with
- * a more specific error than "couldn't read pg_control".
+ * We write out BLCKSZ bytes into pg_control, zero-padding the excess
+ * over sizeof(ControlFileData). This reduces the odds of
+ * premature-EOF errors when reading pg_control. We'll still fail
+ * when we check the contents of the file, but hopefully with a more
+ * specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
elog(STOP, "sizeof(ControlFileData) is too large ... fix xlog.c");
@@ -1993,8 +2026,8 @@ ReadControlFile(void)
/* Now check the CRC. */
INIT_CRC64(crc);
- COMP_CRC64(crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(crc);
@@ -2002,14 +2035,15 @@ ReadControlFile(void)
elog(STOP, "Invalid CRC in control file");
/*
- * Do compatibility checking immediately. We do this here for 2 reasons:
+ * Do compatibility checking immediately. We do this here for 2
+ * reasons:
*
- * (1) if the database isn't compatible with the backend executable,
- * we want to abort before we can possibly do any damage;
+ * (1) if the database isn't compatible with the backend executable, we
+ * want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file
- * for themselves. (These locale settings are considered critical
+ * for themselves. (These locale settings are considered critical
* compatibility items because they can affect sort order of indexes.)
*/
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
@@ -2042,8 +2076,8 @@ UpdateControlFile(void)
int fd;
INIT_CRC64(ControlFile->crc);
- COMP_CRC64(ControlFile->crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(ControlFile->crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
@@ -2096,6 +2130,7 @@ XLOGShmemInit(void)
Assert(!found);
memset(XLogCtl, 0, sizeof(XLogCtlData));
+
/*
* Since XLogCtlData contains XLogRecPtr fields, its sizeof should be
* a multiple of the alignment for same, so no extra alignment padding
@@ -2104,9 +2139,10 @@ XLOGShmemInit(void)
XLogCtl->xlblocks = (XLogRecPtr *)
(((char *) XLogCtl) + sizeof(XLogCtlData));
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
+
/*
- * Here, on the other hand, we must MAXALIGN to ensure the page buffers
- * have worst-case alignment.
+ * Here, on the other hand, we must MAXALIGN to ensure the page
+ * buffers have worst-case alignment.
*/
XLogCtl->pages =
((char *) XLogCtl) + MAXALIGN(sizeof(XLogCtlData) +
@@ -2114,8 +2150,8 @@ XLOGShmemInit(void)
memset(XLogCtl->pages, 0, BLCKSZ * XLOGbuffers);
/*
- * Do basic initialization of XLogCtl shared data.
- * (StartupXLOG will fill in additional info.)
+ * Do basic initialization of XLogCtl shared data. (StartupXLOG will
+ * fill in additional info.)
*/
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
@@ -2145,7 +2181,7 @@ BootStrapXLOG(void)
char *buffer;
XLogPageHeader page;
XLogRecord *record;
- bool use_existent;
+ bool use_existent;
crc64 crc;
/* Use malloc() to ensure buffer is MAXALIGNED */
@@ -2180,7 +2216,7 @@ BootStrapXLOG(void)
INIT_CRC64(crc);
COMP_CRC64(crc, &checkPoint, sizeof(checkPoint));
- COMP_CRC64(crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
record->xl_crc = crc;
@@ -2246,8 +2282,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
- * Note: in most control paths, *ControlFile is already valid and we
- * need not do ReadControlFile() here, but might as well do it to be sure.
+ * Note: in most control paths, *ControlFile is already valid and we need
+ * not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@@ -2297,9 +2333,7 @@ StartupXLOG(void)
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
- {
elog(STOP, "Unable to locate a valid CheckPoint record");
- }
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
@@ -2320,7 +2354,7 @@ StartupXLOG(void)
ShmemVariableCache->oidCount = 0;
ThisStartUpID = checkPoint.ThisStartUpID;
- RedoRecPtr = XLogCtl->Insert.RedoRecPtr =
+ RedoRecPtr = XLogCtl->Insert.RedoRecPtr =
XLogCtl->RedoRecPtr = checkPoint.redo;
if (XLByteLT(RecPtr, checkPoint.redo))
@@ -2328,7 +2362,7 @@ StartupXLOG(void)
if (checkPoint.undo.xrecoff == 0)
checkPoint.undo = RecPtr;
- if (XLByteLT(checkPoint.undo, RecPtr) ||
+ if (XLByteLT(checkPoint.undo, RecPtr) ||
XLByteLT(checkPoint.redo, RecPtr))
{
if (wasShutdown)
@@ -2336,9 +2370,7 @@ StartupXLOG(void)
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
- {
InRecovery = true;
- }
/* REDO */
if (InRecovery)
@@ -2355,7 +2387,8 @@ StartupXLOG(void)
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), STOP, buffer);
- else /* read past CheckPoint record */
+ else
+/* read past CheckPoint record */
record = ReadRecord(NULL, LOG, buffer);
if (record != NULL)
@@ -2369,15 +2402,15 @@ StartupXLOG(void)
ShmemVariableCache->nextXid = record->xl_xid + 1;
if (XLOG_DEBUG)
{
- char buf[8192];
+ char buf[8192];
- sprintf(buf, "REDO @ %u/%u; LSN %u/%u: ",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
- EndRecPtr.xlogid, EndRecPtr.xrecoff);
+ sprintf(buf, "REDO @ %u/%u; LSN %u/%u: ",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
+ EndRecPtr.xlogid, EndRecPtr.xrecoff);
xlog_outrec(buf, record);
strcat(buf, " - ");
- RmgrTable[record->xl_rmid].rm_desc(buf,
- record->xl_info, XLogRecGetData(record));
+ RmgrTable[record->xl_rmid].rm_desc(buf,
+ record->xl_info, XLogRecGetData(record));
fprintf(stderr, "%s\n", buf);
}
@@ -2411,8 +2444,11 @@ StartupXLOG(void)
XLogCtl->xlblocks[0].xrecoff =
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert;
- /* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in, which is what we want.
+
+ /*
+ * Tricky point here: readBuf contains the *last* block that the
+ * LastRec record spans, not the one it starts in, which is what we
+ * want.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -2458,6 +2494,7 @@ StartupXLOG(void)
if (InRecovery)
{
+
/*
* In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this
@@ -2554,7 +2591,7 @@ SetThisStartUpID(void)
/*
* CheckPoint process called by postmaster saves copy of new RedoRecPtr
- * in shmem (using SetRedoRecPtr). When checkpointer completes, postmaster
+ * in shmem (using SetRedoRecPtr). When checkpointer completes, postmaster
* calls GetRedoRecPtr to update its own copy of RedoRecPtr, so that
* subsequently-spawned backends will start out with a reasonably up-to-date
* local RedoRecPtr. Since these operations are not protected by any spinlock
@@ -2605,7 +2642,7 @@ CreateCheckPoint(bool shutdown)
CheckPoint checkPoint;
XLogRecPtr recptr;
XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogRecData rdata;
+ XLogRecData rdata;
uint32 freespace;
uint32 _logId;
uint32 _logSeg;
@@ -2613,7 +2650,7 @@ CreateCheckPoint(bool shutdown)
if (MyLastRecPtr.xrecoff != 0)
elog(ERROR, "CreateCheckPoint: cannot be called inside transaction block");
-
+
START_CRIT_SECTION();
/* Grab lock, using larger than normal sleep between tries (1 sec) */
@@ -2639,17 +2676,17 @@ CreateCheckPoint(bool shutdown)
/*
* If this isn't a shutdown, and we have not inserted any XLOG records
* since the start of the last checkpoint, skip the checkpoint. The
- * idea here is to avoid inserting duplicate checkpoints when the system
- * is idle. That wastes log space, and more importantly it exposes us to
- * possible loss of both current and previous checkpoint records if the
- * machine crashes just as we're writing the update. (Perhaps it'd make
- * even more sense to checkpoint only when the previous checkpoint record
- * is in a different xlog page?)
+ * idea here is to avoid inserting duplicate checkpoints when the
+ * system is idle. That wastes log space, and more importantly it
+ * exposes us to possible loss of both current and previous checkpoint
+ * records if the machine crashes just as we're writing the update.
+ * (Perhaps it'd make even more sense to checkpoint only when the
+ * previous checkpoint record is in a different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
- * the start of the last checkpoint: current insertion point must match
- * the end of the last checkpoint record, and its redo pointer must point
- * to itself.
+ * the start of the last checkpoint: current insertion point must
+ * match the end of the last checkpoint record, and its redo pointer
+ * must point to itself.
*/
if (!shutdown)
{
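
[Editor's note: the two-part idleness test this comment spells out reduces to a small predicate. A simplified sketch, with XLogRecPtr flattened to a plain 64-bit offset; the real code compares the insertion point against the recorded end of the last checkpoint record and checks that the record's redo pointer points back at the record itself.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t DemoRecPtr;    /* flattened stand-in for XLogRecPtr */

    static bool
    checkpoint_needed(DemoRecPtr curInsert,   /* current insertion point */
                      DemoRecPtr ckptStart,   /* where last ckpt record began */
                      DemoRecPtr ckptEnd,     /* where it ended */
                      DemoRecPtr ckptRedo)    /* its redo pointer */
    {
        /* skip only if nothing at all was inserted since the last checkpoint */
        return !(curInsert == ckptEnd && ckptRedo == ckptStart);
    }

    int
    main(void)
    {
        printf("idle:   %s\n",
               checkpoint_needed(132, 100, 132, 100) ? "checkpoint" : "skip");
        printf("active: %s\n",
               checkpoint_needed(500, 100, 132, 100) ? "checkpoint" : "skip");
        return 0;
    }
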
@@ -2677,7 +2714,7 @@ CreateCheckPoint(bool shutdown)
* NB: this is NOT necessarily where the checkpoint record itself will
* be, since other backends may insert more XLOG records while we're
* off doing the buffer flush work. Those XLOG records are logically
- * after the checkpoint, even though physically before it. Got that?
+ * after the checkpoint, even though physically before it. Got that?
*/
freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord)
@@ -2687,16 +2724,18 @@ CreateCheckPoint(bool shutdown)
freespace = BLCKSZ - SizeOfXLogPHD;
}
INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx);
+
/*
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock.
*/
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
+
/*
- * Get UNDO record ptr - this is oldest of PROC->logRec values.
- * We do this while holding insert lock to ensure that we won't miss
- * any about-to-commit transactions (UNDO must include all xacts that
- * have commits after REDO point).
+ * Get UNDO record ptr - this is oldest of PROC->logRec values. We do
+ * this while holding insert lock to ensure that we won't miss any
+ * about-to-commit transactions (UNDO must include all xacts that have
+ * commits after REDO point).
*/
checkPoint.undo = GetUndoRecPtr();
@@ -2720,8 +2759,8 @@ CreateCheckPoint(bool shutdown)
SpinRelease(OidGenLockId);
/*
- * Having constructed the checkpoint record, ensure all shmem disk buffers
- * are flushed to disk.
+ * Having constructed the checkpoint record, ensure all shmem disk
+ * buffers are flushed to disk.
*/
FlushBufferPool();
@@ -2729,7 +2768,7 @@ CreateCheckPoint(bool shutdown)
* Now insert the checkpoint record into XLOG.
*/
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&checkPoint);
+ rdata.data = (char *) (&checkPoint);
rdata.len = sizeof(checkPoint);
rdata.next = NULL;
@@ -2748,11 +2787,11 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog concurrent activity while data base is shutting down");
/*
- * Remember location of prior checkpoint's earliest info.
- * Oldest item is redo or undo, whichever is older; but watch out
- * for case that undo = 0.
+ * Remember location of prior checkpoint's earliest info. Oldest item
+ * is redo or undo, whichever is older; but watch out for case that
+ * undo = 0.
*/
- if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
+ if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
XLByteLT(ControlFile->checkPointCopy.undo,
ControlFile->checkPointCopy.redo))
XLByteToSeg(ControlFile->checkPointCopy.undo, _logId, _logSeg);
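
The "earliest info" rule picks whichever of the prior checkpoint's redo and undo pointers is older, treating undo = 0 as "nothing to undo"; log segments older than that horizon are eligible for recycling. Restated as a one-function sketch under the same simplified LSN type used above:

    #include <stdint.h>

    typedef uint64_t lsn_t;

    /* Oldest LSN the prior checkpoint still needs. */
    static lsn_t
    prior_checkpoint_horizon(lsn_t redo, lsn_t undo)
    {
        if (undo != 0 && undo < redo)
            return undo;
        return redo;
    }
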
@@ -2801,10 +2840,10 @@ CreateCheckPoint(bool shutdown)
void
XLogPutNextOid(Oid nextOid)
{
- XLogRecData rdata;
+ XLogRecData rdata;
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&nextOid);
+ rdata.data = (char *) (&nextOid);
rdata.len = sizeof(Oid);
rdata.next = NULL;
(void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata);
@@ -2816,11 +2855,11 @@ XLogPutNextOid(Oid nextOid)
void
xlog_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_NEXTOID)
{
- Oid nextOid;
+ Oid nextOid;
memcpy(&nextOid, XLogRecGetData(record), sizeof(Oid));
if (ShmemVariableCache->nextOid < nextOid)
@@ -2846,9 +2885,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
/* In an ONLINE checkpoint, treat the counters like NEXTOID */
if (ShmemVariableCache->nextXid < checkPoint.nextXid)
- {
ShmemVariableCache->nextXid = checkPoint.nextXid;
- }
if (ShmemVariableCache->nextOid < checkPoint.nextOid)
{
ShmemVariableCache->nextOid = checkPoint.nextOid;
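
Both the NEXTOID case and the online-checkpoint case apply the same replay rule: a logged counter value only ever advances the shared counter, never moves it back, so replaying stale records is harmless. A sketch with a stand-in counter type:

    #include <stdint.h>

    typedef uint32_t counter_t;     /* stand-in for Oid / TransactionId */

    /* Advance-only update used when replaying counter values. */
    static void
    redo_advance_counter(counter_t *shared, counter_t replayed)
    {
        if (*shared < replayed)
            *shared = replayed;
    }
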
@@ -2856,32 +2893,33 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
}
}
}
-
+
void
xlog_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
-
+
void
-xlog_desc(char *buf, uint8 xl_info, char* rec)
+xlog_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN ||
info == XLOG_CHECKPOINT_ONLINE)
{
- CheckPoint *checkpoint = (CheckPoint*) rec;
+ CheckPoint *checkpoint = (CheckPoint *) rec;
+
sprintf(buf + strlen(buf), "checkpoint: redo %u/%u; undo %u/%u; "
- "sui %u; xid %u; oid %u; %s",
- checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
- checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
- checkpoint->ThisStartUpID, checkpoint->nextXid,
- checkpoint->nextOid,
- (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
+ "sui %u; xid %u; oid %u; %s",
+ checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
+ checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
+ checkpoint->ThisStartUpID, checkpoint->nextXid,
+ checkpoint->nextOid,
+ (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
}
else if (info == XLOG_NEXTOID)
{
- Oid nextOid;
+ Oid nextOid;
memcpy(&nextOid, rec, sizeof(Oid));
sprintf(buf + strlen(buf), "nextOid: %u", nextOid);
@@ -2893,13 +2931,13 @@ xlog_desc(char *buf, uint8 xl_info, char* rec)
static void
xlog_outrec(char *buf, XLogRecord *record)
{
- int bkpb;
- int i;
+ int bkpb;
+ int i;
sprintf(buf + strlen(buf), "prev %u/%u; xprev %u/%u; xid %u",
- record->xl_prev.xlogid, record->xl_prev.xrecoff,
- record->xl_xact_prev.xlogid, record->xl_xact_prev.xrecoff,
- record->xl_xid);
+ record->xl_prev.xlogid, record->xl_prev.xrecoff,
+ record->xl_xact_prev.xlogid, record->xl_xact_prev.xrecoff,
+ record->xl_xid);
for (i = 0, bkpb = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
@@ -2912,7 +2950,7 @@ xlog_outrec(char *buf, XLogRecord *record)
sprintf(buf + strlen(buf), "; bkpb %d", bkpb);
sprintf(buf + strlen(buf), ": %s",
- RmgrTable[record->xl_rmid].rm_name);
+ RmgrTable[record->xl_rmid].rm_name);
}
@@ -2923,15 +2961,19 @@ xlog_outrec(char *buf, XLogRecord *record)
bool
check_xlog_sync_method(const char *method)
{
- if (strcasecmp(method, "fsync") == 0) return true;
+ if (strcasecmp(method, "fsync") == 0)
+ return true;
#ifdef HAVE_FDATASYNC
- if (strcasecmp(method, "fdatasync") == 0) return true;
+ if (strcasecmp(method, "fdatasync") == 0)
+ return true;
#endif
#ifdef OPEN_SYNC_FLAG
- if (strcasecmp(method, "open_sync") == 0) return true;
+ if (strcasecmp(method, "open_sync") == 0)
+ return true;
#endif
#ifdef OPEN_DATASYNC_FLAG
- if (strcasecmp(method, "open_datasync") == 0) return true;
+ if (strcasecmp(method, "open_datasync") == 0)
+ return true;
#endif
return false;
}
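
check_xlog_sync_method() is the validation half of a check/assign pair: a caller is expected to reject the string before assigning it. A minimal usage sketch — the driver function and error message here are illustrative, not part of the patch:

    #include <stdbool.h>
    #include <stdio.h>

    extern bool check_xlog_sync_method(const char *method);
    extern void assign_xlog_sync_method(const char *method);

    static void
    set_wal_sync_method(const char *method)
    {
        if (!check_xlog_sync_method(method))
        {
            fprintf(stderr, "unsupported sync method: %s\n", method);
            return;
        }
        assign_xlog_sync_method(method);
    }
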
@@ -2939,8 +2981,8 @@ check_xlog_sync_method(const char *method)
void
assign_xlog_sync_method(const char *method)
{
- int new_sync_method;
- int new_sync_bit;
+ int new_sync_method;
+ int new_sync_bit;
if (strcasecmp(method, "fsync") == 0)
{
@@ -2978,11 +3020,12 @@ assign_xlog_sync_method(const char *method)
if (sync_method != new_sync_method || open_sync_bit != new_sync_bit)
{
+
/*
- * To ensure that no blocks escape unsynced, force an fsync on
- * the currently open log segment (if any). Also, if the open
- * flag is changing, close the log file so it will be reopened
- * (with new flag bit) at next use.
+ * To ensure that no blocks escape unsynced, force an fsync on the
+ * currently open log segment (if any). Also, if the open flag is
+ * changing, close the log file so it will be reopened (with new
+ * flag bit) at next use.
*/
if (openLogFile >= 0)
{
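
The rule in the comment above, as a standalone sketch: fsync the open segment so no blocks escape unsynced, and close it when the open-flag bit changes so the next use reopens the file with the new flags. The file descriptor and flag names are illustrative:

    #include <unistd.h>

    static int log_fd = -1;         /* stand-in for openLogFile */

    static void
    on_sync_method_change(int old_sync_bit, int new_sync_bit)
    {
        if (log_fd >= 0)
        {
            (void) fsync(log_fd);   /* flush blocks written so far */
            if (old_sync_bit != new_sync_bit)
            {
                close(log_fd);      /* reopen later with the new flag */
                log_fd = -1;
            }
        }
    }
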
@@ -3011,7 +3054,7 @@ issue_xlog_fsync(void)
{
switch (sync_method)
{
- case SYNC_METHOD_FSYNC:
+ case SYNC_METHOD_FSYNC:
if (pg_fsync(openLogFile) != 0)
elog(STOP, "fsync(logfile %u seg %u) failed: %m",
openLogId, openLogSeg);
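
Only the fsync arm of the switch is visible in the hunk above. A hedged sketch of the full dispatch, assuming the method set implied by the #ifdefs earlier in the file; the open_sync/open_datasync methods need no call here because synchronicity rides on the descriptor's open flags:

    #include <unistd.h>

    enum sync_method { M_FSYNC, M_FDATASYNC, M_OPEN_FLAGS };

    static void
    sync_log_segment(int fd, enum sync_method method)
    {
        switch (method)
        {
            case M_FSYNC:
                (void) fsync(fd);
                break;
    #ifdef HAVE_FDATASYNC
            case M_FDATASYNC:
                (void) fdatasync(fd);
                break;
    #endif
            case M_OPEN_FLAGS:
                break;          /* O_SYNC/O_DSYNC writes are synced already */
        }
    }
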
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 8b80c326cab..a3f440ca5f9 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.14 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.15 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,26 +37,26 @@
* xaction/command and return
*
* - -1 if not
- * - 0 if there is no tuple at all
- * - 1 if yes
+ * - 0 if there is no tuple at all
+ * - 1 if yes
*/
int
-XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
- TransactionId xid, CommandId cid)
+XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
+ TransactionId xid, CommandId cid)
{
- Relation reln;
- Buffer buffer;
- Page page;
- ItemId lp;
- HeapTupleHeader htup;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ ItemId lp;
+ HeapTupleHeader htup;
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
- return(0);
+ return (0);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
- return(0);
+ return (0);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@@ -64,13 +64,13 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
- return(0);
+ return (0);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
- return(0);
+ return (0);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@@ -79,11 +79,11 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
if (htup->t_xmin != xid || htup->t_cmin != cid)
{
UnlockAndReleaseBuffer(buffer);
- return(-1);
+ return (-1);
}
UnlockAndReleaseBuffer(buffer);
- return(1);
+ return (1);
}
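
The tri-state protocol above (-1 = not ours, 0 = no tuple, 1 = ours) can be exercised against a simplified in-memory page. The fake_item structure and 1-based offset here are stand-ins for the line-pointer and tuple-header checks in the real function:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct
    {
        bool        used;   /* ItemIdIsUsed analogue */
        uint32_t    xmin;   /* inserting transaction id */
        uint32_t    cmin;   /* inserting command id */
    } fake_item;

    /* -1 = tuple exists but is not ours; 0 = no tuple; 1 = ours. */
    static int
    is_owner_of_item(const fake_item *items, int nitems, int off,
                     uint32_t xid, uint32_t cid)
    {
        if (off < 1 || off > nitems || !items[off - 1].used)
            return 0;
        if (items[off - 1].xmin != xid || items[off - 1].cmin != cid)
            return -1;
        return 1;
    }
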
/*
@@ -95,19 +95,19 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
bool
XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
- Relation reln;
- Buffer buffer;
- Page page;
- ItemId lp;
- HeapTupleHeader htup;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ ItemId lp;
+ HeapTupleHeader htup;
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
- return(false);
+ return (false);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
- return(false);
+ return (false);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@@ -115,21 +115,21 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
if (PageGetSUI(page) != ThisStartUpID)
{
Assert(PageGetSUI(page) < ThisStartUpID);
UnlockAndReleaseBuffer(buffer);
- return(true);
+ return (true);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@@ -140,22 +140,22 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
if (htup->t_infomask & HEAP_XMIN_INVALID ||
(htup->t_infomask & HEAP_MOVED_IN &&
- TransactionIdDidAbort((TransactionId)htup->t_cmin)) ||
+ TransactionIdDidAbort((TransactionId) htup->t_cmin)) ||
TransactionIdDidAbort(htup->t_xmin))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
}
UnlockAndReleaseBuffer(buffer);
- return(true);
+ return (true);
}
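
One detail of XLogIsValidTuple deserves a note: a page whose startup ID predates the current run is accepted outright (the Assert above enforces that it cannot be newer), since nothing on it can stem from the crashed run being recovered. As a sketch with a stand-in type:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t sui_t;         /* stand-in for the startup id */

    /* Pages written under an earlier startup skip the xmin/abort checks. */
    static bool
    page_trusted_as_is(sui_t page_sui, sui_t this_sui)
    {
        return page_sui < this_sui;
    }
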
/*
* Open pg_log in recovery
*/
-extern Relation LogRelation; /* pg_log relation */
+extern Relation LogRelation; /* pg_log relation */
void
XLogOpenLogRelation(void)
@@ -189,32 +189,32 @@ XLogOpenLogRelation(void)
Buffer
XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
{
- BlockNumber lastblock = RelationGetNumberOfBlocks(reln);
+ BlockNumber lastblock = RelationGetNumberOfBlocks(reln);
Buffer buffer;
if (blkno >= lastblock)
{
buffer = InvalidBuffer;
- if (extend) /* we do this in recovery only - no locks */
+ if (extend) /* we do this in recovery only - no locks */
{
Assert(InRecovery);
while (lastblock <= blkno)
{
if (buffer != InvalidBuffer)
- ReleaseBuffer(buffer); /* must be WriteBuffer()? */
+ ReleaseBuffer(buffer); /* must be WriteBuffer()? */
buffer = ReadBuffer(reln, P_NEW);
lastblock++;
}
}
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- return(buffer);
+ return (buffer);
}
buffer = ReadBuffer(reln, blkno);
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- return(buffer);
+ return (buffer);
}
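
The extend path of XLogReadBuffer grows the relation one block at a time until the requested block exists, which only happens during recovery. A minimal sketch over an in-memory stand-in for the relation (error handling omitted; the real code goes through ReadBuffer with P_NEW):

    #include <stdlib.h>
    #include <string.h>

    #define BLK 8192                /* illustrative block size */

    typedef struct
    {
        char       *data;
        unsigned    nblocks;
    } fake_rel;

    /* Return a pointer to block blkno, appending zeroed blocks as needed. */
    static char *
    read_block_extending(fake_rel *rel, unsigned blkno)
    {
        while (rel->nblocks <= blkno)
        {
            rel->data = realloc(rel->data, (size_t) (rel->nblocks + 1) * BLK);
            memset(rel->data + (size_t) rel->nblocks * BLK, 0, BLK);
            rel->nblocks++;
        }
        return rel->data + (size_t) blkno * BLK;
    }
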
/*
@@ -223,32 +223,33 @@ XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
typedef struct XLogRelDesc
{
- RelationData reldata;
- struct XLogRelDesc *lessRecently;
- struct XLogRelDesc *moreRecently;
+ RelationData reldata;
+ struct XLogRelDesc *lessRecently;
+ struct XLogRelDesc *moreRecently;
} XLogRelDesc;
typedef struct XLogRelCacheEntry
{
- RelFileNode rnode;
- XLogRelDesc *rdesc;
+ RelFileNode rnode;
+ XLogRelDesc *rdesc;
} XLogRelCacheEntry;
-static HTAB *_xlrelcache;
-static XLogRelDesc *_xlrelarr = NULL;
-static Form_pg_class _xlpgcarr = NULL;
-static int _xlast = 0;
-static int _xlcnt = 0;
-#define _XLOG_RELCACHESIZE 512
+static HTAB *_xlrelcache;
+static XLogRelDesc *_xlrelarr = NULL;
+static Form_pg_class _xlpgcarr = NULL;
+static int _xlast = 0;
+static int _xlcnt = 0;
+
+#define _XLOG_RELCACHESIZE 512
static void
_xl_init_rel_cache(void)
{
- HASHCTL ctl;
+ HASHCTL ctl;
_xlcnt = _XLOG_RELCACHESIZE;
_xlast = 0;
- _xlrelarr = (XLogRelDesc*) malloc(sizeof(XLogRelDesc) * _xlcnt);
+ _xlrelarr = (XLogRelDesc *) malloc(sizeof(XLogRelDesc) * _xlcnt);
memset(_xlrelarr, 0, sizeof(XLogRelDesc) * _xlcnt);
_xlpgcarr = (Form_pg_class) malloc(sizeof(FormData_pg_class) * _xlcnt);
memset(_xlpgcarr, 0, sizeof(FormData_pg_class) * _xlcnt);
@@ -258,26 +259,26 @@ _xl_init_rel_cache(void)
memset(&ctl, 0, (int) sizeof(ctl));
ctl.keysize = sizeof(RelFileNode);
- ctl.datasize = sizeof(XLogRelDesc*);
+ ctl.datasize = sizeof(XLogRelDesc *);
ctl.hash = tag_hash;
_xlrelcache = hash_create(_XLOG_RELCACHESIZE, &ctl,
- HASH_ELEM | HASH_FUNCTION);
+ HASH_ELEM | HASH_FUNCTION);
}
static void
_xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
{
- XLogRelCacheEntry *hentry;
- bool found;
- XLogRelDesc *rdesc = *edata;
- Form_pg_class tpgc = rdesc->reldata.rd_rel;
+ XLogRelCacheEntry *hentry;
+ bool found;
+ XLogRelDesc *rdesc = *edata;
+ Form_pg_class tpgc = rdesc->reldata.rd_rel;
rdesc->lessRecently->moreRecently = rdesc->moreRecently;
rdesc->moreRecently->lessRecently = rdesc->lessRecently;
- hentry = (XLogRelCacheEntry*) hash_search(_xlrelcache,
- (char*)&(rdesc->reldata.rd_node), HASH_REMOVE, &found);
+ hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
+ (char *) &(rdesc->reldata.rd_node), HASH_REMOVE, &found);
if (hentry == NULL)
elog(STOP, "_xl_remove_hash_entry: can't delete from cache");
@@ -294,16 +295,16 @@ _xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
return;
}
-static XLogRelDesc*
+static XLogRelDesc *
_xl_new_reldesc(void)
{
- XLogRelDesc *res;
+ XLogRelDesc *res;
_xlast++;
if (_xlast < _xlcnt)
{
_xlrelarr[_xlast].reldata.rd_rel = &(_xlpgcarr[_xlast]);
- return(&(_xlrelarr[_xlast]));
+ return (&(_xlrelarr[_xlast]));
}
/* reuse */
@@ -312,7 +313,7 @@ _xl_new_reldesc(void)
_xl_remove_hash_entry(&res, 0);
_xlast--;
- return(res);
+ return (res);
}
@@ -344,12 +345,12 @@ XLogCloseRelationCache(void)
Relation
XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
{
- XLogRelDesc *res;
- XLogRelCacheEntry *hentry;
- bool found;
+ XLogRelDesc *res;
+ XLogRelCacheEntry *hentry;
+ bool found;
- hentry = (XLogRelCacheEntry*)
- hash_search(_xlrelcache, (char*)&rnode, HASH_FIND, &found);
+ hentry = (XLogRelCacheEntry *)
+ hash_search(_xlrelcache, (char *) &rnode, HASH_FIND, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: error in cache");
@@ -372,8 +373,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
res->reldata.rd_node = rnode;
- hentry = (XLogRelCacheEntry*)
- hash_search(_xlrelcache, (char*)&rnode, HASH_ENTER, &found);
+ hentry = (XLogRelCacheEntry *)
+ hash_search(_xlrelcache, (char *) &rnode, HASH_ENTER, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: can't insert into cache");
@@ -385,7 +386,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_fd = -1;
res->reldata.rd_fd = smgropen(DEFAULT_SMGR, &(res->reldata),
- true /* allow failure */);
+ true /* allow failure */ );
}
res->moreRecently = &(_xlrelarr[0]);
@@ -393,8 +394,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
_xlrelarr[0].lessRecently = res;
res->lessRecently->moreRecently = res;
- if (res->reldata.rd_fd < 0) /* file doesn't exist */
- return(NULL);
+ if (res->reldata.rd_fd < 0) /* file doesn't exist */
+ return (NULL);
- return(&(res->reldata));
+ return (&(res->reldata));
}
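
The lessRecently/moreRecently pointers form an intrusive doubly-linked recency list anchored at a sentinel (_xlrelarr[0]): _xl_remove_hash_entry unlinks a victim before its slot is reused, and XLogOpenRelation re-links each opened descriptor next to the sentinel as most recently used. A self-contained sketch of the two list operations, mirroring the pointer updates in the hunks above:

    typedef struct lru_node
    {
        struct lru_node *lessRecently;
        struct lru_node *moreRecently;
    } lru_node;

    /* Unlink a node, as _xl_remove_hash_entry does before reuse. */
    static void
    lru_unlink(lru_node *n)
    {
        n->lessRecently->moreRecently = n->moreRecently;
        n->moreRecently->lessRecently = n->lessRecently;
    }

    /* Re-link a node as most recently used, next to the sentinel head. */
    static void
    lru_touch(lru_node *head, lru_node *n)
    {
        n->moreRecently = head;
        n->lessRecently = head->lessRecently;
        head->lessRecently = n;
        n->lessRecently->moreRecently = n;
    }
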