Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/common/heaptuple.c | 35
-rw-r--r--  src/backend/access/common/indextuple.c | 22
-rw-r--r--  src/backend/access/common/printtup.c | 32
-rw-r--r--  src/backend/access/common/tupdesc.c | 101
-rw-r--r--  src/backend/access/heap/heapam.c | 334
-rw-r--r--  src/backend/access/heap/hio.c | 7
-rw-r--r--  src/backend/access/heap/stats.c | 69
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 262
-rw-r--r--  src/backend/access/index/indexam.c | 28
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 39
-rw-r--r--  src/backend/access/transam/transam.c | 152
-rw-r--r--  src/backend/access/transam/transsup.c | 134
-rw-r--r--  src/backend/access/transam/xact.c | 564
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 59
-rw-r--r--  src/backend/catalog/heap.c | 362
-rw-r--r--  src/backend/catalog/index.c | 394
-rw-r--r--  src/backend/catalog/pg_operator.c | 138
-rw-r--r--  src/backend/catalog/pg_proc.c | 23
-rw-r--r--  src/backend/catalog/pg_type.c | 144
-rw-r--r--  src/backend/commands/_deadcode/recipe.c | 53
-rw-r--r--  src/backend/commands/analyze.c | 7
-rw-r--r--  src/backend/commands/command.c | 90
-rw-r--r--  src/backend/commands/copy.c | 8
-rw-r--r--  src/backend/commands/creatinh.c | 44
-rw-r--r--  src/backend/commands/define.c | 19
-rw-r--r--  src/backend/commands/indexcmds.c | 35
-rw-r--r--  src/backend/commands/proclang.c | 30
-rw-r--r--  src/backend/commands/trigger.c | 268
-rw-r--r--  src/backend/commands/user.c | 14
-rw-r--r--  src/backend/executor/_deadcode/nodeTee.c | 28
-rw-r--r--  src/backend/executor/execAmi.c | 58
-rw-r--r--  src/backend/executor/execJunk.c | 79
-rw-r--r--  src/backend/executor/execProcnode.c | 109
-rw-r--r--  src/backend/executor/execScan.c | 69
-rw-r--r--  src/backend/executor/execTuples.c | 133
-rw-r--r--  src/backend/executor/execUtils.c | 81
-rw-r--r--  src/backend/executor/functions.c | 17
-rw-r--r--  src/backend/executor/nodeAgg.c | 28
-rw-r--r--  src/backend/executor/nodeAppend.c | 131
-rw-r--r--  src/backend/executor/nodeGroup.c | 31
-rw-r--r--  src/backend/executor/nodeHash.c | 153
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 177
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 361
-rw-r--r--  src/backend/executor/nodeLimit.c | 91
-rw-r--r--  src/backend/executor/nodeMaterial.c | 121
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 398
-rw-r--r--  src/backend/executor/nodeNestloop.c | 163
-rw-r--r--  src/backend/executor/nodeResult.c | 126
-rw-r--r--  src/backend/executor/nodeSeqscan.c | 108
-rw-r--r--  src/backend/executor/nodeSetOp.c | 69
-rw-r--r--  src/backend/executor/nodeSort.c | 154
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 83
-rw-r--r--  src/backend/executor/nodeTidscan.c | 146
-rw-r--r--  src/backend/executor/nodeUnique.c | 93
-rw-r--r--  src/backend/lib/stringinfo.c | 12
-rw-r--r--  src/backend/nodes/copyfuncs.c | 303
-rw-r--r--  src/backend/optimizer/path/_deadcode/predmig.c | 4
-rw-r--r--  src/backend/optimizer/path/_deadcode/xfunc.c | 12
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 16
-rw-r--r--  src/backend/parser/analyze.c | 67
-rw-r--r--  src/backend/parser/parse_clause.c | 8
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 21
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 52
-rw-r--r--  src/backend/storage/ipc/ipc.c | 107
-rw-r--r--  src/backend/storage/lmgr/proc.c | 127
-rw-r--r--  src/backend/tcop/dest.c | 49
-rw-r--r--  src/backend/tcop/fastpath.c | 8
-rw-r--r--  src/backend/tcop/postgres.c | 273
-rw-r--r--  src/backend/tcop/pquery.c | 99
-rw-r--r--  src/backend/tcop/utility.c | 23
-rw-r--r--  src/backend/tioga/tgRecipe.h | 3
-rw-r--r--  src/backend/utils/adt/ascii.c | 17
-rw-r--r--  src/backend/utils/adt/formatting.c | 225
-rw-r--r--  src/backend/utils/adt/numeric.c | 333
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 204
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 731
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 158
-rw-r--r--  src/backend/utils/init/postinit.c | 23
-rw-r--r--  src/backend/utils/misc/database.c | 42
79 files changed, 4035 insertions, 5356 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 9bb08054943..1f9622e39ba 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.71 2001/03/22 06:16:06 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -246,9 +246,8 @@ nocachegetattr(HeapTuple tuple,
* there's a null somewhere in the tuple
*/
- /* ----------------
- * check to see if desired att is null
- * ----------------
+ /*
+ * check to see if desired att is null
*/
#ifdef IN_MACRO
@@ -261,9 +260,8 @@ nocachegetattr(HeapTuple tuple,
}
#endif
- /* ----------------
- * Now check to see if any preceding bits are null...
- * ----------------
+ /*
+ * Now check to see if any preceding bits are null...
*/
{
int byte = attnum >> 3;
@@ -658,9 +656,8 @@ heap_modifytuple(HeapTuple tuple,
HeapTuple newTuple;
uint8 infomask;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(HeapTupleIsValid(tuple));
Assert(RelationIsValid(relation));
@@ -670,10 +667,9 @@ heap_modifytuple(HeapTuple tuple,
numberOfAttributes = RelationGetForm(relation)->relnatts;
- /* ----------------
- * allocate and fill *value and *nulls arrays from either
- * the tuple or the repl information, as appropriate.
- * ----------------
+ /*
+ * allocate and fill *value and *nulls arrays from either the tuple or
+ * the repl information, as appropriate.
*/
value = (Datum *) palloc(numberOfAttributes * sizeof *value);
nulls = (char *) palloc(numberOfAttributes * sizeof *nulls);
@@ -701,17 +697,16 @@ heap_modifytuple(HeapTuple tuple,
}
}
- /* ----------------
- * create a new tuple from the *values and *nulls arrays
- * ----------------
+ /*
+ * create a new tuple from the *values and *nulls arrays
*/
newTuple = heap_formtuple(RelationGetDescr(relation),
value,
nulls);
- /* ----------------
- * copy the header except for t_len, t_natts, t_hoff, t_bits, t_infomask
- * ----------------
+ /*
+ * copy the header except for t_len, t_natts, t_hoff, t_bits,
+ * t_infomask
*/
infomask = newTuple->t_data->t_infomask;
memmove((char *) &newTuple->t_data->t_oid, /* XXX */
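
The heap_modifytuple() hunks above describe a simple merge step: build the value/nulls arrays by taking each attribute either from the existing tuple or from the caller's replacement arrays, then form a new tuple from the result. Below is a minimal, self-contained sketch of just that merge, with illustrative types (a plain long stands in for Datum, and merge_attrs is an invented name; the real code drives heap_getattr() and heap_formtuple() instead):

    #include <stddef.h>

    /*
     * Fill value[]/nulls[] for natts attributes: where repl[i] is 'r' take
     * the caller-supplied replacement, otherwise keep the attribute from
     * the old tuple.  'n' in a nulls array marks a NULL attribute, ' ' a
     * non-NULL one, mirroring the convention used by heap_modifytuple().
     */
    static void
    merge_attrs(int natts,
                const long *old_value, const char *old_nulls,
                const long *repl_value, const char *repl_nulls,
                const char *repl,
                long *value, char *nulls)
    {
        int     i;

        for (i = 0; i < natts; i++)
        {
            if (repl[i] == 'r')     /* caller wants this attribute replaced */
            {
                value[i] = repl_value[i];
                nulls[i] = repl_nulls[i];
            }
            else                    /* keep the value from the existing tuple */
            {
                value[i] = old_value[i];
                nulls[i] = old_nulls[i];
            }
        }
    }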
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index da8129f307f..5c165e2c779 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.54 2001/03/22 06:16:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -165,9 +165,8 @@ index_formtuple(TupleDesc tupleDescriptor,
infomask |= size;
- /* ----------------
+ /*
* initialize metadata
- * ----------------
*/
tuple->t_info = infomask;
return tuple;
@@ -205,9 +204,9 @@ nocache_index_getattr(IndexTuple tup,
int data_off; /* tuple data offset */
(void) isnull; /* not used */
- /* ----------------
- * sanity checks
- * ----------------
+
+ /*
+ * sanity checks
*/
/* ----------------
@@ -246,9 +245,9 @@ nocache_index_getattr(IndexTuple tup,
}
else
{ /* there's a null somewhere in the tuple */
- /* ----------------
- * check to see if desired att is null
- * ----------------
+
+ /*
+ * check to see if desired att is null
*/
/* XXX "knows" t_bits are just after fixed tuple header! */
@@ -264,9 +263,8 @@ nocache_index_getattr(IndexTuple tup,
}
#endif
- /* ----------------
- * Now check to see if any preceding bits are null...
- * ----------------
+ /*
+ * Now check to see if any preceding bits are null...
*/
{
int byte = attnum >> 3;
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index d44bfe973e0..da95edfc1fe 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.59 2001/03/22 06:16:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -155,16 +155,14 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (myState->attrinfo != typeinfo || myState->nattrs != natts)
printtup_prepare_info(myState, typeinfo, natts);
- /* ----------------
- * tell the frontend to expect new tuple data (in ASCII style)
- * ----------------
+ /*
+ * tell the frontend to expect new tuple data (in ASCII style)
*/
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'D');
- /* ----------------
- * send a bitmap of which attributes are not null
- * ----------------
+ /*
+ * send a bitmap of which attributes are not null
*/
j = 0;
k = 1 << 7;
@@ -183,9 +181,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (k != (1 << 7)) /* flush last partial byte */
pq_sendint(&buf, j, 1);
- /* ----------------
- * send the attributes of this tuple
- * ----------------
+ /*
+ * send the attributes of this tuple
*/
for (i = 0; i < natts; ++i)
{
@@ -357,16 +354,14 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (myState->attrinfo != typeinfo || myState->nattrs != natts)
printtup_prepare_info(myState, typeinfo, natts);
- /* ----------------
- * tell the frontend to expect new tuple data (in binary style)
- * ----------------
+ /*
+ * tell the frontend to expect new tuple data (in binary style)
*/
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'B');
- /* ----------------
- * send a bitmap of which attributes are not null
- * ----------------
+ /*
+ * send a bitmap of which attributes are not null
*/
j = 0;
k = 1 << 7;
@@ -385,9 +380,8 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (k != (1 << 7)) /* flush last partial byte */
pq_sendint(&buf, j, 1);
- /* ----------------
- * send the attributes of this tuple
- * ----------------
+ /*
+ * send the attributes of this tuple
*/
#ifdef IPORTAL_DEBUG
fprintf(stderr, "sending tuple with %d atts\n", natts);
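
The printtup() and printtup_internal() hunks above only restyle the comments around the null-bitmap loop (the j/k counters): bits are packed most-significant-bit first, a byte is emitted each time it fills, and any partial byte is flushed at the end. For reference, here is a minimal standalone sketch of that packing pattern, assuming a plain output buffer instead of the backend's pq_beginmessage/pq_sendint machinery (pack_null_bitmap and its arguments are illustrative names, not backend APIs):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /*
     * Pack an array of "attribute is not null" flags into a bitmap,
     * MSB first.  out must have room for (natts + 7) / 8 bytes.
     * Returns the number of bytes written.
     */
    static size_t
    pack_null_bitmap(const bool *notnull, int natts, uint8_t *out)
    {
        size_t  nbytes = 0;
        uint8_t j = 0;              /* byte being assembled */
        uint8_t k = 1 << 7;         /* current bit position */
        int     i;

        for (i = 0; i < natts; i++)
        {
            if (notnull[i])
                j |= k;
            k >>= 1;
            if (k == 0)             /* byte full, emit it */
            {
                out[nbytes++] = j;
                j = 0;
                k = 1 << 7;
            }
        }
        if (k != (1 << 7))          /* flush last partial byte */
            out[nbytes++] = j;
        return nbytes;
    }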
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index e07c6296d15..769f754b669 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.73 2001/03/22 06:16:06 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -37,17 +37,15 @@ CreateTemplateTupleDesc(int natts)
uint32 size;
TupleDesc desc;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(natts >= 1);
- /* ----------------
- * allocate enough memory for the tuple descriptor and
- * zero it as TupleDescInitEntry assumes that the descriptor
- * is filled with NULL pointers.
- * ----------------
+ /*
+ * allocate enough memory for the tuple descriptor and zero it as
+ * TupleDescInitEntry assumes that the descriptor is filled with NULL
+ * pointers.
*/
size = natts * sizeof(Form_pg_attribute);
desc = (TupleDesc) palloc(sizeof(struct tupleDesc));
@@ -71,9 +69,8 @@ CreateTupleDesc(int natts, Form_pg_attribute *attrs)
{
TupleDesc desc;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(natts >= 1);
@@ -337,9 +334,8 @@ TupleDescInitEntry(TupleDesc desc,
Form_pg_type typeForm;
Form_pg_attribute att;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(PointerIsValid(desc));
AssertArg(attributeNumber >= 1);
@@ -352,17 +348,15 @@ TupleDescInitEntry(TupleDesc desc,
AssertArg(!PointerIsValid(desc->attrs[attributeNumber - 1]));
- /* ----------------
- * allocate storage for this attribute
- * ----------------
+ /*
+ * allocate storage for this attribute
*/
att = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
desc->attrs[attributeNumber - 1] = att;
- /* ----------------
- * initialize the attribute fields
- * ----------------
+ /*
+ * initialize the attribute fields
*/
att->attrelid = 0; /* dummy value */
@@ -404,10 +398,10 @@ TupleDescInitEntry(TupleDesc desc,
0, 0, 0);
if (!HeapTupleIsValid(tuple))
{
- /* ----------------
- * here type info does not exist yet so we just fill
- * the attribute with dummy information and return false.
- * ----------------
+
+ /*
+ * here type info does not exist yet so we just fill the attribute
+ * with dummy information and return false.
*/
att->atttypid = InvalidOid;
att->attlen = (int16) 0;
@@ -417,32 +411,30 @@ TupleDescInitEntry(TupleDesc desc,
return false;
}
- /* ----------------
- * type info exists so we initialize our attribute
- * information from the type tuple we found..
- * ----------------
+ /*
+ * type info exists so we initialize our attribute information from
+ * the type tuple we found..
*/
typeForm = (Form_pg_type) GETSTRUCT(tuple);
att->atttypid = tuple->t_data->t_oid;
- /*------------------------
+ /*
* There are a couple of cases where we must override the information
* stored in pg_type.
*
* First: if this attribute is a set, what is really stored in the
- * attribute is the OID of a tuple in the pg_proc catalog.
- * The pg_proc tuple contains the query string which defines
- * this set - i.e., the query to run to get the set.
- * So the atttypid (just assigned above) refers to the type returned
- * by this query, but the actual length of this attribute is the
- * length (size) of an OID.
+ * attribute is the OID of a tuple in the pg_proc catalog. The pg_proc
+ * tuple contains the query string which defines this set - i.e., the
+ * query to run to get the set. So the atttypid (just assigned above)
+ * refers to the type returned by this query, but the actual length of
+ * this attribute is the length (size) of an OID.
*
- * (Why not just make the atttypid point to the OID type, instead
- * of the type the query returns? Because the executor uses the atttypid
- * to tell the front end what type will be returned (in BeginCommand),
- * and in the end the type returned will be the result of the query, not
- * an OID.)
+ * (Why not just make the atttypid point to the OID type, instead of the
+ * type the query returns? Because the executor uses the atttypid to
+ * tell the front end what type will be returned (in BeginCommand),
+ * and in the end the type returned will be the result of the query,
+ * not an OID.)
*
* (Why not wait until the return type of the set is known (i.e., the
* recursive call to the executor to execute the set has returned)
@@ -460,7 +452,6 @@ TupleDescInitEntry(TupleDesc desc,
*
* A set of complex type is first and foremost a set, so its
* representation is Oid not pointer. So, test that case first.
- *-----------------------------------------
*/
if (attisset)
{
@@ -550,9 +541,8 @@ BuildDescForRelation(List *schema, char *relname)
int ndef = 0;
bool attisset;
- /* ----------------
- * allocate a new tuple descriptor
- * ----------------
+ /*
+ * allocate a new tuple descriptor
*/
natts = length(schema);
desc = CreateTemplateTupleDesc(natts);
@@ -565,11 +555,10 @@ BuildDescForRelation(List *schema, char *relname)
ColumnDef *entry = lfirst(p);
List *arry;
- /* ----------------
- * for each entry in the list, get the name and type
- * information from the list and have TupleDescInitEntry
- * fill in the attribute information we need.
- * ----------------
+ /*
+ * for each entry in the list, get the name and type information
+ * from the list and have TupleDescInitEntry fill in the attribute
+ * information we need.
*/
attnum++;
@@ -595,12 +584,12 @@ BuildDescForRelation(List *schema, char *relname)
typenameTypeId(typename),
atttypmod, attdim, attisset))
{
- /* ----------------
- * if TupleDescInitEntry() fails, it means there is
- * no type in the system catalogs. So now we check if
- * the type name equals the relation name. If so we
- * have a self reference, otherwise it's an error.
- * ----------------
+
+ /*
+ * if TupleDescInitEntry() fails, it means there is no type in
+ * the system catalogs. So now we check if the type name
+ * equals the relation name. If so we have a self reference,
+ * otherwise it's an error.
*/
if (strcmp(typename, relname) == 0)
TupleDescMakeSelfReference(desc, attnum, relname);
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index b55717744c1..d56d6abf2bc 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.112 2001/03/22 06:16:07 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -116,20 +116,20 @@ initscan(HeapScanDesc scan,
unsigned nkeys,
ScanKey key)
{
- /* ----------------
- * Make sure we have up-to-date idea of number of blocks in relation.
- * It is sufficient to do this once at scan start, since any tuples
- * added while the scan is in progress will be invisible to my
- * transaction anyway...
- * ----------------
+
+ /*
+ * Make sure we have up-to-date idea of number of blocks in relation.
+ * It is sufficient to do this once at scan start, since any tuples
+ * added while the scan is in progress will be invisible to my
+ * transaction anyway...
*/
relation->rd_nblocks = RelationGetNumberOfBlocks(relation);
if (relation->rd_nblocks == 0)
{
- /* ----------------
- * relation is empty
- * ----------------
+
+ /*
+ * relation is empty
*/
scan->rs_ntup.t_datamcxt = scan->rs_ctup.t_datamcxt =
scan->rs_ptup.t_datamcxt = NULL;
@@ -139,9 +139,9 @@ initscan(HeapScanDesc scan,
}
else if (atend)
{
- /* ----------------
- * reverse scan
- * ----------------
+
+ /*
+ * reverse scan
*/
scan->rs_ntup.t_datamcxt = scan->rs_ctup.t_datamcxt = NULL;
scan->rs_ntup.t_data = scan->rs_ctup.t_data = NULL;
@@ -152,9 +152,9 @@ initscan(HeapScanDesc scan,
}
else
{
- /* ----------------
- * forward scan
- * ----------------
+
+ /*
+ * forward scan
*/
scan->rs_ctup.t_datamcxt = scan->rs_ptup.t_datamcxt = NULL;
scan->rs_ctup.t_data = scan->rs_ptup.t_data = NULL;
@@ -170,9 +170,8 @@ initscan(HeapScanDesc scan,
ItemPointerSetInvalid(&(scan->rs_mntid));
ItemPointerSetInvalid(&(scan->rs_mcd));
- /* ----------------
- * copy the scan key, if appropriate
- * ----------------
+ /*
+ * copy the scan key, if appropriate
*/
if (key != NULL)
memmove(scan->rs_key, key, nkeys * sizeof(ScanKeyData));
@@ -188,11 +187,9 @@ unpinscan(HeapScanDesc scan)
if (BufferIsValid(scan->rs_pbuf))
ReleaseBuffer(scan->rs_pbuf);
- /* ------------------------------------
- * Scan will pin buffer once for each non-NULL tuple pointer
- * (ptup, ctup, ntup), so they have to be unpinned multiple
- * times.
- * ------------------------------------
+ /*
+ * Scan will pin buffer once for each non-NULL tuple pointer (ptup,
+ * ctup, ntup), so they have to be unpinned multiple times.
*/
if (BufferIsValid(scan->rs_cbuf))
ReleaseBuffer(scan->rs_cbuf);
@@ -251,19 +248,17 @@ heapgettup(Relation relation,
ItemPointer tid = (tuple->t_data == NULL) ?
(ItemPointer) NULL : &(tuple->t_self);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_heapgettup);
IncrHeapAccessStat(global_heapgettup);
- /* ----------------
- * debugging stuff
+ /*
+ * debugging stuff
*
- * check validity of arguments, here and for other functions too
- * Note: no locking manipulations needed--this is a local function
- * ----------------
+ * check validity of arguments, here and for other functions too Note: no
+ * locking manipulations needed--this is a local function
*/
#ifdef HEAPDEBUGALL
if (ItemPointerIsValid(tid))
@@ -289,9 +284,8 @@ heapgettup(Relation relation,
tuple->t_tableOid = relation->rd_id;
- /* ----------------
- * return null immediately if relation is empty
- * ----------------
+ /*
+ * return null immediately if relation is empty
*/
if (!(pages = relation->rd_nblocks))
{
@@ -300,15 +294,14 @@ heapgettup(Relation relation,
return;
}
- /* ----------------
- * calculate next starting lineoff, given scan direction
- * ----------------
+ /*
+ * calculate next starting lineoff, given scan direction
*/
if (!dir)
{
- /* ----------------
+
+ /*
* ``no movement'' scan direction
- * ----------------
*/
/* assume it is a valid TID XXX */
if (ItemPointerIsValid(tid) == false)
@@ -340,9 +333,9 @@ heapgettup(Relation relation,
}
else if (dir < 0)
{
- /* ----------------
- * reverse scan direction
- * ----------------
+
+ /*
+ * reverse scan direction
*/
if (ItemPointerIsValid(tid) == false)
tid = NULL;
@@ -383,9 +376,9 @@ heapgettup(Relation relation,
}
else
{
- /* ----------------
- * forward scan direction
- * ----------------
+
+ /*
+ * forward scan direction
*/
if (ItemPointerIsValid(tid) == false)
{
@@ -420,10 +413,9 @@ heapgettup(Relation relation,
/* 'dir' is now non-zero */
- /* ----------------
- * calculate line pointer and number of remaining items
- * to check on this page.
- * ----------------
+ /*
+ * calculate line pointer and number of remaining items to check on
+ * this page.
*/
lpp = PageGetItemId(dp, lineoff);
if (dir < 0)
@@ -431,10 +423,9 @@ heapgettup(Relation relation,
else
linesleft = lines - lineoff;
- /* ----------------
- * advance the scan until we find a qualifying tuple or
- * run out of stuff to scan
- * ----------------
+ /*
+ * advance the scan until we find a qualifying tuple or run out of
+ * stuff to scan
*/
for (;;)
{
@@ -446,9 +437,9 @@ heapgettup(Relation relation,
tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
tuple->t_len = ItemIdGetLength(lpp);
ItemPointerSet(&(tuple->t_self), page, lineoff);
- /* ----------------
- * if current tuple qualifies, return it.
- * ----------------
+
+ /*
+ * if current tuple qualifies, return it.
*/
HeapTupleSatisfies(tuple, relation, *buffer, (PageHeader) dp,
snapshot, nkeys, key);
@@ -459,9 +450,8 @@ heapgettup(Relation relation,
}
}
- /* ----------------
- * otherwise move to the next item on the page
- * ----------------
+ /*
+ * otherwise move to the next item on the page
*/
--linesleft;
if (dir < 0)
@@ -477,17 +467,15 @@ heapgettup(Relation relation,
}
}
- /* ----------------
- * if we get here, it means we've exhausted the items on
- * this page and it's time to move to the next..
- * ----------------
+ /*
+ * if we get here, it means we've exhausted the items on this page
+ * and it's time to move to the next..
*/
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
page = nextpage(page, dir);
- /* ----------------
- * return NULL if we've exhausted all the pages..
- * ----------------
+ /*
+ * return NULL if we've exhausted all the pages..
*/
if (page < 0 || page >= pages)
{
@@ -588,9 +576,8 @@ heap_open(Oid relationId, LOCKMODE lockmode)
Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_open);
IncrHeapAccessStat(global_open);
@@ -626,9 +613,8 @@ heap_openr(const char *relationName, LOCKMODE lockmode)
Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_openr);
IncrHeapAccessStat(global_openr);
@@ -663,9 +649,8 @@ heap_open_nofail(Oid relationId)
{
Relation r;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_open);
IncrHeapAccessStat(global_open);
@@ -694,9 +679,8 @@ heap_openr_nofail(const char *relationName)
{
Relation r;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_openr);
IncrHeapAccessStat(global_openr);
@@ -724,9 +708,8 @@ heap_close(Relation relation, LOCKMODE lockmode)
{
Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_close);
IncrHeapAccessStat(global_close);
@@ -752,27 +735,24 @@ heap_beginscan(Relation relation,
{
HeapScanDesc scan;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_beginscan);
IncrHeapAccessStat(global_beginscan);
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
if (!RelationIsValid(relation))
elog(ERROR, "heap_beginscan: !RelationIsValid(relation)");
- /* ----------------
- * increment relation ref count while scanning relation
+ /*
+ * increment relation ref count while scanning relation
*
- * This is just to make really sure the relcache entry won't go away
- * while the scan has a pointer to it. Caller should be holding the
- * rel open anyway, so this is redundant in all normal scenarios...
- * ----------------
+ * This is just to make really sure the relcache entry won't go away
+ * while the scan has a pointer to it. Caller should be holding the
+ * rel open anyway, so this is redundant in all normal scenarios...
*/
RelationIncrementReferenceCount(relation);
@@ -780,9 +760,8 @@ heap_beginscan(Relation relation,
if (relation->rd_rel->relkind == RELKIND_UNCATALOGED)
snapshot = SnapshotSelf;
- /* ----------------
- * allocate and initialize scan descriptor
- * ----------------
+ /*
+ * allocate and initialize scan descriptor
*/
scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
@@ -814,22 +793,20 @@ heap_rescan(HeapScanDesc scan,
bool scanFromEnd,
ScanKey key)
{
- /* ----------------
- * increment access statistics
- * ----------------
+
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_rescan);
IncrHeapAccessStat(global_rescan);
- /* ----------------
- * unpin scan buffers
- * ----------------
+ /*
+ * unpin scan buffers
*/
unpinscan(scan);
- /* ----------------
- * reinitialize scan descriptor
- * ----------------
+ /*
+ * reinitialize scan descriptor
*/
scan->rs_atend = scanFromEnd;
initscan(scan, scan->rs_rd, scanFromEnd, scan->rs_nkeys, key);
@@ -845,24 +822,22 @@ heap_rescan(HeapScanDesc scan,
void
heap_endscan(HeapScanDesc scan)
{
- /* ----------------
- * increment access statistics
- * ----------------
+
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_endscan);
IncrHeapAccessStat(global_endscan);
/* Note: no locking manipulations needed */
- /* ----------------
- * unpin scan buffers
- * ----------------
+ /*
+ * unpin scan buffers
*/
unpinscan(scan);
- /* ----------------
- * decrement relation reference count and free scan descriptor storage
- * ----------------
+ /*
+ * decrement relation reference count and free scan descriptor storage
*/
RelationDecrementReferenceCount(scan->rs_rd);
@@ -919,34 +894,31 @@ heap_getnext(HeapScanDesc scandesc, int backw)
{
HeapScanDesc scan = scandesc;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_getnext);
IncrHeapAccessStat(global_getnext);
/* Note: no locking manipulations needed */
- /* ----------------
- * argument checks
- * ----------------
+ /*
+ * argument checks
*/
if (scan == NULL)
elog(ERROR, "heap_getnext: NULL relscan");
- /* ----------------
- * initialize return buffer to InvalidBuffer
- * ----------------
+ /*
+ * initialize return buffer to InvalidBuffer
*/
HEAPDEBUG_1; /* heap_getnext( info ) */
if (backw)
{
- /* ----------------
- * handle reverse scan
- * ----------------
+
+ /*
+ * handle reverse scan
*/
HEAPDEBUG_2; /* heap_getnext called with backw */
@@ -1020,9 +992,9 @@ heap_getnext(HeapScanDesc scandesc, int backw)
}
else
{
- /* ----------------
- * handle forward scan
- * ----------------
+
+ /*
+ * handle forward scan
*/
if (scan->rs_ctup.t_data == scan->rs_ntup.t_data &&
BufferIsInvalid(scan->rs_nbuf))
@@ -1097,10 +1069,9 @@ heap_getnext(HeapScanDesc scandesc, int backw)
scan->rs_nbuf = UnknownBuffer;
}
- /* ----------------
- * if we get here it means we have a new current scan tuple, so
- * point to the proper return buffer and return the tuple.
- * ----------------
+ /*
+ * if we get here it means we have a new current scan tuple, so point
+ * to the proper return buffer and return the tuple.
*/
HEAPDEBUG_7; /* heap_getnext returning tuple */
@@ -1133,17 +1104,15 @@ heap_fetch(Relation relation,
ItemPointer tid = &(tuple->t_self);
OffsetNumber offnum;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_fetch);
IncrHeapAccessStat(global_fetch);
- /* ----------------
- * get the buffer from the relation descriptor
- * Note that this does a buffer pin.
- * ----------------
+ /*
+ * get the buffer from the relation descriptor Note that this does a
+ * buffer pin.
*/
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
@@ -1154,17 +1123,15 @@ heap_fetch(Relation relation,
LockBuffer(buffer, BUFFER_LOCK_SHARE);
- /* ----------------
- * get the item line pointer corresponding to the requested tid
- * ----------------
+ /*
+ * get the item line pointer corresponding to the requested tid
*/
dp = (PageHeader) BufferGetPage(buffer);
offnum = ItemPointerGetOffsetNumber(tid);
lp = PageGetItemId(dp, offnum);
- /* ----------------
- * more sanity checks
- * ----------------
+ /*
+ * more sanity checks
*/
if (!ItemIdIsUsed(lp))
@@ -1182,9 +1149,8 @@ heap_fetch(Relation relation,
tuple->t_len = ItemIdGetLength(lp);
tuple->t_tableOid = relation->rd_id;
- /* ----------------
- * check time qualification of tid
- * ----------------
+ /*
+ * check time qualification of tid
*/
HeapTupleSatisfies(tuple, relation, buffer, dp,
@@ -1229,10 +1195,9 @@ heap_get_latest_tid(Relation relation,
bool invalidBlock,
linkend;
- /* ----------------
- * get the buffer from the relation descriptor
- * Note that this does a buffer pin.
- * ----------------
+ /*
+ * get the buffer from the relation descriptor Note that this does a
+ * buffer pin.
*/
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
@@ -1243,9 +1208,8 @@ heap_get_latest_tid(Relation relation,
LockBuffer(buffer, BUFFER_LOCK_SHARE);
- /* ----------------
- * get the item line pointer corresponding to the requested tid
- * ----------------
+ /*
+ * get the item line pointer corresponding to the requested tid
*/
dp = (PageHeader) BufferGetPage(buffer);
offnum = ItemPointerGetOffsetNumber(tid);
@@ -1263,9 +1227,8 @@ heap_get_latest_tid(Relation relation,
return NULL;
}
- /* ----------------
- * more sanity checks
- * ----------------
+ /*
+ * more sanity checks
*/
tp.t_datamcxt = NULL;
@@ -1274,9 +1237,8 @@ heap_get_latest_tid(Relation relation,
tp.t_self = *tid;
ctid = tp.t_data->t_ctid;
- /* ----------------
- * check time qualification of tid
- * ----------------
+ /*
+ * check time qualification of tid
*/
HeapTupleSatisfies(&tp, relation, buffer, dp,
@@ -1323,15 +1285,13 @@ heap_insert(Relation relation, HeapTuple tup)
IncrHeapAccessStat(local_insert);
IncrHeapAccessStat(global_insert);
- /* ----------------
- * If the object id of this tuple has already been assigned, trust
- * the caller. There are a couple of ways this can happen. At initial
- * db creation, the backend program sets oids for tuples. When we
- * define an index, we set the oid. Finally, in the future, we may
- * allow users to set their own object ids in order to support a
- * persistent object store (objects need to contain pointers to one
- * another).
- * ----------------
+ /*
+ * If the object id of this tuple has already been assigned, trust the
+ * caller. There are a couple of ways this can happen. At initial db
+ * creation, the backend program sets oids for tuples. When we define
+ * an index, we set the oid. Finally, in the future, we may allow
+ * users to set their own object ids in order to support a persistent
+ * object store (objects need to contain pointers to one another).
*/
if (!OidIsValid(tup->t_data->t_oid))
tup->t_data->t_oid = newoid();
@@ -1346,10 +1306,10 @@ heap_insert(Relation relation, HeapTuple tup)
tup->t_tableOid = relation->rd_id;
#ifdef TUPLE_TOASTER_ACTIVE
- /* ----------
- * If the new tuple is too big for storage or contains already
- * toasted attributes from some other relation, invoke the toaster.
- * ----------
+
+ /*
+ * If the new tuple is too big for storage or contains already toasted
+ * attributes from some other relation, invoke the toaster.
*/
if (HeapTupleHasExtended(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
@@ -1540,12 +1500,12 @@ l1:
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
#ifdef TUPLE_TOASTER_ACTIVE
- /* ----------
- * If the relation has toastable attributes, we need to delete
- * no longer needed items there too. We have to do this before
+
+ /*
+ * If the relation has toastable attributes, we need to delete no
+ * longer needed items there too. We have to do this before
* WriteBuffer because we need to look at the contents of the tuple,
* but it's OK to release the context lock on the buffer first.
- * ----------
*/
if (HeapTupleHasExtended(&tp))
heap_tuple_toast_attrs(relation, NULL, &(tp));
@@ -1977,9 +1937,8 @@ void
heap_markpos(HeapScanDesc scan)
{
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_markpos);
IncrHeapAccessStat(global_markpos);
@@ -2012,9 +1971,8 @@ heap_markpos(HeapScanDesc scan)
scan->rs_key);
}
- /* ----------------
+ /*
* Should not unpin the buffer pages. They may still be in use.
- * ----------------
*/
if (scan->rs_ptup.t_data != NULL)
scan->rs_mptid = scan->rs_ptup.t_self;
@@ -2054,9 +2012,9 @@ heap_markpos(HeapScanDesc scan)
void
heap_restrpos(HeapScanDesc scan)
{
- /* ----------------
- * increment access statistics
- * ----------------
+
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_restrpos);
IncrHeapAccessStat(global_restrpos);
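
The heapgettup() hunks above restyle comments around a single control structure: pick a starting page and line offset from the scan direction, walk the items on that page, and move to the next (or previous) page when the current one is exhausted, returning NULL once all pages are used up. A deliberately simplified sketch of that shape follows, with invented types and names; it omits buffer pinning, locking, and time qualification, which the real code handles:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct
    {
        int     nitems;
        int    *items;
    } Page;

    /* Hypothetical qualification test standing in for HeapTupleSatisfies(). */
    typedef bool (*QualFunc) (int item);

    /*
     * Scan pages[0..npages-1] in the given direction (+1 forward,
     * -1 reverse) and return the first qualifying item, or NULL when
     * every page has been exhausted.
     */
    static int *
    scan_pages(Page *pages, int npages, int dir, QualFunc qual)
    {
        int     page = (dir < 0) ? npages - 1 : 0;

        while (page >= 0 && page < npages)
        {
            Page   *dp = &pages[page];
            int     lineoff = (dir < 0) ? dp->nitems - 1 : 0;
            int     linesleft = dp->nitems;

            /* advance until a qualifying item is found or the page is done */
            while (linesleft-- > 0)
            {
                if (qual(dp->items[lineoff]))
                    return &dp->items[lineoff];
                lineoff += dir;     /* next or previous item on the page */
            }

            /* exhausted this page; move to the next one in scan order */
            page += dir;
        }
        return NULL;                /* exhausted all the pages */
    }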
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 94dedbf87b9..b4780c208e0 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
+ * $Id: hio.c,v 1.37 2001/03/22 06:16:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,9 +37,8 @@ RelationPutHeapTuple(Relation relation,
ItemId itemId;
Item item;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_RelationPutHeapTuple);
IncrHeapAccessStat(global_RelationPutHeapTuple);
diff --git a/src/backend/access/heap/stats.c b/src/backend/access/heap/stats.c
index 3525833e6b9..6dabf49e341 100644
--- a/src/backend/access/heap/stats.c
+++ b/src/backend/access/heap/stats.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.23 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.24 2001/03/22 06:16:07 momjian Exp $
*
* NOTES
* initam should be moved someplace else.
@@ -37,25 +37,22 @@ InitHeapAccessStatistics()
MemoryContext oldContext;
HeapAccessStatistics stats;
- /* ----------------
- * make sure we don't initialize things twice
- * ----------------
+ /*
+ * make sure we don't initialize things twice
*/
if (heap_access_stats != NULL)
return;
- /* ----------------
- * allocate statistics structure from the top memory context
- * ----------------
+ /*
+ * allocate statistics structure from the top memory context
*/
oldContext = MemoryContextSwitchTo(TopMemoryContext);
stats = (HeapAccessStatistics)
palloc(sizeof(HeapAccessStatisticsData));
- /* ----------------
- * initialize fields to default values
- * ----------------
+ /*
+ * initialize fields to default values
*/
stats->global_open = 0;
stats->global_openr = 0;
@@ -103,17 +100,15 @@ InitHeapAccessStatistics()
stats->local_RelationNameGetRelation = 0;
stats->global_RelationNameGetRelation = 0;
- /* ----------------
- * record init times
- * ----------------
+ /*
+ * record init times
*/
time(&stats->init_global_timestamp);
time(&stats->local_reset_timestamp);
time(&stats->last_request_timestamp);
- /* ----------------
- * return to old memory context
- * ----------------
+ /*
+ * return to old memory context
*/
MemoryContextSwitchTo(oldContext);
@@ -130,18 +125,16 @@ ResetHeapAccessStatistics()
{
HeapAccessStatistics stats;
- /* ----------------
- * do nothing if stats aren't initialized
- * ----------------
+ /*
+ * do nothing if stats aren't initialized
*/
if (heap_access_stats == NULL)
return;
stats = heap_access_stats;
- /* ----------------
- * reset local counts
- * ----------------
+ /*
+ * reset local counts
*/
stats->local_open = 0;
stats->local_openr = 0;
@@ -165,9 +158,8 @@ ResetHeapAccessStatistics()
stats->local_RelationPutHeapTuple = 0;
stats->local_RelationPutLongHeapTuple = 0;
- /* ----------------
- * reset local timestamps
- * ----------------
+ /*
+ * reset local timestamps
*/
time(&stats->local_reset_timestamp);
time(&stats->last_request_timestamp);
@@ -185,22 +177,19 @@ GetHeapAccessStatistics()
{
HeapAccessStatistics stats;
- /* ----------------
- * return nothing if stats aren't initialized
- * ----------------
+ /*
+ * return nothing if stats aren't initialized
*/
if (heap_access_stats == NULL)
return NULL;
- /* ----------------
- * record the current request time
- * ----------------
+ /*
+ * record the current request time
*/
time(&heap_access_stats->last_request_timestamp);
- /* ----------------
- * allocate a copy of the stats and return it to the caller.
- * ----------------
+ /*
+ * allocate a copy of the stats and return it to the caller.
*/
stats = (HeapAccessStatistics)
palloc(sizeof(HeapAccessStatisticsData));
@@ -222,9 +211,9 @@ GetHeapAccessStatistics()
void
PrintHeapAccessStatistics(HeapAccessStatistics stats)
{
- /* ----------------
- * return nothing if stats aren't valid
- * ----------------
+
+ /*
+ * return nothing if stats aren't valid
*/
if (stats == NULL)
return;
@@ -342,9 +331,9 @@ PrintAndFreeHeapAccessStatistics(HeapAccessStatistics stats)
void
initam(void)
{
- /* ----------------
- * initialize heap statistics.
- * ----------------
+
+ /*
+ * initialize heap statistics.
*/
InitHeapAccessStatistics();
}
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index d0e60681e77..c271b08f703 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.19 2001/03/22 06:16:07 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -81,17 +81,18 @@ heap_tuple_fetch_attr(varattrib *attr)
if (VARATT_IS_EXTERNAL(attr))
{
- /* ----------
+
+ /*
* This is an external stored plain value
- * ----------
*/
result = toast_fetch_datum(attr);
}
else
{
- /* ----------
- * This is a plain value inside of the main tuple - why am I called?
- * ----------
+
+ /*
+ * This is a plain value inside of the main tuple - why am I
+ * called?
*/
result = attr;
}
@@ -134,18 +135,18 @@ heap_tuple_untoast_attr(varattrib *attr)
}
else
{
- /* ----------
+
+ /*
* This is an external stored plain value
- * ----------
*/
result = toast_fetch_datum(attr);
}
}
else if (VARATT_IS_COMPRESSED(attr))
{
- /* ----------
+
+ /*
* This is a compressed value inside of the main tuple
- * ----------
*/
result = (varattrib *) palloc(attr->va_content.va_compressed.va_rawsize
+ VARHDRSZ);
@@ -154,9 +155,10 @@ heap_tuple_untoast_attr(varattrib *attr)
pglz_decompress((PGLZ_Header *) attr, VARATT_DATA(result));
}
else
- /* ----------
- * This is a plain value inside of the main tuple - why am I called?
- * ----------
+
+ /*
+ * This is a plain value inside of the main tuple - why am I
+ * called?
*/
return attr;
@@ -180,19 +182,16 @@ toast_delete(Relation rel, HeapTuple oldtup)
Datum value;
bool isnull;
- /* ----------
- * Get the tuple descriptor, the number of and attribute
- * descriptors.
- * ----------
+ /*
+ * Get the tuple descriptor, the number of and attribute descriptors.
*/
tupleDesc = rel->rd_att;
numAttrs = tupleDesc->natts;
att = tupleDesc->attrs;
- /* ----------
- * Check for external stored attributes and delete them
- * from the secondary relation.
- * ----------
+ /*
+ * Check for external stored attributes and delete them from the
+ * secondary relation.
*/
for (i = 0; i < numAttrs; i++)
{
@@ -237,10 +236,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
bool toast_free[MaxHeapAttributeNumber];
bool toast_delold[MaxHeapAttributeNumber];
- /* ----------
- * Get the tuple descriptor, the number of and attribute
- * descriptors and the location of the tuple values.
- * ----------
+ /*
+ * Get the tuple descriptor, the number of and attribute descriptors
+ * and the location of the tuple values.
*/
tupleDesc = rel->rd_att;
numAttrs = tupleDesc->natts;
@@ -266,9 +264,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (oldtup != NULL)
{
- /* ----------
+
+ /*
* For UPDATE get the old and new values of this attribute
- * ----------
*/
old_value = (varattrib *) DatumGetPointer(
heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
@@ -276,10 +274,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
new_value = (varattrib *) DatumGetPointer(toast_values[i]);
- /* ----------
- * If the old value is an external stored one, check if it
- * has changed so we have to delete it later.
- * ----------
+ /*
+ * If the old value is an external stored one, check if it has
+ * changed so we have to delete it later.
*/
if (!old_isnull && att[i]->attlen == -1 &&
VARATT_IS_EXTERNAL(old_value))
@@ -290,21 +287,21 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
old_value->va_content.va_external.va_attno !=
new_value->va_content.va_external.va_attno)
{
- /* ----------
- * The old external store value isn't needed any
- * more after the update
- * ----------
+
+ /*
+ * The old external store value isn't needed any more
+ * after the update
*/
toast_delold[i] = true;
need_delold = true;
}
else
{
- /* ----------
- * This attribute isn't changed by this update
- * so we reuse the original reference to the old
- * value in the new tuple.
- * ----------
+
+ /*
+ * This attribute isn't changed by this update so we
+ * reuse the original reference to the old value in
+ * the new tuple.
*/
toast_action[i] = 'p';
toast_sizes[i] = VARATT_SIZE(toast_values[i]);
@@ -314,17 +311,16 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
- /* ----------
+
+ /*
* For INSERT simply get the new value
- * ----------
*/
toast_values[i] =
heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
}
- /* ----------
+ /*
* Handle NULL attributes
- * ----------
*/
if (new_isnull)
{
@@ -334,24 +330,22 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
continue;
}
- /* ----------
+ /*
* Now look at varsize attributes
- * ----------
*/
if (att[i]->attlen == -1)
{
- /* ----------
+
+ /*
* If the table's attribute says PLAIN always, force it so.
- * ----------
*/
if (att[i]->attstorage == 'p')
toast_action[i] = 'p';
- /* ----------
+ /*
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Expand it to plain (and, probably, toast it again below).
- * ----------
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i])))
{
@@ -362,17 +356,16 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
need_free = true;
}
- /* ----------
+ /*
* Remember the size of this attribute
- * ----------
*/
toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
}
else
{
- /* ----------
+
+ /*
* Not a variable size attribute, plain storage always
- * ----------
*/
toast_action[i] = 'p';
toast_sizes[i] = att[i]->attlen;
@@ -393,9 +386,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
maxDataLen += BITMAPLEN(numAttrs);
maxDataLen = TOAST_TUPLE_TARGET - MAXALIGN(maxDataLen);
- /* ----------
+ /*
* Look for attributes with attstorage 'x' to compress
- * ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen)
@@ -405,9 +397,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
Datum old_value;
Datum new_value;
- /* ----------
+ /*
* Search for the biggest yet uncompressed internal attribute
- * ----------
*/
for (i = 0; i < numAttrs; i++)
{
@@ -427,9 +418,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Attempt to compress it inline
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
@@ -457,10 +447,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
}
- /* ----------
- * Second we look for attributes of attstorage 'x' or 'e' that
- * are still inline.
- * ----------
+ /*
+ * Second we look for attributes of attstorage 'x' or 'e' that are
+ * still inline.
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
@@ -469,10 +458,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
int32 biggest_size = MAXALIGN(sizeof(varattrib));
Datum old_value;
- /* ----------
- * Search for the biggest yet inlined attribute with
- * attstorage = 'x' or 'e'
- * ----------
+ /*
+ * Search for the biggest yet inlined attribute with attstorage =
+ * 'x' or 'e'
*/
for (i = 0; i < numAttrs; i++)
{
@@ -492,9 +480,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Store this external
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
@@ -513,10 +500,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
need_free = true;
}
- /* ----------
- * Round 3 - this time we take attributes with storage
- * 'm' into compression
- * ----------
+ /*
+ * Round 3 - this time we take attributes with storage 'm' into
+ * compression
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen)
@@ -526,9 +512,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
Datum old_value;
Datum new_value;
- /* ----------
+ /*
* Search for the biggest yet uncompressed internal attribute
- * ----------
*/
for (i = 0; i < numAttrs; i++)
{
@@ -548,9 +533,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Attempt to compress it inline
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
@@ -578,9 +562,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
}
- /* ----------
+ /*
* Finally we store attributes of type 'm' external
- * ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
@@ -589,10 +572,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
int32 biggest_size = MAXALIGN(sizeof(varattrib));
Datum old_value;
- /* ----------
- * Search for the biggest yet inlined attribute with
- * attstorage = 'm'
- * ----------
+ /*
+ * Search for the biggest yet inlined attribute with attstorage =
+ * 'm'
*/
for (i = 0; i < numAttrs; i++)
{
@@ -612,9 +594,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Store this external
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
@@ -633,10 +614,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
need_free = true;
}
- /* ----------
- * In the case we toasted any values, we need to build
- * a new heap tuple with the changed values.
- * ----------
+ /*
+ * In the case we toasted any values, we need to build a new heap
+ * tuple with the changed values.
*/
if (need_change)
{
@@ -645,9 +625,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
MemoryContext oldcxt;
HeapTupleHeader olddata;
- /* ----------
+ /*
* Calculate the new size of the tuple
- * ----------
*/
new_len = offsetof(HeapTupleHeaderData, t_bits);
if (has_nulls)
@@ -655,19 +634,17 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
new_len = MAXALIGN(new_len);
new_len += ComputeDataSize(tupleDesc, toast_values, toast_nulls);
- /* ----------
+ /*
* Remember the old memory location of the tuple (for below),
- * switch to the memory context of the HeapTuple structure
- * and allocate the new tuple.
- * ----------
+ * switch to the memory context of the HeapTuple structure and
+ * allocate the new tuple.
*/
olddata = newtup->t_data;
oldcxt = MemoryContextSwitchTo(newtup->t_datamcxt);
new_data = palloc(new_len);
- /* ----------
+ /*
* Put the tuple header and the changed values into place
- * ----------
*/
memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff);
newtup->t_data = (HeapTupleHeader) new_data;
@@ -682,33 +659,29 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
&(newtup->t_data->t_infomask),
has_nulls ? newtup->t_data->t_bits : NULL);
- /* ----------
- * In the case we modified a previously modified tuple again,
- * free the memory from the previous run
- * ----------
+ /*
+ * In the case we modified a previously modified tuple again, free
+ * the memory from the previous run
*/
if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata);
- /* ----------
+ /*
* Switch back to the old memory context
- * ----------
*/
MemoryContextSwitchTo(oldcxt);
}
- /* ----------
+ /*
* Free allocated temp values
- * ----------
*/
if (need_free)
for (i = 0; i < numAttrs; i++)
if (toast_free[i])
pfree(DatumGetPointer(toast_values[i]));
- /* ----------
+ /*
* Delete external values from the old tuple
- * ----------
*/
if (need_delold)
for (i = 0; i < numAttrs; i++)
@@ -776,9 +749,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
char *data_p;
int32 data_todo;
- /* ----------
+ /*
* Create the varattrib reference
- * ----------
*/
result = (varattrib *) palloc(sizeof(varattrib));
@@ -802,9 +774,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
result->va_content.va_external.va_rowid = mainoid;
result->va_content.va_external.va_attno = attno;
- /* ----------
+ /*
* Initialize constant parts of the tuple data
- * ----------
*/
t_values[0] = ObjectIdGetDatum(result->va_content.va_external.va_valueid);
t_values[2] = PointerGetDatum(chunk_data);
@@ -812,36 +783,32 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
t_nulls[1] = ' ';
t_nulls[2] = ' ';
- /* ----------
+ /*
* Get the data to process
- * ----------
*/
data_p = VARATT_DATA(value);
data_todo = VARATT_SIZE(value) - VARHDRSZ;
- /* ----------
+ /*
* Open the toast relation
- * ----------
*/
toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(rel->rd_rel->reltoastidxid);
- /* ----------
+ /*
* Split up the item into chunks
- * ----------
*/
while (data_todo > 0)
{
- /* ----------
+
+ /*
* Calculate the size of this chunk
- * ----------
*/
chunk_size = Min(TOAST_MAX_CHUNK_SIZE, data_todo);
- /* ----------
+ /*
* Build a tuple
- * ----------
*/
t_values[1] = Int32GetDatum(chunk_seq++);
VARATT_SIZEP(chunk_data) = chunk_size + VARHDRSZ;
@@ -850,9 +817,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
if (!HeapTupleIsValid(toasttup))
elog(ERROR, "Failed to build TOAST tuple");
- /* ----------
+ /*
* Store it and create the index entry
- * ----------
*/
heap_insert(toastrel, toasttup);
idxres = index_insert(toastidx, t_values, t_nulls,
@@ -861,24 +827,21 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
if (idxres == NULL)
elog(ERROR, "Failed to insert index entry for TOAST tuple");
- /* ----------
+ /*
* Free memory
- * ----------
*/
heap_freetuple(toasttup);
pfree(idxres);
- /* ----------
+ /*
* Move on to next chunk
- * ----------
*/
data_todo -= chunk_size;
data_p += chunk_size;
}
- /* ----------
+ /*
* Done - close toast relation and return the reference
- * ----------
*/
index_close(toastidx);
heap_close(toastrel, RowExclusiveLock);
@@ -908,17 +871,15 @@ toast_delete_datum(Relation rel, Datum value)
if (!VARATT_IS_EXTERNAL(attr))
return;
- /* ----------
+ /*
* Open the toast relation and it's index
- * ----------
*/
toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
RowExclusiveLock);
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
- /* ----------
+ /*
* Setup a scan key to fetch from the index by va_valueid
- * ----------
*/
ScanKeyEntryInitialize(&toastkey,
(bits16) 0,
@@ -926,9 +887,8 @@ toast_delete_datum(Relation rel, Datum value)
(RegProcedure) F_OIDEQ,
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
- /* ----------
+ /*
* Read the chunks by index
- * ----------
*/
toastscan = index_beginscan(toastidx, false, 1, &toastkey);
while ((indexRes = index_getnext(toastscan, ForwardScanDirection)) != NULL)
@@ -940,18 +900,16 @@ toast_delete_datum(Relation rel, Datum value)
if (!toasttup.t_data)
continue;
- /* ----------
+ /*
* Have a chunk, delete it
- * ----------
*/
simple_heap_delete(toastrel, &toasttup.t_self);
ReleaseBuffer(buffer);
}
- /* ----------
+ /*
* End scan and close relations
- * ----------
*/
index_endscan(toastscan);
index_close(toastidx);
@@ -1003,18 +961,16 @@ toast_fetch_datum(varattrib *attr)
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
- /* ----------
+ /*
* Open the toast relation and it's index
- * ----------
*/
toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
AccessShareLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
- /* ----------
+ /*
* Setup a scan key to fetch from the index by va_valueid
- * ----------
*/
ScanKeyEntryInitialize(&toastkey,
(bits16) 0,
@@ -1022,11 +978,10 @@ toast_fetch_datum(varattrib *attr)
(RegProcedure) F_OIDEQ,
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
- /* ----------
+ /*
* Read the chunks by index
*
* Note we will not necessarily see the chunks in sequence-number order.
- * ----------
*/
toastscan = index_beginscan(toastidx, false, 1, &toastkey);
while ((indexRes = index_getnext(toastscan, ForwardScanDirection)) != NULL)
@@ -1039,9 +994,8 @@ toast_fetch_datum(varattrib *attr)
continue;
ttup = &toasttup;
- /* ----------
+ /*
* Have a chunk, extract the sequence number and the data
- * ----------
*/
residx = DatumGetInt32(heap_getattr(ttup, 2, toasttupDesc, &isnull));
Assert(!isnull);
@@ -1049,9 +1003,8 @@ toast_fetch_datum(varattrib *attr)
Assert(!isnull);
chunksize = VARATT_SIZE(chunk) - VARHDRSZ;
- /* ----------
+ /*
* Some checks on the data we've found
- * ----------
*/
if (residx < 0 || residx >= numchunks)
elog(ERROR, "unexpected chunk number %d for toast value %d",
@@ -1076,9 +1029,8 @@ toast_fetch_datum(varattrib *attr)
residx,
attr->va_content.va_external.va_valueid);
- /* ----------
+ /*
* Copy the data into proper place in our result
- * ----------
*/
memcpy(((char *) VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
VARATT_DATA(chunk),
@@ -1087,9 +1039,8 @@ toast_fetch_datum(varattrib *attr)
ReleaseBuffer(buffer);
}
- /* ----------
+ /*
* Final checks that we successfully fetched the datum
- * ----------
*/
if (memcmp(chunks_found, chunks_expected, numchunks) != 0)
elog(ERROR, "not all toast chunks found for value %d",
@@ -1097,9 +1048,8 @@ toast_fetch_datum(varattrib *attr)
pfree(chunks_expected);
pfree(chunks_found);
- /* ----------
+ /*
* End scan and close relations
- * ----------
*/
index_endscan(toastscan);
index_close(toastidx);
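
The toast_insert_or_update() hunks above describe a series of rounds: while the projected tuple is still too large, the code searches for the biggest remaining candidate attribute and either compresses it inline or moves it out of line, handling attstorage 'x'/'e' attributes before 'm' ones. A toy sketch of one such greedy shrink pass is given below under invented types; compress_value() is a stand-in rather than pglz, and the size accounting is deliberately simplistic:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct
    {
        size_t  size;           /* current stored size of this attribute */
        bool    compressed;     /* already compressed inline? */
        bool    compressible;   /* attstorage would allow compression */
    } Attr;

    /* Hypothetical stand-in for inline compression (pglz in the backend). */
    static size_t
    compress_value(size_t rawsize)
    {
        return rawsize / 2 + 1; /* pretend compression always halves it */
    }

    /*
     * Greedy pass: while the summed attribute sizes exceed max_len, find
     * the biggest attribute that is still uncompressed but compressible
     * and compress it; stop when the tuple fits or nothing is left to try.
     * The real code runs several such passes (compress 'x', store 'x'/'e'
     * external, then do the same for 'm').
     */
    static void
    shrink_tuple(Attr *atts, int natts, size_t max_len)
    {
        for (;;)
        {
            size_t  total = 0;
            size_t  biggest_size = 0;
            int     biggest = -1;
            int     i;

            for (i = 0; i < natts; i++)
            {
                total += atts[i].size;
                if (!atts[i].compressed && atts[i].compressible &&
                    atts[i].size > biggest_size)
                {
                    biggest = i;
                    biggest_size = atts[i].size;
                }
            }
            if (total <= max_len || biggest < 0)
                break;          /* small enough, or nothing left to shrink */

            atts[biggest].size = compress_value(atts[biggest].size);
            atts[biggest].compressed = true;
        }
    }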
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 89c11aa256c..b48ef923652 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.47 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.48 2001/03/22 06:16:07 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relationId
@@ -190,9 +190,8 @@ index_insert(Relation relation,
RELATION_CHECKS;
GET_REL_PROCEDURE(insert, aminsert);
- /* ----------------
- * have the am's insert proc do all the work.
- * ----------------
+ /*
+ * have the am's insert proc do all the work.
*/
specificResult = (InsertIndexResult)
DatumGetPointer(OidFunctionCall5(procedure,
@@ -241,13 +240,12 @@ index_beginscan(Relation relation,
RelationIncrementReferenceCount(relation);
- /* ----------------
- * Acquire AccessShareLock for the duration of the scan
+ /*
+ * Acquire AccessShareLock for the duration of the scan
*
- * Note: we could get an SI inval message here and consequently have
- * to rebuild the relcache entry. The refcount increment above
- * ensures that we will rebuild it and not just flush it...
- * ----------------
+ * Note: we could get an SI inval message here and consequently have to
+ * rebuild the relcache entry. The refcount increment above ensures
+ * that we will rebuild it and not just flush it...
*/
LockRelation(relation, AccessShareLock);
@@ -347,9 +345,8 @@ index_getnext(IndexScanDesc scan,
SCAN_CHECKS;
- /* ----------------
- * Look up the access procedure only once per scan.
- * ----------------
+ /*
+ * Look up the access procedure only once per scan.
*/
if (scan->fn_getnext.fn_oid == InvalidOid)
{
@@ -359,9 +356,8 @@ index_getnext(IndexScanDesc scan,
fmgr_info(procedure, &scan->fn_getnext);
}
- /* ----------------
- * have the am's gettuple proc do all the work.
- * ----------------
+ /*
+ * have the am's gettuple proc do all the work.
*/
result = (RetrieveIndexResult)
DatumGetPointer(FunctionCall2(&scan->fn_getnext,
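
The index_getnext comments above capture a simple pattern: the access method's procedure is looked up only once per scan (via fmgr_info into scan->fn_getnext) and every later call goes through the cached function. A standalone sketch of that "resolve once, then call through the cached pointer" idea (all names here are invented; this is not the fmgr machinery itself):

/*
 * Sketch of the "look up the access procedure only once per scan"
 * pattern: the first call resolves a function pointer and caches it
 * in the scan handle; subsequent calls reuse it.
 */
#include <stdio.h>
#include <stddef.h>

typedef int (*getnext_fn) (int position);

struct scan_state
{
    getnext_fn  fn_getnext;     /* cached per-scan lookup, NULL until resolved */
    int         position;
};

static int
btree_getnext(int position)
{
    /* stand-in for the real AM's gettuple procedure */
    return position < 3 ? position * 10 : -1;
}

static getnext_fn
lookup_am_procedure(void)
{
    /* stand-in for the catalog lookup + fmgr_info step */
    return btree_getnext;
}

static int
scan_getnext(struct scan_state *scan)
{
    if (scan->fn_getnext == NULL)       /* resolve only once per scan */
        scan->fn_getnext = lookup_am_procedure();
    return scan->fn_getnext(scan->position++);
}

int
main(void)
{
    struct scan_state scan = {NULL, 0};
    int         val;

    while ((val = scan_getnext(&scan)) >= 0)
        printf("got %d\n", val);
    return 0;
}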
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index d8b8e0682a0..e7ea3643531 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.65 2001/03/22 06:16:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -238,21 +238,19 @@ _bt_binsrch(Relation rel,
high = mid;
}
- /*--------------------
+ /*
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
- * On a leaf page, we always return the first key >= scan key
- * (which could be the last slot + 1).
- *--------------------
+ * On a leaf page, we always return the first key >= scan key (which
+ * could be the last slot + 1).
*/
if (P_ISLEAF(opaque))
return low;
- /*--------------------
- * On a non-leaf page, return the last key < scan key.
- * There must be one if _bt_compare() is playing by the rules.
- *--------------------
+ /*
+ * On a non-leaf page, return the last key < scan key. There must be
+ * one if _bt_compare() is playing by the rules.
*/
Assert(low > P_FIRSTDATAKEY(opaque));
@@ -584,21 +582,20 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ItemPointerSet(current, blkno, offnum);
- /*----------
- * At this point we are positioned at the first item >= scan key,
- * or possibly at the end of a page on which all the existing items
- * are < scan key and we know that everything on later pages is
- * >= scan key. We could step forward in the latter case, but that'd
- * be a waste of time if we want to scan backwards. So, it's now time to
- * examine the scan strategy to find the exact place to start the scan.
+ /*
+ * At this point we are positioned at the first item >= scan key, or
+ * possibly at the end of a page on which all the existing items are <
+ * scan key and we know that everything on later pages is >= scan key.
+ * We could step forward in the latter case, but that'd be a waste of
+ * time if we want to scan backwards. So, it's now time to examine
+ * the scan strategy to find the exact place to start the scan.
*
- * Note: if _bt_step fails (meaning we fell off the end of the index
- * in one direction or the other), we either return NULL (no matches) or
- * call _bt_endpoint() to set up a scan starting at that index endpoint,
- * as appropriate for the desired scan type.
+ * Note: if _bt_step fails (meaning we fell off the end of the index in
+ * one direction or the other), we either return NULL (no matches) or
+ * call _bt_endpoint() to set up a scan starting at that index
+ * endpoint, as appropriate for the desired scan type.
*
* it's yet another place to add some code later for is(not)null ...
- *----------
*/
switch (strat_total)
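
The _bt_binsrch comments above describe the convergence of its binary search: the loop ends with high == low, which on a leaf page is the first slot >= the scan key (possibly one past the last slot), while a non-leaf page wants the last slot < the scan key, i.e. one position earlier. A standalone sketch of that lower-bound search over a plain sorted array (the array and helper name are made up; this is not the btree code):

/*
 * Lower-bound binary search: "low" ends at the first element >= target
 * (possibly nkeys, one past the end); the last element < target is
 * therefore at low - 1.
 */
#include <stdio.h>

static int
binsrch_lower_bound(const int *keys, int nkeys, int target)
{
    int         low = 0;
    int         high = nkeys;

    while (high > low)
    {
        int         mid = low + (high - low) / 2;

        if (keys[mid] < target)
            low = mid + 1;
        else
            high = mid;
    }
    return low;                 /* first slot >= target, or nkeys */
}

int
main(void)
{
    int         keys[] = {10, 20, 20, 30, 40};
    int         pos = binsrch_lower_bound(keys, 5, 25);

    printf("first slot >= 25: %d\n", pos);      /* leaf-page style answer */
    printf("last slot  < 25: %d\n", pos - 1);   /* non-leaf-page style answer */
    return 0;
}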
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 29e72e84175..0f106539704 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.43 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -124,30 +124,25 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
XidStatus xidstatus; /* recorded status of xid */
bool fail = false; /* success/failure */
- /* ----------------
- * during initialization consider all transactions
- * as having been committed
- * ----------------
+ /*
+ * during initialization consider all transactions as having been
+ * committed
*/
if (!RelationIsValid(LogRelation))
return (bool) (status == XID_COMMIT);
- /* ----------------
- * before going to the buffer manager, check our single
- * item cache to see if we didn't just check the transaction
- * status a moment ago.
- * ----------------
+ /*
+ * before going to the buffer manager, check our single item cache to
+ * see if we didn't just check the transaction status a moment ago.
*/
if (TransactionIdEquals(transactionId, cachedTestXid))
return (bool)
(status == cachedTestXidStatus);
- /* ----------------
- * compute the item pointer corresponding to the
- * page containing our transaction id. We save the item in
- * our cache to speed up things if we happen to ask for the
- * same xid's status more than once.
- * ----------------
+ /*
+ * compute the item pointer corresponding to the page containing our
+ * transaction id. We save the item in our cache to speed up things
+ * if we happen to ask for the same xid's status more than once.
*/
TransComputeBlockNumber(LogRelation, transactionId, &blockNumber);
xidstatus = TransBlockNumberGetXidStatus(LogRelation,
@@ -169,9 +164,8 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
return (bool) (status == xidstatus);
}
- /* ----------------
- * here the block didn't contain the information we wanted
- * ----------------
+ /*
+ * here the block didn't contain the information we wanted
*/
elog(ERROR, "TransactionLogTest: failed to get xidstatus");
@@ -192,16 +186,14 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
BlockNumber blockNumber;
bool fail = false; /* success/failure */
- /* ----------------
- * during initialization we don't record any updates.
- * ----------------
+ /*
+ * during initialization we don't record any updates.
*/
if (!RelationIsValid(LogRelation))
return;
- /* ----------------
- * update the log relation
- * ----------------
+ /*
+ * update the log relation
*/
TransComputeBlockNumber(LogRelation, transactionId, &blockNumber);
TransBlockNumberSetXidStatus(LogRelation,
@@ -292,43 +284,38 @@ static void
TransRecover(Relation logRelation)
{
#ifdef NOT_USED
- /* ----------------
- * first get the last recorded transaction in the log.
- * ----------------
+
+ /*
+ * first get the last recorded transaction in the log.
*/
TransGetLastRecordedTransaction(logRelation, logLastXid, &fail);
if (fail == true)
elog(ERROR, "TransRecover: failed TransGetLastRecordedTransaction");
- /* ----------------
- * next get the "last" and "next" variables
- * ----------------
+ /*
+ * next get the "last" and "next" variables
*/
VariableRelationGetLastXid(&varLastXid);
VariableRelationGetNextXid(&varNextXid);
- /* ----------------
- * intregity test (1)
- * ----------------
+ /*
+ * integrity test (1)
*/
if (TransactionIdIsLessThan(varNextXid, logLastXid))
elog(ERROR, "TransRecover: varNextXid < logLastXid");
- /* ----------------
- * intregity test (2)
- * ----------------
+ /*
+ * integrity test (2)
*/
- /* ----------------
- * intregity test (3)
- * ----------------
+ /*
+ * integrity test (3)
*/
- /* ----------------
- * here we have a valid "
+ /*
+ * here we have a valid "
*
- * **** RESUME HERE ****
- * ----------------
+ **** RESUME HERE ****
*/
varNextXid = TransactionIdDup(varLastXid);
TransactionIdIncrement(&varNextXid);
@@ -375,51 +362,45 @@ InitializeTransactionLog(void)
Relation logRelation;
MemoryContext oldContext;
- /* ----------------
- * don't do anything during bootstrapping
- * ----------------
+ /*
+ * don't do anything during bootstrapping
*/
if (AMI_OVERRIDE)
return;
- /* ----------------
- * disable the transaction system so the access methods
- * don't interfere during initialization.
- * ----------------
+ /*
+ * disable the transaction system so the access methods don't
+ * interfere during initialization.
*/
OverrideTransactionSystem(true);
- /* ----------------
- * make sure allocations occur within the top memory context
- * so that our log management structures are protected from
- * garbage collection at the end of every transaction.
- * ----------------
+ /*
+ * make sure allocations occur within the top memory context so that
+ * our log management structures are protected from garbage collection
+ * at the end of every transaction.
*/
oldContext = MemoryContextSwitchTo(TopMemoryContext);
- /* ----------------
- * first open the log and time relations
- * (these are created by amiint so they are guaranteed to exist)
- * ----------------
+ /*
+ * first open the log and time relations (these are created by amiint
+ * so they are guaranteed to exist)
*/
logRelation = heap_openr(LogRelationName, NoLock);
VariableRelation = heap_openr(VariableRelationName, NoLock);
- /* ----------------
- * XXX TransactionLogUpdate requires that LogRelation
- * is valid so we temporarily set it so we can initialize
- * things properly. This could be done cleaner.
- * ----------------
+ /*
+ * XXX TransactionLogUpdate requires that LogRelation is valid so we
+ * temporarily set it so we can initialize things properly. This could
+ * be done cleaner.
*/
LogRelation = logRelation;
- /* ----------------
- * if we have a virgin database, we initialize the log
- * relation by committing the AmiTransactionId (id 512) and we
- * initialize the variable relation by setting the next available
- * transaction id to FirstTransactionId (id 514). OID initialization
- * happens as a side effect of bootstrapping in varsup.c.
- * ----------------
+ /*
+ * if we have a virgin database, we initialize the log relation by
+ * committing the AmiTransactionId (id 512) and we initialize the
+ * variable relation by setting the next available transaction id to
+ * FirstTransactionId (id 514). OID initialization happens as a side
+ * effect of bootstrapping in varsup.c.
*/
SpinAcquire(OidGenLockId);
if (!TransactionIdDidCommit(AmiTransactionId))
@@ -433,33 +414,30 @@ InitializeTransactionLog(void)
}
else if (RecoveryCheckingEnabled())
{
- /* ----------------
- * if we have a pre-initialized database and if the
- * perform recovery checking flag was passed then we
- * do our database integrity checking.
- * ----------------
+
+ /*
+ * if we have a pre-initialized database and if the perform
+ * recovery checking flag was passed then we do our database
+ * integrity checking.
*/
TransRecover(logRelation);
}
LogRelation = (Relation) NULL;
SpinRelease(OidGenLockId);
- /* ----------------
- * now re-enable the transaction system
- * ----------------
+ /*
+ * now re-enable the transaction system
*/
OverrideTransactionSystem(false);
- /* ----------------
- * instantiate the global variables
- * ----------------
+ /*
+ * instantiate the global variables
*/
LogRelation = logRelation;
- /* ----------------
- * restore the memory context to the previous context
- * before we return from initialization.
- * ----------------
+ /*
+ * restore the memory context to the previous context before we return
+ * from initialization.
*/
MemoryContextSwitchTo(oldContext);
}
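
The TransactionLogTest comments above describe a one-entry cache (cachedTestXid / cachedTestXidStatus): before going to the buffer manager, the routine checks whether the same xid was just looked up, and only on a miss does it compute the block number and read the log page. A standalone sketch of that single-item-cache pattern (the fetch is a stub and the status codes are invented; this is not the transam code):

/*
 * Single-item status cache: remember the last xid looked up and its
 * status so an immediate repeat lookup skips the expensive fetch.
 */
#include <stdio.h>

typedef unsigned int xid_t;

static xid_t cached_xid = 0;
static int cached_status = 0;

static int
fetch_status_from_log(xid_t xid)
{
    /* stand-in for computing the block number and reading the buffer */
    printf("fetching status of xid %u from the log\n", xid);
    return (xid % 2) ? 1 : 2;   /* pretend: odd = committed, even = aborted */
}

static int
xid_status(xid_t xid)
{
    if (xid != 0 && xid == cached_xid)
        return cached_status;   /* hit: no log access needed */

    cached_status = fetch_status_from_log(xid);
    cached_xid = xid;
    return cached_status;
}

int
main(void)
{
    printf("status %d\n", xid_status(515));
    printf("status %d\n", xid_status(515));     /* served from the cache */
    printf("status %d\n", xid_status(516));
    return 0;
}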
diff --git a/src/backend/access/transam/transsup.c b/src/backend/access/transam/transsup.c
index c433506eae6..0a44a018a9a 100644
--- a/src/backend/access/transam/transsup.c
+++ b/src/backend/access/transam/transsup.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.30 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* This file contains support functions for the high
@@ -56,11 +56,9 @@ TransComputeBlockNumber(Relation relation, /* relation to test */
{
long itemsPerBlock = 0;
- /* ----------------
- * we calculate the block number of our transaction
- * by dividing the transaction id by the number of
- * transaction things per block.
- * ----------------
+ /*
+ * we calculate the block number of our transaction by dividing the
+ * transaction id by the number of transaction things per block.
*/
if (relation == LogRelation)
itemsPerBlock = TP_NumXidStatusPerBlock;
@@ -109,18 +107,16 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
BitIndex offset;
XidStatus xstatus;
- /* ----------------
- * sanity check
- * ----------------
+ /*
+ * sanity check
*/
Assert((tblock != NULL));
- /* ----------------
- * search downward from the top of the block data, looking
- * for the first Non-in progress transaction status. Since we
- * are scanning backward, this will be last recorded transaction
- * status on the block.
- * ----------------
+ /*
+ * search downward from the top of the block data, looking for the
+ * first Non-in progress transaction status. Since we are scanning
+ * backward, this will be the last recorded transaction status on the
+ * block.
*/
maxIndex = TP_NumXidStatusPerBlock;
for (index = maxIndex; index > 0; index--)
@@ -131,11 +127,10 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
xstatus = (bit1 | bit2);
- /* ----------------
- * here we have the status of some transaction, so test
- * if the status is recorded as "in progress". If so, then
- * we save the transaction id in the place specified by the caller.
- * ----------------
+ /*
+ * here we have the status of some transaction, so test if the
+ * status is recorded as "in progress". If so, then we save the
+ * transaction id in the place specified by the caller.
*/
if (xstatus != XID_INPROGRESS)
{
@@ -148,12 +143,11 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
}
}
- /* ----------------
- * if we get here and index is 0 it means we couldn't find
- * a non-inprogress transaction on the block. For now we just
- * return this info to the user. They can check if the return
- * status is "in progress" to know this condition has arisen.
- * ----------------
+ /*
+ * if we get here and index is 0 it means we couldn't find a
+ * non-inprogress transaction on the block. For now we just return
+ * this info to the user. They can check if the return status is "in
+ * progress" to know this condition has arisen.
*/
if (index == 0)
{
@@ -161,9 +155,8 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
TransactionIdStore(baseXid, returnXidP);
}
- /* ----------------
- * return the status to the user
- * ----------------
+ /*
+ * return the status to the user
*/
return xstatus;
}
@@ -200,17 +193,15 @@ TransBlockGetXidStatus(Block tblock,
*/
index = transactionId % TP_NumXidStatusPerBlock;
- /* ----------------
- * get the data at the specified index
- * ----------------
+ /*
+ * get the data at the specified index
*/
offset = BitIndexOf(index);
bit1 = ((bits8) BitArrayBitIsSet((BitArray) tblock, offset++)) << 1;
bit2 = (bits8) BitArrayBitIsSet((BitArray) tblock, offset);
- /* ----------------
- * return the transaction status to the caller
- * ----------------
+ /*
+ * return the transaction status to the caller
*/
return (XidStatus) (bit1 | bit2);
}
@@ -245,9 +236,8 @@ TransBlockSetXidStatus(Block tblock,
offset = BitIndexOf(index);
- /* ----------------
- * store the transaction value at the specified offset
- * ----------------
+ /*
+ * store the transaction value at the specified offset
*/
switch (xstatus)
{
@@ -291,18 +281,16 @@ TransBlockNumberGetXidStatus(Relation relation,
XidStatus xstatus; /* recorded status of xid */
bool localfail; /* bool used if failP = NULL */
- /* ----------------
- * get the page containing the transaction information
- * ----------------
+ /*
+ * get the page containing the transaction information
*/
buffer = ReadBuffer(relation, blockNumber);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
block = BufferGetBlock(buffer);
- /* ----------------
- * get the status from the block. note, for now we always
- * return false in failP.
- * ----------------
+ /*
+ * get the status from the block. note, for now we always return
+ * false in failP.
*/
if (failP == NULL)
failP = &localfail;
@@ -310,9 +298,8 @@ TransBlockNumberGetXidStatus(Relation relation,
xstatus = TransBlockGetXidStatus(block, xid);
- /* ----------------
- * release the buffer and return the status
- * ----------------
+ /*
+ * release the buffer and return the status
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
@@ -335,19 +322,17 @@ TransBlockNumberSetXidStatus(Relation relation,
Block block; /* block containing xstatus */
bool localfail; /* bool used if failP = NULL */
- /* ----------------
- * get the block containing the transaction status
- * ----------------
+ /*
+ * get the block containing the transaction status
*/
buffer = ReadBuffer(relation, blockNumber);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
block = BufferGetBlock(buffer);
- /* ----------------
- * attempt to update the status of the transaction on the block.
- * if we are successful, write the block. otherwise release the buffer.
- * note, for now we always return false in failP.
- * ----------------
+ /*
+ * attempt to update the status of the transaction on the block. if we
+ * are successful, write the block. otherwise release the buffer.
+ * note, for now we always return false in failP.
*/
if (failP == NULL)
failP = &localfail;
@@ -381,22 +366,20 @@ TransGetLastRecordedTransaction(Relation relation,
(*failP) = false;
- /* ----------------
- * SOMEDAY gain exclusive access to the log relation
+ /*
+ * SOMEDAY gain exclusive access to the log relation
*
- * That someday is today 5 Aug. 1991 -mer
- * It looks to me like we only need to set a read lock here, despite
- * the above comment about exclusive access. The block is never
- * actually written into, we only check status bits.
- * ----------------
+ * That someday is today 5 Aug. 1991 -mer.  It looks to me like we only
+ * need to set a read lock here, despite the above comment about
+ * exclusive access. The block is never actually written into, we
+ * only check status bits.
*/
RelationSetLockForRead(relation);
- /* ----------------
- * we assume the last block of the log contains the last
- * recorded transaction. If the relation is empty we return
- * failure to the user.
- * ----------------
+ /*
+ * we assume the last block of the log contains the last recorded
+ * transaction. If the relation is empty we return failure to the
+ * user.
*/
n = RelationGetNumberOfBlocks(relation);
if (n == 0)
@@ -405,17 +388,15 @@ TransGetLastRecordedTransaction(Relation relation,
return;
}
- /* ----------------
- * get the block containing the transaction information
- * ----------------
+ /*
+ * get the block containing the transaction information
*/
blockNumber = n - 1;
buffer = ReadBuffer(relation, blockNumber);
block = BufferGetBlock(buffer);
- /* ----------------
- * get the last xid on the block
- * ----------------
+ /*
+ * get the last xid on the block
*/
baseXid = blockNumber * TP_NumXidStatusPerBlock;
@@ -424,9 +405,8 @@ TransGetLastRecordedTransaction(Relation relation,
ReleaseBuffer(buffer);
- /* ----------------
- * SOMEDAY release our lock on the log relation
- * ----------------
+ /*
+ * SOMEDAY release our lock on the log relation
*/
RelationUnsetLockForRead(relation);
}
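
The transsup.c comments above describe the addressing scheme: a transaction's block number is its id divided by the number of statuses per block (TP_NumXidStatusPerBlock), and within a block the status is two bits at an offset derived from xid modulo that count. A standalone sketch of the same arithmetic (the 64-byte "block" and helper names are made up; real blocks are disk pages accessed through the buffer manager):

/*
 * Two status bits per xid: a block holds four statuses per byte, the
 * block number is xid / statuses-per-block, and the bit position within
 * the block is (xid % statuses-per-block) * 2.
 */
#include <stdio.h>

#define BLOCK_BYTES 64
#define XIDS_PER_BLOCK (BLOCK_BYTES * 4)        /* 2 bits per xid */

static unsigned char block[BLOCK_BYTES];

static void
set_status(unsigned int xid, unsigned int status)       /* status is 0..3 */
{
    unsigned int index = xid % XIDS_PER_BLOCK;
    unsigned int byte = index / 4;
    unsigned int shift = (index % 4) * 2;

    block[byte] &= ~(3u << shift);
    block[byte] |= (status & 3u) << shift;
}

static unsigned int
get_status(unsigned int xid)
{
    unsigned int index = xid % XIDS_PER_BLOCK;

    return (block[index / 4] >> ((index % 4) * 2)) & 3u;
}

int
main(void)
{
    unsigned int xid = 517;

    printf("xid %u lives in block %u\n", xid, xid / XIDS_PER_BLOCK);
    set_status(xid, 2);
    printf("status of xid %u is %u\n", xid, get_status(xid));
    return 0;
}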
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 6a8e6c0639f..c88e665a7ec 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.101 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -396,17 +396,15 @@ GetCurrentTransactionId(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" transaction id.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" transaction id.
*/
if (s->state == TRANS_DISABLED)
return (TransactionId) DisabledTransactionId;
- /* ----------------
- * otherwise return the current transaction id.
- * ----------------
+ /*
+ * otherwise return the current transaction id.
*/
return (TransactionId) s->transactionIdData;
}
@@ -421,10 +419,9 @@ GetCurrentCommandId(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" command id.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" command id.
*/
if (s->state == TRANS_DISABLED)
return (CommandId) DisabledCommandId;
@@ -437,10 +434,9 @@ GetScanCommandId(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" command id.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" command id.
*/
if (s->state == TRANS_DISABLED)
return (CommandId) DisabledCommandId;
@@ -458,10 +454,9 @@ GetCurrentTransactionStartTime(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" starting time.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" starting time.
*/
if (s->state == TRANS_DISABLED)
return (AbsoluteTime) DisabledStartTime;
@@ -608,16 +603,15 @@ AtStart_Locks(void)
static void
AtStart_Memory(void)
{
- /* ----------------
- * We shouldn't have any transaction contexts already.
- * ----------------
+
+ /*
+ * We shouldn't have any transaction contexts already.
*/
Assert(TopTransactionContext == NULL);
Assert(TransactionCommandContext == NULL);
- /* ----------------
- * Create a toplevel context for the transaction.
- * ----------------
+ /*
+ * Create a toplevel context for the transaction.
*/
TopTransactionContext =
AllocSetContextCreate(TopMemoryContext,
@@ -626,9 +620,8 @@ AtStart_Memory(void)
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
- /* ----------------
- * Create a statement-level context and make it active.
- * ----------------
+ /*
+ * Create a statement-level context and make it active.
*/
TransactionCommandContext =
AllocSetContextCreate(TopTransactionContext,
@@ -732,9 +725,9 @@ RecordTransactionCommit()
static void
AtCommit_Cache(void)
{
- /* ----------------
+
+ /*
* Make catalog changes visible to all backends.
- * ----------------
*/
RegisterInvalid(true);
}
@@ -746,9 +739,9 @@ AtCommit_Cache(void)
static void
AtCommit_LocalCache(void)
{
- /* ----------------
+
+ /*
* Make catalog changes visible to me for the next command.
- * ----------------
*/
ImmediateLocalInvalidation(true);
}
@@ -760,11 +753,11 @@ AtCommit_LocalCache(void)
static void
AtCommit_Locks(void)
{
- /* ----------------
- * XXX What if ProcReleaseLocks fails? (race condition?)
+
+ /*
+ * XXX What if ProcReleaseLocks fails? (race condition?)
*
- * Then you're up a creek! -mer 5/24/92
- * ----------------
+ * Then you're up a creek! -mer 5/24/92
*/
ProcReleaseLocks(true);
}
@@ -776,17 +769,16 @@ AtCommit_Locks(void)
static void
AtCommit_Memory(void)
{
- /* ----------------
- * Now that we're "out" of a transaction, have the
- * system allocate things in the top memory context instead
- * of per-transaction contexts.
- * ----------------
+
+ /*
+ * Now that we're "out" of a transaction, have the system allocate
+ * things in the top memory context instead of per-transaction
+ * contexts.
*/
MemoryContextSwitchTo(TopMemoryContext);
- /* ----------------
- * Release all transaction-local memory.
- * ----------------
+ /*
+ * Release all transaction-local memory.
*/
Assert(TopTransactionContext != NULL);
MemoryContextDelete(TopTransactionContext);
@@ -862,11 +854,11 @@ AtAbort_Cache(void)
static void
AtAbort_Locks(void)
{
- /* ----------------
- * XXX What if ProcReleaseLocks() fails? (race condition?)
+
+ /*
+ * XXX What if ProcReleaseLocks() fails? (race condition?)
*
- * Then you're up a creek without a paddle! -mer
- * ----------------
+ * Then you're up a creek without a paddle! -mer
*/
ProcReleaseLocks(false);
}
@@ -879,21 +871,20 @@ AtAbort_Locks(void)
static void
AtAbort_Memory(void)
{
- /* ----------------
- * Make sure we are in a valid context (not a child of
- * TransactionCommandContext...). Note that it is possible
- * for this code to be called when we aren't in a transaction
- * at all; go directly to TopMemoryContext in that case.
- * ----------------
+
+ /*
+ * Make sure we are in a valid context (not a child of
+ * TransactionCommandContext...). Note that it is possible for this
+ * code to be called when we aren't in a transaction at all; go
+ * directly to TopMemoryContext in that case.
*/
if (TransactionCommandContext != NULL)
{
MemoryContextSwitchTo(TransactionCommandContext);
- /* ----------------
- * We do not want to destroy transaction contexts yet,
- * but it should be OK to delete any command-local memory.
- * ----------------
+ /*
+ * We do not want to destroy transaction contexts yet, but it
+ * should be OK to delete any command-local memory.
*/
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
}
@@ -914,17 +905,16 @@ AtAbort_Memory(void)
static void
AtCleanup_Memory(void)
{
- /* ----------------
- * Now that we're "out" of a transaction, have the
- * system allocate things in the top memory context instead
- * of per-transaction contexts.
- * ----------------
+
+ /*
+ * Now that we're "out" of a transaction, have the system allocate
+ * things in the top memory context instead of per-transaction
+ * contexts.
*/
MemoryContextSwitchTo(TopMemoryContext);
- /* ----------------
- * Release all transaction-local memory.
- * ----------------
+ /*
+ * Release all transaction-local memory.
*/
if (TopTransactionContext != NULL)
MemoryContextDelete(TopTransactionContext);
@@ -951,61 +941,54 @@ StartTransaction(void)
FreeXactSnapshot();
XactIsoLevel = DefaultXactIsoLevel;
- /* ----------------
- * Check the current transaction state. If the transaction system
- * is switched off, or if we're already in a transaction, do nothing.
- * We're already in a transaction when the monitor sends a null
- * command to the backend to flush the comm channel. This is a
- * hacky fix to a communications problem, and we keep having to
- * deal with it here. We should fix the comm channel code. mao 080891
- * ----------------
+ /*
+ * Check the current transaction state. If the transaction system is
+ * switched off, or if we're already in a transaction, do nothing.
+ * We're already in a transaction when the monitor sends a null
+ * command to the backend to flush the comm channel. This is a hacky
+ * fix to a communications problem, and we keep having to deal with it
+ * here. We should fix the comm channel code. mao 080891
*/
if (s->state == TRANS_DISABLED || s->state == TRANS_INPROGRESS)
return;
- /* ----------------
- * set the current transaction state information
- * appropriately during start processing
- * ----------------
+ /*
+ * set the current transaction state information appropriately during
+ * start processing
*/
s->state = TRANS_START;
SetReindexProcessing(false);
- /* ----------------
- * generate a new transaction id
- * ----------------
+ /*
+ * generate a new transaction id
*/
GetNewTransactionId(&(s->transactionIdData));
XactLockTableInsert(s->transactionIdData);
- /* ----------------
- * initialize current transaction state fields
- * ----------------
+ /*
+ * initialize current transaction state fields
*/
s->commandId = FirstCommandId;
s->scanCommandId = FirstCommandId;
s->startTime = GetCurrentAbsoluteTime();
- /* ----------------
- * initialize the various transaction subsystems
- * ----------------
+ /*
+ * initialize the various transaction subsystems
*/
AtStart_Memory();
AtStart_Cache();
AtStart_Locks();
- /* ----------------
- * Tell the trigger manager to we're starting a transaction
- * ----------------
+ /*
+ * Tell the trigger manager that we're starting a transaction
*/
DeferredTriggerBeginXact();
- /* ----------------
- * done with start processing, set current transaction
- * state to "in progress"
- * ----------------
+ /*
+ * done with start processing, set current transaction state to "in
+ * progress"
*/
s->state = TRANS_INPROGRESS;
@@ -1034,9 +1017,8 @@ CommitTransaction(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
@@ -1047,24 +1029,21 @@ CommitTransaction(void)
/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
- /* ----------------
- * Tell the trigger manager that this transaction is about to be
- * committed. He'll invoke all trigger deferred until XACT before
- * we really start on committing the transaction.
- * ----------------
+ /*
+ * Tell the trigger manager that this transaction is about to be
+ * committed. He'll invoke all triggers deferred until XACT before we
+ * really start on committing the transaction.
*/
DeferredTriggerEndXact();
- /* ----------------
- * set the current transaction state information
- * appropriately during the abort processing
- * ----------------
+ /*
+ * set the current transaction state information appropriately during
+ * the commit processing
*/
s->state = TRANS_COMMIT;
- /* ----------------
- * do commit processing
- * ----------------
+ /*
+ * do commit processing
*/
/* handle commit for large objects [ PA, 7/17/98 ] */
@@ -1109,10 +1088,9 @@ CommitTransaction(void)
SharedBufferChanged = false;/* safest place to do it */
- /* ----------------
- * done with commit processing, set current transaction
- * state back to default
- * ----------------
+ /*
+ * done with commit processing, set current transaction state back to
+ * default
*/
s->state = TRANS_DEFAULT;
@@ -1157,9 +1135,8 @@ AbortTransaction(void)
*/
LockWaitCancel();
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
{
@@ -1170,10 +1147,9 @@ AbortTransaction(void)
if (s->state != TRANS_INPROGRESS)
elog(NOTICE, "AbortTransaction and not in in-progress state");
- /* ----------------
- * set the current transaction state information
- * appropriately during the abort processing
- * ----------------
+ /*
+ * set the current transaction state information appropriately during
+ * the abort processing
*/
s->state = TRANS_ABORT;
@@ -1182,9 +1158,8 @@ AbortTransaction(void)
*/
SetUserId(GetSessionUserId());
- /* ----------------
- * do abort processing
- * ----------------
+ /*
+ * do abort processing
*/
DeferredTriggerAbortXact();
lo_commit(false); /* 'false' means it's abort */
@@ -1207,9 +1182,8 @@ AbortTransaction(void)
SharedBufferChanged = false;/* safest place to do it */
- /* ----------------
- * State remains TRANS_ABORT until CleanupTransaction().
- * ----------------
+ /*
+ * State remains TRANS_ABORT until CleanupTransaction().
*/
RESUME_INTERRUPTS();
}
@@ -1227,23 +1201,20 @@ CleanupTransaction(void)
if (s->state == TRANS_DISABLED)
return;
- /* ----------------
- * State should still be TRANS_ABORT from AbortTransaction().
- * ----------------
+ /*
+ * State should still be TRANS_ABORT from AbortTransaction().
*/
if (s->state != TRANS_ABORT)
elog(FATAL, "CleanupTransaction and not in abort state");
- /* ----------------
- * do abort cleanup processing
- * ----------------
+ /*
+ * do abort cleanup processing
*/
AtCleanup_Memory();
- /* ----------------
- * done with abort processing, set current transaction
- * state back to default
- * ----------------
+ /*
+ * done with abort processing, set current transaction state back to
+ * default
*/
s->state = TRANS_DEFAULT;
}
@@ -1259,44 +1230,41 @@ StartTransactionCommand(void)
switch (s->blockState)
{
- /* ----------------
- * if we aren't in a transaction block, we
- * just do our usual start transaction.
- * ----------------
+
+ /*
+ * if we aren't in a transaction block, we just do our usual
+ * start transaction.
*/
case TBLOCK_DEFAULT:
StartTransaction();
break;
- /* ----------------
- * We should never experience this -- if we do it
- * means the BEGIN state was not changed in the previous
- * CommitTransactionCommand(). If we get it, we print
- * a warning and change to the in-progress state.
- * ----------------
+ /*
+ * We should never experience this -- if we do it means the
+ * BEGIN state was not changed in the previous
+ * CommitTransactionCommand(). If we get it, we print a
+ * warning and change to the in-progress state.
*/
case TBLOCK_BEGIN:
elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_BEGIN");
s->blockState = TBLOCK_INPROGRESS;
break;
- /* ----------------
- * This is the case when are somewhere in a transaction
- * block and about to start a new command. For now we
- * do nothing but someday we may do command-local resource
- * initialization.
- * ----------------
+ /*
+ * This is the case when we are somewhere in a transaction block
+ * and about to start a new command. For now we do nothing
+ * but someday we may do command-local resource
+ * initialization.
*/
case TBLOCK_INPROGRESS:
break;
- /* ----------------
- * As with BEGIN, we should never experience this
- * if we do it means the END state was not changed in the
- * previous CommitTransactionCommand(). If we get it, we
- * print a warning, commit the transaction, start a new
- * transaction and change to the default state.
- * ----------------
+ /*
+ * As with BEGIN, we should never experience this -- if we do it
+ * means the END state was not changed in the previous
+ * CommitTransactionCommand(). If we get it, we print a
+ * warning, commit the transaction, start a new transaction
+ * and change to the default state.
*/
case TBLOCK_END:
elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_END");
@@ -1305,23 +1273,21 @@ StartTransactionCommand(void)
StartTransaction();
break;
- /* ----------------
- * Here we are in the middle of a transaction block but
- * one of the commands caused an abort so we do nothing
- * but remain in the abort state. Eventually we will get
- * to the "END TRANSACTION" which will set things straight.
- * ----------------
+ /*
+ * Here we are in the middle of a transaction block but one of
+ * the commands caused an abort so we do nothing but remain in
+ * the abort state. Eventually we will get to the "END
+ * TRANSACTION" which will set things straight.
*/
case TBLOCK_ABORT:
break;
- /* ----------------
- * This means we somehow aborted and the last call to
- * CommitTransactionCommand() didn't clear the state so
- * we remain in the ENDABORT state and maybe next time
- * we get to CommitTransactionCommand() the state will
- * get reset to default.
- * ----------------
+ /*
+ * This means we somehow aborted and the last call to
+ * CommitTransactionCommand() didn't clear the state so we
+ * remain in the ENDABORT state and maybe next time we get to
+ * CommitTransactionCommand() the state will get reset to
+ * default.
*/
case TBLOCK_ENDABORT:
elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_ENDABORT");
@@ -1347,68 +1313,62 @@ CommitTransactionCommand(void)
switch (s->blockState)
{
- /* ----------------
- * if we aren't in a transaction block, we
- * just do our usual transaction commit
- * ----------------
+
+ /*
+ * if we aren't in a transaction block, we just do our usual
+ * transaction commit
*/
case TBLOCK_DEFAULT:
CommitTransaction();
break;
- /* ----------------
- * This is the case right after we get a "BEGIN TRANSACTION"
- * command, but the user hasn't done anything else yet, so
- * we change to the "transaction block in progress" state
- * and return.
- * ----------------
+ /*
+ * This is the case right after we get a "BEGIN TRANSACTION"
+ * command, but the user hasn't done anything else yet, so we
+ * change to the "transaction block in progress" state and
+ * return.
*/
case TBLOCK_BEGIN:
s->blockState = TBLOCK_INPROGRESS;
break;
- /* ----------------
- * This is the case when we have finished executing a command
- * someplace within a transaction block. We increment the
- * command counter and return. Someday we may free resources
- * local to the command.
+ /*
+ * This is the case when we have finished executing a command
+ * someplace within a transaction block. We increment the
+ * command counter and return. Someday we may free resources
+ * local to the command.
*
- * That someday is today, at least for memory allocated in
- * TransactionCommandContext.
- * - vadim 03/25/97
- * ----------------
+ * That someday is today, at least for memory allocated in
+ * TransactionCommandContext. - vadim 03/25/97
*/
case TBLOCK_INPROGRESS:
CommandCounterIncrement();
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
break;
- /* ----------------
- * This is the case when we just got the "END TRANSACTION"
- * statement, so we commit the transaction and go back to
- * the default state.
- * ----------------
+ /*
+ * This is the case when we just got the "END TRANSACTION"
+ * statement, so we commit the transaction and go back to the
+ * default state.
*/
case TBLOCK_END:
CommitTransaction();
s->blockState = TBLOCK_DEFAULT;
break;
- /* ----------------
- * Here we are in the middle of a transaction block but
- * one of the commands caused an abort so we do nothing
- * but remain in the abort state. Eventually we will get
- * to the "END TRANSACTION" which will set things straight.
- * ----------------
+ /*
+ * Here we are in the middle of a transaction block but one of
+ * the commands caused an abort so we do nothing but remain in
+ * the abort state. Eventually we will get to the "END
+ * TRANSACTION" which will set things straight.
*/
case TBLOCK_ABORT:
break;
- /* ----------------
- * Here we were in an aborted transaction block which
- * just processed the "END TRANSACTION" command from the
- * user, so clean up and return to the default state.
- * ----------------
+ /*
+ * Here we were in an aborted transaction block which just
+ * processed the "END TRANSACTION" command from the user, so
+ * clean up and return to the default state.
*/
case TBLOCK_ENDABORT:
CleanupTransaction();
@@ -1428,22 +1388,21 @@ AbortCurrentTransaction(void)
switch (s->blockState)
{
- /* ----------------
- * if we aren't in a transaction block, we
- * just do the basic abort & cleanup transaction.
- * ----------------
+
+ /*
+ * if we aren't in a transaction block, we just do the basic
+ * abort & cleanup transaction.
*/
case TBLOCK_DEFAULT:
AbortTransaction();
CleanupTransaction();
break;
- /* ----------------
- * If we are in the TBLOCK_BEGIN it means something
- * screwed up right after reading "BEGIN TRANSACTION"
- * so we enter the abort state. Eventually an "END
- * TRANSACTION" will fix things.
- * ----------------
+ /*
+ * If we are in the TBLOCK_BEGIN it means something screwed up
+ * right after reading "BEGIN TRANSACTION" so we enter the
+ * abort state. Eventually an "END TRANSACTION" will fix
+ * things.
*/
case TBLOCK_BEGIN:
s->blockState = TBLOCK_ABORT;
@@ -1451,12 +1410,11 @@ AbortCurrentTransaction(void)
/* CleanupTransaction happens when we exit TBLOCK_ABORT */
break;
- /* ----------------
- * This is the case when are somewhere in a transaction
- * block which aborted so we abort the transaction and
- * set the ABORT state. Eventually an "END TRANSACTION"
- * will fix things and restore us to a normal state.
- * ----------------
+ /*
+ * This is the case when we are somewhere in a transaction block
+ * which aborted so we abort the transaction and set the ABORT
+ * state. Eventually an "END TRANSACTION" will fix things and
+ * restore us to a normal state.
*/
case TBLOCK_INPROGRESS:
s->blockState = TBLOCK_ABORT;
@@ -1464,12 +1422,10 @@ AbortCurrentTransaction(void)
/* CleanupTransaction happens when we exit TBLOCK_ABORT */
break;
- /* ----------------
- * Here, the system was fouled up just after the
- * user wanted to end the transaction block so we
- * abort the transaction and put us back into the
- * default state.
- * ----------------
+ /*
+ * Here, the system was fouled up just after the user wanted
+ * to end the transaction block so we abort the transaction
+ * and put us back into the default state.
*/
case TBLOCK_END:
s->blockState = TBLOCK_DEFAULT;
@@ -1477,22 +1433,20 @@ AbortCurrentTransaction(void)
CleanupTransaction();
break;
- /* ----------------
- * Here, we are already in an aborted transaction
- * state and are waiting for an "END TRANSACTION" to
- * come along and lo and behold, we abort again!
- * So we just remain in the abort state.
- * ----------------
+ /*
+ * Here, we are already in an aborted transaction state and
+ * are waiting for an "END TRANSACTION" to come along and lo
+ * and behold, we abort again! So we just remain in the abort
+ * state.
*/
case TBLOCK_ABORT:
break;
- /* ----------------
- * Here we were in an aborted transaction block which
- * just processed the "END TRANSACTION" command but somehow
- * aborted again.. since we must have done the abort
- * processing, we clean up and return to the default state.
- * ----------------
+ /*
+ * Here we were in an aborted transaction block which just
+ * processed the "END TRANSACTION" command but somehow aborted
+ * again.. since we must have done the abort processing, we
+ * clean up and return to the default state.
*/
case TBLOCK_ENDABORT:
CleanupTransaction();
@@ -1514,9 +1468,8 @@ BeginTransactionBlock(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
@@ -1524,21 +1477,18 @@ BeginTransactionBlock(void)
if (s->blockState != TBLOCK_DEFAULT)
elog(NOTICE, "BEGIN: already a transaction in progress");
- /* ----------------
- * set the current transaction block state information
- * appropriately during begin processing
- * ----------------
+ /*
+ * set the current transaction block state information appropriately
+ * during begin processing
*/
s->blockState = TBLOCK_BEGIN;
- /* ----------------
- * do begin processing
- * ----------------
+ /*
+ * do begin processing
*/
- /* ----------------
- * done with begin processing, set block state to inprogress
- * ----------------
+ /*
+ * done with begin processing, set block state to inprogress
*/
s->blockState = TBLOCK_INPROGRESS;
}
@@ -1552,22 +1502,20 @@ EndTransactionBlock(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
if (s->blockState == TBLOCK_INPROGRESS)
{
- /* ----------------
- * here we are in a transaction block which should commit
- * when we get to the upcoming CommitTransactionCommand()
- * so we set the state to "END". CommitTransactionCommand()
- * will recognize this and commit the transaction and return
- * us to the default state
- * ----------------
+
+ /*
+ * here we are in a transaction block which should commit when we
+ * get to the upcoming CommitTransactionCommand() so we set the
+ * state to "END". CommitTransactionCommand() will recognize this
+ * and commit the transaction and return us to the default state
*/
s->blockState = TBLOCK_END;
return;
@@ -1575,25 +1523,23 @@ EndTransactionBlock(void)
if (s->blockState == TBLOCK_ABORT)
{
- /* ----------------
- * here, we are in a transaction block which aborted
- * and since the AbortTransaction() was already done,
- * we do whatever is needed and change to the special
- * "END ABORT" state. The upcoming CommitTransactionCommand()
- * will recognise this and then put us back in the default
- * state.
- * ----------------
+
+ /*
+ * here, we are in a transaction block which aborted and since the
+ * AbortTransaction() was already done, we do whatever is needed
+ * and change to the special "END ABORT" state. The upcoming
+ * CommitTransactionCommand() will recognise this and then put us
+ * back in the default state.
*/
s->blockState = TBLOCK_ENDABORT;
return;
}
- /* ----------------
- * here, the user issued COMMIT when not inside a transaction.
- * Issue a notice and go to abort state. The upcoming call to
- * CommitTransactionCommand() will then put us back into the
- * default state.
- * ----------------
+ /*
+ * here, the user issued COMMIT when not inside a transaction. Issue a
+ * notice and go to abort state. The upcoming call to
+ * CommitTransactionCommand() will then put us back into the default
+ * state.
*/
elog(NOTICE, "COMMIT: no transaction in progress");
AbortTransaction();
@@ -1610,34 +1556,31 @@ AbortTransactionBlock(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
if (s->blockState == TBLOCK_INPROGRESS)
{
- /* ----------------
- * here we were inside a transaction block something
- * screwed up inside the system so we enter the abort state,
- * do the abort processing and then return.
- * We remain in the abort state until we see an
- * END TRANSACTION command.
- * ----------------
+
+ /*
+ * here we were inside a transaction block and something screwed up
+ * inside the system so we enter the abort state, do the abort
+ * processing and then return. We remain in the abort state until
+ * we see an END TRANSACTION command.
*/
s->blockState = TBLOCK_ABORT;
AbortTransaction();
return;
}
- /* ----------------
- * here, the user issued ABORT when not inside a transaction.
- * Issue a notice and go to abort state. The upcoming call to
- * CommitTransactionCommand() will then put us back into the
- * default state.
- * ----------------
+ /*
+ * here, the user issued ABORT when not inside a transaction. Issue a
+ * notice and go to abort state. The upcoming call to
+ * CommitTransactionCommand() will then put us back into the default
+ * state.
*/
elog(NOTICE, "ROLLBACK: no transaction in progress");
AbortTransaction();
@@ -1655,9 +1598,8 @@ UserAbortTransactionBlock(void)
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
@@ -1675,14 +1617,13 @@ UserAbortTransactionBlock(void)
if (s->blockState == TBLOCK_INPROGRESS)
{
- /* ----------------
- * here we were inside a transaction block and we
- * got an abort command from the user, so we move to
- * the abort state, do the abort processing and
- * then change to the ENDABORT state so we will end up
- * in the default state after the upcoming
- * CommitTransactionCommand().
- * ----------------
+
+ /*
+ * here we were inside a transaction block and we got an abort
+ * command from the user, so we move to the abort state, do the
+ * abort processing and then change to the ENDABORT state so we
+ * will end up in the default state after the upcoming
+ * CommitTransactionCommand().
*/
s->blockState = TBLOCK_ABORT;
AbortTransaction();
@@ -1690,12 +1631,11 @@ UserAbortTransactionBlock(void)
return;
}
- /* ----------------
- * here, the user issued ABORT when not inside a transaction.
- * Issue a notice and go to abort state. The upcoming call to
- * CommitTransactionCommand() will then put us back into the
- * default state.
- * ----------------
+ /*
+ * here, the user issued ABORT when not inside a transaction. Issue a
+ * notice and go to abort state. The upcoming call to
+ * CommitTransactionCommand() will then put us back into the default
+ * state.
*/
elog(NOTICE, "ROLLBACK: no transaction in progress");
AbortTransaction();
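
The xact.c comments above walk through the transaction-block state machine (TBLOCK_DEFAULT, TBLOCK_BEGIN, TBLOCK_INPROGRESS, TBLOCK_END, TBLOCK_ABORT, TBLOCK_ENDABORT) and how CommitTransactionCommand reacts to each state. A standalone sketch of those transitions (the enum mirrors the TBLOCK_* names; commit/cleanup calls are stubs, and the NOTICE cases for unexpected states are omitted):

/*
 * Sketch of the blockState transitions handled by
 * CommitTransactionCommand, as described in the comments above.
 */
#include <stdio.h>

typedef enum
{
    TBLOCK_DEFAULT, TBLOCK_BEGIN, TBLOCK_INPROGRESS,
    TBLOCK_END, TBLOCK_ABORT, TBLOCK_ENDABORT
} BlockState;

static void commit_xact(void)      { printf("commit transaction\n"); }
static void cleanup_xact(void)     { printf("cleanup aborted transaction\n"); }
static void bump_command_id(void)  { printf("increment command counter\n"); }

static BlockState
commit_transaction_command(BlockState state)
{
    switch (state)
    {
        case TBLOCK_DEFAULT:    /* not in a block: plain commit */
            commit_xact();
            return TBLOCK_DEFAULT;
        case TBLOCK_BEGIN:      /* just saw BEGIN: block is now in progress */
            return TBLOCK_INPROGRESS;
        case TBLOCK_INPROGRESS: /* finished one command inside the block */
            bump_command_id();
            return TBLOCK_INPROGRESS;
        case TBLOCK_END:        /* saw END: commit and leave the block */
            commit_xact();
            return TBLOCK_DEFAULT;
        case TBLOCK_ABORT:      /* stay aborted until END arrives */
            return TBLOCK_ABORT;
        case TBLOCK_ENDABORT:   /* aborted block saw END: clean up */
            cleanup_xact();
            return TBLOCK_DEFAULT;
    }
    return state;
}

int
main(void)
{
    BlockState  s = TBLOCK_BEGIN;       /* as if BEGIN was just executed */

    s = commit_transaction_command(s);  /* -> INPROGRESS */
    s = commit_transaction_command(s);  /* a command inside the block */
    s = TBLOCK_END;                     /* as if END was just executed */
    s = commit_transaction_command(s);  /* commit, back to DEFAULT */
    printf("final state %d\n", s);
    return 0;
}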
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 95f73c356ad..aeb90c1ffa4 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.105 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.106 2001/03/22 06:16:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -141,7 +141,7 @@ int numattr; /* number of attributes for cur. rel */
int DebugMode;
-static MemoryContext nogc = NULL; /* special no-gc mem context */
+static MemoryContext nogc = NULL; /* special no-gc mem context */
extern int optind;
extern char *optarg;
@@ -216,11 +216,10 @@ BootstrapMain(int argc, char *argv[])
char *dbName;
int flag;
int xlogop = BS_XLOG_NOP;
- char *potential_DataDir = NULL;
+ char *potential_DataDir = NULL;
- /* --------------------
- * initialize globals
- * -------------------
+ /*
+ * initialize globals
*/
MyProcPid = getpid();
@@ -236,9 +235,8 @@ BootstrapMain(int argc, char *argv[])
MemoryContextInit();
}
- /* ----------------
- * process command arguments
- * ----------------
+ /*
+ * process command arguments
*/
/* Set defaults, to be overridden by explicit options below */
@@ -248,7 +246,8 @@ BootstrapMain(int argc, char *argv[])
if (!IsUnderPostmaster)
{
ResetAllOptions();
- potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA variable */
+ potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA
+ * variable */
}
while ((flag = getopt(argc, argv, "D:dCQx:pB:F")) != EOF)
@@ -307,9 +306,9 @@ BootstrapMain(int argc, char *argv[])
if (!potential_DataDir)
{
fprintf(stderr, "%s does not know where to find the database system "
- "data. You must specify the directory that contains the "
- "database system either by specifying the -D invocation "
- "option or by setting the PGDATA environment variable.\n\n",
+ "data. You must specify the directory that contains the "
+ "database system either by specifying the -D invocation "
+ "option or by setting the PGDATA environment variable.\n\n",
argv[0]);
proc_exit(1);
}
@@ -319,15 +318,17 @@ BootstrapMain(int argc, char *argv[])
if (IsUnderPostmaster)
{
+
/*
* Properly accept or ignore signals the postmaster might send us
*/
pqsignal(SIGHUP, SIG_IGN);
- pqsignal(SIGINT, SIG_IGN); /* ignore query-cancel */
+ pqsignal(SIGINT, SIG_IGN); /* ignore query-cancel */
pqsignal(SIGTERM, die);
pqsignal(SIGQUIT, quickdie);
pqsignal(SIGUSR1, SIG_IGN);
pqsignal(SIGUSR2, SIG_IGN);
+
/*
* Reset some signals that are accepted by postmaster but not here
*/
@@ -336,8 +337,10 @@ BootstrapMain(int argc, char *argv[])
pqsignal(SIGTTOU, SIG_DFL);
pqsignal(SIGCONT, SIG_DFL);
pqsignal(SIGWINCH, SIG_DFL);
+
/*
- * Unblock signals (they were blocked when the postmaster forked us)
+ * Unblock signals (they were blocked when the postmaster forked
+ * us)
*/
PG_SETMASK(&UnBlockSig);
}
@@ -352,7 +355,7 @@ BootstrapMain(int argc, char *argv[])
/*
* Create lockfile for data directory.
*/
- if (! CreateDataDirLockFile(DataDir, false))
+ if (!CreateDataDirLockFile(DataDir, false))
proc_exit(1);
}
@@ -408,9 +411,8 @@ BootstrapMain(int argc, char *argv[])
for (i = 0; i < HASHTABLESIZE; ++i)
hashtable[i] = NULL;
- /* ----------------
- * abort processing resumes here
- * ----------------
+ /*
+ * abort processing resumes here
*/
if (sigsetjmp(Warn_restart, 1) != 0)
{
@@ -418,9 +420,8 @@ BootstrapMain(int argc, char *argv[])
AbortCurrentTransaction();
}
- /* ----------------
- * process input.
- * ----------------
+ /*
+ * process input.
*/
/*
@@ -720,9 +721,9 @@ InsertOneValue(Oid objectid, char *value, int i)
ObjectIdGetDatum(ap->am_typ.typelem),
Int32GetDatum(-1));
prt = DatumGetCString(OidFunctionCall3(ap->am_typ.typoutput,
- values[i],
- ObjectIdGetDatum(ap->am_typ.typelem),
- Int32GetDatum(-1)));
+ values[i],
+ ObjectIdGetDatum(ap->am_typ.typelem),
+ Int32GetDatum(-1)));
if (!Quiet)
printf("%s ", prt);
pfree(prt);
@@ -740,12 +741,12 @@ InsertOneValue(Oid objectid, char *value, int i)
printf("Typ == NULL, typeindex = %u idx = %d\n", typeindex, i);
values[i] = OidFunctionCall3(Procid[typeindex].inproc,
CStringGetDatum(value),
- ObjectIdGetDatum(Procid[typeindex].elem),
+ ObjectIdGetDatum(Procid[typeindex].elem),
Int32GetDatum(-1));
prt = DatumGetCString(OidFunctionCall3(Procid[typeindex].outproc,
- values[i],
- ObjectIdGetDatum(Procid[typeindex].elem),
- Int32GetDatum(-1)));
+ values[i],
+ ObjectIdGetDatum(Procid[typeindex].elem),
+ Int32GetDatum(-1)));
if (!Quiet)
printf("%s ", prt);
pfree(prt);
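
BootstrapMain above resolves the data directory from the -D option or, failing that, the PGDATA environment variable, and exits with the error message shown if neither is set. A reduced standalone sketch of that fallback (option handling is cut down to a single argument here for illustration; the real code uses getopt and also creates the data-directory lock file):

/*
 * Sketch of the data-directory resolution: prefer an explicit -D
 * argument, fall back to PGDATA, and bail out if neither is set.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char *argv[])
{
    char       *potential_datadir = getenv("PGDATA");   /* NULL if unset */

    if (argc > 2 && argv[1][0] == '-' && argv[1][1] == 'D')
        potential_datadir = argv[2];    /* -D overrides the environment */

    if (potential_datadir == NULL)
    {
        fprintf(stderr,
                "%s does not know where to find the database system data. "
                "Specify -D or set PGDATA.\n", argv[0]);
        return 1;
    }
    printf("using data directory %s\n", potential_datadir);
    return 0;
}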
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 34a22412c39..54867d51a4b 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.161 2001/03/22 03:59:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.162 2001/03/22 06:16:10 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -186,9 +186,8 @@ heap_create(char *relname,
MemoryContext oldcxt;
Oid tblNode = MyDatabaseId;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(natts > 0);
@@ -200,10 +199,9 @@ heap_create(char *relname,
relname);
}
- /* ----------------
- * real ugly stuff to assign the proper relid in the relation
- * descriptor follows.
- * ----------------
+ /*
+ * real ugly stuff to assign the proper relid in the relation
+ * descriptor follows.
*/
if (relname && IsSystemRelationName(relname))
{
@@ -279,18 +277,16 @@ heap_create(char *relname,
(int) MyProcPid, uniqueId++);
}
- /* ----------------
- * switch to the cache context to create the relcache entry.
- * ----------------
+ /*
+ * switch to the cache context to create the relcache entry.
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
- /* ----------------
- * allocate a new relation descriptor.
- * ----------------
+ /*
+ * allocate a new relation descriptor.
*/
rel = (Relation) palloc(sizeof(RelationData));
MemSet((char *) rel, 0, sizeof(RelationData));
@@ -303,18 +299,16 @@ heap_create(char *relname,
*/
rel->rd_att = CreateTupleDescCopyConstr(tupDesc);
- /* ----------------
- * nail the reldesc if this is a bootstrap create reln and
- * we may need it in the cache later on in the bootstrap
- * process so we don't ever want it kicked out. e.g. pg_attribute!!!
- * ----------------
+ /*
+ * nail the reldesc if this is a bootstrap create reln and we may need
+ * it in the cache later on in the bootstrap process so we don't ever
+ * want it kicked out. e.g. pg_attribute!!!
*/
if (nailme)
rel->rd_isnailed = true;
- /* ----------------
- * initialize the fields of our new relation descriptor
- * ----------------
+ /*
+ * initialize the fields of our new relation descriptor
*/
rel->rd_rel = (Form_pg_class) palloc(sizeof *rel->rd_rel);
MemSet((char *) rel->rd_rel, 0, sizeof *rel->rd_rel);
@@ -334,15 +328,13 @@ heap_create(char *relname,
rel->rd_node.relNode = relid;
rel->rd_rel->relfilenode = relid;
- /* ----------------
- * done building relcache entry.
- * ----------------
+ /*
+ * done building relcache entry.
*/
MemoryContextSwitchTo(oldcxt);
- /* ----------------
- * have the storage manager create the relation.
- * ----------------
+ /*
+ * have the storage manager create the relation.
*/
if (storage_create)
heap_storage_create(rel);
@@ -432,13 +424,11 @@ CheckAttributeNames(TupleDesc tupdesc)
int j;
int natts = tupdesc->natts;
- /* ----------------
- * first check for collision with system attribute names
- * ----------------
+ /*
+ * first check for collision with system attribute names
*
- * also, warn user if attribute to be created has
- * an unknown typid (usually as a result of a 'retrieve into'
- * - jolly
+ * also, warn user if attribute to be created has an unknown typid
+ * (usually as a result of a 'retrieve into' - jolly
*/
for (i = 0; i < natts; i++)
{
@@ -460,9 +450,8 @@ CheckAttributeNames(TupleDesc tupdesc)
}
}
- /* ----------------
- * next check for repeated attribute names
- * ----------------
+ /*
+ * next check for repeated attribute names
*/
for (i = 1; i < natts; i++)
{
@@ -508,10 +497,9 @@ RelnameFindRelid(const char *relname)
pg_class_desc = heap_openr(RelationRelationName, AccessShareLock);
- /* ----------------
- * At bootstrap time, we have to do this the hard way. Form the
- * scan key.
- * ----------------
+ /*
+ * At bootstrap time, we have to do this the hard way. Form the
+ * scan key.
*/
ScanKeyEntryInitialize(&key,
0,
@@ -519,9 +507,8 @@ RelnameFindRelid(const char *relname)
(RegProcedure) F_NAMEEQ,
(Datum) relname);
- /* ----------------
- * begin the scan
- * ----------------
+ /*
+ * begin the scan
*/
pg_class_scan = heap_beginscan(pg_class_desc,
0,
@@ -529,10 +516,9 @@ RelnameFindRelid(const char *relname)
1,
&key);
- /* ----------------
- * get a tuple. if the tuple is NULL then it means we
- * didn't find an existing relation.
- * ----------------
+ /*
+ * get a tuple. if the tuple is NULL then it means we didn't find
+ * an existing relation.
*/
tuple = heap_getnext(pg_class_scan, 0);
@@ -567,23 +553,20 @@ AddNewAttributeTuples(Oid new_rel_oid,
Relation idescs[Num_pg_attr_indices];
int natts = tupdesc->natts;
- /* ----------------
- * open pg_attribute
- * ----------------
+ /*
+ * open pg_attribute
*/
rel = heap_openr(AttributeRelationName, RowExclusiveLock);
- /* -----------------
+ /*
* Check if we have any indices defined on pg_attribute.
- * -----------------
*/
hasindex = RelationGetForm(rel)->relhasindex;
if (hasindex)
CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
- /* ----------------
- * first we add the user attributes..
- * ----------------
+ /*
+ * first we add the user attributes..
*/
dpp = tupdesc->attrs;
for (i = 0; i < natts; i++)
@@ -607,9 +590,8 @@ AddNewAttributeTuples(Oid new_rel_oid,
dpp++;
}
- /* ----------------
- * next we add the system attributes..
- * ----------------
+ /*
+ * next we add the system attributes..
*/
dpp = HeapAtt;
for (i = 0; i < -1 - FirstLowInvalidHeapAttributeNumber; i++)
@@ -663,31 +645,29 @@ AddNewRelationTuple(Relation pg_class_desc,
HeapTuple tup;
Relation idescs[Num_pg_class_indices];
- /* ----------------
- * first we update some of the information in our
- * uncataloged relation's relation descriptor.
- * ----------------
+ /*
+ * first we update some of the information in our uncataloged
+ * relation's relation descriptor.
*/
new_rel_reltup = new_rel_desc->rd_rel;
- /* ----------------
- * Here we insert bogus estimates of the size of the new relation.
- * In reality, of course, the new relation has 0 tuples and pages,
- * and if we were tracking these statistics accurately then we'd
- * set the fields that way. But at present the stats will be updated
- * only by VACUUM or CREATE INDEX, and the user might insert a lot of
- * tuples before he gets around to doing either of those. So, instead
- * of saying the relation is empty, we insert guesstimates. The point
- * is to keep the optimizer from making really stupid choices on
+ /*
+ * Here we insert bogus estimates of the size of the new relation. In
+ * reality, of course, the new relation has 0 tuples and pages, and if
+ * we were tracking these statistics accurately then we'd set the
+ * fields that way. But at present the stats will be updated only by
+ * VACUUM or CREATE INDEX, and the user might insert a lot of tuples
+ * before he gets around to doing either of those. So, instead of
+ * saying the relation is empty, we insert guesstimates. The point is
+ * to keep the optimizer from making really stupid choices on
* never-yet-vacuumed tables; so the estimates need only be large
* enough to discourage the optimizer from using nested-loop plans.
- * With this hack, nested-loop plans will be preferred only after
- * the table has been proven to be small by VACUUM or CREATE INDEX.
- * Maintaining the stats on-the-fly would solve the problem more cleanly,
- * but the overhead of that would likely cost more than it'd save.
- * (NOTE: CREATE INDEX inserts the same bogus estimates if it finds the
- * relation has 0 rows and pages. See index.c.)
- * ----------------
+ * With this hack, nested-loop plans will be preferred only after the
+ * table has been proven to be small by VACUUM or CREATE INDEX.
+ * Maintaining the stats on-the-fly would solve the problem more
+ * cleanly, but the overhead of that would likely cost more than it'd
+ * save. (NOTE: CREATE INDEX inserts the same bogus estimates if it
+ * finds the relation has 0 rows and pages. See index.c.)
*/
new_rel_reltup->relpages = 10; /* bogus estimates */
new_rel_reltup->reltuples = 1000;
@@ -792,9 +772,8 @@ heap_create_with_catalog(char *relname,
int natts = tupdesc->natts;
char *temp_relname = NULL;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(IsNormalProcessingMode() || IsBootstrapProcessingMode());
if (natts <= 0 || natts > MaxHeapAttributeNumber)
@@ -817,17 +796,16 @@ heap_create_with_catalog(char *relname,
strcpy(relname, temp_relname); /* heap_create will change this */
}
- /* ----------------
- * Tell heap_create not to create a physical file; we'll do that
- * below after all our catalog updates are done. (This isn't really
- * necessary anymore, but we may as well avoid the cycles of creating
- * and deleting the file in case we fail.)
+ /*
+ * Tell heap_create not to create a physical file; we'll do that below
+ * after all our catalog updates are done. (This isn't really
+ * necessary anymore, but we may as well avoid the cycles of creating
+ * and deleting the file in case we fail.)
*
- * Note: The call to heap_create() changes relname for
- * temp tables; it becomes the true physical relname.
- * The call to heap_storage_create() does all the "real"
- * work of creating the disk file for the relation.
- * ----------------
+ * Note: The call to heap_create() changes relname for temp tables; it
+ * becomes the true physical relname. The call to
+ * heap_storage_create() does all the "real" work of creating the disk
+ * file for the relation.
*/
new_rel_desc = heap_create(relname, tupdesc, istemp, false,
allow_system_table_mods);
@@ -838,13 +816,12 @@ heap_create_with_catalog(char *relname,
/* Assign an OID for the relation's tuple type */
new_type_oid = newoid();
- /* ----------------
- * now create an entry in pg_class for the relation.
+ /*
+ * now create an entry in pg_class for the relation.
*
- * NOTE: we could get a unique-index failure here, in case someone else
- * is creating the same relation name in parallel but hadn't committed
- * yet when we checked for a duplicate name above.
- * ----------------
+ * NOTE: we could get a unique-index failure here, in case someone else
+ * is creating the same relation name in parallel but hadn't committed
+ * yet when we checked for a duplicate name above.
*/
pg_class_desc = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -856,20 +833,18 @@ heap_create_with_catalog(char *relname,
relkind,
temp_relname);
- /* ----------------
- * since defining a relation also defines a complex type,
- * we add a new system type corresponding to the new relation.
+ /*
+ * since defining a relation also defines a complex type, we add a new
+ * system type corresponding to the new relation.
*
- * NOTE: we could get a unique-index failure here, in case the same name
- * has already been used for a type.
- * ----------------
+ * NOTE: we could get a unique-index failure here, in case the same name
+ * has already been used for a type.
*/
AddNewRelationType(relname, new_rel_oid, new_type_oid);
- /* ----------------
- * now add tuples to pg_attribute for the attributes in
- * our new relation.
- * ----------------
+ /*
+ * now add tuples to pg_attribute for the attributes in our new
+ * relation.
*/
AddNewAttributeTuples(new_rel_oid, tupdesc);
@@ -887,12 +862,11 @@ heap_create_with_catalog(char *relname,
if (relkind != RELKIND_VIEW)
heap_storage_create(new_rel_desc);
- /* ----------------
- * ok, the relation has been cataloged, so close our relations
- * and return the oid of the newly created relation.
+ /*
+ * ok, the relation has been cataloged, so close our relations and
+ * return the oid of the newly created relation.
*
- * SOMEDAY: fill the STATISTIC relation properly.
- * ----------------
+ * SOMEDAY: fill the STATISTIC relation properly.
*/
heap_close(new_rel_desc, NoLock); /* do not unlock till end of xact */
heap_close(pg_class_desc, RowExclusiveLock);
@@ -950,16 +924,13 @@ RelationRemoveInheritance(Relation relation)
ScanKeyData entry;
bool found = false;
- /* ----------------
- * open pg_inherits
- * ----------------
+ /*
+ * open pg_inherits
*/
catalogRelation = heap_openr(InheritsRelationName, RowExclusiveLock);
- /* ----------------
- * form a scan key for the subclasses of this class
- * and begin scanning
- * ----------------
+ /*
+ * form a scan key for the subclasses of this class and begin scanning
*/
ScanKeyEntryInitialize(&entry, 0x0, Anum_pg_inherits_inhparent,
F_OIDEQ,
@@ -971,9 +942,8 @@ RelationRemoveInheritance(Relation relation)
1,
&entry);
- /* ----------------
- * if any subclasses exist, then we disallow the deletion.
- * ----------------
+ /*
+ * if any subclasses exist, then we disallow the deletion.
*/
tuple = heap_getnext(scan, 0);
if (HeapTupleIsValid(tuple))
@@ -992,10 +962,9 @@ RelationRemoveInheritance(Relation relation)
}
heap_endscan(scan);
- /* ----------------
- * If we get here, it means the relation has no subclasses
- * so we can trash it. First we remove dead INHERITS tuples.
- * ----------------
+ /*
+ * If we get here, it means the relation has no subclasses so we can
+ * trash it. First we remove dead INHERITS tuples.
*/
entry.sk_attno = Anum_pg_inherits_inhrelid;
@@ -1014,9 +983,8 @@ RelationRemoveInheritance(Relation relation)
heap_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
- /* ----------------
- * now remove dead IPL tuples
- * ----------------
+ /*
+ * now remove dead IPL tuples
*/
catalogRelation = heap_openr(InheritancePrecidenceListRelationName,
RowExclusiveLock);
@@ -1083,9 +1051,8 @@ DeleteRelationTuple(Relation rel)
Relation pg_class_desc;
HeapTuple tup;
- /* ----------------
- * open pg_class
- * ----------------
+ /*
+ * open pg_class
*/
pg_class_desc = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1096,9 +1063,8 @@ DeleteRelationTuple(Relation rel)
elog(ERROR, "Relation \"%s\" does not exist",
RelationGetRelationName(rel));
- /* ----------------
- * delete the relation tuple from pg_class, and finish up.
- * ----------------
+ /*
+ * delete the relation tuple from pg_class, and finish up.
*/
simple_heap_delete(pg_class_desc, &tup->t_self);
heap_freetuple(tup);
@@ -1212,13 +1178,12 @@ heap_truncate(char *relname)
rel = heap_openr(relname, AccessExclusiveLock);
rid = RelationGetRelid(rel);
- /* ----------------
- * TRUNCATE TABLE within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the relation's physical file. Disallow it
- * except for a rel created in the current xact (which would be deleted
- * on abort, anyway).
- * ----------------
+ /*
+ * TRUNCATE TABLE within a transaction block is dangerous, because if
+ * the transaction is later rolled back we have no way to undo
+ * truncation of the relation's physical file. Disallow it except for
+ * a rel created in the current xact (which would be deleted on abort,
+ * anyway).
*/
if (IsTransactionBlock() && !rel->rd_myxactonly)
elog(ERROR, "TRUNCATE TABLE cannot run inside a BEGIN/END block");
@@ -1256,9 +1221,8 @@ DeleteAttributeTuples(Relation rel)
HeapTuple tup;
int2 attnum;
- /* ----------------
- * open pg_attribute
- * ----------------
+ /*
+ * open pg_attribute
*/
pg_attribute_desc = heap_openr(AttributeRelationName, RowExclusiveLock);
@@ -1305,16 +1269,14 @@ DeleteTypeTuple(Relation rel)
HeapTuple atttup;
Oid typoid;
- /* ----------------
- * open pg_type
- * ----------------
+ /*
+ * open pg_type
*/
pg_type_desc = heap_openr(TypeRelationName, RowExclusiveLock);
- /* ----------------
- * create a scan key to locate the type tuple corresponding
- * to this relation.
- * ----------------
+ /*
+ * create a scan key to locate the type tuple corresponding to this
+ * relation.
*/
ScanKeyEntryInitialize(&key, 0,
Anum_pg_type_typrelid,
@@ -1327,10 +1289,9 @@ DeleteTypeTuple(Relation rel)
1,
&key);
- /* ----------------
- * use heap_getnext() to fetch the pg_type tuple. If this
- * tuple is not valid then something's wrong.
- * ----------------
+ /*
+ * use heap_getnext() to fetch the pg_type tuple. If this tuple is
+ * not valid then something's wrong.
*/
tup = heap_getnext(pg_type_scan, 0);
@@ -1342,12 +1303,10 @@ DeleteTypeTuple(Relation rel)
RelationGetRelationName(rel));
}
- /* ----------------
- * now scan pg_attribute. if any other relations have
- * attributes of the type of the relation we are deleteing
- * then we have to disallow the deletion. should talk to
- * stonebraker about this. -cim 6/19/90
- * ----------------
+ /*
+ * now scan pg_attribute. if any other relations have attributes of
+ * the type of the relation we are deleteing then we have to disallow
+ * the deletion. should talk to stonebraker about this. -cim 6/19/90
*/
typoid = tup->t_data->t_oid;
@@ -1365,11 +1324,9 @@ DeleteTypeTuple(Relation rel)
1,
&attkey);
- /* ----------------
- * try and get a pg_attribute tuple. if we succeed it means
- * we can't delete the relation because something depends on
- * the schema.
- * ----------------
+ /*
+ * try and get a pg_attribute tuple. if we succeed it means we can't
+ * delete the relation because something depends on the schema.
*/
atttup = heap_getnext(pg_attribute_scan, 0);
@@ -1388,10 +1345,9 @@ DeleteTypeTuple(Relation rel)
heap_endscan(pg_attribute_scan);
heap_close(pg_attribute_desc, RowExclusiveLock);
- /* ----------------
- * Ok, it's safe so we delete the relation tuple
- * from pg_type and finish up.
- * ----------------
+ /*
+ * Ok, it's safe so we delete the relation tuple from pg_type and
+ * finish up.
*/
simple_heap_delete(pg_type_desc, &tup->t_self);
@@ -1414,17 +1370,15 @@ heap_drop_with_catalog(const char *relname,
bool istemp = is_temp_rel_name(relname);
int i;
- /* ----------------
- * Open and lock the relation.
- * ----------------
+ /*
+ * Open and lock the relation.
*/
rel = heap_openr(relname, AccessExclusiveLock);
rid = RelationGetRelid(rel);
has_toasttable = rel->rd_rel->reltoastrelid != InvalidOid;
- /* ----------------
- * prevent deletion of system relations
- * ----------------
+ /*
+ * prevent deletion of system relations
*/
/* allow temp of pg_class? Guess so. */
if (!istemp && !allow_system_table_mods &&
@@ -1432,19 +1386,17 @@ heap_drop_with_catalog(const char *relname,
elog(ERROR, "System relation \"%s\" may not be dropped",
RelationGetRelationName(rel));
- /* ----------------
- * Release all buffers that belong to this relation, after writing
- * any that are dirty
- * ----------------
+ /*
+ * Release all buffers that belong to this relation, after writing any
+ * that are dirty
*/
i = FlushRelationBuffers(rel, (BlockNumber) 0);
if (i < 0)
elog(ERROR, "heap_drop_with_catalog: FlushRelationBuffers returned %d",
i);
- /* ----------------
- * remove rules if necessary
- * ----------------
+ /*
+ * remove rules if necessary
*/
if (rel->rd_rules != NULL)
RelationRemoveRules(rid);
@@ -1452,27 +1404,23 @@ heap_drop_with_catalog(const char *relname,
/* triggers */
RelationRemoveTriggers(rel);
- /* ----------------
- * remove inheritance information
- * ----------------
+ /*
+ * remove inheritance information
*/
RelationRemoveInheritance(rel);
- /* ----------------
- * remove indexes if necessary
- * ----------------
+ /*
+ * remove indexes if necessary
*/
RelationRemoveIndexes(rel);
- /* ----------------
- * delete attribute tuples
- * ----------------
+ /*
+ * delete attribute tuples
*/
DeleteAttributeTuples(rel);
- /* ----------------
- * delete comments, statistics, and constraints
- * ----------------
+ /*
+ * delete comments, statistics, and constraints
*/
DeleteComments(RelationGetRelid(rel));
@@ -1480,21 +1428,18 @@ heap_drop_with_catalog(const char *relname,
RemoveConstraints(rel);
- /* ----------------
- * delete type tuple
- * ----------------
+ /*
+ * delete type tuple
*/
DeleteTypeTuple(rel);
- /* ----------------
- * delete relation tuple
- * ----------------
+ /*
+ * delete relation tuple
*/
DeleteRelationTuple(rel);
- /* ----------------
- * unlink the relation's physical file and finish up.
- * ----------------
+ /*
+ * unlink the relation's physical file and finish up.
*/
if (rel->rd_rel->relkind != RELKIND_VIEW)
smgrunlink(DEFAULT_SMGR, rel);
@@ -1506,9 +1451,8 @@ heap_drop_with_catalog(const char *relname,
*/
heap_close(rel, NoLock);
- /* ----------------
- * flush the relation from the relcache
- * ----------------
+ /*
+ * flush the relation from the relcache
*/
RelationForgetRelation(rid);
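
Every heap.c hunk above makes the same mechanical change: the old dashed banner comments are collapsed into ordinary block comments and re-wrapped. A minimal before/after sketch of the two comment styles, shown on a hypothetical helper rather than on any function from this file:

/* Before: the dashed banner style the patch removes. */
static int
old_style(int natts)
{
	/* ----------------
	 *	sanity checks
	 * ----------------
	 */
	if (natts <= 0)
		return -1;
	return natts;
}

/* After: the plain block-comment style the patch converts to. */
static int
new_style(int natts)
{
	/*
	 * sanity checks
	 */
	if (natts <= 0)
		return -1;
	return natts;
}
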
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 103c4ccc016..3b150e5340b 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.143 2001/03/22 03:59:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.144 2001/03/22 06:16:10 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -235,9 +235,8 @@ ConstructTupleDescriptor(Relation heapRelation,
heapTupDesc = RelationGetDescr(heapRelation);
natts = RelationGetForm(heapRelation)->relnatts;
- /* ----------------
- * allocate the new tuple descriptor
- * ----------------
+ /*
+ * allocate the new tuple descriptor
*/
indexTupDesc = CreateTemplateTupleDesc(numatts);
@@ -255,21 +254,20 @@ ConstructTupleDescriptor(Relation heapRelation,
Form_pg_attribute from;
Form_pg_attribute to;
- /* ----------------
- * get the attribute number and make sure it's valid;
- * determine which attribute descriptor to copy
- * ----------------
+ /*
+ * get the attribute number and make sure it's valid; determine
+ * which attribute descriptor to copy
*/
atnum = attNums[i];
if (!AttrNumberIsForUserDefinedAttr(atnum))
{
- /* ----------------
- * here we are indexing on a system attribute (-1...-n)
- * so we convert atnum into a usable index 0...n-1 so we can
- * use it to dereference the array sysatts[] which stores
- * tuple descriptor information for system attributes.
- * ----------------
+
+ /*
+ * here we are indexing on a system attribute (-1...-n) so we
+ * convert atnum into a usable index 0...n-1 so we can use it
+ * to dereference the array sysatts[] which stores tuple
+ * descriptor information for system attributes.
*/
if (atnum <= FirstLowInvalidHeapAttributeNumber || atnum >= 0)
elog(ERROR, "Cannot create index on system attribute: attribute number out of range (%d)", atnum);
@@ -279,9 +277,9 @@ ConstructTupleDescriptor(Relation heapRelation,
}
else
{
- /* ----------------
- * here we are indexing on a normal attribute (1...n)
- * ----------------
+
+ /*
+ * here we are indexing on a normal attribute (1...n)
*/
if (atnum > natts)
elog(ERROR, "Cannot create index: attribute %d does not exist",
@@ -291,10 +289,9 @@ ConstructTupleDescriptor(Relation heapRelation,
from = heapTupDesc->attrs[atind];
}
- /* ----------------
- * now that we've determined the "from", let's copy
- * the tuple desc data...
- * ----------------
+ /*
+ * now that we've determined the "from", let's copy the tuple desc
+ * data...
*/
indexTupDesc->attrs[i] = to =
(Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
@@ -346,26 +343,23 @@ AccessMethodObjectIdGetForm(Oid accessMethodObjectId,
ScanKeyData key;
Form_pg_am aform;
- /* ----------------
- * form a scan key for the pg_am relation
- * ----------------
+ /*
+ * form a scan key for the pg_am relation
*/
ScanKeyEntryInitialize(&key, 0, ObjectIdAttributeNumber,
F_OIDEQ,
ObjectIdGetDatum(accessMethodObjectId));
- /* ----------------
- * fetch the desired access method tuple
- * ----------------
+ /*
+ * fetch the desired access method tuple
*/
pg_am_desc = heap_openr(AccessMethodRelationName, AccessShareLock);
pg_am_scan = heap_beginscan(pg_am_desc, 0, SnapshotNow, 1, &key);
pg_am_tuple = heap_getnext(pg_am_scan, 0);
- /* ----------------
- * return NULL if not found
- * ----------------
+ /*
+ * return NULL if not found
*/
if (!HeapTupleIsValid(pg_am_tuple))
{
@@ -374,9 +368,8 @@ AccessMethodObjectIdGetForm(Oid accessMethodObjectId,
return NULL;
}
- /* ----------------
- * if found AM tuple, then copy it into resultCxt and return the copy
- * ----------------
+ /*
+ * if found AM tuple, then copy it into resultCxt and return the copy
*/
aform = (Form_pg_am) MemoryContextAlloc(resultCxt, sizeof *aform);
memcpy(aform, GETSTRUCT(pg_am_tuple), sizeof *aform);
@@ -397,9 +390,8 @@ ConstructIndexReldesc(Relation indexRelation, Oid amoid)
indexRelation->rd_am = AccessMethodObjectIdGetForm(amoid,
CacheMemoryContext);
- /* ----------------
- * XXX missing the initialization of some other fields
- * ----------------
+ /*
+ * XXX missing the initialization of some other fields
*/
indexRelation->rd_rel->relowner = GetUserId();
@@ -428,11 +420,10 @@ UpdateRelationRelation(Relation indexRelation, char *temp_relname)
CLASS_TUPLE_SIZE,
(char *) indexRelation->rd_rel);
- /* ----------------
- * the new tuple must have the same oid as the relcache entry for the
- * index. sure would be embarrassing to do this sort of thing in
- * polite company.
- * ----------------
+ /*
+ * the new tuple must have the same oid as the relcache entry for the
+ * index. sure would be embarrassing to do this sort of thing in
+ * polite company.
*/
tuple->t_data->t_oid = RelationGetRelid(indexRelation);
heap_insert(pg_class, tuple);
@@ -500,23 +491,21 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
TupleDesc indexTupDesc;
int i;
- /* ----------------
- * open the attribute relation
- * ----------------
+ /*
+ * open the attribute relation
*/
pg_attribute = heap_openr(AttributeRelationName, RowExclusiveLock);
- /* ----------------
- * initialize *null, *replace and *value
- * ----------------
+ /*
+ * initialize *null, *replace and *value
*/
MemSet(nullv, ' ', Natts_pg_attribute);
MemSet(replace, ' ', Natts_pg_attribute);
- /* ----------------
+ /* ----------
* create the first attribute tuple.
* XXX For now, only change the ATTNUM attribute value
- * ----------------
+ * ----------
*/
replace[Anum_pg_attribute_attnum - 1] = 'r';
replace[Anum_pg_attribute_attcacheoff - 1] = 'r';
@@ -535,9 +524,8 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
}
- /* ----------------
- * insert the first attribute tuple.
- * ----------------
+ /*
+ * insert the first attribute tuple.
*/
cur_tuple = heap_modifytuple(init_tuple,
pg_attribute,
@@ -550,18 +538,17 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
if (hasind)
CatalogIndexInsert(idescs, Num_pg_attr_indices, pg_attribute, cur_tuple);
- /* ----------------
- * now we use the information in the index cur_tuple
- * descriptor to form the remaining attribute tuples.
- * ----------------
+ /*
+ * now we use the information in the index cur_tuple descriptor to
+ * form the remaining attribute tuples.
*/
indexTupDesc = RelationGetDescr(indexRelation);
for (i = 1; i < numatts; i += 1)
{
- /* ----------------
- * process the remaining attributes...
- * ----------------
+
+ /*
+ * process the remaining attributes...
*/
memmove(GETSTRUCT(cur_tuple),
(char *) indexTupDesc->attrs[i],
@@ -580,10 +567,9 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
if (hasind)
CatalogIndexInsert(idescs, Num_pg_attr_indices, pg_attribute, new_tuple);
- /* ----------------
- * ModifyHeapTuple returns a new copy of a cur_tuple
- * so we free the original and use the copy..
- * ----------------
+ /*
+ * ModifyHeapTuple returns a new copy of a cur_tuple so we free
+ * the original and use the copy..
*/
cur_tuple = new_tuple;
}
@@ -617,10 +603,9 @@ UpdateIndexRelation(Oid indexoid,
int i;
Relation idescs[Num_pg_index_indices];
- /* ----------------
- * allocate a Form_pg_index big enough to hold the
- * index-predicate (if any) in string form
- * ----------------
+ /*
+ * allocate a Form_pg_index big enough to hold the index-predicate (if
+ * any) in string form
*/
if (indexInfo->ii_Predicate != NULL)
{
@@ -638,9 +623,8 @@ UpdateIndexRelation(Oid indexoid,
indexForm = (Form_pg_index) palloc(itupLen);
MemSet(indexForm, 0, sizeof(FormData_pg_index));
- /* ----------------
- * store information into the index tuple form
- * ----------------
+ /*
+ * store information into the index tuple form
*/
indexForm->indexrelid = indexoid;
indexForm->indrelid = heapoid;
@@ -652,11 +636,10 @@ UpdateIndexRelation(Oid indexoid,
indexForm->indisprimary = primary;
memcpy((char *) &indexForm->indpred, (char *) predText, predLen);
- /* ----------------
- * copy index key and op class information
+ /*
+ * copy index key and op class information
*
- * We zeroed the extra slots (if any) above --- that's essential.
- * ----------------
+ * We zeroed the extra slots (if any) above --- that's essential.
*/
for (i = 0; i < indexInfo->ii_NumKeyAttrs; i++)
indexForm->indkey[i] = indexInfo->ii_KeyAttrNumbers[i];
@@ -664,29 +647,25 @@ UpdateIndexRelation(Oid indexoid,
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
indexForm->indclass[i] = classOids[i];
- /* ----------------
- * open the system catalog index relation
- * ----------------
+ /*
+ * open the system catalog index relation
*/
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
- /* ----------------
- * form a tuple to insert into pg_index
- * ----------------
+ /*
+ * form a tuple to insert into pg_index
*/
tuple = heap_addheader(Natts_pg_index,
itupLen,
(char *) indexForm);
- /* ----------------
- * insert the tuple into the pg_index
- * ----------------
+ /*
+ * insert the tuple into the pg_index
*/
heap_insert(pg_index, tuple);
- /* ----------------
- * add index tuples for it
- * ----------------
+ /*
+ * add index tuples for it
*/
if (!IsIgnoringSystemIndexes())
{
@@ -695,9 +674,8 @@ UpdateIndexRelation(Oid indexoid,
CatalogCloseIndices(Num_pg_index_indices, idescs);
}
- /* ----------------
- * close the relation and free the tuple
- * ----------------
+ /*
+ * close the relation and free the tuple
*/
heap_close(pg_index, RowExclusiveLock);
pfree(predText);
@@ -802,27 +780,24 @@ InitIndexStrategy(int numatts,
Oid attrelid;
Size strsize;
- /* ----------------
- * get information from the index relation descriptor
- * ----------------
+ /*
+ * get information from the index relation descriptor
*/
attrelid = indexRelation->rd_att->attrs[0]->attrelid;
amstrategies = indexRelation->rd_am->amstrategies;
amsupport = indexRelation->rd_am->amsupport;
- /* ----------------
- * get the size of the strategy
- * ----------------
+ /*
+ * get the size of the strategy
*/
strsize = AttributeNumberGetIndexStrategySize(numatts, amstrategies);
- /* ----------------
- * allocate the new index strategy structure
+ /*
+ * allocate the new index strategy structure
*
- * the index strategy has to be allocated in the same
- * context as the relation descriptor cache or else
- * it will be lost at the end of the transaction.
- * ----------------
+ * the index strategy has to be allocated in the same context as the
+ * relation descriptor cache or else it will be lost at the end of the
+ * transaction.
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
@@ -839,11 +814,10 @@ InitIndexStrategy(int numatts,
else
support = (RegProcedure *) NULL;
- /* ----------------
- * fill in the index strategy structure with information
- * from the catalogs. First we must advance the command counter
- * so that we will see the newly-entered index catalog tuples.
- * ----------------
+ /*
+ * fill in the index strategy structure with information from the
+ * catalogs. First we must advance the command counter so that we
+ * will see the newly-entered index catalog tuples.
*/
CommandCounterIncrement();
@@ -852,9 +826,8 @@ InitIndexStrategy(int numatts,
attrelid, accessMethodObjectId,
amstrategies, amsupport, numatts);
- /* ----------------
- * store the strategy information in the index reldesc
- * ----------------
+ /*
+ * store the strategy information in the index reldesc
*/
RelationSetIndexSupport(indexRelation, strategy, support);
}
@@ -884,17 +857,15 @@ index_create(char *heapRelationName,
SetReindexProcessing(false);
- /* ----------------
- * check parameters
- * ----------------
+ /*
+ * check parameters
*/
if (indexInfo->ii_NumIndexAttrs < 1 ||
indexInfo->ii_NumKeyAttrs < 1)
elog(ERROR, "must index at least one attribute");
- /* ----------------
- * get heap relation oid and open the heap relation
- * ----------------
+ /*
+ * get heap relation oid and open the heap relation
*/
heapoid = GetHeapRelationOid(heapRelationName, indexRelationName, istemp);
@@ -903,9 +874,8 @@ index_create(char *heapRelationName,
*/
heapRelation = heap_open(heapoid, ShareLock);
- /* ----------------
- * construct new tuple descriptor
- * ----------------
+ /*
+ * construct new tuple descriptor
*/
if (OidIsValid(indexInfo->ii_FuncOid))
indexTupDesc = BuildFuncTupleDesc(indexInfo->ii_FuncOid);
@@ -923,9 +893,8 @@ index_create(char *heapRelationName,
* change this */
}
- /* ----------------
- * create the index relation
- * ----------------
+ /*
+ * create the index relation
*/
indexRelation = heap_create(indexRelationName, indexTupDesc,
istemp, false, allow_system_table_mods);
@@ -937,11 +906,10 @@ index_create(char *heapRelationName,
*/
LockRelation(indexRelation, AccessExclusiveLock);
- /* ----------------
- * construct the index relation descriptor
+ /*
+ * construct the index relation descriptor
*
- * XXX should have a proper way to create cataloged relations
- * ----------------
+ * XXX should have a proper way to create cataloged relations
*/
ConstructIndexReldesc(indexRelation, accessMethodObjectId);
@@ -957,18 +925,16 @@ index_create(char *heapRelationName,
*/
heap_storage_create(indexRelation);
- /* ----------------
- * now update the object id's of all the attribute
- * tuple forms in the index relation's tuple descriptor
- * ----------------
+ /*
+ * now update the object id's of all the attribute tuple forms in the
+ * index relation's tuple descriptor
*/
InitializeAttributeOids(indexRelation,
indexInfo->ii_NumIndexAttrs,
indexoid);
- /* ----------------
- * append ATTRIBUTE tuples for the index
- * ----------------
+ /*
+ * append ATTRIBUTE tuples for the index
*/
AppendAttributeTuples(indexRelation, indexInfo->ii_NumIndexAttrs);
@@ -983,9 +949,8 @@ index_create(char *heapRelationName,
UpdateIndexRelation(indexoid, heapoid, indexInfo,
classObjectId, islossy, primary);
- /* ----------------
- * initialize the index strategy
- * ----------------
+ /*
+ * initialize the index strategy
*/
InitIndexStrategy(indexInfo->ii_NumIndexAttrs,
indexRelation,
@@ -1033,15 +998,15 @@ index_drop(Oid indexId)
Assert(OidIsValid(indexId));
- /* ----------------
- * To drop an index safely, we must grab exclusive lock on its parent
- * table; otherwise there could be other backends using the index!
- * Exclusive lock on the index alone is insufficient because the index
- * access routines are a little slipshod about obtaining adequate locking
- * (see ExecOpenIndices()). We do grab exclusive lock on the index too,
- * just to be safe. Both locks must be held till end of transaction,
- * else other backends will still see this index in pg_index.
- * ----------------
+ /*
+ * To drop an index safely, we must grab exclusive lock on its parent
+ * table; otherwise there could be other backends using the index!
+ * Exclusive lock on the index alone is insufficient because the index
+ * access routines are a little slipshod about obtaining adequate
+ * locking (see ExecOpenIndices()). We do grab exclusive lock on the
+ * index too, just to be safe. Both locks must be held till end of
+ * transaction, else other backends will still see this index in
+ * pg_index.
*/
heapId = IndexGetRelation(indexId);
userHeapRelation = heap_open(heapId, AccessExclusiveLock);
@@ -1049,22 +1014,19 @@ index_drop(Oid indexId)
userIndexRelation = index_open(indexId);
LockRelation(userIndexRelation, AccessExclusiveLock);
- /* ----------------
- * Note: unlike heap_drop_with_catalog, we do not need to prevent
- * deletion of system indexes here; that's checked for upstream.
- * If we did check it here, deletion of TOAST tables would fail...
- * ----------------
+ /*
+ * Note: unlike heap_drop_with_catalog, we do not need to prevent
+ * deletion of system indexes here; that's checked for upstream. If we
+ * did check it here, deletion of TOAST tables would fail...
*/
- /* ----------------
+ /*
* fix DESCRIPTION relation
- * ----------------
*/
DeleteComments(indexId);
- /* ----------------
+ /*
* fix RELATION relation
- * ----------------
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1091,9 +1053,8 @@ index_drop(Oid indexId)
heap_close(relationRelation, RowExclusiveLock);
- /* ----------------
+ /*
* fix ATTRIBUTE relation
- * ----------------
*/
attributeRelation = heap_openr(AttributeRelationName, RowExclusiveLock);
@@ -1110,9 +1071,8 @@ index_drop(Oid indexId)
}
heap_close(attributeRelation, RowExclusiveLock);
- /* ----------------
+ /*
* fix INDEX relation
- * ----------------
*/
indexRelation = heap_openr(IndexRelationName, RowExclusiveLock);
@@ -1171,9 +1131,8 @@ BuildIndexInfo(HeapTuple indexTuple)
int i;
int numKeys;
- /* ----------------
- * count the number of keys, and copy them into the IndexInfo
- * ----------------
+ /*
+ * count the number of keys, and copy them into the IndexInfo
*/
numKeys = 0;
for (i = 0; i < INDEX_MAX_KEYS &&
@@ -1184,13 +1143,12 @@ BuildIndexInfo(HeapTuple indexTuple)
}
ii->ii_NumKeyAttrs = numKeys;
- /* ----------------
- * Handle functional index.
+ /*
+ * Handle functional index.
*
- * If we have a functional index then the number of
- * attributes defined in the index must be 1 (the function's
- * single return value). Otherwise it's same as number of keys.
- * ----------------
+ * If we have a functional index then the number of attributes defined in
+ * the index must be 1 (the function's single return value).
+ * Otherwise it's same as number of keys.
*/
ii->ii_FuncOid = indexStruct->indproc;
@@ -1203,9 +1161,8 @@ BuildIndexInfo(HeapTuple indexTuple)
else
ii->ii_NumIndexAttrs = numKeys;
- /* ----------------
- * If partial index, convert predicate into expression nodetree
- * ----------------
+ /*
+ * If partial index, convert predicate into expression nodetree
*/
if (VARSIZE(&indexStruct->indpred) != 0)
{
@@ -1257,9 +1214,9 @@ FormIndexDatum(IndexInfo *indexInfo,
if (OidIsValid(indexInfo->ii_FuncOid))
{
- /* ----------------
- * Functional index --- compute the single index attribute
- * ----------------
+
+ /*
+ * Functional index --- compute the single index attribute
*/
FunctionCallInfoData fcinfo;
bool anynull = false;
@@ -1292,10 +1249,10 @@ FormIndexDatum(IndexInfo *indexInfo,
}
else
{
- /* ----------------
- * Plain index --- for each attribute we need from the heap tuple,
- * get the attribute and stick it into the datum and nullv arrays.
- * ----------------
+
+ /*
+ * Plain index --- for each attribute we need from the heap tuple,
+ * get the attribute and stick it into the datum and nullv arrays.
*/
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
@@ -1465,9 +1422,8 @@ setRelhasindex(Oid relid, bool hasindex)
relid);
}
- /* ----------------
- * Update hasindex in pg_class.
- * ----------------
+ /*
+ * Update hasindex in pg_class.
*/
if (pg_class_scan)
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
@@ -1601,22 +1557,20 @@ UpdateStats(Oid relid, long reltuples)
HeapScanDesc pg_class_scan = NULL;
bool in_place_upd;
- /* ----------------
+ /*
* This routine handles updates for both the heap and index relation
- * statistics. In order to guarantee that we're able to *see* the index
- * relation tuple, we bump the command counter id here. The index
- * relation tuple was created in the current transaction.
- * ----------------
+ * statistics. In order to guarantee that we're able to *see* the
+ * index relation tuple, we bump the command counter id here. The
+ * index relation tuple was created in the current transaction.
*/
CommandCounterIncrement();
- /* ----------------
+ /*
* CommandCounterIncrement() flushes invalid cache entries, including
* those for the heap and index relations for which we're updating
* statistics. Now that the cache is flushed, it's safe to open the
* relation again. We need the relation open in order to figure out
* how many blocks it contains.
- * ----------------
*/
/*
@@ -1630,9 +1584,8 @@ UpdateStats(Oid relid, long reltuples)
/* Grab lock to be held till end of xact (probably redundant...) */
LockRelation(whichRel, ShareLock);
- /* ----------------
+ /*
* Find the RELATION relation tuple for the given relation.
- * ----------------
*/
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1670,16 +1623,15 @@ UpdateStats(Oid relid, long reltuples)
relid);
}
- /* ----------------
+ /*
* Figure values to insert.
*
- * If we found zero tuples in the scan, do NOT believe it; instead put
- * a bogus estimate into the statistics fields. Otherwise, the common
+ * If we found zero tuples in the scan, do NOT believe it; instead put a
+ * bogus estimate into the statistics fields. Otherwise, the common
* pattern "CREATE TABLE; CREATE INDEX; insert data" leaves the table
- * with zero size statistics until a VACUUM is done. The optimizer will
- * generate very bad plans if the stats claim the table is empty when
- * it is actually sizable. See also CREATE TABLE in heap.c.
- * ----------------
+ * with zero size statistics until a VACUUM is done. The optimizer
+ * will generate very bad plans if the stats claim the table is empty
+ * when it is actually sizable. See also CREATE TABLE in heap.c.
*/
relpages = RelationGetNumberOfBlocks(whichRel);
@@ -1708,9 +1660,8 @@ UpdateStats(Oid relid, long reltuples)
whichRel->rd_rel->relpages = relpages;
whichRel->rd_rel->reltuples = reltuples;
- /* ----------------
- * Update statistics in pg_class.
- * ----------------
+ /*
+ * Update statistics in pg_class.
*/
if (in_place_upd)
{
@@ -1798,9 +1749,8 @@ DefaultBuild(Relation heapRelation,
ExprContext *econtext;
InsertIndexResult insertResult;
- /* ----------------
- * more & better checking is needed
- * ----------------
+ /*
+ * more & better checking is needed
*/
Assert(OidIsValid(indexRelation->rd_rel->relam)); /* XXX */
@@ -1833,9 +1783,8 @@ DefaultBuild(Relation heapRelation,
econtext = MakeExprContext(NULL, TransactionCommandContext);
#endif /* OMIT_PARTIAL_INDEX */
- /* ----------------
- * Ok, begin our scan of the base relation.
- * ----------------
+ /*
+ * Ok, begin our scan of the base relation.
*/
scan = heap_beginscan(heapRelation, /* relation */
0, /* start at end */
@@ -1845,12 +1794,11 @@ DefaultBuild(Relation heapRelation,
reltuples = indtuples = 0;
- /* ----------------
- * for each tuple in the base relation, we create an index
- * tuple and add it to the index relation. We keep a running
- * count of the number of tuples so that we can update pg_class
- * with correct statistics when we're done building the index.
- * ----------------
+ /*
+ * for each tuple in the base relation, we create an index tuple and
+ * add it to the index relation. We keep a running count of the
+ * number of tuples so that we can update pg_class with correct
+ * statistics when we're done building the index.
*/
while (HeapTupleIsValid(heapTuple = heap_getnext(scan, 0)))
{
@@ -1888,10 +1836,9 @@ DefaultBuild(Relation heapRelation,
indtuples++;
- /* ----------------
- * FormIndexDatum fills in its datum and null parameters
- * with attribute information taken from the given heap tuple.
- * ----------------
+ /*
+ * FormIndexDatum fills in its datum and null parameters with
+ * attribute information taken from the given heap tuple.
*/
FormIndexDatum(indexInfo,
heapTuple,
@@ -1956,18 +1903,16 @@ index_build(Relation heapRelation,
{
RegProcedure procedure;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(RelationIsValid(indexRelation));
Assert(PointerIsValid(indexRelation->rd_am));
procedure = indexRelation->rd_am->ambuild;
- /* ----------------
- * use the access method build procedure if supplied, else default.
- * ----------------
+ /*
+ * use the access method build procedure if supplied, else default.
*/
if (RegProcedureIsValid(procedure))
OidFunctionCall5(procedure,
@@ -2042,11 +1987,10 @@ reindex_index(Oid indexId, bool force, bool inplace)
accessMethodId;
bool old;
- /* ----------------
- * REINDEX within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the index's physical file. Disallow it.
- * ----------------
+ /*
+ * REINDEX within a transaction block is dangerous, because if the
+ * transaction is later rolled back we have no way to undo truncation
+ * of the index's physical file. Disallow it.
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 25ecf12f3b6..11494b0172a 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.56 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.57 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -102,17 +102,15 @@ OperatorGetWithOpenRelation(Relation pg_operator_desc,
opKey[1].sk_nargs = opKey[1].sk_func.fn_nargs;
opKey[2].sk_nargs = opKey[2].sk_func.fn_nargs;
- /* ----------------
- * form scan key
- * ----------------
+ /*
+ * form scan key
*/
opKey[0].sk_argument = PointerGetDatum(operatorName);
opKey[1].sk_argument = ObjectIdGetDatum(leftObjectId);
opKey[2].sk_argument = ObjectIdGetDatum(rightObjectId);
- /* ----------------
- * begin the scan
- * ----------------
+ /*
+ * begin the scan
*/
pg_operator_scan = heap_beginscan(pg_operator_desc,
0,
@@ -120,10 +118,9 @@ OperatorGetWithOpenRelation(Relation pg_operator_desc,
3,
opKey);
- /* ----------------
- * fetch the operator tuple, if it exists, and determine
- * the proper return oid value.
- * ----------------
+ /*
+ * fetch the operator tuple, if it exists, and determine the proper
+ * return oid value.
*/
tup = heap_getnext(pg_operator_scan, 0);
@@ -140,9 +137,8 @@ OperatorGetWithOpenRelation(Relation pg_operator_desc,
*defined = false;
}
- /* ----------------
- * close the scan and return the oid.
- * ----------------
+ /*
+ * close the scan and return the oid.
*/
heap_endscan(pg_operator_scan);
@@ -170,11 +166,10 @@ OperatorGet(char *operatorName,
bool leftDefined = false;
bool rightDefined = false;
- /* ----------------
- * look up the operator data types.
+ /*
+ * look up the operator data types.
*
- * Note: types must be defined before operators
- * ----------------
+ * Note: types must be defined before operators
*/
if (leftTypeName)
{
@@ -198,16 +193,14 @@ OperatorGet(char *operatorName,
(OidIsValid(rightObjectId) && rightDefined)))
elog(ERROR, "OperatorGet: must have at least one argument type");
- /* ----------------
- * open the pg_operator relation
- * ----------------
+ /*
+ * open the pg_operator relation
*/
pg_operator_desc = heap_openr(OperatorRelationName, AccessShareLock);
- /* ----------------
- * get the oid for the operator with the appropriate name
- * and left/right types.
- * ----------------
+ /*
+ * get the oid for the operator with the appropriate name and
+ * left/right types.
*/
operatorObjectId = OperatorGetWithOpenRelation(pg_operator_desc,
operatorName,
@@ -215,9 +208,8 @@ OperatorGet(char *operatorName,
rightObjectId,
defined);
- /* ----------------
- * close the relation and return the operator oid.
- * ----------------
+ /*
+ * close the relation and return the operator oid.
*/
heap_close(pg_operator_desc, AccessShareLock);
@@ -243,9 +235,8 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
NameData oname;
TupleDesc tupDesc;
- /* ----------------
- * initialize our *nulls and *values arrays
- * ----------------
+ /*
+ * initialize our *nulls and *values arrays
*/
for (i = 0; i < Natts_pg_operator; ++i)
{
@@ -253,10 +244,9 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
values[i] = (Datum) NULL; /* redundant, but safe */
}
- /* ----------------
- * initialize *values with the operator name and input data types.
- * Note that oprcode is set to InvalidOid, indicating it's a shell.
- * ----------------
+ /*
+ * initialize *values with the operator name and input data types.
+ * Note that oprcode is set to InvalidOid, indicating it's a shell.
*/
i = 0;
namestrcpy(&oname, operatorName);
@@ -277,9 +267,8 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
values[i++] = ObjectIdGetDatum(InvalidOid);
values[i++] = ObjectIdGetDatum(InvalidOid);
- /* ----------------
- * create a new operator tuple
- * ----------------
+ /*
+ * create a new operator tuple
*/
tupDesc = pg_operator_desc->rd_att;
@@ -287,10 +276,8 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
values,
nulls);
- /* ----------------
- * insert our "shell" operator tuple and
- * close the relation
- * ----------------
+ /*
+ * insert our "shell" operator tuple and close the relation
*/
heap_insert(pg_operator_desc, tup);
operatorObjectId = tup->t_data->t_oid;
@@ -304,9 +291,8 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
CatalogCloseIndices(Num_pg_operator_indices, idescs);
}
- /* ----------------
- * free the tuple and return the operator oid
- * ----------------
+ /*
+ * free the tuple and return the operator oid
*/
heap_freetuple(tup);
@@ -335,9 +321,8 @@ OperatorShellMake(char *operatorName,
bool leftDefined = false;
bool rightDefined = false;
- /* ----------------
- * get the left and right type oid's for this operator
- * ----------------
+ /*
+ * get the left and right type oid's for this operator
*/
if (leftTypeName)
leftObjectId = TypeGet(leftTypeName, &leftDefined);
@@ -349,24 +334,22 @@ OperatorShellMake(char *operatorName,
(OidIsValid(rightObjectId) && rightDefined)))
elog(ERROR, "OperatorShellMake: no valid argument types??");
- /* ----------------
- * open pg_operator
- * ----------------
+ /*
+ * open pg_operator
*/
pg_operator_desc = heap_openr(OperatorRelationName, RowExclusiveLock);
- /* ----------------
- * add a "shell" operator tuple to the operator relation
- * and recover the shell tuple's oid.
- * ----------------
+ /*
+ * add a "shell" operator tuple to the operator relation and recover
+ * the shell tuple's oid.
*/
operatorObjectId = OperatorShellMakeWithOpenRelation(pg_operator_desc,
operatorName,
leftObjectId,
rightObjectId);
- /* ----------------
- * close the operator relation and return the oid.
- * ----------------
+
+ /*
+ * close the operator relation and return the oid.
*/
heap_close(pg_operator_desc, RowExclusiveLock);
@@ -516,11 +499,10 @@ OperatorDef(char *operatorName,
* filling in a previously-created shell.
*/
- /* ----------------
- * look up the operator data types.
+ /*
+ * look up the operator data types.
*
- * Note: types must be defined before operators
- * ----------------
+ * Note: types must be defined before operators
*/
if (leftTypeName)
{
@@ -551,12 +533,10 @@ OperatorDef(char *operatorName,
nulls[i] = ' ';
}
- /* ----------------
- * Look up registered procedures -- find the return type
- * of procedureName to place in "result" field.
- * Do this before shells are created so we don't
- * have to worry about deleting them later.
- * ----------------
+ /*
+ * Look up registered procedures -- find the return type of
+ * procedureName to place in "result" field. Do this before shells are
+ * created so we don't have to worry about deleting them later.
*/
MemSet(typeId, 0, FUNC_MAX_ARGS * sizeof(Oid));
if (!leftTypeName)
@@ -589,9 +569,8 @@ OperatorDef(char *operatorName,
ReleaseSysCache(tup);
- /* ----------------
- * find restriction
- * ----------------
+ /*
+ * find restriction
*/
if (restrictionName)
{ /* optional */
@@ -617,9 +596,8 @@ OperatorDef(char *operatorName,
else
values[Anum_pg_operator_oprrest - 1] = ObjectIdGetDatum(InvalidOid);
- /* ----------------
- * find join - only valid for binary operators
- * ----------------
+ /*
+ * find join - only valid for binary operators
*/
if (joinName)
{ /* optional */
@@ -645,9 +623,8 @@ OperatorDef(char *operatorName,
else
values[Anum_pg_operator_oprjoin - 1] = ObjectIdGetDatum(InvalidOid);
- /* ----------------
+ /*
* set up values in the operator tuple
- * ----------------
*/
i = 0;
namestrcpy(&oname, operatorName);
@@ -1077,11 +1054,10 @@ OperatorCreate(char *operatorName,
elog(ERROR, "OperatorCreate: only binary operators can have sort links");
}
- /* ----------------
- * Use OperatorDef() to define the specified operator and
- * also create shells for the operator's associated operators
- * if they don't already exist.
- * ----------------
+ /*
+ * Use OperatorDef() to define the specified operator and also create
+ * shells for the operator's associated operators if they don't
+ * already exist.
*/
OperatorDef(operatorName,
leftTypeName,
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 0872eb6e977..a71aa8e5e77 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.54 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.55 2001/03/22 06:16:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,9 +73,8 @@ ProcedureCreate(char *procedureName,
TupleDesc tupDesc;
Oid retval;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(PointerIsValid(prosrc));
Assert(PointerIsValid(probin));
@@ -142,16 +141,16 @@ ProcedureCreate(char *procedureName,
if (strcmp(procedureName, GENERICSETNAME) == 0)
{
#ifdef SETS_FIXED
- /* ----------
- * The code below doesn't work any more because the
- * PROSRC system cache and the pg_proc_prosrc_index
- * have been removed. Instead a sequential heap scan
- * or something better must get implemented. The reason
- * for removing is that nbtree index crashes if sources
- * exceed 2K --- what's likely for procedural languages.
+
+ /*
+ * The code below doesn't work any more because the PROSRC
+ * system cache and the pg_proc_prosrc_index have been
+ * removed. Instead a sequential heap scan or something better
+ * must get implemented. The reason for removing is that
+ * nbtree index crashes if sources exceed 2K --- what's likely
+ * for procedural languages.
*
* 1999/09/30 Jan
- * ----------
*/
text *prosrctext;
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 0fbadb55b2d..ff15f25deda 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.60 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.61 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,9 +48,8 @@ TypeGetWithOpenRelation(Relation pg_type_desc,
Oid typoid;
ScanKeyData typeKey[1];
- /* ----------------
- * initialize the scan key and begin a scan of pg_type
- * ----------------
+ /*
+ * initialize the scan key and begin a scan of pg_type
*/
ScanKeyEntryInitialize(typeKey,
0,
@@ -64,16 +63,14 @@ TypeGetWithOpenRelation(Relation pg_type_desc,
1,
typeKey);
- /* ----------------
- * get the type tuple, if it exists.
- * ----------------
+ /*
+ * get the type tuple, if it exists.
*/
tup = heap_getnext(scan, 0);
- /* ----------------
- * if no type tuple exists for the given type name, then
- * end the scan and return appropriate information.
- * ----------------
+ /*
+ * if no type tuple exists for the given type name, then end the scan
+ * and return appropriate information.
*/
if (!HeapTupleIsValid(tup))
{
@@ -82,11 +79,10 @@ TypeGetWithOpenRelation(Relation pg_type_desc,
return InvalidOid;
}
- /* ----------------
- * here, the type tuple does exist so we pull information from
- * the typisdefined field of the tuple and return the tuple's
- * oid, which is the oid of the type.
- * ----------------
+ /*
+ * here, the type tuple does exist so we pull information from the
+ * typisdefined field of the tuple and return the tuple's oid, which
+ * is the oid of the type.
*/
*defined = (bool) ((Form_pg_type) GETSTRUCT(tup))->typisdefined;
typoid = tup->t_data->t_oid;
@@ -116,23 +112,20 @@ TypeGet(char *typeName, /* name of type to be fetched */
Relation pg_type_desc;
Oid typeoid;
- /* ----------------
- * open the pg_type relation
- * ----------------
+ /*
+ * open the pg_type relation
*/
pg_type_desc = heap_openr(TypeRelationName, AccessShareLock);
- /* ----------------
- * scan the type relation for the information we want
- * ----------------
+ /*
+ * scan the type relation for the information we want
*/
typeoid = TypeGetWithOpenRelation(pg_type_desc,
typeName,
defined);
- /* ----------------
- * close the type relation and return the type oid.
- * ----------------
+ /*
+ * close the type relation and return the type oid.
*/
heap_close(pg_type_desc, AccessShareLock);
@@ -155,9 +148,8 @@ TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName)
NameData name;
TupleDesc tupDesc;
- /* ----------------
- * initialize our *nulls and *values arrays
- * ----------------
+ /*
+ * initialize our *nulls and *values arrays
*/
for (i = 0; i < Natts_pg_type; ++i)
{
@@ -165,9 +157,8 @@ TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName)
values[i] = (Datum) NULL; /* redundant, but safe */
}
- /* ----------------
- * initialize *values with the type name and dummy values
- * ----------------
+ /*
+ * initialize *values with the type name and dummy values
*/
i = 0;
namestrcpy(&name, typeName);
@@ -190,17 +181,15 @@ TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName)
values[i++] = DirectFunctionCall1(textin,
CStringGetDatum(typeName)); /* 17 */
- /* ----------------
- * create a new type tuple with FormHeapTuple
- * ----------------
+ /*
+ * create a new type tuple with FormHeapTuple
*/
tupDesc = pg_type_desc->rd_att;
tup = heap_formtuple(tupDesc, values, nulls);
- /* ----------------
- * insert the tuple in the relation and get the tuple's oid.
- * ----------------
+ /*
+ * insert the tuple in the relation and get the tuple's oid.
*/
heap_insert(pg_type_desc, tup);
typoid = tup->t_data->t_oid;
@@ -213,9 +202,9 @@ TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName)
CatalogIndexInsert(idescs, Num_pg_type_indices, pg_type_desc, tup);
CatalogCloseIndices(Num_pg_type_indices, idescs);
}
- /* ----------------
- * free the tuple and return the type-oid
- * ----------------
+
+ /*
+ * free the tuple and return the type-oid
*/
heap_freetuple(tup);
@@ -243,21 +232,18 @@ TypeShellMake(char *typeName)
Assert(PointerIsValid(typeName));
- /* ----------------
- * open pg_type
- * ----------------
+ /*
+ * open pg_type
*/
pg_type_desc = heap_openr(TypeRelationName, RowExclusiveLock);
- /* ----------------
- * insert the shell tuple
- * ----------------
+ /*
+ * insert the shell tuple
*/
typoid = TypeShellMakeWithOpenRelation(pg_type_desc, typeName);
- /* ----------------
- * close pg_type and return the tuple's oid.
- * ----------------
+ /*
+ * close pg_type and return the tuple's oid.
*/
heap_close(pg_type_desc, RowExclusiveLock);
@@ -311,20 +297,18 @@ TypeCreate(char *typeName,
Oid argList[FUNC_MAX_ARGS];
ScanKeyData typeKey[1];
- /* ----------------
- * check that the type is not already defined. It might exist as
- * a shell type, however (but only if assignedTypeOid is not given).
- * ----------------
+ /*
+ * check that the type is not already defined. It might exist as a
+ * shell type, however (but only if assignedTypeOid is not given).
*/
typeObjectId = TypeGet(typeName, &defined);
if (OidIsValid(typeObjectId) &&
(defined || assignedTypeOid != InvalidOid))
elog(ERROR, "TypeCreate: type %s already defined", typeName);
- /* ----------------
- * if this type has an associated elementType, then we check that
- * it is defined.
- * ----------------
+ /*
+ * if this type has an associated elementType, then we check that it
+ * is defined.
*/
if (elementTypeName)
{
@@ -333,16 +317,14 @@ TypeCreate(char *typeName,
elog(ERROR, "TypeCreate: type %s is not defined", elementTypeName);
}
- /* ----------------
- * XXX comment me
- * ----------------
+ /*
+ * XXX comment me
*/
if (externalSize == 0)
externalSize = -1; /* variable length */
- /* ----------------
- * initialize arrays needed by FormHeapTuple
- * ----------------
+ /*
+ * initialize arrays needed by FormHeapTuple
*/
for (i = 0; i < Natts_pg_type; ++i)
{
@@ -362,9 +344,8 @@ TypeCreate(char *typeName,
if (internalSize == 0)
internalSize = -1;
- /* ----------------
- * initialize the *values information
- * ----------------
+ /*
+ * initialize the *values information
*/
i = 0;
namestrcpy(&name, typeName);
@@ -441,28 +422,24 @@ TypeCreate(char *typeName,
values[i++] = ObjectIdGetDatum(procOid); /* 11 - 14 */
}
- /* ----------------
+ /*
* set default alignment
- * ----------------
*/
values[i++] = CharGetDatum(alignment); /* 15 */
- /* ----------------
- * set default storage for TOAST
- * ----------------
+ /*
+ * set default storage for TOAST
*/
values[i++] = CharGetDatum(storage); /* 16 */
- /* ----------------
- * initialize the default value for this type.
- * ----------------
+ /*
+ * initialize the default value for this type.
*/
values[i] = DirectFunctionCall1(textin, /* 17 */
CStringGetDatum(defaultTypeValue ? defaultTypeValue : "-"));
- /* ----------------
- * open pg_type and begin a scan for the type name.
- * ----------------
+ /*
+ * open pg_type and begin a scan for the type name.
*/
pg_type_desc = heap_openr(TypeRelationName, RowExclusiveLock);
@@ -478,11 +455,9 @@ TypeCreate(char *typeName,
1,
typeKey);
- /* ----------------
- * define the type either by adding a tuple to the type
- * relation, or by updating the fields of the "shell" tuple
- * already there.
- * ----------------
+ /*
+ * define the type either by adding a tuple to the type relation, or
+ * by updating the fields of the "shell" tuple already there.
*/
tup = heap_getnext(pg_type_scan, 0);
if (HeapTupleIsValid(tup))
@@ -517,9 +492,8 @@ TypeCreate(char *typeName,
typeObjectId = tup->t_data->t_oid;
}
- /* ----------------
- * finish up
- * ----------------
+ /*
+ * finish up
*/
heap_endscan(pg_type_scan);
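
The TypeCreate hunks above all follow the same catalog-row idiom: fill parallel values[] and nulls[] arrays positionally, then hand them to heap_formtuple and insert (or update the shell tuple already there). A minimal standalone sketch of that parallel-array pattern follows; the column names and count are invented for illustration and are not the real pg_type layout.

    #include <stdio.h>
    #include <string.h>

    #define NATTS 3     /* invented column count, not the real Natts_pg_type */

    int
    main(void)
    {
        const char *values[NATTS];
        char        nulls[NATTS];
        int         i;

        /* every column starts out non-null (' '), as in the hunks above */
        memset(nulls, ' ', sizeof(nulls));

        /* fill values[] positionally, mirroring the i++ idiom in TypeCreate */
        i = 0;
        values[i++] = "mytype";     /* 1: type name */
        values[i++] = "4";          /* 2: internal length */
        values[i++] = "-";          /* 3: default value */

        /* heap_formtuple() would consume these two arrays; just print them */
        for (i = 0; i < NATTS; i++)
            printf("attribute %d = %s (null flag '%c')\n",
                   i + 1, values[i], nulls[i]);
        return 0;
    }
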
diff --git a/src/backend/commands/_deadcode/recipe.c b/src/backend/commands/_deadcode/recipe.c
index 8d492e3a2b5..15a1d0a5647 100644
--- a/src/backend/commands/_deadcode/recipe.c
+++ b/src/backend/commands/_deadcode/recipe.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.12 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.13 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -203,10 +203,10 @@ beginRecipe(RecipeStmt *stmt)
* skip the rule rewrite and time qual stuff
*/
- /* ----------------------------------------------------------
- * 1) plan the main query, everything from an eye node back to
- a Tee
- * ---------------------------------------------------------- */
+ /*
+ * 1) plan the main query, everything from an eye node back to a
+ * Tee
+ */
parsetree = qList->qtrees[0];
/*
@@ -218,11 +218,11 @@ beginRecipe(RecipeStmt *stmt)
plan = planner(parsetree);
- /* ----------------------------------------------------------
- * 2) plan the tee queries, (subgraphs rooted from a Tee)
- by the time the eye is processed, all tees that contribute
- to that eye will have been included in the teeInfo list
- * ---------------------------------------------------------- */
+ /*
+	 * 2) plan the tee queries (subgraphs rooted from a Tee); by the
+	 * time the eye is processed, all tees that contribute to that eye
+	 * will have been included in the teeInfo list
+ */
if (teeInfo)
{
int t;
@@ -258,10 +258,10 @@ beginRecipe(RecipeStmt *stmt)
}
}
- /* ----------------------------------------------------------
- * 3) replace the tee table scans in the main plan with
- actual tee plannodes
- * ---------------------------------------------------------- */
+ /*
+ * 3) replace the tee table scans in the main plan with actual
+ * tee plannodes
+ */
plan = replaceTeeScans(plan, parsetree, teeInfo);
@@ -274,9 +274,9 @@ beginRecipe(RecipeStmt *stmt)
queryDesc = CreateQueryDesc(parsetree,
plan,
whereToSendOutput);
- /* ----------------
- * call ExecStart to prepare the plan for execution
- * ----------------
+
+ /*
+	 * call ExecutorStart to prepare the plan for execution
*/
attinfo = ExecutorStart(queryDesc, NULL);
@@ -323,16 +323,15 @@ tg_rewriteQuery(TgRecipe * r,
orig = q->qtrees[0];
- /*-------------------------------------------------------------------
- step 1:
-
- form a combined range table from all the range tables in the original
- query as well as the input nodes
-
- form a combined qualification from the qual in the original plus
- the quals of the input nodes
- -------------------------------------------------------------------
- */
+ /*
+ * step 1:
+ *
+ * form a combined range table from all the range tables in the original
+ * query as well as the input nodes
+ *
+ * form a combined qualification from the qual in the original plus the
+ * quals of the input nodes
+ */
/* start with the original range table */
rtable = orig->rtable;
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index f4e056bd0a7..88e56869da5 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.16 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -582,9 +582,8 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
for (i = 0; i < Natts_pg_statistic; ++i)
nulls[i] = ' ';
- /* ----------------
- * initialize values[]
- * ----------------
+ /*
+ * initialize values[]
*/
i = 0;
values[i++] = ObjectIdGetDatum(relid); /* starelid */
diff --git a/src/backend/commands/command.c b/src/backend/commands/command.c
index 49d1edf4c4b..a8e16f2d732 100644
--- a/src/backend/commands/command.c
+++ b/src/backend/commands/command.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.124 2001/03/22 06:16:11 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
@@ -69,28 +69,24 @@ PortalCleanup(Portal portal)
{
MemoryContext oldcontext;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(PortalIsValid(portal));
AssertArg(portal->cleanup == PortalCleanup);
- /* ----------------
- * set proper portal-executor context before calling ExecMain.
- * ----------------
+ /*
+	 * set proper portal-executor context before calling the executor.
*/
oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
- /* ----------------
- * tell the executor to shutdown the query
- * ----------------
+ /*
+ * tell the executor to shutdown the query
*/
ExecutorEnd(PortalGetQueryDesc(portal), PortalGetState(portal));
- /* ----------------
- * switch back to previous context
- * ----------------
+ /*
+ * switch back to previous context
*/
MemoryContextSwitchTo(oldcontext);
}
@@ -111,9 +107,8 @@ PerformPortalFetch(char *name,
EState *estate;
MemoryContext oldcontext;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
if (name == NULL)
{
@@ -121,9 +116,8 @@ PerformPortalFetch(char *name,
return;
}
- /* ----------------
- * get the portal from the portal name
- * ----------------
+ /*
+ * get the portal from the portal name
*/
portal = GetPortalByName(name);
if (!PortalIsValid(portal))
@@ -133,18 +127,16 @@ PerformPortalFetch(char *name,
return;
}
- /* ----------------
- * switch into the portal context
- * ----------------
+ /*
+ * switch into the portal context
*/
oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
- /* ----------------
- * tell the destination to prepare to receive some tuples.
+ /*
+ * tell the destination to prepare to receive some tuples.
*
- * If we've been asked for a MOVE, make a temporary QueryDesc
- * with the appropriate dummy destination.
- * ----------------
+ * If we've been asked for a MOVE, make a temporary QueryDesc with the
+ * appropriate dummy destination.
*/
queryDesc = PortalGetQueryDesc(portal);
estate = PortalGetState(portal);
@@ -168,15 +160,14 @@ PerformPortalFetch(char *name,
tag,
dest);
- /* ----------------
- * Determine which direction to go in, and check to see if we're already
- * at the end of the available tuples in that direction. If so, do
- * nothing. (This check exists because not all plan node types are
- * robust about being called again if they've already returned NULL
- * once.) If it's OK to do the fetch, call the executor. Then,
- * update the atStart/atEnd state depending on the number of tuples
- * that were retrieved.
- * ----------------
+ /*
+ * Determine which direction to go in, and check to see if we're
+ * already at the end of the available tuples in that direction. If
+ * so, do nothing. (This check exists because not all plan node types
+ * are robust about being called again if they've already returned
+ * NULL once.) If it's OK to do the fetch, call the executor. Then,
+ * update the atStart/atEnd state depending on the number of tuples
+ * that were retrieved.
*/
if (forward)
{
@@ -201,19 +192,17 @@ PerformPortalFetch(char *name,
}
}
- /* ----------------
- * Clean up and switch back to old context.
- * ----------------
+ /*
+ * Clean up and switch back to old context.
*/
if (dest == None) /* MOVE */
pfree(queryDesc);
MemoryContextSwitchTo(oldcontext);
- /* ----------------
- * Note: the "end-of-command" tag is returned by higher-level
- * utility code
- * ----------------
+ /*
+ * Note: the "end-of-command" tag is returned by higher-level utility
+ * code
*/
}
@@ -226,9 +215,8 @@ PerformPortalClose(char *name, CommandDest dest)
{
Portal portal;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
if (name == NULL)
{
@@ -236,9 +224,8 @@ PerformPortalClose(char *name, CommandDest dest)
return;
}
- /* ----------------
- * get the portal from the portal name
- * ----------------
+ /*
+ * get the portal from the portal name
*/
portal = GetPortalByName(name);
if (!PortalIsValid(portal))
@@ -248,9 +235,8 @@ PerformPortalClose(char *name, CommandDest dest)
return;
}
- /* ----------------
- * Note: PortalCleanup is called as a side-effect
- * ----------------
+ /*
+ * Note: PortalCleanup is called as a side-effect
*/
PortalDrop(&portal);
}
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index f586869b078..0f249fa385c 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.136 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -888,16 +888,14 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
{
ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- /* ----------------
+ /*
* Check the constraints of the tuple
- * ----------------
*/
if (rel->rd_att->constr)
ExecConstraints("CopyFrom", resultRelInfo, slot, estate);
- /* ----------------
+ /*
* OK, store the tuple and create index entries for it
- * ----------------
*/
heap_insert(rel, tuple);
diff --git a/src/backend/commands/creatinh.c b/src/backend/commands/creatinh.c
index c4a5eaa00e9..602ceb54328 100644
--- a/src/backend/commands/creatinh.c
+++ b/src/backend/commands/creatinh.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.74 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,10 +68,9 @@ DefineRelation(CreateStmt *stmt, char relkind)
stmt->relname, NAMEDATALEN);
StrNCpy(relname, stmt->relname, NAMEDATALEN);
- /* ----------------
- * Look up inheritance ancestors and generate relation schema,
- * including inherited attributes.
- * ----------------
+ /*
+ * Look up inheritance ancestors and generate relation schema,
+ * including inherited attributes.
*/
schema = MergeAttributes(schema, stmt->inhRelnames, stmt->istemp,
&inheritOids, &old_constraints);
@@ -80,14 +79,12 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (numberOfAttributes <= 0)
elog(ERROR, "DefineRelation: please inherit from a relation or define an attribute");
- /* ----------------
- * create a relation descriptor from the relation schema
- * and create the relation. Note that in this stage only
- * inherited (pre-cooked) defaults and constraints will be
- * included into the new relation. (BuildDescForRelation
- * takes care of the inherited defaults, but we have to copy
- * inherited constraints here.)
- * ----------------
+ /*
+ * create a relation descriptor from the relation schema and create
+ * the relation. Note that in this stage only inherited (pre-cooked)
+ * defaults and constraints will be included into the new relation.
+ * (BuildDescForRelation takes care of the inherited defaults, but we
+ * have to copy inherited constraints here.)
*/
descriptor = BuildDescForRelation(schema, relname);
@@ -559,18 +556,16 @@ StoreCatalogInheritance(Oid relationId, List *supers)
List *entry;
HeapTuple tuple;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(OidIsValid(relationId));
if (supers == NIL)
return;
- /* ----------------
+ /*
* Catalog INHERITS information using direct ancestors only.
- * ----------------
*/
relation = heap_openr(InheritsRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
@@ -620,9 +615,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
* ----------------
*/
- /* ----------------
- * 1. append after each relationId, its superclasses, recursively.
- * ----------------
+ /*
+ * 1. append after each relationId, its superclasses, recursively.
*/
foreach(entry, supers)
{
@@ -656,9 +650,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
lnext(current) = next;
}
- /* ----------------
- * 2. remove all but last of duplicates.
- * ----------------
+ /*
+ * 2. remove all but last of duplicates.
*/
foreach(entry, supers)
{
@@ -690,9 +683,8 @@ again:
}
}
- /* ----------------
+ /*
* Catalog IPL information using expanded list.
- * ----------------
*/
relation = heap_openr(InheritancePrecidenceListRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index c8a2726a8f7..346c599c7f2 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.54 2001/03/22 06:16:11 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -442,9 +442,8 @@ DefineOperator(char *oprName,
if (functionName == NULL)
elog(ERROR, "Define: \"procedure\" unspecified");
- /* ----------------
- * now have OperatorCreate do all the work..
- * ----------------
+ /*
+	 * now have OperatorCreate do all the work.
*/
OperatorCreate(oprName, /* operator name */
typeName1, /* first type name */
@@ -640,9 +639,8 @@ DefineType(char *typeName, List *parameters)
if (internalLength != -1 && storage != 'p')
elog(ERROR, "Define: fixed size types must have storage PLAIN");
- /* ----------------
- * now have TypeCreate do all the real work.
- * ----------------
+ /*
+ * now have TypeCreate do all the real work.
*/
TypeCreate(typeName, /* type name */
InvalidOid, /* preassigned type oid (not done here) */
@@ -661,10 +659,9 @@ DefineType(char *typeName, List *parameters)
alignment, /* required alignment */
storage); /* TOAST strategy */
- /* ----------------
- * When we create a base type (as opposed to a complex type)
- * we need to have an array entry for it in pg_type as well.
- * ----------------
+ /*
+ * When we create a base type (as opposed to a complex type) we need
+ * to have an array entry for it in pg_type as well.
*/
shadow_type = makeArrayTypeName(typeName);
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 2d3e70c427b..40e805137ac 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.47 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -405,16 +405,15 @@ FuncIndexArgs(IndexInfo *indexInfo,
nargs++;
}
- /* ----------------
+ /*
* Lookup the function procedure to get its OID and result type.
*
- * We rely on parse_func.c to find the correct function in the
- * possible presence of binary-compatible types. However, parse_func
- * may do too much: it will accept a function that requires run-time
- * coercion of input types, and the executor is not currently set up
- * to support that. So, check to make sure that the selected function
- * has exact-match or binary-compatible input types.
- * ----------------
+ * We rely on parse_func.c to find the correct function in the possible
+ * presence of binary-compatible types. However, parse_func may do
+ * too much: it will accept a function that requires run-time coercion
+ * of input types, and the executor is not currently set up to support
+ * that. So, check to make sure that the selected function has
+ * exact-match or binary-compatible input types.
*/
if (!func_get_detail(funcIndex->name, nargs, argTypes,
&funcid, &rettype, &retset, &true_typeids))
@@ -637,11 +636,10 @@ ReindexIndex(const char *name, bool force /* currently unused */ )
HeapTuple tuple;
bool overwrite = false;
- /* ----------------
- * REINDEX within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the index's physical file. Disallow it.
- * ----------------
+ /*
+ * REINDEX within a transaction block is dangerous, because if the
+ * transaction is later rolled back we have no way to undo truncation
+ * of the index's physical file. Disallow it.
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
@@ -681,11 +679,10 @@ ReindexTable(const char *name, bool force)
{
HeapTuple tuple;
- /* ----------------
- * REINDEX within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the index's physical file. Disallow it.
- * ----------------
+ /*
+ * REINDEX within a transaction block is dangerous, because if the
+ * transaction is later rolled back we have no way to undo truncation
+ * of the index's physical file. Disallow it.
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index ca1dbf3cbe4..43327c7a65a 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -59,9 +59,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
int i;
- /* ----------------
+ /*
* Check permission
- * ----------------
*/
if (!superuser())
{
@@ -69,10 +68,9 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
"permitted to create procedural languages");
}
- /* ----------------
- * Translate the language name and check that
- * this language doesn't already exist
- * ----------------
+ /*
+ * Translate the language name and check that this language doesn't
+ * already exist
*/
case_translate_language_name(stmt->plname, languageName);
@@ -81,10 +79,9 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
0, 0, 0))
elog(ERROR, "Language %s already exists", languageName);
- /* ----------------
- * Lookup the PL handler function and check that it is
- * of return type Opaque
- * ----------------
+ /*
+ * Lookup the PL handler function and check that it is of return type
+ * Opaque
*/
memset(typev, 0, sizeof(typev));
procTup = SearchSysCache(PROCNAME,
@@ -99,9 +96,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
elog(ERROR, "PL handler function %s() isn't of return type Opaque",
stmt->plhandler);
- /* ----------------
+ /*
* Insert the new language into pg_language
- * ----------------
*/
for (i = 0; i < Natts_pg_language; i++)
{
@@ -150,9 +146,8 @@ DropProceduralLanguage(DropPLangStmt *stmt)
HeapTuple langTup;
Relation rel;
- /* ----------------
+ /*
* Check permission
- * ----------------
*/
if (!superuser())
{
@@ -160,10 +155,9 @@ DropProceduralLanguage(DropPLangStmt *stmt)
"permitted to drop procedural languages");
}
- /* ----------------
- * Translate the language name, check that
- * this language exist and is a PL
- * ----------------
+ /*
+	 * Translate the language name and check that this language exists
+	 * and is a PL
*/
case_translate_language_name(stmt->plname, languageName);
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 034b49887e7..70f2e1b2957 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.90 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,10 +72,9 @@ CreateTrigger(CreateTrigStmt *stmt)
if (!pg_ownercheck(GetUserId(), stmt->relname, RELNAME))
elog(ERROR, "%s: %s", stmt->relname, aclcheck_error_strings[ACLCHECK_NOT_OWNER]);
- /* ----------
- * If trigger is a constraint, user trigger name as constraint
- * name and build a unique trigger name instead.
- * ----------
+ /*
+	 * If trigger is a constraint, use trigger name as constraint name
+ * and build a unique trigger name instead.
*/
if (stmt->isconstraint)
{
@@ -413,15 +412,14 @@ RelationRemoveTriggers(Relation rel)
heap_endscan(tgscan);
- /* ----------
- * If we deleted any triggers, must update pg_class entry and
- * advance command counter to make the updated entry visible.
- * This is fairly annoying, since we'e just going to drop the
- * durn thing later, but it's necessary to have a consistent
- * state in case we do CommandCounterIncrement() below ---
- * if RelationBuildTriggers() runs, it will complain otherwise.
- * Perhaps RelationBuildTriggers() shouldn't be so picky...
- * ----------
+ /*
+ * If we deleted any triggers, must update pg_class entry and advance
+ * command counter to make the updated entry visible. This is fairly
+	 * annoying, since we're just going to drop the durn thing later, but
+ * it's necessary to have a consistent state in case we do
+ * CommandCounterIncrement() below --- if RelationBuildTriggers()
+ * runs, it will complain otherwise. Perhaps RelationBuildTriggers()
+ * shouldn't be so picky...
*/
if (found)
{
@@ -446,9 +444,8 @@ RelationRemoveTriggers(Relation rel)
CommandCounterIncrement();
}
- /* ----------
+ /*
* Also drop all constraint triggers referencing this relation
- * ----------
*/
ScanKeyEntryInitialize(&key, 0, Anum_pg_trigger_tgconstrrelid,
F_OIDEQ, RelationGetRelid(rel));
@@ -473,12 +470,11 @@ RelationRemoveTriggers(Relation rel)
DropTrigger(&stmt);
- /* ----------
- * Need to do a command counter increment here to show up
- * new pg_class.reltriggers in the next loop iteration
- * (in case there are multiple referential integrity action
- * triggers for the same FK table defined on the PK table).
- * ----------
+ /*
+		 * Need to do a command counter increment here so that the new
+		 * pg_class.reltriggers shows up in the next loop iteration (in case
+ * are multiple referential integrity action triggers for the same
+ * FK table defined on the PK table).
*/
CommandCounterIncrement();
@@ -1182,18 +1178,15 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
List *sl;
DeferredTriggerStatus trigstate;
- /* ----------
- * Not deferrable triggers (i.e. normal AFTER ROW triggers
- * and constraints declared NOT DEFERRABLE, the state is
- * allways false.
- * ----------
+ /*
+	 * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
+	 * constraints declared NOT DEFERRABLE), the state is always false.
*/
if ((itemstate & TRIGGER_DEFERRED_DEFERRABLE) == 0)
return false;
- /* ----------
+ /*
* Lookup if we know an individual state for this trigger
- * ----------
*/
foreach(sl, deftrig_trigstates)
{
@@ -1202,19 +1195,16 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
return trigstate->dts_tgisdeferred;
}
- /* ----------
- * No individual state known - so if the user issued a
- * SET CONSTRAINT ALL ..., we return that instead of the
- * triggers default state.
- * ----------
+ /*
+ * No individual state known - so if the user issued a SET CONSTRAINT
+	 * ALL ..., we return that instead of the trigger's default state.
*/
if (deftrig_all_isset)
return deftrig_all_isdeferred;
- /* ----------
- * No ALL state known either, remember the default state
- * as the current and return that.
- * ----------
+ /*
+ * No ALL state known either, remember the default state as the
+ * current and return that.
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
@@ -1319,9 +1309,8 @@ deferredTriggerExecute(DeferredTriggerEvent event, int itemno,
Buffer oldbuffer;
Buffer newbuffer;
- /* ----------
+ /*
* Open the heap and fetch the required OLD and NEW tuples.
- * ----------
*/
rel = heap_open(event->dte_relid, NoLock);
@@ -1341,9 +1330,8 @@ deferredTriggerExecute(DeferredTriggerEvent event, int itemno,
elog(ERROR, "deferredTriggerExecute: failed to fetch new tuple");
}
- /* ----------
+ /*
* Setup the trigger information
- * ----------
*/
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
@@ -1374,10 +1362,9 @@ deferredTriggerExecute(DeferredTriggerEvent event, int itemno,
break;
}
- /* ----------
- * Call the trigger and throw away an eventually returned
- * updated tuple.
- * ----------
+ /*
+	 * Call the trigger and throw away any updated tuple it may
+	 * return.
*/
rettuple = ExecCallTriggerFunc(LocTriggerData.tg_trigger,
&LocTriggerData,
@@ -1385,16 +1372,14 @@ deferredTriggerExecute(DeferredTriggerEvent event, int itemno,
if (rettuple != NULL && rettuple != &oldtuple && rettuple != &newtuple)
heap_freetuple(rettuple);
- /* ----------
- * Might have been a referential integrity constraint trigger.
- * Reset the snapshot overriding flag.
- * ----------
+ /*
+ * Might have been a referential integrity constraint trigger. Reset
+ * the snapshot overriding flag.
*/
ReferentialIntegritySnapshotOverride = false;
- /* ----------
+ /*
* Release buffers and close the relation
- * ----------
*/
if (ItemPointerIsValid(&(event->dte_oldctid)))
ReleaseBuffer(oldbuffer);
@@ -1420,14 +1405,13 @@ deferredTriggerInvokeEvents(bool immediate_only)
int i;
MemoryContext per_tuple_context;
- /* ----------
- * For now we process all events - to speedup transaction blocks
- * we need to remember the actual end of the queue at EndQuery
- * and process only events that are newer. On state changes we
- * simply reset the position to the beginning of the queue and
- * process all events once with the new states when the
- * SET CONSTRAINTS ... command finishes and calls EndQuery.
- * ----------
+ /*
+	 * For now we process all events - to speed up transaction blocks we
+ * need to remember the actual end of the queue at EndQuery and
+ * process only events that are newer. On state changes we simply
+ * reset the position to the beginning of the queue and process all
+ * events once with the new states when the SET CONSTRAINTS ...
+ * command finishes and calls EndQuery.
*/
/* Make a per-tuple memory context for trigger function calls */
@@ -1440,9 +1424,9 @@ deferredTriggerInvokeEvents(bool immediate_only)
for (event = deftrig_events; event != NULL; event = event->dte_next)
{
- /* ----------
+
+ /*
* Check if event is completely done.
- * ----------
*/
if (event->dte_event & (TRIGGER_DEFERRED_DONE |
TRIGGER_DEFERRED_CANCELED))
@@ -1450,9 +1434,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
MemoryContextReset(per_tuple_context);
- /* ----------
+ /*
* Check each trigger item in the event.
- * ----------
*/
still_deferred_ones = false;
for (i = 0; i < event->dte_n_items; i++)
@@ -1460,10 +1443,9 @@ deferredTriggerInvokeEvents(bool immediate_only)
if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE)
continue;
- /* ----------
- * This trigger item hasn't been called yet. Check if
- * we should call it now.
- * ----------
+ /*
+ * This trigger item hasn't been called yet. Check if we
+ * should call it now.
*/
if (immediate_only && deferredTriggerCheckState(
event->dte_item[i].dti_tgoid,
@@ -1473,18 +1455,15 @@ deferredTriggerInvokeEvents(bool immediate_only)
continue;
}
- /* ----------
+ /*
* So let's fire it...
- * ----------
*/
deferredTriggerExecute(event, i, per_tuple_context);
event->dte_item[i].dti_state |= TRIGGER_DEFERRED_DONE;
}
- /* ----------
- * Remember in the event itself if all trigger items are
- * done.
- * ----------
+ /*
+ * Remember in the event itself if all trigger items are done.
*/
if (!still_deferred_ones)
event->dte_event |= TRIGGER_DEFERRED_DONE;
@@ -1532,10 +1511,9 @@ DeferredTriggerBeginXact(void)
elog(ERROR,
"DeferredTriggerBeginXact() called while inside transaction");
- /* ----------
- * Create the per transaction memory context and copy all states
- * from the per session context to here.
- * ----------
+ /*
+ * Create the per transaction memory context and copy all states from
+ * the per session context to here.
*/
deftrig_cxt = AllocSetContextCreate(TopTransactionContext,
"DeferredTriggerXact",
@@ -1578,9 +1556,9 @@ DeferredTriggerBeginXact(void)
void
DeferredTriggerEndQuery(void)
{
- /* ----------
+
+ /*
* Ignore call if we aren't in a transaction.
- * ----------
*/
if (deftrig_cxt == NULL)
return;
@@ -1599,9 +1577,9 @@ DeferredTriggerEndQuery(void)
void
DeferredTriggerEndXact(void)
{
- /* ----------
+
+ /*
* Ignore call if we aren't in a transaction.
- * ----------
*/
if (deftrig_cxt == NULL)
return;
@@ -1624,9 +1602,9 @@ DeferredTriggerEndXact(void)
void
DeferredTriggerAbortXact(void)
{
- /* ----------
+
+ /*
* Ignore call if we aren't in a transaction.
- * ----------
*/
if (deftrig_cxt == NULL)
return;
@@ -1655,20 +1633,19 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
DeferredTriggerStatus state;
bool hasindex;
- /* ----------
+ /*
* Handle SET CONSTRAINTS ALL ...
- * ----------
*/
if (stmt->constraints == NIL)
{
if (!IsTransactionBlock())
{
- /* ----------
+
+ /*
* ... outside of a transaction block
*
* Drop all information about individual trigger states per
* session.
- * ----------
*/
l = deftrig_dfl_trigstates;
while (l != NIL)
@@ -1681,9 +1658,8 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
}
deftrig_dfl_trigstates = NIL;
- /* ----------
+ /*
* Set the session ALL state to known.
- * ----------
*/
deftrig_dfl_all_isset = true;
deftrig_dfl_all_isdeferred = stmt->deferred;
@@ -1692,12 +1668,12 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
}
else
{
- /* ----------
+
+ /*
* ... inside of a transaction block
*
* Drop all information about individual trigger states per
* transaction.
- * ----------
*/
l = deftrig_trigstates;
while (l != NIL)
@@ -1710,9 +1686,8 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
}
deftrig_trigstates = NIL;
- /* ----------
+ /*
* Set the per transaction ALL state to known.
- * ----------
*/
deftrig_all_isset = true;
deftrig_all_isdeferred = stmt->deferred;
@@ -1743,16 +1718,14 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
Form_pg_trigger pg_trigger;
Oid constr_oid;
- /* ----------
+ /*
* Check that only named constraints are set explicitly
- * ----------
*/
if (strcmp((char *) lfirst(l), "") == 0)
elog(ERROR, "unnamed constraints cannot be set explicitly");
- /* ----------
+ /*
* Setup to scan pg_trigger by tgconstrname ...
- * ----------
*/
ScanKeyEntryInitialize(&skey,
(bits16) 0x0,
@@ -1765,9 +1738,8 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
else
tgscan = heap_beginscan(tgrel, 0, SnapshotNow, 1, &skey);
- /* ----------
+ /*
* ... and search for the constraint trigger row
- * ----------
*/
found = false;
for (;;)
@@ -1792,11 +1764,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
break;
}
- /* ----------
- * If we found some, check that they fit the deferrability
- * but skip ON <event> RESTRICT ones, since they are silently
+ /*
+ * If we found some, check that they fit the deferrability but
+ * skip ON <event> RESTRICT ones, since they are silently
* never deferrable.
- * ----------
*/
pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
if (stmt->deferred && !pg_trigger->tgdeferrable &&
@@ -1813,9 +1784,8 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
ReleaseBuffer(buffer);
}
- /* ----------
+ /*
* Not found ?
- * ----------
*/
if (!found)
elog(ERROR, "Constraint '%s' does not exist", (char *) lfirst(l));
@@ -1831,10 +1801,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (!IsTransactionBlock())
{
- /* ----------
- * Outside of a transaction block set the trigger
- * states of individual triggers on session level.
- * ----------
+
+ /*
+ * Outside of a transaction block set the trigger states of
+ * individual triggers on session level.
*/
oldcxt = MemoryContextSwitchTo(deftrig_gcxt);
@@ -1869,10 +1839,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
}
else
{
- /* ----------
- * Inside of a transaction block set the trigger
- * states of individual triggers on transaction level.
- * ----------
+
+ /*
+ * Inside of a transaction block set the trigger states of
+ * individual triggers on transaction level.
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
@@ -1938,9 +1908,8 @@ DeferredTriggerSaveEvent(Relation rel, int event,
elog(ERROR,
"DeferredTriggerSaveEvent() called outside of transaction");
- /* ----------
+ /*
* Get the CTID's of OLD and NEW
- * ----------
*/
if (oldtup != NULL)
ItemPointerCopy(&(oldtup->t_self), &(oldctid));
@@ -1951,9 +1920,8 @@ DeferredTriggerSaveEvent(Relation rel, int event,
else
ItemPointerSetInvalid(&(newctid));
- /* ----------
+ /*
* Create a new event
- * ----------
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
@@ -1991,11 +1959,11 @@ DeferredTriggerSaveEvent(Relation rel, int event,
break;
case TRIGGER_EVENT_UPDATE:
- /* ----------
- * On UPDATE check if the tuple updated has been inserted
- * or a foreign referenced key value that's changing now
- * has been updated once before in this transaction.
- * ----------
+
+ /*
+ * On UPDATE check if the tuple updated has been inserted or a
+ * foreign referenced key value that's changing now has been
+ * updated once before in this transaction.
*/
if (oldtup->t_data->t_xmin != GetCurrentTransactionId())
prev_event = NULL;
@@ -2003,18 +1971,16 @@ DeferredTriggerSaveEvent(Relation rel, int event,
prev_event =
deferredTriggerGetPreviousEvent(rel->rd_id, &oldctid);
- /* ----------
+ /*
* Now check if one of the referenced keys is changed.
- * ----------
*/
for (i = 0; i < ntriggers; i++)
{
bool is_ri_trigger;
bool key_unchanged;
- /* ----------
+ /*
* We are interested in RI_FKEY triggers only.
- * ----------
*/
switch (triggers[i]->tgfoid)
{
@@ -2044,11 +2010,11 @@ DeferredTriggerSaveEvent(Relation rel, int event,
if (key_unchanged)
{
- /* ----------
+
+ /*
* The key hasn't changed, so no need later to invoke
* the trigger at all. But remember other states from
* the possible earlier event.
- * ----------
*/
new_event->dte_item[i].dti_state |= TRIGGER_DEFERRED_DONE;
@@ -2057,10 +2023,11 @@ DeferredTriggerSaveEvent(Relation rel, int event,
if (prev_event->dte_event &
TRIGGER_DEFERRED_ROW_INSERTED)
{
- /* ----------
- * This is a row inserted during our transaction.
- * So any key value is considered changed.
- * ----------
+
+ /*
+ * This is a row inserted during our
+ * transaction. So any key value is considered
+ * changed.
*/
new_event->dte_event |=
TRIGGER_DEFERRED_ROW_INSERTED;
@@ -2071,11 +2038,11 @@ DeferredTriggerSaveEvent(Relation rel, int event,
}
else
{
- /* ----------
- * This is a row, previously updated. So
- * if this key has been changed before, we
- * still remember that it happened.
- * ----------
+
+ /*
+ * This is a row, previously updated. So if
+ * this key has been changed before, we still
+ * remember that it happened.
*/
if (prev_event->dte_item[i].dti_state &
TRIGGER_DEFERRED_KEY_CHANGED)
@@ -2090,10 +2057,10 @@ DeferredTriggerSaveEvent(Relation rel, int event,
}
else
{
- /* ----------
+
+ /*
* Bomb out if this key has been changed before.
* Otherwise remember that we do so.
- * ----------
*/
if (prev_event)
{
@@ -2112,10 +2079,9 @@ DeferredTriggerSaveEvent(Relation rel, int event,
NameGetDatum(&(rel->rd_rel->relname)))));
}
- /* ----------
- * This is the first change to this key, so let
- * it happen.
- * ----------
+ /*
+ * This is the first change to this key, so let it
+ * happen.
*/
new_event->dte_item[i].dti_state |=
TRIGGER_DEFERRED_KEY_CHANGED;
@@ -2126,18 +2092,17 @@ DeferredTriggerSaveEvent(Relation rel, int event,
break;
case TRIGGER_EVENT_DELETE:
- /* ----------
- * On DELETE check if the tuple deleted has been inserted
- * or a possibly referenced key value has changed in this
+
+ /*
+ * On DELETE check if the tuple deleted has been inserted or a
+ * possibly referenced key value has changed in this
* transaction.
- * ----------
*/
if (oldtup->t_data->t_xmin != GetCurrentTransactionId())
break;
- /* ----------
+ /*
* Look at the previous event to the same tuple.
- * ----------
*/
prev_event = deferredTriggerGetPreviousEvent(rel->rd_id, &oldctid);
if (prev_event->dte_event & TRIGGER_DEFERRED_KEY_CHANGED)
@@ -2149,9 +2114,8 @@ DeferredTriggerSaveEvent(Relation rel, int event,
break;
}
- /* ----------
+ /*
* Anything's fine up to here. Add the new event to the queue.
- * ----------
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
deferredTriggerAddEvent(new_event);
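
The deferred-trigger comments above describe a queue of events, each carrying per-trigger-item state bits, that deferredTriggerInvokeEvents() walks at end of statement (immediate items only) and again at end of transaction. A small standalone sketch of that walk, with toy structures and flags standing in for the real DeferredTriggerEvent machinery:

    #include <stdio.h>

    /* toy flags and event record; the real DeferredTriggerEvent carries much
     * more (relation, ctids, trigger oids, per-item TRIGGER_DEFERRED_* bits) */
    #define ITEM_DONE      0x01
    #define ITEM_DEFERRED  0x02
    #define MAX_ITEMS      4

    typedef struct Event
    {
        int           nitems;
        int           state[MAX_ITEMS];
        struct Event *next;
    } Event;

    /* walk the queue as the comments describe: skip finished items, postpone
     * still-deferred ones when only immediate triggers should fire, otherwise
     * "fire" the item and mark it done */
    static void
    invoke_events(Event *queue, int immediate_only)
    {
        Event *ev;

        for (ev = queue; ev != NULL; ev = ev->next)
        {
            int i;

            for (i = 0; i < ev->nitems; i++)
            {
                if (ev->state[i] & ITEM_DONE)
                    continue;
                if (immediate_only && (ev->state[i] & ITEM_DEFERRED))
                    continue;           /* left for end of transaction */
                printf("firing trigger item %d\n", i);
                ev->state[i] |= ITEM_DONE;
            }
        }
    }

    int
    main(void)
    {
        Event second = {2, {ITEM_DEFERRED, 0}, NULL};
        Event first = {1, {0}, &second};

        invoke_events(&first, 1);   /* end of statement: immediate items only */
        invoke_events(&first, 0);   /* end of transaction: everything left */
        return 0;
    }
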
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index ede41b64cc8..cb429081521 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.75 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -538,14 +538,14 @@ DropUser(DropUserStmt *stmt)
usesysid = DatumGetInt32(heap_getattr(tuple, Anum_pg_shadow_usesysid, pg_shadow_dsc, &null));
- /*-------------------
+ /*
* Check if user still owns a database. If so, error out.
*
- * (It used to be that this function would drop the database automatically.
- * This is not only very dangerous for people that don't read the manual,
- * it doesn't seem to be the behaviour one would expect either.)
- * -- petere 2000/01/14)
- *-------------------*/
+ * (It used to be that this function would drop the database
+ * automatically. This is not only very dangerous for people that
+ * don't read the manual, it doesn't seem to be the behaviour one
+	 * would expect either.) -- petere 2000/01/14
+ */
pg_rel = heap_openr(DatabaseRelationName, AccessExclusiveLock);
pg_dsc = RelationGetDescr(pg_rel);
diff --git a/src/backend/executor/_deadcode/nodeTee.c b/src/backend/executor/_deadcode/nodeTee.c
index 955a52b1070..5ec43e377e4 100644
--- a/src/backend/executor/_deadcode/nodeTee.c
+++ b/src/backend/executor/_deadcode/nodeTee.c
@@ -15,7 +15,7 @@
* ExecInitTee
* ExecEndTee
*
- * $Id: nodeTee.c,v 1.9 2001/01/24 19:42:55 momjian Exp $
+ * $Id: nodeTee.c,v 1.10 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,9 +60,8 @@ ExecInitTee(Tee * node, EState *currentEstate, Plan *parent)
if (node->plan.state)
return TRUE;
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
/*
@@ -93,9 +92,8 @@ ExecInitTee(Tee * node, EState *currentEstate, Plan *parent)
node->plan.state = estate;
- /* ----------------
+ /*
* create teeState structure
- * ----------------
*/
teeState = makeNode(TeeState);
teeState->tee_leftPlace = 0;
@@ -120,9 +118,9 @@ ExecInitTee(Tee * node, EState *currentEstate, Plan *parent)
ExecAssignExprContext(estate, &(teeState->cstate));
#define TEE_NSLOTS 2
- /* ----------------
- * initialize tuple slots
- * ----------------
+
+ /*
+ * initialize tuple slots
*/
ExecInitResultTupleSlot(estate, &(teeState->cstate));
@@ -130,16 +128,16 @@ ExecInitTee(Tee * node, EState *currentEstate, Plan *parent)
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * the tuple type info is from the outer plan of this node
- * the result type is also the same as the outerplan
+ /*
+	 * the tuple type info is from the outer plan of this node; the result
+	 * type is also the same as the outer plan's
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &(teeState->cstate));
ExecAssignProjectionInfo((Plan *) node, &teeState->cstate);
- /* ---------------------------------------
- initialize temporary relation to buffer tuples
- */
+ /*
+ * initialize temporary relation to buffer tuples
+ */
tupType = ExecGetResultType(&(teeState->cstate));
len = ExecTargetListLength(((Plan *) node)->targetlist);
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index a9c5bd40372..3c0e029fd3c 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: execAmi.c,v 1.57 2001/03/22 03:59:25 momjian Exp $
+ * $Id: execAmi.c,v 1.58 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,23 +78,20 @@ ExecOpenScanR(Oid relOid,
Relation relation;
Pointer scanDesc;
- /* ----------------
- * note: scanDesc returned by ExecBeginScan can be either
- * a HeapScanDesc or an IndexScanDesc so for now we
- * make it a Pointer. There should be a better scan
- * abstraction someday -cim 9/9/89
- * ----------------
+ /*
+ * note: scanDesc returned by ExecBeginScan can be either a
+ * HeapScanDesc or an IndexScanDesc so for now we make it a Pointer.
+ * There should be a better scan abstraction someday -cim 9/9/89
*/
- /* ----------------
- * Open the relation with the correct call depending
- * on whether this is a heap relation or an index relation.
+ /*
+ * Open the relation with the correct call depending on whether this
+ * is a heap relation or an index relation.
*
- * For a table, acquire AccessShareLock for the duration of the query
- * execution. For indexes, acquire no lock here; the index machinery
- * does its own locks and unlocks. (We rely on having some kind of
- * lock on the parent table to ensure the index won't go away!)
- * ----------------
+ * For a table, acquire AccessShareLock for the duration of the query
+ * execution. For indexes, acquire no lock here; the index machinery
+ * does its own locks and unlocks. (We rely on having some kind of
+ * lock on the parent table to ensure the index won't go away!)
*/
if (isindex)
relation = index_open(relOid);
@@ -136,13 +133,12 @@ ExecBeginScan(Relation relation,
{
Pointer scanDesc;
- /* ----------------
- * open the appropriate type of scan.
+ /*
+ * open the appropriate type of scan.
*
- * Note: ambeginscan()'s second arg is a boolean indicating
- * that the scan should be done in reverse.. That is,
- * if you pass it true, then the scan is backward.
- * ----------------
+ * Note: ambeginscan()'s second arg is a boolean indicating that the scan
+	 * should be done in reverse. That is, if you pass it true, then the
+ * scan is backward.
*/
if (isindex)
{
@@ -180,9 +176,8 @@ ExecCloseR(Plan *node)
Relation relation;
HeapScanDesc scanDesc;
- /* ----------------
- * get state for node and shut down the heap scan, if any
- * ----------------
+ /*
+ * get state for node and shut down the heap scan, if any
*/
switch (nodeTag(node))
{
@@ -209,10 +204,9 @@ ExecCloseR(Plan *node)
if (scanDesc != NULL)
heap_endscan(scanDesc);
- /* ----------------
- * if this is an index scan then we have to take care
- * of the index relations as well.
- * ----------------
+ /*
+ * if this is an index scan then we have to take care of the index
+ * relations as well.
*/
if (IsA(node, IndexScan))
{
@@ -229,10 +223,10 @@ ExecCloseR(Plan *node)
for (i = 0; i < numIndices; i++)
{
- /* ----------------
- * shut down each of the index scans and
- * close each of the index relations
- * ----------------
+
+ /*
+ * shut down each of the index scans and close each of the
+ * index relations
*/
if (indexScanDescs[i] != NULL)
index_endscan(indexScanDescs[i]);
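
The ExecOpenScanR comment above draws a locking distinction: a table is opened with AccessShareLock held for the duration of the query, while an index is opened with no lock here because the index machinery does its own locking. A standalone sketch of that branch, using stub functions since the real heap_open/index_open calls live inside the backend:

    #include <stdio.h>

    typedef unsigned int Oid;

    /* stand-ins for heap_open()/index_open(); the real ones return a Relation */
    static void
    open_table(Oid relid, const char *lockmode)
    {
        printf("table %u opened with %s, held for the whole query\n",
               relid, lockmode);
    }

    static void
    open_index(Oid relid)
    {
        printf("index %u opened with no lock; the index AM locks internally\n",
               relid);
    }

    /* mirrors the branch described in the ExecOpenScanR comment */
    static void
    open_scan_relation(Oid relOid, int isindex)
    {
        if (isindex)
            open_index(relOid);
        else
            open_table(relOid, "AccessShareLock");
    }

    int
    main(void)
    {
        open_scan_relation(1259, 0);    /* a table */
        open_scan_relation(2662, 1);    /* an index */
        return 0;
    }
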
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index d288a8de735..f716c688101 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.26 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.27 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -90,12 +90,10 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
ALLOCSET_DEFAULT_MAXSIZE);
oldContext = MemoryContextSwitchTo(junkContext);
- /* ---------------------
- * First find the "clean" target list, i.e. all the entries
- * in the original target list which have a false 'resjunk'
- * NOTE: make copy of the Resdom nodes, because we have
- * to change the 'resno's...
- * ---------------------
+ /*
+ * First find the "clean" target list, i.e. all the entries in the
+	 * original target list which have a false 'resjunk'. NOTE: make a copy
+ * of the Resdom nodes, because we have to change the 'resno's...
*/
cleanTargetList = NIL;
cleanResno = 1;
@@ -167,25 +165,23 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
}
}
- /* ---------------------
+ /*
* Now calculate the tuple type for the cleaned tuple (we were already
* given the type for the original targetlist).
- * ---------------------
*/
cleanTupType = ExecTypeFromTL(cleanTargetList);
len = ExecTargetListLength(targetList);
cleanLength = ExecTargetListLength(cleanTargetList);
- /* ---------------------
- * Now calculate the "map" between the original tuple's attributes
- * and the "clean" tuple's attributes.
+ /*
+ * Now calculate the "map" between the original tuple's attributes and
+ * the "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e.
- * one entry for every attribute of the "clean" tuple.
- * The value of this entry is the attribute number of the corresponding
- * attribute of the "original" tuple.
- * ---------------------
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one
+ * entry for every attribute of the "clean" tuple. The value of this
+ * entry is the attribute number of the corresponding attribute of the
+ * "original" tuple.
*/
if (cleanLength > 0)
{
@@ -236,9 +232,8 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
else
cleanMap = NULL;
- /* ---------------------
+ /*
* Finally create and initialize the JunkFilter struct.
- * ---------------------
*/
junkfilter = makeNode(JunkFilter);
@@ -298,10 +293,9 @@ ExecGetJunkAttribute(JunkFilter *junkfilter,
TupleDesc tupType;
HeapTuple tuple;
- /* ---------------------
- * first look in the junkfilter's target list for
- * an attribute with the given name
- * ---------------------
+ /*
+ * first look in the junkfilter's target list for an attribute with
+ * the given name
*/
resno = InvalidAttrNumber;
targetList = junkfilter->jf_targetList;
@@ -327,9 +321,8 @@ ExecGetJunkAttribute(JunkFilter *junkfilter,
return false;
}
- /* ---------------------
+ /*
* Now extract the attribute value from the tuple.
- * ---------------------
*/
tuple = slot->val;
tupType = junkfilter->jf_tupType;
@@ -361,9 +354,8 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
Datum values_array[64];
char nulls_array[64];
- /* ----------------
- * get info from the slot and the junk filter
- * ----------------
+ /*
+ * get info from the slot and the junk filter
*/
tuple = slot->val;
@@ -372,21 +364,19 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
cleanLength = junkfilter->jf_cleanLength;
cleanMap = junkfilter->jf_cleanMap;
- /* ---------------------
- * Handle the trivial case first.
- * ---------------------
+ /*
+ * Handle the trivial case first.
*/
if (cleanLength == 0)
return (HeapTuple) NULL;
- /* ---------------------
- * Create the arrays that will hold the attribute values
- * and the null information for the new "clean" tuple.
+ /*
+ * Create the arrays that will hold the attribute values and the null
+ * information for the new "clean" tuple.
*
- * Note: we use memory on the stack to optimize things when
- * we are dealing with a small number of tuples.
- * for large tuples we just use palloc.
- * ---------------------
+ * Note: we use memory on the stack to optimize things when we are
+	 * dealing with a small number of tuples. For large tuples we just use
+ * palloc.
*/
if (cleanLength > 64)
{
@@ -399,9 +389,8 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
nulls = nulls_array;
}
- /* ---------------------
+ /*
+	 * Extract one by one all the values of the "clean" tuple.
- * ---------------------
*/
for (i = 0; i < cleanLength; i++)
{
@@ -413,18 +402,16 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
nulls[i] = ' ';
}
- /* ---------------------
+ /*
* Now form the new tuple.
- * ---------------------
*/
cleanTuple = heap_formtuple(cleanTupType,
values,
nulls);
- /* ---------------------
- * We are done. Free any space allocated for 'values' and 'nulls'
- * and return the new tuple.
- * ---------------------
+ /*
+ * We are done. Free any space allocated for 'values' and 'nulls' and
+ * return the new tuple.
*/
if (cleanLength > 64)
{
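
The junk-filter comments above describe cleanMap as an array with one entry per attribute of the "clean" tuple, each entry holding the attribute number of the corresponding column of the original tuple; ExecRemoveJunk then pulls values through that map and forms a new tuple. A standalone sketch of the mapping step, with invented column names and 1-based attribute numbers as in the comment:

    #include <stdio.h>

    int
    main(void)
    {
        /* an invented 5-column "original" row; columns 2 and 4 are junk */
        const char *original[] = {"id", "ctid (junk)", "name", "oid (junk)", "price"};
        int         cleanMap[] = {1, 3, 5};     /* clean attno -> original attno */
        int         cleanLength = 3;
        int         i;

        /* the ExecRemoveJunk step: pull every clean attribute out of the
         * original row through the map; a real tuple would then be formed */
        for (i = 0; i < cleanLength; i++)
            printf("clean attribute %d = %s\n", i + 1, original[cleanMap[i] - 1]);

        return 0;
    }
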
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index bc1b6d0b2f7..79308ba4efb 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.25 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.26 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -117,10 +117,8 @@ ExecInitNode(Plan *node, EState *estate, Plan *parent)
bool result;
List *subp;
- /* ----------------
- * do nothing when we get to the end
- * of a leaf on tree.
- * ----------------
+ /*
+	 * do nothing when we get to the end of a leaf of the tree.
*/
if (node == NULL)
return FALSE;
@@ -134,9 +132,9 @@ ExecInitNode(Plan *node, EState *estate, Plan *parent)
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
result = ExecInitResult((Result *) node, estate, parent);
@@ -146,9 +144,8 @@ ExecInitNode(Plan *node, EState *estate, Plan *parent)
result = ExecInitAppend((Append *) node, estate, parent);
break;
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
result = ExecInitSeqScan((SeqScan *) node, estate, parent);
@@ -167,9 +164,8 @@ ExecInitNode(Plan *node, EState *estate, Plan *parent)
parent);
break;
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
result = ExecInitNestLoop((NestLoop *) node, estate, parent);
@@ -187,9 +183,8 @@ ExecInitNode(Plan *node, EState *estate, Plan *parent)
result = ExecInitHashJoin((HashJoin *) node, estate, parent);
break;
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
result = ExecInitMaterial((Material *) node, estate, parent);
@@ -253,9 +248,8 @@ ExecProcNode(Plan *node, Plan *parent)
CHECK_FOR_INTERRUPTS();
- /* ----------------
- * deal with NULL nodes..
- * ----------------
+ /*
+ * deal with NULL nodes..
*/
if (node == NULL)
return NULL;
@@ -265,9 +259,9 @@ ExecProcNode(Plan *node, Plan *parent)
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
result = ExecResult((Result *) node);
@@ -277,9 +271,8 @@ ExecProcNode(Plan *node, Plan *parent)
result = ExecProcAppend((Append *) node);
break;
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
result = ExecSeqScan((SeqScan *) node);
@@ -297,9 +290,8 @@ ExecProcNode(Plan *node, Plan *parent)
result = ExecSubqueryScan((SubqueryScan *) node);
break;
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
result = ExecNestLoop((NestLoop *) node);
@@ -317,9 +309,8 @@ ExecProcNode(Plan *node, Plan *parent)
result = ExecHashJoin((HashJoin *) node);
break;
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
result = ExecMaterial((Material *) node);
@@ -366,9 +357,9 @@ ExecCountSlotsNode(Plan *node)
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
return ExecCountSlotsResult((Result *) node);
@@ -376,9 +367,8 @@ ExecCountSlotsNode(Plan *node)
case T_Append:
return ExecCountSlotsAppend((Append *) node);
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
return ExecCountSlotsSeqScan((SeqScan *) node);
@@ -392,9 +382,8 @@ ExecCountSlotsNode(Plan *node)
case T_SubqueryScan:
return ExecCountSlotsSubqueryScan((SubqueryScan *) node);
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
return ExecCountSlotsNestLoop((NestLoop *) node);
@@ -408,9 +397,8 @@ ExecCountSlotsNode(Plan *node)
case T_HashJoin:
return ExecCountSlotsHashJoin((HashJoin *) node);
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
return ExecCountSlotsMaterial((Material *) node);
@@ -457,10 +445,8 @@ ExecEndNode(Plan *node, Plan *parent)
{
List *subp;
- /* ----------------
- * do nothing when we get to the end
- * of a leaf on tree.
- * ----------------
+ /*
+	 * do nothing when we get to the end of a leaf of the tree.
*/
if (node == NULL)
return;
@@ -477,9 +463,9 @@ ExecEndNode(Plan *node, Plan *parent)
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
ExecEndResult((Result *) node);
@@ -489,9 +475,8 @@ ExecEndNode(Plan *node, Plan *parent)
ExecEndAppend((Append *) node);
break;
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
ExecEndSeqScan((SeqScan *) node);
@@ -509,9 +494,8 @@ ExecEndNode(Plan *node, Plan *parent)
ExecEndSubqueryScan((SubqueryScan *) node);
break;
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
ExecEndNestLoop((NestLoop *) node);
@@ -529,9 +513,8 @@ ExecEndNode(Plan *node, Plan *parent)
ExecEndHashJoin((HashJoin *) node);
break;
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
ExecEndMaterial((Material *) node);
@@ -726,9 +709,9 @@ ExecGetTupType(Plan *node)
break;
default:
- /* ----------------
- * should never get here
- * ----------------
+
+ /*
+ * should never get here
*/
elog(ERROR, "ExecGetTupType: node type %d unsupported",
(int) nodeTag(node));
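
The ExecInitNode / ExecProcNode / ExecCountSlotsNode / ExecEndNode switches being reindented above all share one shape: dispatch on the node tag, grouped into control, scan, join and materialization nodes, with an elog() for anything unexpected. A minimal standalone sketch of that tag-dispatch pattern (all Demo* names are hypothetical and not part of this patch):

#include <stdio.h>

typedef enum { T_Demo_SeqScan, T_Demo_NestLoop, T_Demo_Material } DemoTag;

typedef struct DemoPlan { DemoTag tag; } DemoPlan;

static const char *DemoSeqScan(DemoPlan *p)  { (void) p; return "seqscan tuple"; }
static const char *DemoNestLoop(DemoPlan *p) { (void) p; return "nestloop tuple"; }
static const char *DemoMaterial(DemoPlan *p) { (void) p; return "material tuple"; }

/*
 * One switch per entry point keeps every executor operation in a single,
 * easily audited place; an unknown tag is reported, never silently ignored.
 */
static const char *DemoProcNode(DemoPlan *node)
{
    /* deal with NULL nodes, as ExecProcNode does */
    if (node == NULL)
        return NULL;

    switch (node->tag)
    {
        case T_Demo_SeqScan:
            return DemoSeqScan(node);
        case T_Demo_NestLoop:
            return DemoNestLoop(node);
        case T_Demo_Material:
            return DemoMaterial(node);
        default:
            fprintf(stderr, "DemoProcNode: node type %d unsupported\n",
                    (int) node->tag);
            return NULL;
    }
}

int main(void)
{
    DemoPlan scan = {T_Demo_SeqScan};

    printf("%s\n", DemoProcNode(&scan));
    return 0;
}
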
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 58a3b5edea4..51a05a6457e 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.16 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.17 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,20 +55,18 @@ ExecScan(Scan *node,
ExprDoneCond isDone;
TupleTableSlot *resultSlot;
- /* ----------------
- * Fetch data from node
- * ----------------
+ /*
+ * Fetch data from node
*/
estate = node->plan.state;
scanstate = node->scanstate;
econtext = scanstate->cstate.cs_ExprContext;
qual = node->plan.qual;
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * scan tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * scan tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (scanstate->cstate.cs_TupFromTlist)
{
@@ -79,11 +77,10 @@ ExecScan(Scan *node,
scanstate->cstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a scan tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a scan tuple.
*/
ResetExprContext(econtext);
@@ -97,12 +94,11 @@ ExecScan(Scan *node,
slot = (*accessMtd) (node);
- /* ----------------
- * if the slot returned by the accessMtd contains
- * NULL, then it means there is nothing more to scan
- * so we just return an empty slot, being careful to use
- * the projection result slot so it has correct tupleDesc.
- * ----------------
+ /*
+ * if the slot returned by the accessMtd contains NULL, then it
+ * means there is nothing more to scan so we just return an empty
+ * slot, being careful to use the projection result slot so it has
+ * correct tupleDesc.
*/
if (TupIsNull(slot))
{
@@ -112,29 +108,27 @@ ExecScan(Scan *node,
true);
}
- /* ----------------
- * place the current tuple into the expr context
- * ----------------
+ /*
+ * place the current tuple into the expr context
*/
econtext->ecxt_scantuple = slot;
- /* ----------------
- * check that the current tuple satisfies the qual-clause
+ /*
+ * check that the current tuple satisfies the qual-clause
*
- * check for non-nil qual here to avoid a function call to
- * ExecQual() when the qual is nil ... saves only a few cycles,
- * but they add up ...
- * ----------------
+ * check for non-nil qual here to avoid a function call to ExecQual()
+ * when the qual is nil ... saves only a few cycles, but they add
+ * up ...
*/
if (!qual || ExecQual(qual, econtext, false))
{
- /* ----------------
- * Found a satisfactory scan tuple.
+
+ /*
+ * Found a satisfactory scan tuple.
*
- * Form a projection tuple, store it in the result tuple
- * slot and return it --- unless we find we can project no
- * tuples from this scan tuple, in which case continue scan.
- * ----------------
+ * Form a projection tuple, store it in the result tuple slot and
+ * return it --- unless we find we can project no tuples from
+ * this scan tuple, in which case continue scan.
*/
resultSlot = ExecProject(scanstate->cstate.cs_ProjInfo, &isDone);
if (isDone != ExprEndResult)
@@ -144,9 +138,8 @@ ExecScan(Scan *node,
}
}
- /* ----------------
- * Tuple fails qual, so free per-tuple memory and try again.
- * ----------------
+ /*
+ * Tuple fails qual, so free per-tuple memory and try again.
*/
ResetExprContext(econtext);
}
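
The rewritten ExecScan comments above describe one loop: fetch a tuple through the access method, skip it when the qual fails (resetting per-tuple memory each time), and project a result when it passes. A minimal standalone sketch of that loop, with hypothetical Demo* names standing in for the access method and qual, and projection reduced to returning the tuple:

#include <stdio.h>
#include <stdbool.h>

typedef struct DemoTuple { int key; int payload; } DemoTuple;

typedef const DemoTuple *(*DemoAccessMtd) (void *state);
typedef bool (*DemoQual) (const DemoTuple *tup);

static const DemoTuple *
demo_scan(void *state, DemoAccessMtd access, DemoQual qual)
{
    for (;;)
    {
        const DemoTuple *tup = access(state);

        if (tup == NULL)
            return NULL;        /* nothing more to scan: return "empty slot" */
        if (qual == NULL || qual(tup))   /* skip the call when qual is NULL */
            return tup;         /* projection of the result would happen here */
        /* tuple fails qual: per-tuple storage would be reset, then try again */
    }
}

/* toy access method: walks a static array, one tuple per call */
static const DemoTuple *demo_next(void *state)
{
    static const DemoTuple data[] = {{1, 10}, {2, 20}, {3, 30}};
    int *pos = (int *) state;

    return (*pos < 3) ? &data[(*pos)++] : NULL;
}

static bool demo_even_key(const DemoTuple *tup) { return tup->key % 2 == 0; }

int main(void)
{
    int pos = 0;
    const DemoTuple *t = demo_scan(&pos, demo_next, demo_even_key);

    if (t)
        printf("matched key=%d payload=%d\n", t->key, t->payload);
    return 0;
}
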
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 3e75aef337c..b6f7b1ee6ff 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.47 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.48 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -134,29 +134,26 @@ ExecCreateTupleTable(int initialSize) /* initial number of slots in
TupleTable newtable; /* newly allocated table */
TupleTableSlot *array; /* newly allocated slot array */
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(initialSize >= 1);
- /* ----------------
- * Now allocate our new table along with space for the pointers
- * to the tuples.
+ /*
+ * Now allocate our new table along with space for the pointers to the
+ * tuples.
*/
newtable = (TupleTable) palloc(sizeof(TupleTableData));
array = (TupleTableSlot *) palloc(initialSize * sizeof(TupleTableSlot));
- /* ----------------
- * clean out the slots we just allocated
- * ----------------
+ /*
+ * clean out the slots we just allocated
*/
MemSet(array, 0, initialSize * sizeof(TupleTableSlot));
- /* ----------------
- * initialize the new table and return it to the caller.
- * ----------------
+ /*
+ * initialize the new table and return it to the caller.
*/
newtable->size = initialSize;
newtable->next = 0;
@@ -182,25 +179,22 @@ ExecDropTupleTable(TupleTable table, /* tuple table */
TupleTableSlot *array; /* start of table array */
int i; /* counter */
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(table != NULL);
- /* ----------------
- * get information from the table
- * ----------------
+ /*
+ * get information from the table
*/
array = table->array;
next = table->next;
- /* ----------------
- * first free all the valid pointers in the tuple array
- * and drop refcounts of any referenced buffers,
- * if that's what the caller wants. (There is probably
- * no good reason for the caller ever not to want it!)
- * ----------------
+ /*
+ * first free all the valid pointers in the tuple array and drop
+ * refcounts of any referenced buffers, if that's what the caller
+ * wants. (There is probably no good reason for the caller ever not
+ * to want it!)
*/
if (shouldFree)
{
@@ -213,9 +207,8 @@ ExecDropTupleTable(TupleTable table, /* tuple table */
}
}
- /* ----------------
- * finally free the tuple array and the table itself.
- * ----------------
+ /*
+ * finally free the tuple array and the table itself.
*/
pfree(array);
pfree(table);
@@ -242,36 +235,32 @@ ExecAllocTableSlot(TupleTable table)
int slotnum; /* new slot number */
TupleTableSlot *slot;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(table != NULL);
- /* ----------------
- * if our table is full we have to allocate a larger
- * size table. Since ExecAllocTableSlot() is only called
- * before the table is ever used to store tuples, we don't
- * have to worry about the contents of the old table.
- * If this changes, then we will have to preserve the contents.
- * -cim 6/23/90
+ /*
+ * if our table is full we have to allocate a larger size table.
+ * Since ExecAllocTableSlot() is only called before the table is ever
+ * used to store tuples, we don't have to worry about the contents of
+ * the old table. If this changes, then we will have to preserve the
+ * contents. -cim 6/23/90
*
- * Unfortunately, we *cannot* do this. All of the nodes in
- * the plan that have already initialized their slots will have
- * pointers into _freed_ memory. This leads to bad ends. We
- * now count the number of slots we will need and create all the
- * slots we will need ahead of time. The if below should never
- * happen now. Fail if it does. -mer 4 Aug 1992
- * ----------------
+ * Unfortunately, we *cannot* do this. All of the nodes in the plan that
+ * have already initialized their slots will have pointers into
+ * _freed_ memory. This leads to bad ends. We now count the number
+ * of slots we will need and create all the slots we will need ahead
+ * of time. The if below should never happen now. Fail if it does.
+ * -mer 4 Aug 1992
*/
if (table->next >= table->size)
elog(ERROR, "Plan requires more slots than are available"
"\n\tsend mail to your local executor guru to fix this");
- /* ----------------
- * at this point, space in the table is guaranteed so we
- * reserve the next slot, initialize and return it.
- * ----------------
+ /*
+ * at this point, space in the table is guaranteed so we reserve the
+ * next slot, initialize and return it.
*/
slotnum = table->next;
table->next++;
@@ -358,9 +347,9 @@ ExecStoreTuple(HeapTuple tuple,
Buffer buffer,
bool shouldFree)
{
- /* ----------------
- * sanity checks
- * ----------------
+
+ /*
+ * sanity checks
*/
Assert(slot != NULL);
/* passing shouldFree=true for a tuple on a disk page is not sane */
@@ -369,10 +358,9 @@ ExecStoreTuple(HeapTuple tuple,
/* clear out any old contents of the slot */
ExecClearTuple(slot);
- /* ----------------
- * store the new tuple into the specified slot and
- * return the slot into which we stored the tuple.
- * ----------------
+ /*
+ * store the new tuple into the specified slot and return the slot
+ * into which we stored the tuple.
*/
slot->val = tuple;
slot->ttc_buffer = buffer;
@@ -401,21 +389,18 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
{
HeapTuple oldtuple; /* prior contents of slot */
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(slot != NULL);
- /* ----------------
- * get information from the tuple table
- * ----------------
+ /*
+ * get information from the tuple table
*/
oldtuple = slot->val;
- /* ----------------
- * free the old contents of the specified slot if necessary.
- * ----------------
+ /*
+ * free the old contents of the specified slot if necessary.
*/
if (slot->ttc_shouldFree && oldtuple != NULL)
heap_freetuple(oldtuple);
@@ -424,9 +409,8 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
slot->ttc_shouldFree = true;/* probably useless code... */
- /* ----------------
- * Drop the pin on the referenced buffer, if there is one.
- * ----------------
+ /*
+ * Drop the pin on the referenced buffer, if there is one.
*/
if (BufferIsValid(slot->ttc_buffer))
ReleaseBuffer(slot->ttc_buffer);
@@ -582,24 +566,21 @@ ExecTypeFromTL(List *targetList)
Oid restype;
int len;
- /* ----------------
- * examine targetlist - if empty then return NULL
- * ----------------
+ /*
+ * examine targetlist - if empty then return NULL
*/
len = ExecTargetListLength(targetList);
if (len == 0)
return NULL;
- /* ----------------
- * allocate a new typeInfo
- * ----------------
+ /*
+ * allocate a new typeInfo
*/
typeInfo = CreateTemplateTupleDesc(len);
- /* ----------------
+ /*
* scan list, generate type info for each entry
- * ----------------
*/
foreach(tlitem, targetList)
{
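
The ExecAllocTableSlot comment above keeps the original warning: the slot array cannot be grown once plan nodes hold pointers into it, so the slots are counted and created up front and running out is a hard error. A standalone sketch of that pre-sized, never-reallocated table (hypothetical Demo* names, not the PostgreSQL API):

#include <stdio.h>
#include <stdlib.h>

typedef struct DemoSlot { void *val; int in_use; } DemoSlot;

typedef struct DemoSlotTable
{
    int       size;     /* number of slots allocated up front */
    int       next;     /* next free slot */
    DemoSlot *array;    /* stable storage: never realloc'd */
} DemoSlotTable;

static DemoSlotTable *demo_create_table(int initial_size)
{
    DemoSlotTable *t = malloc(sizeof(DemoSlotTable));

    t->size = initial_size;
    t->next = 0;
    t->array = calloc((size_t) initial_size, sizeof(DemoSlot));
    return t;
}

static DemoSlot *demo_alloc_slot(DemoSlotTable *t)
{
    if (t->next >= t->size)
    {
        /* growing would move the array and dangle every pointer handed out */
        fprintf(stderr, "demo_alloc_slot: table full (%d slots)\n", t->size);
        exit(1);
    }
    return &t->array[t->next++];
}

int main(void)
{
    DemoSlotTable *table = demo_create_table(2);
    DemoSlot *a = demo_alloc_slot(table);
    DemoSlot *b = demo_alloc_slot(table);

    a->in_use = b->in_use = 1;
    printf("allocated %d of %d slots\n", table->next, table->size);
    free(table->array);
    free(table);
    return 0;
}
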
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 6ee0d2e26ed..72aceb35f0f 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.74 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.75 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -336,18 +336,16 @@ ExecFreeProjectionInfo(CommonState *commonstate)
{
ProjectionInfo *projInfo;
- /* ----------------
- * get projection info. if NULL then this node has
- * none so we just return.
- * ----------------
+ /*
+ * get projection info. if NULL then this node has none so we just
+ * return.
*/
projInfo = commonstate->cs_ProjInfo;
if (projInfo == NULL)
return;
- /* ----------------
- * clean up memory used.
- * ----------------
+ /*
+ * clean up memory used.
*/
if (projInfo->pi_tupValue != NULL)
pfree(projInfo->pi_tupValue);
@@ -365,18 +363,16 @@ ExecFreeExprContext(CommonState *commonstate)
{
ExprContext *econtext;
- /* ----------------
- * get expression context. if NULL then this node has
- * none so we just return.
- * ----------------
+ /*
+ * get expression context. if NULL then this node has none so we just
+ * return.
*/
econtext = commonstate->cs_ExprContext;
if (econtext == NULL)
return;
- /* ----------------
- * clean up memory used.
- * ----------------
+ /*
+ * clean up memory used.
*/
MemoryContextDelete(econtext->ecxt_per_tuple_memory);
pfree(econtext);
@@ -476,18 +472,16 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
IsSystemRelationName(RelationGetRelationName(resultRelation)))
return;
- /* ----------------
- * Get cached list of index OIDs
- * ----------------
+ /*
+ * Get cached list of index OIDs
*/
indexoidlist = RelationGetIndexList(resultRelation);
len = length(indexoidlist);
if (len == 0)
return;
- /* ----------------
- * allocate space for result arrays
- * ----------------
+ /*
+ * allocate space for result arrays
*/
relationDescs = (RelationPtr) palloc(len * sizeof(Relation));
indexInfoArray = (IndexInfo **) palloc(len * sizeof(IndexInfo *));
@@ -496,9 +490,8 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
resultRelInfo->ri_IndexRelationDescs = relationDescs;
resultRelInfo->ri_IndexRelationInfo = indexInfoArray;
- /* ----------------
- * For each index, open the index relation and save pg_index info.
- * ----------------
+ /*
+ * For each index, open the index relation and save pg_index info.
*/
i = 0;
foreach(indexoidscan, indexoidlist)
@@ -508,24 +501,23 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
HeapTuple indexTuple;
IndexInfo *ii;
- /* ----------------
+ /*
* Open (and lock, if necessary) the index relation
*
* Hack for not btree and hash indices: they use relation level
- * exclusive locking on update (i.e. - they are not ready for MVCC)
- * and so we have to exclusively lock indices here to prevent
- * deadlocks if we will scan them - index_beginscan places
+ * exclusive locking on update (i.e. - they are not ready for
+ * MVCC) and so we have to exclusively lock indices here to
+ * prevent deadlocks if we will scan them - index_beginscan places
* AccessShareLock, indices update methods don't use locks at all.
* We release this lock in ExecCloseIndices. Note, that hashes use
* page level locking - i.e. are not deadlock-free - let's them be
* on their way -:)) vadim 03-12-1998
*
* If there are multiple not-btree-or-hash indices, all backends must
- * lock the indices in the same order or we will get deadlocks here
- * during concurrent updates. This is now guaranteed by
+ * lock the indices in the same order or we will get deadlocks
+ * here during concurrent updates. This is now guaranteed by
* RelationGetIndexList(), which promises to return the index list
* in OID order. tgl 06-19-2000
- * ----------------
*/
indexDesc = index_open(indexOid);
@@ -533,9 +525,8 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
indexDesc->rd_rel->relam != HASH_AM_OID)
LockRelation(indexDesc, AccessExclusiveLock);
- /* ----------------
- * Get the pg_index tuple for the index
- * ----------------
+ /*
+ * Get the pg_index tuple for the index
*/
indexTuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexOid),
@@ -543,9 +534,8 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
if (!HeapTupleIsValid(indexTuple))
elog(ERROR, "ExecOpenIndices: index %u not found", indexOid);
- /* ----------------
- * extract the index key information from the tuple
- * ----------------
+ /*
+ * extract the index key information from the tuple
*/
ii = BuildIndexInfo(indexTuple);
@@ -647,9 +637,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/* Arrange for econtext's scan tuple to be the tuple under test */
econtext->ecxt_scantuple = slot;
- /* ----------------
- * for each index, form and insert the index tuple
- * ----------------
+ /*
+ * for each index, form and insert the index tuple
*/
for (i = 0; i < numIndices; i++)
{
@@ -669,10 +658,9 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
continue;
}
- /* ----------------
- * FormIndexDatum fills in its datum and null parameters
- * with attribute information taken from the given heap tuple.
- * ----------------
+ /*
+ * FormIndexDatum fills in its datum and null parameters with
+ * attribute information taken from the given heap tuple.
*/
FormIndexDatum(indexInfo,
heapTuple,
@@ -687,9 +675,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
&(heapTuple->t_self), /* tid of heap tuple */
heapRelation);
- /* ----------------
- * keep track of index inserts for debugging
- * ----------------
+ /*
+ * keep track of index inserts for debugging
*/
IncrIndexInserted();
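
The ExecOpenIndices comment above relies on every backend locking the not-btree-or-hash indices in the same (OID) order so that concurrent updates cannot deadlock. A rough standalone illustration of that lock-ordering rule using plain pthread mutexes (a generic sketch, not the heavyweight-lock machinery the executor actually uses; all Demo* names are hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoIndex { int oid; pthread_mutex_t lock; } DemoIndex;

static int cmp_oid(const void *a, const void *b)
{
    const DemoIndex *const *x = a;
    const DemoIndex *const *y = b;

    return ((*x)->oid > (*y)->oid) - ((*x)->oid < (*y)->oid);
}

/*
 * Lock a set of indexes in ascending-oid order; because every caller sorts
 * the same way, lock waits can never form a cycle.
 */
static void demo_lock_all(DemoIndex **idx, int n)
{
    qsort(idx, (size_t) n, sizeof(DemoIndex *), cmp_oid);
    for (int i = 0; i < n; i++)
        pthread_mutex_lock(&idx[i]->lock);
}

static void demo_unlock_all(DemoIndex **idx, int n)
{
    for (int i = n - 1; i >= 0; i--)
        pthread_mutex_unlock(&idx[i]->lock);
}

int main(void)
{
    DemoIndex a = {101, PTHREAD_MUTEX_INITIALIZER};
    DemoIndex b = {42, PTHREAD_MUTEX_INITIALIZER};
    DemoIndex *set[] = {&a, &b};    /* arrives unsorted; locked in oid order */

    demo_lock_all(set, 2);
    printf("locked %d then %d\n", set[0]->oid, set[1]->oid);
    demo_unlock_all(set, 2);
    return 0;
}
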
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 4cc1dc27926..73dd6937d7d 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.44 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.45 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -162,9 +162,8 @@ init_sql_fcache(FmgrInfo *finfo)
Datum tmp;
bool isNull;
- /* ----------------
- * get the procedure tuple corresponding to the given function Oid
- * ----------------
+ /*
+ * get the procedure tuple corresponding to the given function Oid
*/
procedureTuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(foid),
@@ -175,9 +174,8 @@ init_sql_fcache(FmgrInfo *finfo)
procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple);
- /* ----------------
- * get the return type from the procedure tuple
- * ----------------
+ /*
+ * get the return type from the procedure tuple
*/
typeTuple = SearchSysCache(TYPEOID,
ObjectIdGetDatum(procedureStruct->prorettype),
@@ -191,9 +189,8 @@ init_sql_fcache(FmgrInfo *finfo)
fcache = (SQLFunctionCachePtr) palloc(sizeof(SQLFunctionCache));
MemSet(fcache, 0, sizeof(SQLFunctionCache));
- /* ----------------
- * get the type length and by-value flag from the type tuple
- * ----------------
+ /*
+ * get the type length and by-value flag from the type tuple
*/
fcache->typlen = typeStruct->typlen;
if (typeStruct->typrelid == InvalidOid)
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index e0f50bd66d1..73f2b56cf3d 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -46,7 +46,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.76 2001/03/22 03:59:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.77 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -202,13 +202,12 @@ initialize_aggregate(AggStatePerAgg peraggstate)
peraggstate->transValue = peraggstate->initValue;
peraggstate->transValueIsNull = peraggstate->initValueIsNull;
- /* ------------------------------------------
+ /*
* If the initial value for the transition state doesn't exist in the
- * pg_aggregate table then we will let the first non-NULL value returned
- * from the outer procNode become the initial value. (This is useful for
- * aggregates like max() and min().) The noTransValue flag signals that
- * we still need to do this.
- * ------------------------------------------
+ * pg_aggregate table then we will let the first non-NULL value
+ * returned from the outer procNode become the initial value. (This is
+ * useful for aggregates like max() and min().) The noTransValue flag
+ * signals that we still need to do this.
*/
peraggstate->noTransValue = peraggstate->initValueIsNull;
}
@@ -477,9 +476,8 @@ ExecAgg(Agg *node)
int aggno;
bool isNull;
- /* ---------------------
- * get state info from node
- * ---------------------
+ /*
+ * get state info from node
*/
aggstate = node->aggstate;
estate = node->plan.state;
@@ -516,9 +514,8 @@ ExecAgg(Agg *node)
inputTuple = NULL; /* no saved input tuple yet */
- /* ----------------
- * for each tuple from the outer plan, update all the aggregates
- * ----------------
+ /*
+ * for each tuple from the outer plan, update all the aggregates
*/
for (;;)
{
@@ -829,9 +826,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
outerPlan = outerPlan(node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize source tuple type.
- * ----------------
+ /*
+ * initialize source tuple type.
*/
ExecAssignScanTypeFromOuterPlan((Plan *) node, &aggstate->csstate);
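
The initialize_aggregate comment above explains that when pg_aggregate supplies no initial transition value, the first non-NULL input seeds the state, tracked by the noTransValue flag; this is what makes aggregates like max() and min() work. A standalone sketch of that rule for a max-style transition function (hypothetical Demo* names):

#include <stdio.h>
#include <stdbool.h>

typedef struct DemoAggState
{
    long transValue;
    bool transValueIsNull;
    bool noTransValue;          /* still waiting for the first non-NULL input */
} DemoAggState;

static void demo_advance_max(DemoAggState *st, long input, bool inputIsNull)
{
    if (inputIsNull)
        return;                 /* strict transition: ignore NULL inputs */
    if (st->noTransValue)
    {
        st->transValue = input; /* first non-NULL input becomes the state */
        st->transValueIsNull = false;
        st->noTransValue = false;
        return;
    }
    if (input > st->transValue)
        st->transValue = input;
}

int main(void)
{
    DemoAggState st = {0, true, true};
    long inputs[] = {7, 3, 42, 5};

    demo_advance_max(&st, 0, true);     /* NULL input, ignored */
    for (int i = 0; i < 4; i++)
        demo_advance_max(&st, inputs[i], false);

    printf("max = %ld (isNull=%d)\n", st.transValue, (int) st.transValueIsNull);
    return 0;
}
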
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index 390f9fc1d91..f8b4b89bc93 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.39 2001/01/24 19:42:54 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.40 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -81,9 +81,8 @@ exec_append_initialize_next(Append *node)
int whichplan;
int nplans;
- /* ----------------
- * get information from the append node
- * ----------------
+ /*
+ * get information from the append node
*/
estate = node->plan.state;
appendstate = node->appendstate;
@@ -92,35 +91,34 @@ exec_append_initialize_next(Append *node)
if (whichplan < 0)
{
- /* ----------------
- * if scanning in reverse, we start at
- * the last scan in the list and then
- * proceed back to the first.. in any case
- * we inform ExecProcAppend that we are
- * at the end of the line by returning FALSE
- * ----------------
+
+ /*
+ * if scanning in reverse, we start at the last scan in the list
+ * and then proceed back to the first.. in any case we inform
+ * ExecProcAppend that we are at the end of the line by returning
+ * FALSE
*/
appendstate->as_whichplan = 0;
return FALSE;
}
else if (whichplan >= nplans)
{
- /* ----------------
- * as above, end the scan if we go beyond
- * the last scan in our list..
- * ----------------
+
+ /*
+ * as above, end the scan if we go beyond the last scan in our
+ * list..
*/
appendstate->as_whichplan = nplans - 1;
return FALSE;
}
else
{
- /* ----------------
- * initialize the scan
+
+ /*
+ * initialize the scan
*
* If we are controlling the target relation, select the proper
* active ResultRelInfo and junk filter for this target.
- * ----------------
*/
if (node->isTarget)
{
@@ -162,10 +160,8 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
CXT1_printf("ExecInitAppend: context is %d\n", CurrentMemoryContext);
- /* ----------------
- * assign execution state to node and get information
- * for append state
- * ----------------
+ /*
+ * assign execution state to node and get information for append state
*/
node->plan.state = estate;
@@ -175,9 +171,8 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
initialized = (bool *) palloc(nplans * sizeof(bool));
MemSet(initialized, 0, nplans * sizeof(bool));
- /* ----------------
- * create new AppendState for our append node
- * ----------------
+ /*
+ * create new AppendState for our append node
*/
appendstate = makeNode(AppendState);
appendstate->as_whichplan = 0;
@@ -186,26 +181,24 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
node->appendstate = appendstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Append plans don't have expression contexts because they
- * never call ExecQual or ExecProject.
- * ----------------
+ * Append plans don't have expression contexts because they never call
+ * ExecQual or ExecProject.
*/
#define APPEND_NSLOTS 1
- /* ----------------
- * append nodes still have Result slots, which hold pointers
- * to tuples, so we have to initialize them.
- * ----------------
+
+ /*
+ * append nodes still have Result slots, which hold pointers to
+ * tuples, so we have to initialize them.
*/
ExecInitResultTupleSlot(estate, &appendstate->cstate);
- /* ----------------
- * call ExecInitNode on each of the plans in our list
- * and save the results into the array "initialized"
- * ----------------
+ /*
+ * call ExecInitNode on each of the plans in our list and save the
+ * results into the array "initialized"
*/
for (i = 0; i < nplans; i++)
{
@@ -216,16 +209,14 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
initialized[i] = ExecInitNode(initNode, estate, (Plan *) node);
}
- /* ----------------
- * initialize tuple type
- * ----------------
+ /*
+ * initialize tuple type
*/
ExecAssignResultTypeFromTL((Plan *) node, &appendstate->cstate);
appendstate->cstate.cs_ProjInfo = NULL;
- /* ----------------
- * return the result from the first subplan's initialization
- * ----------------
+ /*
+ * return the result from the first subplan's initialization
*/
appendstate->as_whichplan = 0;
exec_append_initialize_next(node);
@@ -264,9 +255,8 @@ ExecProcAppend(Append *node)
TupleTableSlot *result_slot;
ScanDirection direction;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
appendstate = node->appendstate;
estate = node->plan.state;
@@ -275,49 +265,46 @@ ExecProcAppend(Append *node)
whichplan = appendstate->as_whichplan;
result_slot = appendstate->cstate.cs_ResultTupleSlot;
- /* ----------------
- * figure out which subplan we are currently processing
- * ----------------
+ /*
+ * figure out which subplan we are currently processing
*/
subnode = (Plan *) nth(whichplan, appendplans);
if (subnode == NULL)
elog(DEBUG, "ExecProcAppend: subnode is NULL");
- /* ----------------
- * get a tuple from the subplan
- * ----------------
+ /*
+ * get a tuple from the subplan
*/
result = ExecProcNode(subnode, (Plan *) node);
if (!TupIsNull(result))
{
- /* ----------------
- * if the subplan gave us something then place a copy of
- * whatever we get into our result slot and return it.
+
+ /*
+ * if the subplan gave us something then place a copy of whatever
+ * we get into our result slot and return it.
*
- * Note we rely on the subplan to retain ownership of the
- * tuple for as long as we need it --- we don't copy it.
- * ----------------
+ * Note we rely on the subplan to retain ownership of the tuple for
+ * as long as we need it --- we don't copy it.
*/
return ExecStoreTuple(result->val, result_slot, InvalidBuffer, false);
}
else
{
- /* ----------------
- * .. go on to the "next" subplan in the appropriate
- * direction and try processing again (recursively)
- * ----------------
+
+ /*
+ * .. go on to the "next" subplan in the appropriate direction and
+ * try processing again (recursively)
*/
if (ScanDirectionIsForward(direction))
appendstate->as_whichplan++;
else
appendstate->as_whichplan--;
- /* ----------------
- * return something from next node or an empty slot
- * if all of our subplans have been exhausted.
- * ----------------
+ /*
+ * return something from next node or an empty slot if all of our
+ * subplans have been exhausted.
*/
if (exec_append_initialize_next(node))
{
@@ -347,9 +334,8 @@ ExecEndAppend(Append *node)
bool *initialized;
int i;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
appendstate = node->appendstate;
estate = node->plan.state;
@@ -357,9 +343,8 @@ ExecEndAppend(Append *node)
nplans = appendstate->as_nplans;
initialized = appendstate->as_initialized;
- /* ----------------
- * shut down each of the subscans
- * ----------------
+ /*
+ * shut down each of the subscans
*/
for (i = 0; i < nplans; i++)
{
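
ExecProcAppend, per the comments above, pulls from the current subplan and, when that subplan is exhausted, advances as_whichplan in the scan direction and tries again until every subplan is drained. A standalone sketch of that forward-only iteration (hypothetical Demo* names; the real code also handles backward scans and target-relation switching):

#include <stdio.h>

#define NPLANS 3

typedef struct DemoSubplan { const int *rows; int nrows; int pos; } DemoSubplan;

typedef struct DemoAppend
{
    DemoSubplan plans[NPLANS];
    int         whichplan;      /* index of the subplan currently being read */
} DemoAppend;

/* return the next value, or -1 when all subplans are exhausted */
static int demo_append_next(DemoAppend *ap)
{
    while (ap->whichplan < NPLANS)
    {
        DemoSubplan *sub = &ap->plans[ap->whichplan];

        if (sub->pos < sub->nrows)
            return sub->rows[sub->pos++];   /* subplan still has tuples */
        ap->whichplan++;                    /* go on to the "next" subplan */
    }
    return -1;
}

int main(void)
{
    static const int a[] = {1, 2};
    static const int b[1] = {0};            /* empty subplan */
    static const int c[] = {3};
    DemoAppend ap = {
        {{a, 2, 0}, {b, 0, 0}, {c, 1, 0}},
        0
    };

    for (int v = demo_append_next(&ap); v != -1; v = demo_append_next(&ap))
        printf("%d\n", v);
    return 0;
}
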
diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c
index 500e9c07c43..6dda57f6e96 100644
--- a/src/backend/executor/nodeGroup.c
+++ b/src/backend/executor/nodeGroup.c
@@ -15,7 +15,7 @@
* locate group boundaries.
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.42 2001/03/22 03:59:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.43 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,9 +76,8 @@ ExecGroupEveryTuple(Group *node)
ProjectionInfo *projInfo;
TupleTableSlot *resultSlot;
- /* ---------------------
- * get state info from node
- * ---------------------
+ /*
+ * get state info from node
*/
grpstate = node->grpstate;
if (grpstate->grp_done)
@@ -156,10 +155,9 @@ ExecGroupEveryTuple(Group *node)
InvalidBuffer, false);
}
- /* ----------------
- * form a projection tuple, store it in the result tuple
- * slot and return it.
- * ----------------
+ /*
+ * form a projection tuple, store it in the result tuple slot and
+ * return it.
*/
projInfo = grpstate->csstate.cstate.cs_ProjInfo;
@@ -187,9 +185,8 @@ ExecGroupOneTuple(Group *node)
ProjectionInfo *projInfo;
TupleTableSlot *resultSlot;
- /* ---------------------
- * get state info from node
- * ---------------------
+ /*
+ * get state info from node
*/
grpstate = node->grpstate;
if (grpstate->grp_done)
@@ -243,10 +240,9 @@ ExecGroupOneTuple(Group *node)
break;
}
- /* ----------------
- * form a projection tuple, store it in the result tuple
- * slot and return it.
- * ----------------
+ /*
+ * form a projection tuple, store it in the result tuple slot and
+ * return it.
*/
projInfo = grpstate->csstate.cstate.cs_ProjInfo;
@@ -316,9 +312,8 @@ ExecInitGroup(Group *node, EState *estate, Plan *parent)
outerPlan = outerPlan(node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize tuple type.
- * ----------------
+ /*
+ * initialize tuple type.
*/
ExecAssignScanTypeFromOuterPlan((Plan *) node, &grpstate->csstate);
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 7b5e3d4cced..45b3f8bc105 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $Id: nodeHash.c,v 1.55 2001/03/22 03:59:27 momjian Exp $
+ * $Id: nodeHash.c,v 1.56 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,9 +55,8 @@ ExecHash(Hash *node)
int nbatch;
int i;
- /* ----------------
- * get state info from node
- * ----------------
+ /*
+ * get state info from node
*/
hashstate = node->hashstate;
@@ -72,25 +71,23 @@ ExecHash(Hash *node)
if (nbatch > 0)
{
- /* ----------------
- * Open temp files for inner batches, if needed.
- * Note that file buffers are palloc'd in regular executor context.
- * ----------------
+
+ /*
+ * Open temp files for inner batches, if needed. Note that file
+ * buffers are palloc'd in regular executor context.
*/
for (i = 0; i < nbatch; i++)
hashtable->innerBatchFile[i] = BufFileCreateTemp();
}
- /* ----------------
- * set expression context
- * ----------------
+ /*
+ * set expression context
*/
hashkey = node->hashkey;
econtext = hashstate->cstate.cs_ExprContext;
- /* ----------------
- * get all inner tuples and insert into the hash table (or temp files)
- * ----------------
+ /*
+ * get all inner tuples and insert into the hash table (or temp files)
*/
for (;;)
{
@@ -102,10 +99,9 @@ ExecHash(Hash *node)
ExecClearTuple(slot);
}
- /* ---------------------
- * Return the slot so that we have the tuple descriptor
- * when we need to save/restore them. -Jeff 11 July 1991
- * ---------------------
+ /*
+ * Return the slot so that we have the tuple descriptor when we need
+ * to save/restore them. -Jeff 11 July 1991
*/
return slot;
}
@@ -125,45 +121,39 @@ ExecInitHash(Hash *node, EState *estate, Plan *parent)
SO1_printf("ExecInitHash: %s\n",
"initializing hash node");
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->plan.state = estate;
- /* ----------------
+ /*
* create state structure
- * ----------------
*/
hashstate = makeNode(HashState);
node->hashstate = hashstate;
hashstate->hashtable = NULL;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &hashstate->cstate);
- /* ----------------
+ /*
* initialize our result slot
- * ----------------
*/
ExecInitResultTupleSlot(estate, &hashstate->cstate);
- /* ----------------
+ /*
* initializes child nodes
- * ----------------
*/
outerPlan = outerPlan(node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize tuple type. no need to initialize projection
- * info because this node doesn't do projections
- * ----------------
+ /*
+ * initialize tuple type. no need to initialize projection info
+ * because this node doesn't do projections
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &hashstate->cstate);
hashstate->cstate.cs_ProjInfo = NULL;
@@ -192,23 +182,20 @@ ExecEndHash(Hash *node)
HashState *hashstate;
Plan *outerPlan;
- /* ----------------
- * get info from the hash state
- * ----------------
+ /*
+ * get info from the hash state
*/
hashstate = node->hashstate;
- /* ----------------
- * free projection info. no need to free result type info
- * because that came from the outer plan...
- * ----------------
+ /*
+ * free projection info. no need to free result type info because
+ * that came from the outer plan...
*/
ExecFreeProjectionInfo(&hashstate->cstate);
ExecFreeExprContext(&hashstate->cstate);
- /* ----------------
- * shut down the subplan
- * ----------------
+ /*
+ * shut down the subplan
*/
outerPlan = outerPlan(node);
ExecEndNode(outerPlan, (Plan *) node);
@@ -239,13 +226,13 @@ ExecHashTableCreate(Hash *node)
int i;
MemoryContext oldcxt;
- /* ----------------
- * Get information about the size of the relation to be hashed
- * (it's the "outer" subtree of this node, but the inner relation of
- * the hashjoin).
- * Caution: this is only the planner's estimates, and so
- * can't be trusted too far. Apply a healthy fudge factor.
- * ----------------
+ /*
+ * Get information about the size of the relation to be hashed (it's
+ * the "outer" subtree of this node, but the inner relation of the
+ * hashjoin).
+ *
+ * Caution: this is only the planner's estimates, and so can't be trusted
+ * too far. Apply a healthy fudge factor.
*/
outerNode = outerPlan(node);
ntuples = outerNode->plan_rows;
@@ -331,11 +318,11 @@ ExecHashTableCreate(Hash *node)
nbatch, totalbuckets, nbuckets);
#endif
- /* ----------------
- * Initialize the hash table control block.
- * The hashtable control block is just palloc'd from the executor's
- * per-query memory context.
- * ----------------
+ /*
+ * Initialize the hash table control block.
+ *
+ * The hashtable control block is just palloc'd from the executor's
+ * per-query memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashTableData));
hashtable->nbuckets = nbuckets;
@@ -348,18 +335,16 @@ ExecHashTableCreate(Hash *node)
hashtable->innerBatchSize = NULL;
hashtable->outerBatchSize = NULL;
- /* ----------------
- * Get info about the datatype of the hash key.
- * ----------------
+ /*
+ * Get info about the datatype of the hash key.
*/
get_typlenbyval(exprType(node->hashkey),
&hashtable->typLen,
&hashtable->typByVal);
- /* ----------------
- * Create temporary memory contexts in which to keep the hashtable
- * working storage. See notes in executor/hashjoin.h.
- * ----------------
+ /*
+ * Create temporary memory contexts in which to keep the hashtable
+ * working storage. See notes in executor/hashjoin.h.
*/
hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
"HashTableContext",
@@ -379,9 +364,9 @@ ExecHashTableCreate(Hash *node)
if (nbatch > 0)
{
- /* ---------------
- * allocate and initialize the file arrays in hashCxt
- * ---------------
+
+ /*
+ * allocate and initialize the file arrays in hashCxt
*/
hashtable->innerBatchFile = (BufFile **)
palloc(nbatch * sizeof(BufFile *));
@@ -464,15 +449,14 @@ ExecHashTableInsert(HashJoinTable hashtable,
TupleTableSlot *slot = econtext->ecxt_innertuple;
HeapTuple heapTuple = slot->val;
- /* ----------------
- * decide whether to put the tuple in the hash table or a tmp file
- * ----------------
+ /*
+ * decide whether to put the tuple in the hash table or a tmp file
*/
if (bucketno < hashtable->nbuckets)
{
- /* ---------------
- * put the tuple in hash table
- * ---------------
+
+ /*
+ * put the tuple in hash table
*/
HashJoinTuple hashTuple;
int hashTupleSize;
@@ -496,9 +480,9 @@ ExecHashTableInsert(HashJoinTable hashtable,
}
else
{
- /* -----------------
+
+ /*
* put the tuple into a tmp file for other batches
- * -----------------
*/
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
@@ -524,20 +508,18 @@ ExecHashGetBucket(HashJoinTable hashtable,
Datum keyval;
bool isNull;
- /* ----------------
- * Get the join attribute value of the tuple
+ /*
+ * Get the join attribute value of the tuple
*
- * We reset the eval context each time to avoid any possibility
- * of memory leaks in the hash function.
- * ----------------
+ * We reset the eval context each time to avoid any possibility of memory
+ * leaks in the hash function.
*/
ResetExprContext(econtext);
keyval = ExecEvalExprSwitchContext(hashkey, econtext, &isNull, NULL);
- /* ------------------
- * compute the hash function
- * ------------------
+ /*
+ * compute the hash function
*/
if (isNull)
bucketno = 0;
@@ -606,9 +588,8 @@ ExecScanHashBucket(HashJoinState *hjstate,
hashTuple = hashTuple->next;
}
- /* ----------------
- * no match
- * ----------------
+ /*
+ * no match
*/
return NULL;
}
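
ExecHashGetBucket and ExecHashTableInsert above route each tuple by its hashed join key: bucket numbers below nbuckets stay in memory, and anything above is written to the temp file for batch nbatch * (bucketno - nbuckets) / (totalbuckets - nbuckets). A standalone sketch of that routing arithmetic (hypothetical Demo* names and a toy hash function):

#include <stdio.h>

typedef struct DemoHashTable
{
    int nbuckets;       /* in-memory buckets */
    int totalbuckets;   /* virtual buckets, including batched ones */
    int nbatch;         /* number of temp-file batches */
} DemoHashTable;

static unsigned demo_hash(long key)
{
    return (unsigned) (key * 2654435761u);  /* toy multiplicative hash */
}

static void demo_route(const DemoHashTable *ht, long key)
{
    int bucketno = (int) (demo_hash(key) % (unsigned) ht->totalbuckets);

    if (bucketno < ht->nbuckets)
        printf("key %ld -> memory bucket %d\n", key, bucketno);
    else
    {
        /* same formula as the batch assignment in ExecHashTableInsert */
        int batchno = (ht->nbatch * (bucketno - ht->nbuckets)) /
                      (ht->totalbuckets - ht->nbuckets);

        printf("key %ld -> temp file for batch %d\n", key, batchno);
    }
}

int main(void)
{
    DemoHashTable ht = {4, 16, 3};

    for (long key = 0; key < 8; key++)
        demo_route(&ht, key);
    return 0;
}
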
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index dae06d2c937..12f6d58443e 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.37 2001/03/22 03:59:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.38 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,9 +64,8 @@ ExecHashJoin(HashJoin *node)
int i;
bool hashPhaseDone;
- /* ----------------
- * get information from HashJoin node
- * ----------------
+ /*
+ * get information from HashJoin node
*/
hjstate = node->hashjoinstate;
hjclauses = node->hashclauses;
@@ -79,18 +78,16 @@ ExecHashJoin(HashJoin *node)
hashPhaseDone = hjstate->hj_hashdone;
dir = estate->es_direction;
- /* -----------------
+ /*
* get information from HashJoin state
- * -----------------
*/
hashtable = hjstate->hj_HashTable;
econtext = hjstate->jstate.cs_ExprContext;
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * join tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (hjstate->jstate.cs_TupFromTlist)
{
@@ -103,42 +100,39 @@ ExecHashJoin(HashJoin *node)
hjstate->jstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
- /* ----------------
- * if this is the first call, build the hash table for inner relation
- * ----------------
+ /*
+ * if this is the first call, build the hash table for inner relation
*/
if (!hashPhaseDone)
{ /* if the hash phase not completed */
if (hashtable == NULL)
{ /* if the hash table has not been created */
- /* ----------------
+
+ /*
* create the hash table
- * ----------------
*/
hashtable = ExecHashTableCreate(hashNode);
hjstate->hj_HashTable = hashtable;
hjstate->hj_InnerHashKey = hashNode->hashkey;
- /* ----------------
+ /*
* execute the Hash node, to build the hash table
- * ----------------
*/
hashNode->hashstate->hashtable = hashtable;
innerTupleSlot = ExecProcNode((Plan *) hashNode, (Plan *) node);
}
hjstate->hj_hashdone = true;
- /* ----------------
- * Open temp files for outer batches, if needed.
- * Note that file buffers are palloc'd in regular executor context.
- * ----------------
+
+ /*
+ * Open temp files for outer batches, if needed. Note that file
+ * buffers are palloc'd in regular executor context.
*/
for (i = 0; i < hashtable->nbatch; i++)
hashtable->outerBatchFile[i] = BufFileCreateTemp();
@@ -146,9 +140,8 @@ ExecHashJoin(HashJoin *node)
else if (hashtable == NULL)
return NULL;
- /* ----------------
- * Now get an outer tuple and probe into the hash table for matches
- * ----------------
+ /*
+ * Now get an outer tuple and probe into the hash table for matches
*/
outerTupleSlot = hjstate->jstate.cs_OuterTupleSlot;
outerVar = (Node *) get_leftop(clause);
@@ -188,11 +181,10 @@ ExecHashJoin(HashJoin *node)
outerVar);
hjstate->hj_CurTuple = NULL;
- /* ----------------
- * Now we've got an outer tuple and the corresponding hash bucket,
- * but this tuple may not belong to the current batch.
- * This need only be checked in the first pass.
- * ----------------
+ /*
+ * Now we've got an outer tuple and the corresponding hash
+ * bucket, but this tuple may not belong to the current batch.
+ * This need only be checked in the first pass.
*/
if (hashtable->curbatch == 0)
{
@@ -240,14 +232,13 @@ ExecHashJoin(HashJoin *node)
/* reset temp memory each time to avoid leaks from qual expr */
ResetExprContext(econtext);
- /* ----------------
- * if we pass the qual, then save state for next call and
- * have ExecProject form the projection, store it
- * in the tuple table, and return the slot.
+ /*
+ * if we pass the qual, then save state for next call and have
+ * ExecProject form the projection, store it in the tuple
+ * table, and return the slot.
*
- * Only the joinquals determine MatchedOuter status,
- * but all quals must pass to actually return the tuple.
- * ----------------
+ * Only the joinquals determine MatchedOuter status, but all
+ * quals must pass to actually return the tuple.
*/
if (ExecQual(joinqual, econtext, false))
{
@@ -269,11 +260,10 @@ ExecHashJoin(HashJoin *node)
}
}
- /* ----------------
- * Now the current outer tuple has run out of matches,
- * so check whether to emit a dummy outer-join tuple.
- * If not, loop around to get a new outer tuple.
- * ----------------
+ /*
+ * Now the current outer tuple has run out of matches, so check
+ * whether to emit a dummy outer-join tuple. If not, loop around
+ * to get a new outer tuple.
*/
hjstate->hj_NeedNewOuter = true;
@@ -291,11 +281,11 @@ ExecHashJoin(HashJoin *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification was satisfied so we project and
- * return the slot containing the result tuple
- * using ExecProject().
- * ----------------
+
+ /*
+ * qualification was satisfied so we project and return
+ * the slot containing the result tuple using
+ * ExecProject().
*/
TupleTableSlot *result;
@@ -325,30 +315,26 @@ ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent)
Plan *outerNode;
Hash *hashNode;
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->join.plan.state = estate;
- /* ----------------
+ /*
* create state structure
- * ----------------
*/
hjstate = makeNode(HashJoinState);
node->hashjoinstate = hjstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &hjstate->jstate);
- /* ----------------
+ /*
* initializes child nodes
- * ----------------
*/
outerNode = outerPlan((Plan *) node);
hashNode = (Hash *) innerPlan((Plan *) node);
@@ -357,9 +343,9 @@ ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent)
ExecInitNode((Plan *) hashNode, estate, (Plan *) node);
#define HASHJOIN_NSLOTS 3
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &hjstate->jstate);
hjstate->hj_OuterTupleSlot = ExecInitExtraTupleSlot(estate);
@@ -378,14 +364,12 @@ ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent)
(int) node->join.jointype);
}
- /* ----------------
- * now for some voodoo. our temporary tuple slot
- * is actually the result tuple slot of the Hash node
- * (which is our inner plan). we do this because Hash
- * nodes don't return tuples via ExecProcNode() -- instead
- * the hash join node uses ExecScanHashBucket() to get
- * at the contents of the hash table. -cim 6/9/91
- * ----------------
+ /*
+ * now for some voodoo. our temporary tuple slot is actually the
+ * result tuple slot of the Hash node (which is our inner plan). we
+ * do this because Hash nodes don't return tuples via ExecProcNode()
+ * -- instead the hash join node uses ExecScanHashBucket() to get at
+ * the contents of the hash table. -cim 6/9/91
*/
{
HashState *hashstate = hashNode->hashstate;
@@ -394,9 +378,8 @@ ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent)
hjstate->hj_HashTupleSlot = slot;
}
- /* ----------------
- * initialize tuple type and projection info
- * ----------------
+ /*
+ * initialize tuple type and projection info
*/
ExecAssignResultTypeFromTL((Plan *) node, &hjstate->jstate);
ExecAssignProjectionInfo((Plan *) node, &hjstate->jstate);
@@ -405,9 +388,8 @@ ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent)
ExecGetTupType(outerNode),
false);
- /* ----------------
- * initialize hash-specific info
- * ----------------
+ /*
+ * initialize hash-specific info
*/
hjstate->hj_hashdone = false;
@@ -444,15 +426,13 @@ ExecEndHashJoin(HashJoin *node)
{
HashJoinState *hjstate;
- /* ----------------
- * get info from the HashJoin state
- * ----------------
+ /*
+ * get info from the HashJoin state
*/
hjstate = node->hashjoinstate;
- /* ----------------
+ /*
* free hash table in case we end plan before all tuples are retrieved
- * ---------------
*/
if (hjstate->hj_HashTable)
{
@@ -460,28 +440,24 @@ ExecEndHashJoin(HashJoin *node)
hjstate->hj_HashTable = NULL;
}
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(hjstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(hjstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&hjstate->jstate);
ExecFreeExprContext(&hjstate->jstate);
- /* ----------------
+ /*
* clean up subtrees
- * ----------------
*/
ExecEndNode(outerPlan((Plan *) node), (Plan *) node);
ExecEndNode(innerPlan((Plan *) node), (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(hjstate->jstate.cs_ResultTupleSlot);
ExecClearTuple(hjstate->hj_OuterTupleSlot);
@@ -598,10 +574,9 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
hashtable->outerBatchFile[newbatch - 2] = NULL;
}
- /* --------------
- * We can skip over any batches that are empty on either side.
- * Release associated temp files right away.
- * --------------
+ /*
+ * We can skip over any batches that are empty on either side. Release
+ * associated temp files right away.
*/
while (newbatch <= nbatch &&
(innerBatchSize[newbatch - 1] == 0L ||
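
The hash join comments above split the work into building a hash table over the inner relation and probing it with each outer tuple, with extra batches spilled to temp files. A standalone sketch of the single-batch build/probe core (hypothetical Demo* names; batching, outer-join handling and the per-batch files of ExecHashJoinNewBatch are omitted):

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 8

typedef struct DemoHashTuple
{
    int key;
    int payload;
    struct DemoHashTuple *next;
} DemoHashTuple;

static DemoHashTuple *buckets[NBUCKETS];

static int demo_bucket(int key) { return (int) ((unsigned) key % NBUCKETS); }

/* build phase: insert an inner tuple at the head of its bucket chain */
static void demo_build(int key, int payload)
{
    DemoHashTuple *t = malloc(sizeof(DemoHashTuple));
    int b = demo_bucket(key);

    t->key = key;
    t->payload = payload;
    t->next = buckets[b];
    buckets[b] = t;
}

/* probe phase: scan the matching bucket for equal join keys */
static void demo_probe(int key, int outer_payload)
{
    for (DemoHashTuple *t = buckets[demo_bucket(key)]; t != NULL; t = t->next)
        if (t->key == key)
            printf("join: key=%d inner=%d outer=%d\n",
                   key, t->payload, outer_payload);
}

int main(void)
{
    demo_build(1, 100);
    demo_build(2, 200);
    demo_build(1, 101);

    demo_probe(1, 900);     /* matches both inner tuples with key 1 */
    demo_probe(3, 901);     /* no match */
    return 0;
}
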
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index a6e6e45e9dc..1adc49d6ff0 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.58 2001/03/22 03:59:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.59 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -84,9 +84,8 @@ IndexNext(IndexScan *node)
bool bBackward;
int indexNumber;
- /* ----------------
- * extract necessary information from index scan node
- * ----------------
+ /*
+ * extract necessary information from index scan node
*/
estate = node->scan.plan.state;
direction = estate->es_direction;
@@ -145,11 +144,10 @@ IndexNext(IndexScan *node)
tuple = &(indexstate->iss_htup);
- /* ----------------
- * ok, now that we have what we need, fetch an index tuple.
- * if scanning this index succeeded then return the
- * appropriate heap tuple.. else return NULL.
- * ----------------
+ /*
+ * ok, now that we have what we need, fetch an index tuple. if
+ * scanning this index succeeded then return the appropriate heap
+ * tuple.. else return NULL.
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
@@ -238,10 +236,10 @@ IndexNext(IndexScan *node)
indexstate->iss_IndexPtr++;
}
}
- /* ----------------
- * if we get here it means the index scan failed so we
- * are at the end of the scan..
- * ----------------
+
+ /*
+ * if we get here it means the index scan failed so we are at the end
+ * of the scan..
*/
return ExecClearTuple(slot);
}
@@ -272,17 +270,15 @@ ExecIndexScan(IndexScan *node)
{
IndexScanState *indexstate = node->indxstate;
- /* ----------------
- * If we have runtime keys and they've not already been set up,
- * do it now.
- * ----------------
+ /*
+ * If we have runtime keys and they've not already been set up, do it
+ * now.
*/
if (indexstate->iss_RuntimeKeyInfo && !indexstate->iss_RuntimeKeysReady)
ExecReScan((Plan *) node, NULL, NULL);
- /* ----------------
- * use IndexNext as access method
- * ----------------
+ /*
+ * use IndexNext as access method
*/
return ExecScan(&node->scan, (ExecScanAccessMtd) IndexNext);
}
@@ -448,37 +444,32 @@ ExecEndIndexScan(IndexScan *node)
indxqual = node->indxqual;
runtimeKeyInfo = indexstate->iss_RuntimeKeyInfo;
- /* ----------------
- * extract information from the node
- * ----------------
+ /*
+ * extract information from the node
*/
numIndices = indexstate->iss_NumIndices;
scanKeys = indexstate->iss_ScanKeys;
numScanKeys = indexstate->iss_NumScanKeys;
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(scanstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(scanstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&scanstate->cstate);
ExecFreeExprContext(&scanstate->cstate);
if (indexstate->iss_RuntimeContext)
FreeExprContext(indexstate->iss_RuntimeContext);
- /* ----------------
- * close the heap and index relations
- * ----------------
+ /*
+ * close the heap and index relations
*/
ExecCloseR((Plan *) node);
- /* ----------------
- * free the scan keys used in scanning the indices
- * ----------------
+ /*
+ * free the scan keys used in scanning the indices
*/
for (i = 0; i < numIndices; i++)
{
@@ -498,9 +489,8 @@ ExecEndIndexScan(IndexScan *node)
pfree(runtimeKeyInfo);
}
- /* ----------------
- * clear out tuple table slots
- * ----------------
+ /*
+ * clear out tuple table slots
*/
ExecClearTuple(scanstate->cstate.cs_ResultTupleSlot);
ExecClearTuple(scanstate->css_ScanTupleSlot);
@@ -605,50 +595,45 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
HeapScanDesc currentScanDesc;
ScanDirection direction;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->scan.plan.state = estate;
- /* --------------------------------
- * Part 1) initialize scan state
+ /*
+ * Part 1) initialize scan state
*
- * create new CommonScanState for node
- * --------------------------------
+ * create new CommonScanState for node
*/
scanstate = makeNode(CommonScanState);
node->scan.scanstate = scanstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &scanstate->cstate);
#define INDEXSCAN_NSLOTS 3
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &scanstate->cstate);
ExecInitScanTupleSlot(estate, scanstate);
- /* ----------------
- * initialize projection info. result type comes from scan desc
- * below..
- * ----------------
+ /*
+ * initialize projection info. result type comes from scan desc
+ * below..
*/
ExecAssignProjectionInfo((Plan *) node, &scanstate->cstate);
- /* --------------------------------
- * Part 2) initialize index scan state
- *
- * create new IndexScanState for node
- * --------------------------------
- */
+ /*
+ * Part 2) initialize index scan state
+ *
+ * create new IndexScanState for node
+ */
indexstate = makeNode(IndexScanState);
indexstate->iss_NumIndices = 0;
indexstate->iss_IndexPtr = -1;
@@ -662,9 +647,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
node->indxstate = indexstate;
- /* ----------------
- * get the index node information
- * ----------------
+ /*
+ * get the index node information
*/
indxid = node->indxid;
numIndices = length(indxid);
@@ -672,27 +656,24 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext);
- /* ----------------
- * scanKeys is used to keep track of the ScanKey's. This is needed
- * because a single scan may use several indices and each index has
- * its own ScanKey.
- * ----------------
+ /*
+ * scanKeys is used to keep track of the ScanKey's. This is needed
+ * because a single scan may use several indices and each index has
+ * its own ScanKey.
*/
numScanKeys = (int *) palloc(numIndices * sizeof(int));
scanKeys = (ScanKey *) palloc(numIndices * sizeof(ScanKey));
relationDescs = (RelationPtr) palloc(numIndices * sizeof(Relation));
scanDescs = (IndexScanDescPtr) palloc(numIndices * sizeof(IndexScanDesc));
- /* ----------------
- * initialize space for runtime key info (may not be needed)
- * ----------------
+ /*
+ * initialize space for runtime key info (may not be needed)
*/
have_runtime_keys = false;
runtimeKeyInfo = (int **) palloc(numIndices * sizeof(int *));
- /* ----------------
- * build the index scan keys from the index qualification
- * ----------------
+ /*
+ * build the index scan keys from the index qualification
*/
indxqual = node->indxqual;
for (i = 0; i < numIndices; i++)
@@ -713,10 +694,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext);
- /* ----------------
- * for each opclause in the given qual,
- * convert each qual's opclause into a single scan key
- * ----------------
+ /*
+ * for each opclause in the given qual, convert each qual's
+ * opclause into a single scan key
*/
for (j = 0; j < n_keys; j++)
{
@@ -731,9 +711,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
Oid opid; /* operator id used in scan */
Datum scanvalue = 0; /* value used in scan (if const) */
- /* ----------------
- * extract clause information from the qualification
- * ----------------
+ /*
+ * extract clause information from the qualification
*/
clause = nth(j, qual);
@@ -743,48 +722,46 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
opid = op->opid;
- /* ----------------
- * Here we figure out the contents of the index qual.
- * The usual case is (var op const) or (const op var)
- * which means we form a scan key for the attribute
- * listed in the var node and use the value of the const.
+ /*
+ * Here we figure out the contents of the index qual. The
+ * usual case is (var op const) or (const op var) which means
+ * we form a scan key for the attribute listed in the var node
+ * and use the value of the const.
*
- * If we don't have a const node, then it means that
- * one of the var nodes refers to the "scan" tuple and
- * is used to determine which attribute to scan, and the
- * other expression is used to calculate the value used in
- * scanning the index.
+ * If we don't have a const node, then it means that one of the
+ * var nodes refers to the "scan" tuple and is used to
+ * determine which attribute to scan, and the other expression
+ * is used to calculate the value used in scanning the index.
*
- * This means our index scan's scan key is a function of
- * information obtained during the execution of the plan
- * in which case we need to recalculate the index scan key
- * at run time.
+ * This means our index scan's scan key is a function of
+ * information obtained during the execution of the plan in
+ * which case we need to recalculate the index scan key at run
+ * time.
*
- * Hence, we set have_runtime_keys to true and then set
- * the appropriate flag in run_keys to LEFT_OP or RIGHT_OP.
- * The corresponding scan keys are recomputed at run time.
+ * Hence, we set have_runtime_keys to true and then set the
+ * appropriate flag in run_keys to LEFT_OP or RIGHT_OP. The
+ * corresponding scan keys are recomputed at run time.
*
- * XXX Although this code *thinks* it can handle an indexqual
- * with the indexkey on either side, in fact it cannot.
- * Indexscans only work with quals that have the indexkey on
- * the left (the planner/optimizer makes sure it never passes
- * anything else). The reason: the scankey machinery has no
- * provision for distinguishing which side of the operator is
- * the indexed attribute and which is the compared-to constant.
- * It just assumes that the attribute is on the left :-(
+ * XXX Although this code *thinks* it can handle an indexqual
+ * with the indexkey on either side, in fact it cannot.
+ * Indexscans only work with quals that have the indexkey on
+ * the left (the planner/optimizer makes sure it never passes
+ * anything else). The reason: the scankey machinery has no
+ * provision for distinguishing which side of the operator is
+ * the indexed attribute and which is the compared-to
+ * constant. It just assumes that the attribute is on the left
+ * :-(
*
- * I am leaving this code able to support both ways, even though
- * half of it is dead code, on the off chance that someone will
- * fix the scankey machinery someday --- tgl 8/11/99.
- * ----------------
+ * I am leaving this code able to support both ways, even though
+ * half of it is dead code, on the off chance that someone
+ * will fix the scankey machinery someday --- tgl 8/11/99.
*/
scanvar = NO_OP;
run_keys[j] = NO_OP;
- /* ----------------
- * determine information in leftop
- * ----------------
+ /*
+ * determine information in leftop
*/
leftop = (Node *) get_leftop(clause);
@@ -795,21 +772,21 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
if (IsA(leftop, Var) &&var_is_rel((Var *) leftop))
{
- /* ----------------
- * if the leftop is a "rel-var", then it means
- * that it is a var node which tells us which
- * attribute to use for our scan key.
- * ----------------
+
+ /*
+ * if the leftop is a "rel-var", then it means that it is
+ * a var node which tells us which attribute to use for
+ * our scan key.
*/
varattno = ((Var *) leftop)->varattno;
scanvar = LEFT_OP;
}
else if (IsA(leftop, Const))
{
- /* ----------------
- * if the leftop is a const node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+
+ /*
+ * if the leftop is a const node then it means it
+ * identifies the value to place in our scan key.
*/
scanvalue = ((Const *) leftop)->constvalue;
if (((Const *) leftop)->constisnull)
@@ -819,10 +796,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
{
bool isnull;
- /* ----------------
- * if the leftop is a Param node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+ /*
+ * if the leftop is a Param node then it means it
+ * identifies the value to place in our scan key.
*/
/* Life was so easy before ... subselects */
@@ -844,19 +820,18 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
}
else
{
- /* ----------------
- * otherwise, the leftop contains an expression evaluable
- * at runtime to figure out the value to place in our
- * scan key.
- * ----------------
+
+ /*
+ * otherwise, the leftop contains an expression evaluable
+ * at runtime to figure out the value to place in our scan
+ * key.
*/
have_runtime_keys = true;
run_keys[j] = LEFT_OP;
}
- /* ----------------
- * now determine information in rightop
- * ----------------
+ /*
+ * now determine information in rightop
*/
rightop = (Node *) get_rightop(clause);
@@ -867,30 +842,29 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
if (IsA(rightop, Var) &&var_is_rel((Var *) rightop))
{
- /* ----------------
- * here we make sure only one op identifies the
- * scan-attribute...
- * ----------------
+
+ /*
+ * here we make sure only one op identifies the
+ * scan-attribute...
*/
if (scanvar == LEFT_OP)
elog(ERROR, "ExecInitIndexScan: %s",
"both left and right op's are rel-vars");
- /* ----------------
- * if the rightop is a "rel-var", then it means
- * that it is a var node which tells us which
- * attribute to use for our scan key.
- * ----------------
+ /*
+ * if the rightop is a "rel-var", then it means that it is
+ * a var node which tells us which attribute to use for
+ * our scan key.
*/
varattno = ((Var *) rightop)->varattno;
scanvar = RIGHT_OP;
}
else if (IsA(rightop, Const))
{
- /* ----------------
- * if the rightop is a const node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+
+ /*
+ * if the rightop is a const node then it means it
+ * identifies the value to place in our scan key.
*/
scanvalue = ((Const *) rightop)->constvalue;
if (((Const *) rightop)->constisnull)
@@ -900,10 +874,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
{
bool isnull;
- /* ----------------
- * if the rightop is a Param node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+ /*
+ * if the rightop is a Param node then it means it
+ * identifies the value to place in our scan key.
*/
/* Life was so easy before ... subselects */
@@ -925,28 +898,26 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
}
else
{
- /* ----------------
- * otherwise, the rightop contains an expression evaluable
- * at runtime to figure out the value to place in our
- * scan key.
- * ----------------
+
+ /*
+ * otherwise, the rightop contains an expression evaluable
+ * at runtime to figure out the value to place in our scan
+ * key.
*/
have_runtime_keys = true;
run_keys[j] = RIGHT_OP;
}
- /* ----------------
- * now check that at least one op tells us the scan
- * attribute...
- * ----------------
+ /*
+ * now check that at least one op tells us the scan
+ * attribute...
*/
if (scanvar == NO_OP)
elog(ERROR, "ExecInitIndexScan: %s",
"neither leftop nor rightop refer to scan relation");
- /* ----------------
- * initialize the scan key's fields appropriately
- * ----------------
+ /*
+ * initialize the scan key's fields appropriately
*/
ScanKeyEntryInitialize(&scan_keys[j],
flags,
@@ -956,9 +927,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
scanvalue); /* constant */
}
- /* ----------------
- * store the key information into our arrays.
- * ----------------
+ /*
+ * store the key information into our arrays.
*/
numScanKeys[i] = n_keys;
scanKeys[i] = scan_keys;
@@ -972,20 +942,17 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
indexstate->iss_ScanKeys = scanKeys;
indexstate->iss_NumScanKeys = numScanKeys;
- /* ----------------
- * If all of our keys have the form (op var const) , then we have no
- * runtime keys so we store NULL in the runtime key info.
- * Otherwise runtime key info contains an array of pointers
- * (one for each index) to arrays of flags (one for each key)
- * which indicate that the qual needs to be evaluated at runtime.
- * -cim 10/24/89
+ /*
+ * If all of our keys have the form (op var const) , then we have no
+ * runtime keys so we store NULL in the runtime key info. Otherwise
+ * runtime key info contains an array of pointers (one for each index)
+ * to arrays of flags (one for each key) which indicate that the qual
+ * needs to be evaluated at runtime. -cim 10/24/89
*
- * If we do have runtime keys, we need an ExprContext to evaluate them;
- * the node's standard context won't do because we want to reset that
- * context for every tuple. So, build another context just like the
- * other one...
- * -tgl 7/11/00
- * ----------------
+ * If we do have runtime keys, we need an ExprContext to evaluate them;
+ * the node's standard context won't do because we want to reset that
+ * context for every tuple. So, build another context just like the
+ * other one... -tgl 7/11/00
*/
if (have_runtime_keys)
{
@@ -1009,18 +976,15 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
pfree(runtimeKeyInfo);
}
- /* ----------------
- * get the range table and direction information
- * from the execution state (these are needed to
- * open the relations).
- * ----------------
+ /*
+ * get the range table and direction information from the execution
+ * state (these are needed to open the relations).
*/
rangeTable = estate->es_range_table;
direction = estate->es_direction;
- /* ----------------
- * open the base relation
- * ----------------
+ /*
+ * open the base relation
*/
relid = node->scan.scanrelid;
rtentry = rt_fetch(relid, rangeTable);
@@ -1040,17 +1004,15 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
scanstate->css_currentRelation = currentRelation;
scanstate->css_currentScanDesc = currentScanDesc;
- /* ----------------
- * get the scan type from the relation descriptor.
- * ----------------
+ /*
+ * get the scan type from the relation descriptor.
*/
ExecAssignScanType(scanstate, RelationGetDescr(currentRelation), false);
ExecAssignResultTypeFromTL((Plan *) node, &scanstate->cstate);
- /* ----------------
- * open the index relations and initialize
- * relation and scan descriptors.
- * ----------------
+ /*
+ * open the index relations and initialize relation and scan
+ * descriptors.
*/
for (i = 0; i < numIndices; i++)
{
@@ -1073,9 +1035,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
indexstate->iss_RelationDescs = relationDescs;
indexstate->iss_ScanDescs = scanDescs;
- /* ----------------
- * all done.
- * ----------------
+ /*
+ * all done.
*/
return TRUE;
}
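
The rewritten comments above describe how ExecInitIndexScan turns each indexqual opclause into either a constant scan key (when the non-indexed side is a Const) or a runtime key that must be re-evaluated once outer-plan values are known. A minimal standalone sketch of that classification, using made-up types rather than the executor's Var/Const/Param nodes:

/*
 * Illustrative sketch only (hypothetical names, not the executor API):
 * classify each qual clause as a constant scan key or a runtime key.
 */
#include <stdio.h>

typedef enum { OPERAND_VAR, OPERAND_CONST, OPERAND_EXPR } OperandKind;
typedef enum { NO_OP = 0, LEFT_OP, RIGHT_OP } SideFlag;

typedef struct
{
    OperandKind leftkind;
    OperandKind rightkind;
    long        constval;       /* used when one side is a constant */
} QualClause;

int
main(void)
{
    QualClause  qual[] = {
        {OPERAND_VAR, OPERAND_CONST, 42},   /* indexkey = 42        */
        {OPERAND_VAR, OPERAND_EXPR, 0},     /* indexkey = outer.col */
    };
    int         nkeys = (int) (sizeof(qual) / sizeof(qual[0]));
    SideFlag    run_keys[2];
    long        scanvalue[2];
    int         have_runtime_keys = 0;

    for (int j = 0; j < nkeys; j++)
    {
        SideFlag    scanvar = NO_OP;

        run_keys[j] = NO_OP;
        scanvalue[j] = 0;

        /* which side names the indexed attribute? */
        if (qual[j].leftkind == OPERAND_VAR)
            scanvar = LEFT_OP;
        else if (qual[j].rightkind == OPERAND_VAR)
            scanvar = RIGHT_OP;

        /* the other side supplies the comparison value */
        OperandKind other = (scanvar == LEFT_OP) ? qual[j].rightkind
                                                 : qual[j].leftkind;

        if (other == OPERAND_CONST)
            scanvalue[j] = qual[j].constval;    /* usable immediately */
        else
        {
            /* value depends on outer-plan state: recompute at runtime */
            have_runtime_keys = 1;
            run_keys[j] = (scanvar == LEFT_OP) ? RIGHT_OP : LEFT_OP;
        }

        printf("key %d: scanvar=%d runtime=%d value=%ld\n",
               j, scanvar, run_keys[j], scanvalue[j]);
    }
    printf("have_runtime_keys = %d\n", have_runtime_keys);
    return 0;
}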
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 534c3a419d1..227f58232f9 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.4 2001/03/22 03:59:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.5 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,46 +44,43 @@ ExecLimit(Limit *node)
Plan *outerPlan;
long netlimit;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
limitstate = node->limitstate;
direction = node->plan.state->es_direction;
outerPlan = outerPlan((Plan *) node);
resultTupleSlot = limitstate->cstate.cs_ResultTupleSlot;
- /* ----------------
- * If first call for this scan, compute limit/offset.
- * (We can't do this any earlier, because parameters from upper nodes
- * may not be set until now.)
- * ----------------
+ /*
+ * If first call for this scan, compute limit/offset. (We can't do
+ * this any earlier, because parameters from upper nodes may not be
+ * set until now.)
*/
if (!limitstate->parmsSet)
recompute_limits(node);
netlimit = limitstate->offset + limitstate->count;
- /* ----------------
- * now loop, returning only desired tuples.
- * ----------------
+ /*
+ * now loop, returning only desired tuples.
*/
for (;;)
{
- /*----------------
- * If we have reached the subplan EOF or the limit, just quit.
+
+ /*
+ * If we have reached the subplan EOF or the limit, just quit.
*
* NOTE: when scanning forwards, we must fetch one tuple beyond the
- * COUNT limit before we can return NULL, else the subplan won't be
- * properly positioned to start going backwards. Hence test here
- * is for position > netlimit not position >= netlimit.
+ * COUNT limit before we can return NULL, else the subplan won't
+ * be properly positioned to start going backwards. Hence test
+ * here is for position > netlimit not position >= netlimit.
*
* Similarly, when scanning backwards, we must re-fetch the last
- * tuple in the offset region before we can return NULL. Otherwise
- * we won't be correctly aligned to start going forward again. So,
- * although you might think we can quit when position = offset + 1,
- * we have to fetch a subplan tuple first, and then exit when
- * position = offset.
- *----------------
+ * tuple in the offset region before we can return NULL.
+ * Otherwise we won't be correctly aligned to start going forward
+ * again. So, although you might think we can quit when position
+ * = offset + 1, we have to fetch a subplan tuple first, and then
+ * exit when position = offset.
*/
if (ScanDirectionIsForward(direction))
{
@@ -97,9 +94,9 @@ ExecLimit(Limit *node)
if (limitstate->position <= limitstate->offset)
return NULL;
}
- /* ----------------
- * fetch a tuple from the outer subplan
- * ----------------
+
+ /*
+ * fetch a tuple from the outer subplan
*/
slot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(slot))
@@ -136,10 +133,9 @@ ExecLimit(Limit *node)
}
limitstate->atEnd = false;
- /* ----------------
- * Now, is this a tuple we want? If not, loop around to fetch
- * another tuple from the subplan.
- * ----------------
+ /*
+ * Now, is this a tuple we want? If not, loop around to fetch
+ * another tuple from the subplan.
*/
if (limitstate->position > limitstate->offset &&
(limitstate->noCount || limitstate->position <= netlimit))
@@ -224,47 +220,42 @@ ExecInitLimit(Limit *node, EState *estate, Plan *parent)
LimitState *limitstate;
Plan *outerPlan;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->plan.state = estate;
- /* ----------------
- * create new LimitState for node
- * ----------------
+ /*
+ * create new LimitState for node
*/
limitstate = makeNode(LimitState);
node->limitstate = limitstate;
limitstate->parmsSet = false;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Limit nodes never call ExecQual or ExecProject, but they need
- * an exprcontext anyway to evaluate the limit/offset parameters in.
- * ----------------
+ * Limit nodes never call ExecQual or ExecProject, but they need an
+ * exprcontext anyway to evaluate the limit/offset parameters in.
*/
ExecAssignExprContext(estate, &limitstate->cstate);
#define LIMIT_NSLOTS 1
- /* ------------
+
+ /*
* Tuple table initialization
- * ------------
*/
ExecInitResultTupleSlot(estate, &limitstate->cstate);
- /* ----------------
- * then initialize outer plan
- * ----------------
+ /*
+ * then initialize outer plan
*/
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * limit nodes do no projections, so initialize
- * projection info for this node appropriately
- * ----------------
+ /*
+ * limit nodes do no projections, so initialize projection info for
+ * this node appropriately
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &limitstate->cstate);
limitstate->cstate.cs_ProjInfo = NULL;
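
The comment block above on position bookkeeping is the heart of ExecLimit: with OFFSET o and COUNT c, only 1-based positions o+1 through o+c are returned, and a forward scan keeps fetching until position exceeds o+c so the subplan stays positioned for a later backward scan. A small sketch of that window test (plain C, hypothetical names, not the node's LimitState):

/*
 * Illustrative sketch of the LIMIT/OFFSET window test described above.
 */
#include <stdio.h>
#include <stdbool.h>

static bool
in_limit_window(long position, long offset, long count, bool noCount)
{
    long    netlimit = offset + count;

    return position > offset && (noCount || position <= netlimit);
}

int
main(void)
{
    long    offset = 2,
            count = 3;          /* OFFSET 2 LIMIT 3 */

    for (long pos = 1; pos <= 7; pos++)
        printf("position %ld: %s\n", pos,
               in_limit_window(pos, offset, count, false) ? "return" : "skip");
    /* positions 3,4,5 are returned; a forward scan quits only after
     * fetching position 6, i.e. position > netlimit */
    return 0;
}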
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 1f55f852f0e..b74d37ae744 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.34 2001/03/22 03:59:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.35 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,46 +53,41 @@ ExecMaterial(Material *node)
TupleTableSlot *slot;
bool should_free;
- /* ----------------
- * get state info from node
- * ----------------
+ /*
+ * get state info from node
*/
matstate = node->matstate;
estate = node->plan.state;
dir = estate->es_direction;
tuplestorestate = (Tuplestorestate *) matstate->tuplestorestate;
- /* ----------------
- * If first time through, read all tuples from outer plan and
- * pass them to tuplestore.c.
- * Subsequent calls just fetch tuples from tuplestore.
- * ----------------
+ /*
+ * If first time through, read all tuples from outer plan and pass
+ * them to tuplestore.c. Subsequent calls just fetch tuples from
+ * tuplestore.
*/
if (tuplestorestate == NULL)
{
Plan *outerNode;
- /* ----------------
- * Want to scan subplan in the forward direction while creating
- * the stored data. (Does setting my direction actually affect
- * the subplan? I bet this is useless code...)
- * ----------------
+ /*
+ * Want to scan subplan in the forward direction while creating
+ * the stored data. (Does setting my direction actually affect
+ * the subplan? I bet this is useless code...)
*/
estate->es_direction = ForwardScanDirection;
- /* ----------------
- * Initialize tuplestore module.
- * ----------------
+ /*
+ * Initialize tuplestore module.
*/
tuplestorestate = tuplestore_begin_heap(true, /* randomAccess */
SortMem);
matstate->tuplestorestate = (void *) tuplestorestate;
- /* ----------------
- * Scan the subplan and feed all the tuples to tuplestore.
- * ----------------
+ /*
+ * Scan the subplan and feed all the tuples to tuplestore.
*/
outerNode = outerPlan((Plan *) node);
@@ -107,23 +102,20 @@ ExecMaterial(Material *node)
ExecClearTuple(slot);
}
- /* ----------------
- * Complete the store.
- * ----------------
+ /*
+ * Complete the store.
*/
tuplestore_donestoring(tuplestorestate);
- /* ----------------
- * restore to user specified direction
- * ----------------
+ /*
+ * restore to user specified direction
*/
estate->es_direction = dir;
}
- /* ----------------
- * Get the first or next tuple from tuplestore.
- * Returns NULL if no more tuples.
- * ----------------
+ /*
+ * Get the first or next tuple from tuplestore. Returns NULL if no
+ * more tuples.
*/
slot = (TupleTableSlot *) matstate->csstate.cstate.cs_ResultTupleSlot;
heapTuple = tuplestore_getheaptuple(tuplestorestate,
@@ -143,50 +135,44 @@ ExecInitMaterial(Material *node, EState *estate, Plan *parent)
MaterialState *matstate;
Plan *outerPlan;
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->plan.state = estate;
- /* ----------------
+ /*
* create state structure
- * ----------------
*/
matstate = makeNode(MaterialState);
matstate->tuplestorestate = NULL;
node->matstate = matstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Materialization nodes don't need ExprContexts because
- * they never call ExecQual or ExecProject.
- * ----------------
+ * Materialization nodes don't need ExprContexts because they never call
+ * ExecQual or ExecProject.
*/
#define MATERIAL_NSLOTS 1
- /* ----------------
+
+ /*
* tuple table initialization
*
- * material nodes only return tuples from their materialized
- * relation.
- * ----------------
+ * material nodes only return tuples from their materialized relation.
*/
ExecInitResultTupleSlot(estate, &matstate->csstate.cstate);
ExecInitScanTupleSlot(estate, &matstate->csstate);
- /* ----------------
+ /*
* initializes child nodes
- * ----------------
*/
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize tuple type. no need to initialize projection
- * info because this node doesn't do projections.
- * ----------------
+ /*
+ * initialize tuple type. no need to initialize projection info
+ * because this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &matstate->csstate.cstate);
ExecAssignScanTypeFromOuterPlan((Plan *) node, &matstate->csstate);
@@ -213,28 +199,24 @@ ExecEndMaterial(Material *node)
MaterialState *matstate;
Plan *outerPlan;
- /* ----------------
- * get info from the material state
- * ----------------
+ /*
+ * get info from the material state
*/
matstate = node->matstate;
- /* ----------------
- * shut down the subplan
- * ----------------
+ /*
+ * shut down the subplan
*/
outerPlan = outerPlan((Plan *) node);
ExecEndNode(outerPlan, (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(matstate->csstate.css_ScanTupleSlot);
- /* ----------------
- * Release tuplestore resources
- * ----------------
+ /*
+ * Release tuplestore resources
*/
if (matstate->tuplestorestate != NULL)
tuplestore_end((Tuplestorestate *) matstate->tuplestorestate);
@@ -252,9 +234,8 @@ ExecMaterialMarkPos(Material *node)
{
MaterialState *matstate = node->matstate;
- /* ----------------
- * if we haven't materialized yet, just return.
- * ----------------
+ /*
+ * if we haven't materialized yet, just return.
*/
if (!matstate->tuplestorestate)
return;
@@ -273,16 +254,14 @@ ExecMaterialRestrPos(Material *node)
{
MaterialState *matstate = node->matstate;
- /* ----------------
- * if we haven't materialized yet, just return.
- * ----------------
+ /*
+ * if we haven't materialized yet, just return.
*/
if (!matstate->tuplestorestate)
return;
- /* ----------------
- * restore the scan to the previously marked position
- * ----------------
+ /*
+ * restore the scan to the previously marked position
*/
tuplestore_restorepos((Tuplestorestate *) matstate->tuplestorestate);
}
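
The comments above describe the Material node's two phases: on the first call it drains the entire outer plan into a tuplestore, and every later call (or rescan) just reads back from the store. A rough standalone sketch of that materialize-on-first-use pattern, with an in-memory array standing in for tuplestore.c (which can also spill to disk and supports mark/restore):

/*
 * Illustrative sketch only; hypothetical names, not the executor API.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    int    *store;      /* materialized rows, NULL until first fetch */
    int     nstored;
    int     pos;        /* current read position */
} MatState;

/* stand-in for pulling one row from the outer subplan; -1 means EOF */
static int
subplan_next(void)
{
    static int  rows[] = {10, 20, 30, -1};
    static int  i = 0;

    return rows[i++];
}

static int
material_next(MatState *m)
{
    if (m->store == NULL)
    {
        /* first call: drain the subplan into the store */
        int     cap = 4,
                row;

        m->store = malloc(cap * sizeof(int));
        while ((row = subplan_next()) != -1)
        {
            if (m->nstored == cap)
                m->store = realloc(m->store, (cap *= 2) * sizeof(int));
            m->store[m->nstored++] = row;
        }
    }
    /* later calls just read from the store */
    return (m->pos < m->nstored) ? m->store[m->pos++] : -1;
}

int
main(void)
{
    MatState    m = {NULL, 0, 0};
    int         row;

    while ((row = material_next(&m)) != -1)
        printf("%d\n", row);
    free(m.store);
    return 0;
}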
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index e3617c032b0..40a962dabb5 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.43 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.44 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,38 +114,36 @@ MJFormSkipQual(List *qualList, char *replaceopname)
Oid oprleft,
oprright;
- /* ----------------
- * qualList is a list: ((op .. ..) ...)
- * first we make a copy of it. copyObject() makes a deep copy
- * so let's use it instead of the old fashoned lispCopy()...
- * ----------------
+ /*
+ * qualList is a list: ((op .. ..) ...)
+ *
+ * first we make a copy of it. copyObject() makes a deep copy so let's
+ * use it instead of the old fashioned lispCopy()...
*/
qualCopy = (List *) copyObject((Node *) qualList);
foreach(qualcdr, qualCopy)
{
- /* ----------------
- * first get the current (op .. ..) list
- * ----------------
+
+ /*
+ * first get the current (op .. ..) list
*/
qual = lfirst(qualcdr);
- /* ----------------
- * now get at the op
- * ----------------
+ /*
+ * now get at the op
*/
op = (Oper *) qual->oper;
if (!IsA(op, Oper))
elog(ERROR, "MJFormSkipQual: op not an Oper!");
- /* ----------------
- * Get the declared left and right operand types of the operator.
- * Note we do *not* use the actual operand types, since those might
- * be different in scenarios with binary-compatible data types.
- * There should be "<" and ">" operators matching a mergejoinable
- * "=" operator's declared operand types, but we might not find them
- * if we search with the actual operand types.
- * ----------------
+ /*
+ * Get the declared left and right operand types of the operator.
+ * Note we do *not* use the actual operand types, since those
+ * might be different in scenarios with binary-compatible data
+ * types. There should be "<" and ">" operators matching a
+ * mergejoinable "=" operator's declared operand types, but we
+ * might not find them if we search with the actual operand types.
*/
optup = SearchSysCache(OPEROID,
ObjectIdGetDatum(op->opno),
@@ -157,10 +155,9 @@ MJFormSkipQual(List *qualList, char *replaceopname)
oprright = opform->oprright;
ReleaseSysCache(optup);
- /* ----------------
- * Now look up the matching "<" or ">" operator. If there isn't one,
- * whoever marked the "=" operator mergejoinable was a loser.
- * ----------------
+ /*
+ * Now look up the matching "<" or ">" operator. If there isn't
+ * one, whoever marked the "=" operator mergejoinable was a loser.
*/
optup = SearchSysCache(OPERNAME,
PointerGetDatum(replaceopname),
@@ -173,9 +170,8 @@ MJFormSkipQual(List *qualList, char *replaceopname)
op->opno, replaceopname);
opform = (Form_pg_operator) GETSTRUCT(optup);
- /* ----------------
- * And replace the data in the copied operator node.
- * ----------------
+ /*
+ * And replace the data in the copied operator node.
*/
op->opno = optup->t_data->t_oid;
op->opid = opform->oprcode;
@@ -216,12 +212,10 @@ MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
*/
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
- /* ----------------
- * for each pair of clauses, test them until
- * our compare conditions are satisfied.
- * if we reach the end of the list, none of our key greater-than
- * conditions were satisfied so we return false.
- * ----------------
+ /*
+ * for each pair of clauses, test them until our compare conditions
+ * are satisfied. if we reach the end of the list, none of our key
+ * greater-than conditions were satisfied so we return false.
*/
result = false; /* assume 'false' result */
@@ -231,12 +225,11 @@ MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
Datum const_value;
bool isNull;
- /* ----------------
- * first test if our compare clause is satisfied.
- * if so then return true.
+ /*
+ * first test if our compare clause is satisfied. if so then
+ * return true.
*
- * A NULL result is considered false.
- * ----------------
+ * A NULL result is considered false.
*/
const_value = ExecEvalExpr((Node *) lfirst(clause), econtext,
&isNull, NULL);
@@ -247,11 +240,10 @@ MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
break;
}
- /* ----------------
- * ok, the compare clause failed so we test if the keys
- * are equal... if key1 != key2, we return false.
- * otherwise key1 = key2 so we move on to the next pair of keys.
- * ----------------
+ /*
+ * ok, the compare clause failed so we test if the keys are
+ * equal... if key1 != key2, we return false. otherwise key1 =
+ * key2 so we move on to the next pair of keys.
*/
const_value = ExecEvalExpr((Node *) lfirst(eqclause),
econtext,
@@ -404,9 +396,8 @@ ExecMergeJoin(MergeJoin *node)
bool doFillOuter;
bool doFillInner;
- /* ----------------
- * get information from node
- * ----------------
+ /*
+ * get information from node
*/
mergestate = node->mergestate;
estate = node->join.plan.state;
@@ -455,11 +446,10 @@ ExecMergeJoin(MergeJoin *node)
innerSkipQual = mergestate->mj_OuterSkipQual;
}
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * join tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (mergestate->jstate.cs_TupFromTlist)
{
@@ -473,25 +463,23 @@ ExecMergeJoin(MergeJoin *node)
mergestate->jstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
- /* ----------------
- * ok, everything is setup.. let's go to work
- * ----------------
+ /*
+ * ok, everything is setup.. let's go to work
*/
for (;;)
{
- /* ----------------
- * get the current state of the join and do things accordingly.
- * Note: The join states are highlighted with 32-* comments for
- * improved readability.
- * ----------------
+
+ /*
+ * get the current state of the join and do things accordingly.
+ * Note: The join states are highlighted with 32-* comments for
+ * improved readability.
*/
MJ_dump(mergestate);
@@ -553,10 +541,9 @@ ExecMergeJoin(MergeJoin *node)
return NULL;
}
- /* ----------------
- * OK, we have the initial tuples. Begin by skipping
- * unmatched inner tuples.
- * ----------------
+ /*
+ * OK, we have the initial tuples. Begin by skipping
+ * unmatched inner tuples.
*/
mergestate->mj_JoinState = EXEC_MJ_SKIPINNER_BEGIN;
break;
@@ -644,10 +631,11 @@ ExecMergeJoin(MergeJoin *node)
if (qualResult)
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -697,10 +685,11 @@ ExecMergeJoin(MergeJoin *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -719,9 +708,8 @@ ExecMergeJoin(MergeJoin *node)
}
}
- /* ----------------
- * now we get the next inner tuple, if any
- * ----------------
+ /*
+ * now we get the next inner tuple, if any
*/
innerTupleSlot = ExecProcNode(innerPlan, (Plan *) node);
mergestate->mj_InnerTupleSlot = innerTupleSlot;
@@ -775,10 +763,11 @@ ExecMergeJoin(MergeJoin *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -797,19 +786,17 @@ ExecMergeJoin(MergeJoin *node)
}
}
- /* ----------------
- * now we get the next outer tuple, if any
- * ----------------
+ /*
+ * now we get the next outer tuple, if any
*/
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
mergestate->mj_OuterTupleSlot = outerTupleSlot;
MJ_DEBUG_PROC_NODE(outerTupleSlot);
mergestate->mj_MatchedOuter = false;
- /* ----------------
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
- * ----------------
+ /*
+ * if the outer tuple is null then we are done with the
+ * join, unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
@@ -869,9 +856,9 @@ ExecMergeJoin(MergeJoin *node)
case EXEC_MJ_TESTOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_TESTOUTER\n");
- /* ----------------
- * here we compare the outer tuple with the marked inner tuple
- * ----------------
+ /*
+ * here we compare the outer tuple with the marked inner
+ * tuple
*/
ResetExprContext(econtext);
@@ -967,11 +954,10 @@ ExecMergeJoin(MergeJoin *node)
case EXEC_MJ_SKIPOUTER_BEGIN:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER_BEGIN\n");
- /* ----------------
- * before we advance, make sure the current tuples
- * do not satisfy the mergeclauses. If they do, then
- * we update the marked tuple and go join them.
- * ----------------
+ /*
+ * before we advance, make sure the current tuples do not
+ * satisfy the mergeclauses. If they do, then we update
+ * the marked tuple and go join them.
*/
ResetExprContext(econtext);
@@ -999,9 +985,8 @@ ExecMergeJoin(MergeJoin *node)
case EXEC_MJ_SKIPOUTER_TEST:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER_TEST\n");
- /* ----------------
- * ok, now test the skip qualification
- * ----------------
+ /*
+ * ok, now test the skip qualification
*/
outerTupleSlot = mergestate->mj_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
@@ -1014,10 +999,9 @@ ExecMergeJoin(MergeJoin *node)
MJ_DEBUG_MERGE_COMPARE(outerSkipQual, compareResult);
- /* ----------------
- * compareResult is true as long as we should
- * continue skipping outer tuples.
- * ----------------
+ /*
+ * compareResult is true as long as we should continue
+ * skipping outer tuples.
*/
if (compareResult)
{
@@ -1025,12 +1009,10 @@ ExecMergeJoin(MergeJoin *node)
break;
}
- /* ----------------
- * now check the inner skip qual to see if we
- * should now skip inner tuples... if we fail the
- * inner skip qual, then we know we have a new pair
- * of matching tuples.
- * ----------------
+ /*
+ * now check the inner skip qual to see if we should now
+ * skip inner tuples... if we fail the inner skip qual,
+ * then we know we have a new pair of matching tuples.
*/
compareResult = MergeCompare(mergeclauses,
innerSkipQual,
@@ -1044,10 +1026,9 @@ ExecMergeJoin(MergeJoin *node)
mergestate->mj_JoinState = EXEC_MJ_JOINMARK;
break;
- /*------------------------------------------------
+ /*
* Before advancing, we check to see if we must emit an
* outer-join fill tuple for this outer tuple.
- *------------------------------------------------
*/
case EXEC_MJ_SKIPOUTER_ADVANCE:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER_ADVANCE\n");
@@ -1071,10 +1052,11 @@ ExecMergeJoin(MergeJoin *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -1093,19 +1075,17 @@ ExecMergeJoin(MergeJoin *node)
}
}
- /* ----------------
- * now we get the next outer tuple, if any
- * ----------------
+ /*
+ * now we get the next outer tuple, if any
*/
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
mergestate->mj_OuterTupleSlot = outerTupleSlot;
MJ_DEBUG_PROC_NODE(outerTupleSlot);
mergestate->mj_MatchedOuter = false;
- /* ----------------
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
- * ----------------
+ /*
+ * if the outer tuple is null then we are done with the
+ * join, unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
@@ -1125,9 +1105,8 @@ ExecMergeJoin(MergeJoin *node)
return NULL;
}
- /* ----------------
- * otherwise test the new tuple against the skip qual.
- * ----------------
+ /*
+ * otherwise test the new tuple against the skip qual.
*/
mergestate->mj_JoinState = EXEC_MJ_SKIPOUTER_TEST;
break;
@@ -1155,11 +1134,10 @@ ExecMergeJoin(MergeJoin *node)
case EXEC_MJ_SKIPINNER_BEGIN:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPINNER_BEGIN\n");
- /* ----------------
- * before we advance, make sure the current tuples
- * do not satisfy the mergeclauses. If they do, then
- * we update the marked tuple and go join them.
- * ----------------
+ /*
+ * before we advance, make sure the current tuples do not
+ * satisfy the mergeclauses. If they do, then we update
+ * the marked tuple and go join them.
*/
ResetExprContext(econtext);
@@ -1187,9 +1165,8 @@ ExecMergeJoin(MergeJoin *node)
case EXEC_MJ_SKIPINNER_TEST:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPINNER_TEST\n");
- /* ----------------
- * ok, now test the skip qualification
- * ----------------
+ /*
+ * ok, now test the skip qualification
*/
outerTupleSlot = mergestate->mj_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
@@ -1202,10 +1179,9 @@ ExecMergeJoin(MergeJoin *node)
MJ_DEBUG_MERGE_COMPARE(innerSkipQual, compareResult);
- /* ----------------
- * compareResult is true as long as we should
- * continue skipping inner tuples.
- * ----------------
+ /*
+ * compareResult is true as long as we should continue
+ * skipping inner tuples.
*/
if (compareResult)
{
@@ -1213,12 +1189,10 @@ ExecMergeJoin(MergeJoin *node)
break;
}
- /* ----------------
- * now check the outer skip qual to see if we
- * should now skip outer tuples... if we fail the
- * outer skip qual, then we know we have a new pair
- * of matching tuples.
- * ----------------
+ /*
+ * now check the outer skip qual to see if we should now
+ * skip outer tuples... if we fail the outer skip qual,
+ * then we know we have a new pair of matching tuples.
*/
compareResult = MergeCompare(mergeclauses,
outerSkipQual,
@@ -1232,10 +1206,9 @@ ExecMergeJoin(MergeJoin *node)
mergestate->mj_JoinState = EXEC_MJ_JOINMARK;
break;
- /*------------------------------------------------
+ /*
* Before advancing, we check to see if we must emit an
* outer-join fill tuple for this inner tuple.
- *------------------------------------------------
*/
case EXEC_MJ_SKIPINNER_ADVANCE:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPINNER_ADVANCE\n");
@@ -1259,10 +1232,11 @@ ExecMergeJoin(MergeJoin *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -1281,19 +1255,17 @@ ExecMergeJoin(MergeJoin *node)
}
}
- /* ----------------
- * now we get the next inner tuple, if any
- * ----------------
+ /*
+ * now we get the next inner tuple, if any
*/
innerTupleSlot = ExecProcNode(innerPlan, (Plan *) node);
mergestate->mj_InnerTupleSlot = innerTupleSlot;
MJ_DEBUG_PROC_NODE(innerTupleSlot);
mergestate->mj_MatchedInner = false;
- /* ----------------
- * if the inner tuple is null then we are done with the
- * join, unless we have outer tuples we need to null-fill.
- * ----------------
+ /*
+ * if the inner tuple is null then we are done with the
+ * join, unless we have outer tuples we need to null-fill.
*/
if (TupIsNull(innerTupleSlot))
{
@@ -1313,9 +1285,8 @@ ExecMergeJoin(MergeJoin *node)
return NULL;
}
- /* ----------------
- * otherwise test the new tuple against the skip qual.
- * ----------------
+ /*
+ * otherwise test the new tuple against the skip qual.
*/
mergestate->mj_JoinState = EXEC_MJ_SKIPINNER_TEST;
break;
@@ -1349,10 +1320,11 @@ ExecMergeJoin(MergeJoin *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -1371,9 +1343,8 @@ ExecMergeJoin(MergeJoin *node)
}
}
- /* ----------------
- * now we get the next inner tuple, if any
- * ----------------
+ /*
+ * now we get the next inner tuple, if any
*/
innerTupleSlot = ExecProcNode(innerPlan, (Plan *) node);
mergestate->mj_InnerTupleSlot = innerTupleSlot;
@@ -1418,10 +1389,11 @@ ExecMergeJoin(MergeJoin *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -1440,9 +1412,8 @@ ExecMergeJoin(MergeJoin *node)
}
}
- /* ----------------
- * now we get the next outer tuple, if any
- * ----------------
+ /*
+ * now we get the next outer tuple, if any
*/
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
mergestate->mj_OuterTupleSlot = outerTupleSlot;
@@ -1487,39 +1458,35 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
MJ1_printf("ExecInitMergeJoin: %s\n",
"initializing node");
- /* ----------------
- * assign the node's execution state and
- * get the range table and direction from it
- * ----------------
+ /*
+ * assign the node's execution state and get the range table and
+ * direction from it
*/
node->join.plan.state = estate;
- /* ----------------
- * create new merge state for node
- * ----------------
+ /*
+ * create new merge state for node
*/
mergestate = makeNode(MergeJoinState);
node->mergestate = mergestate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &mergestate->jstate);
- /* ----------------
- * initialize subplans
- * ----------------
+ /*
+ * initialize subplans
*/
ExecInitNode(outerPlan((Plan *) node), estate, (Plan *) node);
ExecInitNode(innerPlan((Plan *) node), estate, (Plan *) node);
#define MERGEJOIN_NSLOTS 4
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &mergestate->jstate);
@@ -1569,16 +1536,14 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
(int) node->join.jointype);
}
- /* ----------------
- * initialize tuple type and projection info
- * ----------------
+ /*
+ * initialize tuple type and projection info
*/
ExecAssignResultTypeFromTL((Plan *) node, &mergestate->jstate);
ExecAssignProjectionInfo((Plan *) node, &mergestate->jstate);
- /* ----------------
- * form merge skip qualifications
- * ----------------
+ /*
+ * form merge skip qualifications
*/
joinclauses = node->mergeclauses;
mergestate->mj_OuterSkipQual = MJFormSkipQual(joinclauses, "<");
@@ -1590,9 +1555,8 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
MJ_nodeDisplay(mergestate->mj_InnerSkipQual);
MJ_printf("\n");
- /* ----------------
- * initialize join state
- * ----------------
+ /*
+ * initialize join state
*/
mergestate->mj_JoinState = EXEC_MJ_INITIALIZE;
mergestate->jstate.cs_TupFromTlist = false;
@@ -1601,9 +1565,8 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
mergestate->mj_OuterTupleSlot = NULL;
mergestate->mj_InnerTupleSlot = NULL;
- /* ----------------
- * initialization successful
- * ----------------
+ /*
+ * initialization successful
*/
MJ1_printf("ExecInitMergeJoin: %s\n",
"node initialized");
@@ -1634,34 +1597,29 @@ ExecEndMergeJoin(MergeJoin *node)
MJ1_printf("ExecEndMergeJoin: %s\n",
"ending node processing");
- /* ----------------
- * get state information from the node
- * ----------------
+ /*
+ * get state information from the node
*/
mergestate = node->mergestate;
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(mergestate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(mergestate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&mergestate->jstate);
ExecFreeExprContext(&mergestate->jstate);
- /* ----------------
- * shut down the subplans
- * ----------------
+ /*
+ * shut down the subplans
*/
ExecEndNode((Plan *) innerPlan((Plan *) node), (Plan *) node);
ExecEndNode((Plan *) outerPlan((Plan *) node), (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(mergestate->jstate.cs_ResultTupleSlot);
ExecClearTuple(mergestate->mj_MarkedTupleSlot);
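
MergeCompare, per the comments above, walks the skip-qual and equality clauses pairwise: if a "greater-than" clause is satisfied it returns true, if the keys are merely unequal it returns false, and only when a pair is equal does it move on to the next pair. A compact sketch of that lexicographic test over plain integer keys (illustrative only; the real code evaluates the operator clauses built by MJFormSkipQual):

/*
 * Illustrative sketch of the MergeCompare logic described above.
 * Returns true as soon as some key of k1 compares greater than the
 * corresponding key of k2 while all earlier keys are equal; returns
 * false if the keys are all equal or k1 sorts first.
 */
#include <stdio.h>
#include <stdbool.h>

static bool
merge_compare_gt(const int *k1, const int *k2, int nkeys)
{
    for (int i = 0; i < nkeys; i++)
    {
        if (k1[i] > k2[i])      /* "compare clause" satisfied */
            return true;
        if (k1[i] != k2[i])     /* not greater and not equal */
            return false;
        /* equal on this key: fall through to the next pair */
    }
    return false;               /* all keys equal */
}

int
main(void)
{
    int     outer[] = {1, 5};
    int     inner[] = {1, 3};

    /* outer sorts after inner on the second key, so keep skipping inner */
    printf("%s\n", merge_compare_gt(outer, inner, 2)
           ? "advance inner" : "keys match or outer sorts first");
    return 0;
}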
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index 9c01ee4a1fb..b6959038870 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.23 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.24 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,9 +68,8 @@ ExecNestLoop(NestLoop *node)
List *otherqual;
ExprContext *econtext;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
ENL1_printf("getting info from node");
@@ -81,18 +80,16 @@ ExecNestLoop(NestLoop *node)
innerPlan = innerPlan((Plan *) node);
econtext = nlstate->jstate.cs_ExprContext;
- /* ----------------
+ /*
* get the current outer tuple
- * ----------------
*/
outerTupleSlot = nlstate->jstate.cs_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * join tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (nlstate->jstate.cs_TupFromTlist)
{
@@ -106,37 +103,34 @@ ExecNestLoop(NestLoop *node)
nlstate->jstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
- /* ----------------
- * Ok, everything is setup for the join so now loop until
- * we return a qualifying join tuple.
- * ----------------
+ /*
+ * Ok, everything is setup for the join so now loop until we return a
+ * qualifying join tuple.
*/
ENL1_printf("entering main loop");
for (;;)
{
- /* ----------------
- * If we don't have an outer tuple, get the next one and
- * reset the inner scan.
- * ----------------
+
+ /*
+ * If we don't have an outer tuple, get the next one and reset the
+ * inner scan.
*/
if (nlstate->nl_NeedNewOuter)
{
ENL1_printf("getting new outer tuple");
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
- /* ----------------
- * if there are no more outer tuples, then the join
- * is complete..
- * ----------------
+ /*
+ * if there are no more outer tuples, then the join is
+ * complete..
*/
if (TupIsNull(outerTupleSlot))
{
@@ -150,9 +144,8 @@ ExecNestLoop(NestLoop *node)
nlstate->nl_NeedNewOuter = false;
nlstate->nl_MatchedOuter = false;
- /* ----------------
- * now rescan the inner plan
- * ----------------
+ /*
+ * now rescan the inner plan
*/
ENL1_printf("rescanning inner plan");
@@ -164,9 +157,8 @@ ExecNestLoop(NestLoop *node)
ExecReScan(innerPlan, econtext, (Plan *) node);
}
- /* ----------------
- * we have an outerTuple, try to get the next inner tuple.
- * ----------------
+ /*
+ * we have an outerTuple, try to get the next inner tuple.
*/
ENL1_printf("getting new inner tuple");
@@ -195,11 +187,11 @@ ExecNestLoop(NestLoop *node)
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification was satisfied so we project and
- * return the slot containing the result tuple
- * using ExecProject().
- * ----------------
+
+ /*
+ * qualification was satisfied so we project and
+ * return the slot containing the result tuple using
+ * ExecProject().
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -223,14 +215,13 @@ ExecNestLoop(NestLoop *node)
continue;
}
- /* ----------------
- * at this point we have a new pair of inner and outer
- * tuples so we test the inner and outer tuples to see
- * if they satisfy the node's qualification.
+ /*
+ * at this point we have a new pair of inner and outer tuples so
+ * we test the inner and outer tuples to see if they satisfy the
+ * node's qualification.
*
- * Only the joinquals determine MatchedOuter status,
- * but all quals must pass to actually return the tuple.
- * ----------------
+ * Only the joinquals determine MatchedOuter status, but all quals
+ * must pass to actually return the tuple.
*/
ENL1_printf("testing qualification");
@@ -240,11 +231,11 @@ ExecNestLoop(NestLoop *node)
if (otherqual == NIL || ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification was satisfied so we project and
- * return the slot containing the result tuple
- * using ExecProject().
- * ----------------
+
+ /*
+ * qualification was satisfied so we project and return
+ * the slot containing the result tuple using
+ * ExecProject().
*/
TupleTableSlot *result;
ExprDoneCond isDone;
@@ -262,9 +253,8 @@ ExecNestLoop(NestLoop *node)
}
}
- /* ----------------
- * Tuple fails qual, so free per-tuple memory and try again.
- * ----------------
+ /*
+ * Tuple fails qual, so free per-tuple memory and try again.
*/
ResetExprContext(econtext);
@@ -288,38 +278,34 @@ ExecInitNestLoop(NestLoop *node, EState *estate, Plan *parent)
NL1_printf("ExecInitNestLoop: %s\n",
"initializing node");
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->join.plan.state = estate;
- /* ----------------
- * create new nest loop state
- * ----------------
+ /*
+ * create new nest loop state
*/
nlstate = makeNode(NestLoopState);
node->nlstate = nlstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &nlstate->jstate);
- /* ----------------
- * now initialize children
- * ----------------
+ /*
+ * now initialize children
*/
ExecInitNode(outerPlan((Plan *) node), estate, (Plan *) node);
ExecInitNode(innerPlan((Plan *) node), estate, (Plan *) node);
#define NESTLOOP_NSLOTS 2
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &nlstate->jstate);
@@ -337,16 +323,14 @@ ExecInitNestLoop(NestLoop *node, EState *estate, Plan *parent)
(int) node->join.jointype);
}
- /* ----------------
- * initialize tuple type and projection info
- * ----------------
+ /*
+ * initialize tuple type and projection info
*/
ExecAssignResultTypeFromTL((Plan *) node, &nlstate->jstate);
ExecAssignProjectionInfo((Plan *) node, &nlstate->jstate);
- /* ----------------
- * finally, wipe the current outer tuple clean.
- * ----------------
+ /*
+ * finally, wipe the current outer tuple clean.
*/
nlstate->jstate.cs_OuterTupleSlot = NULL;
nlstate->jstate.cs_TupFromTlist = false;
@@ -380,34 +364,29 @@ ExecEndNestLoop(NestLoop *node)
NL1_printf("ExecEndNestLoop: %s\n",
"ending node processing");
- /* ----------------
- * get info from the node
- * ----------------
+ /*
+ * get info from the node
*/
nlstate = node->nlstate;
- /* ----------------
- * Free the projection info
+ /*
+ * Free the projection info
*
- * Note: we don't ExecFreeResultType(nlstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(nlstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&nlstate->jstate);
ExecFreeExprContext(&nlstate->jstate);
- /* ----------------
- * close down subplans
- * ----------------
+ /*
+ * close down subplans
*/
ExecEndNode(outerPlan((Plan *) node), (Plan *) node);
ExecEndNode(innerPlan((Plan *) node), (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(nlstate->jstate.cs_ResultTupleSlot);
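
The rewritten comments above trace the nest-loop cycle: fetch an outer tuple, rescan the inner plan from the top, and test each pair against the join qual before projecting. A toy sketch of that control flow over integer arrays (it omits the outer-join null filling and per-tuple context resets the comments mention):

/*
 * Illustrative sketch only; hypothetical data, not executor structures.
 */
#include <stdio.h>

int
main(void)
{
    int     outer[] = {1, 2, 3};
    int     inner[] = {2, 3, 4};
    int     nouter = 3,
            ninner = 3;

    for (int i = 0; i < nouter; i++)        /* get next outer tuple */
    {
        for (int j = 0; j < ninner; j++)    /* rescan the inner plan */
        {
            if (outer[i] == inner[j])       /* test the join qual */
                printf("join: outer=%d inner=%d\n", outer[i], inner[j]);
        }
    }
    return 0;
}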
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index 863d4a4a56e..f9f325f637b 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.18 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.19 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,16 +69,14 @@ ExecResult(Result *node)
ExprContext *econtext;
ExprDoneCond isDone;
- /* ----------------
- * initialize the result node's state
- * ----------------
+ /*
+ * initialize the result node's state
*/
resstate = node->resstate;
econtext = resstate->cstate.cs_ExprContext;
- /* ----------------
- * check constant qualifications like (2 > 1), if not already done
- * ----------------
+ /*
+ * check constant qualifications like (2 > 1), if not already done
*/
if (resstate->rs_checkqual)
{
@@ -94,11 +92,10 @@ ExecResult(Result *node)
}
}
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * scan tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * scan tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (resstate->cstate.cs_TupFromTlist)
{
@@ -109,20 +106,18 @@ ExecResult(Result *node)
resstate->cstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a scan tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a scan tuple.
*/
ResetExprContext(econtext);
- /* ----------------
- * if rs_done is true then it means that we were asked to return
- * a constant tuple and we already did the last time ExecResult()
- * was called, OR that we failed the constant qual check.
- * Either way, now we are through.
- * ----------------
+ /*
+ * if rs_done is true then it means that we were asked to return a
+ * constant tuple and we already did the last time ExecResult() was
+ * called, OR that we failed the constant qual check. Either way, now
+ * we are through.
*/
while (!resstate->rs_done)
{
@@ -130,9 +125,10 @@ ExecResult(Result *node)
if (outerPlan != NULL)
{
- /* ----------------
- * retrieve tuples from the outer plan until there are no more.
- * ----------------
+
+ /*
+ * retrieve tuples from the outer plan until there are no
+ * more.
*/
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
@@ -141,28 +137,27 @@ ExecResult(Result *node)
resstate->cstate.cs_OuterTupleSlot = outerTupleSlot;
- /* ----------------
- * XXX gross hack. use outer tuple as scan tuple for projection
- * ----------------
+ /*
+ * XXX gross hack. use outer tuple as scan tuple for
+ * projection
*/
econtext->ecxt_outertuple = outerTupleSlot;
econtext->ecxt_scantuple = outerTupleSlot;
}
else
{
- /* ----------------
- * if we don't have an outer plan, then we are just generating
- * the results from a constant target list. Do it only once.
- * ----------------
+
+ /*
+ * if we don't have an outer plan, then we are just generating
+ * the results from a constant target list. Do it only once.
*/
resstate->rs_done = true;
}
- /* ----------------
- * form the result tuple using ExecProject(), and return it
- * --- unless the projection produces an empty set, in which case
- * we must loop back to see if there are more outerPlan tuples.
- * ----------------
+ /*
+ * form the result tuple using ExecProject(), and return it ---
+ * unless the projection produces an empty set, in which case we
+ * must loop back to see if there are more outerPlan tuples.
*/
resultSlot = ExecProject(resstate->cstate.cs_ProjInfo, &isDone);
@@ -189,39 +184,35 @@ ExecInitResult(Result *node, EState *estate, Plan *parent)
{
ResultState *resstate;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->plan.state = estate;
- /* ----------------
- * create new ResultState for node
- * ----------------
+ /*
+ * create new ResultState for node
*/
resstate = makeNode(ResultState);
resstate->rs_done = false;
resstate->rs_checkqual = (node->resconstantqual == NULL) ? false : true;
node->resstate = resstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &resstate->cstate);
#define RESULT_NSLOTS 1
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &resstate->cstate);
- /* ----------------
- * then initialize children
- * ----------------
+ /*
+ * then initialize children
*/
ExecInitNode(outerPlan(node), estate, (Plan *) node);
@@ -230,9 +221,8 @@ ExecInitResult(Result *node, EState *estate, Plan *parent)
*/
Assert(innerPlan(node) == NULL);
- /* ----------------
- * initialize tuple type and projection info
- * ----------------
+ /*
+ * initialize tuple type and projection info
*/
ExecAssignResultTypeFromTL((Plan *) node, &resstate->cstate);
ExecAssignProjectionInfo((Plan *) node, &resstate->cstate);
@@ -259,27 +249,23 @@ ExecEndResult(Result *node)
resstate = node->resstate;
- /* ----------------
- * Free the projection info
+ /*
+ * Free the projection info
*
- * Note: we don't ExecFreeResultType(resstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(resstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&resstate->cstate);
ExecFreeExprContext(&resstate->cstate);
- /* ----------------
- * shut down subplans
- * ----------------
+ /*
+ * shut down subplans
*/
ExecEndNode(outerPlan(node), (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(resstate->cstate.cs_ResultTupleSlot);
pfree(resstate);
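
Every hunk shown in these executor files makes the same mechanical change: the old dashed "banner" comments are collapsed into plain block comments and their text is re-wrapped. A minimal sketch of the two styles, written here purely for illustration and not taken from the commit:

/*
 * Illustration only, not from the commit: the same comment in the old
 * banner style (the "-" lines) and in the plain block style that the
 * "+" lines introduce.
 */
static void
comment_style_example(void)
{
    /* ----------------
     *      old style: text framed by dashed banner lines
     * ----------------
     */

    /*
     * new style: a plain block comment, re-wrapped to fill the line
     */
}

Nothing about the compiled code changes; only comment layout and the occasional surrounding blank line do.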
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index d879cee7a75..4e22845a11f 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.28 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.29 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,9 +54,8 @@ SeqNext(SeqScan *node)
ScanDirection direction;
TupleTableSlot *slot;
- /* ----------------
- * get information from the estate and scan state
- * ----------------
+ /*
+ * get information from the estate and scan state
*/
estate = node->plan.state;
scanstate = node->scanstate;
@@ -91,21 +90,19 @@ SeqNext(SeqScan *node)
return (slot);
}
- /* ----------------
- * get the next tuple from the access methods
- * ----------------
+ /*
+ * get the next tuple from the access methods
*/
tuple = heap_getnext(scandesc, ScanDirectionIsBackward(direction));
- /* ----------------
- * save the tuple and the buffer returned to us by the access methods
- * in our scan tuple slot and return the slot. Note: we pass 'false'
- * because tuples returned by heap_getnext() are pointers onto
- * disk pages and were not created with palloc() and so should not
- * be pfree()'d. Note also that ExecStoreTuple will increment the
- * refcount of the buffer; the refcount will not be dropped until
- * the tuple table slot is cleared.
- * ----------------
+ /*
+ * save the tuple and the buffer returned to us by the access methods
+ * in our scan tuple slot and return the slot. Note: we pass 'false'
+ * because tuples returned by heap_getnext() are pointers onto disk
+ * pages and were not created with palloc() and so should not be
+ * pfree()'d. Note also that ExecStoreTuple will increment the
+ * refcount of the buffer; the refcount will not be dropped until the
+ * tuple table slot is cleared.
*/
slot = ExecStoreTuple(tuple,/* tuple to store */
@@ -130,9 +127,9 @@ SeqNext(SeqScan *node)
TupleTableSlot *
ExecSeqScan(SeqScan *node)
{
- /* ----------------
- * use SeqNext as access method
- * ----------------
+
+ /*
+ * use SeqNext as access method
*/
return ExecScan(node, (ExecScanAccessMtd) SeqNext);
}
@@ -156,11 +153,9 @@ InitScanRelation(SeqScan *node, EState *estate,
Relation currentRelation;
HeapScanDesc currentScanDesc;
- /* ----------------
- * get the relation object id from the relid'th entry
- * in the range table, open that relation and initialize
- * the scan state...
- * ----------------
+ /*
+ * get the relation object id from the relid'th entry in the range
+ * table, open that relation and initialize the scan state...
*/
relid = node->scanrelid;
rangeTable = estate->es_range_table;
@@ -197,55 +192,49 @@ ExecInitSeqScan(SeqScan *node, EState *estate, Plan *parent)
Oid reloid;
HeapScanDesc scandesc;
- /* ----------------
- * Once upon a time it was possible to have an outerPlan of a SeqScan,
- * but not any more.
- * ----------------
+ /*
+ * Once upon a time it was possible to have an outerPlan of a SeqScan,
+ * but not any more.
*/
Assert(outerPlan((Plan *) node) == NULL);
Assert(innerPlan((Plan *) node) == NULL);
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->plan.state = estate;
- /* ----------------
- * create new CommonScanState for node
- * ----------------
+ /*
+ * create new CommonScanState for node
*/
scanstate = makeNode(CommonScanState);
node->scanstate = scanstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &scanstate->cstate);
#define SEQSCAN_NSLOTS 3
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &scanstate->cstate);
ExecInitScanTupleSlot(estate, scanstate);
- /* ----------------
- * initialize scan relation
- * ----------------
+ /*
+ * initialize scan relation
*/
reloid = InitScanRelation(node, estate, scanstate);
scandesc = scanstate->css_currentScanDesc;
scanstate->cstate.cs_TupFromTlist = false;
- /* ----------------
- * initialize tuple type
- * ----------------
+ /*
+ * initialize tuple type
*/
ExecAssignResultTypeFromTL((Plan *) node, &scanstate->cstate);
ExecAssignProjectionInfo((Plan *) node, &scanstate->cstate);
@@ -272,33 +261,28 @@ ExecEndSeqScan(SeqScan *node)
{
CommonScanState *scanstate;
- /* ----------------
- * get information from node
- * ----------------
+ /*
+ * get information from node
*/
scanstate = node->scanstate;
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(scanstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(scanstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&scanstate->cstate);
ExecFreeExprContext(&scanstate->cstate);
- /* ----------------
+ /*
* close scan relation
- * ----------------
*/
ExecCloseR((Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(scanstate->cstate.cs_ResultTupleSlot);
ExecClearTuple(scanstate->css_ScanTupleSlot);
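
The SeqNext() comment above spells out an ownership rule: heap_getnext() returns pointers into disk pages, so ExecStoreTuple() is passed 'false' and must never pfree() them, and the buffer pin is only dropped when the slot is cleared. A self-contained sketch of the same should-free flag idea, using invented names (MiniSlot, slot_store, slot_clear) rather than backend API:

#include <stdlib.h>

/*
 * Hypothetical miniature of a tuple-table slot: should_free records
 * whether the slot owns the stored pointer.  Borrowed pointers (like
 * tuples sitting in shared disk buffers) are stored with should_free
 * set to 0, so clearing the slot never free()s them.
 */
typedef struct MiniSlot
{
    void   *tuple;
    int     should_free;
} MiniSlot;

static void
slot_store(MiniSlot *slot, void *tuple, int should_free)
{
    slot->tuple = tuple;
    slot->should_free = should_free;
}

static void
slot_clear(MiniSlot *slot)
{
    if (slot->should_free && slot->tuple != NULL)
        free(slot->tuple);
    slot->tuple = NULL;
    slot->should_free = 0;
}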
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 00c79992039..3072fe5f962 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSetOp.c,v 1.3 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSetOp.c,v 1.4 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,19 +51,17 @@ ExecSetOp(SetOp *node)
Plan *outerPlan;
TupleDesc tupDesc;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
setopstate = node->setopstate;
outerPlan = outerPlan((Plan *) node);
resultTupleSlot = setopstate->cstate.cs_ResultTupleSlot;
tupDesc = ExecGetResultType(&setopstate->cstate);
- /* ----------------
- * If the previously-returned tuple needs to be returned more than
- * once, keep returning it.
- * ----------------
+ /*
+ * If the previously-returned tuple needs to be returned more than
+ * once, keep returning it.
*/
if (setopstate->numOutput > 0)
{
@@ -74,23 +72,21 @@ ExecSetOp(SetOp *node)
/* Flag that we have no current tuple */
ExecClearTuple(resultTupleSlot);
- /* ----------------
- * Absorb groups of duplicate tuples, counting them, and
- * saving the first of each group as a possible return value.
- * At the end of each group, decide whether to return anything.
+ /*
+ * Absorb groups of duplicate tuples, counting them, and saving the
+ * first of each group as a possible return value. At the end of each
+ * group, decide whether to return anything.
*
- * We assume that the tuples arrive in sorted order
- * so we can detect duplicates easily.
- * ----------------
+ * We assume that the tuples arrive in sorted order so we can detect
+ * duplicates easily.
*/
for (;;)
{
TupleTableSlot *inputTupleSlot;
bool endOfGroup;
- /* ----------------
- * fetch a tuple from the outer subplan, unless we already did.
- * ----------------
+ /*
+ * fetch a tuple from the outer subplan, unless we already did.
*/
if (setopstate->cstate.cs_OuterTupleSlot == NULL &&
!setopstate->subplan_done)
@@ -235,15 +231,13 @@ ExecInitSetOp(SetOp *node, EState *estate, Plan *parent)
SetOpState *setopstate;
Plan *outerPlan;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->plan.state = estate;
- /* ----------------
- * create new SetOpState for node
- * ----------------
+ /*
+ * create new SetOpState for node
*/
setopstate = makeNode(SetOpState);
node->setopstate = setopstate;
@@ -251,13 +245,12 @@ ExecInitSetOp(SetOp *node, EState *estate, Plan *parent)
setopstate->subplan_done = false;
setopstate->numOutput = 0;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * SetOp nodes have no ExprContext initialization because
- * they never call ExecQual or ExecProject. But they do need a
- * per-tuple memory context anyway for calling execTuplesMatch.
- * ----------------
+ * SetOp nodes have no ExprContext initialization because they never call
+ * ExecQual or ExecProject. But they do need a per-tuple memory
+ * context anyway for calling execTuplesMatch.
*/
setopstate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
@@ -267,23 +260,21 @@ ExecInitSetOp(SetOp *node, EState *estate, Plan *parent)
ALLOCSET_DEFAULT_MAXSIZE);
#define SETOP_NSLOTS 1
- /* ------------
+
+ /*
* Tuple table initialization
- * ------------
*/
ExecInitResultTupleSlot(estate, &setopstate->cstate);
- /* ----------------
- * then initialize outer plan
- * ----------------
+ /*
+ * then initialize outer plan
*/
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * setop nodes do no projections, so initialize
- * projection info for this node appropriately
- * ----------------
+ /*
+ * setop nodes do no projections, so initialize projection info for
+ * this node appropriately
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &setopstate->cstate);
setopstate->cstate.cs_ProjInfo = NULL;
diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c
index b8c057c3397..12c6f82a8b2 100644
--- a/src/backend/executor/nodeSort.c
+++ b/src/backend/executor/nodeSort.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSort.c,v 1.31 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSort.c,v 1.32 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,25 +39,22 @@ FormSortKeys(Sort *sortnode)
Index reskey;
Oid reskeyop;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
targetList = sortnode->plan.targetlist;
keycount = sortnode->keycount;
- /* ----------------
- * first allocate space for scan keys
- * ----------------
+ /*
+ * first allocate space for scan keys
*/
if (keycount <= 0)
elog(ERROR, "FormSortKeys: keycount <= 0");
sortkeys = (ScanKey) palloc(keycount * sizeof(ScanKeyData));
MemSet((char *) sortkeys, 0, keycount * sizeof(ScanKeyData));
- /* ----------------
- * form each scan key from the resdom info in the target list
- * ----------------
+ /*
+ * form each scan key from the resdom info in the target list
*/
foreach(tl, targetList)
{
@@ -106,9 +103,8 @@ ExecSort(Sort *node)
TupleTableSlot *slot;
bool should_free;
- /* ----------------
- * get state info from node
- * ----------------
+ /*
+ * get state info from node
*/
SO1_printf("ExecSort: %s\n",
"entering routine");
@@ -118,11 +114,10 @@ ExecSort(Sort *node)
dir = estate->es_direction;
tuplesortstate = (Tuplesortstate *) sortstate->tuplesortstate;
- /* ----------------
- * If first time through, read all tuples from outer plan and
- * pass them to tuplesort.c.
- * Subsequent calls just fetch tuples from tuplesort.
- * ----------------
+ /*
+ * If first time through, read all tuples from outer plan and pass
+ * them to tuplesort.c. Subsequent calls just fetch tuples from
+ * tuplesort.
*/
if (!sortstate->sort_Done)
@@ -134,17 +129,16 @@ ExecSort(Sort *node)
SO1_printf("ExecSort: %s\n",
"sorting subplan");
- /* ----------------
- * Want to scan subplan in the forward direction while creating
- * the sorted data. (Does setting my direction actually affect
- * the subplan? I bet this is useless code...)
- * ----------------
+
+ /*
+ * Want to scan subplan in the forward direction while creating
+ * the sorted data. (Does setting my direction actually affect
+ * the subplan? I bet this is useless code...)
*/
estate->es_direction = ForwardScanDirection;
- /* ----------------
- * Initialize tuplesort module.
- * ----------------
+ /*
+ * Initialize tuplesort module.
*/
SO1_printf("ExecSort: %s\n",
"calling tuplesort_begin");
@@ -159,9 +153,8 @@ ExecSort(Sort *node)
sortstate->tuplesortstate = (void *) tuplesortstate;
- /* ----------------
- * Scan the subplan and feed all the tuples to tuplesort.
- * ----------------
+ /*
+ * Scan the subplan and feed all the tuples to tuplesort.
*/
for (;;)
@@ -174,27 +167,23 @@ ExecSort(Sort *node)
tuplesort_puttuple(tuplesortstate, (void *) slot->val);
}
- /* ----------------
- * Complete the sort.
- * ----------------
+ /*
+ * Complete the sort.
*/
tuplesort_performsort(tuplesortstate);
- /* ----------------
- * restore to user specified direction
- * ----------------
+ /*
+ * restore to user specified direction
*/
estate->es_direction = dir;
- /* ----------------
- * make sure the tuple descriptor is up to date (is this needed?)
- * ----------------
+ /*
+ * make sure the tuple descriptor is up to date (is this needed?)
*/
ExecAssignResultType(&sortstate->csstate.cstate, tupDesc, false);
- /* ----------------
- * finally set the sorted flag to true
- * ----------------
+ /*
+ * finally set the sorted flag to true
*/
sortstate->sort_Done = true;
SO1_printf(stderr, "ExecSort: sorting done.\n");
@@ -203,10 +192,9 @@ ExecSort(Sort *node)
SO1_printf("ExecSort: %s\n",
"retrieving tuple from tuplesort");
- /* ----------------
- * Get the first or next tuple from tuplesort.
- * Returns NULL if no more tuples.
- * ----------------
+ /*
+ * Get the first or next tuple from tuplesort. Returns NULL if no more
+ * tuples.
*/
heapTuple = tuplesort_getheaptuple(tuplesortstate,
ScanDirectionIsForward(dir),
@@ -232,15 +220,13 @@ ExecInitSort(Sort *node, EState *estate, Plan *parent)
SO1_printf("ExecInitSort: %s\n",
"initializing sort node");
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->plan.state = estate;
- /* ----------------
+ /*
* create state structure
- * ----------------
*/
sortstate = makeNode(SortState);
sortstate->sort_Done = false;
@@ -249,42 +235,37 @@ ExecInitSort(Sort *node, EState *estate, Plan *parent)
node->sortstate = sortstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Sort nodes don't initialize their ExprContexts because
- * they never call ExecQual or ExecProject.
- * ----------------
+ * Sort nodes don't initialize their ExprContexts because they never call
+ * ExecQual or ExecProject.
*/
#define SORT_NSLOTS 1
- /* ----------------
- * tuple table initialization
+
+ /*
+ * tuple table initialization
*
- * sort nodes only return scan tuples from their sorted
- * relation.
- * ----------------
+ * sort nodes only return scan tuples from their sorted relation.
*/
ExecInitResultTupleSlot(estate, &sortstate->csstate.cstate);
ExecInitScanTupleSlot(estate, &sortstate->csstate);
- /* ----------------
+ /*
* initializes child nodes
- * ----------------
*/
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize sortstate information
- * ----------------
+ /*
+ * initialize sortstate information
*/
sortstate->sort_Keys = FormSortKeys(node);
- /* ----------------
- * initialize tuple type. no need to initialize projection
- * info because this node doesn't do projections.
- * ----------------
+ /*
+ * initialize tuple type. no need to initialize projection info
+ * because this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &sortstate->csstate.cstate);
ExecAssignScanTypeFromOuterPlan((Plan *) node, &sortstate->csstate);
@@ -314,31 +295,27 @@ ExecEndSort(Sort *node)
SortState *sortstate;
Plan *outerPlan;
- /* ----------------
- * get info from the sort state
- * ----------------
+ /*
+ * get info from the sort state
*/
SO1_printf("ExecEndSort: %s\n",
"shutting down sort node");
sortstate = node->sortstate;
- /* ----------------
- * shut down the subplan
- * ----------------
+ /*
+ * shut down the subplan
*/
outerPlan = outerPlan((Plan *) node);
ExecEndNode(outerPlan, (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(sortstate->csstate.css_ScanTupleSlot);
- /* ----------------
- * Release tuplesort resources
- * ----------------
+ /*
+ * Release tuplesort resources
*/
if (sortstate->tuplesortstate != NULL)
tuplesort_end((Tuplesortstate *) sortstate->tuplesortstate);
@@ -365,9 +342,8 @@ ExecSortMarkPos(Sort *node)
{
SortState *sortstate = node->sortstate;
- /* ----------------
- * if we haven't sorted yet, just return
- * ----------------
+ /*
+ * if we haven't sorted yet, just return
*/
if (!sortstate->sort_Done)
return;
@@ -386,16 +362,14 @@ ExecSortRestrPos(Sort *node)
{
SortState *sortstate = node->sortstate;
- /* ----------------
- * if we haven't sorted yet, just return.
- * ----------------
+ /*
+ * if we haven't sorted yet, just return.
*/
if (!sortstate->sort_Done)
return;
- /* ----------------
- * restore the scan to the previously marked position
- * ----------------
+ /*
+ * restore the scan to the previously marked position
*/
tuplesort_restorepos((Tuplesortstate *) sortstate->tuplesortstate);
}
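
The ExecSort() comments describe a sort-then-stream protocol: the first call drains the subplan into tuplesort and performs the sort, and every later call just fetches the next tuple in the requested direction. A minimal stand-alone version of that first-call-sorts pattern, using plain ints and qsort() as stand-ins (MiniSort and mini_sort_next are invented for illustration):

#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int     x = *(const int *) a;
    int     y = *(const int *) b;

    return (x > y) - (x < y);
}

typedef struct MiniSort
{
    int    *values;         /* the "subplan" input, already collected */
    int     nvalues;
    int     next;
    int     sort_done;      /* analogue of sortstate->sort_Done */
} MiniSort;

/* Return a pointer to the next value in sorted order, or NULL at the end. */
static int *
mini_sort_next(MiniSort *state)
{
    if (!state->sort_done)
    {
        /* first time through: sort everything, then start streaming */
        qsort(state->values, state->nvalues, sizeof(int), cmp_int);
        state->next = 0;
        state->sort_done = 1;
    }
    if (state->next >= state->nvalues)
        return NULL;
    return &state->values[state->next++];
}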
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 9b8711c9914..54dec2d1eec 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.5 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.6 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,9 +55,8 @@ SubqueryNext(SubqueryScan *node)
ScanDirection direction;
TupleTableSlot *slot;
- /* ----------------
- * get information from the estate and scan state
- * ----------------
+ /*
+ * get information from the estate and scan state
*/
estate = node->scan.plan.state;
subquerystate = (SubqueryScanState *) node->scan.scanstate;
@@ -85,9 +84,8 @@ SubqueryNext(SubqueryScan *node)
return (slot);
}
- /* ----------------
- * get the next tuple from the sub-query
- * ----------------
+ /*
+ * get the next tuple from the sub-query
*/
subquerystate->sss_SubEState->es_direction = direction;
@@ -111,9 +109,9 @@ SubqueryNext(SubqueryScan *node)
TupleTableSlot *
ExecSubqueryScan(SubqueryScan *node)
{
- /* ----------------
- * use SubqueryNext as access method
- * ----------------
+
+ /*
+ * use SubqueryNext as access method
*/
return ExecScan(&node->scan, (ExecScanAccessMtd) SubqueryNext);
}
@@ -129,46 +127,41 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, Plan *parent)
RangeTblEntry *rte;
EState *sp_estate;
- /* ----------------
- * SubqueryScan should not have any "normal" children.
- * ----------------
+ /*
+ * SubqueryScan should not have any "normal" children.
*/
Assert(outerPlan((Plan *) node) == NULL);
Assert(innerPlan((Plan *) node) == NULL);
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->scan.plan.state = estate;
- /* ----------------
- * create new SubqueryScanState for node
- * ----------------
+ /*
+ * create new SubqueryScanState for node
*/
subquerystate = makeNode(SubqueryScanState);
node->scan.scanstate = (CommonScanState *) subquerystate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &subquerystate->csstate.cstate);
#define SUBQUERYSCAN_NSLOTS 2
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &subquerystate->csstate.cstate);
- /* ----------------
- * initialize subquery
+ /*
+ * initialize subquery
*
- * This should agree with ExecInitSubPlan
- * ----------------
+ * This should agree with ExecInitSubPlan
*/
rte = rt_fetch(node->scan.scanrelid, estate->es_range_table);
Assert(rte->subquery != NULL);
@@ -189,9 +182,8 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, Plan *parent)
subquerystate->csstate.css_ScanTupleSlot = NULL;
subquerystate->csstate.cstate.cs_TupFromTlist = false;
- /* ----------------
- * initialize tuple type
- * ----------------
+ /*
+ * initialize tuple type
*/
ExecAssignResultTypeFromTL((Plan *) node, &subquerystate->csstate.cstate);
ExecAssignProjectionInfo((Plan *) node, &subquerystate->csstate.cstate);
@@ -222,27 +214,23 @@ ExecEndSubqueryScan(SubqueryScan *node)
{
SubqueryScanState *subquerystate;
- /* ----------------
- * get information from node
- * ----------------
+ /*
+ * get information from node
*/
subquerystate = (SubqueryScanState *) node->scan.scanstate;
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(subquerystate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(subquerystate) because the rule
+ * manager depends on the tupType returned by ExecMain(). So for now,
+ * this is freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&subquerystate->csstate.cstate);
ExecFreeExprContext(&subquerystate->csstate.cstate);
- /* ----------------
+ /*
* close down subquery
- * ----------------
*/
ExecEndNode(node->subplan, node->subplan);
@@ -250,9 +238,8 @@ ExecEndSubqueryScan(SubqueryScan *node)
subquerystate->csstate.css_ScanTupleSlot = NULL;
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(subquerystate->csstate.cstate.cs_ResultTupleSlot);
}
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 04c9efc4b0a..01a26d59306 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.15 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.16 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,9 +83,8 @@ TidNext(TidScan *node)
ItemPointer *tidList,
itemptr;
- /* ----------------
- * extract necessary information from tid scan node
- * ----------------
+ /*
+ * extract necessary information from tid scan node
*/
estate = node->scan.plan.state;
direction = estate->es_direction;
@@ -120,11 +119,10 @@ TidNext(TidScan *node)
tuple = &(tidstate->tss_htup);
- /* ----------------
- * ok, now that we have what we need, fetch an tid tuple.
- * if scanning this tid succeeded then return the
- * appropriate heap tuple.. else return NULL.
- * ----------------
+ /*
+ * ok, now that we have what we need, fetch an tid tuple. if scanning
+ * this tid succeeded then return the appropriate heap tuple.. else
+ * return NULL.
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
@@ -161,13 +159,12 @@ TidNext(TidScan *node)
bool prev_matches = false;
int prev_tid;
- /* ----------------
- * store the scanned tuple in the scan tuple slot of
- * the scan state. Eventually we will only do this and not
- * return a tuple. Note: we pass 'false' because tuples
- * returned by amgetnext are pointers onto disk pages and
- * were not created with palloc() and so should not be pfree()'d.
- * ----------------
+ /*
+ * store the scanned tuple in the scan tuple slot of the scan
+ * state. Eventually we will only do this and not return a
+ * tuple. Note: we pass 'false' because tuples returned by
+ * amgetnext are pointers onto disk pages and were not created
+ * with palloc() and so should not be pfree()'d.
*/
ExecStoreTuple(tuple, /* tuple to store */
slot,/* slot to store in */
@@ -211,10 +208,10 @@ TidNext(TidScan *node)
if (slot_is_valid)
return slot;
}
- /* ----------------
- * if we get here it means the tid scan failed so we
- * are at the end of the scan..
- * ----------------
+
+ /*
+ * if we get here it means the tid scan failed so we are at the end of
+ * the scan..
*/
return ExecClearTuple(slot);
}
@@ -241,9 +238,9 @@ TidNext(TidScan *node)
TupleTableSlot *
ExecTidScan(TidScan *node)
{
- /* ----------------
- * use TidNext as access method
- * ----------------
+
+ /*
+ * use TidNext as access method
*/
return ExecScan(&node->scan, (ExecScanAccessMtd) TidNext);
}
@@ -281,9 +278,8 @@ ExecTidReScan(TidScan *node, ExprContext *exprCtxt, Plan *parent)
node->scan.scanstate->cstate.cs_ExprContext,
tidList);
- /* ----------------
- * perhaps return something meaningful
- * ----------------
+ /*
+ * perhaps return something meaningful
*/
return;
}
@@ -306,32 +302,27 @@ ExecEndTidScan(TidScan *node)
if (tidstate && tidstate->tss_TidList)
pfree(tidstate->tss_TidList);
- /* ----------------
- * extract information from the node
- * ----------------
+ /*
+ * extract information from the node
*/
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(scanstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(scanstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&scanstate->cstate);
ExecFreeExprContext(&scanstate->cstate);
- /* ----------------
- * close the heap and tid relations
- * ----------------
+ /*
+ * close the heap and tid relations
*/
ExecCloseR((Plan *) node);
- /* ----------------
- * clear out tuple table slots
- * ----------------
+ /*
+ * clear out tuple table slots
*/
ExecClearTuple(scanstate->cstate.cs_ResultTupleSlot);
ExecClearTuple(scanstate->css_ScanTupleSlot);
@@ -400,56 +391,50 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
Relation currentRelation;
List *execParam = NIL;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->scan.plan.state = estate;
- /* --------------------------------
- * Part 1) initialize scan state
+ /*
+ * Part 1) initialize scan state
*
- * create new CommonScanState for node
- * --------------------------------
+ * create new CommonScanState for node
*/
scanstate = makeNode(CommonScanState);
node->scan.scanstate = scanstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &scanstate->cstate);
#define TIDSCAN_NSLOTS 3
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &scanstate->cstate);
ExecInitScanTupleSlot(estate, scanstate);
- /* ----------------
- * initialize projection info. result type comes from scan desc
- * below..
- * ----------------
+ /*
+ * initialize projection info. result type comes from scan desc
+ * below..
*/
ExecAssignProjectionInfo((Plan *) node, &scanstate->cstate);
- /* --------------------------------
- * Part 2) initialize tid scan state
- *
- * create new TidScanState for node
- * --------------------------------
- */
+ /*
+ * Part 2) initialize tid scan state
+ *
+ * create new TidScanState for node
+ */
tidstate = makeNode(TidScanState);
node->tidstate = tidstate;
- /* ----------------
- * get the tid node information
- * ----------------
+ /*
+ * get the tid node information
*/
tidList = (ItemPointer *) palloc(length(node->tideval) * sizeof(ItemPointer));
numTids = 0;
@@ -463,17 +448,14 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
tidstate->tss_TidPtr = tidPtr;
tidstate->tss_TidList = tidList;
- /* ----------------
- * get the range table and direction information
- * from the execution state (these are needed to
- * open the relations).
- * ----------------
+ /*
+ * get the range table and direction information from the execution
+ * state (these are needed to open the relations).
*/
rangeTable = estate->es_range_table;
- /* ----------------
- * open the base relation
- * ----------------
+ /*
+ * open the base relation
*/
relid = node->scan.scanrelid;
rtentry = rt_fetch(relid, rangeTable);
@@ -483,9 +465,8 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
scanstate->css_currentRelation = currentRelation;
scanstate->css_currentScanDesc = 0;
- /* ----------------
- * get the scan type from the relation descriptor.
- * ----------------
+ /*
+ * get the scan type from the relation descriptor.
*/
ExecAssignScanType(scanstate, RelationGetDescr(currentRelation), false);
ExecAssignResultTypeFromTL((Plan *) node, &scanstate->cstate);
@@ -496,9 +477,8 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
*/
((Plan *) node)->chgParam = execParam;
- /* ----------------
- * all done.
- * ----------------
+ /*
+ * all done.
*/
return TRUE;
}
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index 5c3dedb152e..2a139d8e1cf 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.31 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.32 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,43 +47,38 @@ ExecUnique(Unique *node)
Plan *outerPlan;
TupleDesc tupDesc;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
uniquestate = node->uniquestate;
outerPlan = outerPlan((Plan *) node);
resultTupleSlot = uniquestate->cstate.cs_ResultTupleSlot;
tupDesc = ExecGetResultType(&uniquestate->cstate);
- /* ----------------
- * now loop, returning only non-duplicate tuples.
- * We assume that the tuples arrive in sorted order
- * so we can detect duplicates easily.
- * ----------------
+ /*
+ * now loop, returning only non-duplicate tuples. We assume that the
+ * tuples arrive in sorted order so we can detect duplicates easily.
*/
for (;;)
{
- /* ----------------
- * fetch a tuple from the outer subplan
- * ----------------
+
+ /*
+ * fetch a tuple from the outer subplan
*/
slot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(slot))
return NULL;
- /* ----------------
- * Always return the first tuple from the subplan.
- * ----------------
+ /*
+ * Always return the first tuple from the subplan.
*/
if (uniquestate->priorTuple == NULL)
break;
- /* ----------------
- * Else test if the new tuple and the previously returned
- * tuple match. If so then we loop back and fetch
- * another new tuple from the subplan.
- * ----------------
+ /*
+ * Else test if the new tuple and the previously returned tuple
+ * match. If so then we loop back and fetch another new tuple
+ * from the subplan.
*/
if (!execTuplesMatch(slot->val, uniquestate->priorTuple,
tupDesc,
@@ -93,18 +88,17 @@ ExecUnique(Unique *node)
break;
}
- /* ----------------
- * We have a new tuple different from the previous saved tuple (if any).
- * Save it and return it. We must copy it because the source subplan
- * won't guarantee that this source tuple is still accessible after
- * fetching the next source tuple.
+ /*
+ * We have a new tuple different from the previous saved tuple (if
+ * any). Save it and return it. We must copy it because the source
+ * subplan won't guarantee that this source tuple is still accessible
+ * after fetching the next source tuple.
*
- * Note that we manage the copy ourselves. We can't rely on the result
- * tuple slot to maintain the tuple reference because our caller may
- * replace the slot contents with a different tuple (see junk filter
- * handling in execMain.c). We assume that the caller will no longer
- * be interested in the current tuple after he next calls us.
- * ----------------
+ * Note that we manage the copy ourselves. We can't rely on the result
+ * tuple slot to maintain the tuple reference because our caller may
+ * replace the slot contents with a different tuple (see junk filter
+ * handling in execMain.c). We assume that the caller will no longer
+ * be interested in the current tuple after he next calls us.
*/
if (uniquestate->priorTuple != NULL)
heap_freetuple(uniquestate->priorTuple);
@@ -131,27 +125,24 @@ ExecInitUnique(Unique *node, EState *estate, Plan *parent)
UniqueState *uniquestate;
Plan *outerPlan;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->plan.state = estate;
- /* ----------------
- * create new UniqueState for node
- * ----------------
+ /*
+ * create new UniqueState for node
*/
uniquestate = makeNode(UniqueState);
node->uniquestate = uniquestate;
uniquestate->priorTuple = NULL;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Unique nodes have no ExprContext initialization because
- * they never call ExecQual or ExecProject. But they do need a
- * per-tuple memory context anyway for calling execTuplesMatch.
- * ----------------
+ * Unique nodes have no ExprContext initialization because they never
+ * call ExecQual or ExecProject. But they do need a per-tuple memory
+ * context anyway for calling execTuplesMatch.
*/
uniquestate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
@@ -161,23 +152,21 @@ ExecInitUnique(Unique *node, EState *estate, Plan *parent)
ALLOCSET_DEFAULT_MAXSIZE);
#define UNIQUE_NSLOTS 1
- /* ------------
+
+ /*
* Tuple table initialization
- * ------------
*/
ExecInitResultTupleSlot(estate, &uniquestate->cstate);
- /* ----------------
- * then initialize outer plan
- * ----------------
+ /*
+ * then initialize outer plan
*/
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * unique nodes do no projections, so initialize
- * projection info for this node appropriately
- * ----------------
+ /*
+ * unique nodes do no projections, so initialize projection info for
+ * this node appropriately
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &uniquestate->cstate);
uniquestate->cstate.cs_ProjInfo = NULL;
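
ExecUnique()'s comments give the whole algorithm: input arrives in sorted order, only the first tuple of each group of duplicates is returned, and the returned tuple has to be copied because the subplan may overwrite it before the next call. A self-contained sketch of that adjacent-duplicate filter over strings (unique_sorted and its emit callback are invented names, not backend functions):

#include <stdlib.h>
#include <string.h>

/*
 * Emit each distinct value of a sorted input exactly once, comparing
 * against a privately kept copy of the previously returned value.  The
 * copy matters because the producer does not guarantee that the last
 * value stays valid once the next one is fetched.
 */
static size_t
unique_sorted(const char *const *input, size_t n,
              void (*emit) (const char *))
{
    char       *prior = NULL;
    size_t      nout = 0;

    for (size_t i = 0; i < n; i++)
    {
        if (prior != NULL && strcmp(prior, input[i]) == 0)
            continue;           /* duplicate of the previous output */
        emit(input[i]);
        nout++;
        free(prior);
        prior = strdup(input[i]);   /* keep our own copy */
    }
    free(prior);
    return nout;
}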
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index 239766825f3..f4b4294eaac 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: stringinfo.c,v 1.27 2001/01/24 19:42:55 momjian Exp $
+ * $Id: stringinfo.c,v 1.28 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,11 +106,11 @@ appendStringInfo(StringInfo str, const char *fmt,...)
for (;;)
{
- /*----------
- * Try to format the given string into the available space;
- * but if there's hardly any space, don't bother trying,
- * just fall through to enlarge the buffer first.
- *----------
+
+ /*
+ * Try to format the given string into the available space; but if
+ * there's hardly any space, don't bother trying, just fall
+ * through to enlarge the buffer first.
*/
avail = str->maxlen - str->len - 1;
if (avail > 16)
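
The appendStringInfo() comment reflowed above documents a grow-and-retry loop: format into whatever space is left and, if the result will not fit, enlarge the buffer and go around again (the real code also skips the attempt entirely when almost no space remains). A minimal stand-alone version, assuming C99 vsnprintf() return semantics, with invented names MiniStr and mini_append:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Caller is expected to have allocated data with maxlen > 0 and to keep
 * len < maxlen; len counts the bytes in use, not the trailing '\0'.
 */
typedef struct MiniStr
{
    char   *data;
    size_t  len;
    size_t  maxlen;
} MiniStr;

static void
mini_append(MiniStr *str, const char *fmt, ...)
{
    for (;;)
    {
        size_t  avail = str->maxlen - str->len;
        va_list args;
        int     needed;

        /* try to format into the space that is left */
        va_start(args, fmt);
        needed = vsnprintf(str->data + str->len, avail, fmt, args);
        va_end(args);

        if (needed >= 0 && (size_t) needed < avail)
        {
            str->len += (size_t) needed;    /* it fit */
            return;
        }

        /* not enough room: double the allocation and try again */
        str->maxlen *= 2;
        str->data = realloc(str->data, str->maxlen);
        if (str->data == NULL)
            abort();
    }
}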
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 3dc2bf0373a..ad50630931e 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.139 2001/03/22 03:59:31 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.140 2001/03/22 06:16:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,9 +114,8 @@ _copyPlan(Plan *from)
{
Plan *newnode = makeNode(Plan);
- /* ----------------
- * copy the node superclass fields
- * ----------------
+ /*
+ * copy the node superclass fields
*/
CopyPlanFields(from, newnode);
@@ -133,15 +132,13 @@ _copyResult(Result *from)
{
Result *newnode = makeNode(Result);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, resconstantqual);
@@ -165,15 +162,13 @@ _copyAppend(Append *from)
{
Append *newnode = makeNode(Append);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, appendplans);
newnode->isTarget = from->isTarget;
@@ -205,9 +200,8 @@ _copyScan(Scan *from)
{
Scan *newnode = makeNode(Scan);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyScanFields((Scan *) from, (Scan *) newnode);
@@ -224,9 +218,8 @@ _copySeqScan(SeqScan *from)
{
SeqScan *newnode = makeNode(SeqScan);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyScanFields((Scan *) from, (Scan *) newnode);
@@ -243,16 +236,14 @@ _copyIndexScan(IndexScan *from)
{
IndexScan *newnode = makeNode(IndexScan);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyScanFields((Scan *) from, (Scan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->indxid = listCopy(from->indxid);
Node_Copy(from, newnode, indxqual);
@@ -282,15 +273,14 @@ _copyTidScan(TidScan *from)
{
TidScan *newnode = makeNode(TidScan);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyScanFields((Scan *) from, (Scan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+
+ /*
+ * copy remainder of node
*/
newnode->needRescan = from->needRescan;
Node_Copy(from, newnode, tideval);
@@ -307,16 +297,14 @@ _copySubqueryScan(SubqueryScan *from)
{
SubqueryScan *newnode = makeNode(SubqueryScan);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyScanFields((Scan *) from, (Scan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, subplan);
@@ -352,9 +340,8 @@ _copyJoin(Join *from)
{
Join *newnode = makeNode(Join);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyJoinFields(from, newnode);
@@ -372,9 +359,8 @@ _copyNestLoop(NestLoop *from)
{
NestLoop *newnode = makeNode(NestLoop);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyJoinFields((Join *) from, (Join *) newnode);
@@ -392,16 +378,14 @@ _copyMergeJoin(MergeJoin *from)
{
MergeJoin *newnode = makeNode(MergeJoin);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyJoinFields((Join *) from, (Join *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, mergeclauses);
@@ -424,16 +408,14 @@ _copyHashJoin(HashJoin *from)
{
HashJoin *newnode = makeNode(HashJoin);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
CopyJoinFields((Join *) from, (Join *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, hashclauses);
newnode->hashjoinop = from->hashjoinop;
@@ -458,9 +440,8 @@ _copyMaterial(Material *from)
{
Material *newnode = makeNode(Material);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
@@ -477,9 +458,8 @@ _copySort(Sort *from)
{
Sort *newnode = makeNode(Sort);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
@@ -546,15 +526,13 @@ _copyUnique(Unique *from)
{
Unique *newnode = makeNode(Unique);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->numCols = from->numCols;
newnode->uniqColIdx = palloc(from->numCols * sizeof(AttrNumber));
@@ -572,15 +550,13 @@ _copySetOp(SetOp *from)
{
SetOp *newnode = makeNode(SetOp);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->cmd = from->cmd;
newnode->numCols = from->numCols;
@@ -600,15 +576,13 @@ _copyLimit(Limit *from)
{
Limit *newnode = makeNode(Limit);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, limitOffset);
Node_Copy(from, newnode, limitCount);
@@ -625,15 +599,13 @@ _copyHash(Hash *from)
{
Hash *newnode = makeNode(Hash);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyPlanFields((Plan *) from, (Plan *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, hashkey);
@@ -691,9 +663,8 @@ _copyFjoin(Fjoin *from)
{
Fjoin *newnode = makeNode(Fjoin);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
newnode->fj_initialized = from->fj_initialized;
@@ -726,9 +697,8 @@ _copyExpr(Expr *from)
{
Expr *newnode = makeNode(Expr);
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
newnode->typeOid = from->typeOid;
newnode->opType = from->opType;
@@ -748,9 +718,8 @@ _copyVar(Var *from)
{
Var *newnode = makeNode(Var);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->varno = from->varno;
newnode->varattno = from->varattno;
@@ -785,9 +754,8 @@ _copyOper(Oper *from)
{
Oper *newnode = makeNode(Oper);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->opno = from->opno;
newnode->opid = from->opid;
@@ -807,27 +775,27 @@ _copyConst(Const *from)
{
Const *newnode = makeNode(Const);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->consttype = from->consttype;
newnode->constlen = from->constlen;
if (from->constbyval || from->constisnull)
{
- /* ----------------
- * passed by value so just copy the datum.
- * Also, don't try to copy struct when value is null!
- * ----------------
+
+ /*
+ * passed by value so just copy the datum. Also, don't try to copy
+ * struct when value is null!
+ *
*/
newnode->constvalue = from->constvalue;
}
else
{
- /* ----------------
- * not passed by value. datum contains a pointer.
- * ----------------
+
+ /*
+ * not passed by value. datum contains a pointer.
*/
int length = from->constlen;
@@ -856,9 +824,8 @@ _copyParam(Param *from)
{
Param *newnode = makeNode(Param);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->paramkind = from->paramkind;
newnode->paramid = from->paramid;
@@ -879,9 +846,8 @@ _copyFunc(Func *from)
{
Func *newnode = makeNode(Func);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->funcid = from->funcid;
newnode->functype = from->functype;
@@ -900,9 +866,8 @@ _copyAggref(Aggref *from)
{
Aggref *newnode = makeNode(Aggref);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->aggname = pstrdup(from->aggname);
newnode->basetype = from->basetype;
@@ -924,9 +889,8 @@ _copySubLink(SubLink *from)
{
SubLink *newnode = makeNode(SubLink);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->subLinkType = from->subLinkType;
newnode->useor = from->useor;
@@ -946,9 +910,8 @@ _copyFieldSelect(FieldSelect *from)
{
FieldSelect *newnode = makeNode(FieldSelect);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, arg);
newnode->fieldnum = from->fieldnum;
@@ -967,9 +930,8 @@ _copyRelabelType(RelabelType *from)
{
RelabelType *newnode = makeNode(RelabelType);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, arg);
newnode->resulttype = from->resulttype;
@@ -1026,9 +988,8 @@ _copyCaseExpr(CaseExpr *from)
{
CaseExpr *newnode = makeNode(CaseExpr);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->casetype = from->casetype;
@@ -1048,9 +1009,8 @@ _copyCaseWhen(CaseWhen *from)
{
CaseWhen *newnode = makeNode(CaseWhen);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, expr);
Node_Copy(from, newnode, result);
@@ -1063,9 +1023,8 @@ _copyArrayRef(ArrayRef *from)
{
ArrayRef *newnode = makeNode(ArrayRef);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->refattrlength = from->refattrlength;
newnode->refelemlength = from->refelemlength;
@@ -1229,15 +1188,13 @@ _copyIndexPath(IndexPath *from)
{
IndexPath *newnode = makeNode(IndexPath);
- /* ----------------
- * copy the node superclass fields
- * ----------------
+ /*
+ * copy the node superclass fields
*/
CopyPathFields((Path *) from, (Path *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->indexid = listCopy(from->indexid);
Node_Copy(from, newnode, indexqual);
@@ -1258,15 +1215,13 @@ _copyTidPath(TidPath *from)
{
TidPath *newnode = makeNode(TidPath);
- /* ----------------
- * copy the node superclass fields
- * ----------------
+ /*
+ * copy the node superclass fields
*/
CopyPathFields((Path *) from, (Path *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, tideval);
newnode->unjoined_relids = listCopy(from->unjoined_relids);
@@ -1283,15 +1238,13 @@ _copyAppendPath(AppendPath *from)
{
AppendPath *newnode = makeNode(AppendPath);
- /* ----------------
- * copy the node superclass fields
- * ----------------
+ /*
+ * copy the node superclass fields
*/
CopyPathFields((Path *) from, (Path *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, subpaths);
@@ -1323,9 +1276,8 @@ _copyNestPath(NestPath *from)
{
NestPath *newnode = makeNode(NestPath);
- /* ----------------
- * copy the node superclass fields
- * ----------------
+ /*
+ * copy the node superclass fields
*/
CopyPathFields((Path *) from, (Path *) newnode);
CopyJoinPathFields((JoinPath *) from, (JoinPath *) newnode);
@@ -1342,16 +1294,14 @@ _copyMergePath(MergePath *from)
{
MergePath *newnode = makeNode(MergePath);
- /* ----------------
- * copy the node superclass fields
- * ----------------
+ /*
+ * copy the node superclass fields
*/
CopyPathFields((Path *) from, (Path *) newnode);
CopyJoinPathFields((JoinPath *) from, (JoinPath *) newnode);
- /* ----------------
- * copy the remainder of the node
- * ----------------
+ /*
+ * copy the remainder of the node
*/
Node_Copy(from, newnode, path_mergeclauses);
Node_Copy(from, newnode, outersortkeys);
@@ -1369,16 +1319,14 @@ _copyHashPath(HashPath *from)
{
HashPath *newnode = makeNode(HashPath);
- /* ----------------
- * copy the node superclass fields
- * ----------------
+ /*
+ * copy the node superclass fields
*/
CopyPathFields((Path *) from, (Path *) newnode);
CopyJoinPathFields((JoinPath *) from, (JoinPath *) newnode);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, path_hashclauses);
@@ -1394,9 +1342,8 @@ _copyPathKeyItem(PathKeyItem *from)
{
PathKeyItem *newnode = makeNode(PathKeyItem);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, key);
newnode->sortop = from->sortop;
@@ -1413,9 +1360,8 @@ _copyRestrictInfo(RestrictInfo *from)
{
RestrictInfo *newnode = makeNode(RestrictInfo);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, clause);
newnode->eval_cost = from->eval_cost;
@@ -1447,9 +1393,8 @@ _copyJoinInfo(JoinInfo *from)
{
JoinInfo *newnode = makeNode(JoinInfo);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
newnode->unjoined_relids = listCopy(from->unjoined_relids);
Node_Copy(from, newnode, jinfo_restrictinfo);
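
All of the _copyXxx() hunks above share one shape, visible in their comments: first copy the node superclass fields (CopyPlanFields, CopyScanFields, CopyPathFields), then copy the remainder of the node. A tiny illustrative version of that embedded-superclass copy pattern, with hypothetical BaseNode and DerivedNode types that are not backend structs:

#include <stdlib.h>

typedef struct BaseNode
{
    int     type;
    double  cost;
} BaseNode;

typedef struct DerivedNode
{
    BaseNode    base;       /* superclass comes first, as in Plan/Scan */
    int         extra;
} DerivedNode;

static void
copy_base_fields(const BaseNode *from, BaseNode *to)
{
    to->type = from->type;
    to->cost = from->cost;
}

static DerivedNode *
copy_derived(const DerivedNode *from)
{
    DerivedNode *newnode = malloc(sizeof(DerivedNode));

    if (newnode == NULL)
        abort();

    /* copy node superclass fields */
    copy_base_fields(&from->base, &newnode->base);

    /* copy remainder of node */
    newnode->extra = from->extra;

    return newnode;
}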
diff --git a/src/backend/optimizer/path/_deadcode/predmig.c b/src/backend/optimizer/path/_deadcode/predmig.c
index 1781f43db1d..462f1dc24ed 100644
--- a/src/backend/optimizer/path/_deadcode/predmig.c
+++ b/src/backend/optimizer/path/_deadcode/predmig.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.9 2001/03/22 03:59:35 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.10 2001/03/22 06:16:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -485,7 +485,7 @@ xfunc_form_groups(Query *queryInfo, Stream root, Stream bottom)
}
-/* ------------------- UTILITY FUNCTIONS ------------------------- */
+/* ------------------- UTILITY FUNCTIONS ------------------------- */
/*
** xfunc_free_stream
diff --git a/src/backend/optimizer/path/_deadcode/xfunc.c b/src/backend/optimizer/path/_deadcode/xfunc.c
index 4f5c30d52ef..82258d7baf6 100644
--- a/src/backend/optimizer/path/_deadcode/xfunc.c
+++ b/src/backend/optimizer/path/_deadcode/xfunc.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/xfunc.c,v 1.15 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/xfunc.c,v 1.16 2001/03/22 06:16:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1447,15 +1447,13 @@ xfunc_copyrel(RelOptInfo from, RelOptInfo *to)
if (newnode == NULL)
return false;
- /* ----------------
- * copy node superclass fields
- * ----------------
+ /*
+ * copy node superclass fields
*/
CopyNodeFields((Node) from, (Node) newnode, alloc);
- /* ----------------
- * copy remainder of node
- * ----------------
+ /*
+ * copy remainder of node
*/
Node_Copy(from, newnode, alloc, relids);
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index f93a027cd53..4f4b6720e1e 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.31 2001/03/22 03:59:35 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.32 2001/03/22 06:16:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -860,13 +860,13 @@ make_pathkeys_for_mergeclauses(Query *root,
/*
* When we are given multiple merge clauses, it's possible that
- * some clauses refer to the same vars as earlier clauses.
- * There's no reason for us to specify sort keys like (A,B,A) when
- * (A,B) will do --- and adding redundant sort keys makes add_path
- * think that this sort order is different from ones that are
- * really the same, so don't do it. Since we now have a
- * canonicalized pathkey, a simple ptrMember test is sufficient to
- * detect redundant keys.
+ * some clauses refer to the same vars as earlier clauses. There's
+ * no reason for us to specify sort keys like (A,B,A) when (A,B)
+ * will do --- and adding redundant sort keys makes add_path think
+ * that this sort order is different from ones that are really the
+ * same, so don't do it. Since we now have a canonicalized
+ * pathkey, a simple ptrMember test is sufficient to detect
+ * redundant keys.
*/
if (!ptrMember(pathkey, pathkeys))
pathkeys = lappend(pathkeys, pathkey);
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index a502cea5d67..4687a559962 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.182 2001/03/22 03:59:40 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.183 2001/03/22 06:16:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -129,9 +129,9 @@ transformStmt(ParseState *pstate, Node *parseTree)
switch (nodeTag(parseTree))
{
- /*------------------------
- * Non-optimizable statements
- *------------------------
+
+ /*
+ * Non-optimizable statements
*/
case T_CreateStmt:
result = transformCreateStmt(pstate, (CreateStmt *) parseTree);
@@ -206,9 +206,8 @@ transformStmt(ParseState *pstate, Node *parseTree)
result = transformAlterTableStmt(pstate, (AlterTableStmt *) parseTree);
break;
- /*------------------------
- * Optimizable statements
- *------------------------
+ /*
+ * Optimizable statements
*/
case T_InsertStmt:
result = transformInsertStmt(pstate, (InsertStmt *) parseTree);
@@ -779,12 +778,11 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
{
constraint = lfirst(clist);
- /* ----------
+ /*
* If this column constraint is a FOREIGN KEY
* constraint, then we fill in the current attributes
* name and throw it into the list of FK constraints
* to be processed later.
- * ----------
*/
if (IsA(constraint, FkConstraint))
{
@@ -906,10 +904,10 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
break;
case T_FkConstraint:
- /* ----------
- * Table level FOREIGN KEY constraints are already complete.
- * Just remember for later.
- * ----------
+
+ /*
+ * Table level FOREIGN KEY constraints are already
+ * complete. Just remember for later.
*/
fkconstraints = lappend(fkconstraints, element);
break;
@@ -1806,9 +1804,9 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
/*
* 15 august 1991 -- since 3.0 postgres does locking right, we
- * discovered that portals were violating locking protocol.
- * portal locks cannot span xacts. as a short-term fix, we
- * installed the check here. -- mao
+ * discovered that portals were violating locking protocol. portal
+ * locks cannot span xacts. as a short-term fix, we installed the
+ * check here. -- mao
*/
if (!IsTransactionBlock())
elog(ERROR, "DECLARE CURSOR may only be used in begin/end transaction blocks");
@@ -2019,9 +2017,9 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* 15 august 1991 -- since 3.0 postgres does locking right, we
- * discovered that portals were violating locking protocol.
- * portal locks cannot span xacts. as a short-term fix, we
- * installed the check here. -- mao
+ * discovered that portals were violating locking protocol. portal
+ * locks cannot span xacts. as a short-term fix, we installed the
+ * check here. -- mao
*/
if (!IsTransactionBlock())
elog(ERROR, "DECLARE CURSOR may only be used in begin/end transaction blocks");
@@ -2713,9 +2711,8 @@ transformFkeyCheckAttrs(FkConstraint *fkconstraint)
int i;
bool found = false;
- /* ----------
+ /*
* Open the referenced table and get the attributes list
- * ----------
*/
pkrel = heap_openr(fkconstraint->pktable_name, AccessShareLock);
if (pkrel == NULL)
@@ -2723,12 +2720,10 @@ transformFkeyCheckAttrs(FkConstraint *fkconstraint)
fkconstraint->pktable_name);
pkrel_attrs = pkrel->rd_att->attrs;
- /* ----------
- * Get the list of index OIDs for the table from the relcache,
- * and look up each one in the pg_index syscache for each unique
- * one, and then compare the attributes we were given to those
- * defined.
- * ----------
+ /*
+ * Get the list of index OIDs for the table from the relcache, and
+ * look up each one in the pg_index syscache for each unique one, and
+ * then compare the attributes we were given to those defined.
*/
indexoidlist = RelationGetIndexList(pkrel);
@@ -2812,9 +2807,8 @@ transformFkeyGetPrimaryKey(FkConstraint *fkconstraint)
Form_pg_index indexStruct = NULL;
int i;
- /* ----------
+ /*
* Open the referenced table and get the attributes list
- * ----------
*/
pkrel = heap_openr(fkconstraint->pktable_name, AccessShareLock);
if (pkrel == NULL)
@@ -2822,11 +2816,10 @@ transformFkeyGetPrimaryKey(FkConstraint *fkconstraint)
fkconstraint->pktable_name);
pkrel_attrs = pkrel->rd_att->attrs;
- /* ----------
- * Get the list of index OIDs for the table from the relcache,
- * and look up each one in the pg_index syscache until we find one
- * marked primary key (hopefully there isn't more than one such).
- * ----------
+ /*
+ * Get the list of index OIDs for the table from the relcache, and
+ * look up each one in the pg_index syscache until we find one marked
+ * primary key (hopefully there isn't more than one such).
*/
indexoidlist = RelationGetIndexList(pkrel);
@@ -2849,18 +2842,16 @@ transformFkeyGetPrimaryKey(FkConstraint *fkconstraint)
freeList(indexoidlist);
- /* ----------
+ /*
* Check that we found it
- * ----------
*/
if (indexStruct == NULL)
elog(ERROR, "PRIMARY KEY for referenced table \"%s\" not found",
fkconstraint->pktable_name);
- /* ----------
+ /*
* Now build the list of PK attributes from the indkey definition
* using the attribute names of the PK relation descriptor
- * ----------
*/
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index cae712c1bbb..1e9deba73fe 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_clause.c,v 1.78 2001/03/22 03:59:41 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_clause.c,v 1.79 2001/03/22 06:16:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -519,9 +519,9 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
*containedRels = nconc(l_containedRels, r_containedRels);
/*
- * Check for conflicting refnames in left and right subtrees.
- * Must do this because higher levels will assume I hand back a
- * self- consistent namespace subtree.
+ * Check for conflicting refnames in left and right subtrees. Must
+ * do this because higher levels will assume I hand back a self-
+ * consistent namespace subtree.
*/
checkNameSpaceConflicts(pstate, j->larg, j->rarg);
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 316b18316c6..e27dfd5742c 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteDefine.c,v 1.59 2001/03/22 03:59:43 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteDefine.c,v 1.60 2001/03/22 06:16:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,9 +61,8 @@ InsertRule(char *rulname,
elog(ERROR, "Attempt to insert rule \"%s\" failed: already exists",
rulname);
- /* ----------------
- * Set up *nulls and *values arrays
- * ----------------
+ /*
+ * Set up *nulls and *values arrays
*/
MemSet(nulls, ' ', sizeof(nulls));
@@ -77,9 +76,8 @@ InsertRule(char *rulname,
values[i++] = DirectFunctionCall1(textin, CStringGetDatum(evqual));
values[i++] = DirectFunctionCall1(textin, CStringGetDatum(actiontree));
- /* ----------------
- * create a new pg_rewrite tuple
- * ----------------
+ /*
+ * create a new pg_rewrite tuple
*/
pg_rewrite_desc = heap_openr(RewriteRelationName, RowExclusiveLock);
@@ -183,14 +181,13 @@ DefineQueryRewrite(RuleStmt *stmt)
event_relation = heap_openr(event_obj->relname, AccessExclusiveLock);
ev_relid = RelationGetRelid(event_relation);
- /* ----------
+ /*
* The current rewrite handler is known to work on relation level
* rules only. And for SELECT events, it expects one non-nothing
- * action that is instead and returns exactly a tuple of the
- * rewritten relation. This restricts SELECT rules to views.
+ * action that is instead and returns exactly a tuple of the rewritten
+ * relation. This restricts SELECT rules to views.
*
- * Jan
- * ----------
+ * Jan
*/
if (event_obj->attrs)
elog(ERROR, "attribute level rules currently not supported");
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 6ece2ae938f..889351067db 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteHandler.c,v 1.90 2001/03/22 03:59:44 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteHandler.c,v 1.91 2001/03/22 06:16:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -672,17 +672,15 @@ fireRules(Query *parsetree,
{
Query *qual_product;
- /* ----------
- * If there are instead rules with qualifications,
- * the original query is still performed. But all
- * the negated rule qualifications of the instead
- * rules are added so it does its actions only
- * in cases where the rule quals of all instead
- * rules are false. Think of it as the default
- * action in a case. We save this in *qual_products
- * so deepRewriteQuery() can add it to the query
- * list after we mangled it up enough.
- * ----------
+ /*
+ * If there are instead rules with qualifications, the
+ * original query is still performed. But all the negated rule
+ * qualifications of the instead rules are added so it does
+ * its actions only in cases where the rule quals of all
+ * instead rules are false. Think of it as the default action
+ * in a case. We save this in *qual_products so
+ * deepRewriteQuery() can add it to the query list after we
+ * mangled it up enough.
*/
if (*qual_products == NIL)
qual_product = parsetree;
@@ -722,10 +720,9 @@ fireRules(Query *parsetree,
pfree(info);
}
- /* ----------
- * If this was an unqualified instead rule,
- * throw away an eventually saved 'default' parsetree
- * ----------
+ /*
+ * If this was an unqualified instead rule, throw away an
+ * eventually saved 'default' parsetree
*/
if (event_qual == NULL && *instead_flag)
*qual_products = NIL;
@@ -842,23 +839,20 @@ deepRewriteQuery(Query *parsetree)
rewritten = nconc(rewritten, newstuff);
}
- /* ----------
- * qual_products are the original query with the negated
- * rule qualification of an instead rule
- * ----------
+ /*
+ * qual_products are the original query with the negated rule
+ * qualification of an instead rule
*/
if (qual_products != NIL)
rewritten = nconc(rewritten, qual_products);
- /* ----------
- * The original query is appended last (if no "instead" rule)
- * because update and delete rule actions might not do
- * anything if they are invoked after the update or
- * delete is performed. The command counter increment
- * between the query execution makes the deleted (and
- * maybe the updated) tuples disappear so the scans
- * for them in the rule actions cannot find them.
- * ----------
+ /*
+ * The original query is appended last (if no "instead" rule) because
+ * update and delete rule actions might not do anything if they are
+ * invoked after the update or delete is performed. The command
+ * counter increment between the query execution makes the deleted
+ * (and maybe the updated) tuples disappear so the scans for them in
+ * the rule actions cannot find them.
*/
if (!instead)
rewritten = lappend(rewritten, parsetree);
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 375376abf83..e47f2f705e2 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.64 2001/03/22 03:59:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.65 2001/03/22 06:16:16 momjian Exp $
*
* NOTES
*
@@ -146,15 +146,14 @@ proc_exit(int code)
/* do our shared memory exits first */
shmem_exit(code);
- /* ----------------
- * call all the callbacks registered before calling exit().
+ /*
+ * call all the callbacks registered before calling exit().
*
- * Note that since we decrement on_proc_exit_index each time,
- * if a callback calls elog(ERROR) or elog(FATAL) then it won't
- * be invoked again when control comes back here (nor will the
- * previously-completed callbacks). So, an infinite loop
- * should not be possible.
- * ----------------
+ * Note that since we decrement on_proc_exit_index each time, if a
+ * callback calls elog(ERROR) or elog(FATAL) then it won't be invoked
+ * again when control comes back here (nor will the
+ * previously-completed callbacks). So, an infinite loop should not
+ * be possible.
*/
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
@@ -177,12 +176,11 @@ shmem_exit(int code)
if (DebugLvl > 1)
elog(DEBUG, "shmem_exit(%d)", code);
- /* ----------------
- * call all the registered callbacks.
+ /*
+ * call all the registered callbacks.
*
- * As with proc_exit(), we remove each callback from the list
- * before calling it, to avoid infinite loop in case of error.
- * ----------------
+ * As with proc_exit(), we remove each callback from the list before
+ * calling it, to avoid infinite loop in case of error.
*/
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
@@ -387,40 +385,39 @@ IpcSemaphoreLock(IpcSemaphoreId semId, int sem, bool interruptOK)
sops.sem_flg = 0;
sops.sem_num = sem;
- /* ----------------
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were
- * sent a signal. So we try and lock the semaphore again.
+ /*
+ * Note: if errStatus is -1 and errno == EINTR then it means we
+ * returned from the operation prematurely because we were sent a
+ * signal. So we try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt.
- * We assume that if such an interrupt comes in while we are waiting,
- * it will cause the semop() call to exit with errno == EINTR, so that
- * we will be able to service the interrupt (if not in a critical
- * section already).
+ * Each time around the loop, we check for a cancel/die interrupt. We
+ * assume that if such an interrupt comes in while we are waiting, it
+ * will cause the semop() call to exit with errno == EINTR, so that we
+ * will be able to service the interrupt (if not in a critical section
+ * already).
*
- * Once we acquire the lock, we do NOT check for an interrupt before
- * returning. The caller needs to be able to record ownership of
- * the lock before any interrupt can be accepted.
+ * Once we acquire the lock, we do NOT check for an interrupt before
+ * returning. The caller needs to be able to record ownership of the
+ * lock before any interrupt can be accepted.
*
- * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
- * and entering the semop() call. If a cancel/die interrupt occurs in
- * that window, we would fail to notice it until after we acquire the
- * lock (or get another interrupt to escape the semop()). We can avoid
- * this problem by temporarily setting ImmediateInterruptOK = true
- * before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this
- * interval will execute directly. However, there is a huge pitfall:
- * there is another window of a few instructions after the semop()
- * before we are able to reset ImmediateInterruptOK. If an interrupt
- * occurs then, we'll lose control, which means that the lock has been
- * acquired but our caller did not get a chance to record the fact.
- * Therefore, we only set ImmediateInterruptOK if the caller tells us
- * it's OK to do so, ie, the caller does not need to record acquiring
- * the lock. (This is currently true for lockmanager locks, since the
- * process that granted us the lock did all the necessary state updates.
- * It's not true for SysV semaphores used to emulate spinlocks --- but
- * our performance on such platforms is so horrible anyway that I'm
- * not going to worry too much about it.)
- * ----------------
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
+ * and entering the semop() call. If a cancel/die interrupt occurs in
+ * that window, we would fail to notice it until after we acquire the
+ * lock (or get another interrupt to escape the semop()). We can
+ * avoid this problem by temporarily setting ImmediateInterruptOK =
+ * true before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in
+ * this interval will execute directly. However, there is a huge
+ * pitfall: there is another window of a few instructions after the
+ * semop() before we are able to reset ImmediateInterruptOK. If an
+ * interrupt occurs then, we'll lose control, which means that the
+ * lock has been acquired but our caller did not get a chance to
+ * record the fact. Therefore, we only set ImmediateInterruptOK if the
+ * caller tells us it's OK to do so, ie, the caller does not need to
+ * record acquiring the lock. (This is currently true for lockmanager
+ * locks, since the process that granted us the lock did all the
+ * necessary state updates. It's not true for SysV semaphores used to
+ * emulate spinlocks --- but our performance on such platforms is so
+ * horrible anyway that I'm not going to worry too much about it.)
*/
do
{
@@ -452,12 +449,11 @@ IpcSemaphoreUnlock(IpcSemaphoreId semId, int sem)
sops.sem_num = sem;
- /* ----------------
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were
- * sent a signal. So we try and unlock the semaphore again.
- * Not clear this can really happen, but might as well cope.
- * ----------------
+ /*
+ * Note: if errStatus is -1 and errno == EINTR then it means we
+ * returned from the operation prematurely because we were sent a
+ * signal. So we try and unlock the semaphore again. Not clear this
+ * can really happen, but might as well cope.
*/
do
{
@@ -486,11 +482,10 @@ IpcSemaphoreTryLock(IpcSemaphoreId semId, int sem)
sops.sem_flg = IPC_NOWAIT; /* but don't block */
sops.sem_num = sem;
- /* ----------------
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were
- * sent a signal. So we try and lock the semaphore again.
- * ----------------
+ /*
+ * Note: if errStatus is -1 and errno == EINTR then it means we
+ * returned from the operation prematurely because we were sent a
+ * signal. So we try and lock the semaphore again.
*/
do
{
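All three semaphore routines above rely on the retry convention their comments describe: a semop() that returns -1 with errno set to EINTR was merely interrupted by a signal and is simply reissued. A stand-alone sketch of that loop follows; semId and semNum are assumed to identify an existing SysV semaphore.

#include <errno.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Sketch only: block until the semaphore can be decremented, retrying
 * whenever the call is interrupted by a signal (errno == EINTR). */
static void
acquire_sem(int semId, unsigned short semNum)
{
    struct sembuf sops;
    int           errStatus;

    sops.sem_op = -1;           /* decrement: wait for the semaphore */
    sops.sem_flg = 0;
    sops.sem_num = semNum;

    do
    {
        errStatus = semop(semId, &sops, 1);
    } while (errStatus == -1 && errno == EINTR);
}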
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index ee2d6751c5e..a12d0aa20f9 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.99 2001/03/22 03:59:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.100 2001/03/22 06:16:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -265,18 +265,15 @@ InitProcess(void)
MyProc->waitHolder = NULL;
SHMQueueInit(&(MyProc->procHolders));
- /* ----------------------
+ /*
* Release the lock.
- * ----------------------
*/
SpinRelease(ProcStructLock);
- /* -------------------------
- * Install ourselves in the shmem index table. The name to
- * use is determined by the OS-assigned process id. That
- * allows the cleanup process to find us after any untimely
- * exit.
- * -------------------------
+ /*
+ * Install ourselves in the shmem index table. The name to use is
+ * determined by the OS-assigned process id. That allows the cleanup
+ * process to find us after any untimely exit.
*/
location = MAKE_OFFSET(MyProc);
if ((!ShmemPIDLookup(MyProcPid, &location)) ||
@@ -531,23 +528,24 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
#endif
- /* ----------------------
+ /*
* Determine where to add myself in the wait queue.
*
* Normally I should go at the end of the queue. However, if I already
* hold locks that conflict with the request of any previous waiter,
* put myself in the queue just in front of the first such waiter.
* This is not a necessary step, since deadlock detection would move
- * me to before that waiter anyway; but it's relatively cheap to detect
- * such a conflict immediately, and avoid delaying till deadlock timeout.
+ * me to before that waiter anyway; but it's relatively cheap to
+ * detect such a conflict immediately, and avoid delaying till
+ * deadlock timeout.
*
- * Special case: if I find I should go in front of some waiter, check
- * to see if I conflict with already-held locks or the requests before
+ * Special case: if I find I should go in front of some waiter, check to
+ * see if I conflict with already-held locks or the requests before
* that waiter. If not, then just grant myself the requested lock
* immediately. This is the same as the test for immediate grant in
- * LockAcquire, except we are only considering the part of the wait queue
- * before my insertion point.
- * ----------------------
+ * LockAcquire, except we are only considering the part of the wait
+ * queue before my insertion point.
+ *
*/
if (myHeldLocks != 0)
{
@@ -598,9 +596,9 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
proc = (PROC *) &(waitQueue->links);
}
- /* -------------------
- * Insert self into queue, ahead of the given proc (or at tail of queue).
- * -------------------
+ /*
+ * Insert self into queue, ahead of the given proc (or at tail of
+ * queue).
*/
SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
waitQueue->size++;
@@ -617,18 +615,17 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
/* mark that we are waiting for a lock */
waitingForLock = true;
- /* -------------------
+ /*
* Release the locktable's spin lock.
*
- * NOTE: this may also cause us to exit critical-section state,
- * possibly allowing a cancel/die interrupt to be accepted.
- * This is OK because we have recorded the fact that we are waiting for
- * a lock, and so LockWaitCancel will clean up if cancel/die happens.
- * -------------------
+ * NOTE: this may also cause us to exit critical-section state, possibly
+ * allowing a cancel/die interrupt to be accepted. This is OK because
+ * we have recorded the fact that we are waiting for a lock, and so
+ * LockWaitCancel will clean up if cancel/die happens.
*/
SpinRelease(spinlock);
- /* --------------
+ /*
* Set timer so we can wake up after awhile and check for a deadlock.
* If a deadlock is detected, the handler releases the process's
* semaphore and sets MyProc->errType = STATUS_ERROR, allowing us to
@@ -637,9 +634,8 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
* By delaying the check until we've waited for a bit, we can avoid
* running the rather expensive deadlock-check code in most cases.
*
- * Need to zero out struct to set the interval and the microseconds fields
- * to 0.
- * --------------
+ * Need to zero out struct to set the interval and the microseconds
+ * fields to 0.
*/
#ifndef __BEOS__
MemSet(&timeval, 0, sizeof(struct itimerval));
@@ -653,26 +649,24 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
#endif
- /* --------------
+ /*
* If someone wakes us between SpinRelease and IpcSemaphoreLock,
- * IpcSemaphoreLock will not block. The wakeup is "saved" by
- * the semaphore implementation. Note also that if HandleDeadLock
- * is invoked but does not detect a deadlock, IpcSemaphoreLock()
- * will continue to wait. There used to be a loop here, but it
- * was useless code...
+ * IpcSemaphoreLock will not block. The wakeup is "saved" by the
+ * semaphore implementation. Note also that if HandleDeadLock is
+ * invoked but does not detect a deadlock, IpcSemaphoreLock() will
+ * continue to wait. There used to be a loop here, but it was useless
+ * code...
*
* We pass interruptOK = true, which eliminates a window in which
* cancel/die interrupts would be held off undesirably. This is a
* promise that we don't mind losing control to a cancel/die interrupt
* here. We don't, because we have no state-change work to do after
* being granted the lock (the grantor did it all).
- * --------------
*/
IpcSemaphoreLock(MyProc->sem.semId, MyProc->sem.semNum, true);
- /* ---------------
+ /*
* Disable the timer, if it's still running
- * ---------------
*/
#ifndef __BEOS__
MemSet(&timeval, 0, sizeof(struct itimerval));
@@ -688,12 +682,11 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
*/
waitingForLock = false;
- /* ----------------
+ /*
* Re-acquire the locktable's spin lock.
*
- * We could accept a cancel/die interrupt here. That's OK because
- * the lock is now registered as being held by this process.
- * ----------------
+ * We could accept a cancel/die interrupt here. That's OK because the
+ * lock is now registered as being held by this process.
*/
SpinAcquire(spinlock);
@@ -825,17 +818,18 @@ HandleDeadLock(SIGNAL_ARGS)
*/
LockLockTable();
- /* ---------------------
+ /*
* Check to see if we've been awoken by anyone in the interim.
*
* If we have we can return and resume our transaction -- happy day.
- * Before we are awoken the process releasing the lock grants it to
- * us so we know that we don't have to wait anymore.
+ * Before we are awoken the process releasing the lock grants it to us
+ * so we know that we don't have to wait anymore.
*
* We check by looking to see if we've been unlinked from the wait queue.
- * This is quicker than checking our semaphore's state, since no kernel
- * call is needed, and it is safe because we hold the locktable lock.
- * ---------------------
+ * This is quicker than checking our semaphore's state, since no
+ * kernel call is needed, and it is safe because we hold the locktable
+ * lock.
+ *
*/
if (MyProc->links.prev == INVALID_OFFSET ||
MyProc->links.next == INVALID_OFFSET)
@@ -858,37 +852,34 @@ HandleDeadLock(SIGNAL_ARGS)
return;
}
- /* ------------------------
+ /*
* Oops. We have a deadlock.
*
* Get this process out of wait state.
- * ------------------------
*/
RemoveFromWaitQueue(MyProc);
- /* -------------
- * Set MyProc->errType to STATUS_ERROR so that ProcSleep will
- * report an error after we return from this signal handler.
- * -------------
+ /*
+ * Set MyProc->errType to STATUS_ERROR so that ProcSleep will report
+ * an error after we return from this signal handler.
*/
MyProc->errType = STATUS_ERROR;
- /* ------------------
- * Unlock my semaphore so that the interrupted ProcSleep() call can finish.
- * ------------------
+ /*
+ * Unlock my semaphore so that the interrupted ProcSleep() call can
+ * finish.
*/
IpcSemaphoreUnlock(MyProc->sem.semId, MyProc->sem.semNum);
- /* ------------------
- * We're done here. Transaction abort caused by the error that ProcSleep
- * will raise will cause any other locks we hold to be released, thus
- * allowing other processes to wake up; we don't need to do that here.
- * NOTE: an exception is that releasing locks we hold doesn't consider
- * the possibility of waiters that were blocked behind us on the lock
- * we just failed to get, and might now be wakable because we're not
- * in front of them anymore. However, RemoveFromWaitQueue took care of
- * waking up any such processes.
- * ------------------
+ /*
+ * We're done here. Transaction abort caused by the error that
+ * ProcSleep will raise will cause any other locks we hold to be
+ * released, thus allowing other processes to wake up; we don't need
+ * to do that here. NOTE: an exception is that releasing locks we hold
+ * doesn't consider the possibility of waiters that were blocked
+ * behind us on the lock we just failed to get, and might now be
+ * wakable because we're not in front of them anymore. However,
+ * RemoveFromWaitQueue took care of waking up any such processes.
*/
UnlockLockTable();
errno = save_errno;
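The deadlock check in ProcSleep(), described above, is armed as a one-shot timer: the whole struct itimerval is zeroed so that it_interval and the microsecond fields stay 0, and only it_value is filled in before calling setitimer(). A hedged sketch of just that step, with a hypothetical timeout_ms parameter:

#include <string.h>
#include <sys/time.h>

/* Sketch only: request a single SIGALRM after timeout_ms milliseconds.
 * Zeroing the struct first keeps it_interval at 0, so the timer does
 * not repeat. */
static int
arm_deadlock_timer(int timeout_ms)
{
    struct itimerval timeval;

    memset(&timeval, 0, sizeof(struct itimerval));
    timeval.it_value.tv_sec = timeout_ms / 1000;
    timeval.it_value.tv_usec = (timeout_ms % 1000) * 1000;

    return setitimer(ITIMER_REAL, &timeval, NULL);
}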
diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c
index 1bd4d2339eb..0e9cdd22137 100644
--- a/src/backend/tcop/dest.c
+++ b/src/backend/tcop/dest.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/dest.c,v 1.43 2001/03/22 03:59:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/dest.c,v 1.44 2001/03/22 06:16:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,34 +97,31 @@ BeginCommand(char *pname,
{
case Remote:
case RemoteInternal:
- /* ----------------
- * if this is a "retrieve into portal" query, done
- * because nothing needs to be sent to the fe.
- * ----------------
+
+ /*
+ * if this is a "retrieve into portal" query, done because
+ * nothing needs to be sent to the fe.
*/
CommandInfo[0] = '\0';
if (isIntoPortal)
break;
- /* ----------------
- * if portal name not specified for remote query,
- * use the "blank" portal.
- * ----------------
+ /*
+ * if portal name not specified for remote query, use the
+ * "blank" portal.
*/
if (pname == NULL)
pname = "blank";
- /* ----------------
- * send fe info on tuples we're about to send
- * ----------------
+ /*
+ * send fe info on tuples we're about to send
*/
pq_puttextmessage('P', pname);
- /* ----------------
- * if this is a retrieve, then we send back the tuple
- * descriptor of the tuples. "retrieve into" is an
- * exception because no tuples are returned in that case.
- * ----------------
+ /*
+ * if this is a retrieve, then we send back the tuple
+ * descriptor of the tuples. "retrieve into" is an exception
+ * because no tuples are returned in that case.
*/
if (operation == CMD_SELECT && !isIntoRel)
{
@@ -151,9 +148,9 @@ BeginCommand(char *pname,
break;
case Debug:
- /* ----------------
- * show the return type of the tuples
- * ----------------
+
+ /*
+ * show the return type of the tuples
*/
if (pname == NULL)
pname = "blank";
@@ -213,9 +210,9 @@ EndCommand(char *commandTag, CommandDest dest)
{
case Remote:
case RemoteInternal:
- /* ----------------
- * tell the fe that the query is over
- * ----------------
+
+ /*
+ * tell the fe that the query is over
*/
sprintf(buf, "%s%s", commandTag, CommandInfo);
pq_puttextmessage('C', buf);
@@ -277,9 +274,9 @@ NullCommand(CommandDest dest)
{
case RemoteInternal:
case Remote:
- /* ----------------
- * tell the fe that we saw an empty query string
- * ----------------
+
+ /*
+ * tell the fe that we saw an empty query string
*/
pq_putbytes("I", 2);/* note we send I and \0 */
break;
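The comments above list the single-character messages sent back to the frontend: 'P' with a portal name, a tuple descriptor for retrieves, 'C' with the completion tag, and 'I' for an empty query string. Under the old frontend/backend protocol these text messages are just a type byte followed by a NUL-terminated string; a minimal sketch of that framing (writing to stdout in place of the real connection) might look like this:

#include <stdio.h>
#include <string.h>

/* Sketch only: one-byte message type followed by the string and its
 * terminating '\0', the shape the pq_puttextmessage() calls above send. */
static void
put_text_message(char type, const char *str)
{
    fputc(type, stdout);
    fwrite(str, 1, strlen(str) + 1, stdout);
}

int
main(void)
{
    put_text_message('P', "blank");     /* portal name for a remote query */
    put_text_message('C', "SELECT");    /* command-completion tag */
    return 0;
}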
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index 0d83c5104c4..56f13f9b8ba 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/fastpath.c,v 1.47 2001/03/22 03:59:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/fastpath.c,v 1.48 2001/03/22 06:16:17 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
@@ -298,9 +298,9 @@ HandleFunctionRequest(void)
* (including lookup of the given function ID) and elog if
* appropriate. Unfortunately, because we cannot even read the
* message properly without knowing whether the data types are
- * pass-by-ref or pass-by-value, it's not all that easy to do :-(.
- * The protocol should require the client to supply what it thinks is
- * the typbyval and typlen value for each arg, so that we can read the
+ * pass-by-ref or pass-by-value, it's not all that easy to do :-(. The
+ * protocol should require the client to supply what it thinks is the
+ * typbyval and typlen value for each arg, so that we can read the
* data without having to do any lookups. Then after we've read the
* message, we should do the lookups, verify agreement of the actual
* function arg types with what we received, and finally call the
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 745de3ce1a8..b62b41356bc 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.213 2001/03/22 03:59:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.214 2001/03/22 06:16:17 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@@ -157,9 +157,8 @@ InteractiveBackend(StringInfo inBuf)
bool end = false; /* end-of-input flag */
bool backslashSeen = false; /* have we seen a \ ? */
- /* ----------------
- * display a prompt and obtain input from the user
- * ----------------
+ /*
+ * display a prompt and obtain input from the user
*/
printf("backend> ");
fflush(stdout);
@@ -172,10 +171,10 @@ InteractiveBackend(StringInfo inBuf)
{
if (UseNewLine)
{
- /* ----------------
- * if we are using \n as a delimiter, then read
- * characters until the \n.
- * ----------------
+
+ /*
+ * if we are using \n as a delimiter, then read characters
+ * until the \n.
*/
while ((c = getc(stdin)) != EOF)
{
@@ -208,9 +207,9 @@ InteractiveBackend(StringInfo inBuf)
}
else
{
- /* ----------------
- * otherwise read characters until EOF.
- * ----------------
+
+ /*
+ * otherwise read characters until EOF.
*/
while ((c = getc(stdin)) != EOF)
appendStringInfoChar(inBuf, (char) c);
@@ -222,16 +221,14 @@ InteractiveBackend(StringInfo inBuf)
if (end)
return EOF;
- /* ----------------
- * otherwise we have a user query so process it.
- * ----------------
+ /*
+ * otherwise we have a user query so process it.
*/
break;
}
- /* ----------------
- * if the query echo flag was given, print the query..
- * ----------------
+ /*
+ * if the query echo flag was given, print the query.
*/
if (EchoQuery)
printf("query: %s\n", inBuf->data);
@@ -260,9 +257,8 @@ SocketBackend(StringInfo inBuf)
char qtype;
char result = '\0';
- /* ----------------
- * get input from the frontend
- * ----------------
+ /*
+ * get input from the frontend
*/
qtype = '?';
if (pq_getbytes(&qtype, 1) == EOF)
@@ -270,9 +266,9 @@ SocketBackend(StringInfo inBuf)
switch (qtype)
{
- /* ----------------
- * 'Q': user entered a query
- * ----------------
+
+ /*
+ * 'Q': user entered a query
*/
case 'Q':
if (pq_getstr(inBuf))
@@ -280,9 +276,8 @@ SocketBackend(StringInfo inBuf)
result = 'Q';
break;
- /* ----------------
- * 'F': calling user/system functions
- * ----------------
+ /*
+ * 'F': calling user/system functions
*/
case 'F':
if (pq_getstr(inBuf))
@@ -290,20 +285,18 @@ SocketBackend(StringInfo inBuf)
result = 'F';
break;
- /* ----------------
- * 'X': frontend is exiting
- * ----------------
+ /*
+ * 'X': frontend is exiting
*/
case 'X':
result = 'X';
break;
- /* ----------------
- * otherwise we got garbage from the frontend.
+ /*
+ * otherwise we got garbage from the frontend.
*
- * XXX are we certain that we want to do an elog(FATAL) here?
- * -cim 1/24/90
- * ----------------
+ * XXX are we certain that we want to do an elog(FATAL) here?
+ * -cim 1/24/90
*/
default:
elog(FATAL, "Socket command type %c unknown", qtype);
@@ -350,15 +343,13 @@ pg_parse_and_rewrite(char *query_string, /* string to execute */
List *querytree_list;
List *list_item;
- /* ----------------
- * (1) parse the request string into a list of raw parse trees.
- * ----------------
+ /*
+ * (1) parse the request string into a list of raw parse trees.
*/
raw_parsetree_list = pg_parse_query(query_string, typev, nargs);
- /* ----------------
- * (2) Do parse analysis and rule rewrite.
- * ----------------
+ /*
+ * (2) Do parse analysis and rule rewrite.
*/
querytree_list = NIL;
foreach(list_item, raw_parsetree_list)
@@ -424,9 +415,8 @@ pg_analyze_and_rewrite(Node *parsetree)
Query *querytree;
List *new_list;
- /* ----------------
- * (1) Perform parse analysis.
- * ----------------
+ /*
+ * (1) Perform parse analysis.
*/
if (Show_parser_stats)
ResetUsage();
@@ -440,12 +430,11 @@ pg_analyze_and_rewrite(Node *parsetree)
ResetUsage();
}
- /* ----------------
- * (2) Rewrite the queries, as necessary
+ /*
+ * (2) Rewrite the queries, as necessary
*
- * rewritten queries are collected in new_list. Note there may be
- * more or fewer than in the original list.
- * ----------------
+ * rewritten queries are collected in new_list. Note there may be more
+ * or fewer than in the original list.
*/
new_list = NIL;
foreach(list_item, querytree_list)
@@ -567,9 +556,8 @@ pg_plan_query(Query *querytree)
}
#endif
- /* ----------------
- * Print plan if debugging.
- * ----------------
+ /*
+ * Print plan if debugging.
*/
if (Debug_print_plan)
{
@@ -704,10 +692,10 @@ pg_exec_query_string(char *query_string, /* string to execute */
if (!allowit)
{
- /* ----------------
- * the EndCommand() stuff is to tell the frontend
- * that the command ended. -cim 6/1/90
- * ----------------
+
+ /*
+ * the EndCommand() stuff is to tell the frontend that the
+ * command ended. -cim 6/1/90
*/
char *tag = "*ABORT STATE*";
@@ -773,9 +761,9 @@ pg_exec_query_string(char *query_string, /* string to execute */
if (querytree->commandType == CMD_UTILITY)
{
- /* ----------------
- * process utility functions (create, destroy, etc..)
- * ----------------
+
+ /*
+ * process utility functions (create, destroy, etc..)
*/
if (Debug_print_query)
elog(DEBUG, "ProcessUtility: %s", query_string);
@@ -786,9 +774,9 @@ pg_exec_query_string(char *query_string, /* string to execute */
}
else
{
- /* ----------------
- * process a plannable query.
- * ----------------
+
+ /*
+ * process a plannable query.
*/
Plan *plan;
@@ -1201,18 +1189,18 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
break;
case 'B':
- /* ----------------
- * specify the size of buffer pool
- * ----------------
+
+ /*
+ * specify the size of buffer pool
*/
if (secure)
NBuffers = atoi(optarg);
break;
case 'C':
- /* ----------------
- * don't print version string
- * ----------------
+
+ /*
+ * don't print version string
*/
Noversion = true;
break;
@@ -1237,34 +1225,34 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
break;
case 'E':
- /* ----------------
- * E - echo the query the user entered
- * ----------------
+
+ /*
+ * E - echo the query the user entered
*/
EchoQuery = true;
break;
case 'e':
- /* --------------------------
+
+ /*
* Use european date formats.
- * --------------------------
*/
EuroDates = true;
break;
case 'F':
- /* --------------------
- * turn off fsync
- * --------------------
+
+ /*
+ * turn off fsync
*/
if (secure)
enableFsync = false;
break;
case 'f':
- /* -----------------
- * f - forbid generation of certain plans
- * -----------------
+
+ /*
+ * f - forbid generation of certain plans
*/
switch (optarg[0])
{
@@ -1296,54 +1284,54 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
break;
case 'L':
- /* --------------------
- * turn off locking
- * --------------------
+
+ /*
+ * turn off locking
*/
if (secure)
lockingOff = 1;
break;
case 'N':
- /* ----------------
- * N - Don't use newline as a query delimiter
- * ----------------
+
+ /*
+ * N - Don't use newline as a query delimiter
*/
UseNewLine = 0;
break;
case 'O':
- /* --------------------
- * allow system table structure modifications
- * --------------------
+
+ /*
+ * allow system table structure modifications
*/
if (secure) /* XXX safe to allow from client??? */
allowSystemTableMods = true;
break;
case 'P':
- /* --------------------
- * ignore system indexes
- * --------------------
+
+ /*
+ * ignore system indexes
*/
if (secure) /* XXX safe to allow from client??? */
IgnoreSystemIndexes(true);
break;
case 'o':
- /* ----------------
- * o - send output (stdout and stderr) to the given file
- * ----------------
+
+ /*
+ * o - send output (stdout and stderr) to the given file
*/
if (secure)
StrNCpy(OutputFileName, optarg, MAXPGPATH);
break;
case 'p':
- /* ----------------
- * p - special flag passed if backend was forked
- * by a postmaster.
- * ----------------
+
+ /*
+ * p - special flag passed if backend was forked by a
+ * postmaster.
*/
if (secure)
{
@@ -1354,9 +1342,9 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
break;
case 'S':
- /* ----------------
- * S - amount of sort memory to use in 1k bytes
- * ----------------
+
+ /*
+ * S - amount of sort memory to use in 1k bytes
*/
{
int S;
@@ -1368,15 +1356,15 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
break;
case 's':
- /* ----------------
- * s - report usage statistics (timings) after each query
- * ----------------
+
+ /*
+ * s - report usage statistics (timings) after each query
*/
Show_query_stats = 1;
break;
case 't':
- /* ----------------
+ /* ---------------
* tell postgres to report usage statistics (timings) for
* each query
*
@@ -1411,9 +1399,9 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
break;
case 'W':
- /* ----------------
- * wait N seconds to allow attach from a debugger
- * ----------------
+
+ /*
+ * wait N seconds to allow attach from a debugger
*/
sleep(atoi(optarg));
break;
@@ -1715,7 +1703,7 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
if (!IsUnderPostmaster)
{
puts("\nPOSTGRES backend interactive interface ");
- puts("$Revision: 1.213 $ $Date: 2001/03/22 03:59:47 $\n");
+ puts("$Revision: 1.214 $ $Date: 2001/03/22 06:16:17 $\n");
}
/*
@@ -1810,11 +1798,10 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
parser_input = makeStringInfo();
- /* ----------------
- * (1) tell the frontend we're ready for a new query.
+ /*
+ * (1) tell the frontend we're ready for a new query.
*
- * Note: this includes fflush()'ing the last of the prior output.
- * ----------------
+ * Note: this includes fflush()'ing the last of the prior output.
*/
ReadyForQuery(whereToSendOutput);
@@ -1823,11 +1810,10 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
else
set_ps_display("idle");
- /* ----------------
- * (2) deal with pending asynchronous NOTIFY from other backends,
- * and enable async.c's signal handler to execute NOTIFY directly.
- * Then set up other stuff needed before blocking for input.
- * ----------------
+ /*
+ * (2) deal with pending asynchronous NOTIFY from other backends,
+ * and enable async.c's signal handler to execute NOTIFY directly.
+ * Then set up other stuff needed before blocking for input.
*/
QueryCancelPending = false; /* forget any earlier CANCEL
* signal */
@@ -1840,25 +1826,22 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
QueryCancelPending = false;
CHECK_FOR_INTERRUPTS();
- /* ----------------
- * (3) read a command (loop blocks here)
- * ----------------
+ /*
+ * (3) read a command (loop blocks here)
*/
firstchar = ReadCommand(parser_input);
- /* ----------------
- * (4) disable async signal conditions again.
- * ----------------
+ /*
+ * (4) disable async signal conditions again.
*/
ImmediateInterruptOK = false;
QueryCancelPending = false; /* forget any CANCEL signal */
DisableNotifyInterrupt();
- /* ----------------
- * (5) check for any other interesting events that happened
- * while we slept.
- * ----------------
+ /*
+ * (5) check for any other interesting events that happened while
+ * we slept.
*/
if (got_SIGHUP)
{
@@ -1866,15 +1849,14 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
ProcessConfigFile(PGC_SIGHUP);
}
- /* ----------------
- * (6) process the command.
- * ----------------
+ /*
+ * (6) process the command.
*/
switch (firstchar)
{
- /* ----------------
- * 'F' indicates a fastpath call.
- * ----------------
+
+ /*
+ * 'F' indicates a fastpath call.
*/
case 'F':
/* start an xact for this function invocation */
@@ -1890,30 +1872,29 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
finish_xact_command();
break;
- /* ----------------
- * 'Q' indicates a user query
- * ----------------
+ /*
+ * 'Q' indicates a user query
*/
case 'Q':
if (strspn(parser_input->data, " \t\r\n") == parser_input->len)
{
- /* ----------------
- * if there is nothing in the input buffer, don't bother
- * trying to parse and execute anything; just send
- * back a quick NullCommand response.
- * ----------------
+
+ /*
+ * if there is nothing in the input buffer, don't
+ * bother trying to parse and execute anything; just
+ * send back a quick NullCommand response.
*/
if (IsUnderPostmaster)
NullCommand(Remote);
}
else
{
- /* ----------------
- * otherwise, process the input string.
+
+ /*
+ * otherwise, process the input string.
*
- * Note: transaction command start/end is now done
- * within pg_exec_query_string(), not here.
- * ----------------
+ * Note: transaction command start/end is now done within
+ * pg_exec_query_string(), not here.
*/
if (Show_query_stats)
ResetUsage();
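Taken together, the main-loop hunks above describe one fixed cycle: tell the frontend we are ready, block reading a one-character command type, then dispatch on 'F' (fast-path function call), 'Q' (user query) or 'X' (frontend exit). A self-contained sketch of that dispatch shape, reading from stdin instead of a frontend connection and using placeholder actions:

#include <stdio.h>

int
main(void)
{
    char    payload[1024];
    int     firstchar;

    while ((firstchar = getc(stdin)) != EOF)
    {
        /* the rest of the line stands in for the message payload */
        if (fgets(payload, sizeof(payload), stdin) == NULL)
            payload[0] = '\0';

        switch (firstchar)
        {
            case 'F':
                printf("fastpath function call\n");
                break;
            case 'Q':
                printf("user query: %s", payload);
                break;
            case 'X':
                return 0;       /* frontend is exiting */
            default:
                fprintf(stderr, "unknown command type %c\n", firstchar);
                return 1;
        }
    }
    return 0;
}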
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 2dfc5a9ff1f..48df3f980a3 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.43 2001/03/22 03:59:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.44 2001/03/22 06:16:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,15 +55,13 @@ CreateExecutorState(void)
{
EState *state;
- /* ----------------
- * create a new executor state
- * ----------------
+ /*
+ * create a new executor state
*/
state = makeNode(EState);
- /* ----------------
- * initialize the Executor State structure
- * ----------------
+ /*
+ * initialize the Executor State structure
*/
state->es_direction = ForwardScanDirection;
state->es_range_table = NIL;
@@ -85,9 +83,8 @@ CreateExecutorState(void)
state->es_per_tuple_exprcontext = NULL;
- /* ----------------
- * return the executor state structure
- * ----------------
+ /*
+ * return the executor state structure
*/
return state;
}
@@ -137,9 +134,8 @@ PreparePortal(char *portalName)
{
Portal portal;
- /* ----------------
- * Check for already-in-use portal name.
- * ----------------
+ /*
+ * Check for already-in-use portal name.
*/
portal = GetPortalByName(portalName);
if (PortalIsValid(portal))
@@ -154,9 +150,8 @@ PreparePortal(char *portalName)
PortalDrop(&portal);
}
- /* ----------------
- * Create the new portal.
- * ----------------
+ /*
+ * Create the new portal.
*/
portal = CreatePortal(portalName);
@@ -188,9 +183,8 @@ ProcessQuery(Query *parsetree,
set_ps_display(tag = CreateOperationTag(operation));
- /* ----------------
- * initialize portal/into relation status
- * ----------------
+ /*
+ * initialize portal/into relation status
*/
isRetrieveIntoPortal = false;
isRetrieveIntoRelation = false;
@@ -219,10 +213,9 @@ ProcessQuery(Query *parsetree,
}
}
- /* ----------------
- * If retrieving into a portal, set up the portal and copy
- * the parsetree and plan into its memory context.
- * ----------------
+ /*
+ * If retrieving into a portal, set up the portal and copy the
+ * parsetree and plan into its memory context.
*/
if (isRetrieveIntoPortal)
{
@@ -238,40 +231,34 @@ ProcessQuery(Query *parsetree,
*/
}
- /* ----------------
- * Now we can create the QueryDesc object.
- * ----------------
+ /*
+ * Now we can create the QueryDesc object.
*/
queryDesc = CreateQueryDesc(parsetree, plan, dest);
- /* ----------------
- * When performing a retrieve into, we override the normal
- * communication destination during the processing of the
- * the query. This only affects the tuple-output function
- * - the correct destination will still see BeginCommand()
- * and EndCommand() messages.
- * ----------------
+ /*
+ * When performing a retrieve into, we override the normal
+ * communication destination during the processing of the query.
+ * This only affects the tuple-output function - the correct
+ * destination will still see BeginCommand() and EndCommand()
+ * messages.
*/
if (isRetrieveIntoRelation)
queryDesc->dest = None;
- /* ----------------
- * create a default executor state.
- * ----------------
+ /*
+ * create a default executor state.
*/
state = CreateExecutorState();
- /* ----------------
- * call ExecStart to prepare the plan for execution
- * ----------------
+ /*
+ * call ExecStart to prepare the plan for execution
*/
attinfo = ExecutorStart(queryDesc, state);
- /* ----------------
- * report the query's result type information
- * back to the front end or to whatever destination
- * we're dealing with.
- * ----------------
+ /*
+ * report the query's result type information back to the front end or
+ * to whatever destination we're dealing with.
*/
BeginCommand(NULL,
operation,
@@ -281,10 +268,9 @@ ProcessQuery(Query *parsetree,
tag,
dest);
- /* ----------------
- * If retrieve into portal, stop now; we do not run the plan
- * until a FETCH command is received.
- * ----------------
+ /*
+ * If retrieve into portal, stop now; we do not run the plan until a
+ * FETCH command is received.
*/
if (isRetrieveIntoPortal)
{
@@ -302,25 +288,22 @@ ProcessQuery(Query *parsetree,
return;
}
- /* ----------------
- * Now we get to the important call to ExecutorRun() where we
- * actually run the plan..
- * ----------------
+ /*
+ * Now we get to the important call to ExecutorRun() where we actually
+ * run the plan.
*/
ExecutorRun(queryDesc, state, EXEC_RUN, 0L);
/* save infos for EndCommand */
UpdateCommandInfo(operation, state->es_lastoid, state->es_processed);
- /* ----------------
- * Now, we close down all the scans and free allocated resources.
- * ----------------
+ /*
+ * Now, we close down all the scans and free allocated resources.
*/
ExecutorEnd(queryDesc, state);
- /* ----------------
- * Notify the destination of end of processing.
- * ----------------
+ /*
+ * Notify the destination of end of processing.
*/
EndCommand(tag, dest);
}
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 370d2d3bac7..ae6cd20a5db 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/utility.c,v 1.108 2001/03/22 03:59:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/utility.c,v 1.109 2001/03/22 06:16:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -384,21 +384,21 @@ ProcessUtility(Node *parsetree,
*/
if (stmt->column == NULL)
{
- /* ----------------
- * rename relation
+
+ /*
+ * rename relation
*
- * Note: we also rename the "type" tuple
- * corresponding to the relation.
- * ----------------
+ * Note: we also rename the "type" tuple corresponding to
+ * the relation.
*/
renamerel(relname, /* old name */
stmt->newname); /* new name */
}
else
{
- /* ----------------
- * rename attribute
- * ----------------
+
+ /*
+ * rename attribute
*/
renameatt(relname, /* relname */
stmt->column, /* old att name */
@@ -923,9 +923,8 @@ ProcessUtility(Node *parsetree,
break;
}
- /* ----------------
- * tell fe/be or whatever that we're done.
- * ----------------
+ /*
+ * tell fe/be or whatever that we're done.
*/
EndCommand(commandTag, dest);
}
diff --git a/src/backend/tioga/tgRecipe.h b/src/backend/tioga/tgRecipe.h
index 5b190f17254..52868554a13 100644
--- a/src/backend/tioga/tgRecipe.h
+++ b/src/backend/tioga/tgRecipe.h
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: tgRecipe.h,v 1.16 2001/03/22 03:59:48 momjian Exp $
+ * $Id: tgRecipe.h,v 1.17 2001/03/22 06:16:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,6 +41,7 @@ typedef struct
*
*
*
+ *
* geo-decls.h */
#endif /* TIOGA_FRONTEND */
diff --git a/src/backend/utils/adt/ascii.c b/src/backend/utils/adt/ascii.c
index bdcd24e44e0..234651aebab 100644
--- a/src/backend/utils/adt/ascii.c
+++ b/src/backend/utils/adt/ascii.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* ascii.c
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ascii.c,v 1.7 2001/03/22 03:59:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ascii.c,v 1.8 2001/03/22 06:16:17 momjian Exp $
*
* Portions Copyright (c) 1999-2000, PostgreSQL Global Development Group
*
@@ -80,27 +80,27 @@ pg_to_ascii(unsigned char *src, unsigned char *src_end, unsigned char *desc, int
if (enc == LATIN1)
{
- /* ----------
+
+ /*
* ISO-8859-1 <range: 160 -- 255>
- * ----------
*/
ascii = " cL Y \"Ca -R 'u ., ?AAAAAAACEEEEIIII NOOOOOxOUUUUYTBaaaaaaaceeeeiiii nooooo/ouuuuyty";
range = RANGE_160;
}
else if (enc == LATIN2)
{
- /* ----------
+
+ /*
* ISO-8859-2 <range: 160 -- 255>
- * ----------
*/
ascii = " A L LS \"SSTZ-ZZ a,l'ls ,sstz\"zzRAAAALCCCEEEEIIDDNNOOOOxRUUUUYTBraaaalccceeeeiiddnnoooo/ruuuuyt.";
range = RANGE_160;
}
else if (enc == WIN1250)
{
- /* ----------
+
+ /*
* Window CP1250 <range: 128 -- 255>
- * ----------
*/
ascii = " ' \" %S<STZZ `'\"\".-- s>stzz L A \"CS -RZ ,l'u .,as L\"lzRAAAALCCCEEEEIIDDNNOOOOxRUUUUYTBraaaalccceeeeiiddnnoooo/ruuuuyt ";
range = RANGE_128;
@@ -111,9 +111,8 @@ pg_to_ascii(unsigned char *src, unsigned char *src_end, unsigned char *desc, int
pg_encoding_to_char(enc));
}
- /* ----------
+ /*
* Encode
- * ----------
*/
for (x = src; x <= src_end; x++)
{
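pg_to_ascii() above works from a fixed substitution table: every character at or above the table's base value ("range") is replaced by the plain-ASCII entry at offset (character - range), and everything below the base passes through untouched. A self-contained sketch of that encode step; the tiny 8-entry table and the 248 base are hypothetical, just to keep the example short:

#include <stdio.h>

static void
to_ascii_range(unsigned char *s, const char *table, unsigned int range)
{
    /* replace every character >= range with its table entry */
    for (; *s; s++)
    {
        if (*s >= range)
            *s = (unsigned char) table[*s - range];
    }
}

int
main(void)
{
    unsigned char buf[] = {'a', 248, 'b', 0};

    to_ascii_range(buf, "oXXXXXXX", 248);       /* 248 maps to table[0] = 'o' */
    printf("%s\n", (char *) buf);               /* prints "aob" */
    return 0;
}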
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index bebe8240144..0946ea9e496 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/formatting.c,v 1.34 2001/03/22 03:59:50 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/formatting.c,v 1.35 2001/03/22 06:16:17 momjian Exp $
*
*
* Portions Copyright (c) 1999-2000, PostgreSQL Global Development Group
@@ -1132,9 +1132,8 @@ parse_format(FormatNode *node, char *str, KeyWord *kw,
{
suffix = 0;
- /* ----------
+ /*
* Prefix
- * ----------
*/
if (ver == DCH_TYPE && (s = suff_search(str, suf, SUFFTYPE_PREFIX)) != NULL)
{
@@ -1143,9 +1142,8 @@ parse_format(FormatNode *node, char *str, KeyWord *kw,
str += s->len;
}
- /* ----------
+ /*
* Keyword
- * ----------
*/
if (*str && (n->key = index_seq_search(str, kw, index)) != NULL)
{
@@ -1156,16 +1154,14 @@ parse_format(FormatNode *node, char *str, KeyWord *kw,
if (n->key->len)
str += n->key->len;
- /* ----------
+ /*
* NUM version: Prepare global NUMDesc struct
- * ----------
*/
if (ver == NUM_TYPE)
NUMDesc_prepare(Num, n);
- /* ----------
+ /*
* Postfix
- * ----------
*/
if (ver == DCH_TYPE && *str && (s = suff_search(str, suf, SUFFTYPE_POSTFIX)) != NULL)
{
@@ -1178,9 +1174,8 @@ parse_format(FormatNode *node, char *str, KeyWord *kw,
else if (*str)
{
- /* ----------
+ /*
* Special characters '\' and '"'
- * ----------
*/
if (*str == '"' && last != '\\')
{
@@ -1258,9 +1253,8 @@ DCH_processor(FormatNode *node, char *inout, int flag)
char *s;
- /* ----------
+ /*
* Zeroing global flags
- * ----------
*/
DCH_global_flag = 0;
@@ -1270,9 +1264,8 @@ DCH_processor(FormatNode *node, char *inout, int flag)
{
int len;
- /* ----------
+ /*
* Call node action function
- * ----------
*/
len = n->key->action(n->key->id, s, n->suffix, flag, n);
if (len > 0)
@@ -1284,18 +1277,17 @@ DCH_processor(FormatNode *node, char *inout, int flag)
else
{
- /* ----------
+ /*
* Remove to output char from input in TO_CHAR
- * ----------
*/
if (flag == TO_CHAR)
*s = n->character;
else
{
- /* ----------
+
+ /*
* Skip blank space in FROM_CHAR's input
- * ----------
*/
if (isspace((unsigned char) n->character) && IS_FX == 0)
{
@@ -1893,11 +1885,10 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
p_inout = inout;
- /* ----------
+ /*
* In the FROM-char is not difference between "January" or "JANUARY"
- * or "january", all is before search convert to "first-upper".
- * This convention is used for MONTH, MON, DAY, DY
- * ----------
+ * or "january", all is before search convert to "first-upper". This
+ * convention is used for MONTH, MON, DAY, DY
*/
if (flag == FROM_CHAR)
{
@@ -2459,9 +2450,8 @@ DCH_cache_getnew(char *str)
ent->age = (++DCHCounter);
}
- /* ----------
+ /*
* Cache is full - needs remove any older entry
- * ----------
*/
if (n_DCHCache > DCH_CACHE_FIELDS)
{
@@ -2583,24 +2573,21 @@ timestamp_to_char(PG_FUNCTION_ARGS)
tm->tm_wday = (date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + 1) % 7;
tm->tm_yday = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - date2j(tm->tm_year, 1, 1) + 1;
- /* ----------
+ /*
* Convert fmt to C string
- * ----------
*/
str = (char *) palloc(len + 1);
memcpy(str, VARDATA(fmt), len);
*(str + len) = '\0';
- /* ----------
+ /*
* Allocate result
- * ----------
*/
result = (text *) palloc((len * DCH_MAX_ITEM_SIZ) + 1 + VARHDRSZ);
- /* ----------
+ /*
* Allocate new memory if format picture is bigger than static cache
* and not use cache (call parser always) - flag=1 show this variant
- * ----------
*/
if (len > DCH_CACHE_SIZE)
{
@@ -2616,9 +2603,8 @@ timestamp_to_char(PG_FUNCTION_ARGS)
else
{
- /* ----------
+ /*
* Use cache buffers
- * ----------
*/
DCHCacheEntry *ent;
@@ -2629,10 +2615,9 @@ timestamp_to_char(PG_FUNCTION_ARGS)
ent = DCH_cache_getnew(str);
- /* ----------
+ /*
* Not in the cache, must run parser and save a new
* format-picture to the cache.
- * ----------
*/
parse_format(ent->format, str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
@@ -2654,10 +2639,9 @@ timestamp_to_char(PG_FUNCTION_ARGS)
pfree(str);
- /* ----------
+ /*
* for result is allocated max memory, which current format-picture
* needs, now it must be re-allocate to result real size
- * ----------
*/
if (!(len = strlen(VARDATA(result))))
{
@@ -2706,18 +2690,17 @@ to_timestamp(PG_FUNCTION_ARGS)
if (len)
{
- /* ----------
+ /*
* Convert fmt to C string
- * ----------
*/
str = (char *) palloc(len + 1);
memcpy(str, VARDATA(fmt), len);
*(str + len) = '\0';
- /* ----------
- * Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always) - flag=1 show this variant
- * ----------
+ /*
+ * Allocate new memory if format picture is bigger than static
+ * cache and not use cache (call parser always) - flag=1 show this
+ * variant
*/
if (len > DCH_CACHE_SIZE)
{
@@ -2732,9 +2715,8 @@ to_timestamp(PG_FUNCTION_ARGS)
else
{
- /* ----------
+ /*
* Use cache buffers
- * ----------
*/
DCHCacheEntry *ent;
@@ -2745,11 +2727,10 @@ to_timestamp(PG_FUNCTION_ARGS)
ent = DCH_cache_getnew(str);
- /* ----------
+ /*
* Not in the cache, must run parser and save a new
* format-picture to the cache.
- * ----------
- */
+ */
parse_format(ent->format, str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
@@ -2762,17 +2743,15 @@ to_timestamp(PG_FUNCTION_ARGS)
format = ent->format;
}
- /* ----------
+ /*
* Call action for each node in FormatNode tree
- * ----------
*/
#ifdef DEBUG_TO_FROM_CHAR
/* dump_node(format, len); */
#endif
- /* ----------
+ /*
* Convert date to C string
- * ----------
*/
date_len = VARSIZE(date_txt) - VARHDRSZ;
date_str = (char *) palloc(date_len + 1);
@@ -2787,10 +2766,9 @@ to_timestamp(PG_FUNCTION_ARGS)
pfree(format);
}
- /* --------------------------------------------------------------
- * Convert values that user define for FROM_CHAR (to_date/to_timestamp)
- * to standard 'tm'
- * ----------
+ /*
+	 * Convert values that the user defines for FROM_CHAR
+ * (to_date/to_timestamp) to standard 'tm'
*/
#ifdef DEBUG_TO_FROM_CHAR
NOTICE_TMFC;
@@ -3050,9 +3028,8 @@ NUM_cache_getnew(char *str)
ent->age = (++NUMCounter);
}
- /* ----------
+ /*
* Cache is full - needs remove any older entry
- * ----------
*/
if (n_NUMCache > NUM_CACHE_FIELDS)
{
@@ -3156,18 +3133,16 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, int *flag)
FormatNode *format = NULL;
char *str;
- /* ----------
+ /*
* Convert VARDATA() to string
- * ----------
*/
str = (char *) palloc(len + 1);
memcpy(str, pars_str, len);
*(str + len) = '\0';
- /* ----------
+ /*
* Allocate new memory if format picture is bigger than static cache
* and not use cache (call parser always) - flag=1 show this variant
- * ----------
*/
if (len > NUM_CACHE_SIZE)
{
@@ -3186,9 +3161,8 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, int *flag)
else
{
- /* ----------
+ /*
* Use cache buffers
- * ----------
*/
NUMCacheEntry *ent;
@@ -3199,11 +3173,10 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, int *flag)
ent = NUM_cache_getnew(str);
- /* ----------
+ /*
* Not in the cache, must run parser and save a new
* format-picture to the cache.
- * ----------
- */
+ */
parse_format(ent->format, str, NUM_keywords,
NULL, NUM_index, NUM_TYPE, &ent->Num);
@@ -3213,9 +3186,8 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, int *flag)
format = ent->format;
- /* ----------
+ /*
* Copy cache to used struct
- * ----------
*/
Num->flag = ent->Num.flag;
Num->lsign = ent->Num.lsign;
@@ -3302,15 +3274,13 @@ NUM_prepare_locale(NUMProc *Np)
struct lconv *lconv;
- /* ----------
+ /*
* Get locales
- * ----------
*/
lconv = PGLC_localeconv();
- /* ----------
+ /*
* Positive / Negative number sign
- * ----------
*/
if (lconv->negative_sign && *lconv->negative_sign)
Np->L_negative_sign = lconv->negative_sign;
@@ -3322,27 +3292,24 @@ NUM_prepare_locale(NUMProc *Np)
else
Np->L_positive_sign = "+";
- /* ----------
+ /*
* Number thousands separator
- * ----------
*/
if (lconv->thousands_sep && *lconv->thousands_sep)
Np->L_thousands_sep = lconv->thousands_sep;
else
Np->L_thousands_sep = ",";
- /* ----------
+ /*
* Number decimal point
- * ----------
*/
if (lconv->decimal_point && *lconv->decimal_point)
Np->decimal = lconv->decimal_point;
else
Np->decimal = ".";
- /* ----------
+ /*
* Currency symbol
- * ----------
*/
if (lconv->currency_symbol && *lconv->currency_symbol)
Np->L_currency_symbol = lconv->currency_symbol;
@@ -3357,9 +3324,9 @@ NUM_prepare_locale(NUMProc *Np)
{
#endif
- /* ----------
+
+ /*
* Default values
- * ----------
*/
Np->L_negative_sign = "-";
Np->L_positive_sign = "+";
@@ -3423,9 +3390,8 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
if (OVERLOAD_TEST)
return;
- /* ----------
+ /*
* read sign
- * ----------
*/
if (*Np->number == ' ' && (id == NUM_0 || id == NUM_9 || NUM_S))
{
@@ -3433,9 +3399,9 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read sign (%c).", *Np->inout_p);
#endif
- /* ----------
+
+ /*
* locale sign
- * ----------
*/
if (IS_LSIGN(Np->Num))
{
@@ -3464,9 +3430,9 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read sipmle sign (%c).", *Np->inout_p);
#endif
- /* ----------
+
+ /*
* simple + - < >
- * ----------
*/
if (*Np->inout_p == '-' || (IS_BRACKET(Np->Num) &&
*Np->inout_p == '<'))
@@ -3487,9 +3453,8 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
if (OVERLOAD_TEST)
return;
- /* ----------
+ /*
* read digit
- * ----------
*/
if (isdigit((unsigned char) *Np->inout_p))
{
@@ -3507,9 +3472,8 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
elog(DEBUG_elog_output, "Read digit (%c).", *Np->inout_p);
#endif
- /* ----------
+ /*
* read decimal point
- * ----------
*/
}
else if (IS_DECIMAL(Np->Num))
@@ -3572,9 +3536,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
#endif
Np->num_in = FALSE;
- /* ----------
+ /*
* Write sign
- * ----------
*/
if (Np->num_curr == Np->sign_pos && Np->sign_wrote == FALSE)
{
@@ -3585,9 +3548,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
if (IS_LSIGN(Np->Num))
{
- /* ----------
+ /*
* Write locale SIGN
- * ----------
*/
if (Np->sign == '-')
strcpy(Np->inout_p, Np->L_negative_sign);
@@ -3620,9 +3582,9 @@ NUM_numpart_to_char(NUMProc *Np, int id)
(Np->num_curr == Np->num_count + (Np->num_pre ? 1 : 0)
+ (IS_DECIMAL(Np->Num) ? 1 : 0)))
{
- /* ----------
+
+ /*
* Write close BRACKET
- * ----------
*/
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Writing bracket to position %d", Np->num_curr);
@@ -3631,9 +3593,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
++Np->inout_p;
}
- /* ----------
+ /*
* digits / FM / Zero / Dec. point
- * ----------
*/
if (id == NUM_9 || id == NUM_0 || id == NUM_D || id == NUM_DEC ||
(id == NUM_S && Np->num_curr < Np->num_pre))
@@ -3643,9 +3604,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
(Np->Num->zero_start > Np->num_curr || !IS_ZERO(Np->Num)))
{
- /* ----------
+ /*
* Write blank space
- * ----------
*/
if (!IS_FILLMODE(Np->Num))
{
@@ -3662,9 +3622,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
Np->Num->zero_start <= Np->num_curr)
{
- /* ----------
+ /*
* Write ZERO
- * ----------
*/
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Writing zero to position %d", Np->num_curr);
@@ -3677,10 +3636,9 @@ NUM_numpart_to_char(NUMProc *Np, int id)
else
{
- /* ----------
- * Write Decinal point
- * ----------
- */
+ /*
+	 * Write Decimal point
+ */
if (*Np->number_p == '.')
{
@@ -3708,9 +3666,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
else
{
- /* ----------
+ /*
* Write Digits
- * ----------
*/
if (Np->last_relevant && Np->number_p > Np->last_relevant &&
id != NUM_0)
@@ -3775,9 +3732,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
if (Np->Num->zero_start)
--Np->Num->zero_start;
- /* ----------
+ /*
* Roman correction
- * ----------
*/
if (IS_ROMAN(Np->Num))
{
@@ -3797,9 +3753,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
Np->Num->flag |= NUM_F_ROMAN;
}
- /* ----------
+ /*
* Sign
- * ----------
*/
if (type == FROM_CHAR)
{
@@ -3833,9 +3788,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
Np->sign_wrote = TRUE; /* needn't sign */
}
- /* ----------
+ /*
* Count
- * ----------
*/
Np->num_count = Np->Num->post + Np->Num->pre - 1;
@@ -3858,9 +3812,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
if (!Np->sign_wrote)
{
- /* ----------
+ /*
* Set SING position
- * ----------
*/
if (Np->Num->lsign == NUM_LSIGN_POST)
{
@@ -3875,9 +3828,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
else
Np->sign_pos = Np->num_pre && !IS_FILLMODE(Np->Num) ? Np->num_pre : 0;
- /* ----------
+ /*
* terrible Ora format
- * ----------
*/
if (!IS_ZERO(Np->Num) && *Np->number == '0' &&
!IS_FILLMODE(Np->Num) && Np->Num->post != 0)
@@ -3924,15 +3876,13 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
);
#endif
- /* ----------
+ /*
* Locale
- * ----------
*/
NUM_prepare_locale(Np);
- /* ----------
+ /*
* Processor direct cycle
- * ----------
*/
if (Np->type == FROM_CHAR)
Np->number_p = Np->number + 1; /* first char is space for sign */
@@ -3944,24 +3894,22 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
if (Np->type == FROM_CHAR)
{
- /* ----------
+
+ /*
* Check non-string inout end
- * ----------
*/
if (Np->inout_p >= Np->inout + plen)
break;
}
- /* ----------
+ /*
* Format pictures actions
- * ----------
*/
if (n->type == NODE_TYPE_ACTION)
{
- /* ----------
+ /*
* Create/reading digit/zero/blank/sing
- * ----------
*/
switch (n->key->id)
{
@@ -4145,9 +4093,9 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
}
else
{
- /* ----------
+
+ /*
* Remove to output char from input in TO_CHAR
- * ----------
*/
if (Np->type == TO_CHAR)
*Np->inout_p = n->character;
@@ -4169,9 +4117,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
else
*Np->number_p = '\0';
- /* ----------
+ /*
* Correction - precision of dec. number
- * ----------
*/
Np->Num->post = Np->read_post;
@@ -4213,10 +4160,9 @@ do { \
if (flag) \
pfree(format); \
\
- /* ---------- \
+ /*
* for result is allocated max memory, which current format-picture\
* needs, now it must be re-allocate to result real size \
- * ---------- \
*/ \
if (!(len = strlen(VARDATA(result)))) \
{ \
@@ -4300,9 +4246,8 @@ numeric_to_char(PG_FUNCTION_ARGS)
NUM_TOCHAR_prepare;
- /* ----------
+ /*
* On DateType depend part (numeric)
- * ----------
*/
if (IS_ROMAN(&Num))
{
@@ -4399,9 +4344,8 @@ int4_to_char(PG_FUNCTION_ARGS)
NUM_TOCHAR_prepare;
- /* ----------
+ /*
* On DateType depend part (int32)
- * ----------
*/
if (IS_ROMAN(&Num))
numstr = orgnum = int_to_roman(value);
@@ -4481,9 +4425,8 @@ int8_to_char(PG_FUNCTION_ARGS)
NUM_TOCHAR_prepare;
- /* ----------
+ /*
* On DateType depend part (int32)
- * ----------
*/
if (IS_ROMAN(&Num))
{
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 4a16741bb91..4399ae554b3 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -5,7 +5,7 @@
*
* 1998 Jan Wieck
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/numeric.c,v 1.38 2001/03/22 03:59:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/numeric.c,v 1.39 2001/03/22 06:16:17 momjian Exp $
*
* ----------
*/
@@ -201,17 +201,15 @@ numeric_in(PG_FUNCTION_ARGS)
NumericVar value;
Numeric res;
- /* ----------
+ /*
* Check for NaN
- * ----------
*/
if (strcmp(str, "NaN") == 0)
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * Use set_var_from_str() to parse the input string
- * and return it in the packed DB storage format
- * ----------
+ /*
+ * Use set_var_from_str() to parse the input string and return it in
+ * the packed DB storage format
*/
init_var(&value);
set_var_from_str(str, &value);
@@ -238,21 +236,19 @@ numeric_out(PG_FUNCTION_ARGS)
NumericVar x;
char *str;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_CSTRING(pstrdup("NaN"));
- /* ----------
+ /*
* Get the number in the variable format.
*
- * Even if we didn't need to change format, we'd still need to copy
- * the value to have a modifiable copy for rounding. set_var_from_num()
+ * Even if we didn't need to change format, we'd still need to copy the
+ * value to have a modifiable copy for rounding. set_var_from_num()
* also guarantees there is extra digit space in case we produce a
* carry out from rounding.
- * ----------
*/
init_var(&x);
set_var_from_num(num, &x);
@@ -285,17 +281,15 @@ numeric(PG_FUNCTION_ARGS)
int maxweight;
NumericVar var;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * If the value isn't a valid type modifier, simply return a
- * copy of the input value
- * ----------
+ /*
+ * If the value isn't a valid type modifier, simply return a copy of
+ * the input value
*/
if (typmod < (int32) (VARHDRSZ))
{
@@ -304,20 +298,18 @@ numeric(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(new);
}
- /* ----------
+ /*
* Get the precision and scale out of the typmod value
- * ----------
*/
tmp_typmod = typmod - VARHDRSZ;
precision = (tmp_typmod >> 16) & 0xffff;
scale = tmp_typmod & 0xffff;
maxweight = precision - scale;
- /* ----------
- * If the number is in bounds and due to the present result scale
- * no rounding could be necessary, just make a copy of the input
- * and modify its scale fields.
- * ----------
+ /*
+ * If the number is in bounds and due to the present result scale no
+ * rounding could be necessary, just make a copy of the input and
+ * modify its scale fields.
*/
if (num->n_weight < maxweight && scale >= num->n_rscale)
{
@@ -329,10 +321,9 @@ numeric(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(new);
}
- /* ----------
- * We really need to fiddle with things - unpack the number into
- * a variable and let apply_typmod() do it.
- * ----------
+ /*
+ * We really need to fiddle with things - unpack the number into a
+ * variable and let apply_typmod() do it.
*/
init_var(&var);
@@ -359,16 +350,14 @@ numeric_abs(PG_FUNCTION_ARGS)
Numeric num = PG_GETARG_NUMERIC(0);
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Do it the easy way directly on the packed format
- * ----------
*/
res = (Numeric) palloc(num->varlen);
memcpy(res, num, num->varlen);
@@ -385,25 +374,22 @@ numeric_uminus(PG_FUNCTION_ARGS)
Numeric num = PG_GETARG_NUMERIC(0);
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Do it the easy way directly on the packed format
- * ----------
*/
res = (Numeric) palloc(num->varlen);
memcpy(res, num, num->varlen);
- /* ----------
- * The packed format is known to be totally zero digit trimmed
- * always. So we can identify a ZERO by the fact that there
- * are no digits at all. Do nothing to a zero.
- * ----------
+ /*
+ * The packed format is known to be totally zero digit trimmed always.
+ * So we can identify a ZERO by the fact that there are no digits at
+ * all. Do nothing to a zero.
*/
if (num->varlen != NUMERIC_HDRSZ)
{
@@ -425,29 +411,27 @@ numeric_sign(PG_FUNCTION_ARGS)
Numeric res;
NumericVar result;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
init_var(&result);
- /* ----------
- * The packed format is known to be totally zero digit trimmed
- * always. So we can identify a ZERO by the fact that there
- * are no digits at all.
- * ----------
+ /*
+ * The packed format is known to be totally zero digit trimmed always.
+ * So we can identify a ZERO by the fact that there are no digits at
+ * all.
*/
if (num->varlen == NUMERIC_HDRSZ)
set_var_from_var(&const_zero, &result);
else
{
- /* ----------
- * And if there are some, we return a copy of ONE
- * with the sign of our argument
- * ----------
+
+ /*
+ * And if there are some, we return a copy of ONE with the sign of
+ * our argument
*/
set_var_from_var(&const_one, &result);
result.sign = NUMERIC_SIGN(num);
@@ -477,23 +461,21 @@ numeric_round(PG_FUNCTION_ARGS)
NumericVar arg;
int i;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * Limit the scale value to avoid possible overflow in calculations below.
- * ----------
+ /*
+ * Limit the scale value to avoid possible overflow in calculations
+ * below.
*/
scale = MIN(NUMERIC_MAX_RESULT_SCALE,
MAX(-NUMERIC_MAX_RESULT_SCALE, scale));
- /* ----------
+ /*
* Unpack the argument and round it at the proper digit position
- * ----------
*/
init_var(&arg);
set_var_from_num(num, &arg);
@@ -533,17 +515,15 @@ numeric_round(PG_FUNCTION_ARGS)
}
}
- /* ----------
+ /*
* Set result's scale to something reasonable.
- * ----------
*/
scale = MIN(NUMERIC_MAX_DISPLAY_SCALE, MAX(0, scale));
arg.rscale = scale;
arg.dscale = scale;
- /* ----------
+ /*
* Return the rounded result
- * ----------
*/
res = make_result(&arg);
@@ -568,40 +548,36 @@ numeric_trunc(PG_FUNCTION_ARGS)
Numeric res;
NumericVar arg;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * Limit the scale value to avoid possible overflow in calculations below.
- * ----------
+ /*
+ * Limit the scale value to avoid possible overflow in calculations
+ * below.
*/
scale = MIN(NUMERIC_MAX_RESULT_SCALE,
MAX(-NUMERIC_MAX_RESULT_SCALE, scale));
- /* ----------
+ /*
* Unpack the argument and truncate it at the proper digit position
- * ----------
*/
init_var(&arg);
set_var_from_num(num, &arg);
arg.ndigits = MIN(arg.ndigits, MAX(0, arg.weight + scale + 1));
- /* ----------
+ /*
* Set result's scale to something reasonable.
- * ----------
*/
scale = MIN(NUMERIC_MAX_DISPLAY_SCALE, MAX(0, scale));
arg.rscale = scale;
arg.dscale = scale;
- /* ----------
+ /*
* Return the truncated result
- * ----------
*/
res = make_result(&arg);
@@ -931,18 +907,16 @@ numeric_add(PG_FUNCTION_ARGS)
NumericVar result;
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * Unpack the values, let add_var() compute the result
- * and return it. The internals of add_var() will automatically
- * set the correct result and display scales in the result.
- * ----------
+ /*
+ * Unpack the values, let add_var() compute the result and return it.
+ * The internals of add_var() will automatically set the correct
+ * result and display scales in the result.
*/
init_var(&arg1);
init_var(&arg2);
@@ -978,17 +952,15 @@ numeric_sub(PG_FUNCTION_ARGS)
NumericVar result;
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * Unpack the two arguments, let sub_var() compute the
- * result and return it.
- * ----------
+ /*
+ * Unpack the two arguments, let sub_var() compute the result and
+ * return it.
*/
init_var(&arg1);
init_var(&arg2);
@@ -1024,22 +996,19 @@ numeric_mul(PG_FUNCTION_ARGS)
NumericVar result;
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * Unpack the arguments, let mul_var() compute the result
- * and return it. Unlike add_var() and sub_var(), mul_var()
- * will round the result to the scale stored in global_rscale.
- * In the case of numeric_mul(), which is invoked for the *
- * operator on numerics, we set it to the exact representation
- * for the product (rscale = sum(rscale of arg1, rscale of arg2)
- * and the same for the dscale).
- * ----------
+ /*
+ * Unpack the arguments, let mul_var() compute the result and return
+ * it. Unlike add_var() and sub_var(), mul_var() will round the result
+ * to the scale stored in global_rscale. In the case of numeric_mul(),
+ * which is invoked for the * operator on numerics, we set it to the
+ * exact representation for the product (rscale = sum(rscale of arg1,
+ * rscale of arg2) and the same for the dscale).
*/
init_var(&arg1);
init_var(&arg2);
@@ -1081,16 +1050,14 @@ numeric_div(PG_FUNCTION_ARGS)
Numeric res;
int res_dscale;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Unpack the arguments
- * ----------
*/
init_var(&arg1);
init_var(&arg2);
@@ -1122,9 +1089,8 @@ numeric_div(PG_FUNCTION_ARGS)
global_rscale = MAX(global_rscale, res_dscale + 4);
global_rscale = MIN(global_rscale, NUMERIC_MAX_RESULT_SCALE);
- /* ----------
+ /*
* Do the divide, set the display scale and return the result
- * ----------
*/
div_var(&arg1, &arg2, &result);
@@ -1192,16 +1158,14 @@ numeric_inc(PG_FUNCTION_ARGS)
NumericVar arg;
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Compute the result and return it
- * ----------
*/
init_var(&arg);
@@ -1231,16 +1195,14 @@ numeric_smaller(PG_FUNCTION_ARGS)
NumericVar arg2;
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Unpack the values, and decide which is the smaller one
- * ----------
*/
init_var(&arg1);
init_var(&arg2);
@@ -1275,16 +1237,14 @@ numeric_larger(PG_FUNCTION_ARGS)
NumericVar arg2;
Numeric res;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Unpack the values, and decide which is the larger one
- * ----------
*/
init_var(&arg1);
init_var(&arg2);
@@ -1327,17 +1287,15 @@ numeric_sqrt(PG_FUNCTION_ARGS)
NumericVar result;
int res_dscale;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
- * Unpack the argument, determine the scales like for divide,
- * let sqrt_var() do the calculation and return the result.
- * ----------
+ /*
+ * Unpack the argument, determine the scales like for divide, let
+ * sqrt_var() do the calculation and return the result.
*/
init_var(&arg);
init_var(&result);
@@ -1378,16 +1336,14 @@ numeric_exp(PG_FUNCTION_ARGS)
NumericVar result;
int res_dscale;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Same procedure like for sqrt().
- * ----------
*/
init_var(&arg);
init_var(&result);
@@ -1427,16 +1383,14 @@ numeric_ln(PG_FUNCTION_ARGS)
NumericVar result;
int res_dscale;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Same procedure like for sqrt()
- * ----------
*/
init_var(&arg);
init_var(&result);
@@ -1478,16 +1432,14 @@ numeric_log(PG_FUNCTION_ARGS)
NumericVar result;
int res_dscale;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Initialize things and calculate scales
- * ----------
*/
init_var(&arg1);
init_var(&arg2);
@@ -1501,9 +1453,8 @@ numeric_log(PG_FUNCTION_ARGS)
global_rscale = MAX(global_rscale, res_dscale + 4);
global_rscale = MIN(global_rscale, NUMERIC_MAX_RESULT_SCALE);
- /* ----------
+ /*
* Call log_var() to compute and return the result
- * ----------
*/
log_var(&arg1, &arg2, &result);
@@ -1536,16 +1487,14 @@ numeric_power(PG_FUNCTION_ARGS)
NumericVar result;
int res_dscale;
- /* ----------
+ /*
* Handle NaN
- * ----------
*/
if (NUMERIC_IS_NAN(num1) || NUMERIC_IS_NAN(num2))
PG_RETURN_NUMERIC(make_result(&const_nan));
- /* ----------
+ /*
* Initialize things and calculate scales
- * ----------
*/
init_var(&arg1);
init_var(&arg2);
@@ -1559,9 +1508,8 @@ numeric_power(PG_FUNCTION_ARGS)
global_rscale = MAX(global_rscale, res_dscale + 4);
global_rscale = MIN(global_rscale, NUMERIC_MAX_RESULT_SCALE);
- /* ----------
+ /*
* Call log_var() to compute and return the result
- * ----------
*/
power_var(&arg1, &arg2, &result);
@@ -1619,9 +1567,8 @@ numeric_int4(PG_FUNCTION_ARGS)
if (NUMERIC_IS_NAN(num))
elog(ERROR, "Cannot convert NaN to int4");
- /* ----------
+ /*
* Get the number in the variable format so we can round to integer.
- * ----------
*/
init_var(&x);
set_var_from_num(num, &x);
@@ -1670,9 +1617,8 @@ numeric_int8(PG_FUNCTION_ARGS)
if (NUMERIC_IS_NAN(num))
elog(ERROR, "Cannot convert NaN to int8");
- /* ----------
+ /*
* Get the number in the variable format so we can round to integer.
- * ----------
*/
init_var(&x);
set_var_from_num(num, &x);
@@ -1721,9 +1667,8 @@ numeric_int2(PG_FUNCTION_ARGS)
if (NUMERIC_IS_NAN(num))
elog(ERROR, "Cannot convert NaN to int2");
- /* ----------
+ /*
* Get the number in the variable format so we can round to integer.
- * ----------
*/
init_var(&x);
set_var_from_num(num, &x);
@@ -2512,10 +2457,8 @@ get_str_from_var(NumericVar *var, int dscale)
int i;
int d;
- /* ----------
- * Check if we must round up before printing the value and
- * do so.
- * ----------
+ /*
+ * Check if we must round up before printing the value and do so.
*/
i = dscale + var->weight + 1;
if (i >= 0 && var->ndigits > i)
@@ -2543,23 +2486,20 @@ get_str_from_var(NumericVar *var, int dscale)
else
var->ndigits = MAX(0, MIN(i, var->ndigits));
- /* ----------
+ /*
* Allocate space for the result
- * ----------
*/
str = palloc(MAX(0, dscale) + MAX(0, var->weight) + 4);
cp = str;
- /* ----------
+ /*
* Output a dash for negative values
- * ----------
*/
if (var->sign == NUMERIC_NEG)
*cp++ = '-';
- /* ----------
+ /*
* Output all digits before the decimal point
- * ----------
*/
i = MAX(var->weight, 0);
d = 0;
@@ -2573,10 +2513,9 @@ get_str_from_var(NumericVar *var, int dscale)
i--;
}
- /* ----------
- * If requested, output a decimal point and all the digits
- * that follow it.
- * ----------
+ /*
+ * If requested, output a decimal point and all the digits that follow
+ * it.
*/
if (dscale > 0)
{
@@ -2591,9 +2530,8 @@ get_str_from_var(NumericVar *var, int dscale)
}
}
- /* ----------
+ /*
* terminate the string and return it
- * ----------
*/
*cp = '\0';
return str;
@@ -2725,14 +2663,12 @@ apply_typmod(NumericVar *var, int32 typmod)
else
var->ndigits = MAX(0, MIN(i, var->ndigits));
- /* ----------
- * Check for overflow - note we can't do this before rounding,
- * because rounding could raise the weight. Also note that the
- * var's weight could be inflated by leading zeroes, which will
- * be stripped before storage but perhaps might not have been yet.
- * In any case, we must recognize a true zero, whose weight doesn't
- * mean anything.
- * ----------
+ /*
+ * Check for overflow - note we can't do this before rounding, because
+ * rounding could raise the weight. Also note that the var's weight
+ * could be inflated by leading zeroes, which will be stripped before
+ * storage but perhaps might not have been yet. In any case, we must
+ * recognize a true zero, whose weight doesn't mean anything.
*/
if (var->weight >= maxweight)
{
@@ -2805,28 +2741,27 @@ cmp_var(NumericVar *var1, NumericVar *var2)
static void
add_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
{
- /* ----------
+
+ /*
* Decide on the signs of the two variables what to do
- * ----------
*/
if (var1->sign == NUMERIC_POS)
{
if (var2->sign == NUMERIC_POS)
{
- /* ----------
- * Both are positive
- * result = +(ABS(var1) + ABS(var2))
- * ----------
+
+ /*
+	 * Both are positive: result = +(ABS(var1) + ABS(var2))
*/
add_abs(var1, var2, result);
result->sign = NUMERIC_POS;
}
else
{
- /* ----------
- * var1 is positive, var2 is negative
- * Must compare absolute values
- * ----------
+
+ /*
+	 * var1 is positive, var2 is negative. Must compare absolute
+ * values
*/
switch (cmp_abs(var1, var2))
{
@@ -2930,9 +2865,9 @@ add_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
static void
sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
{
- /* ----------
+
+ /*
* Decide on the signs of the two variables what to do
- * ----------
*/
if (var1->sign == NUMERIC_POS)
{
@@ -3157,17 +3092,15 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
int first_nextdigit;
int stat = 0;
- /* ----------
+ /*
* First of all division by zero check
- * ----------
*/
ndigits_tmp = var2->ndigits + 1;
if (ndigits_tmp == 1)
elog(ERROR, "division by zero on numeric");
- /* ----------
+ /*
* Determine the result sign, weight and number of digits to calculate
- * ----------
*/
if (var1->sign == var2->sign)
res_sign = NUMERIC_POS;
@@ -3178,9 +3111,8 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
if (res_ndigits <= 0)
res_ndigits = 1;
- /* ----------
+ /*
* Now result zero check
- * ----------
*/
if (var1->ndigits == 0)
{
@@ -3189,17 +3121,15 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
return;
}
- /* ----------
+ /*
* Initialize local variables
- * ----------
*/
init_var(&dividend);
for (i = 1; i < 10; i++)
init_var(&divisor[i]);
- /* ----------
+ /*
* Make a copy of the divisor which has one leading zero digit
- * ----------
*/
divisor[1].ndigits = ndigits_tmp;
divisor[1].rscale = var2->ndigits;
@@ -3209,9 +3139,8 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
divisor[1].digits[0] = 0;
memcpy(&(divisor[1].digits[1]), var2->digits, ndigits_tmp - 1);
- /* ----------
+ /*
* Make a copy of the dividend
- * ----------
*/
dividend.ndigits = var1->ndigits;
dividend.weight = 0;
@@ -3221,9 +3150,8 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
dividend.digits = dividend.buf;
memcpy(dividend.digits, var1->digits, var1->ndigits);
- /* ----------
+ /*
* Setup the result
- * ----------
*/
digitbuf_free(result->buf);
result->buf = digitbuf_alloc(res_ndigits + 2);
@@ -3356,7 +3284,7 @@ mod_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
init_var(&tmp);
- /* ----------
+	/* ----------
* We do this using the equation
* mod(x,y) = x - trunc(x/y)*y
* We fiddle a bit with global_rscale to control result precision.
@@ -3470,9 +3398,8 @@ sqrt_var(NumericVar *arg, NumericVar *result)
set_var_from_var(arg, &tmp_arg);
set_var_from_var(result, &last_val);
- /* ----------
+ /*
* Initialize the result to the first guess
- * ----------
*/
digitbuf_free(result->buf);
result->buf = digitbuf_alloc(1);
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index f548775ad2f..d92d969d40e 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -1,7 +1,7 @@
/* ----------
* pg_lzcompress.c -
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.10 2001/03/22 03:59:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.11 2001/03/22 06:16:17 momjian Exp $
*
* This is an implementation of LZ compression for PostgreSQL.
* It uses a simple history table and generates 2-3 byte tags
@@ -383,36 +383,33 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
char *ip;
char *hp;
- /* ----------
- * Traverse the linked history list until a good enough
- * match is found.
- * ----------
+ /*
+ * Traverse the linked history list until a good enough match is
+ * found.
*/
hent = hstart[pglz_hist_idx(input, end)];
while (hent && len < good_match)
{
- /* ----------
+
+ /*
* Be happy with lesser good matches the more entries we visited.
- * ----------
*/
good_match -= (good_match * good_drop) / 100;
- /* ----------
+ /*
* Stop if the offset does not fit into our tag anymore.
- * ----------
*/
thisoff = (ip = input) - (hp = hent->pos);
if (thisoff >= 0x0fff)
break;
- /* ----------
+ /*
* Determine length of match. A better match must be larger than
* the best so far. And if we already have a match of 16 or more
- * bytes, it's worth the call overhead to use memcmp() to check
- * if this match is equal for the same size. After that we must
+ * bytes, it's worth the call overhead to use memcmp() to check if
+ * this match is equal for the same size. After that we must
* fallback to character by character comparision to know the
* exact position where the diff occured.
- * ----------
*/
if (len >= 16)
{
@@ -434,9 +431,8 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
hp++;
}
- /* ----------
+ /*
* Remember this match as the best (if it is)
- * ----------
*/
if (thislen > len)
{
@@ -444,17 +440,15 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
off = thisoff;
}
- /* ----------
+ /*
* Advance to the next history entry
- * ----------
*/
hent = hent->next;
}
- /* ----------
- * Return match information only if it results at least in one
- * byte reduction.
- * ----------
+ /*
+ * Return match information only if it results at least in one byte
+ * reduction.
*/
if (len > 2)
{
@@ -495,23 +489,20 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
int32 result_max;
int32 need_rate;
- /* ----------
+ /*
* Our fallback strategy is the default.
- * ----------
*/
if (strategy == NULL)
strategy = PGLZ_strategy_default;
- /* ----------
+ /*
* Save the original source size in the header.
- * ----------
*/
dest->rawsize = slen;
- /* ----------
+ /*
* If the strategy forbids compression (at all or if source chunk too
* small), copy input to output without compression.
- * ----------
*/
if (strategy->match_size_good == 0)
{
@@ -527,9 +518,8 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
}
}
- /* ----------
+ /*
* Limit the match size to the maximum implementation allowed value
- * ----------
*/
if ((good_match = strategy->match_size_good) > PGLZ_MAX_MATCH)
good_match = PGLZ_MAX_MATCH;
@@ -541,22 +531,19 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
if (good_drop > 100)
good_drop = 100;
- /* ----------
+ /*
* Initialize the history tables. For inputs smaller than
- * PGLZ_HISTORY_SIZE, we already have a big enough history
- * table on the stack frame.
- * ----------
+ * PGLZ_HISTORY_SIZE, we already have a big enough history table on
+ * the stack frame.
*/
memset((void *) hist_start, 0, sizeof(hist_start));
memset((void *) hist_entries, 0, sizeof(hist_entries));
- /* ----------
- * Compute the maximum result size allowed by the strategy.
- * If the input size exceeds force_input_size, the max result size
- * is the input size itself.
- * Otherwise, it is the input size minus the minimum wanted
- * compression rate.
- * ----------
+ /*
+ * Compute the maximum result size allowed by the strategy. If the
+ * input size exceeds force_input_size, the max result size is the
+ * input size itself. Otherwise, it is the input size minus the
+ * minimum wanted compression rate.
*/
if (slen >= strategy->force_input_size)
result_max = slen;
@@ -570,16 +557,15 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
result_max = slen - ((slen * need_rate) / 100);
}
- /* ----------
+ /*
* Compress the source directly into the output buffer.
- * ----------
*/
while (dp < dend)
{
- /* ----------
- * If we already exceeded the maximum result size, set no compression
- * flag and stop this. But don't check too often.
- * ----------
+
+ /*
+ * If we already exceeded the maximum result size, set no
+ * compression flag and stop this. But don't check too often.
*/
if (bp - bstart >= result_max)
{
@@ -587,17 +573,16 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
break;
}
- /* ----------
+ /*
* Try to find a match in the history
- * ----------
*/
if (pglz_find_match(hist_start, dp, dend, &match_len,
&match_off, good_match, good_drop))
{
- /* ----------
- * Create the tag and add history entries for
- * all matched characters.
- * ----------
+
+ /*
+ * Create the tag and add history entries for all matched
+ * characters.
*/
pglz_out_tag(ctrlp, ctrlb, ctrl, bp, match_len, match_off);
while (match_len--)
@@ -609,9 +594,9 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
}
else
{
- /* ----------
+
+ /*
* No match found. Copy one literal byte.
- * ----------
*/
pglz_out_literal(ctrlp, ctrlb, ctrl, bp, *dp);
pglz_hist_add(hist_start, hist_entries, hist_next, dp, dend);
@@ -620,11 +605,10 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
}
}
- /* ----------
- * If we are still in compressing mode, write out the last
- * control byte and determine if the compression gained the
- * rate requested by the strategy.
- * ----------
+ /*
+ * If we are still in compressing mode, write out the last control
+ * byte and determine if the compression gained the rate requested by
+ * the strategy.
*/
if (do_compress)
{
@@ -635,12 +619,10 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
do_compress = 0;
}
- /* ----------
- * Done - if we successfully compressed and matched the
- * strategy's constraints, return the compressed result.
- * Otherwise copy the original source over it and return
- * the original length.
- * ----------
+ /*
+ * Done - if we successfully compressed and matched the strategy's
+ * constraints, return the compressed result. Otherwise copy the
+ * original source over it and return the original length.
*/
if (do_compress)
{
@@ -685,22 +667,22 @@ pglz_decompress(PGLZ_Header *source, char *dest)
while (dp < dend)
{
- /* ----------
+
+ /*
* Read one control byte and process the next 8 items.
- * ----------
*/
ctrl = *dp++;
for (ctrlc = 0; ctrlc < 8 && dp < dend; ctrlc++)
{
if (ctrl & 1)
{
- /* ----------
- * Otherwise it contains the match length minus 3
- * and the upper 4 bits of the offset. The next following
- * byte contains the lower 8 bits of the offset. If
- * the length is coded as 18, another extension tag byte
- * tells how much longer the match really was (0-255).
- * ----------
+
+ /*
+ * Otherwise it contains the match length minus 3 and the
+ * upper 4 bits of the offset. The next following byte
+ * contains the lower 8 bits of the offset. If the length
+ * is coded as 18, another extension tag byte tells how
+ * much longer the match really was (0-255).
*/
len = (dp[0] & 0x0f) + 3;
off = ((dp[0] & 0xf0) << 4) | dp[1];
@@ -708,12 +690,11 @@ pglz_decompress(PGLZ_Header *source, char *dest)
if (len == 18)
len += *dp++;
- /* ----------
- * Now we copy the bytes specified by the tag from
- * OUTPUT to OUTPUT. It is dangerous and platform
- * dependant to use memcpy() here, because the copied
- * areas could overlap extremely!
- * ----------
+ /*
+ * Now we copy the bytes specified by the tag from OUTPUT
+	 * to OUTPUT. It is dangerous and platform dependent to
+ * use memcpy() here, because the copied areas could
+ * overlap extremely!
*/
while (len--)
{
@@ -723,25 +704,23 @@ pglz_decompress(PGLZ_Header *source, char *dest)
}
else
{
- /* ----------
- * An unset control bit means LITERAL BYTE. So we
- * just copy one from INPUT to OUTPUT.
- * ----------
+
+ /*
+ * An unset control bit means LITERAL BYTE. So we just
+ * copy one from INPUT to OUTPUT.
*/
*bp++ = *dp++;
}
- /* ----------
+ /*
* Advance the control bit
- * ----------
*/
ctrl >>= 1;
}
}
- /* ----------
+ /*
* That's it.
- * ----------
*/
return (char *) bp - dest;
}
@@ -761,11 +740,10 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->tocopy > 0)
{
- /* ----------
- * Copy one byte from output to output until we did it
- * for the length specified by the last tag. Return that
- * byte.
- * ----------
+
+ /*
+ * Copy one byte from output to output until we did it for the
+ * length specified by the last tag. Return that byte.
*/
dstate->tocopy--;
return (*(dstate->cp_out++) = *(dstate->cp_copy++));
@@ -773,25 +751,24 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->ctrl_count == 0)
{
- /* ----------
- * Get the next control byte if we need to, but check
- * for EOF before.
- * ----------
+
+ /*
+ * Get the next control byte if we need to, but check for EOF
+ * before.
*/
if (dstate->cp_in == dstate->cp_end)
return EOF;
- /* ----------
- * This decompression method saves time only, if we stop near
- * the beginning of the data (maybe because we're called by a
+ /*
+	 * This decompression method saves time only if we stop near the
+ * beginning of the data (maybe because we're called by a
* comparision function and a difference occurs early). Otherwise,
* all the checks, needed here, cause too much overhead.
*
* Thus we decompress the entire rest at once into the temporary
- * buffer and change the decomp state to return the prepared
- * data from the buffer by the more simple calls to
+ * buffer and change the decomp state to return the prepared data
+ * from the buffer by the more simple calls to
* pglz_get_next_decomp_char_from_plain().
- * ----------
*/
if (dstate->cp_out - dstate->temp_buf >= 256)
{
@@ -838,32 +815,29 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
return (int) (*(dstate->cp_in++));
}
- /* ----------
+ /*
* Not yet, get next control byte into decomp state.
- * ----------
*/
dstate->ctrl = (unsigned char) (*(dstate->cp_in++));
dstate->ctrl_count = 8;
}
- /* ----------
+ /*
* Check for EOF in tag/literal byte data.
- * ----------
*/
if (dstate->cp_in == dstate->cp_end)
return EOF;
- /* ----------
+ /*
* Handle next control bit.
- * ----------
*/
dstate->ctrl_count--;
if (dstate->ctrl & 0x01)
{
- /* ----------
- * Bit is set, so tag is following. Setup copy information
- * and do the copy for the first byte as above.
- * ----------
+
+ /*
+ * Bit is set, so tag is following. Setup copy information and do
+ * the copy for the first byte as above.
*/
int off;
@@ -879,9 +853,9 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
}
else
{
- /* ----------
+
+ /*
* Bit is unset, so literal byte follows.
- * ----------
*/
retval = (int) (*(dstate->cp_out++) = *(dstate->cp_in++));
}
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index db7f67ec601..6294eb086cf 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -18,7 +18,7 @@
* Portions Copyright (c) 2000-2001, PostgreSQL Global Development Group
* Copyright 1999 Jan Wieck
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.22 2001/03/22 03:59:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.23 2001/03/22 06:16:17 momjian Exp $
*
* ----------
*/
@@ -177,9 +177,9 @@ RI_FKey_check(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_check() not fired by trigger manager");
@@ -190,9 +190,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
elog(ERROR, "RI_FKey_check() must be fired for INSERT or UPDATE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -202,10 +201,9 @@ RI_FKey_check(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_check()",
RI_MAX_NUMKEYS);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the new tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the new
+ * tuple.
*/
fk_rel = trigdata->tg_relation;
pk_rel = heap_openr(tgargs[RI_PK_RELNAME_ARGNO], NoLock);
@@ -242,7 +240,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
{
char querystr[8192];
- /* ----------
+	/* ----------
* The query string built is
* SELECT oid FROM ONLY <pktable>
* ----------
@@ -251,18 +249,16 @@ RI_FKey_check(PG_FUNCTION_ARGS)
tgargs[RI_PK_RELNAME_ARGNO],
tgargs[RI_PK_RELNAME_ARGNO]);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, 0, NULL);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* Execute the plan
- * ----------
*/
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_check()");
@@ -304,30 +300,30 @@ RI_FKey_check(PG_FUNCTION_ARGS)
switch (ri_NullCheck(fk_rel, new_row, &qkey, RI_KEYPAIR_FK_IDX))
{
case RI_KEYS_ALL_NULL:
- /* ----------
- * No check - if NULLs are allowed at all is
- * already checked by NOT NULL constraint.
+
+ /*
+ * No check - if NULLs are allowed at all is already checked
+ * by NOT NULL constraint.
*
- * This is true for MATCH FULL, MATCH PARTIAL, and
- * MATCH <unspecified>
- * ----------
+ * This is true for MATCH FULL, MATCH PARTIAL, and MATCH
+ * <unspecified>
*/
heap_close(pk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_SOME_NULL:
- /* ----------
- * This is the only case that differs between the
- * three kinds of MATCH.
- * ----------
+
+ /*
+ * This is the only case that differs between the three kinds
+ * of MATCH.
*/
switch (match_type)
{
case RI_MATCH_TYPE_FULL:
- /* ----------
- * Not allowed - MATCH FULL says either all or none
- * of the attributes can be NULLs
- * ----------
+
+ /*
+ * Not allowed - MATCH FULL says either all or none of
+ * the attributes can be NULLs
*/
elog(ERROR, "%s referential integrity violation - "
"MATCH FULL doesn't allow mixing of NULL "
@@ -337,21 +333,21 @@ RI_FKey_check(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
case RI_MATCH_TYPE_UNSPECIFIED:
- /* ----------
+
+ /*
* MATCH <unspecified> - if ANY column is null, we
* have a match.
- * ----------
*/
heap_close(pk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_MATCH_TYPE_PARTIAL:
- /* ----------
+
+ /*
* MATCH PARTIAL - all non-null columns must match.
- * (not implemented, can be done by modifying the query
- * below to only include non-null columns, or by
+ * (not implemented, can be done by modifying the
+ * query below to only include non-null columns, or by
* writing a special version here)
- * ----------
*/
elog(ERROR, "MATCH PARTIAL not yet implemented");
heap_close(pk_rel, NoLock);
@@ -359,30 +355,27 @@ RI_FKey_check(PG_FUNCTION_ARGS)
}
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below for all three
* kinds of MATCH.
- * ----------
*/
break;
}
- /* ----------
- * Note:
- * We cannot avoid the check on UPDATE, even if old and new
- * key are the same. Otherwise, someone could DELETE the PK
- * that consists of the DEFAULT values, and if there are any
- * references, a ON DELETE SET DEFAULT action would update
- * the references to exactly these values but we wouldn't see
- * that weired case (this is the only place to see it).
- * ----------
+ /*
+ * Note: We cannot avoid the check on UPDATE, even if old and new key
+ * are the same. Otherwise, someone could DELETE the PK that consists
+	 * of the DEFAULT values, and if there are any references, an ON DELETE
+	 * SET DEFAULT action would update the references to exactly these
+	 * values but we wouldn't see that weird case (this is the only place
+ * to see it).
*/
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_check()");
- /* ----------
+ /*
* Fetch or prepare a saved plan for the real check
- * ----------
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -416,28 +409,26 @@ RI_FKey_check(PG_FUNCTION_ARGS)
tgargs[RI_PK_RELNAME_ARGNO]);
strcat(querystr, buf);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
- * We have a plan now. Build up the arguments for SPI_execp()
- * from the key values in the new FK tuple.
- * ----------
+ /*
+ * We have a plan now. Build up the arguments for SPI_execp() from the
+ * key values in the new FK tuple.
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
- /* ----------
+
+ /*
* We can implement MATCH PARTIAL by excluding this column from
* the query if it is null. Simple! Unfortunately, the
- * referential actions aren't so I've not bothered to do so
- * for the moment.
- * ----------
+ * referential actions aren't so I've not bothered to do so for
+ * the moment.
*/
check_values[i] = SPI_getbinval(new_row,
@@ -451,9 +442,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
}
check_nulls[i] = '\0';
- /* ----------
+ /*
* Now check that foreign key exists in PK table
- * ----------
*/
SetUserId(RelationGetForm(pk_rel)->relowner);
@@ -477,9 +467,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #1 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -541,9 +530,9 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_noaction_del() not fired by trigger manager");
@@ -553,9 +542,8 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
elog(ERROR, "RI_FKey_noaction_del() must be fired for DELETE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -565,17 +553,15 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_noaction_del()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the old
+ * tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -601,18 +587,18 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No check - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
@@ -621,10 +607,9 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_noaction_del()");
- /* ----------
+ /*
* Fetch or prepare a saved plan for the restrict delete
* lookup if foreign references exist
- * ----------
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -658,19 +643,17 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
tgargs[RI_FK_RELNAME_ARGNO]);
strcat(querystr, buf);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the deleted PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -685,9 +668,8 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
}
del_nulls[i] = '\0';
- /* ----------
+ /*
* Now check for existing references
- * ----------
*/
SetUserId(RelationGetForm(pk_rel)->relowner);
@@ -708,18 +690,16 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL restrict delete.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #2 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -756,9 +736,9 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_noaction_upd() not fired by trigger manager");
@@ -768,9 +748,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
elog(ERROR, "RI_FKey_noaction_upd() must be fired for UPDATE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -780,17 +759,15 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_noaction_upd()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the new and old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the new
+ * and old tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -817,26 +794,25 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No check - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
heap_close(fk_rel, NoLock);
- /* ----------
+ /*
* No need to check anything if old and new keys are equal
- * ----------
*/
if (ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX))
@@ -845,10 +821,9 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_noaction_upd()");
- /* ----------
+ /*
* Fetch or prepare a saved plan for the noaction update
* lookup if foreign references exist
- * ----------
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -882,19 +857,17 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
tgargs[RI_FK_RELNAME_ARGNO]);
strcat(querystr, buf);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the updated PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -909,9 +882,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
}
upd_nulls[i] = '\0';
- /* ----------
+ /*
* Now check for existing references
- * ----------
*/
SetUserId(RelationGetForm(pk_rel)->relowner);
@@ -932,18 +904,16 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL noaction update.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #3 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -974,9 +944,9 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_cascade_del() not fired by trigger manager");
@@ -986,9 +956,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
elog(ERROR, "RI_FKey_cascade_del() must be fired for DELETE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -998,17 +967,15 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_cascade_del()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the old
+ * tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -1034,18 +1001,18 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No check - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
@@ -1054,9 +1021,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_cascade_del()");
- /* ----------
+ /*
* Fetch or prepare a saved plan for the cascaded delete
- * ----------
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -1087,19 +1053,17 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the deleted PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -1114,9 +1078,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
}
del_nulls[i] = '\0';
- /* ----------
+ /*
* Now delete constraint
- * ----------
*/
if (SPI_execp(qplan, del_values, del_nulls, 0) != SPI_OK_DELETE)
elog(ERROR, "SPI_execp() failed in RI_FKey_cascade_del()");
@@ -1126,18 +1089,16 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL cascaded delete.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #4 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -1170,9 +1131,9 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_cascade_upd() not fired by trigger manager");
@@ -1182,9 +1143,8 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
elog(ERROR, "RI_FKey_cascade_upd() must be fired for UPDATE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -1194,17 +1154,15 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_cascade_upd()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the new and old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the new
+ * and old tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -1231,26 +1189,25 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No update - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
heap_close(fk_rel, NoLock);
- /* ----------
+ /*
* No need to do anything if old and new keys are equal
- * ----------
*/
if (ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX))
@@ -1259,10 +1216,9 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_cascade_upd()");
- /* ----------
- * Fetch or prepare a saved plan for the cascaded update
- * of foreign references
- * ----------
+ /*
+ * Fetch or prepare a saved plan for the cascaded update of
+ * foreign references
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -1304,19 +1260,17 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
}
strcat(querystr, qualstr);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs * 2, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the updated PK tuple.
- * ----------
*/
for (i = 0, j = qkey.nkeypairs; i < qkey.nkeypairs; i++, j++)
{
@@ -1340,9 +1294,8 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
}
upd_nulls[j] = '\0';
- /* ----------
+ /*
* Now update the existing references
- * ----------
*/
if (SPI_execp(qplan, upd_values, upd_nulls, 0) != SPI_OK_UPDATE)
elog(ERROR, "SPI_execp() failed in RI_FKey_cascade_upd()");
@@ -1352,18 +1305,16 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL cascade update.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #5 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -1401,9 +1352,9 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_restrict_del() not fired by trigger manager");
@@ -1413,9 +1364,8 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
elog(ERROR, "RI_FKey_restrict_del() must be fired for DELETE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -1425,17 +1375,15 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_restrict_del()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the old
+ * tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -1461,18 +1409,18 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No check - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
@@ -1481,10 +1429,9 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_restrict_del()");
- /* ----------
+ /*
* Fetch or prepare a saved plan for the restrict delete
* lookup if foreign references exist
- * ----------
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -1518,19 +1465,17 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
tgargs[RI_FK_RELNAME_ARGNO]);
strcat(querystr, buf);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the deleted PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -1545,9 +1490,8 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
}
del_nulls[i] = '\0';
- /* ----------
+ /*
* Now check for existing references
- * ----------
*/
if (SPI_execp(qplan, del_values, del_nulls, 1) != SPI_OK_SELECT)
elog(ERROR, "SPI_execp() failed in RI_FKey_restrict_del()");
@@ -1564,18 +1508,16 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL restrict delete.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #6 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -1617,9 +1559,9 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_restrict_upd() not fired by trigger manager");
@@ -1629,9 +1571,8 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
elog(ERROR, "RI_FKey_restrict_upd() must be fired for UPDATE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -1641,17 +1582,15 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_restrict_upd()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the new and old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the new
+ * and old tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -1678,26 +1617,25 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No check - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
heap_close(fk_rel, NoLock);
- /* ----------
+ /*
* No need to check anything if old and new keys are equal
- * ----------
*/
if (ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX))
@@ -1706,10 +1644,9 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_restrict_upd()");
- /* ----------
+ /*
* Fetch or prepare a saved plan for the restrict update
* lookup if foreign references exist
- * ----------
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -1743,19 +1680,17 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
tgargs[RI_FK_RELNAME_ARGNO]);
strcat(querystr, buf);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the updated PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -1770,9 +1705,8 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
}
upd_nulls[i] = '\0';
- /* ----------
+ /*
* Now check for existing references
- * ----------
*/
SetUserId(RelationGetForm(pk_rel)->relowner);
@@ -1793,18 +1727,16 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL restrict update.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #7 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -1835,9 +1767,9 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_setnull_del() not fired by trigger manager");
@@ -1847,9 +1779,8 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
elog(ERROR, "RI_FKey_setnull_del() must be fired for DELETE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -1859,17 +1790,15 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_setnull_del()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the old
+ * tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -1895,18 +1824,18 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No update - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
@@ -1915,10 +1844,9 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_setnull_del()");
- /* ----------
+ /*
* Fetch or prepare a saved plan for the set null delete
* operation
- * ----------
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
@@ -1959,19 +1887,17 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
}
strcat(querystr, qualstr);
- /* ----------
+ /*
* Prepare, save and remember the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
qplan = SPI_saveplan(qplan);
ri_HashPreparedPlan(&qkey, qplan);
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the updated PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -1986,9 +1912,8 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
}
upd_nulls[i] = '\0';
- /* ----------
+ /*
* Now update the existing references
- * ----------
*/
if (SPI_execp(qplan, upd_values, upd_nulls, 0) != SPI_OK_UPDATE)
elog(ERROR, "SPI_execp() failed in RI_FKey_setnull_del()");
@@ -1998,18 +1923,16 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL set null delete.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #8 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -2043,9 +1966,9 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_setnull_upd() not fired by trigger manager");
@@ -2055,9 +1978,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
elog(ERROR, "RI_FKey_setnull_upd() must be fired for UPDATE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -2067,17 +1989,15 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_setnull_upd()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the old
+ * tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -2105,27 +2025,26 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No update - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
heap_close(fk_rel, NoLock);
- /* ----------
+ /*
* No need to do anything if old and new keys are equal
- * ----------
*/
if (ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX))
@@ -2152,10 +2071,9 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
ri_AllKeysUnequal(pk_rel, old_row, new_row,
&qkey, RI_KEYPAIR_PK_IDX);
- /* ----------
+ /*
* Fetch or prepare a saved plan for the set null update
* operation if possible, or build a temporary plan if not.
- * ----------
*/
if (!use_cached_query ||
(qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
@@ -2207,9 +2125,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
}
strcat(querystr, qualstr);
- /* ----------
+ /*
* Prepare the new plan.
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
@@ -2224,10 +2141,9 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
}
}
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the updated PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -2242,9 +2158,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
}
upd_nulls[i] = '\0';
- /* ----------
+ /*
* Now update the existing references
- * ----------
*/
if (SPI_execp(qplan, upd_values, upd_nulls, 0) != SPI_OK_UPDATE)
elog(ERROR, "SPI_execp() failed in RI_FKey_setnull_upd()");
@@ -2254,18 +2169,16 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL set null update.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #9 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -2296,9 +2209,9 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_setdefault_del() not fired by trigger manager");
@@ -2308,9 +2221,8 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
elog(ERROR, "RI_FKey_setdefault_del() must be fired for DELETE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -2320,17 +2232,15 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_setdefault_del()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the old
+ * tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -2356,18 +2266,18 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No update - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
@@ -2375,12 +2285,10 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_setdefault_del()");
- /* ----------
+ /*
* Prepare a plan for the set default delete operation.
- * Unfortunately we need to do it on every invocation
- * because the default value could potentially change
- * between calls.
- * ----------
+ * Unfortunately we need to do it on every invocation because
+ * the default value could potentially change between calls.
*/
{
char buf[256];
@@ -2425,9 +2333,8 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
}
strcat(querystr, qualstr);
- /* ----------
+ /*
* Prepare the plan
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
@@ -2449,20 +2356,20 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
defval = NULL;
for (i = 0; i < qkey.nkeypairs && defval != NULL; i++)
{
- /* ----------
+
+ /*
* For each key attribute lookup the tuple constructor
* for a corresponding default value
- * ----------
*/
for (j = 0; j < fk_rel->rd_att->constr->num_defval; j++)
{
if (defval[j].adnum ==
qkey.keypair[i][RI_KEYPAIR_FK_IDX])
{
- /* ----------
- * That's the one - push the expression
- * from defval.adbin into the plan's targetlist
- * ----------
+
+ /*
+ * That's the one - push the expression from
+ * defval.adbin into the plan's targetlist
*/
spi_qptle = (TargetEntry *)
nth(defval[j].adnum - 1,
@@ -2477,10 +2384,9 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
/* fk_rel is no longer needed OK ? */
heap_close(fk_rel, NoLock);
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the deleted PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -2495,9 +2401,8 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
}
upd_nulls[i] = '\0';
- /* ----------
+ /*
* Now update the existing references
- * ----------
*/
if (SPI_execp(qplan, upd_values, upd_nulls, 0) != SPI_OK_UPDATE)
elog(ERROR, "SPI_execp() failed in RI_FKey_setdefault_del()");
@@ -2507,18 +2412,16 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL set default delete.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #10 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -2551,9 +2454,9 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
ReferentialIntegritySnapshotOverride = true;
- /* ----------
- * Check that this is a valid trigger call on the right time and event.
- * ----------
+ /*
+ * Check that this is a valid trigger call on the right time and
+ * event.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
elog(ERROR, "RI_FKey_setdefault_upd() not fired by trigger manager");
@@ -2563,9 +2466,8 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
elog(ERROR, "RI_FKey_setdefault_upd() must be fired for UPDATE");
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -2575,17 +2477,15 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_setdefault_upd()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return PointerGetDatum(NULL);
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the old
+ * tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -2614,25 +2514,24 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
{
case RI_KEYS_ALL_NULL:
case RI_KEYS_SOME_NULL:
- /* ----------
+
+ /*
* No update - MATCH FULL means there cannot be any
* reference to old key if it contains NULL
- * ----------
*/
heap_close(fk_rel, NoLock);
return PointerGetDatum(NULL);
case RI_KEYS_NONE_NULL:
- /* ----------
+
+ /*
* Have a full qualified key - continue below
- * ----------
*/
break;
}
- /* ----------
+ /*
* No need to do anything if old and new keys are equal
- * ----------
*/
if (ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX))
@@ -2641,12 +2540,10 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
if (SPI_connect() != SPI_OK_CONNECT)
elog(NOTICE, "SPI_connect() failed in RI_FKey_setdefault_upd()");
- /* ----------
+ /*
* Prepare a plan for the set default update operation.
- * Unfortunately we need to do it on every invocation
- * because the default value could potentially change
- * between calls.
- * ----------
+ * Unfortunately we need to do it on every invocation because
+ * the default value could potentially change between calls.
*/
{
char buf[256];
@@ -2701,17 +2598,15 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
}
strcat(querystr, qualstr);
- /* ----------
+ /*
* Prepare the plan
- * ----------
*/
qplan = SPI_prepare(querystr, qkey.nkeypairs, queryoids);
- /* ----------
- * Now replace the CONST NULL targetlist expressions
- * in the generated plan by (any) default values found
- * in the tuple constructor.
- * ----------
+ /*
+ * Now replace the CONST NULL targetlist expressions in
+ * the generated plan by (any) default values found in the
+ * tuple constructor.
*/
spi_plan = (Plan *) lfirst(((_SPI_plan *) qplan)->ptlist);
if (fk_rel->rd_att->constr != NULL)
@@ -2731,20 +2626,21 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
!ri_OneKeyEqual(pk_rel, i, old_row,
new_row, &qkey, RI_KEYPAIR_PK_IDX))
{
- /* ----------
- * For each key attribute lookup the tuple constructor
- * for a corresponding default value
- * ----------
+
+ /*
+ * For each key attribute lookup the tuple
+ * constructor for a corresponding default value
*/
for (j = 0; j < fk_rel->rd_att->constr->num_defval; j++)
{
if (defval[j].adnum ==
qkey.keypair[i][RI_KEYPAIR_FK_IDX])
{
- /* ----------
+
+ /*
* That's the one - push the expression
- * from defval.adbin into the plan's targetlist
- * ----------
+ * from defval.adbin into the plan's
+ * targetlist
*/
spi_qptle = (TargetEntry *)
nth(defval[j].adnum - 1,
@@ -2760,10 +2656,9 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
/* fk_rel is no longer needed OK ? */
heap_close(fk_rel, NoLock);
- /* ----------
+ /*
* We have a plan now. Build up the arguments for SPI_execp()
* from the key values in the deleted PK tuple.
- * ----------
*/
for (i = 0; i < qkey.nkeypairs; i++)
{
@@ -2778,9 +2673,8 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
}
upd_nulls[i] = '\0';
- /* ----------
+ /*
* Now update the existing references
- * ----------
*/
if (SPI_execp(qplan, upd_values, upd_nulls, 0) != SPI_OK_UPDATE)
elog(ERROR, "SPI_execp() failed in RI_FKey_setdefault_upd()");
@@ -2790,18 +2684,16 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
- /* ----------
+ /*
* Handle MATCH PARTIAL set default update.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
return PointerGetDatum(NULL);
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #11 in ri_triggers.c");
return PointerGetDatum(NULL);
@@ -2828,9 +2720,8 @@ RI_FKey_keyequal_upd(TriggerData *trigdata)
HeapTuple old_row;
RI_QueryKey qkey;
- /* ----------
+ /*
* Check for the correct # of call arguments
- * ----------
*/
tgnargs = trigdata->tg_trigger->tgnargs;
tgargs = trigdata->tg_trigger->tgargs;
@@ -2840,17 +2731,15 @@ RI_FKey_keyequal_upd(TriggerData *trigdata)
elog(ERROR, "too many keys (%d max) in call to RI_FKey_keyequal_upd()",
RI_MAX_NUMKEYS);
- /* ----------
+ /*
* Nothing to do if no column names to compare given
- * ----------
*/
if (tgnargs == 4)
return true;
- /* ----------
- * Get the relation descriptors of the FK and PK tables and
- * the new and old tuple.
- * ----------
+ /*
+ * Get the relation descriptors of the FK and PK tables and the new
+ * and old tuple.
*/
fk_rel = heap_openr(tgargs[RI_FK_RELNAME_ARGNO], NoLock);
pk_rel = trigdata->tg_relation;
@@ -2859,9 +2748,9 @@ RI_FKey_keyequal_upd(TriggerData *trigdata)
switch (ri_DetermineMatchType(tgargs[RI_MATCH_TYPE_ARGNO]))
{
- /* ----------
+
+ /*
* MATCH <UNSPECIFIED>
- * ----------
*/
case RI_MATCH_TYPE_UNSPECIFIED:
case RI_MATCH_TYPE_FULL:
@@ -2871,25 +2760,22 @@ RI_FKey_keyequal_upd(TriggerData *trigdata)
tgnargs, tgargs);
heap_close(fk_rel, NoLock);
- /* ----------
+ /*
* Return whether the keys are equal
- * ----------
*/
return ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX);
- /* ----------
+ /*
* Handle MATCH PARTIAL key equality check.
- * ----------
*/
case RI_MATCH_TYPE_PARTIAL:
elog(ERROR, "MATCH PARTIAL not yet supported");
break;
}
- /* ----------
+ /*
* Never reached
- * ----------
*/
elog(ERROR, "internal error #12 in ri_triggers.c");
return false;
@@ -2955,9 +2841,8 @@ ri_BuildQueryKeyFull(RI_QueryKey *key, Oid constr_id, int32 constr_queryno,
int j;
int fno;
- /* ----------
+ /*
* Initialize the key and fill in type, oid's and number of keypairs
- * ----------
*/
memset((void *) key, 0, sizeof(RI_QueryKey));
key->constr_type = RI_MATCH_TYPE_FULL;
@@ -2967,10 +2852,9 @@ ri_BuildQueryKeyFull(RI_QueryKey *key, Oid constr_id, int32 constr_queryno,
key->pk_relid = pk_rel->rd_id;
key->nkeypairs = (argc - RI_FIRST_ATTNAME_ARGNO) / 2;
- /* ----------
+ /*
* Lookup the attribute numbers of the arguments to the trigger call
* and fill in the keypairs.
- * ----------
*/
for (i = 0, j = RI_FIRST_ATTNAME_ARGNO; j < argc; i++, j += 2)
{
@@ -3070,16 +2954,14 @@ ri_FetchPreparedPlan(RI_QueryKey *key)
RI_QueryHashEntry *entry;
bool found;
- /* ----------
+ /*
* On the first call initialize the hashtable
- * ----------
*/
if (!ri_query_cache)
ri_InitHashTables();
- /* ----------
+ /*
* Lookup for the key
- * ----------
*/
entry = (RI_QueryHashEntry *) hash_search(ri_query_cache,
(char *) key, HASH_FIND, &found);
@@ -3103,16 +2985,14 @@ ri_HashPreparedPlan(RI_QueryKey *key, void *plan)
RI_QueryHashEntry *entry;
bool found;
- /* ----------
+ /*
* On the first call initialize the hashtable
- * ----------
*/
if (!ri_query_cache)
ri_InitHashTables();
- /* ----------
+ /*
* Add the new plan.
- * ----------
*/
entry = (RI_QueryHashEntry *) hash_search(ri_query_cache,
(char *) key, HASH_ENTER, &found);
@@ -3140,28 +3020,26 @@ ri_KeysEqual(Relation rel, HeapTuple oldtup, HeapTuple newtup,
for (i = 0; i < key->nkeypairs; i++)
{
- /* ----------
+
+ /*
* Get one attributes oldvalue. If it is NULL - they're not equal.
- * ----------
*/
oldvalue = SPI_getbinval(oldtup, rel->rd_att,
key->keypair[i][pairidx], &isnull);
if (isnull)
return false;
- /* ----------
+ /*
* Get one attributes newvalue. If it is NULL - they're not equal.
- * ----------
*/
newvalue = SPI_getbinval(newtup, rel->rd_att,
key->keypair[i][pairidx], &isnull);
if (isnull)
return false;
- /* ----------
- * Get the attributes type OID and call the '=' operator
- * to compare the values.
- * ----------
+ /*
+ * Get the attributes type OID and call the '=' operator to
+ * compare the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3192,28 +3070,26 @@ ri_AllKeysUnequal(Relation rel, HeapTuple oldtup, HeapTuple newtup,
keys_unequal = true;
for (i = 0; keys_unequal && i < key->nkeypairs; i++)
{
- /* ----------
+
+ /*
* Get one attributes oldvalue. If it is NULL - they're not equal.
- * ----------
*/
oldvalue = SPI_getbinval(oldtup, rel->rd_att,
key->keypair[i][pairidx], &isnull);
if (isnull)
continue;
- /* ----------
+ /*
* Get one attributes newvalue. If it is NULL - they're not equal.
- * ----------
*/
newvalue = SPI_getbinval(newtup, rel->rd_att,
key->keypair[i][pairidx], &isnull);
if (isnull)
continue;
- /* ----------
- * Get the attributes type OID and call the '=' operator
- * to compare the values.
- * ----------
+ /*
+ * Get the attributes type OID and call the '=' operator to
+ * compare the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3243,28 +3119,25 @@ ri_OneKeyEqual(Relation rel, int column, HeapTuple oldtup, HeapTuple newtup,
Datum newvalue;
bool isnull;
- /* ----------
+ /*
* Get one attributes oldvalue. If it is NULL - they're not equal.
- * ----------
*/
oldvalue = SPI_getbinval(oldtup, rel->rd_att,
key->keypair[column][pairidx], &isnull);
if (isnull)
return false;
- /* ----------
+ /*
* Get one attributes newvalue. If it is NULL - they're not equal.
- * ----------
*/
newvalue = SPI_getbinval(newtup, rel->rd_att,
key->keypair[column][pairidx], &isnull);
if (isnull)
return false;
- /* ----------
- * Get the attributes type OID and call the '=' operator
- * to compare the values.
- * ----------
+ /*
+ * Get the attributes type OID and call the '=' operator to compare
+ * the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[column][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3289,27 +3162,24 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
RI_OpreqHashEntry *entry;
bool found;
- /* ----------
+ /*
* On the first call initialize the hashtable
- * ----------
*/
if (!ri_query_cache)
ri_InitHashTables();
- /* ----------
+ /*
* Try to find the '=' operator for this type in our cache
- * ----------
*/
entry = (RI_OpreqHashEntry *) hash_search(ri_opreq_cache,
(char *) &typeid, HASH_FIND, &found);
if (entry == NULL)
elog(FATAL, "error in RI operator cache");
- /* ----------
- * If not found, lookup the OPERNAME system cache for it
- * to get the func OID, then do the function manager lookup,
- * and remember that info.
- * ----------
+ /*
+ * If not found, lookup the OPERNAME system cache for it to get the
+ * func OID, then do the function manager lookup, and remember that
+ * info.
*/
if (!found)
{
@@ -3338,9 +3208,8 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
ReleaseSysCache(opr_tup);
}
- /* ----------
+ /*
* Call the type specific '=' function
- * ----------
*/
return DatumGetBool(FunctionCall2(&(entry->oprfmgrinfo),
oldvalue, newvalue));
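
Every RI_FKey_* body touched above follows the same SPI idiom: look the saved plan up in a backend-local hash keyed on the constraint, prepare and save it on a cache miss, then execute it with the key values taken from the affected tuple. A condensed sketch of that idiom, assuming the query string and argument-type array have already been built; qkey, querystr, queryoids, values and nulls stand in for the locals of the real functions, and the RI_QueryKey type and ri_FetchPreparedPlan()/ri_HashPreparedPlan() helpers are the ones defined in ri_triggers.c (this is an illustration, not code from the patch):

#include "postgres.h"
#include "executor/spi.h"

/*
 * Simplified skeleton of the plan-caching idiom used by the RI triggers.
 * The query text and argument-type array are assumed to be built already.
 */
static void
ri_check_sketch(RI_QueryKey *qkey, char *querystr, Oid *queryoids,
				Datum *values, char *nulls)
{
	void	   *qplan;

	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "SPI_connect() failed");

	/* Consult the backend-local cache of saved plans first */
	if ((qplan = ri_FetchPreparedPlan(qkey)) == NULL)
	{
		qplan = SPI_prepare(querystr, qkey->nkeypairs, queryoids);
		qplan = SPI_saveplan(qplan);		/* copy out of SPI's memory context */
		ri_HashPreparedPlan(qkey, qplan);	/* remember it for later calls */
	}

	/*
	 * values[]/nulls[] carry one entry per key column.  The lookup-style
	 * triggers check for SPI_OK_SELECT; the cascading ones check
	 * SPI_OK_UPDATE or SPI_OK_DELETE instead.
	 */
	if (SPI_execp(qplan, values, nulls, 1) != SPI_OK_SELECT)
		elog(ERROR, "SPI_execp() failed");

	if (SPI_finish() != SPI_OK_FINISH)
		elog(ERROR, "SPI_finish() failed");
}

The RI_QueryKey filled in by ri_BuildQueryKeyFull() is what keeps the plans of different constraints and different trigger actions apart in that hash.
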
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index c6db1c5b30f..b9aab50f628 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3,7 +3,7 @@
* back to source text
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ruleutils.c,v 1.74 2001/03/22 03:59:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ruleutils.c,v 1.75 2001/03/22 06:16:18 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -163,24 +163,21 @@ pg_get_ruledef(PG_FUNCTION_ARGS)
StringInfoData buf;
int len;
- /* ----------
+ /*
* We need the rules name somewhere deep down: rulename is global
- * ----------
*/
rulename = pstrdup(NameStr(*rname));
- /* ----------
+ /*
* Connect to SPI manager
- * ----------
*/
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "get_ruledef: cannot connect to SPI manager");
- /* ----------
- * On the first call prepare the plan to lookup pg_proc.
- * We read pg_proc over the SPI manager instead of using
- * the syscache to be checked for read access on pg_proc.
- * ----------
+ /*
+ * On the first call prepare the plan to lookup pg_proc. We read
+ * pg_proc over the SPI manager instead of using the syscache to be
+ * checked for read access on pg_proc.
*/
if (plan_getrule == NULL)
{
@@ -194,9 +191,8 @@ pg_get_ruledef(PG_FUNCTION_ARGS)
plan_getrule = SPI_saveplan(plan);
}
- /* ----------
+ /*
* Get the pg_rewrite tuple for this rule
- * ----------
*/
args[0] = PointerGetDatum(rulename);
nulls[0] = (rulename == NULL) ? 'n' : ' ';
@@ -217,9 +213,8 @@ pg_get_ruledef(PG_FUNCTION_ARGS)
ruletup = SPI_tuptable->vals[0];
rulettc = SPI_tuptable->tupdesc;
- /* ----------
+ /*
* Get the rules definition and put it into executors memory
- * ----------
*/
initStringInfo(&buf);
make_ruledef(&buf, ruletup, rulettc);
@@ -229,16 +224,14 @@ pg_get_ruledef(PG_FUNCTION_ARGS)
memcpy(VARDATA(ruledef), buf.data, buf.len);
pfree(buf.data);
- /* ----------
+ /*
* Disconnect from SPI manager
- * ----------
*/
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "get_ruledef: SPI_finish() failed");
- /* ----------
+ /*
* Easy - isn't it?
- * ----------
*/
PG_RETURN_TEXT_P(ruledef);
}
@@ -263,24 +256,21 @@ pg_get_viewdef(PG_FUNCTION_ARGS)
int len;
char *name;
- /* ----------
+ /*
* We need the view name somewhere deep down
- * ----------
*/
rulename = pstrdup(NameStr(*vname));
- /* ----------
+ /*
* Connect to SPI manager
- * ----------
*/
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "get_viewdef: cannot connect to SPI manager");
- /* ----------
- * On the first call prepare the plan to lookup pg_proc.
- * We read pg_proc over the SPI manager instead of using
- * the syscache to be checked for read access on pg_proc.
- * ----------
+ /*
+ * On the first call prepare the plan to lookup pg_proc. We read
+ * pg_proc over the SPI manager instead of using the syscache to be
+ * checked for read access on pg_proc.
*/
if (plan_getview == NULL)
{
@@ -294,9 +284,9 @@ pg_get_viewdef(PG_FUNCTION_ARGS)
plan_getview = SPI_saveplan(plan);
}
- /* ----------
- * Get the pg_rewrite tuple for this rule: rulename is actually viewname here
- * ----------
+ /*
+ * Get the pg_rewrite tuple for this rule: rulename is actually
+ * viewname here
*/
name = MakeRetrieveViewRuleName(rulename);
args[0] = PointerGetDatum(name);
@@ -309,9 +299,9 @@ pg_get_viewdef(PG_FUNCTION_ARGS)
appendStringInfo(&buf, "Not a view");
else
{
- /* ----------
+
+ /*
* Get the rules definition and put it into executors memory
- * ----------
*/
ruletup = SPI_tuptable->vals[0];
rulettc = SPI_tuptable->tupdesc;
@@ -324,16 +314,14 @@ pg_get_viewdef(PG_FUNCTION_ARGS)
pfree(buf.data);
pfree(name);
- /* ----------
+ /*
* Disconnect from SPI manager
- * ----------
*/
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "get_viewdef: SPI_finish() failed");
- /* ----------
+ /*
* Easy - isn't it?
- * ----------
*/
PG_RETURN_TEXT_P(ruledef);
}
@@ -366,17 +354,14 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
StringInfoData keybuf;
char *sep;
- /* ----------
+ /*
* Connect to SPI manager
- * ----------
*/
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "get_indexdef: cannot connect to SPI manager");
- /* ----------
- * On the first call prepare the plans to lookup pg_am
- * and pg_opclass.
- * ----------
+ /*
+ * On the first call prepare the plans to lookup pg_am and pg_opclass.
*/
if (plan_getam == NULL)
{
@@ -396,9 +381,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
plan_getopclass = SPI_saveplan(plan);
}
- /* ----------
+ /*
* Fetch the pg_index tuple by the Oid of the index
- * ----------
*/
ht_idx = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexrelid),
@@ -407,9 +391,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
elog(ERROR, "syscache lookup for index %u failed", indexrelid);
idxrec = (Form_pg_index) GETSTRUCT(ht_idx);
- /* ----------
+ /*
* Fetch the pg_class tuple of the index relation
- * ----------
*/
ht_idxrel = SearchSysCache(RELOID,
ObjectIdGetDatum(idxrec->indexrelid),
@@ -418,9 +401,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
elog(ERROR, "syscache lookup for relid %u failed", idxrec->indexrelid);
idxrelrec = (Form_pg_class) GETSTRUCT(ht_idxrel);
- /* ----------
+ /*
* Fetch the pg_class tuple of the indexed relation
- * ----------
*/
ht_indrel = SearchSysCache(RELOID,
ObjectIdGetDatum(idxrec->indrelid),
@@ -429,9 +411,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
elog(ERROR, "syscache lookup for relid %u failed", idxrec->indrelid);
indrelrec = (Form_pg_class) GETSTRUCT(ht_indrel);
- /* ----------
+ /*
* Get the am name for the index relation
- * ----------
*/
spi_args[0] = ObjectIdGetDatum(idxrelrec->relam);
spi_nulls[0] = ' ';
@@ -447,9 +428,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
spi_ttc = SPI_tuptable->tupdesc;
spi_fno = SPI_fnumber(spi_ttc, "amname");
- /* ----------
+ /*
* Start the index definition
- * ----------
*/
initStringInfo(&buf);
appendStringInfo(&buf, "CREATE %sINDEX %s ON %s USING %s (",
@@ -459,9 +439,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
quote_identifier(SPI_getvalue(spi_tup, spi_ttc,
spi_fno)));
- /* ----------
+ /*
* Collect the indexed attributes
- * ----------
*/
initStringInfo(&keybuf);
sep = "";
@@ -473,17 +452,15 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
appendStringInfo(&keybuf, sep);
sep = ", ";
- /* ----------
+ /*
* Add the indexed field name
- * ----------
*/
appendStringInfo(&keybuf, "%s",
quote_identifier(get_relid_attribute_name(idxrec->indrelid,
idxrec->indkey[keyno])));
- /* ----------
+ /*
* If not a functional index, add the operator class name
- * ----------
*/
if (idxrec->indproc == InvalidOid)
{
@@ -504,9 +481,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
}
}
- /* ----------
+ /*
* For functional index say 'func (attrs) opclass'
- * ----------
*/
if (idxrec->indproc != InvalidOid)
{
@@ -541,21 +517,19 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
ReleaseSysCache(proctup);
}
else
- /* ----------
+
+ /*
* For the others say 'attr opclass [, ...]'
- * ----------
*/
appendStringInfo(&buf, "%s", keybuf.data);
- /* ----------
+ /*
* Finish
- * ----------
*/
appendStringInfo(&buf, ")");
- /* ----------
+ /*
* Create the result in upper executor memory, and free objects
- * ----------
*/
len = buf.len + VARHDRSZ;
indexdef = SPI_palloc(len);
@@ -568,9 +542,8 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
ReleaseSysCache(ht_idxrel);
ReleaseSysCache(ht_indrel);
- /* ----------
+ /*
* Disconnect from SPI manager
- * ----------
*/
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "get_viewdef: SPI_finish() failed");
@@ -592,16 +565,14 @@ pg_get_userbyid(PG_FUNCTION_ARGS)
HeapTuple usertup;
Form_pg_shadow user_rec;
- /* ----------
+ /*
* Allocate space for the result
- * ----------
*/
result = (Name) palloc(NAMEDATALEN);
memset(NameStr(*result), 0, NAMEDATALEN);
- /* ----------
+ /*
* Get the pg_shadow entry and print the result
- * ----------
*/
usertup = SearchSysCache(SHADOWSYSID,
ObjectIdGetDatum(uid),
@@ -705,9 +676,8 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc)
int fno;
bool isnull;
- /* ----------
+ /*
* Get the attribute values from the rules tuple
- * ----------
*/
fno = SPI_fnumber(rulettc, "ev_type");
ev_type = (char) SPI_getbinval(ruletup, rulettc, fno, &isnull);
@@ -730,9 +700,8 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc)
actions = (List *) stringToNode(ev_action);
- /* ----------
+ /*
* Build the rules definition text
- * ----------
*/
appendStringInfo(buf, "CREATE RULE %s AS ON ",
quote_identifier(rulename));
@@ -852,9 +821,8 @@ make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc)
int fno;
bool isnull;
- /* ----------
+ /*
* Get the attribute values from the rules tuple
- * ----------
*/
fno = SPI_fnumber(rulettc, "ev_type");
ev_type = (char) SPI_getbinval(ruletup, rulettc, fno, &isnull);
@@ -961,11 +929,10 @@ get_select_query_def(Query *query, deparse_context *context)
char *sep;
List *l;
- /* ----------
- * If the Query node has a setOperations tree, then it's the top
- * level of a UNION/INTERSECT/EXCEPT query; only the ORDER BY and
- * LIMIT fields are interesting in the top query itself.
- * ----------
+ /*
+ * If the Query node has a setOperations tree, then it's the top level
+ * of a UNION/INTERSECT/EXCEPT query; only the ORDER BY and LIMIT
+ * fields are interesting in the top query itself.
*/
if (query->setOperations)
{
@@ -1033,9 +1000,8 @@ get_basic_select_query(Query *query, deparse_context *context)
char *sep;
List *l;
- /* ----------
+ /*
* Build up the query string - first we say SELECT
- * ----------
*/
appendStringInfo(buf, "SELECT");
@@ -1230,10 +1196,9 @@ get_insert_query_def(Query *query, deparse_context *context)
char *sep;
List *l;
- /* ----------
+ /*
* If it's an INSERT ... SELECT there will be a single subquery RTE
* for the SELECT.
- * ----------
*/
foreach(l, query->rtable)
{
@@ -1245,9 +1210,8 @@ get_insert_query_def(Query *query, deparse_context *context)
select_rte = rte;
}
- /* ----------
+ /*
* Start the query with INSERT INTO relname
- * ----------
*/
rte = rt_fetch(query->resultRelation, query->rtable);
appendStringInfo(buf, "INSERT INTO %s",
@@ -1303,9 +1267,8 @@ get_update_query_def(Query *query, deparse_context *context)
RangeTblEntry *rte;
List *l;
- /* ----------
+ /*
* Start the query with UPDATE relname SET
- * ----------
*/
rte = rt_fetch(query->resultRelation, query->rtable);
appendStringInfo(buf, "UPDATE %s%s SET ",
@@ -1357,9 +1320,8 @@ get_delete_query_def(Query *query, deparse_context *context)
StringInfo buf = context->buf;
RangeTblEntry *rte;
- /* ----------
+ /*
* Start the query with DELETE FROM relname
- * ----------
*/
rte = rt_fetch(query->resultRelation, query->rtable);
appendStringInfo(buf, "DELETE FROM %s%s",
@@ -1681,14 +1643,13 @@ get_rule_expr(Node *node, deparse_context *context)
if (node == NULL)
return;
- /* ----------
+ /*
* Each level of get_rule_expr must emit an indivisible term
- * (parenthesized if necessary) to ensure result is reparsed into
- * the same expression tree.
+ * (parenthesized if necessary) to ensure result is reparsed into the
+ * same expression tree.
*
* There might be some work left here to support additional node types.
* Can we ever see Param nodes here?
- * ----------
*/
switch (nodeTag(node))
{
@@ -1722,9 +1683,8 @@ get_rule_expr(Node *node, deparse_context *context)
Expr *expr = (Expr *) node;
List *args = expr->args;
- /* ----------
+ /*
* Expr nodes have to be handled a bit detailed
- * ----------
*/
switch (expr->opType)
{
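
The pg_get_ruledef(), pg_get_viewdef() and pg_get_indexdef() hunks above all hand their result back the same way: the definition text is accumulated in a StringInfo and then copied into a text value allocated with SPI_palloc() so it survives SPI_finish(). A minimal sketch of that return path, assuming a hypothetical build_def_text() in place of make_ruledef()/make_viewdef() and using VARATT_SIZEP(), the varlena length macro of this era:

#include "postgres.h"
#include "executor/spi.h"
#include "lib/stringinfo.h"

/* Hypothetical stand-in for make_ruledef()/make_viewdef() */
extern void build_def_text(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc);

static text *
deparse_result_sketch(HeapTuple ruletup, TupleDesc rulettc)
{
	StringInfoData buf;
	text	   *result;
	int			len;

	initStringInfo(&buf);
	build_def_text(&buf, ruletup, rulettc);

	/* Copy into upper-executor memory so the result outlives SPI_finish() */
	len = buf.len + VARHDRSZ;
	result = (text *) SPI_palloc(len);
	VARATT_SIZEP(result) = len;			/* set the varlena length header */
	memcpy(VARDATA(result), buf.data, buf.len);

	pfree(buf.data);
	return result;
}

pg_get_indexdef() builds its column list the same way in a second StringInfo (keybuf), emitting a ", " separator from the second attribute on, before splicing it into the CREATE INDEX text.
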
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index ef5f09374af..57a5dbe7126 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/init/postinit.c,v 1.82 2001/03/22 04:00:00 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/init/postinit.c,v 1.83 2001/03/22 06:16:18 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -151,17 +151,17 @@ ReverifyMyDatabase(const char *name)
static void
InitCommunication(void)
{
- /* ----------------
- * initialize shared memory and semaphores appropriately.
- * ----------------
+
+ /*
+ * initialize shared memory and semaphores appropriately.
*/
if (!IsUnderPostmaster) /* postmaster already did this */
{
- /* ----------------
- * we're running a postgres backend by itself with
- * no front end or postmaster. Create private "shmem"
- * and semaphores. Setting MaxBackends = 16 is arbitrary.
- * ----------------
+
+ /*
+ * we're running a postgres backend by itself with no front end or
+ * postmaster. Create private "shmem" and semaphores. Setting
+ * MaxBackends = 16 is arbitrary.
*/
CreateSharedMemoryAndSemaphores(true, 16);
}
@@ -207,9 +207,8 @@ InitPostgres(const char *dbname, const char *username)
SetDatabaseName(dbname);
- /* ----------------
- * initialize the database id used for system caches and lock tables
- * ----------------
+ /*
+ * initialize the database id used for system caches and lock tables
*/
if (bootstrap)
{
diff --git a/src/backend/utils/misc/database.c b/src/backend/utils/misc/database.c
index 5e14ef778bd..21afef019c9 100644
--- a/src/backend/utils/misc/database.c
+++ b/src/backend/utils/misc/database.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/misc/Attic/database.c,v 1.44 2001/03/22 04:00:06 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/misc/Attic/database.c,v 1.45 2001/03/22 06:16:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -162,22 +162,20 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
pfree(dbfname);
- /* ----------------
- * read and examine every page in pg_database
+ /*
+ * read and examine every page in pg_database
+ *
+ * Raw I/O! Read those tuples the hard way! Yow!
*
- * Raw I/O! Read those tuples the hard way! Yow!
+ * Why don't we use the access methods or move this code someplace else?
+ * This is really pg_database schema dependent code. Perhaps it
+ * should go in lib/catalog/pg_database? -cim 10/3/90
*
- * Why don't we use the access methods or move this code
- * someplace else? This is really pg_database schema dependent
- * code. Perhaps it should go in lib/catalog/pg_database?
- * -cim 10/3/90
+ * mao replies 4 apr 91: yeah, maybe this should be moved to
+ * lib/catalog. however, we CANNOT use the access methods since those
+ * use the buffer cache, which uses the relation cache, which requires
+ * that the dbid be set, which is what we're trying to do here.
*
- * mao replies 4 apr 91: yeah, maybe this should be moved to
- * lib/catalog. however, we CANNOT use the access methods since
- * those use the buffer cache, which uses the relation cache, which
- * requires that the dbid be set, which is what we're trying to do
- * here.
- * ----------------
*/
pg = (Page) palloc(BLCKSZ);
@@ -199,16 +197,17 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
tup.t_datamcxt = NULL;
tup.t_data = (HeapTupleHeader) PageGetItem(pg, lpp);
- /*--------------------
+ /*
* Check to see if tuple is valid (committed).
*
* XXX warning, will robinson: violation of transaction semantics
- * happens right here. We cannot really determine if the tuple
- * is valid without checking transaction commit status, and the
- * only way to do that at init time is to paw over pg_log by hand,
- * too. Instead of checking, we assume that the inserting
- * transaction committed, and that any deleting transaction did
- * also, unless shown otherwise by on-row commit status bits.
+ * happens right here. We cannot really determine if the
+ * tuple is valid without checking transaction commit status,
+ * and the only way to do that at init time is to paw over
+ * pg_log by hand, too. Instead of checking, we assume that
+ * the inserting transaction committed, and that any deleting
+ * transaction did also, unless shown otherwise by on-row
+ * commit status bits.
*
* All in all, this code is pretty shaky. We will cross-check
* our result in ReverifyMyDatabase() in postinit.c.
@@ -221,7 +220,6 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
* XXX wouldn't it be better to let new backends read the
* database OID from a flat file, handled the same way we
* handle the password relation?
- *--------------------
*/
if (!PhonyHeapTupleSatisfiesNow(tup.t_data))
continue;