Diffstat (limited to 'src/backend/executor')
-rw-r--r-- | src/backend/executor/execAmi.c             |  8
-rw-r--r-- | src/backend/executor/execGrouping.c        |  8
-rw-r--r-- | src/backend/executor/execJunk.c            | 22
-rw-r--r-- | src/backend/executor/execMain.c            | 42
-rw-r--r-- | src/backend/executor/execQual.c            | 27
-rw-r--r-- | src/backend/executor/execTuples.c          |  7
-rw-r--r-- | src/backend/executor/execUtils.c           | 18
-rw-r--r-- | src/backend/executor/functions.c           | 22
-rw-r--r-- | src/backend/executor/nodeAgg.c             | 14
-rw-r--r-- | src/backend/executor/nodeBitmapIndexscan.c |  6
-rw-r--r-- | src/backend/executor/nodeHash.c            |  6
-rw-r--r-- | src/backend/executor/nodeHashjoin.c        | 12
-rw-r--r-- | src/backend/executor/nodeIndexscan.c       |  6
-rw-r--r-- | src/backend/executor/nodeMergejoin.c       | 20
-rw-r--r-- | src/backend/executor/nodeNestloop.c        |  6
-rw-r--r-- | src/backend/executor/nodeSubplan.c         | 28
-rw-r--r-- | src/backend/executor/nodeUnique.c          | 10
-rw-r--r-- | src/backend/executor/spi.c                 |  6
18 files changed, 134 insertions, 134 deletions
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 06e4ab7b232..2cd86cca8c8 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85 2005/10/15 02:49:16 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85.2.1 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -403,9 +403,9 @@ ExecMayReturnRawTuples(PlanState *node) * but just pass up input tuples, we have to recursively examine the input * plan node. * - * Note: Hash and Material are listed here because they sometimes return an - * original input tuple, not a copy. But Sort and SetOp never return an - * original tuple, so they can be treated like projecting nodes. + * Note: Hash and Material are listed here because they sometimes return + * an original input tuple, not a copy. But Sort and SetOp never return + * an original tuple, so they can be treated like projecting nodes. */ switch (nodeTag(node)) { diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 688e2157e8b..58addc41a92 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16 2005/10/15 02:49:16 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16.2.1 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -381,9 +381,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, /* * created new entry * - * Zero any caller-requested space in the entry. (This zaps the "key - * data" dynahash.c copied into the new entry, but we don't care - * since we're about to overwrite it anyway.) + * Zero any caller-requested space in the entry. (This zaps the + * "key data" dynahash.c copied into the new entry, but we don't + * care since we're about to overwrite it anyway.) */ MemSet(entry, 0, hashtable->entrysize); diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index 2245c61e7fe..9a4102f3b27 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50 2005/10/15 02:49:16 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50.2.1 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -87,11 +87,11 @@ ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot) * Now calculate the mapping between the original tuple's attributes and * the "clean" tuple's attributes. * - * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry - * for every attribute of the "clean" tuple. The value of this entry is - * the attribute number of the corresponding attribute of the "original" - * tuple. (Zero indicates a NULL output attribute, but we do not use that - * feature in this routine.) + * The "map" is an array of "cleanLength" attribute numbers, i.e. one + * entry for every attribute of the "clean" tuple. 
The value of this entry + * is the attribute number of the corresponding attribute of the + * "original" tuple. (Zero indicates a NULL output attribute, but we do + * not use that feature in this routine.) */ cleanLength = cleanTupType->natts; if (cleanLength > 0) @@ -158,11 +158,11 @@ ExecInitJunkFilterConversion(List *targetList, * Calculate the mapping between the original tuple's attributes and the * "clean" tuple's attributes. * - * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry - * for every attribute of the "clean" tuple. The value of this entry is - * the attribute number of the corresponding attribute of the "original" - * tuple. We store zero for any deleted attributes, marking that a NULL - * is needed in the output tuple. + * The "map" is an array of "cleanLength" attribute numbers, i.e. one + * entry for every attribute of the "clean" tuple. The value of this entry + * is the attribute number of the corresponding attribute of the + * "original" tuple. We store zero for any deleted attributes, marking + * that a NULL is needed in the output tuple. */ cleanLength = cleanTupType->natts; if (cleanLength > 0) diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 32c7711664d..04e36b87b08 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -26,7 +26,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256.2.3 2005/11/20 18:38:42 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256.2.4 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -376,10 +376,10 @@ ExecCheckRTEPerms(RangeTblEntry *rte) /* * userid to check as: current user unless we have a setuid indication. * - * Note: GetUserId() is presently fast enough that there's no harm in calling - * it separately for each RTE. If that stops being true, we could call it - * once in ExecCheckRTPerms and pass the userid down from there. But for - * now, no need for the extra clutter. + * Note: GetUserId() is presently fast enough that there's no harm in + * calling it separately for each RTE. If that stops being true, we could + * call it once in ExecCheckRTPerms and pass the userid down from there. + * But for now, no need for the extra clutter. */ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); @@ -582,8 +582,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) * initialize the executor "tuple" table. We need slots for all the plan * nodes, plus possibly output slots for the junkfilter(s). At this point * we aren't sure if we need junkfilters, so just add slots for them - * unconditionally. Also, if it's not a SELECT, set up a slot for use - * for trigger output tuples. + * unconditionally. Also, if it's not a SELECT, set up a slot for use for + * trigger output tuples. */ { int nSlots = ExecCountSlotsNode(plan); @@ -797,11 +797,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) /* * We can skip WAL-logging the insertions, unless PITR is in use. * - * Note that for a non-temp INTO table, this is safe only because we know - * that the catalog changes above will have been WAL-logged, and so - * RecordTransactionCommit will think it needs to WAL-log the eventual - * transaction commit. Else the commit might be lost, even though all - * the data is safely fsync'd ... 
+ * Note that for a non-temp INTO table, this is safe only because we + * know that the catalog changes above will have been WAL-logged, and + * so RecordTransactionCommit will think it needs to WAL-log the + * eventual transaction commit. Else the commit might be lost, even + * though all the data is safely fsync'd ... */ estate->es_into_relation_use_wal = XLogArchivingActive(); } @@ -1495,8 +1495,8 @@ ExecDelete(TupleTableSlot *slot, /* * delete the tuple * - * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the - * row to be deleted is visible to that snapshot, and throw a can't- + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be deleted is visible to that snapshot, and throw a can't- * serialize error if not. This is a special-case behavior needed for * referential integrity updates in serializable transactions. */ @@ -1549,9 +1549,9 @@ ldelete:; * Note: Normally one would think that we have to delete index tuples * associated with the heap tuple now.. * - * ... but in POSTGRES, we have no need to do this because the vacuum daemon - * automatically opens an index scan and deletes index tuples when it - * finds deleted heap tuples. -cim 9/27/89 + * ... but in POSTGRES, we have no need to do this because the vacuum + * daemon automatically opens an index scan and deletes index tuples when + * it finds deleted heap tuples. -cim 9/27/89 */ /* AFTER ROW DELETE Triggers */ @@ -1635,8 +1635,8 @@ ExecUpdate(TupleTableSlot *slot, /* * Check the constraints of the tuple * - * If we generate a new candidate tuple after EvalPlanQual testing, we must - * loop back here and recheck constraints. (We don't need to redo + * If we generate a new candidate tuple after EvalPlanQual testing, we + * must loop back here and recheck constraints. (We don't need to redo * triggers, however. If there are any BEFORE triggers then trigger.c * will have done heap_lock_tuple to lock the correct tuple, so there's no * need to do them again.) @@ -1648,8 +1648,8 @@ lreplace:; /* * replace the heap tuple * - * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the - * row to be updated is visible to that snapshot, and throw a can't- + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be updated is visible to that snapshot, and throw a can't- * serialize error if not. This is a special-case behavior needed for * referential integrity updates in serializable transactions. 
*/ diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index 4ee9a4ca622..37e4d704ce4 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.183 2005/10/19 22:30:30 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.183.2.1 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -65,7 +65,7 @@ static Datum ExecEvalAggref(AggrefExprState *aggref, static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext, @@ -327,8 +327,8 @@ ExecEvalArrayRef(ArrayRefExprState *astate, /* * Evaluate the value to be assigned into the array. * - * XXX At some point we'll need to look into making the old value of the - * array element available via CaseTestExpr, as is done by + * XXX At some point we'll need to look into making the old value of + * the array element available via CaseTestExpr, as is done by * ExecEvalFieldStore. This is not needed now but will be needed to * support arrays of composite types; in an assignment to a field of * an array member, the parser would generate a FieldStore that @@ -534,8 +534,8 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext, Assert(variable->varattno == InvalidAttrNumber); /* - * Whole-row Vars can only appear at the level of a relation scan, - * never in a join. + * Whole-row Vars can only appear at the level of a relation scan, never + * in a join. */ Assert(variable->varno != INNER); Assert(variable->varno != OUTER); @@ -545,8 +545,8 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext, tupleDesc = slot->tts_tupleDescriptor; /* - * We have to make a copy of the tuple so we can safely insert the - * Datum overhead fields, which are not set in on-disk tuples. + * We have to make a copy of the tuple so we can safely insert the Datum + * overhead fields, which are not set in on-disk tuples. */ dtuple = (HeapTupleHeader) palloc(tuple->t_len); memcpy((char *) dtuple, (char *) tuple->t_data, tuple->t_len); @@ -554,12 +554,11 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext, HeapTupleHeaderSetDatumLength(dtuple, tuple->t_len); /* - * If the Var identifies a named composite type, label the tuple - * with that type; otherwise use what is in the tupleDesc. + * If the Var identifies a named composite type, label the tuple with that + * type; otherwise use what is in the tupleDesc. * - * It's likely that the slot's tupleDesc is a record type; if so, - * make sure it's been "blessed", so that the Datum can be interpreted - * later. + * It's likely that the slot's tupleDesc is a record type; if so, make + * sure it's been "blessed", so that the Datum can be interpreted later. 
*/ if (variable->vartype != RECORDOID) { @@ -2915,7 +2914,7 @@ ExecInitExpr(Expr *node, PlanState *parent) { case T_Var: { - Var *var = (Var *) node; + Var *var = (Var *) node; state = (ExprState *) makeNode(ExprState); if (var->varattno != InvalidAttrNumber) diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index b38bcc44cb4..0d3700c8e94 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -15,7 +15,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88 2005/10/15 02:49:16 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88.2.1 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -631,8 +631,9 @@ ExecMaterializeSlot(TupleTableSlot *slot) * in which this could be optimized but it's probably not worth worrying * about.) * - * We may be called in a context that is shorter-lived than the tuple slot, - * but we have to ensure that the materialized tuple will survive anyway. + * We may be called in a context that is shorter-lived than the tuple + * slot, but we have to ensure that the materialized tuple will survive + * anyway. */ oldContext = MemoryContextSwitchTo(slot->tts_mcxt); newTuple = ExecCopySlotTuple(slot); diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index cab0e6179fa..c7da61eeef2 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126.2.1 2005/11/14 17:43:13 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126.2.2 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -769,19 +769,19 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo) /* * Open and lock the index relation * - * If the index AM supports concurrent updates, obtain RowExclusiveLock - * to signify that we are updating the index. This locks out only - * operations that need exclusive access, such as relocating the index - * to a new tablespace. + * If the index AM supports concurrent updates, obtain + * RowExclusiveLock to signify that we are updating the index. This + * locks out only operations that need exclusive access, such as + * relocating the index to a new tablespace. * * If the index AM is not safe for concurrent updates, obtain an * exclusive lock on the index to lock out other updaters as well as * readers (index_beginscan places AccessShareLock). * - * If there are multiple not-concurrent-safe indexes, all backends must - * lock the indexes in the same order or we will get deadlocks here. - * This is guaranteed by RelationGetIndexList(), which promises to - * return the index list in OID order. + * If there are multiple not-concurrent-safe indexes, all backends + * must lock the indexes in the same order or we will get deadlocks + * here. This is guaranteed by RelationGetIndexList(), which promises + * to return the index list in OID order. * * The locks will be released in ExecCloseIndices. 
*/ diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 24a8b9a493a..b969fdf6de7 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98 2005/10/15 02:49:16 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98.2.1 2005/11/22 18:23:08 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -268,11 +268,11 @@ init_sql_fcache(FmgrInfo *finfo) * If the function has any arguments declared as polymorphic types, then * it wasn't type-checked at definition time; must do so now. * - * Also, force a type-check if the declared return type is a rowtype; we need - * to find out whether we are actually returning the whole tuple result, - * or just regurgitating a rowtype expression result. In the latter case - * we clear returnsTuple because we need not act different from the scalar - * result case. + * Also, force a type-check if the declared return type is a rowtype; we + * need to find out whether we are actually returning the whole tuple + * result, or just regurgitating a rowtype expression result. In the + * latter case we clear returnsTuple because we need not act different + * from the scalar result case. * * In the returnsTuple case, check_sql_fn_retval will also construct a * JunkFilter we can use to coerce the returned rowtype to the desired @@ -498,12 +498,12 @@ postquel_execute(execution_state *es, * labeling to make it a valid Datum. There are several reasons why * we do this: * - * 1. To copy the tuple out of the child execution context and into the - * desired result context. + * 1. To copy the tuple out of the child execution context and into + * the desired result context. * - * 2. To remove any junk attributes present in the raw subselect result. - * (This is probably not absolutely necessary, but it seems like good - * policy.) + * 2. To remove any junk attributes present in the raw subselect + * result. (This is probably not absolutely necessary, but it seems + * like good policy.) * * 3. To insert dummy null columns if the declared result type has any * attisdropped columns. diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 0403c9aca1b..014219a1051 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -61,7 +61,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -283,8 +283,8 @@ initialize_aggregates(AggState *aggstate, /* * (Re)set transValue to the initial value. * - * Note that when the initial value is pass-by-ref, we must copy it (into - * the aggcontext) since we will pfree the transValue later. + * Note that when the initial value is pass-by-ref, we must copy it + * (into the aggcontext) since we will pfree the transValue later. */ if (peraggstate->initValueIsNull) pergroupstate->transValue = peraggstate->initValue; @@ -341,8 +341,8 @@ advance_transition_function(AggState *aggstate, * already checked that the agg's input type is binary-compatible * with its transtype, so straight copy here is OK.) * - * We must copy the datum into aggcontext if it is pass-by-ref. 
We do - * not need to pfree the old transValue, since it's NULL. + * We must copy the datum into aggcontext if it is pass-by-ref. We + * do not need to pfree the old transValue, since it's NULL. */ oldContext = MemoryContextSwitchTo(aggstate->aggcontext); pergroupstate->transValue = datumCopy(newVal, @@ -842,8 +842,8 @@ agg_retrieve_direct(AggState *aggstate) * aggregate will have a targetlist reference to ctid. We need to * return a null for ctid in that situation, not coredump. * - * The values returned for the aggregates will be the initial values of - * the transition functions. + * The values returned for the aggregates will be the initial values + * of the transition functions. */ if (TupIsNull(firstSlot)) { diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 49b63170d49..3e66f74e28a 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -211,8 +211,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate) /* * Miscellaneous initialization * - * We do not need a standard exprcontext for this node, though we may decide - * below to create a runtime-key exprcontext + * We do not need a standard exprcontext for this node, though we may + * decide below to create a runtime-key exprcontext */ /* diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 8c51e785b28..320a7896c33 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -237,8 +237,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators) /* * Initialize the hash table control block. * - * The hashtable control block is just palloc'd from the executor's per-query - * memory context. + * The hashtable control block is just palloc'd from the executor's + * per-query memory context. */ hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData)); hashtable->nbuckets = nbuckets; diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index 817f3822ef6..40eebb44027 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.75 2005/10/18 01:06:24 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.75.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -622,13 +622,13 @@ start_over: * 1. In a LEFT JOIN, we have to process outer batches even if the inner * batch is empty. * - * 2. If we have increased nbatch since the initial estimate, we have to scan - * inner batches since they might contain tuples that need to be + * 2. 
If we have increased nbatch since the initial estimate, we have to + * scan inner batches since they might contain tuples that need to be * reassigned to later inner batches. * - * 3. Similarly, if we have increased nbatch since starting the outer scan, - * we have to rescan outer batches in case they contain tuples that need - * to be reassigned. + * 3. Similarly, if we have increased nbatch since starting the outer + * scan, we have to rescan outer batches in case they contain tuples that + * need to be reassigned. */ curbatch++; while (curbatch < nbatch && diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 94ab2223c75..009aba997f6 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -578,8 +578,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals, * listed in the var node and use the value of the const as comparison * data. * - * If we don't have a const node, it means our scan key is a function of - * information obtained during the execution of the plan, in which + * If we don't have a const node, it means our scan key is a function + * of information obtained during the execution of the plan, in which * case we need to recalculate the index scan key at run time. Hence, * we set have_runtime_keys to true and place the appropriate * subexpression in run_keys. The corresponding scan key values are diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 0d4eed4c9ba..a1d209db349 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -239,8 +239,8 @@ MJExamineQuals(List *qualList, PlanState *parent) * much like SelectSortFunction except we insist on matching all the * operators provided, and it can be a cross-type opclass. * - * XXX for now, insist on forward sort so that NULLs can be counted on to - * be high. + * XXX for now, insist on forward sort so that NULLs can be counted on + * to be high. */ catlist = SearchSysCacheList(AMOPOPID, 1, ObjectIdGetDatum(qual->opno), @@ -1121,13 +1121,13 @@ ExecMergeJoin(MergeJoinState *node) * scan position to the first mark, and go join that tuple * (and any following ones) to the new outer. * - * NOTE: we do not need to worry about the MatchedInner state - * for the rescanned inner tuples. We know all of them - * will match this new outer tuple and therefore won't be - * emitted as fill tuples. This works *only* because we - * require the extra joinquals to be nil when doing a - * right or full join --- otherwise some of the rescanned - * tuples might fail the extra joinquals. + * NOTE: we do not need to worry about the MatchedInner + * state for the rescanned inner tuples. We know all of + * them will match this new outer tuple and therefore + * won't be emitted as fill tuples. 
This works *only* + * because we require the extra joinquals to be nil when + * doing a right or full join --- otherwise some of the + * rescanned tuples might fail the extra joinquals. */ ExecRestrPos(innerPlan); diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c index a497e9ac337..24f3621bb99 100644 --- a/src/backend/executor/nodeNestloop.c +++ b/src/backend/executor/nodeNestloop.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -223,8 +223,8 @@ ExecNestLoop(NestLoopState *node) * test the inner and outer tuples to see if they satisfy the node's * qualification. * - * Only the joinquals determine MatchedOuter status, but all quals must - * pass to actually return the tuple. + * Only the joinquals determine MatchedOuter status, but all quals + * must pass to actually return the tuple. */ ENL1_printf("testing qualification"); diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 0e7b6df7225..061ca12eda4 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -128,8 +128,8 @@ ExecHashSubPlan(SubPlanState *node, * unequal to the LHS; if so, the result is UNKNOWN. (We skip that part * if we don't care about UNKNOWN.) Otherwise, the result is FALSE. * - * Note: the reason we can avoid a full scan of the main hash table is that - * the combining operators are assumed never to yield NULL when both + * Note: the reason we can avoid a full scan of the main hash table is + * that the combining operators are assumed never to yield NULL when both * inputs are non-null. If they were to do so, we might need to produce * UNKNOWN instead of FALSE because of an UNKNOWN result in comparing the * LHS to some main-table entry --- which is a comparison we will not even @@ -255,9 +255,9 @@ ExecScanSubPlan(SubPlanState *node, * FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for * MULTIEXPR_SUBLINK. * - * For EXPR_SUBLINK we require the subplan to produce no more than one tuple, - * else an error is raised. For ARRAY_SUBLINK we allow the subplan to - * produce more than one tuple. In either case, if zero tuples are + * For EXPR_SUBLINK we require the subplan to produce no more than one + * tuple, else an error is raised. For ARRAY_SUBLINK we allow the subplan + * to produce more than one tuple. In either case, if zero tuples are * produced, we return NULL. Assuming we get a tuple, we just use its * first column (there can be only one non-junk column in this case). */ @@ -480,13 +480,13 @@ buildSubPlanHash(SubPlanState *node) * If we need to distinguish accurately between FALSE and UNKNOWN (i.e., * NULL) results of the IN operation, then we have to store subplan output * rows that are partly or wholly NULL. We store such rows in a separate - * hash table that we expect will be much smaller than the main table. 
- * (We can use hashing to eliminate partly-null rows that are not - * distinct. We keep them separate to minimize the cost of the inevitable - * full-table searches; see findPartialMatch.) + * hash table that we expect will be much smaller than the main table. (We + * can use hashing to eliminate partly-null rows that are not distinct. + * We keep them separate to minimize the cost of the inevitable full-table + * searches; see findPartialMatch.) * - * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't need - * to store subplan output rows that contain NULL. + * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't + * need to store subplan output rows that contain NULL. */ MemoryContextReset(node->tablecxt); node->hashtable = NULL; @@ -796,8 +796,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate) * righthand sides. We need both the ExprState list (for ExecProject) * and the underlying parse Exprs (for ExecTypeFromTL). * - * We also extract the combining operators themselves to initialize the - * equality and hashing functions for the hash tables. + * We also extract the combining operators themselves to initialize + * the equality and hashing functions for the hash tables. */ lefttlist = righttlist = NIL; leftptlist = rightptlist = NIL; diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index ab3879d7cc6..0b0ee93e4b1 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48 2005/10/15 02:49:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -56,10 +56,10 @@ ExecUnique(UniqueState *node) * now loop, returning only non-duplicate tuples. We assume that the * tuples arrive in sorted order so we can detect duplicates easily. * - * We return the first tuple from each group of duplicates (or the last tuple - * of each group, when moving backwards). At either end of the subplan, - * clear the result slot so that we correctly return the first/last tuple - * when reversing direction. + * We return the first tuple from each group of duplicates (or the last + * tuple of each group, when moving backwards). At either end of the + * subplan, clear the result slot so that we correctly return the + * first/last tuple when reversing direction. */ for (;;) { diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 0b45fe49df2..e8ba1f5f292 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.144 2005/11/03 17:11:36 alvherre Exp $ + * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.144.2.1 2005/11/22 18:23:09 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -113,8 +113,8 @@ SPI_connect(void) /* * Create memory contexts for this procedure * - * XXX it would be better to use PortalContext as the parent context, but we - * may not be inside a portal (consider deferred-trigger execution). + * XXX it would be better to use PortalContext as the parent context, but + * we may not be inside a portal (consider deferred-trigger execution). * Perhaps CurTransactionContext would do? For now it doesn't matter * because we clean up explicitly in AtEOSubXact_SPI(). */ |