aboutsummaryrefslogtreecommitdiff
path: root/src/backend/executor
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/executor')
-rw-r--r--src/backend/executor/README75
-rw-r--r--src/backend/executor/execCurrent.c5
-rw-r--r--src/backend/executor/execMain.c758
-rw-r--r--src/backend/executor/execQual.c4
-rw-r--r--src/backend/executor/execScan.c96
-rw-r--r--src/backend/executor/execUtils.c10
-rw-r--r--src/backend/executor/nodeBitmapHeapscan.c80
-rw-r--r--src/backend/executor/nodeCtescan.c26
-rw-r--r--src/backend/executor/nodeFunctionscan.c29
-rw-r--r--src/backend/executor/nodeIndexscan.c77
-rw-r--r--src/backend/executor/nodeLockRows.c170
-rw-r--r--src/backend/executor/nodeModifyTable.c68
-rw-r--r--src/backend/executor/nodeSeqscan.c80
-rw-r--r--src/backend/executor/nodeSubqueryscan.c47
-rw-r--r--src/backend/executor/nodeTidscan.c74
-rw-r--r--src/backend/executor/nodeValuesscan.c25
-rw-r--r--src/backend/executor/nodeWorktablescan.c26
17 files changed, 884 insertions, 766 deletions
diff --git a/src/backend/executor/README b/src/backend/executor/README
index 06d05d52311..c928186e06c 100644
--- a/src/backend/executor/README
+++ b/src/backend/executor/README
@@ -1,4 +1,4 @@
-$PostgreSQL: pgsql/src/backend/executor/README,v 1.10 2009/10/12 18:10:41 tgl Exp $
+$PostgreSQL: pgsql/src/backend/executor/README,v 1.11 2009/10/26 02:26:29 tgl Exp $
The Postgres Executor
=====================
@@ -160,41 +160,38 @@ modified tuple. SELECT FOR UPDATE/SHARE behaves similarly, except that its
action is just to lock the modified tuple and return results based on that
version of the tuple.
-To implement this checking, we actually re-run the entire query from scratch
-for each modified tuple, but with the scan node that sourced the original
-tuple set to return only the modified tuple, not the original tuple or any
-of the rest of the relation. If this query returns a tuple, then the
-modified tuple passes the quals (and the query output is the suitably
-modified update tuple, if we're doing UPDATE). If no tuple is returned,
-then the modified tuple fails the quals, so we ignore it and continue the
-original query. (This is reasonably efficient for simple queries, but may
-be horribly slow for joins. A better design would be nice; one thought for
-future investigation is to treat the tuple substitution like a parameter,
-so that we can avoid rescanning unrelated nodes.)
-
-Note a fundamental bogosity of this approach: if the relation containing
-the original tuple is being used in a self-join, the other instance(s) of
-the relation will be treated as still containing the original tuple, whereas
-logical consistency would demand that the modified tuple appear in them too.
-But we'd have to actually substitute the modified tuple for the original,
-while still returning all the rest of the relation, to ensure consistent
-answers. Implementing this correctly is a task for future work.
-
-In UPDATE/DELETE, only the target relation needs to be handled this way,
-so only one special recheck query needs to execute at a time. In SELECT FOR
-UPDATE, there may be multiple relations flagged FOR UPDATE, so it's possible
-that while we are executing a recheck query for one modified tuple, we will
-hit another modified tuple in another relation. In this case we "stack up"
-recheck queries: a sub-recheck query is spawned in which both the first and
-second modified tuples will be returned as the only components of their
-relations. (In event of success, all these modified tuples will be locked.)
-Again, this isn't necessarily quite the right thing ... but in simple cases
-it works. Potentially, recheck queries could get nested to the depth of the
-number of FOR UPDATE/SHARE relations in the query.
-
-It should be noted also that UPDATE/DELETE expect at most one tuple to
-result from the modified query, whereas in the FOR UPDATE case it's possible
-for multiple tuples to result (since we could be dealing with a join in
-which multiple tuples join to the modified tuple). We want FOR UPDATE to
-lock all relevant tuples, so we process all tuples output by all the stacked
-recheck queries.
+To implement this checking, we actually re-run the query from scratch for
+each modified tuple (or set of tuples, for SELECT FOR UPDATE), with the
+relation scan nodes tweaked to return only the current tuples --- either
+the original ones, or the updated (and now locked) versions of the modified
+tuple(s). If this query returns a tuple, then the modified tuple(s) pass
+the quals (and the query output is the suitably modified update tuple, if
+we're doing UPDATE). If no tuple is returned, then the modified tuple(s)
+fail the quals, so we ignore the current result tuple and continue the
+original query.
+
+In UPDATE/DELETE, only the target relation needs to be handled this way.
+In SELECT FOR UPDATE, there may be multiple relations flagged FOR UPDATE,
+so we obtain lock on the current tuple version in each such relation before
+executing the recheck.
+
+It is also possible that there are relations in the query that are not
+to be locked (they are neither the UPDATE/DELETE target nor specified to
+be locked in SELECT FOR UPDATE/SHARE). When re-running the test query
+we want to use the same rows from these relations that were joined to
+the locked rows. For ordinary relations this can be implemented relatively
+cheaply by including the row TID in the join outputs and re-fetching that
+TID. (The re-fetch is expensive, but we're trying to optimize the normal
+case where no re-test is needed.) We have also to consider non-table
+relations, such as a ValuesScan or FunctionScan. For these, since there
+is no equivalent of TID, the only practical solution seems to be to include
+the entire row value in the join output row.
+
+We disallow set-returning functions in the targetlist of SELECT FOR UPDATE,
+so as to ensure that at most one tuple can be returned for any particular
+set of scan tuples. Otherwise we'd get duplicates due to the original
+query returning the same set of scan tuples multiple times. (Note: there
+is no explicit prohibition on SRFs in UPDATE, but the net effect will be
+that only the first result row of an SRF counts, because all subsequent
+rows will result in attempts to re-update an already updated target row.
+This is historical behavior and seems not worth changing.)
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index 78ad80db66f..a4103332c40 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execCurrent.c,v 1.11 2009/10/12 18:10:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execCurrent.c,v 1.12 2009/10/26 02:26:29 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,6 +102,9 @@ execCurrentOf(CurrentOfExpr *cexpr,
{
ExecRowMark *thiserm = (ExecRowMark *) lfirst(lc);
+ if (!RowMarkRequiresRowShareLock(thiserm->markType))
+ continue; /* ignore non-FOR UPDATE/SHARE items */
+
if (RelationGetRelid(thiserm->relation) == table_oid)
{
if (erm)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index d03ad094184..d7d99bc0aea 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.333 2009/10/12 18:10:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.334 2009/10/26 02:26:29 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,17 +61,6 @@ ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
-typedef struct evalPlanQual
-{
- Index rti;
- EState *estate;
- PlanState *planstate;
- PlanState *origplanstate;
- TupleTableSlot *resultslot;
- struct evalPlanQual *next; /* stack of active PlanQual plans */
- struct evalPlanQual *free; /* list of free PlanQual plans */
-} evalPlanQual;
-
/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecEndPlan(PlanState *planstate, EState *estate);
@@ -81,13 +70,11 @@ static void ExecutePlan(EState *estate, PlanState *planstate,
long numberTuples,
ScanDirection direction,
DestReceiver *dest);
-static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
-static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
- Plan *planTree, evalPlanQual *priorepq);
-static void EvalPlanQualStop(evalPlanQual *epq);
+static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
+ Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
@@ -155,7 +142,8 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/*
- * Fill in parameters, if any, from queryDesc
+ * Fill in external parameters, if any, from queryDesc; and allocate
+ * workspace for internal parameters
*/
estate->es_param_list_info = queryDesc->params;
@@ -648,6 +636,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
* initialize the node's execution state
*/
estate->es_range_table = rangeTable;
+ estate->es_plannedstmt = plannedstmt;
/*
* initialize result relation stuff, and open/lock the result rels.
@@ -703,7 +692,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
estate->es_rowMarks = NIL;
foreach(l, plannedstmt->rowMarks)
{
- RowMarkClause *rc = (RowMarkClause *) lfirst(l);
+ PlanRowMark *rc = (PlanRowMark *) lfirst(l);
Oid relid;
Relation relation;
ExecRowMark *erm;
@@ -712,18 +701,36 @@ InitPlan(QueryDesc *queryDesc, int eflags)
if (rc->isParent)
continue;
- relid = getrelid(rc->rti, rangeTable);
- relation = heap_open(relid, RowShareLock);
+ switch (rc->markType)
+ {
+ case ROW_MARK_EXCLUSIVE:
+ case ROW_MARK_SHARE:
+ relid = getrelid(rc->rti, rangeTable);
+ relation = heap_open(relid, RowShareLock);
+ break;
+ case ROW_MARK_REFERENCE:
+ relid = getrelid(rc->rti, rangeTable);
+ relation = heap_open(relid, AccessShareLock);
+ break;
+ case ROW_MARK_COPY:
+ /* there's no real table here ... */
+ relation = NULL;
+ break;
+ default:
+ elog(ERROR, "unrecognized markType: %d", rc->markType);
+ relation = NULL; /* keep compiler quiet */
+ break;
+ }
+
erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
erm->relation = relation;
erm->rti = rc->rti;
erm->prti = rc->prti;
- erm->rowmarkId = rc->rowmarkId;
- erm->forUpdate = rc->forUpdate;
+ erm->markType = rc->markType;
erm->noWait = rc->noWait;
- /* remaining fields are filled during LockRows plan node init */
- erm->ctidAttNo = InvalidAttrNumber;
- erm->toidAttNo = InvalidAttrNumber;
+ erm->ctidAttNo = rc->ctidAttNo;
+ erm->toidAttNo = rc->toidAttNo;
+ erm->wholeAttNo = rc->wholeAttNo;
ItemPointerSetInvalid(&(erm->curCtid));
estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
}
@@ -747,10 +754,9 @@ InitPlan(QueryDesc *queryDesc, int eflags)
estate->es_trig_tuple_slot = NULL;
/* mark EvalPlanQual not active */
- estate->es_plannedstmt = plannedstmt;
- estate->es_evalPlanQual = NULL;
- estate->es_evTupleNull = NULL;
- estate->es_evTuple = NULL;
+ estate->es_epqTuple = NULL;
+ estate->es_epqTupleSet = NULL;
+ estate->es_epqScanDone = NULL;
/*
* Initialize private state information for each SubPlan. We must do this
@@ -1077,12 +1083,6 @@ ExecEndPlan(PlanState *planstate, EState *estate)
ListCell *l;
/*
- * shut down any PlanQual processing we were doing
- */
- if (estate->es_evalPlanQual != NULL)
- EndEvalPlanQual(estate);
-
- /*
* shut down the node-type-specific query processing
*/
ExecEndNode(planstate);
@@ -1133,9 +1133,10 @@ ExecEndPlan(PlanState *planstate, EState *estate)
*/
foreach(l, estate->es_rowMarks)
{
- ExecRowMark *erm = lfirst(l);
+ ExecRowMark *erm = (ExecRowMark *) lfirst(l);
- heap_close(erm->relation, NoLock);
+ if (erm->relation)
+ heap_close(erm->relation, NoLock);
}
}
@@ -1330,15 +1331,23 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
}
}
+
/*
- * Check a modified tuple to see if we want to process its updated version
- * under READ COMMITTED rules.
+ * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
+ * process the updated version under READ COMMITTED rules.
*
* See backend/executor/README for some info about how this works.
+ */
+
+
+/*
+ * Check a modified tuple to see if we want to process its updated version
+ * under READ COMMITTED rules.
*
- * estate - executor state data
+ * estate - outer executor state data
+ * epqstate - state for EvalPlanQual rechecking
+ * relation - table containing tuple
* rti - rangetable index of table containing tuple
- * subplanstate - portion of plan tree that needs to be re-evaluated
* *tid - t_ctid from the outdated tuple (ie, next updated version)
* priorXmax - t_xmax from the outdated tuple
*
@@ -1349,19 +1358,20 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
* NULL if we determine we shouldn't process the row.
*/
TupleTableSlot *
-EvalPlanQual(EState *estate, Index rti,
- PlanState *subplanstate,
+EvalPlanQual(EState *estate, EPQState *epqstate,
+ Relation relation, Index rti,
ItemPointer tid, TransactionId priorXmax)
{
TupleTableSlot *slot;
HeapTuple copyTuple;
- Assert(rti != 0);
+ Assert(rti > 0);
/*
- * Get the updated version of the row; if fail, return NULL.
+ * Get and lock the updated version of the row; if fail, return NULL.
*/
- copyTuple = EvalPlanQualFetch(estate, rti, tid, priorXmax);
+ copyTuple = EvalPlanQualFetch(estate, relation, LockTupleExclusive,
+ tid, priorXmax);
if (copyTuple == NULL)
return NULL;
@@ -1373,52 +1383,32 @@ EvalPlanQual(EState *estate, Index rti,
*tid = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Find or create a PQ stack entry.
+ * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
*/
- EvalPlanQualPush(estate, rti, subplanstate);
+ EvalPlanQualBegin(epqstate, estate);
/*
- * free old RTE' tuple, if any, and store target tuple where relation's
+ * Free old test tuple, if any, and store new tuple where relation's
* scan node will see it
*/
- EvalPlanQualSetTuple(estate, rti, copyTuple);
+ EvalPlanQualSetTuple(epqstate, rti, copyTuple);
/*
- * Run the EPQ query, but just for one tuple.
+ * Fetch any non-locked source rows
*/
- slot = EvalPlanQualNext(estate);
+ EvalPlanQualFetchRowMarks(epqstate);
/*
- * If we got a result, we must copy it out of the EPQ query's local
- * context before we shut down the EPQ query.
+ * Run the EPQ query. We assume it will return at most one tuple.
*/
- if (TupIsNull(slot))
- slot = NULL; /* in case we got back an empty slot */
- else
- {
- TupleDesc tupdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
- evalPlanQual *epq = estate->es_evalPlanQual;
-
- if (epq->resultslot == NULL)
- {
- epq->resultslot = ExecInitExtraTupleSlot(estate);
- ExecSetSlotDescriptor(epq->resultslot, tupdesc);
- }
- else
- {
- TupleDesc oldtupdesc = epq->resultslot->tts_tupleDescriptor;
-
- ExecSetSlotDescriptor(epq->resultslot, tupdesc);
- FreeTupleDesc(oldtupdesc);
- }
-
- slot = ExecCopySlot(epq->resultslot, slot);
- }
+ slot = EvalPlanQualNext(epqstate);
/*
- * Shut it down ...
+ * Clear out the test tuple. This is needed in case the EPQ query
+ * is re-used to test a tuple for a different relation. (Not clear
+ * that can really happen, but let's be safe.)
*/
- EvalPlanQualPop(estate, subplanstate);
+ EvalPlanQualSetTuple(epqstate, rti, NULL);
return slot;
}
@@ -1427,55 +1417,29 @@ EvalPlanQual(EState *estate, Index rti,
* Fetch a copy of the newest version of an outdated tuple
*
* estate - executor state data
- * rti - rangetable index of table containing tuple
+ * relation - table containing tuple
+ * lockmode - requested tuple lock mode
* *tid - t_ctid from the outdated tuple (ie, next updated version)
* priorXmax - t_xmax from the outdated tuple
*
* Returns a palloc'd copy of the newest tuple version, or NULL if we find
* that there is no newest version (ie, the row was deleted not updated).
+ * If successful, we have locked the newest tuple version, so caller does not
+ * need to worry about it changing anymore.
*
- * XXX this does not lock the new row version ... wouldn't it be better if
- * it did? As-is, caller might have to repeat all its work.
+ * Note: properly, lockmode should be declared as enum LockTupleMode,
+ * but we use "int" to avoid having to include heapam.h in executor.h.
*/
HeapTuple
-EvalPlanQualFetch(EState *estate, Index rti,
+EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
ItemPointer tid, TransactionId priorXmax)
{
HeapTuple copyTuple = NULL;
- Relation relation;
HeapTupleData tuple;
SnapshotData SnapshotDirty;
- Assert(rti != 0);
-
/*
- * Find relation containing target tuple --- must be either a result
- * relation of the query, or a SELECT FOR UPDATE target
- */
- if (estate->es_result_relation_info != NULL &&
- estate->es_result_relation_info->ri_RangeTableIndex == rti)
- relation = estate->es_result_relation_info->ri_RelationDesc;
- else
- {
- ListCell *l;
-
- relation = NULL;
- foreach(l, estate->es_rowMarks)
- {
- ExecRowMark *erm = lfirst(l);
-
- if (erm->rti == rti)
- {
- relation = erm->relation;
- break;
- }
- }
- if (relation == NULL)
- elog(ERROR, "could not find RowMark for RT index %u", rti);
- }
-
- /*
- * fetch tid tuple
+ * fetch target tuple
*
* Loop here to deal with updated or busy tuples
*/
@@ -1487,6 +1451,10 @@ EvalPlanQualFetch(EState *estate, Index rti,
if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
{
+ HTSU_Result test;
+ ItemPointerData update_ctid;
+ TransactionId update_xmax;
+
/*
* If xmin isn't what we're expecting, the slot must have been
* recycled and reused for an unrelated tuple. This implies that
@@ -1536,6 +1504,49 @@ EvalPlanQualFetch(EState *estate, Index rti,
}
/*
+ * This is a live tuple, so now try to lock it.
+ */
+ test = heap_lock_tuple(relation, &tuple, &buffer,
+ &update_ctid, &update_xmax,
+ estate->es_output_cid,
+ lockmode, false);
+ /* We now have two pins on the buffer, get rid of one */
+ ReleaseBuffer(buffer);
+
+ switch (test)
+ {
+ case HeapTupleSelfUpdated:
+ /* treat it as deleted; do not process */
+ ReleaseBuffer(buffer);
+ return NULL;
+
+ case HeapTupleMayBeUpdated:
+ /* successfully locked */
+ break;
+
+ case HeapTupleUpdated:
+ ReleaseBuffer(buffer);
+ if (IsXactIsoLevelSerializable)
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+ if (!ItemPointerEquals(&update_ctid, &tuple.t_self))
+ {
+ /* it was updated, so look at the updated version */
+ tuple.t_self = update_ctid;
+ continue;
+ }
+ /* tuple was deleted, so give up */
+ return NULL;
+
+ default:
+ ReleaseBuffer(buffer);
+ elog(ERROR, "unrecognized heap_lock_tuple status: %u",
+ test);
+ return NULL; /* keep compiler quiet */
+ }
+
+ /*
* We got tuple - now copy it for use by recheck query.
*/
copyTuple = heap_copytuple(&tuple);
@@ -1570,7 +1581,7 @@ EvalPlanQualFetch(EState *estate, Index rti,
* mean that the row was updated or deleted by either a committed xact
* or our own xact. If it was deleted, we can ignore it; if it was
* updated then chain up to the next version and repeat the whole
- * test.
+ * process.
*
* As above, it should be safe to examine xmax and t_ctid without the
* buffer content lock, because they can't be changing.
@@ -1597,294 +1608,334 @@ EvalPlanQualFetch(EState *estate, Index rti,
}
/*
- * Push a new level of EPQ state, and prepare to execute the given subplan
+ * EvalPlanQualInit -- initialize during creation of a plan state node
+ * that might need to invoke EPQ processing.
+ * Note: subplan can be NULL if it will be set later with EvalPlanQualSetPlan.
*/
void
-EvalPlanQualPush(EState *estate, Index rti, PlanState *subplanstate)
+EvalPlanQualInit(EPQState *epqstate, EState *estate,
+ Plan *subplan, int epqParam)
{
- evalPlanQual *epq;
- bool endNode;
+ /* Mark the EPQ state inactive */
+ epqstate->estate = NULL;
+ epqstate->planstate = NULL;
+ epqstate->origslot = NULL;
+ /* ... and remember data that EvalPlanQualBegin will need */
+ epqstate->plan = subplan;
+ epqstate->rowMarks = NIL;
+ epqstate->epqParam = epqParam;
+}
- Assert(rti != 0);
+/*
+ * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
+ *
+ * We need this so that ModifyTable can deal with multiple subplans.
+ */
+void
+EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan)
+{
+ /* If we have a live EPQ query, shut it down */
+ EvalPlanQualEnd(epqstate);
+ /* And set/change the plan pointer */
+ epqstate->plan = subplan;
+}
- epq = estate->es_evalPlanQual;
- endNode = true;
+/*
+ * EvalPlanQualAddRowMark -- add an ExecRowMark that EPQ needs to handle.
+ *
+ * Currently, only non-locking RowMarks are supported.
+ */
+void
+EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm)
+{
+ if (RowMarkRequiresRowShareLock(erm->markType))
+ elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
+ epqstate->rowMarks = lappend(epqstate->rowMarks, erm);
+}
- if (epq != NULL && epq->rti == 0)
- {
- /* Top PQ stack entry is idle, so re-use it */
- Assert(epq->next == NULL);
- epq->rti = rti;
- endNode = false;
- }
+/*
+ * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
+ *
+ * NB: passed tuple must be palloc'd; it may get freed later
+ */
+void
+EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
+{
+ EState *estate = epqstate->estate;
- /*
- * If this is request for another RTE - Ra, - then we have to check wasn't
- * PlanQual requested for Ra already and if so then Ra' row was updated
- * again and we have to re-start old execution for Ra and forget all what
- * we done after Ra was suspended. Cool? -:))
- */
- if (epq != NULL && epq->rti != rti &&
- epq->estate->es_evTuple[rti - 1] != NULL)
- {
- do
- {
- evalPlanQual *oldepq;
-
- /* stop execution */
- EvalPlanQualStop(epq);
- /* pop previous PlanQual from the stack */
- oldepq = epq->next;
- Assert(oldepq && oldepq->rti != 0);
- /* push current PQ to freePQ stack */
- oldepq->free = epq;
- epq = oldepq;
- estate->es_evalPlanQual = epq;
- } while (epq->rti != rti);
- }
+ Assert(rti > 0);
/*
- * If we are requested for another RTE then we have to suspend execution
- * of current PlanQual and start execution for new one.
+ * free old test tuple, if any, and store new tuple where relation's
+ * scan node will see it
*/
- if (epq == NULL || epq->rti != rti)
- {
- /* try to reuse plan used previously */
- evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
-
- if (newepq == NULL) /* first call or freePQ stack is empty */
- {
- newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
- newepq->free = NULL;
- newepq->estate = NULL;
- newepq->planstate = NULL;
- newepq->origplanstate = NULL;
- newepq->resultslot = NULL;
- }
- else
- {
- /* recycle previously used PlanQual */
- Assert(newepq->estate == NULL);
- epq->free = NULL;
- }
- /* push current PQ to the stack */
- newepq->next = epq;
- epq = newepq;
- estate->es_evalPlanQual = epq;
- epq->rti = rti;
- endNode = false;
- }
+ if (estate->es_epqTuple[rti - 1] != NULL)
+ heap_freetuple(estate->es_epqTuple[rti - 1]);
+ estate->es_epqTuple[rti - 1] = tuple;
+ estate->es_epqTupleSet[rti - 1] = true;
+}
- Assert(epq->rti == rti);
- Assert(estate->es_evalPlanQual == epq);
+/*
+ * Fetch back the current test tuple (if any) for the specified RTI
+ */
+HeapTuple
+EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
+{
+ EState *estate = epqstate->estate;
- /*
- * Ok - we're requested for the same RTE. Unfortunately we still have to
- * end and restart execution of the plan, because ExecReScan wouldn't
- * ensure that upper plan nodes would reset themselves. We could make
- * that work if insertion of the target tuple were integrated with the
- * Param mechanism somehow, so that the upper plan nodes know that their
- * children's outputs have changed.
- *
- * Note that the stack of free evalPlanQual nodes is quite useless at the
- * moment, since it only saves us from pallocing/releasing the
- * evalPlanQual nodes themselves. But it will be useful once we implement
- * ReScan instead of end/restart for re-using PlanQual nodes.
- */
- if (endNode)
- {
- /* stop execution */
- EvalPlanQualStop(epq);
- }
+ Assert(rti > 0);
- /*
- * Initialize new recheck query.
- *
- * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
- * instead copy down changeable state from the top plan (including
- * es_result_relation_info) and reset locally changeable
- * state in the epq (including es_param_exec_vals, es_evTupleNull).
- */
- epq->origplanstate = subplanstate;
- EvalPlanQualStart(epq, estate, subplanstate->plan, epq->next);
+ return estate->es_epqTuple[rti - 1];
}
/*
- * Install one test tuple into current EPQ level
+ * Fetch the current row values for any non-locked relations that need
+ * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to contain the current result row (top-level row) that we need to recheck.
*/
void
-EvalPlanQualSetTuple(EState *estate, Index rti, HeapTuple tuple)
+EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
- evalPlanQual *epq = estate->es_evalPlanQual;
- EState *epqstate;
+ ListCell *l;
- Assert(rti != 0);
+ Assert(epqstate->origslot != NULL);
- /*
- * free old RTE' tuple, if any, and store target tuple where relation's
- * scan node will see it
- */
- epqstate = epq->estate;
- if (epqstate->es_evTuple[rti - 1] != NULL)
- heap_freetuple(epqstate->es_evTuple[rti - 1]);
- epqstate->es_evTuple[rti - 1] = tuple;
+ foreach(l, epqstate->rowMarks)
+ {
+ ExecRowMark *erm = (ExecRowMark *) lfirst(l);
+ Datum datum;
+ bool isNull;
+ HeapTupleData tuple;
+
+ /* clear any leftover test tuple for this rel */
+ EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
+
+ if (erm->relation)
+ {
+ Buffer buffer;
+
+ Assert(erm->markType == ROW_MARK_REFERENCE);
+
+ /* if child rel, must check whether it produced this row */
+ if (erm->rti != erm->prti)
+ {
+ Oid tableoid;
+
+ datum = ExecGetJunkAttribute(epqstate->origslot,
+ erm->toidAttNo,
+ &isNull);
+ /* non-locked rels could be on the inside of outer joins */
+ if (isNull)
+ continue;
+ tableoid = DatumGetObjectId(datum);
+
+ if (tableoid != RelationGetRelid(erm->relation))
+ {
+ /* this child is inactive right now */
+ continue;
+ }
+ }
+
+ /* fetch the tuple's ctid */
+ datum = ExecGetJunkAttribute(epqstate->origslot,
+ erm->ctidAttNo,
+ &isNull);
+ /* non-locked rels could be on the inside of outer joins */
+ if (isNull)
+ continue;
+ tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+
+ /* okay, fetch the tuple */
+ if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
+ false, NULL))
+ elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+ /* successful, copy and store tuple */
+ EvalPlanQualSetTuple(epqstate, erm->rti,
+ heap_copytuple(&tuple));
+ ReleaseBuffer(buffer);
+ }
+ else
+ {
+ HeapTupleHeader td;
+
+ Assert(erm->markType == ROW_MARK_COPY);
+
+ /* fetch the whole-row Var for the relation */
+ datum = ExecGetJunkAttribute(epqstate->origslot,
+ erm->wholeAttNo,
+ &isNull);
+ /* non-locked rels could be on the inside of outer joins */
+ if (isNull)
+ continue;
+ td = DatumGetHeapTupleHeader(datum);
+
+ /* build a temporary HeapTuple control structure */
+ tuple.t_len = HeapTupleHeaderGetDatumLength(td);
+ ItemPointerSetInvalid(&(tuple.t_self));
+ tuple.t_tableOid = InvalidOid;
+ tuple.t_data = td;
+
+ /* copy and store tuple */
+ EvalPlanQualSetTuple(epqstate, erm->rti,
+ heap_copytuple(&tuple));
+ }
+ }
}
/*
* Fetch the next row (if any) from EvalPlanQual testing
+ *
+ * (In practice, there should never be more than one row...)
*/
TupleTableSlot *
-EvalPlanQualNext(EState *estate)
+EvalPlanQualNext(EPQState *epqstate)
{
- evalPlanQual *epq = estate->es_evalPlanQual;
MemoryContext oldcontext;
TupleTableSlot *slot;
- Assert(epq->rti != 0);
-
- oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
- slot = ExecProcNode(epq->planstate);
+ oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
+ slot = ExecProcNode(epqstate->planstate);
MemoryContextSwitchTo(oldcontext);
return slot;
}
/*
- * Shut down and pop the specified level of EvalPlanQual machinery,
- * plus any levels nested within it
+ * Initialize or reset an EvalPlanQual state tree
*/
void
-EvalPlanQualPop(EState *estate, PlanState *subplanstate)
+EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
{
- evalPlanQual *epq = estate->es_evalPlanQual;
+ EState *estate = epqstate->estate;
- for (;;)
+ if (estate == NULL)
{
- PlanState *epqplanstate = epq->origplanstate;
- evalPlanQual *oldepq;
-
- Assert(epq->rti != 0);
-
- /* stop execution */
- EvalPlanQualStop(epq);
- epq->origplanstate = NULL;
- /* pop old PQ from the stack */
- oldepq = epq->next;
- if (oldepq == NULL)
- {
- /* this is the first (oldest) PQ - mark as free */
- epq->rti = 0;
- break;
- }
- Assert(oldepq->rti != 0);
- /* push current PQ to freePQ stack */
- oldepq->free = epq;
- epq = oldepq;
- estate->es_evalPlanQual = epq;
- if (epqplanstate == subplanstate)
- break;
+ /* First time through, so create a child EState */
+ EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
}
-}
-
-static void
-EndEvalPlanQual(EState *estate)
-{
- evalPlanQual *epq = estate->es_evalPlanQual;
-
- if (epq->rti == 0) /* plans already shutdowned */
+ else
{
- Assert(epq->next == NULL);
- return;
- }
+ /*
+ * We already have a suitable child EPQ tree, so just reset it.
+ */
+ int rtsize = list_length(parentestate->es_range_table);
+ PlanState *planstate = epqstate->planstate;
- for (;;)
- {
- evalPlanQual *oldepq;
-
- /* stop execution */
- EvalPlanQualStop(epq);
- epq->origplanstate = NULL;
- /* pop old PQ from the stack */
- oldepq = epq->next;
- if (oldepq == NULL)
+ MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
+
+ /* Recopy current values of parent parameters */
+ if (parentestate->es_plannedstmt->nParamExec > 0)
{
- /* this is the first (oldest) PQ - mark as free */
- epq->rti = 0;
- break;
+ int i = parentestate->es_plannedstmt->nParamExec;
+
+ while (--i >= 0)
+ {
+ /* copy value if any, but not execPlan link */
+ estate->es_param_exec_vals[i].value =
+ parentestate->es_param_exec_vals[i].value;
+ estate->es_param_exec_vals[i].isnull =
+ parentestate->es_param_exec_vals[i].isnull;
+ }
}
- Assert(oldepq->rti != 0);
- /* push current PQ to freePQ stack */
- oldepq->free = epq;
- epq = oldepq;
- estate->es_evalPlanQual = epq;
+
+ /*
+ * Mark child plan tree as needing rescan at all scan nodes. The
+ * first ExecProcNode will take care of actually doing the rescan.
+ */
+ planstate->chgParam = bms_add_member(planstate->chgParam,
+ epqstate->epqParam);
}
}
/*
- * Start execution of one level of PlanQual.
+ * Start execution of an EvalPlanQual plan tree.
*
* This is a cut-down version of ExecutorStart(): we copy some state from
* the top-level estate rather than initializing it fresh.
*/
static void
-EvalPlanQualStart(evalPlanQual *epq, EState *estate, Plan *planTree,
- evalPlanQual *priorepq)
+EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
- EState *epqstate;
+ EState *estate;
int rtsize;
MemoryContext oldcontext;
ListCell *l;
- rtsize = list_length(estate->es_range_table);
+ rtsize = list_length(parentestate->es_range_table);
- epq->estate = epqstate = CreateExecutorState();
+ epqstate->estate = estate = CreateExecutorState();
- oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/*
- * The epqstates share the top query's copy of unchanging state such as
+ * Child EPQ EStates share the parent's copy of unchanging state such as
* the snapshot, rangetable, result-rel info, and external Param info.
* They need their own copies of local state, including a tuple table,
* es_param_exec_vals, etc.
*/
- epqstate->es_direction = ForwardScanDirection;
- epqstate->es_snapshot = estate->es_snapshot;
- epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
- epqstate->es_range_table = estate->es_range_table;
- epqstate->es_junkFilter = estate->es_junkFilter;
- epqstate->es_output_cid = estate->es_output_cid;
- epqstate->es_result_relations = estate->es_result_relations;
- epqstate->es_num_result_relations = estate->es_num_result_relations;
- epqstate->es_result_relation_info = estate->es_result_relation_info;
+ estate->es_direction = ForwardScanDirection;
+ estate->es_snapshot = parentestate->es_snapshot;
+ estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
+ estate->es_range_table = parentestate->es_range_table;
+ estate->es_plannedstmt = parentestate->es_plannedstmt;
+ estate->es_junkFilter = parentestate->es_junkFilter;
+ estate->es_output_cid = parentestate->es_output_cid;
+ estate->es_result_relations = parentestate->es_result_relations;
+ estate->es_num_result_relations = parentestate->es_num_result_relations;
+ estate->es_result_relation_info = parentestate->es_result_relation_info;
/* es_trig_target_relations must NOT be copied */
- epqstate->es_param_list_info = estate->es_param_list_info;
- if (estate->es_plannedstmt->nParamExec > 0)
- epqstate->es_param_exec_vals = (ParamExecData *)
- palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
- epqstate->es_rowMarks = estate->es_rowMarks;
- epqstate->es_instrument = estate->es_instrument;
- epqstate->es_select_into = estate->es_select_into;
- epqstate->es_into_oids = estate->es_into_oids;
- epqstate->es_plannedstmt = estate->es_plannedstmt;
-
- /*
- * Each epqstate must have its own es_evTupleNull state, but all the stack
- * entries share es_evTuple state. This allows sub-rechecks to inherit
- * the value being examined by an outer recheck.
- */
- epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
- if (priorepq == NULL)
- /* first PQ stack entry */
- epqstate->es_evTuple = (HeapTuple *)
- palloc0(rtsize * sizeof(HeapTuple));
+ estate->es_rowMarks = parentestate->es_rowMarks;
+ estate->es_instrument = parentestate->es_instrument;
+ estate->es_select_into = parentestate->es_select_into;
+ estate->es_into_oids = parentestate->es_into_oids;
+
+ /*
+ * The external param list is simply shared from parent. The internal
+ * param workspace has to be local state, but we copy the initial values
+ * from the parent, so as to have access to any param values that were
+ * already set from other parts of the parent's plan tree.
+ */
+ estate->es_param_list_info = parentestate->es_param_list_info;
+ if (parentestate->es_plannedstmt->nParamExec > 0)
+ {
+ int i = parentestate->es_plannedstmt->nParamExec;
+
+ estate->es_param_exec_vals = (ParamExecData *)
+ palloc0(i * sizeof(ParamExecData));
+ while (--i >= 0)
+ {
+ /* copy value if any, but not execPlan link */
+ estate->es_param_exec_vals[i].value =
+ parentestate->es_param_exec_vals[i].value;
+ estate->es_param_exec_vals[i].isnull =
+ parentestate->es_param_exec_vals[i].isnull;
+ }
+ }
+
+ /*
+ * Each EState must have its own es_epqScanDone state, but if we have
+ * nested EPQ checks they should share es_epqTuple arrays. This allows
+ * sub-rechecks to inherit the values being examined by an outer recheck.
+ */
+ estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
+ if (parentestate->es_epqTuple != NULL)
+ {
+ estate->es_epqTuple = parentestate->es_epqTuple;
+ estate->es_epqTupleSet = parentestate->es_epqTupleSet;
+ }
else
- /* later stack entries share the same storage */
- epqstate->es_evTuple = priorepq->estate->es_evTuple;
+ {
+ estate->es_epqTuple = (HeapTuple *)
+ palloc0(rtsize * sizeof(HeapTuple));
+ estate->es_epqTupleSet = (bool *)
+ palloc0(rtsize * sizeof(bool));
+ }
/*
- * Each epqstate also has its own tuple table.
+ * Each estate also has its own tuple table.
*/
- epqstate->es_tupleTable = NIL;
+ estate->es_tupleTable = NIL;
/*
* Initialize private state information for each SubPlan. We must do this
@@ -1894,16 +1945,16 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, Plan *planTree,
* we intend to run, but since it's not easy to tell which, we just
* initialize them all.
*/
- Assert(epqstate->es_subplanstates == NIL);
- foreach(l, estate->es_plannedstmt->subplans)
+ Assert(estate->es_subplanstates == NIL);
+ foreach(l, parentestate->es_plannedstmt->subplans)
{
Plan *subplan = (Plan *) lfirst(l);
PlanState *subplanstate;
- subplanstate = ExecInitNode(subplan, epqstate, 0);
+ subplanstate = ExecInitNode(subplan, estate, 0);
- epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
- subplanstate);
+ estate->es_subplanstates = lappend(estate->es_subplanstates,
+ subplanstate);
}
/*
@@ -1911,48 +1962,47 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, Plan *planTree,
* part of the plan tree we need to run. This opens files, allocates
* storage and leaves us ready to start processing tuples.
*/
- epq->planstate = ExecInitNode(planTree, epqstate, 0);
+ epqstate->planstate = ExecInitNode(planTree, estate, 0);
MemoryContextSwitchTo(oldcontext);
}
/*
- * End execution of one level of PlanQual.
+ * EvalPlanQualEnd -- shut down at termination of parent plan state node,
+ * or if we are done with the current EPQ child.
*
* This is a cut-down version of ExecutorEnd(); basically we want to do most
* of the normal cleanup, but *not* close result relations (which we are
* just sharing from the outer query). We do, however, have to close any
* trigger target relations that got opened, since those are not shared.
+ * (There probably shouldn't be any of the latter, but just in case...)
*/
-static void
-EvalPlanQualStop(evalPlanQual *epq)
+void
+EvalPlanQualEnd(EPQState *epqstate)
{
- EState *epqstate = epq->estate;
+ EState *estate = epqstate->estate;
MemoryContext oldcontext;
ListCell *l;
- oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
+ if (estate == NULL)
+ return; /* idle, so nothing to do */
- ExecEndNode(epq->planstate);
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+ ExecEndNode(epqstate->planstate);
- foreach(l, epqstate->es_subplanstates)
+ foreach(l, estate->es_subplanstates)
{
PlanState *subplanstate = (PlanState *) lfirst(l);
ExecEndNode(subplanstate);
}
- /* throw away the per-epqstate tuple table completely */
- ExecResetTupleTable(epqstate->es_tupleTable, true);
- epqstate->es_tupleTable = NIL;
-
- if (epqstate->es_evTuple[epq->rti - 1] != NULL)
- {
- heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
- epqstate->es_evTuple[epq->rti - 1] = NULL;
- }
+ /* throw away the per-estate tuple table */
+ ExecResetTupleTable(estate->es_tupleTable, false);
- foreach(l, epqstate->es_trig_target_relations)
+ /* close any trigger target relations attached to this EState */
+ foreach(l, estate->es_trig_target_relations)
{
ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
@@ -1963,10 +2013,12 @@ EvalPlanQualStop(evalPlanQual *epq)
MemoryContextSwitchTo(oldcontext);
- FreeExecutorState(epqstate);
+ FreeExecutorState(estate);
- epq->estate = NULL;
- epq->planstate = NULL;
+ /* Mark EPQState idle */
+ epqstate->estate = NULL;
+ epqstate->planstate = NULL;
+ epqstate->origslot = NULL;
}
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index b9b67da26f0..fdfbd999f4f 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.252 2009/10/08 22:34:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.253 2009/10/26 02:26:29 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -660,7 +660,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
exprstate->evalfunc = ExecEvalWholeRowVar;
/* Fetch the value */
- return ExecEvalWholeRowVar(exprstate, econtext, isNull, isDone);
+ return (*exprstate->evalfunc) (exprstate, econtext, isNull, isDone);
}
}
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 32386accbbd..f7733569ef9 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.46 2009/04/02 20:59:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.47 2009/10/26 02:26:29 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,6 +26,62 @@
static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc);
+/*
+ * ExecScanFetch -- fetch next potential tuple
+ *
+ * This routine is concerned with substituting a test tuple if we are
+ * inside an EvalPlanQual recheck. If we aren't, just execute
+ * the access method's next-tuple routine.
+ */
+static inline TupleTableSlot *
+ExecScanFetch(ScanState *node,
+ ExecScanAccessMtd accessMtd,
+ ExecScanRecheckMtd recheckMtd)
+{
+ EState *estate = node->ps.state;
+
+ if (estate->es_epqTuple != NULL)
+ {
+ /*
+ * We are inside an EvalPlanQual recheck. Return the test tuple if
+ * one is available, after rechecking any access-method-specific
+ * conditions.
+ */
+ Index scanrelid = ((Scan *) node->ps.plan)->scanrelid;
+
+ Assert(scanrelid > 0);
+ if (estate->es_epqTupleSet[scanrelid - 1])
+ {
+ TupleTableSlot *slot = node->ss_ScanTupleSlot;
+
+ /* Return empty slot if we already returned a tuple */
+ if (estate->es_epqScanDone[scanrelid - 1])
+ return ExecClearTuple(slot);
+ /* Else mark to remember that we shouldn't return more */
+ estate->es_epqScanDone[scanrelid - 1] = true;
+
+ /* Return empty slot if we haven't got a test tuple */
+ if (estate->es_epqTuple[scanrelid - 1] == NULL)
+ return ExecClearTuple(slot);
+
+ /* Store test tuple in the plan node's scan slot */
+ ExecStoreTuple(estate->es_epqTuple[scanrelid - 1],
+ slot, InvalidBuffer, false);
+
+ /* Check if it meets the access-method conditions */
+ if (!(*recheckMtd) (node, slot))
+ ExecClearTuple(slot); /* would not be returned by scan */
+
+ return slot;
+ }
+ }
+
+ /*
+ * Run the node-type-specific access method function to get the next tuple
+ */
+ return (*accessMtd) (node);
+}
+
/* ----------------------------------------------------------------
* ExecScan
*
@@ -35,6 +91,10 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple
* The access method returns the next tuple and execScan() is
* responsible for checking the tuple returned against the qual-clause.
*
+ * A 'recheck method' must also be provided that can check an
+ * arbitrary tuple of the relation against any qual conditions
+ * that are implemented internal to the access method.
+ *
* Conditions:
* -- the "cursor" maintained by the AMI is positioned at the tuple
* returned previously.
@@ -46,7 +106,8 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple
*/
TupleTableSlot *
ExecScan(ScanState *node,
- ExecScanAccessMtd accessMtd) /* function returning a tuple */
+ ExecScanAccessMtd accessMtd, /* function returning a tuple */
+ ExecScanRecheckMtd recheckMtd)
{
ExprContext *econtext;
List *qual;
@@ -65,7 +126,7 @@ ExecScan(ScanState *node,
* all the overhead and return the raw scan tuple.
*/
if (!qual && !projInfo)
- return (*accessMtd) (node);
+ return ExecScanFetch(node, accessMtd, recheckMtd);
/*
* Check to see if we're still projecting out tuples from a previous scan
@@ -91,7 +152,7 @@ ExecScan(ScanState *node,
ResetExprContext(econtext);
/*
- * get a tuple from the access method loop until we obtain a tuple which
+ * get a tuple from the access method. Loop until we obtain a tuple that
* passes the qualification.
*/
for (;;)
@@ -100,7 +161,7 @@ ExecScan(ScanState *node,
CHECK_FOR_INTERRUPTS();
- slot = (*accessMtd) (node);
+ slot = ExecScanFetch(node, accessMtd, recheckMtd);
/*
* if the slot returned by the accessMtd contains NULL, then it means
@@ -249,3 +310,28 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc
return true;
}
+
+/*
+ * ExecScanReScan
+ *
+ * This must be called within the ReScan function of any plan node type
+ * that uses ExecScan().
+ */
+void
+ExecScanReScan(ScanState *node)
+{
+ EState *estate = node->ps.state;
+
+ /* Stop projecting any tuples from SRFs in the targetlist */
+ node->ps.ps_TupFromTlist = false;
+
+ /* Rescan EvalPlanQual tuple if we're inside an EvalPlanQual recheck */
+ if (estate->es_epqScanDone != NULL)
+ {
+ Index scanrelid = ((Scan *) node->ps.plan)->scanrelid;
+
+ Assert(scanrelid > 0);
+
+ estate->es_epqScanDone[scanrelid - 1] = false;
+ }
+}
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 4afce5b9526..d3352f1f5d4 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.164 2009/10/12 18:10:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.165 2009/10/26 02:26:29 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -105,6 +105,7 @@ CreateExecutorState(void)
estate->es_snapshot = SnapshotNow;
estate->es_crosscheck_snapshot = InvalidSnapshot; /* no crosscheck */
estate->es_range_table = NIL;
+ estate->es_plannedstmt = NULL;
estate->es_junkFilter = NULL;
@@ -139,10 +140,9 @@ CreateExecutorState(void)
estate->es_per_tuple_exprcontext = NULL;
- estate->es_plannedstmt = NULL;
- estate->es_evalPlanQual = NULL;
- estate->es_evTupleNull = NULL;
- estate->es_evTuple = NULL;
+ estate->es_epqTuple = NULL;
+ estate->es_epqTupleSet = NULL;
+ estate->es_epqScanDone = NULL;
/*
* Return the executor state structure
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 6adc7d66ee9..98d9219e478 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.36 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.37 2009/10/26 02:26:30 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,10 +60,8 @@ static void bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres);
static TupleTableSlot *
BitmapHeapNext(BitmapHeapScanState *node)
{
- EState *estate;
ExprContext *econtext;
HeapScanDesc scan;
- Index scanrelid;
TIDBitmap *tbm;
TBMIterator *tbmiterator;
TBMIterateResult *tbmres;
@@ -74,46 +72,15 @@ BitmapHeapNext(BitmapHeapScanState *node)
/*
* extract necessary information from index scan node
*/
- estate = node->ss.ps.state;
econtext = node->ss.ps.ps_ExprContext;
slot = node->ss.ss_ScanTupleSlot;
scan = node->ss.ss_currentScanDesc;
- scanrelid = ((BitmapHeapScan *) node->ss.ps.plan)->scan.scanrelid;
tbm = node->tbm;
tbmiterator = node->tbmiterator;
tbmres = node->tbmres;
prefetch_iterator = node->prefetch_iterator;
/*
- * Check if we are evaluating PlanQual for tuple of this relation.
- * Additional checking is not good, but no other way for now. We could
- * introduce new nodes for this case and handle IndexScan --> NewNode
- * switching in Init/ReScan plan...
- */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- if (estate->es_evTupleNull[scanrelid - 1])
- return ExecClearTuple(slot);
-
- ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
- slot, InvalidBuffer, false);
-
- /* Does the tuple meet the original qual conditions? */
- econtext->ecxt_scantuple = slot;
-
- ResetExprContext(econtext);
-
- if (!ExecQual(node->bitmapqualorig, econtext, false))
- ExecClearTuple(slot); /* would not be returned by scan */
-
- /* Flag for the next call that no more tuples */
- estate->es_evTupleNull[scanrelid - 1] = true;
-
- return slot;
- }
-
- /*
* If we haven't yet performed the underlying index scan, do it, and begin
* the iteration over the bitmap.
*
@@ -419,6 +386,27 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
scan->rs_ntuples = ntup;
}
+/*
+ * BitmapHeapRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+BitmapHeapRecheck(BitmapHeapScanState *node, TupleTableSlot *slot)
+{
+ ExprContext *econtext;
+
+ /*
+ * extract necessary information from index scan node
+ */
+ econtext = node->ss.ps.ps_ExprContext;
+
+ /* Does the tuple meet the original qual conditions? */
+ econtext->ecxt_scantuple = slot;
+
+ ResetExprContext(econtext);
+
+ return ExecQual(node->bitmapqualorig, econtext, false);
+}
+
/* ----------------------------------------------------------------
* ExecBitmapHeapScan(node)
* ----------------------------------------------------------------
@@ -426,10 +414,9 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
TupleTableSlot *
ExecBitmapHeapScan(BitmapHeapScanState *node)
{
- /*
- * use BitmapHeapNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) BitmapHeapNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) BitmapHeapNext,
+ (ExecScanRecheckMtd) BitmapHeapRecheck);
}
/* ----------------------------------------------------------------
@@ -439,14 +426,6 @@ ExecBitmapHeapScan(BitmapHeapScanState *node)
void
ExecBitmapHeapReScan(BitmapHeapScanState *node, ExprContext *exprCtxt)
{
- EState *estate;
- Index scanrelid;
-
- estate = node->ss.ps.state;
- scanrelid = ((BitmapHeapScan *) node->ss.ps.plan)->scan.scanrelid;
-
- node->ss.ps.ps_TupFromTlist = false;
-
/*
* If we are being passed an outer tuple, link it into the "regular"
* per-tuple econtext for possible qual eval.
@@ -459,13 +438,6 @@ ExecBitmapHeapReScan(BitmapHeapScanState *node, ExprContext *exprCtxt)
stdecontext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
}
- /* If this is re-scanning of PlanQual ... */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- estate->es_evTupleNull[scanrelid - 1] = false;
- }
-
/* rescan to release any page pin */
heap_rescan(node->ss.ss_currentScanDesc, NULL);
@@ -480,6 +452,8 @@ ExecBitmapHeapReScan(BitmapHeapScanState *node, ExprContext *exprCtxt)
node->tbmres = NULL;
node->prefetch_iterator = NULL;
+ ExecScanReScan(&node->ss);
+
/*
* Always rescan the input immediately, to ensure we can pass down any
* outer tuple that might be used in index quals.
diff --git a/src/backend/executor/nodeCtescan.c b/src/backend/executor/nodeCtescan.c
index 725840fef9c..7d5be7ffaba 100644
--- a/src/backend/executor/nodeCtescan.c
+++ b/src/backend/executor/nodeCtescan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeCtescan.c,v 1.6 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeCtescan.c,v 1.7 2009/10/26 02:26:30 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -131,21 +131,30 @@ CteScanNext(CteScanState *node)
return ExecClearTuple(slot);
}
+/*
+ * CteScanRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+CteScanRecheck(CteScanState *node, TupleTableSlot *slot)
+{
+ /* nothing to check */
+ return true;
+}
+
/* ----------------------------------------------------------------
* ExecCteScan(node)
*
* Scans the CTE sequentially and returns the next qualifying tuple.
- * It calls the ExecScan() routine and passes it the access method
- * which retrieves tuples sequentially.
+ * We call the ExecScan() routine and pass it the appropriate
+ * access method functions.
* ----------------------------------------------------------------
*/
TupleTableSlot *
ExecCteScan(CteScanState *node)
{
- /*
- * use CteScanNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) CteScanNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) CteScanNext,
+ (ExecScanRecheckMtd) CteScanRecheck);
}
@@ -300,7 +309,8 @@ ExecCteScanReScan(CteScanState *node, ExprContext *exprCtxt)
Tuplestorestate *tuplestorestate = node->leader->cte_table;
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
- node->ss.ps.ps_TupFromTlist = false;
+
+ ExecScanReScan(&node->ss);
if (node->leader == node)
{
diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c
index 5e81283a4c3..1bedb738890 100644
--- a/src/backend/executor/nodeFunctionscan.c
+++ b/src/backend/executor/nodeFunctionscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.53 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.54 2009/10/26 02:26:30 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,23 +79,31 @@ FunctionNext(FunctionScanState *node)
return slot;
}
+/*
+ * FunctionRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+FunctionRecheck(FunctionScanState *node, TupleTableSlot *slot)
+{
+ /* nothing to check */
+ return true;
+}
+
/* ----------------------------------------------------------------
* ExecFunctionScan(node)
*
* Scans the function sequentially and returns the next qualifying
* tuple.
- * It calls the ExecScan() routine and passes it the access method
- * which retrieves tuples sequentially.
- *
+ * We call the ExecScan() routine and pass it the appropriate
+ * access method functions.
+ * ----------------------------------------------------------------
*/
-
TupleTableSlot *
ExecFunctionScan(FunctionScanState *node)
{
- /*
- * use FunctionNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) FunctionNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) FunctionNext,
+ (ExecScanRecheckMtd) FunctionRecheck);
}
/* ----------------------------------------------------------------
@@ -256,7 +264,8 @@ void
ExecFunctionReScan(FunctionScanState *node, ExprContext *exprCtxt)
{
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
- node->ss.ps.ps_TupFromTlist = false;
+
+ ExecScanReScan(&node->ss);
/*
* If we haven't materialized yet, just return.
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 0520b726cfa..b136825dc8f 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.135 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.136 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,6 @@ IndexNext(IndexScanState *node)
ExprContext *econtext;
ScanDirection direction;
IndexScanDesc scandesc;
- Index scanrelid;
HeapTuple tuple;
TupleTableSlot *slot;
@@ -72,36 +71,6 @@ IndexNext(IndexScanState *node)
scandesc = node->iss_ScanDesc;
econtext = node->ss.ps.ps_ExprContext;
slot = node->ss.ss_ScanTupleSlot;
- scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;
-
- /*
- * Check if we are evaluating PlanQual for tuple of this relation.
- * Additional checking is not good, but no other way for now. We could
- * introduce new nodes for this case and handle IndexScan --> NewNode
- * switching in Init/ReScan plan...
- */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- if (estate->es_evTupleNull[scanrelid - 1])
- return ExecClearTuple(slot);
-
- ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
- slot, InvalidBuffer, false);
-
- /* Does the tuple meet the indexqual condition? */
- econtext->ecxt_scantuple = slot;
-
- ResetExprContext(econtext);
-
- if (!ExecQual(node->indexqualorig, econtext, false))
- ExecClearTuple(slot); /* would not be returned by scan */
-
- /* Flag for the next call that no more tuples */
- estate->es_evTupleNull[scanrelid - 1] = true;
-
- return slot;
- }
/*
* ok, now that we have what we need, fetch the next tuple.
@@ -140,6 +109,27 @@ IndexNext(IndexScanState *node)
return ExecClearTuple(slot);
}
+/*
+ * IndexRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+IndexRecheck(IndexScanState *node, TupleTableSlot *slot)
+{
+ ExprContext *econtext;
+
+ /*
+ * extract necessary information from index scan node
+ */
+ econtext = node->ss.ps.ps_ExprContext;
+
+ /* Does the tuple meet the indexqual condition? */
+ econtext->ecxt_scantuple = slot;
+
+ ResetExprContext(econtext);
+
+ return ExecQual(node->indexqualorig, econtext, false);
+}
+
/* ----------------------------------------------------------------
* ExecIndexScan(node)
* ----------------------------------------------------------------
@@ -153,10 +143,9 @@ ExecIndexScan(IndexScanState *node)
if (node->iss_NumRuntimeKeys != 0 && !node->iss_RuntimeKeysReady)
ExecReScan((PlanState *) node, NULL);
- /*
- * use IndexNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) IndexNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) IndexNext,
+ (ExecScanRecheckMtd) IndexRecheck);
}
/* ----------------------------------------------------------------
@@ -172,15 +161,9 @@ ExecIndexScan(IndexScanState *node)
void
ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
{
- EState *estate;
ExprContext *econtext;
- Index scanrelid;
- estate = node->ss.ps.state;
econtext = node->iss_RuntimeContext; /* context for runtime keys */
- scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;
-
- node->ss.ps.ps_TupFromTlist = false;
if (econtext)
{
@@ -216,16 +199,10 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
node->iss_NumRuntimeKeys);
node->iss_RuntimeKeysReady = true;
- /* If this is re-scanning of PlanQual ... */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- estate->es_evTupleNull[scanrelid - 1] = false;
- return;
- }
-
/* reset index scan */
index_rescan(node->iss_ScanDesc, node->iss_ScanKeys);
+
+ ExecScanReScan(&node->ss);
}
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 80f7e3cdafb..f38d34a0475 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeLockRows.c,v 1.1 2009/10/12 18:10:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeLockRows.c,v 1.2 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,6 +25,7 @@
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "storage/bufmgr.h"
+#include "utils/tqual.h"
/* ----------------------------------------------------------------
@@ -37,7 +38,7 @@ ExecLockRows(LockRowsState *node)
TupleTableSlot *slot;
EState *estate;
PlanState *outerPlan;
- bool epq_pushed;
+ bool epq_started;
ListCell *lc;
/*
@@ -47,30 +48,19 @@ ExecLockRows(LockRowsState *node)
outerPlan = outerPlanState(node);
/*
- * Get next tuple from subplan, if any; but if we are evaluating
- * an EvalPlanQual substitution, first finish that.
+ * Get next tuple from subplan, if any.
*/
lnext:
- if (node->lr_useEvalPlan)
- {
- slot = EvalPlanQualNext(estate);
- if (TupIsNull(slot))
- {
- EvalPlanQualPop(estate, outerPlan);
- node->lr_useEvalPlan = false;
- slot = ExecProcNode(outerPlan);
- }
- }
- else
- slot = ExecProcNode(outerPlan);
+ slot = ExecProcNode(outerPlan);
if (TupIsNull(slot))
return NULL;
/*
- * Attempt to lock the source tuple(s).
+ * Attempt to lock the source tuple(s). (Note we only have locking
+ * rowmarks in lr_rowMarks.)
*/
- epq_pushed = false;
+ epq_started = false;
foreach(lc, node->lr_rowMarks)
{
ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
@@ -84,6 +74,10 @@ lnext:
HTSU_Result test;
HeapTuple copyTuple;
+ /* clear any leftover test tuple for this rel */
+ if (node->lr_epqstate.estate != NULL)
+ EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, NULL);
+
/* if child rel, must check whether it produced this row */
if (erm->rti != erm->prti)
{
@@ -115,7 +109,7 @@ lnext:
tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
/* okay, try to lock the tuple */
- if (erm->forUpdate)
+ if (erm->markType == ROW_MARK_EXCLUSIVE)
lockmode = LockTupleExclusive;
else
lockmode = LockTupleShared;
@@ -129,8 +123,6 @@ lnext:
{
case HeapTupleSelfUpdated:
/* treat it as deleted; do not process */
- if (epq_pushed)
- EvalPlanQualPop(estate, outerPlan);
goto lnext;
case HeapTupleMayBeUpdated:
@@ -146,35 +138,33 @@ lnext:
&tuple.t_self))
{
/* Tuple was deleted, so don't return it */
- if (epq_pushed)
- EvalPlanQualPop(estate, outerPlan);
goto lnext;
}
- /* updated, so look at updated version */
- copyTuple = EvalPlanQualFetch(estate, erm->rti,
+ /* updated, so fetch and lock the updated version */
+ copyTuple = EvalPlanQualFetch(estate, erm->relation, lockmode,
&update_ctid, update_xmax);
if (copyTuple == NULL)
{
/* Tuple was deleted, so don't return it */
- if (epq_pushed)
- EvalPlanQualPop(estate, outerPlan);
goto lnext;
}
+ /* remember the actually locked tuple's TID */
+ tuple.t_self = copyTuple->t_self;
/*
- * Need to run a recheck subquery.
- * Find or create a PQ stack entry.
+ * Need to run a recheck subquery. Initialize EPQ state
+ * if we didn't do so already.
*/
- if (!epq_pushed)
+ if (!epq_started)
{
- EvalPlanQualPush(estate, erm->rti, outerPlan);
- epq_pushed = true;
+ EvalPlanQualBegin(&node->lr_epqstate, estate);
+ epq_started = true;
}
/* Store target tuple for relation's scan node */
- EvalPlanQualSetTuple(estate, erm->rti, copyTuple);
+ EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, copyTuple);
/* Continue loop until we have all target tuples */
break;
@@ -188,11 +178,52 @@ lnext:
erm->curCtid = tuple.t_self;
}
- /* If we need to do EvalPlanQual testing, loop back to do that */
- if (epq_pushed)
+ /*
+ * If we need to do EvalPlanQual testing, do so.
+ */
+ if (epq_started)
{
- node->lr_useEvalPlan = true;
- goto lnext;
+ /*
+ * First, fetch a copy of any rows that were successfully locked
+ * without any update having occurred. (We do this in a separate
+ * pass so as to avoid overhead in the common case where there are
+ * no concurrent updates.)
+ */
+ foreach(lc, node->lr_rowMarks)
+ {
+ ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
+ HeapTupleData tuple;
+ Buffer buffer;
+
+ if (EvalPlanQualGetTuple(&node->lr_epqstate, erm->rti) != NULL)
+ continue; /* it was updated and fetched above */
+
+ /* okay, fetch the tuple */
+ tuple.t_self = erm->curCtid;
+ if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
+ false, NULL))
+ elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+ /* successful, copy and store tuple */
+ EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti,
+ heap_copytuple(&tuple));
+ ReleaseBuffer(buffer);
+ }
+ /*
+ * Now fetch any non-locked source rows --- the EPQ logic knows
+ * how to do that.
+ */
+ EvalPlanQualSetSlot(&node->lr_epqstate, slot);
+ EvalPlanQualFetchRowMarks(&node->lr_epqstate);
+ /*
+ * And finally we can re-evaluate the tuple.
+ */
+ slot = EvalPlanQualNext(&node->lr_epqstate);
+ if (TupIsNull(slot))
+ {
+ /* Updated tuple fails qual, so ignore it and go on */
+ goto lnext;
+ }
}
/* Got all locks, so return the current tuple */
@@ -210,8 +241,7 @@ LockRowsState *
ExecInitLockRows(LockRows *node, EState *estate, int eflags)
{
LockRowsState *lrstate;
- Plan *outerPlan;
- JunkFilter *j;
+ Plan *outerPlan = outerPlan(node);
ListCell *lc;
/* check for unsupported flags */
@@ -223,7 +253,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
lrstate = makeNode(LockRowsState);
lrstate->ps.plan = (Plan *) node;
lrstate->ps.state = estate;
- lrstate->lr_useEvalPlan = false;
+ EvalPlanQualInit(&lrstate->lr_epqstate, estate, outerPlan, node->epqParam);
/*
* Miscellaneous initialization
@@ -239,7 +269,6 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
/*
* then initialize outer plan
*/
- outerPlan = outerPlan(node);
outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);
/*
@@ -250,28 +279,18 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
lrstate->ps.ps_ProjInfo = NULL;
/*
- * Initialize a junkfilter that we'll use to extract the ctid junk
- * attributes. (We won't actually apply the filter to remove the
- * junk, we just pass the rows on as-is. This is because the
- * junkfilter isn't smart enough to not remove junk attrs that
- * might be needed further up.)
- */
- j = ExecInitJunkFilter(outerPlan->targetlist, false,
- ExecInitExtraTupleSlot(estate));
- lrstate->lr_junkFilter = j;
-
- /*
* Locate the ExecRowMark(s) that this node is responsible for.
* (InitPlan should already have built the global list of ExecRowMarks.)
*/
lrstate->lr_rowMarks = NIL;
foreach(lc, node->rowMarks)
{
- RowMarkClause *rc = (RowMarkClause *) lfirst(lc);
+ PlanRowMark *rc = (PlanRowMark *) lfirst(lc);
ExecRowMark *erm = NULL;
- char resname[32];
ListCell *lce;
+ Assert(IsA(rc, PlanRowMark));
+
/* ignore "parent" rowmarks; they are irrelevant at runtime */
if (rc->isParent)
continue;
@@ -279,36 +298,24 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
foreach(lce, estate->es_rowMarks)
{
erm = (ExecRowMark *) lfirst(lce);
- if (erm->rti == rc->rti &&
- erm->prti == rc->prti &&
- erm->rowmarkId == rc->rowmarkId)
+ if (erm->rti == rc->rti)
break;
erm = NULL;
}
if (erm == NULL)
- elog(ERROR, "failed to find ExecRowMark for RowMarkClause");
- if (AttributeNumberIsValid(erm->ctidAttNo))
- elog(ERROR, "ExecRowMark is already claimed");
-
- /* Locate the junk attribute columns in the subplan output */
-
- /* always need the ctid */
- snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
- erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
- if (!AttributeNumberIsValid(erm->ctidAttNo))
- elog(ERROR, "could not find junk \"%s\" column",
- resname);
- /* if child relation, need tableoid too */
- if (erm->rti != erm->prti)
- {
- snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
- erm->toidAttNo = ExecFindJunkAttribute(j, resname);
- if (!AttributeNumberIsValid(erm->toidAttNo))
- elog(ERROR, "could not find junk \"%s\" column",
- resname);
- }
-
- lrstate->lr_rowMarks = lappend(lrstate->lr_rowMarks, erm);
+ elog(ERROR, "failed to find ExecRowMark for PlanRowMark %u",
+ rc->rti);
+
+ /*
+ * Only locking rowmarks go into our own list. Non-locking marks
+ * are passed off to the EvalPlanQual machinery. This is because
+ * we don't want to bother fetching non-locked rows unless we
+ * actually have to do an EPQ recheck.
+ */
+ if (RowMarkRequiresRowShareLock(erm->markType))
+ lrstate->lr_rowMarks = lappend(lrstate->lr_rowMarks, erm);
+ else
+ EvalPlanQualAddRowMark(&lrstate->lr_epqstate, erm);
}
return lrstate;
@@ -324,6 +331,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
void
ExecEndLockRows(LockRowsState *node)
{
+ EvalPlanQualEnd(&node->lr_epqstate);
ExecEndNode(outerPlanState(node));
}
@@ -331,8 +339,6 @@ ExecEndLockRows(LockRowsState *node)
void
ExecReScanLockRows(LockRowsState *node, ExprContext *exprCtxt)
{
- node->lr_useEvalPlan = false;
-
/*
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index a9fd8c4974f..3f1f9c093ee 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeModifyTable.c,v 1.1 2009/10/10 01:43:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeModifyTable.c,v 1.2 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -273,7 +273,7 @@ ExecInsert(TupleTableSlot *slot,
static TupleTableSlot *
ExecDelete(ItemPointer tupleid,
TupleTableSlot *planSlot,
- PlanState *subplanstate,
+ EPQState *epqstate,
EState *estate)
{
ResultRelInfo *resultRelInfo;
@@ -294,7 +294,7 @@ ExecDelete(ItemPointer tupleid,
{
bool dodelete;
- dodelete = ExecBRDeleteTriggers(estate, subplanstate, resultRelInfo,
+ dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
tupleid);
if (!dodelete) /* "do nothing" */
@@ -329,13 +329,14 @@ ldelete:;
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- else if (!ItemPointerEquals(tupleid, &update_ctid))
+ if (!ItemPointerEquals(tupleid, &update_ctid))
{
TupleTableSlot *epqslot;
epqslot = EvalPlanQual(estate,
+ epqstate,
+ resultRelationDesc,
resultRelInfo->ri_RangeTableIndex,
- subplanstate,
&update_ctid,
update_xmax);
if (!TupIsNull(epqslot))
@@ -416,7 +417,7 @@ static TupleTableSlot *
ExecUpdate(ItemPointer tupleid,
TupleTableSlot *slot,
TupleTableSlot *planSlot,
- PlanState *subplanstate,
+ EPQState *epqstate,
EState *estate)
{
HeapTuple tuple;
@@ -451,7 +452,7 @@ ExecUpdate(ItemPointer tupleid,
{
HeapTuple newtuple;
- newtuple = ExecBRUpdateTriggers(estate, subplanstate, resultRelInfo,
+ newtuple = ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
tupleid, tuple);
if (newtuple == NULL) /* "do nothing" */
@@ -515,13 +516,14 @@ lreplace:;
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
- else if (!ItemPointerEquals(tupleid, &update_ctid))
+ if (!ItemPointerEquals(tupleid, &update_ctid))
{
TupleTableSlot *epqslot;
epqslot = EvalPlanQual(estate,
+ epqstate,
+ resultRelationDesc,
resultRelInfo->ri_RangeTableIndex,
- subplanstate,
&update_ctid,
update_xmax);
if (!TupIsNull(epqslot))
@@ -685,12 +687,14 @@ ExecModifyTable(ModifyTableState *node)
estate->es_result_relation_info++;
subplanstate = node->mt_plans[node->mt_whichplan];
junkfilter = estate->es_result_relation_info->ri_junkFilter;
+ EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan);
continue;
}
else
break;
}
+ EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
slot = planSlot;
if (junkfilter != NULL)
@@ -728,11 +732,11 @@ ExecModifyTable(ModifyTableState *node)
break;
case CMD_UPDATE:
slot = ExecUpdate(tupleid, slot, planSlot,
- subplanstate, estate);
+ &node->mt_epqstate, estate);
break;
case CMD_DELETE:
slot = ExecDelete(tupleid, planSlot,
- subplanstate, estate);
+ &node->mt_epqstate, estate);
break;
default:
elog(ERROR, "unknown operation");
@@ -785,7 +789,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* a subplan tree to EvalPlanQual, instead. Use a runtime test not just
* Assert because this condition is easy to miss in testing ...
*/
- if (estate->es_evTuple != NULL)
+ if (estate->es_epqTuple != NULL)
elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
/*
@@ -799,6 +803,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
mtstate->mt_nplans = nplans;
mtstate->operation = operation;
+ /* set up epqstate with dummy subplan pointer for the moment */
+ EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, node->epqParam);
mtstate->fireBSTriggers = true;
/* For the moment, assume our targets are exactly the global result rels */
@@ -823,6 +829,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/* select first subplan */
mtstate->mt_whichplan = 0;
subplan = (Plan *) linitial(node->plans);
+ EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan);
/*
* Initialize RETURNING projections if needed.
@@ -879,6 +886,38 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
}
/*
+ * If we have any secondary relations in an UPDATE or DELETE, they need
+ * to be treated like non-locked relations in SELECT FOR UPDATE, ie,
+ * the EvalPlanQual mechanism needs to be told about them. Locate
+ * the relevant ExecRowMarks.
+ */
+ foreach(l, node->rowMarks)
+ {
+ PlanRowMark *rc = (PlanRowMark *) lfirst(l);
+ ExecRowMark *erm = NULL;
+ ListCell *lce;
+
+ Assert(IsA(rc, PlanRowMark));
+
+ /* ignore "parent" rowmarks; they are irrelevant at runtime */
+ if (rc->isParent)
+ continue;
+
+ foreach(lce, estate->es_rowMarks)
+ {
+ erm = (ExecRowMark *) lfirst(lce);
+ if (erm->rti == rc->rti)
+ break;
+ erm = NULL;
+ }
+ if (erm == NULL)
+ elog(ERROR, "failed to find ExecRowMark for PlanRowMark %u",
+ rc->rti);
+
+ EvalPlanQualAddRowMark(&mtstate->mt_epqstate, erm);
+ }
+
+ /*
* Initialize the junk filter(s) if needed. INSERT queries need a filter
* if there are any junk attrs in the tlist. UPDATE and DELETE
* always need a filter, since there's always a junk 'ctid' attribute
@@ -988,6 +1027,11 @@ ExecEndModifyTable(ModifyTableState *node)
ExecClearTuple(node->ps.ps_ResultTupleSlot);
/*
+ * Terminate EPQ execution if active
+ */
+ EvalPlanQualEnd(&node->mt_epqstate);
+
+ /*
* shut down subplans
*/
for (i=0; i<node->mt_nplans; i++)
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index f20cc058498..22d3ec76487 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.67 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.68 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,6 +36,7 @@ static TupleTableSlot *SeqNext(SeqScanState *node);
* Scan Support
* ----------------------------------------------------------------
*/
+
/* ----------------------------------------------------------------
* SeqNext
*
@@ -47,7 +48,6 @@ SeqNext(SeqScanState *node)
{
HeapTuple tuple;
HeapScanDesc scandesc;
- Index scanrelid;
EState *estate;
ScanDirection direction;
TupleTableSlot *slot;
@@ -55,40 +55,13 @@ SeqNext(SeqScanState *node)
/*
* get information from the estate and scan state
*/
- estate = node->ps.state;
scandesc = node->ss_currentScanDesc;
- scanrelid = ((SeqScan *) node->ps.plan)->scanrelid;
+ estate = node->ps.state;
direction = estate->es_direction;
slot = node->ss_ScanTupleSlot;
/*
- * Check if we are evaluating PlanQual for tuple of this relation.
- * Additional checking is not good, but no other way for now. We could
- * introduce new nodes for this case and handle SeqScan --> NewNode
- * switching in Init/ReScan plan...
- */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- if (estate->es_evTupleNull[scanrelid - 1])
- return ExecClearTuple(slot);
-
- ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
- slot, InvalidBuffer, false);
-
- /*
- * Note that unlike IndexScan, SeqScan never use keys in
- * heap_beginscan (and this is very bad) - so, here we do not check
- * are keys ok or not.
- */
-
- /* Flag for the next call that no more tuples */
- estate->es_evTupleNull[scanrelid - 1] = true;
- return slot;
- }
-
- /*
- * get the next tuple from the access methods
+ * get the next tuple from the table
*/
tuple = heap_getnext(scandesc, direction);
@@ -112,23 +85,35 @@ SeqNext(SeqScanState *node)
return slot;
}
+/*
+ * SeqRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
+{
+ /*
+ * Note that unlike IndexScan, SeqScan never use keys in
+ * heap_beginscan (and this is very bad) - so, here we do not check
+ * are keys ok or not.
+ */
+ return true;
+}
+
/* ----------------------------------------------------------------
* ExecSeqScan(node)
*
* Scans the relation sequentially and returns the next qualifying
* tuple.
- * It calls the ExecScan() routine and passes it the access method
- * which retrieve tuples sequentially.
- *
+ * We call the ExecScan() routine and pass it the appropriate
+ * access method functions.
+ * ----------------------------------------------------------------
*/
-
TupleTableSlot *
ExecSeqScan(SeqScanState *node)
{
- /*
- * use SeqNext as access method
- */
- return ExecScan((ScanState *) node, (ExecScanAccessMtd) SeqNext);
+ return ExecScan((ScanState *) node,
+ (ExecScanAccessMtd) SeqNext,
+ (ExecScanRecheckMtd) SeqRecheck);
}
/* ----------------------------------------------------------------
@@ -279,27 +264,14 @@ ExecEndSeqScan(SeqScanState *node)
void
ExecSeqReScan(SeqScanState *node, ExprContext *exprCtxt)
{
- EState *estate;
- Index scanrelid;
HeapScanDesc scan;
- estate = node->ps.state;
- scanrelid = ((SeqScan *) node->ps.plan)->scanrelid;
-
- node->ps.ps_TupFromTlist = false;
-
- /* If this is re-scanning of PlanQual ... */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- estate->es_evTupleNull[scanrelid - 1] = false;
- return;
- }
-
scan = node->ss_currentScanDesc;
heap_rescan(scan, /* scan desc */
NULL); /* new scan keys */
+
+ ExecScanReScan((ScanState *) node);
}
/* ----------------------------------------------------------------
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 15929dedffe..402c24e6285 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.42 2009/10/12 18:10:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.43 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,41 +48,43 @@ SubqueryNext(SubqueryScanState *node)
TupleTableSlot *slot;
/*
- * We need not support EvalPlanQual here, since we are not scanning a real
- * relation.
- */
-
- /*
* Get the next tuple from the sub-query.
*/
slot = ExecProcNode(node->subplan);
/*
- * We just overwrite our ScanTupleSlot with the subplan's result slot,
- * rather than expending the cycles for ExecCopySlot().
+ * We just return the subplan's result slot, rather than expending
+ * extra cycles for ExecCopySlot(). (Our own ScanTupleSlot is used
+ * only for EvalPlanQual rechecks.)
*/
- node->ss.ss_ScanTupleSlot = slot;
-
return slot;
}
+/*
+ * SubqueryRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+SubqueryRecheck(SubqueryScanState *node, TupleTableSlot *slot)
+{
+ /* nothing to check */
+ return true;
+}
+
/* ----------------------------------------------------------------
* ExecSubqueryScan(node)
*
* Scans the subquery sequentially and returns the next qualifying
* tuple.
- * It calls the ExecScan() routine and passes it the access method
- * which retrieve tuples sequentially.
- *
+ * We call the ExecScan() routine and pass it the appropriate
+ * access method functions.
+ * ----------------------------------------------------------------
*/
-
TupleTableSlot *
ExecSubqueryScan(SubqueryScanState *node)
{
- /*
- * use SubqueryNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) SubqueryNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) SubqueryNext,
+ (ExecScanRecheckMtd) SubqueryRecheck);
}
/* ----------------------------------------------------------------
@@ -176,7 +178,7 @@ ExecEndSubqueryScan(SubqueryScanState *node)
* clean out the upper tuple table
*/
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
- node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */
+ ExecClearTuple(node->ss.ss_ScanTupleSlot);
/*
* close down subquery
@@ -193,9 +195,7 @@ ExecEndSubqueryScan(SubqueryScanState *node)
void
ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)
{
- EState *estate;
-
- estate = node->ss.ps.state;
+ ExecScanReScan(&node->ss);
/*
* ExecReScan doesn't know about my subplan, so I have to do
@@ -211,7 +211,4 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)
*/
if (node->subplan->chgParam == NULL)
ExecReScan(node->subplan, NULL);
-
- node->ss.ss_ScanTupleSlot = NULL;
- node->ss.ps.ps_TupFromTlist = false;
}
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 1fc74695eec..7e4a5c7a077 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.63 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.64 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -258,7 +258,6 @@ TidNext(TidScanState *node)
Relation heapRelation;
HeapTuple tuple;
TupleTableSlot *slot;
- Index scanrelid;
Buffer buffer = InvalidBuffer;
ItemPointerData *tidList;
int numTids;
@@ -272,33 +271,6 @@ TidNext(TidScanState *node)
snapshot = estate->es_snapshot;
heapRelation = node->ss.ss_currentRelation;
slot = node->ss.ss_ScanTupleSlot;
- scanrelid = ((TidScan *) node->ss.ps.plan)->scan.scanrelid;
-
- /*
- * Check if we are evaluating PlanQual for tuple of this relation.
- * Additional checking is not good, but no other way for now. We could
- * introduce new nodes for this case and handle TidScan --> NewNode
- * switching in Init/ReScan plan...
- */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- if (estate->es_evTupleNull[scanrelid - 1])
- return ExecClearTuple(slot);
-
- /*
- * XXX shouldn't we check here to make sure tuple matches TID list? In
- * runtime-key case this is not certain, is it? However, in the WHERE
- * CURRENT OF case it might not match anyway ...
- */
-
- ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
- slot, InvalidBuffer, false);
-
- /* Flag for the next call that no more tuples */
- estate->es_evTupleNull[scanrelid - 1] = true;
- return slot;
- }
/*
* First time through, compute the list of TIDs to be visited
@@ -384,13 +356,28 @@ TidNext(TidScanState *node)
return ExecClearTuple(slot);
}
+/*
+ * TidRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+TidRecheck(TidScanState *node, TupleTableSlot *slot)
+{
+ /*
+ * XXX shouldn't we check here to make sure tuple matches TID list? In
+ * runtime-key case this is not certain, is it? However, in the WHERE
+ * CURRENT OF case it might not match anyway ...
+ */
+ return true;
+}
+
+
/* ----------------------------------------------------------------
* ExecTidScan(node)
*
* Scans the relation using tids and returns
* the next qualifying tuple in the direction specified.
- * It calls ExecScan() and passes it the access methods which returns
- * the next tuple using the tids.
+ * We call the ExecScan() routine and pass it the appropriate
+ * access method functions.
*
* Conditions:
* -- the "cursor" maintained by the AMI is positioned at the tuple
@@ -405,10 +392,9 @@ TidNext(TidScanState *node)
TupleTableSlot *
ExecTidScan(TidScanState *node)
{
- /*
- * use TidNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) TidNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) TidNext,
+ (ExecScanRecheckMtd) TidRecheck);
}
/* ----------------------------------------------------------------
@@ -418,32 +404,18 @@ ExecTidScan(TidScanState *node)
void
ExecTidReScan(TidScanState *node, ExprContext *exprCtxt)
{
- EState *estate;
- Index scanrelid;
-
- estate = node->ss.ps.state;
- scanrelid = ((TidScan *) node->ss.ps.plan)->scan.scanrelid;
-
- node->ss.ps.ps_TupFromTlist = false;
-
/* If we are being passed an outer tuple, save it for runtime key calc */
if (exprCtxt != NULL)
node->ss.ps.ps_ExprContext->ecxt_outertuple =
exprCtxt->ecxt_outertuple;
- /* If this is re-scanning of PlanQual ... */
- if (estate->es_evTuple != NULL &&
- estate->es_evTuple[scanrelid - 1] != NULL)
- {
- estate->es_evTupleNull[scanrelid - 1] = false;
- return;
- }
-
if (node->tss_TidList)
pfree(node->tss_TidList);
node->tss_TidList = NULL;
node->tss_NumTids = 0;
node->tss_TidPtr = -1;
+
+ ExecScanReScan(&node->ss);
}
/* ----------------------------------------------------------------
diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c
index 90b5594f4ed..55a0d53265b 100644
--- a/src/backend/executor/nodeValuesscan.c
+++ b/src/backend/executor/nodeValuesscan.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeValuesscan.c,v 1.10 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeValuesscan.c,v 1.11 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -154,23 +154,31 @@ ValuesNext(ValuesScanState *node)
return slot;
}
+/*
+ * ValuesRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+ValuesRecheck(ValuesScanState *node, TupleTableSlot *slot)
+{
+ /* nothing to check */
+ return true;
+}
/* ----------------------------------------------------------------
* ExecValuesScan(node)
*
* Scans the values lists sequentially and returns the next qualifying
* tuple.
- * It calls the ExecScan() routine and passes it the access method
- * which retrieves tuples sequentially.
+ * We call the ExecScan() routine and pass it the appropriate
+ * access method functions.
* ----------------------------------------------------------------
*/
TupleTableSlot *
ExecValuesScan(ValuesScanState *node)
{
- /*
- * use ValuesNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) ValuesNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) ValuesNext,
+ (ExecScanRecheckMtd) ValuesRecheck);
}
/* ----------------------------------------------------------------
@@ -320,7 +328,8 @@ void
ExecValuesReScan(ValuesScanState *node, ExprContext *exprCtxt)
{
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
- node->ss.ps.ps_TupFromTlist = false;
+
+ ExecScanReScan(&node->ss);
node->curr_idx = -1;
}
diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c
index 545747b2307..3c18a3eccc3 100644
--- a/src/backend/executor/nodeWorktablescan.c
+++ b/src/backend/executor/nodeWorktablescan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeWorktablescan.c,v 1.8 2009/09/27 21:10:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeWorktablescan.c,v 1.9 2009/10/26 02:26:31 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,12 +61,22 @@ WorkTableScanNext(WorkTableScanState *node)
return slot;
}
+/*
+ * WorkTableScanRecheck -- access method routine to recheck a tuple in EvalPlanQual
+ */
+static bool
+WorkTableScanRecheck(WorkTableScanState *node, TupleTableSlot *slot)
+{
+ /* nothing to check */
+ return true;
+}
+
/* ----------------------------------------------------------------
* ExecWorkTableScan(node)
*
* Scans the worktable sequentially and returns the next qualifying tuple.
- * It calls the ExecScan() routine and passes it the access method
- * which retrieves tuples sequentially.
+ * We call the ExecScan() routine and pass it the appropriate
+ * access method functions.
* ----------------------------------------------------------------
*/
TupleTableSlot *
@@ -106,10 +116,9 @@ ExecWorkTableScan(WorkTableScanState *node)
ExecAssignScanProjectionInfo(&node->ss);
}
- /*
- * use WorkTableScanNext as access method
- */
- return ExecScan(&node->ss, (ExecScanAccessMtd) WorkTableScanNext);
+ return ExecScan(&node->ss,
+ (ExecScanAccessMtd) WorkTableScanNext,
+ (ExecScanRecheckMtd) WorkTableScanRecheck);
}
@@ -203,7 +212,8 @@ void
ExecWorkTableScanReScan(WorkTableScanState *node, ExprContext *exprCtxt)
{
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
- node->ss.ps.ps_TupFromTlist = false;
+
+ ExecScanReScan(&node->ss);
/* No need (or way) to rescan if ExecWorkTableScan not called yet */
if (node->rustate)