Diffstat (limited to 'src/backend/executor/nodeModifyTable.c')
-rw-r--r-- | src/backend/executor/nodeModifyTable.c | 1005
1 file changed, 1005 insertions, 0 deletions
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
new file mode 100644
index 00000000000..a9fd8c4974f
--- /dev/null
+++ b/src/backend/executor/nodeModifyTable.c
@@ -0,0 +1,1005 @@
+/*-------------------------------------------------------------------------
+ *
+ * nodeModifyTable.c
+ *	  routines to handle ModifyTable nodes.
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeModifyTable.c,v 1.1 2009/10/10 01:43:47 tgl Exp $
+ *
+ *-------------------------------------------------------------------------
+ */
+/* INTERFACE ROUTINES
+ *		ExecInitModifyTable - initialize the ModifyTable node
+ *		ExecModifyTable - retrieve the next tuple from the node
+ *		ExecEndModifyTable - shut down the ModifyTable node
+ *		ExecReScanModifyTable - rescan the ModifyTable node
+ *
+ * NOTES
+ *		Each ModifyTable node contains a list of one or more subplans,
+ *		much like an Append node.  There is one subplan per result relation.
+ *		The key reason for this is that in an inherited UPDATE command, each
+ *		result relation could have a different schema (more or different
+ *		columns) requiring a different plan tree to produce it.  In an
+ *		inherited DELETE, all the subplans should produce the same output
+ *		rowtype, but we might still find that different plans are appropriate
+ *		for different child relations.
+ *
+ *		If the query specifies RETURNING, then the ModifyTable returns a
+ *		RETURNING tuple after completing each row insert, update, or delete.
+ *		It must be called again to continue the operation.  Without RETURNING,
+ *		we just loop within the node until all the work is done, then
+ *		return NULL.  This avoids useless call/return overhead.
+ */
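+/*
+ * As an illustrative sketch of the NOTES above (table and column names are
+ * hypothetical, not part of this file), the per-relation subplans and the
+ * RETURNING protocol come into play for a command sequence such as:
+ *
+ *		CREATE TABLE cities (name text, population int);
+ *		CREATE TABLE capitals (state char(2)) INHERITS (cities);
+ *
+ *		UPDATE cities SET population = population + 1
+ *			RETURNING name, population;
+ *
+ * The planner builds one subplan for "cities" and another for "capitals"
+ * (whose rowtype has an extra column), and because RETURNING is present the
+ * node hands back one projected tuple per updated row, resuming the scan on
+ * the next call.
+ */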
+
+#include "postgres.h"
+
+#include "access/xact.h"
+#include "commands/trigger.h"
+#include "executor/executor.h"
+#include "executor/nodeModifyTable.h"
+#include "miscadmin.h"
+#include "nodes/nodeFuncs.h"
+#include "storage/bufmgr.h"
+#include "utils/builtins.h"
+#include "utils/memutils.h"
+#include "utils/tqual.h"
+
+
+/*
+ * Verify that the tuples to be produced by INSERT or UPDATE match the
+ * target relation's rowtype
+ *
+ * We do this to guard against stale plans.  If plan invalidation is
+ * functioning properly then we should never get a failure here, but better
+ * safe than sorry.  Note that this is called after we have obtained lock
+ * on the target rel, so the rowtype can't change underneath us.
+ *
+ * The plan output is represented by its targetlist, because that makes
+ * handling the dropped-column case easier.
+ */
+static void
+ExecCheckPlanOutput(Relation resultRel, List *targetList)
+{
+	TupleDesc	resultDesc = RelationGetDescr(resultRel);
+	int			attno = 0;
+	ListCell   *lc;
+
+	foreach(lc, targetList)
+	{
+		TargetEntry *tle = (TargetEntry *) lfirst(lc);
+		Form_pg_attribute attr;
+
+		if (tle->resjunk)
+			continue;			/* ignore junk tlist items */
+
+		if (attno >= resultDesc->natts)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("table row type and query-specified row type do not match"),
+					 errdetail("Query has too many columns.")));
+		attr = resultDesc->attrs[attno++];
+
+		if (!attr->attisdropped)
+		{
+			/* Normal case: demand type match */
+			if (exprType((Node *) tle->expr) != attr->atttypid)
+				ereport(ERROR,
+						(errcode(ERRCODE_DATATYPE_MISMATCH),
+						 errmsg("table row type and query-specified row type do not match"),
+						 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
+								   format_type_be(attr->atttypid),
+								   attno,
+								   format_type_be(exprType((Node *) tle->expr)))));
+		}
+		else
+		{
+			/*
+			 * For a dropped column, we can't check atttypid (it's likely 0).
+			 * In any case the planner has most likely inserted an INT4 null.
+			 * What we insist on is just *some* NULL constant.
+			 */
+			if (!IsA(tle->expr, Const) ||
+				!((Const *) tle->expr)->constisnull)
+				ereport(ERROR,
+						(errcode(ERRCODE_DATATYPE_MISMATCH),
+						 errmsg("table row type and query-specified row type do not match"),
+						 errdetail("Query provides a value for a dropped column at ordinal position %d.",
+								   attno)));
+		}
+	}
+	if (attno != resultDesc->natts)
+		ereport(ERROR,
+				(errcode(ERRCODE_DATATYPE_MISMATCH),
+				 errmsg("table row type and query-specified row type do not match"),
+				 errdetail("Query has too few columns.")));
+}
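+/*
+ * A hypothetical illustration of the dropped-column case handled above
+ * (the table name is invented for the example): after
+ *
+ *		CREATE TABLE t (a int, b int, c int);
+ *		ALTER TABLE t DROP COLUMN b;
+ *
+ * the stored rowtype of "t" still has three attributes, with the second
+ * marked attisdropped.  A plan for "INSERT INTO t VALUES (1, 2)" must
+ * therefore carry a NULL constant in the dropped position, and that is
+ * exactly what the check above insists on.
+ */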
+
+/*
+ * ExecProcessReturning --- evaluate a RETURNING list
+ *
+ * projectReturning: RETURNING projection info for current result rel
+ * tupleSlot: slot holding tuple actually inserted/updated/deleted
+ * planSlot: slot holding tuple returned by top subplan node
+ *
+ * Returns a slot holding the result tuple
+ */
+static TupleTableSlot *
+ExecProcessReturning(ProjectionInfo *projectReturning,
+					 TupleTableSlot *tupleSlot,
+					 TupleTableSlot *planSlot)
+{
+	ExprContext *econtext = projectReturning->pi_exprContext;
+
+	/*
+	 * Reset per-tuple memory context to free any expression evaluation
+	 * storage allocated in the previous cycle.
+	 */
+	ResetExprContext(econtext);
+
+	/* Make tuple and any needed join variables available to ExecProject */
+	econtext->ecxt_scantuple = tupleSlot;
+	econtext->ecxt_outertuple = planSlot;
+
+	/* Compute the RETURNING expressions */
+	return ExecProject(projectReturning, NULL);
+}
+
+/* ----------------------------------------------------------------
+ *		ExecInsert
+ *
+ *		For INSERT, we have to insert the tuple into the target relation
+ *		and insert appropriate tuples into the index relations.
+ *
+ *		Returns RETURNING result if any, otherwise NULL.
+ * ----------------------------------------------------------------
+ */
+static TupleTableSlot *
+ExecInsert(TupleTableSlot *slot,
+		   TupleTableSlot *planSlot,
+		   EState *estate)
+{
+	HeapTuple	tuple;
+	ResultRelInfo *resultRelInfo;
+	Relation	resultRelationDesc;
+	Oid			newId;
+	List	   *recheckIndexes = NIL;
+
+	/*
+	 * get the heap tuple out of the tuple table slot, making sure we have a
+	 * writable copy
+	 */
+	tuple = ExecMaterializeSlot(slot);
+
+	/*
+	 * get information on the (current) result relation
+	 */
+	resultRelInfo = estate->es_result_relation_info;
+	resultRelationDesc = resultRelInfo->ri_RelationDesc;
+
+	/*
+	 * If the result relation has OIDs, force the tuple's OID to zero so that
+	 * heap_insert will assign a fresh OID.  Usually the OID already will be
+	 * zero at this point, but there are corner cases where the plan tree can
+	 * return a tuple extracted literally from some table with the same
+	 * rowtype.
+	 *
+	 * XXX if we ever wanted to allow users to assign their own OIDs to new
+	 * rows, this'd be the place to do it.  For the moment, we make a point
+	 * of doing this before calling triggers, so that a user-supplied trigger
+	 * could hack the OID if desired.
+	 */
+	if (resultRelationDesc->rd_rel->relhasoids)
+		HeapTupleSetOid(tuple, InvalidOid);
+
+	/* BEFORE ROW INSERT Triggers */
+	if (resultRelInfo->ri_TrigDesc &&
+		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+	{
+		HeapTuple	newtuple;
+
+		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
+
+		if (newtuple == NULL)	/* "do nothing" */
+			return NULL;
+
+		if (newtuple != tuple)	/* modified by Trigger(s) */
+		{
+			/*
+			 * Put the modified tuple into a slot for convenience of routines
+			 * below.  We assume the tuple was allocated in per-tuple memory
+			 * context, and therefore will go away by itself.  The tuple
+			 * table slot should not try to clear it.
+			 */
+			TupleTableSlot *newslot = estate->es_trig_tuple_slot;
+
+			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
+				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
+			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
+			slot = newslot;
+			tuple = newtuple;
+		}
+	}
+
+	/*
+	 * Check the constraints of the tuple
+	 */
+	if (resultRelationDesc->rd_att->constr)
+		ExecConstraints(resultRelInfo, slot, estate);
+
+	/*
+	 * insert the tuple
+	 *
+	 * Note: heap_insert returns the tid (location) of the new tuple in the
+	 * t_self field.
+	 */
+	newId = heap_insert(resultRelationDesc, tuple,
+						estate->es_output_cid, 0, NULL);
+
+	(estate->es_processed)++;
+	estate->es_lastoid = newId;
+	setLastTid(&(tuple->t_self));
+
+	/*
+	 * insert index entries for tuple
+	 */
+	if (resultRelInfo->ri_NumIndices > 0)
+		recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
+											   estate, false);
+
+	/* AFTER ROW INSERT Triggers */
+	ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes);
+
+	/* Process RETURNING if present */
+	if (resultRelInfo->ri_projectReturning)
+		return ExecProcessReturning(resultRelInfo->ri_projectReturning,
+									slot, planSlot);
+
+	return NULL;
+}
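+/*
+ * To illustrate the trigger interaction above with a sketch (table,
+ * function, and trigger names are invented for the example): a BEFORE ROW
+ * trigger whose function returns NULL makes ExecInsert skip the row
+ * entirely, so neither the heap insert, the index inserts, nor RETURNING
+ * happen for it:
+ *
+ *		CREATE TABLE payments (amount int);
+ *
+ *		CREATE FUNCTION drop_negatives() RETURNS trigger
+ *		LANGUAGE plpgsql AS $$
+ *		BEGIN
+ *			IF NEW.amount < 0 THEN
+ *				RETURN NULL;		-- "do nothing" for this row
+ *			END IF;
+ *			RETURN NEW;
+ *		END $$;
+ *
+ *		CREATE TRIGGER drop_negatives BEFORE INSERT ON payments
+ *			FOR EACH ROW EXECUTE PROCEDURE drop_negatives();
+ *
+ *		INSERT INTO payments (amount) VALUES (5), (-1) RETURNING amount;
+ *
+ * Only the row with amount = 5 is inserted and returned.
+ */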
+
+/* ----------------------------------------------------------------
+ *		ExecDelete
+ *
+ *		DELETE is like UPDATE, except that we delete the tuple and no
+ *		index modifications are needed
+ *
+ *		Returns RETURNING result if any, otherwise NULL.
+ * ----------------------------------------------------------------
+ */
+static TupleTableSlot *
+ExecDelete(ItemPointer tupleid,
+		   TupleTableSlot *planSlot,
+		   PlanState *subplanstate,
+		   EState *estate)
+{
+	ResultRelInfo *resultRelInfo;
+	Relation	resultRelationDesc;
+	HTSU_Result result;
+	ItemPointerData update_ctid;
+	TransactionId update_xmax;
+
+	/*
+	 * get information on the (current) result relation
+	 */
+	resultRelInfo = estate->es_result_relation_info;
+	resultRelationDesc = resultRelInfo->ri_RelationDesc;
+
+	/* BEFORE ROW DELETE Triggers */
+	if (resultRelInfo->ri_TrigDesc &&
+		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
+	{
+		bool		dodelete;
+
+		dodelete = ExecBRDeleteTriggers(estate, subplanstate, resultRelInfo,
+										tupleid);
+
+		if (!dodelete)			/* "do nothing" */
+			return NULL;
+	}
+
+	/*
+	 * delete the tuple
+	 *
+	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
+	 * the row to be deleted is visible to that snapshot, and throw a can't-
+	 * serialize error if not.  This is a special-case behavior needed for
+	 * referential integrity updates in serializable transactions.
+	 */
+ldelete:;
+	result = heap_delete(resultRelationDesc, tupleid,
+						 &update_ctid, &update_xmax,
+						 estate->es_output_cid,
+						 estate->es_crosscheck_snapshot,
+						 true /* wait for commit */ );
+	switch (result)
+	{
+		case HeapTupleSelfUpdated:
+			/* already deleted by self; nothing to do */
+			return NULL;
+
+		case HeapTupleMayBeUpdated:
+			break;
+
+		case HeapTupleUpdated:
+			if (IsXactIsoLevelSerializable)
+				ereport(ERROR,
+						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+						 errmsg("could not serialize access due to concurrent update")));
+			else if (!ItemPointerEquals(tupleid, &update_ctid))
+			{
+				TupleTableSlot *epqslot;
+
+				epqslot = EvalPlanQual(estate,
+									   resultRelInfo->ri_RangeTableIndex,
+									   subplanstate,
+									   &update_ctid,
+									   update_xmax);
+				if (!TupIsNull(epqslot))
+				{
+					*tupleid = update_ctid;
+					goto ldelete;
+				}
+			}
+			/* tuple already deleted; nothing to do */
+			return NULL;
+
+		default:
+			elog(ERROR, "unrecognized heap_delete status: %u", result);
+			return NULL;
+	}
+
+	(estate->es_processed)++;
+
+	/*
+	 * Note: Normally one would think that we have to delete index tuples
+	 * associated with the heap tuple now...
+	 *
+	 * ... but in POSTGRES, we have no need to do this because VACUUM will
+	 * take care of it later.  We can't delete index tuples immediately
+	 * anyway, since the tuple is still visible to other transactions.
+	 */
+
+	/* AFTER ROW DELETE Triggers */
+	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
+
+	/* Process RETURNING if present */
+	if (resultRelInfo->ri_projectReturning)
+	{
+		/*
+		 * We have to put the target tuple into a slot, which means we must
+		 * fetch it first.  We can use the trigger tuple slot.
+		 */
+		TupleTableSlot *slot = estate->es_trig_tuple_slot;
+		TupleTableSlot *rslot;
+		HeapTupleData deltuple;
+		Buffer		delbuffer;
+
+		deltuple.t_self = *tupleid;
+		if (!heap_fetch(resultRelationDesc, SnapshotAny,
+						&deltuple, &delbuffer, false, NULL))
+			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
+
+		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
+			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
+		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
+
+		rslot = ExecProcessReturning(resultRelInfo->ri_projectReturning,
+									 slot, planSlot);
+
+		ExecClearTuple(slot);
+		ReleaseBuffer(delbuffer);
+
+		return rslot;
+	}
+
+	return NULL;
+}
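+/*
+ * The RETURNING path above is what lets a command like the following
+ * (table name hypothetical) hand the deleted rows back to the client,
+ * even though they are already dead in the heap by the time the
+ * projection runs:
+ *
+ *		DELETE FROM sessions WHERE expires < now() RETURNING *;
+ *
+ * Because ExecDelete only has the ctid at that point, it refetches the
+ * tuple with SnapshotAny before projecting it.
+ */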
+
+/* ----------------------------------------------------------------
+ *		ExecUpdate
+ *
+ *		note: we can't run UPDATE queries with transactions off because
+ *		UPDATEs are actually INSERTs and our scan will mistakenly loop
+ *		forever, updating the tuple it just inserted.  This should be
+ *		fixed, but until it is, we don't want to get stuck in an
+ *		infinite loop that corrupts your database.
+ *
+ *		Returns RETURNING result if any, otherwise NULL.
+ * ----------------------------------------------------------------
+ */
+static TupleTableSlot *
+ExecUpdate(ItemPointer tupleid,
+		   TupleTableSlot *slot,
+		   TupleTableSlot *planSlot,
+		   PlanState *subplanstate,
+		   EState *estate)
+{
+	HeapTuple	tuple;
+	ResultRelInfo *resultRelInfo;
+	Relation	resultRelationDesc;
+	HTSU_Result result;
+	ItemPointerData update_ctid;
+	TransactionId update_xmax;
+	List	   *recheckIndexes = NIL;
+
+	/*
+	 * abort the operation if not running transactions
+	 */
+	if (IsBootstrapProcessingMode())
+		elog(ERROR, "cannot UPDATE during bootstrap");
+
+	/*
+	 * get the heap tuple out of the tuple table slot, making sure we have a
+	 * writable copy
+	 */
+	tuple = ExecMaterializeSlot(slot);
+
+	/*
+	 * get information on the (current) result relation
+	 */
+	resultRelInfo = estate->es_result_relation_info;
+	resultRelationDesc = resultRelInfo->ri_RelationDesc;
+
+	/* BEFORE ROW UPDATE Triggers */
+	if (resultRelInfo->ri_TrigDesc &&
+		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
+	{
+		HeapTuple	newtuple;
+
+		newtuple = ExecBRUpdateTriggers(estate, subplanstate, resultRelInfo,
+										tupleid, tuple);
+
+		if (newtuple == NULL)	/* "do nothing" */
+			return NULL;
+
+		if (newtuple != tuple)	/* modified by Trigger(s) */
+		{
+			/*
+			 * Put the modified tuple into a slot for convenience of routines
+			 * below.  We assume the tuple was allocated in per-tuple memory
+			 * context, and therefore will go away by itself.  The tuple
+			 * table slot should not try to clear it.
+			 */
+			TupleTableSlot *newslot = estate->es_trig_tuple_slot;
+
+			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
+				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
+			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
+			slot = newslot;
+			tuple = newtuple;
+		}
+	}
+
+	/*
+	 * Check the constraints of the tuple
+	 *
+	 * If we generate a new candidate tuple after EvalPlanQual testing, we
+	 * must loop back here and recheck constraints.  (We don't need to redo
+	 * triggers, however.  If there are any BEFORE triggers then trigger.c
+	 * will have done heap_lock_tuple to lock the correct tuple, so there's
+	 * no need to do them again.)
+	 */
+lreplace:;
+	if (resultRelationDesc->rd_att->constr)
+		ExecConstraints(resultRelInfo, slot, estate);
+
+	/*
+	 * replace the heap tuple
+	 *
+	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
+	 * the row to be updated is visible to that snapshot, and throw a can't-
+	 * serialize error if not.  This is a special-case behavior needed for
+	 * referential integrity updates in serializable transactions.
+	 */
+	result = heap_update(resultRelationDesc, tupleid, tuple,
+						 &update_ctid, &update_xmax,
+						 estate->es_output_cid,
+						 estate->es_crosscheck_snapshot,
+						 true /* wait for commit */ );
+	switch (result)
+	{
+		case HeapTupleSelfUpdated:
+			/* already deleted by self; nothing to do */
+			return NULL;
+
+		case HeapTupleMayBeUpdated:
+			break;
+
+		case HeapTupleUpdated:
+			if (IsXactIsoLevelSerializable)
+				ereport(ERROR,
+						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+						 errmsg("could not serialize access due to concurrent update")));
+			else if (!ItemPointerEquals(tupleid, &update_ctid))
+			{
+				TupleTableSlot *epqslot;
+
+				epqslot = EvalPlanQual(estate,
+									   resultRelInfo->ri_RangeTableIndex,
+									   subplanstate,
+									   &update_ctid,
+									   update_xmax);
+				if (!TupIsNull(epqslot))
+				{
+					*tupleid = update_ctid;
+					slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
+					tuple = ExecMaterializeSlot(slot);
+					goto lreplace;
+				}
+			}
+			/* tuple already deleted; nothing to do */
+			return NULL;
+
+		default:
+			elog(ERROR, "unrecognized heap_update status: %u", result);
+			return NULL;
+	}
+
+	(estate->es_processed)++;
+
+	/*
+	 * Note: instead of having to update the old index tuples associated with
+	 * the heap tuple, all we do is form and insert new index tuples.  This
+	 * is because UPDATEs are actually DELETEs and INSERTs, and index tuple
+	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
+	 * here is insert new index tuples.  -cim 9/27/89
+	 */
+
+	/*
+	 * insert index entries for tuple
+	 *
+	 * Note: heap_update returns the tid (location) of the new tuple in the
+	 * t_self field.
+	 *
+	 * If it's a HOT update, we mustn't insert new index entries.
+	 */
+	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
+		recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
+											   estate, false);
+
+	/* AFTER ROW UPDATE Triggers */
+	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple,
+						 recheckIndexes);
+
+	/* Process RETURNING if present */
+	if (resultRelInfo->ri_projectReturning)
+		return ExecProcessReturning(resultRelInfo->ri_projectReturning,
+									slot, planSlot);
+
+	return NULL;
+}
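+/*
+ * The HeapTupleUpdated branches above are where the two isolation levels
+ * part ways.  As a sketch (session labels and table are hypothetical),
+ * given two concurrent sessions:
+ *
+ *		-- session 1
+ *		BEGIN ISOLATION LEVEL SERIALIZABLE;
+ *		SELECT balance FROM accounts WHERE id = 1;	-- takes a snapshot
+ *
+ *		-- session 2
+ *		UPDATE accounts SET balance = balance + 1 WHERE id = 1;
+ *
+ *		-- session 1 again
+ *		UPDATE accounts SET balance = balance - 100 WHERE id = 1;
+ *
+ * session 1 fails with "could not serialize access due to concurrent
+ * update" once it sees session 2's committed update, whereas in READ
+ * COMMITTED it would instead re-evaluate its quals against the updated
+ * row via EvalPlanQual and retry at lreplace.
+ */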
+
+
+/*
+ * Process BEFORE EACH STATEMENT triggers
+ */
+static void
+fireBSTriggers(ModifyTableState *node)
+{
+	switch (node->operation)
+	{
+		case CMD_INSERT:
+			ExecBSInsertTriggers(node->ps.state,
+								 node->ps.state->es_result_relations);
+			break;
+		case CMD_UPDATE:
+			ExecBSUpdateTriggers(node->ps.state,
+								 node->ps.state->es_result_relations);
+			break;
+		case CMD_DELETE:
+			ExecBSDeleteTriggers(node->ps.state,
+								 node->ps.state->es_result_relations);
+			break;
+		default:
+			elog(ERROR, "unknown operation");
+			break;
+	}
+}
+
+/*
+ * Process AFTER EACH STATEMENT triggers
+ */
+static void
+fireASTriggers(ModifyTableState *node)
+{
+	switch (node->operation)
+	{
+		case CMD_INSERT:
+			ExecASInsertTriggers(node->ps.state,
+								 node->ps.state->es_result_relations);
+			break;
+		case CMD_UPDATE:
+			ExecASUpdateTriggers(node->ps.state,
+								 node->ps.state->es_result_relations);
+			break;
+		case CMD_DELETE:
+			ExecASDeleteTriggers(node->ps.state,
+								 node->ps.state->es_result_relations);
+			break;
+		default:
+			elog(ERROR, "unknown operation");
+			break;
+	}
+}
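+/*
+ * These statement-level triggers fire exactly once per ModifyTable
+ * execution, no matter how many rows the subplan produces -- even zero.
+ * A hypothetical sketch (trigger and function names invented):
+ *
+ *		CREATE TRIGGER audit_stmt AFTER UPDATE ON accounts
+ *			FOR EACH STATEMENT EXECUTE PROCEDURE log_statement();
+ *
+ *		UPDATE accounts SET balance = balance WHERE false;
+ *
+ * log_statement() still runs once, because fireASTriggers is reached on
+ * the way out of ExecModifyTable below whether or not any row matched.
+ */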
+
+
+/* ----------------------------------------------------------------
+ *		ExecModifyTable
+ *
+ *		Perform table modifications as required, and return RETURNING results
+ *		if needed.
+ * ----------------------------------------------------------------
+ */
+TupleTableSlot *
+ExecModifyTable(ModifyTableState *node)
+{
+	EState	   *estate = node->ps.state;
+	CmdType		operation = node->operation;
+	PlanState  *subplanstate;
+	JunkFilter *junkfilter;
+	TupleTableSlot *slot;
+	TupleTableSlot *planSlot;
+	ItemPointer tupleid = NULL;
+	ItemPointerData tuple_ctid;
+
+	/*
+	 * On first call, fire BEFORE STATEMENT triggers before proceeding.
+	 */
+	if (node->fireBSTriggers)
+	{
+		fireBSTriggers(node);
+		node->fireBSTriggers = false;
+	}
+
+	/*
+	 * es_result_relation_info must point to the currently active result
+	 * relation.  (Note we assume that ModifyTable nodes can't be nested.)
+	 * We want it to be NULL whenever we're not within ModifyTable, though.
+	 */
+	estate->es_result_relation_info =
+		estate->es_result_relations + node->mt_whichplan;
+
+	/* Preload local variables */
+	subplanstate = node->mt_plans[node->mt_whichplan];
+	junkfilter = estate->es_result_relation_info->ri_junkFilter;
+
+	/*
+	 * Fetch rows from subplan(s), and execute the required table
+	 * modification for each row.
+	 */
+	for (;;)
+	{
+		planSlot = ExecProcNode(subplanstate);
+
+		if (TupIsNull(planSlot))
+		{
+			/* advance to next subplan if any */
+			node->mt_whichplan++;
+			if (node->mt_whichplan < node->mt_nplans)
+			{
+				estate->es_result_relation_info++;
+				subplanstate = node->mt_plans[node->mt_whichplan];
+				junkfilter = estate->es_result_relation_info->ri_junkFilter;
+				continue;
+			}
+			else
+				break;
+		}
+
+		slot = planSlot;
+
+		if (junkfilter != NULL)
+		{
+			/*
+			 * extract the 'ctid' junk attribute.
+			 */
+			if (operation == CMD_UPDATE || operation == CMD_DELETE)
+			{
+				Datum		datum;
+				bool		isNull;
+
+				datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
+											 &isNull);
+				/* shouldn't ever get a null result... */
+				if (isNull)
+					elog(ERROR, "ctid is NULL");
+
+				tupleid = (ItemPointer) DatumGetPointer(datum);
+				tuple_ctid = *tupleid;	/* be sure we don't free the ctid!! */
+				tupleid = &tuple_ctid;
+			}
+
+			/*
+			 * apply the junkfilter if needed.
+			 */
+			if (operation != CMD_DELETE)
+				slot = ExecFilterJunk(junkfilter, slot);
+		}
+
+		switch (operation)
+		{
+			case CMD_INSERT:
+				slot = ExecInsert(slot, planSlot, estate);
+				break;
+			case CMD_UPDATE:
+				slot = ExecUpdate(tupleid, slot, planSlot,
+								  subplanstate, estate);
+				break;
+			case CMD_DELETE:
+				slot = ExecDelete(tupleid, planSlot,
+								  subplanstate, estate);
+				break;
+			default:
+				elog(ERROR, "unknown operation");
+				break;
+		}
+
+		/*
+		 * If we got a RETURNING result, return it to caller.  We'll continue
+		 * the work on next call.
+		 */
+		if (slot)
+		{
+			estate->es_result_relation_info = NULL;
+			return slot;
+		}
+	}
+
+	/* Reset es_result_relation_info before exiting */
+	estate->es_result_relation_info = NULL;
+
+	/*
+	 * We're done, but fire AFTER STATEMENT triggers before exiting.
+	 */
+	fireASTriggers(node);
+
+	return NULL;
+}
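+/*
+ * The junk 'ctid' attribute consumed above is the row's physical address,
+ * which the subplan is made to emit alongside the user-visible columns.
+ * It can be inspected from SQL (table name hypothetical):
+ *
+ *		SELECT ctid, * FROM accounts;
+ *
+ * In an UPDATE or DELETE plan the same value rides along as a resjunk
+ * column, is extracted here to identify the target tuple, and is then
+ * filtered out before the row is stored or returned.
+ */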
+
+/* ----------------------------------------------------------------
+ *		ExecInitModifyTable
+ * ----------------------------------------------------------------
+ */
+ModifyTableState *
+ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
+{
+	ModifyTableState *mtstate;
+	CmdType		operation = node->operation;
+	int			nplans = list_length(node->plans);
+	ResultRelInfo *resultRelInfo;
+	TupleDesc	tupDesc;
+	Plan	   *subplan;
+	ListCell   *l;
+	int			i;
+
+	/* check for unsupported flags */
+	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
+
+	/*
+	 * This should NOT get called during EvalPlanQual; we should have passed
+	 * a subplan tree to EvalPlanQual, instead.  Use a runtime test not just
+	 * Assert because this condition is easy to miss in testing ...
+	 */
+	if (estate->es_evTuple != NULL)
+		elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
+
+	/*
+	 * create state structure
+	 */
+	mtstate = makeNode(ModifyTableState);
+	mtstate->ps.plan = (Plan *) node;
+	mtstate->ps.state = estate;
+	mtstate->ps.targetlist = NIL;	/* not actually used */
+
+	mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
+	mtstate->mt_nplans = nplans;
+	mtstate->operation = operation;
+	mtstate->fireBSTriggers = true;
+
+	/* For the moment, assume our targets are exactly the global result rels */
+
+	/*
+	 * call ExecInitNode on each of the plans to be executed and save the
+	 * results into the array "mt_plans".  Note we *must* set
+	 * estate->es_result_relation_info correctly while we initialize each
+	 * sub-plan; ExecContextForcesOids depends on that!
+	 */
+	estate->es_result_relation_info = estate->es_result_relations;
+	i = 0;
+	foreach(l, node->plans)
+	{
+		subplan = (Plan *) lfirst(l);
+		mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
+		estate->es_result_relation_info++;
+		i++;
+	}
+	estate->es_result_relation_info = NULL;
+
+	/* select first subplan */
+	mtstate->mt_whichplan = 0;
+	subplan = (Plan *) linitial(node->plans);
+
+	/*
+	 * Initialize RETURNING projections if needed.
+	 */
+	if (node->returningLists)
+	{
+		TupleTableSlot *slot;
+		ExprContext *econtext;
+
+		/*
+		 * Initialize result tuple slot and assign its rowtype using the
+		 * first RETURNING list.  We assume the rest will look the same.
+		 */
+		tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
+								 false);
+
+		/* Set up a slot for the output of the RETURNING projection(s) */
+		ExecInitResultTupleSlot(estate, &mtstate->ps);
+		ExecAssignResultType(&mtstate->ps, tupDesc);
+		slot = mtstate->ps.ps_ResultTupleSlot;
+
+		/* Need an econtext too */
+		econtext = CreateExprContext(estate);
+		mtstate->ps.ps_ExprContext = econtext;
+
+		/*
+		 * Build a projection for each result rel.
+		 */
+		Assert(list_length(node->returningLists) == estate->es_num_result_relations);
+		resultRelInfo = estate->es_result_relations;
+		foreach(l, node->returningLists)
+		{
+			List	   *rlist = (List *) lfirst(l);
+			List	   *rliststate;
+
+			rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps);
+			resultRelInfo->ri_projectReturning =
+				ExecBuildProjectionInfo(rliststate, econtext, slot,
+										resultRelInfo->ri_RelationDesc->rd_att);
+			resultRelInfo++;
+		}
+	}
+	else
+	{
+		/*
+		 * We still must construct a dummy result tuple type, because
+		 * InitPlan expects one (maybe should change that?).
+		 */
+		tupDesc = ExecTypeFromTL(NIL, false);
+		ExecInitResultTupleSlot(estate, &mtstate->ps);
+		ExecAssignResultType(&mtstate->ps, tupDesc);
+
+		mtstate->ps.ps_ExprContext = NULL;
+	}
+
+	/*
+	 * Initialize the junk filter(s) if needed.  INSERT queries need a filter
+	 * if there are any junk attrs in the tlist.  UPDATE and DELETE always
+	 * need a filter, since there's always a junk 'ctid' attribute present
+	 * --- no need to look first.
+	 *
+	 * If there are multiple result relations, each one needs its own junk
+	 * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
+	 * can't be fooled by some needing a filter and some not.
+	 *
+	 * This section of code is also a convenient place to verify that the
+	 * output of an INSERT or UPDATE matches the target table(s).
+	 */
+	{
+		bool		junk_filter_needed = false;
+
+		switch (operation)
+		{
+			case CMD_INSERT:
+				foreach(l, subplan->targetlist)
+				{
+					TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+					if (tle->resjunk)
+					{
+						junk_filter_needed = true;
+						break;
+					}
+				}
+				break;
+			case CMD_UPDATE:
+			case CMD_DELETE:
+				junk_filter_needed = true;
+				break;
+			default:
+				elog(ERROR, "unknown operation");
+				break;
+		}
+
+		if (junk_filter_needed)
+		{
+			resultRelInfo = estate->es_result_relations;
+			for (i = 0; i < nplans; i++)
+			{
+				JunkFilter *j;
+
+				subplan = mtstate->mt_plans[i]->plan;
+				if (operation == CMD_INSERT || operation == CMD_UPDATE)
+					ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
+										subplan->targetlist);
+
+				j = ExecInitJunkFilter(subplan->targetlist,
+									   resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
+									   ExecInitExtraTupleSlot(estate));
+
+				if (operation == CMD_UPDATE || operation == CMD_DELETE)
+				{
+					/* For UPDATE/DELETE, find the ctid junk attr now */
+					j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
+					if (!AttributeNumberIsValid(j->jf_junkAttNo))
+						elog(ERROR, "could not find junk ctid column");
+				}
+
+				resultRelInfo->ri_junkFilter = j;
+				resultRelInfo++;
+			}
+		}
+		else
+		{
+			if (operation == CMD_INSERT)
+				ExecCheckPlanOutput(estate->es_result_relations->ri_RelationDesc,
+									subplan->targetlist);
+		}
+	}
+
+	/*
+	 * Set up a tuple table slot for use for trigger output tuples.  In a
+	 * plan containing multiple ModifyTable nodes, all can share one such
+	 * slot, so we keep it in the estate.
+	 */
+	if (estate->es_trig_tuple_slot == NULL)
+		estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);
+
+	return mtstate;
+}
+
+/* ----------------------------------------------------------------
+ *		ExecEndModifyTable
+ *
+ *		Shuts down the plan.
+ *
+ *		Returns nothing of interest.
+ * ----------------------------------------------------------------
+ */
+void
+ExecEndModifyTable(ModifyTableState *node)
+{
+	int			i;
+
+	/*
+	 * Free the exprcontext
+	 */
+	ExecFreeExprContext(&node->ps);
+
+	/*
+	 * clean out the tuple table
+	 */
+	ExecClearTuple(node->ps.ps_ResultTupleSlot);
+
+	/*
+	 * shut down subplans
+	 */
+	for (i = 0; i < node->mt_nplans; i++)
+		ExecEndNode(node->mt_plans[i]);
+}
+
+void
+ExecReScanModifyTable(ModifyTableState *node, ExprContext *exprCtxt)
+{
+	/*
+	 * Currently, we don't need to support rescan on ModifyTable nodes. The
+	 * semantics of that would be a bit debatable anyway.
+	 */
+	elog(ERROR, "ExecReScanModifyTable is not implemented");
+}