/*-------------------------------------------------------------------------
 *
 * nodeModifyTable.c
 *	  routines to handle ModifyTable nodes.
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeModifyTable.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *		ExecInitModifyTable - initialize the ModifyTable node
 *		ExecModifyTable		- retrieve the next tuple from the node
 *		ExecEndModifyTable	- shut down the ModifyTable node
 *		ExecReScanModifyTable - rescan the ModifyTable node
 *
 *	 NOTES
 *		Each ModifyTable node contains a list of one or more subplans,
 *		much like an Append node.  There is one subplan per result relation.
 *		The key reason for this is that in an inherited UPDATE command, each
 *		result relation could have a different schema (more or different
 *		columns) requiring a different plan tree to produce it.  In an
 *		inherited DELETE, all the subplans should produce the same output
 *		rowtype, but we might still find that different plans are appropriate
 *		for different child relations.
 *
 *		If the query specifies RETURNING, then the ModifyTable returns a
 *		RETURNING tuple after completing each row insert, update, or delete.
 *		It must be called again to continue the operation.  Without RETURNING,
 *		we just loop within the node until all the work is done, then
 *		return NULL.  This avoids useless call/return overhead.
 */
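/*
 * For example, an inherited UPDATE such as
 *		UPDATE parent SET f1 = f1 + 1 RETURNING *;
 * gets one subplan per child table.  ExecModifyTable runs those subplans in
 * turn and, because of the RETURNING clause, hands back one projected tuple
 * per modified row, being called repeatedly until every subplan is
 * exhausted.  (Table and column names here are purely illustrative.)
 */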
#include "postgres.h"

#include "access/xact.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "storage/bufmgr.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/tqual.h"
/*
 * Verify that the tuples to be produced by INSERT or UPDATE match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
    TupleDesc   resultDesc = RelationGetDescr(resultRel);
    int         attno = 0;
    ListCell   *lc;

    foreach(lc, targetList)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(lc);
        Form_pg_attribute attr;

        if (tle->resjunk)
            continue;           /* ignore junk tlist items */

        if (attno >= resultDesc->natts)
            ereport(ERROR,
                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                     errmsg("table row type and query-specified row type do not match"),
                     errdetail("Query has too many columns.")));
        attr = resultDesc->attrs[attno++];

        if (!attr->attisdropped)
        {
            /* Normal case: demand type match */
            if (exprType((Node *) tle->expr) != attr->atttypid)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Table has type %s at ordinal position %d, but query expects %s.",
                                   format_type_be(attr->atttypid),
                                   attno,
                                   format_type_be(exprType((Node *) tle->expr)))));
        }
        else
        {
            /*
             * For a dropped column, we can't check atttypid (it's likely 0).
             * In any case the planner has most likely inserted an INT4 null.
             * What we insist on is just *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a dropped column at ordinal position %d.",
                                   attno)));
        }
    }
    if (attno != resultDesc->natts)
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("table row type and query-specified row type do not match"),
                 errdetail("Query has too few columns.")));
}
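/*
 * The check above matters mainly for cached plans: if, say, an ALTER TABLE
 * added or dropped a column and plan invalidation somehow failed to rebuild
 * the plan, we report an error here instead of storing a mis-shaped tuple.
 */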
/*
 * ExecProcessReturning --- evaluate a RETURNING list
 *
 * projectReturning: RETURNING projection info for current result rel
 * tupleSlot: slot holding tuple actually inserted/updated/deleted
 * planSlot: slot holding tuple returned by top subplan node
 *
 * Returns a slot holding the result tuple
 */
static TupleTableSlot *
ExecProcessReturning(ProjectionInfo *projectReturning,
                     TupleTableSlot *tupleSlot,
                     TupleTableSlot *planSlot)
{
    ExprContext *econtext = projectReturning->pi_exprContext;

    /*
     * Reset per-tuple memory context to free any expression evaluation
     * storage allocated in the previous cycle.
     */
    ResetExprContext(econtext);

    /* Make tuple and any needed join variables available to ExecProject */
    econtext->ecxt_scantuple = tupleSlot;
    econtext->ecxt_outertuple = planSlot;

    /* Compute the RETURNING expressions */
    return ExecProject(projectReturning, NULL);
}
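/*
 * Note that a RETURNING list can reference not only the result relation but
 * also other relations used in the query (e.g. UPDATE ... FROM or
 * DELETE ... USING).  Columns of the result relation are evaluated from the
 * inserted/updated/deleted tuple (ecxt_scantuple), while values from other
 * relations come from the subplan's output tuple (ecxt_outertuple).
 */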
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		For INSERT, we have to insert the tuple into the target relation
 *		and insert appropriate tuples into the index relations.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecInsert(TupleTableSlot *slot,
           TupleTableSlot *planSlot,
           EState *estate)
{
    HeapTuple   tuple;
    ResultRelInfo *resultRelInfo;
    Relation    resultRelationDesc;
    Oid         newId;
    List       *recheckIndexes = NIL;

    /*
     * get the heap tuple out of the tuple table slot, making sure we have a
     * writable copy
     */
    tuple = ExecMaterializeSlot(slot);

    /*
     * get information on the (current) result relation
     */
    resultRelInfo = estate->es_result_relation_info;
    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /*
     * If the result relation has OIDs, force the tuple's OID to zero so that
     * heap_insert will assign a fresh OID.  Usually the OID already will be
     * zero at this point, but there are corner cases where the plan tree can
     * return a tuple extracted literally from some table with the same
     * rowtype.
     *
     * XXX if we ever wanted to allow users to assign their own OIDs to new
     * rows, this'd be the place to do it.  For the moment, we make a point of
     * doing this before calling triggers, so that a user-supplied trigger
     * could hack the OID if desired.
     */
    if (resultRelationDesc->rd_rel->relhasoids)
        HeapTupleSetOid(tuple, InvalidOid);

    /* BEFORE ROW INSERT Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
    {
        HeapTuple   newtuple;

        newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

        if (newtuple == NULL)   /* "do nothing" */
            return NULL;

        if (newtuple != tuple)  /* modified by Trigger(s) */
        {
            /*
             * Put the modified tuple into a slot for convenience of routines
             * below.  We assume the tuple was allocated in per-tuple memory
             * context, and therefore will go away by itself. The tuple table
             * slot should not try to clear it.
             */
            TupleTableSlot *newslot = estate->es_trig_tuple_slot;
            TupleDesc   tupdesc = RelationGetDescr(resultRelationDesc);

            if (newslot->tts_tupleDescriptor != tupdesc)
                ExecSetSlotDescriptor(newslot, tupdesc);
            ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
            slot = newslot;
            tuple = newtuple;
        }
    }

    /*
     * Check the constraints of the tuple
     */
    if (resultRelationDesc->rd_att->constr)
        ExecConstraints(resultRelInfo, slot, estate);

    /*
     * insert the tuple
     *
     * Note: heap_insert returns the tid (location) of the new tuple in the
     * t_self field.
     */
    newId = heap_insert(resultRelationDesc, tuple,
                        estate->es_output_cid, 0, NULL);

    (estate->es_processed)++;
    estate->es_lastoid = newId;
    setLastTid(&(tuple->t_self));

    /*
     * insert index entries for tuple
     */
    if (resultRelInfo->ri_NumIndices > 0)
        recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
                                               estate);

    /* AFTER ROW INSERT Triggers */
    ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes);

    list_free(recheckIndexes);

    /* Process RETURNING if present */
    if (resultRelInfo->ri_projectReturning)
        return ExecProcessReturning(resultRelInfo->ri_projectReturning,
                                    slot, planSlot);

    return NULL;
}
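/*
 * To summarize ExecInsert above: BEFORE ROW triggers run first (and may
 * replace or suppress the tuple), then constraints are checked, the tuple is
 * written with heap_insert, index entries are made (collecting any indexes
 * that need rechecking, e.g. for exclusion constraints), AFTER ROW triggers
 * fire, and finally the RETURNING projection, if any, is computed.
 */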
/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecDelete(ItemPointer tupleid,
           TupleTableSlot *planSlot,
           EPQState *epqstate,
           EState *estate)
{
    ResultRelInfo *resultRelInfo;
    Relation    resultRelationDesc;
    HTSU_Result result;
    ItemPointerData update_ctid;
    TransactionId update_xmax;

    /*
     * get information on the (current) result relation
     */
    resultRelInfo = estate->es_result_relation_info;
    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /* BEFORE ROW DELETE Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
    {
        bool        dodelete;

        dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
                                        tupleid);

        if (!dodelete)          /* "do nothing" */
            return NULL;
    }

    /*
     * delete the tuple
     *
     * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
     * the row to be deleted is visible to that snapshot, and throw a can't-
     * serialize error if not. This is a special-case behavior needed for
     * referential integrity updates in transaction-snapshot mode transactions.
     */
ldelete:;
    result = heap_delete(resultRelationDesc, tupleid,
                         &update_ctid, &update_xmax,
                         estate->es_output_cid,
                         estate->es_crosscheck_snapshot,
                         true /* wait for commit */ );
    switch (result)
    {
        case HeapTupleSelfUpdated:
            /* already deleted by self; nothing to do */
            return NULL;

        case HeapTupleMayBeUpdated:
            break;

        case HeapTupleUpdated:
            if (IsolationUsesXactSnapshot())
                ereport(ERROR,
                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                         errmsg("could not serialize access due to concurrent update")));
            if (!ItemPointerEquals(tupleid, &update_ctid))
            {
                TupleTableSlot *epqslot;

                epqslot = EvalPlanQual(estate,
                                       epqstate,
                                       resultRelationDesc,
                                       resultRelInfo->ri_RangeTableIndex,
                                       &update_ctid,
                                       update_xmax);
                if (!TupIsNull(epqslot))
                {
                    *tupleid = update_ctid;
                    goto ldelete;
                }
            }
            /* tuple already deleted; nothing to do */
            return NULL;

        default:
            elog(ERROR, "unrecognized heap_delete status: %u", result);
            return NULL;
    }

    (estate->es_processed)++;

    /*
     * Note: Normally one would think that we have to delete index tuples
     * associated with the heap tuple now...
     *
     * ... but in POSTGRES, we have no need to do this because VACUUM will
     * take care of it later.  We can't delete index tuples immediately
     * anyway, since the tuple is still visible to other transactions.
     */

    /* AFTER ROW DELETE Triggers */
    ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

    /* Process RETURNING if present */
    if (resultRelInfo->ri_projectReturning)
    {
        /*
         * We have to put the target tuple into a slot, which means first we
         * gotta fetch it.  We can use the trigger tuple slot.
         */
        TupleTableSlot *slot = estate->es_trig_tuple_slot;
        TupleTableSlot *rslot;
        HeapTupleData deltuple;
        Buffer      delbuffer;

        deltuple.t_self = *tupleid;
        if (!heap_fetch(resultRelationDesc, SnapshotAny,
                        &deltuple, &delbuffer, false, NULL))
            elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

        if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
            ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
        ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

        rslot = ExecProcessReturning(resultRelInfo->ri_projectReturning,
                                     slot, planSlot);

        ExecClearTuple(slot);
        ReleaseBuffer(delbuffer);

        return rslot;
    }

    return NULL;
}
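/*
 * A note on the HeapTupleUpdated handling above (ExecUpdate below uses the
 * same pattern): in READ COMMITTED mode a concurrently updated target row is
 * rechecked with EvalPlanQual against its newest version, and if the query's
 * quals still pass we retry the operation on that version.  At higher
 * isolation levels we instead report a serialization failure.
 */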
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecUpdate(ItemPointer tupleid,
           TupleTableSlot *slot,
           TupleTableSlot *planSlot,
           EPQState *epqstate,
           EState *estate)
{
    HeapTuple   tuple;
    ResultRelInfo *resultRelInfo;
    Relation    resultRelationDesc;
    HTSU_Result result;
    ItemPointerData update_ctid;
    TransactionId update_xmax;
    List       *recheckIndexes = NIL;

    /*
     * abort the operation if not running transactions
     */
    if (IsBootstrapProcessingMode())
        elog(ERROR, "cannot UPDATE during bootstrap");

    /*
     * get the heap tuple out of the tuple table slot, making sure we have a
     * writable copy
     */
    tuple = ExecMaterializeSlot(slot);

    /*
     * get information on the (current) result relation
     */
    resultRelInfo = estate->es_result_relation_info;
    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /* BEFORE ROW UPDATE Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
    {
        HeapTuple   newtuple;

        newtuple = ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
                                        tupleid, tuple);

        if (newtuple == NULL)   /* "do nothing" */
            return NULL;

        if (newtuple != tuple)  /* modified by Trigger(s) */
        {
            /*
             * Put the modified tuple into a slot for convenience of routines
             * below.  We assume the tuple was allocated in per-tuple memory
             * context, and therefore will go away by itself. The tuple table
             * slot should not try to clear it.
             */
            TupleTableSlot *newslot = estate->es_trig_tuple_slot;
            TupleDesc   tupdesc = RelationGetDescr(resultRelationDesc);

            if (newslot->tts_tupleDescriptor != tupdesc)
                ExecSetSlotDescriptor(newslot, tupdesc);
            ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
            slot = newslot;
            tuple = newtuple;
        }
    }

    /*
     * Check the constraints of the tuple
     *
     * If we generate a new candidate tuple after EvalPlanQual testing, we
     * must loop back here and recheck constraints.  (We don't need to redo
     * triggers, however.  If there are any BEFORE triggers then trigger.c
     * will have done heap_lock_tuple to lock the correct tuple, so there's no
     * need to do them again.)
     */
lreplace:;
    if (resultRelationDesc->rd_att->constr)
        ExecConstraints(resultRelInfo, slot, estate);

    /*
     * replace the heap tuple
     *
     * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
     * the row to be updated is visible to that snapshot, and throw a can't-
     * serialize error if not. This is a special-case behavior needed for
     * referential integrity updates in transaction-snapshot mode transactions.
     */
    result = heap_update(resultRelationDesc, tupleid, tuple,
                         &update_ctid, &update_xmax,
                         estate->es_output_cid,
                         estate->es_crosscheck_snapshot,
                         true /* wait for commit */ );
    switch (result)
    {
        case HeapTupleSelfUpdated:
            /* already deleted by self; nothing to do */
            return NULL;

        case HeapTupleMayBeUpdated:
            break;

        case HeapTupleUpdated:
            if (IsolationUsesXactSnapshot())
                ereport(ERROR,
                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                         errmsg("could not serialize access due to concurrent update")));
            if (!ItemPointerEquals(tupleid, &update_ctid))
            {
                TupleTableSlot *epqslot;

                epqslot = EvalPlanQual(estate,
                                       epqstate,
                                       resultRelationDesc,
                                       resultRelInfo->ri_RangeTableIndex,
                                       &update_ctid,
                                       update_xmax);
                if (!TupIsNull(epqslot))
                {
                    *tupleid = update_ctid;
                    slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
                    tuple = ExecMaterializeSlot(slot);
                    goto lreplace;
                }
            }
            /* tuple already deleted; nothing to do */
            return NULL;

        default:
            elog(ERROR, "unrecognized heap_update status: %u", result);
            return NULL;
    }

    (estate->es_processed)++;

    /*
     * Note: instead of having to update the old index tuples associated with
     * the heap tuple, all we do is form and insert new index tuples. This is
     * because UPDATEs are actually DELETEs and INSERTs, and index tuple
     * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
     * here is insert new index tuples.  -cim 9/27/89
     */

    /*
     * insert index entries for tuple
     *
     * Note: heap_update returns the tid (location) of the new tuple in the
     * t_self field.
     *
     * If it's a HOT update, we mustn't insert new index entries.
     */
    if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
        recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
                                               estate);

    /* AFTER ROW UPDATE Triggers */
    ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple,
                         recheckIndexes);

    list_free(recheckIndexes);

    /* Process RETURNING if present */
    if (resultRelInfo->ri_projectReturning)
        return ExecProcessReturning(resultRelInfo->ri_projectReturning,
                                    slot, planSlot);

    return NULL;
}
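/*
 * A note on the HOT test above: a HOT (heap-only tuple) update places the
 * new row version on the same page and reuses the existing index entries,
 * which reach it through the tuple chain, so inserting new index entries
 * would be both unnecessary and wrong.
 */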
/*
 * Process BEFORE EACH STATEMENT triggers
 */
static void
fireBSTriggers(ModifyTableState *node)
{
    switch (node->operation)
    {
        case CMD_INSERT:
            ExecBSInsertTriggers(node->ps.state, node->ps.state->es_result_relations);
            break;
        case CMD_UPDATE:
            ExecBSUpdateTriggers(node->ps.state, node->ps.state->es_result_relations);
            break;
        case CMD_DELETE:
            ExecBSDeleteTriggers(node->ps.state, node->ps.state->es_result_relations);
            break;
        default:
            elog(ERROR, "unknown operation");
            break;
    }
}

/*
 * Process AFTER EACH STATEMENT triggers
 */
static void
fireASTriggers(ModifyTableState *node)
{
    switch (node->operation)
    {
        case CMD_INSERT:
            ExecASInsertTriggers(node->ps.state, node->ps.state->es_result_relations);
            break;
        case CMD_UPDATE:
            ExecASUpdateTriggers(node->ps.state, node->ps.state->es_result_relations);
            break;
        case CMD_DELETE:
            ExecASDeleteTriggers(node->ps.state, node->ps.state->es_result_relations);
            break;
        default:
            elog(ERROR, "unknown operation");
            break;
    }
}
/* ----------------------------------------------------------------
 *	   ExecModifyTable
 *
 *		Perform table modifications as required, and return RETURNING results
 *		if needed.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecModifyTable(ModifyTableState *node)
{
    EState     *estate = node->ps.state;
    CmdType     operation = node->operation;
    PlanState  *subplanstate;
    JunkFilter *junkfilter;
    TupleTableSlot *slot;
    TupleTableSlot *planSlot;
    ItemPointer tupleid = NULL;
    ItemPointerData tuple_ctid;

    /*
     * On first call, fire BEFORE STATEMENT triggers before proceeding.
     */
    if (node->fireBSTriggers)
    {
        fireBSTriggers(node);
        node->fireBSTriggers = false;
    }

    /*
     * es_result_relation_info must point to the currently active result
     * relation.  (Note we assume that ModifyTable nodes can't be nested.) We
     * want it to be NULL whenever we're not within ModifyTable, though.
     */
    estate->es_result_relation_info =
        estate->es_result_relations + node->mt_whichplan;

    /* Preload local variables */
    subplanstate = node->mt_plans[node->mt_whichplan];
    junkfilter = estate->es_result_relation_info->ri_junkFilter;

    /*
     * Fetch rows from subplan(s), and execute the required table modification
     * for each row.
     */
    for (;;)
    {
        /*
         * Reset the per-output-tuple exprcontext.  This is needed because
         * triggers expect to use that context as workspace.  It's a bit ugly
         * to do this below the top level of the plan, however.  We might need
         * to rethink this later.
         */
        ResetPerTupleExprContext(estate);

        planSlot = ExecProcNode(subplanstate);

        if (TupIsNull(planSlot))
        {
            /* advance to next subplan if any */
            node->mt_whichplan++;
            if (node->mt_whichplan < node->mt_nplans)
            {
                estate->es_result_relation_info++;
                subplanstate = node->mt_plans[node->mt_whichplan];
                junkfilter = estate->es_result_relation_info->ri_junkFilter;
                EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan);
                continue;
            }
            else
                break;
        }

        EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
        slot = planSlot;

        if (junkfilter != NULL)
        {
            /*
             * extract the 'ctid' junk attribute.
             */
            if (operation == CMD_UPDATE || operation == CMD_DELETE)
            {
                Datum       datum;
                bool        isNull;

                datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
                                             &isNull);
                /* shouldn't ever get a null result... */
                if (isNull)
                    elog(ERROR, "ctid is NULL");

                tupleid = (ItemPointer) DatumGetPointer(datum);
                tuple_ctid = *tupleid;  /* be sure we don't free the ctid!! */
                tupleid = &tuple_ctid;
            }

            /*
             * apply the junkfilter if needed.
             */
            if (operation != CMD_DELETE)
                slot = ExecFilterJunk(junkfilter, slot);
        }

        switch (operation)
        {
            case CMD_INSERT:
                slot = ExecInsert(slot, planSlot, estate);
                break;
            case CMD_UPDATE:
                slot = ExecUpdate(tupleid, slot, planSlot,
                                  &node->mt_epqstate, estate);
                break;
            case CMD_DELETE:
                slot = ExecDelete(tupleid, planSlot,
                                  &node->mt_epqstate, estate);
                break;
            default:
                elog(ERROR, "unknown operation");
                break;
        }

        /*
         * If we got a RETURNING result, return it to caller.  We'll continue
         * the work on next call.
         */
        if (slot)
        {
            estate->es_result_relation_info = NULL;
            return slot;
        }
    }

    /* Reset es_result_relation_info before exiting */
    estate->es_result_relation_info = NULL;

    /*
     * We're done, but fire AFTER STATEMENT triggers before exiting.
     */
    fireASTriggers(node);

    return NULL;
}
/* ----------------------------------------------------------------
 *		ExecInitModifyTable
 * ----------------------------------------------------------------
 */
ModifyTableState *
ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
{
    ModifyTableState *mtstate;
    CmdType     operation = node->operation;
    int         nplans = list_length(node->plans);
    ResultRelInfo *resultRelInfo;
    TupleDesc   tupDesc;
    Plan       *subplan;
    ListCell   *l;
    int         i;

    /* check for unsupported flags */
    Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

    /*
     * This should NOT get called during EvalPlanQual; we should have passed a
     * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
     * Assert because this condition is easy to miss in testing ...
     */
    if (estate->es_epqTuple != NULL)
        elog(ERROR, "ModifyTable should not be called during EvalPlanQual");

    /*
     * create state structure
     */
    mtstate = makeNode(ModifyTableState);
    mtstate->ps.plan = (Plan *) node;
    mtstate->ps.state = estate;
    mtstate->ps.targetlist = NIL;       /* not actually used */

    mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
    mtstate->mt_nplans = nplans;
    mtstate->operation = operation;
    /* set up epqstate with dummy subplan pointer for the moment */
    EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, node->epqParam);
    mtstate->fireBSTriggers = true;

    /* For the moment, assume our targets are exactly the global result rels */

    /*
     * call ExecInitNode on each of the plans to be executed and save the
     * results into the array "mt_plans".  Note we *must* set
     * estate->es_result_relation_info correctly while we initialize each
     * sub-plan; ExecContextForcesOids depends on that!
     */
    estate->es_result_relation_info = estate->es_result_relations;
    i = 0;
    foreach(l, node->plans)
    {
        subplan = (Plan *) lfirst(l);
        mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
        estate->es_result_relation_info++;
        i++;
    }
    estate->es_result_relation_info = NULL;

    /* select first subplan */
    mtstate->mt_whichplan = 0;
    subplan = (Plan *) linitial(node->plans);
    EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan);

    /*
     * Initialize RETURNING projections if needed.
     */
    if (node->returningLists)
    {
        TupleTableSlot *slot;
        ExprContext *econtext;

        /*
         * Initialize result tuple slot and assign its rowtype using the first
         * RETURNING list.  We assume the rest will look the same.
         */
        tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
                                 false);

        /* Set up a slot for the output of the RETURNING projection(s) */
        ExecInitResultTupleSlot(estate, &mtstate->ps);
        ExecAssignResultType(&mtstate->ps, tupDesc);
        slot = mtstate->ps.ps_ResultTupleSlot;

        /* Need an econtext too */
        econtext = CreateExprContext(estate);
        mtstate->ps.ps_ExprContext = econtext;

        /*
         * Build a projection for each result rel.
         */
        Assert(list_length(node->returningLists) == estate->es_num_result_relations);
        resultRelInfo = estate->es_result_relations;
        foreach(l, node->returningLists)
        {
            List       *rlist = (List *) lfirst(l);
            List       *rliststate;

            rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps);
            resultRelInfo->ri_projectReturning =
                ExecBuildProjectionInfo(rliststate, econtext, slot,
                                        resultRelInfo->ri_RelationDesc->rd_att);
            resultRelInfo++;
        }
    }
    else
    {
        /*
         * We still must construct a dummy result tuple type, because InitPlan
         * expects one (maybe should change that?).
         */
        tupDesc = ExecTypeFromTL(NIL, false);
        ExecInitResultTupleSlot(estate, &mtstate->ps);
        ExecAssignResultType(&mtstate->ps, tupDesc);

        mtstate->ps.ps_ExprContext = NULL;
    }

    /*
     * If we have any secondary relations in an UPDATE or DELETE, they need to
     * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
     * EvalPlanQual mechanism needs to be told about them.  Locate the
     * relevant ExecRowMarks.
     */
    foreach(l, node->rowMarks)
    {
        PlanRowMark *rc = (PlanRowMark *) lfirst(l);
        ExecRowMark *erm = NULL;
        ListCell   *lce;

        Assert(IsA(rc, PlanRowMark));

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        foreach(lce, estate->es_rowMarks)
        {
            erm = (ExecRowMark *) lfirst(lce);
            if (erm->rti == rc->rti)
                break;
            erm = NULL;
        }
        if (erm == NULL)
            elog(ERROR, "failed to find ExecRowMark for PlanRowMark %u",
                 rc->rti);

        EvalPlanQualAddRowMark(&mtstate->mt_epqstate, erm);
    }

    /*
     * Initialize the junk filter(s) if needed.  INSERT queries need a filter
     * if there are any junk attrs in the tlist.  UPDATE and DELETE always
     * need a filter, since there's always a junk 'ctid' attribute present ---
     * no need to look first.
     *
     * If there are multiple result relations, each one needs its own junk
     * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
     * can't be fooled by some needing a filter and some not.
     *
     * This section of code is also a convenient place to verify that the
     * output of an INSERT or UPDATE matches the target table(s).
     */
    {
        bool        junk_filter_needed = false;

        switch (operation)
        {
            case CMD_INSERT:
                foreach(l, subplan->targetlist)
                {
                    TargetEntry *tle = (TargetEntry *) lfirst(l);

                    if (tle->resjunk)
                    {
                        junk_filter_needed = true;
                        break;
                    }
                }
                break;
            case CMD_UPDATE:
            case CMD_DELETE:
                junk_filter_needed = true;
                break;
            default:
                elog(ERROR, "unknown operation");
                break;
        }

        if (junk_filter_needed)
        {
            resultRelInfo = estate->es_result_relations;
            for (i = 0; i < nplans; i++)
            {
                JunkFilter *j;

                subplan = mtstate->mt_plans[i]->plan;
                if (operation == CMD_INSERT || operation == CMD_UPDATE)
                    ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
                                        subplan->targetlist);

                j = ExecInitJunkFilter(subplan->targetlist,
                                       resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
                                       ExecInitExtraTupleSlot(estate));

                if (operation == CMD_UPDATE || operation == CMD_DELETE)
                {
                    /* For UPDATE/DELETE, find the ctid junk attr now */
                    j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
                    if (!AttributeNumberIsValid(j->jf_junkAttNo))
                        elog(ERROR, "could not find junk ctid column");
                }

                resultRelInfo->ri_junkFilter = j;
                resultRelInfo++;
            }
        }
        else
        {
            if (operation == CMD_INSERT)
                ExecCheckPlanOutput(estate->es_result_relations->ri_RelationDesc,
                                    subplan->targetlist);
        }
    }

    /*
     * Set up a tuple table slot for use for trigger output tuples.  In a plan
     * containing multiple ModifyTable nodes, all can share one such slot, so
     * we keep it in the estate.
     */
    if (estate->es_trig_tuple_slot == NULL)
        estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);

    return mtstate;
}
/* ----------------------------------------------------------------
 *		ExecEndModifyTable
 *
 *		Shuts down the plan.
 *
 *		Returns nothing of interest.
 * ----------------------------------------------------------------
 */
void
ExecEndModifyTable(ModifyTableState *node)
{
    int         i;

    /*
     * Free the exprcontext
     */
    ExecFreeExprContext(&node->ps);

    /*
     * clean out the tuple table
     */
    ExecClearTuple(node->ps.ps_ResultTupleSlot);

    /*
     * Terminate EPQ execution if active
     */
    EvalPlanQualEnd(&node->mt_epqstate);

    /*
     * shut down subplans
     */
    for (i = 0; i < node->mt_nplans; i++)
        ExecEndNode(node->mt_plans[i]);
}
void
ExecReScanModifyTable(ModifyTableState *node)
{
    /*
     * Currently, we don't need to support rescan on ModifyTable nodes. The
     * semantics of that would be a bit debatable anyway.
     */
    elog(ERROR, "ExecReScanModifyTable is not implemented");
}