/*-------------------------------------------------------------------------
 *
 * nodeModifyTable.c
 *	  routines to handle ModifyTable nodes.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeModifyTable.c
 *
 *-------------------------------------------------------------------------
 */
/* INTERFACE ROUTINES
 *		ExecInitModifyTable - initialize the ModifyTable node
 *		ExecModifyTable		- retrieve the next tuple from the node
 *		ExecEndModifyTable	- shut down the ModifyTable node
 *		ExecReScanModifyTable - rescan the ModifyTable node
 *
 *	 NOTES
 *		Each ModifyTable node contains a list of one or more subplans,
 *		much like an Append node.  There is one subplan per result relation.
 *		The key reason for this is that in an inherited UPDATE command, each
 *		result relation could have a different schema (more or different
 *		columns) requiring a different plan tree to produce it.  In an
 *		inherited DELETE, all the subplans should produce the same output
 *		rowtype, but we might still find that different plans are appropriate
 *		for different child relations.
 *
 *		If the query specifies RETURNING, then the ModifyTable returns a
 *		RETURNING tuple after completing each row insert, update, or delete.
 *		It must be called again to continue the operation.  Without RETURNING,
 *		we just loop within the node until all the work is done, then
 *		return NULL.  This avoids useless call/return overhead.
 */
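/*
 * Illustration (hypothetical table "foo", not part of these sources):
 *
 *		INSERT INTO foo VALUES (1), (2) RETURNING *;
 *
 * makes ExecModifyTable return one projected RETURNING tuple per call,
 * whereas the same INSERT without a RETURNING clause is run to completion
 * within a single call that returns NULL.
 */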
#include "postgres.h"

#include "access/htup_details.h"
#include "access/xact.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/tqual.h"

static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
					 ResultRelInfo *resultRelInfo,
					 ItemPointer conflictTid,
					 TupleTableSlot *planSlot,
					 TupleTableSlot *excludedSlot,
					 EState *estate,
					 bool canSetTag,
					 TupleTableSlot **returning);

/*
 * Verify that the tuples to be produced by INSERT or UPDATE match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 */
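/*
 * Illustration (hypothetical): a plan built against "CREATE TABLE t
 * (a int, b int)" becomes stale if "ALTER TABLE t DROP COLUMN b" runs
 * before execution.  Should plan invalidation ever fail to force a replan,
 * the checks below raise ERRCODE_DATATYPE_MISMATCH rather than store a
 * wrongly-shaped row.
 */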
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
	TupleDesc	resultDesc = RelationGetDescr(resultRel);
	int			attno = 0;
	ListCell   *lc;

	foreach(lc, targetList)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(lc);
		Form_pg_attribute attr;

		if (tle->resjunk)
			continue;			/* ignore junk tlist items */

		if (attno >= resultDesc->natts)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Query has too many columns.")));
		attr = resultDesc->attrs[attno++];

		if (!attr->attisdropped)
		{
			/* Normal case: demand type match */
			if (exprType((Node *) tle->expr) != attr->atttypid)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
								   format_type_be(attr->atttypid),
								   attno,
								   format_type_be(exprType((Node *) tle->expr)))));
		}
		else
		{
			/*
			 * For a dropped column, we can't check atttypid (it's likely 0).
			 * In any case the planner has most likely inserted an INT4 null.
			 * What we insist on is just *some* NULL constant.
			 */
			if (!IsA(tle->expr, Const) ||
				!((Const *) tle->expr)->constisnull)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Query provides a value for a dropped column at ordinal position %d.",
								   attno)));
		}
	}
	if (attno != resultDesc->natts)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("table row type and query-specified row type do not match"),
				 errdetail("Query has too few columns.")));
}
/*
 * ExecProcessReturning --- evaluate a RETURNING list
 *
 * projectReturning: RETURNING projection info for current result rel
 * tupleSlot: slot holding tuple actually inserted/updated/deleted
 * planSlot: slot holding tuple returned by top subplan node
 *
 * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
 * scan tuple.
 *
 * Returns a slot holding the result tuple
 */
static TupleTableSlot *
ExecProcessReturning(ResultRelInfo *resultRelInfo,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot)
{
	ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
	ExprContext *econtext = projectReturning->pi_exprContext;

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous cycle.
	 */
	ResetExprContext(econtext);

	/* Make tuple and any needed join variables available to ExecProject */
	if (tupleSlot)
		econtext->ecxt_scantuple = tupleSlot;
	else
	{
		HeapTuple	tuple;

		/*
		 * RETURNING expressions might reference the tableoid column, so
		 * initialize t_tableOid before evaluating them.
		 */
		Assert(!TupIsNull(econtext->ecxt_scantuple));
		tuple = ExecMaterializeSlot(econtext->ecxt_scantuple);
		tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
	}
	econtext->ecxt_outertuple = planSlot;

	/* Compute the RETURNING expressions */
	return ExecProject(projectReturning);
}
/*
 * ExecCheckHeapTupleVisible -- verify heap tuple is visible
 *
 * It would not be consistent with guarantees of the higher isolation levels to
 * proceed with avoiding insertion (taking speculative insertion's alternative
 * path) on the basis of another tuple that is not visible to MVCC snapshot.
 * Check for the need to raise a serialization failure, and do so as necessary.
 */
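/*
 * Illustration (hypothetical, two concurrent sessions): under REPEATABLE
 * READ, if session A takes its snapshot, session B then commits
 * "INSERT INTO t VALUES (1)", and session A runs
 * "INSERT INTO t VALUES (1) ON CONFLICT DO NOTHING", the conflicting row
 * exists but is invisible to A's snapshot; the check below then raises a
 * serialization failure instead of silently skipping the insertion.
 */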
static void
ExecCheckHeapTupleVisible(EState *estate,
						  HeapTuple tuple,
						  Buffer buffer)
{
	if (!IsolationUsesXactSnapshot())
		return;

	/*
	 * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
	 * Caller should be holding pin, but not lock.
	 */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
	{
		/*
		 * We should not raise a serialization failure if the conflict is
		 * against a tuple inserted by our own transaction, even if it's not
		 * visible to our snapshot.  (This would happen, for example, if
		 * conflicting keys are proposed for insertion in a single command.)
		 */
		if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
			ereport(ERROR,
					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
					 errmsg("could not serialize access due to concurrent update")));
	}
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}
/*
 * ExecCheckTIDVisible -- convenience variant of ExecCheckHeapTupleVisible()
 */
static void
ExecCheckTIDVisible(EState *estate,
					ResultRelInfo *relinfo,
					ItemPointer tid)
{
	Relation	rel = relinfo->ri_RelationDesc;
	Buffer		buffer;
	HeapTupleData tuple;

	/* Redundantly check isolation level */
	if (!IsolationUsesXactSnapshot())
		return;

	tuple.t_self = *tid;
	if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL))
		elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
	ExecCheckHeapTupleVisible(estate, &tuple, buffer);
	ReleaseBuffer(buffer);
}
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		For INSERT, we have to insert the tuple into the target relation
 *		and insert appropriate tuples into the index relations.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecInsert(ModifyTableState *mtstate,
		   TupleTableSlot *slot,
		   TupleTableSlot *planSlot,
		   List *arbiterIndexes,
		   OnConflictAction onconflict,
		   EState *estate,
		   bool canSetTag)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	ResultRelInfo *saved_resultRelInfo = NULL;
	Relation	resultRelationDesc;
	Oid			newId;
	List	   *recheckIndexes = NIL;
	TupleTableSlot *oldslot = slot,
			   *result = NULL;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;

	/* Determine the partition to heap_insert the tuple into */
	if (mtstate->mt_partition_dispatch_info)
	{
		int			leaf_part_index;
		TupleConversionMap *map;

		/*
		 * Away we go ... If we end up not finding a partition after all,
		 * ExecFindPartition() does not return and errors out instead.
		 * Otherwise, the returned value is to be used as an index into arrays
		 * mt_partitions[] and mt_partition_tupconv_maps[] that will get us
		 * the ResultRelInfo and TupleConversionMap for the partition,
		 * respectively.
		 */
		leaf_part_index = ExecFindPartition(resultRelInfo,
											mtstate->mt_partition_dispatch_info,
											slot, estate);
		Assert(leaf_part_index >= 0 &&
			   leaf_part_index < mtstate->mt_num_partitions);

		/*
		 * Save the old ResultRelInfo and switch to the one corresponding to
		 * the selected partition.
		 */
		saved_resultRelInfo = resultRelInfo;
		resultRelInfo = mtstate->mt_partitions + leaf_part_index;

		/* We do not yet have a way to insert into a foreign partition */
		if (resultRelInfo->ri_FdwRoutine)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("cannot route inserted tuples to a foreign table")));

		/* For ExecInsertIndexTuples() to work on the partition's indexes */
		estate->es_result_relation_info = resultRelInfo;

		/*
		 * We might need to convert from the parent rowtype to the partition
		 * rowtype.
		 */
		map = mtstate->mt_partition_tupconv_maps[leaf_part_index];
		if (map)
		{
			Relation	partrel = resultRelInfo->ri_RelationDesc;

			tuple = do_convert_tuple(tuple, map);

			/*
			 * We must use the partition's tuple descriptor from this point
			 * on, until we're finished dealing with the partition.  Use the
			 * dedicated slot for that.
			 */
			slot = mtstate->mt_partition_tuple_slot;
			Assert(slot != NULL);
			ExecSetSlotDescriptor(slot, RelationGetDescr(partrel));
			ExecStoreTuple(tuple, slot, InvalidBuffer, true);
		}
	}
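	/*
	 * Illustration (hypothetical): given "CREATE TABLE p (a int, b int)
	 * PARTITION BY RANGE (a)" and a leaf partition whose physical column
	 * layout differs from the parent's (say, after DROP/ADD COLUMN churn),
	 * the conversion map above rewrites each routed tuple into the leaf's
	 * rowtype before it is inserted.
	 */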
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/*
	 * If the result relation has OIDs, force the tuple's OID to zero so that
	 * heap_insert will assign a fresh OID.  Usually the OID already will be
	 * zero at this point, but there are corner cases where the plan tree can
	 * return a tuple extracted literally from some table with the same
	 * rowtype.
	 *
	 * XXX if we ever wanted to allow users to assign their own OIDs to new
	 * rows, this'd be the place to do it.  For the moment, we make a point of
	 * doing this before calling triggers, so that a user-supplied trigger
	 * could hack the OID if desired.
	 */
	if (resultRelationDesc->rd_rel->relhasoids)
		HeapTupleSetOid(tuple, InvalidOid);

	/*
	 * BEFORE ROW INSERT Triggers.
	 *
	 * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
	 * INSERT ... ON CONFLICT statement.  We cannot check for constraint
	 * violations before firing these triggers, because they can change the
	 * values to insert.  Also, they can run arbitrary user-defined code with
	 * side-effects that we can't cancel by just not inserting the tuple.
	 */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_before_row)
	{
		slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);
	}

	/* INSTEAD OF ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
	{
		slot = ExecIRInsertTriggers(estate, resultRelInfo, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);

		newId = InvalidOid;
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * insert into foreign table: let the FDW do it
		 */
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* FDW might have changed tuple */
		tuple = ExecMaterializeSlot(slot);

		/*
		 * AFTER ROW Triggers or RETURNING expressions might reference the
		 * tableoid column, so initialize t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);

		newId = InvalidOid;
	}
	else
	{
		/*
		 * Constraints might reference the tableoid column, so initialize
		 * t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);

		/*
		 * Check any RLS INSERT WITH CHECK policies
		 *
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
		if (resultRelInfo->ri_WithCheckOptions != NIL)
			ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
								 resultRelInfo, slot, estate);

		/*
		 * Check the constraints of the tuple
		 */
		if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
			ExecConstraints(resultRelInfo, slot, oldslot, estate);

		if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
		{
			/* Perform a speculative insertion. */
			uint32		specToken;
			ItemPointerData conflictTid;
			bool		specConflict;

			/*
			 * Do a non-conclusive check for conflicts first.
			 *
			 * We're not holding any locks yet, so this doesn't guarantee that
			 * the later insert won't conflict.  But it avoids leaving behind
			 * a lot of canceled speculative insertions, if you run a lot of
			 * INSERT ON CONFLICT statements that do conflict.
			 *
			 * We loop back here if we find a conflict below, either during
			 * the pre-check, or when we re-check after inserting the tuple
			 * speculatively.
			 */
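			/*
			 * Hypothetical race, for illustration: two backends concurrently
			 * run "INSERT INTO t VALUES (1) ON CONFLICT DO NOTHING" against
			 * an empty unique column.  Both may pass the pre-check; one then
			 * wins the speculative insertion, and the loser detects the
			 * conflict at the re-check, aborts its speculative tuple, and
			 * loops back to vlock, where the pre-check now finds the
			 * winner's tuple.
			 */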
	vlock:
			specConflict = false;
			if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
										   arbiterIndexes))
			{
				/* committed conflict tuple found */
				if (onconflict == ONCONFLICT_UPDATE)
				{
					/*
					 * In case of ON CONFLICT DO UPDATE, execute the UPDATE
					 * part.  Be prepared to retry if the UPDATE fails because
					 * of another concurrent UPDATE/DELETE to the conflict
					 * tuple.
					 */
					TupleTableSlot *returning = NULL;

					if (ExecOnConflictUpdate(mtstate, resultRelInfo,
											 &conflictTid, planSlot, slot,
											 estate, canSetTag, &returning))
					{
						InstrCountFiltered2(&mtstate->ps, 1);
						return returning;
					}
					else
						goto vlock;
				}
				else
				{
					/*
					 * In case of ON CONFLICT DO NOTHING, do nothing. However,
					 * verify that the tuple is visible to the executor's MVCC
					 * snapshot at higher isolation levels.
					 */
					Assert(onconflict == ONCONFLICT_NOTHING);
					ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
					InstrCountFiltered2(&mtstate->ps, 1);
					return NULL;
				}
			}

			/*
			 * Before we start insertion proper, acquire our "speculative
			 * insertion lock".  Others can use that to wait for us to decide
			 * if we're going to go ahead with the insertion, instead of
			 * waiting for the whole transaction to complete.
			 */
			specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
			HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);

			/* insert the tuple, with the speculative token */
			newId = heap_insert(resultRelationDesc, tuple,
								estate->es_output_cid,
								HEAP_INSERT_SPECULATIVE,
								NULL);

			/* insert index entries for tuple */
			recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
												   estate, true, &specConflict,
												   arbiterIndexes);

			/* adjust the tuple's state accordingly */
			if (!specConflict)
				heap_finish_speculative(resultRelationDesc, tuple);
			else
				heap_abort_speculative(resultRelationDesc, tuple);

			/*
			 * Wake up anyone waiting for our decision.  They will re-check
			 * the tuple, see that it's no longer speculative, and wait on our
			 * XID as if this was a regularly inserted tuple all along.  Or if
			 * we killed the tuple, they will see it's dead, and proceed as if
			 * the tuple never existed.
			 */
			SpeculativeInsertionLockRelease(GetCurrentTransactionId());

			/*
			 * If there was a conflict, start from the beginning.  We'll do
			 * the pre-check again, which will now find the conflicting tuple
			 * (unless it aborts before we get there).
			 */
			if (specConflict)
			{
				list_free(recheckIndexes);
				goto vlock;
			}

			/* Since there was no insertion conflict, we're done */
		}
		else
		{
			/*
			 * insert the tuple normally.
			 *
			 * Note: heap_insert returns the tid (location) of the new tuple
			 * in the t_self field.
			 */
			newId = heap_insert(resultRelationDesc, tuple,
								estate->es_output_cid,
								0, NULL);

			/* insert index entries for tuple */
			if (resultRelInfo->ri_NumIndices > 0)
				recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
													   estate, false, NULL,
													   NIL);
		}
	}

	if (canSetTag)
	{
		(estate->es_processed)++;
		estate->es_lastoid = newId;
		setLastTid(&(tuple->t_self));
	}

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually inserting the
	 * record into the heap and all indexes.
	 *
	 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
	 * tuple will never be seen, if it violates the WITH CHECK OPTION.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		result = ExecProcessReturning(resultRelInfo, slot, planSlot);

	if (saved_resultRelInfo)
		estate->es_result_relation_info = saved_resultRelInfo;

	return result;
}
/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed.
 *
 *		When deleting from a table, tupleid identifies the tuple to
 *		delete and oldtuple is NULL.  When deleting from a view,
 *		oldtuple is passed to the INSTEAD OF triggers and identifies
 *		what to delete, and tupleid is invalid.  When deleting from a
 *		foreign table, tupleid is invalid; the FDW has to figure out
 *		which row to delete using data from the planSlot.  oldtuple is
 *		passed to foreign table triggers; it is NULL when the foreign
 *		table has no relevant triggers.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
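/*
 * Illustration (hypothetical): for "DELETE FROM base_table ...", the
 * subplan emits a junk "ctid" column that arrives here as tupleid; for a
 * DELETE on a view carrying an INSTEAD OF ROW trigger, the subplan instead
 * emits a junk "wholerow" column that arrives as oldtuple.
 */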
static TupleTableSlot *
ExecDelete(ItemPointer tupleid,
		   HeapTuple oldtuple,
		   TupleTableSlot *planSlot,
		   EPQState *epqstate,
		   EState *estate,
		   bool canSetTag)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	HeapUpdateFailureData hufd;
	TupleTableSlot *slot = NULL;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_delete_before_row)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
										tupleid, oldtuple);

		if (!dodelete)			/* "do nothing" */
			return NULL;
	}

	/* INSTEAD OF ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
	{
		bool		dodelete;

		Assert(oldtuple != NULL);
		dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);

		if (!dodelete)			/* "do nothing" */
			return NULL;
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		HeapTuple	tuple;

		/*
		 * delete from foreign table: let the FDW do it
		 *
		 * We offer the trigger tuple slot as a place to store RETURNING data,
		 * although the FDW can return some other slot if it wants.  Set up
		 * the slot's tupdesc so the FDW doesn't need to do that for itself.
		 */
		slot = estate->es_trig_tuple_slot;
		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));

		slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/*
		 * RETURNING expressions might reference the tableoid column, so
		 * initialize t_tableOid before evaluating them.
		 */
		if (slot->tts_isempty)
			ExecStoreAllNullTuple(slot);
		tuple = ExecMaterializeSlot(slot);
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
	}
	else
	{
		/*
		 * delete the tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
		 * that the row to be deleted is visible to that snapshot, and throw a
		 * can't-serialize error if not.  This is a special-case behavior
		 * needed for referential integrity updates in transaction-snapshot
		 * mode transactions.
		 */
ldelete:;
		result = heap_delete(resultRelationDesc, tupleid,
							 estate->es_output_cid,
							 estate->es_crosscheck_snapshot,
							 true /* wait for commit */ ,
							 &hufd);
		switch (result)
		{
			case HeapTupleSelfUpdated:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  The former case is possible in a join DELETE
				 * where multiple tuples join to the same target tuple. This
				 * is somewhat questionable, but Postgres has always allowed
				 * it: we just ignore additional deletion attempts.
				 *
				 * The latter case arises if the tuple is modified by a
				 * command in a BEFORE trigger, or perhaps by a command in a
				 * volatile function used in the query.  In such situations we
				 * should not ignore the deletion, but it is equally unsafe to
				 * proceed.  We don't want to discard the original DELETE
				 * while keeping the triggered actions based on its deletion;
				 * and it would be no better to allow the original DELETE
				 * while discarding updates that it triggered.  The row update
				 * carries some information that might be important according
				 * to business rules; so throwing an error is the only safe
				 * course.
				 *
				 * If a trigger actually intends this type of interaction, it
				 * can re-execute the DELETE and then return NULL to cancel
				 * the outer delete.
				 */
				if (hufd.cmax != estate->es_output_cid)
					ereport(ERROR,
							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
							 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

				/* Else, already deleted by self; nothing to do */
				return NULL;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				if (!ItemPointerEquals(tupleid, &hufd.ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   epqstate,
										   resultRelationDesc,
										   resultRelInfo->ri_RangeTableIndex,
										   LockTupleExclusive,
										   &hufd.ctid,
										   hufd.xmax);
					if (!TupIsNull(epqslot))
					{
						*tupleid = hufd.ctid;
						goto ldelete;
					}
				}
				/* tuple already deleted; nothing to do */
				return NULL;

			default:
				elog(ERROR, "unrecognized heap_delete status: %u", result);
				return NULL;
		}

		/*
		 * Note: Normally one would think that we have to delete index tuples
		 * associated with the heap tuple now...
		 *
		 * ... but in POSTGRES, we have no need to do this because VACUUM will
		 * take care of it later.  We can't delete index tuples immediately
		 * anyway, since the tuple is still visible to other transactions.
		 */
	}

	if (canSetTag)
		(estate->es_processed)++;

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.  We can use the trigger tuple slot.
		 */
		TupleTableSlot *rslot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		if (resultRelInfo->ri_FdwRoutine)
		{
			/* FDW must have provided a slot containing the deleted row */
			Assert(!TupIsNull(slot));
			delbuffer = InvalidBuffer;
		}
		else
		{
			slot = estate->es_trig_tuple_slot;
			if (oldtuple != NULL)
			{
				deltuple = *oldtuple;
				delbuffer = InvalidBuffer;
			}
			else
			{
				deltuple.t_self = *tupleid;
				if (!heap_fetch(resultRelationDesc, SnapshotAny,
								&deltuple, &delbuffer, false, NULL))
					elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
			}

			if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
				ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
			ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
		}

		rslot = ExecProcessReturning(resultRelInfo, slot, planSlot);

		/*
		 * Before releasing the target tuple again, make sure rslot has a
		 * local copy of any pass-by-reference values.
		 */
		ExecMaterializeSlot(rslot);

		ExecClearTuple(slot);
		if (BufferIsValid(delbuffer))
			ReleaseBuffer(delbuffer);

		return rslot;
	}

	return NULL;
}
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 *
 *		When updating a table, tupleid identifies the tuple to
 *		update and oldtuple is NULL.  When updating a view, oldtuple
 *		is passed to the INSTEAD OF triggers and identifies what to
 *		update, and tupleid is invalid.  When updating a foreign table,
 *		tupleid is invalid; the FDW has to figure out which row to
 *		update using data from the planSlot.  oldtuple is passed to
 *		foreign table triggers; it is NULL when the foreign table has
 *		no relevant triggers.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 * ----------------------------------------------------------------
 */
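/*
 * Concurrency illustration (hypothetical): in READ COMMITTED mode, if
 * another session updates the target row after our scan but before our
 * heap_update, the HeapTupleUpdated case below re-evaluates the query's
 * quals against the newest row version via EvalPlanQual and retries at
 * lreplace, rather than raising an error.
 */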
static TupleTableSlot *
ExecUpdate(ItemPointer tupleid,
		   HeapTuple oldtuple,
		   TupleTableSlot *slot,
		   TupleTableSlot *planSlot,
		   EPQState *epqstate,
		   EState *estate,
		   bool canSetTag)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	HeapUpdateFailureData hufd;
	List	   *recheckIndexes = NIL;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_update_before_row)
	{
		slot = ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
									tupleid, oldtuple, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);
	}

	/* INSTEAD OF ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_update_instead_row)
	{
		slot = ExecIRUpdateTriggers(estate, resultRelInfo,
									oldtuple, slot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* trigger might have changed tuple */
		tuple = ExecMaterializeSlot(slot);
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * update in foreign table: let the FDW do it
		 */
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/* FDW might have changed tuple */
		tuple = ExecMaterializeSlot(slot);

		/*
		 * AFTER ROW Triggers or RETURNING expressions might reference the
		 * tableoid column, so initialize t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
	}
	else
	{
		LockTupleMode lockmode;

		/*
		 * Constraints might reference the tableoid column, so initialize
		 * t_tableOid before evaluating them.
		 */
		tuple->t_tableOid = RelationGetRelid(resultRelationDesc);

		/*
		 * Check any RLS UPDATE WITH CHECK policies
		 *
		 * If we generate a new candidate tuple after EvalPlanQual testing, we
		 * must loop back here and recheck any RLS policies and constraints.
		 * (We don't need to redo triggers, however.  If there are any BEFORE
		 * triggers then trigger.c will have done heap_lock_tuple to lock the
		 * correct tuple, so there's no need to do them again.)
		 *
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
lreplace:;
		if (resultRelInfo->ri_WithCheckOptions != NIL)
			ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
								 resultRelInfo, slot, estate);

		/*
		 * Check the constraints of the tuple.  Note that we pass the same
		 * slot for the orig_slot argument, because unlike ExecInsert(), no
		 * tuple-routing is performed here, hence the slot remains unchanged.
		 */
		if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
			ExecConstraints(resultRelInfo, slot, slot, estate);

		/*
		 * replace the heap tuple
		 *
		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
		 * that the row to be updated is visible to that snapshot, and throw a
		 * can't-serialize error if not.  This is a special-case behavior
		 * needed for referential integrity updates in transaction-snapshot
		 * mode transactions.
		 */
		result = heap_update(resultRelationDesc, tupleid, tuple,
							 estate->es_output_cid,
							 estate->es_crosscheck_snapshot,
							 true /* wait for commit */ ,
							 &hufd, &lockmode);
		switch (result)
		{
			case HeapTupleSelfUpdated:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction.  The former case is possible in a join UPDATE
				 * where multiple tuples join to the same target tuple. This
				 * is pretty questionable, but Postgres has always allowed it:
				 * we just execute the first update action and ignore
				 * additional update attempts.
				 *
				 * The latter case arises if the tuple is modified by a
				 * command in a BEFORE trigger, or perhaps by a command in a
				 * volatile function used in the query.  In such situations we
				 * should not ignore the update, but it is equally unsafe to
				 * proceed.  We don't want to discard the original UPDATE
				 * while keeping the triggered actions based on it; and we
				 * have no principled way to merge this update with the
				 * previous ones.  So throwing an error is the only safe
				 * course.
				 *
				 * If a trigger actually intends this type of interaction, it
				 * can re-execute the UPDATE (assuming it can figure out how)
				 * and then return NULL to cancel the outer update.
				 */
				if (hufd.cmax != estate->es_output_cid)
					ereport(ERROR,
							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
							 errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

				/* Else, already updated by self; nothing to do */
				return NULL;

			case HeapTupleMayBeUpdated:
				break;

			case HeapTupleUpdated:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent update")));
				if (!ItemPointerEquals(tupleid, &hufd.ctid))
				{
					TupleTableSlot *epqslot;

					epqslot = EvalPlanQual(estate,
										   epqstate,
										   resultRelationDesc,
										   resultRelInfo->ri_RangeTableIndex,
										   lockmode,
										   &hufd.ctid,
										   hufd.xmax);
					if (!TupIsNull(epqslot))
					{
						*tupleid = hufd.ctid;
						slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
						tuple = ExecMaterializeSlot(slot);
						goto lreplace;
					}
				}
				/* tuple already deleted; nothing to do */
				return NULL;

			default:
				elog(ERROR, "unrecognized heap_update status: %u", result);
				return NULL;
		}

		/*
		 * Note: instead of having to update the old index tuples associated
		 * with the heap tuple, all we do is form and insert new index tuples.
		 * This is because UPDATEs are actually DELETEs and INSERTs, and index
		 * tuple deletion is done later by VACUUM (see notes in ExecDelete).
		 * All we do here is insert new index tuples.  -cim 9/27/89
		 */

		/*
		 * insert index entries for tuple
		 *
		 * Note: heap_update returns the tid (location) of the new tuple in
		 * the t_self field.
		 *
		 * If it's a HOT update, we mustn't insert new index entries.
		 */
		if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
			recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
												   estate, false, NULL, NIL);
	}

	if (canSetTag)
		(estate->es_processed)++;

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, tuple,
						 recheckIndexes);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually updating the
	 * record in the heap and all indexes.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		return ExecProcessReturning(resultRelInfo, slot, planSlot);

	return NULL;
}
/*
 * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
 *
 * Try to lock tuple for update as part of speculative insertion.  If
 * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
 * (but still lock row, even though it may not satisfy estate's
 * snapshot).
 *
 * Returns true if we're done (with or without an update), or false if
 * the caller must retry the INSERT from scratch.
 */
static bool
ExecOnConflictUpdate(ModifyTableState *mtstate,
					 ResultRelInfo *resultRelInfo,
					 ItemPointer conflictTid,
					 TupleTableSlot *planSlot,
					 TupleTableSlot *excludedSlot,
					 EState *estate,
					 bool canSetTag,
					 TupleTableSlot **returning)
{
	ExprContext *econtext = mtstate->ps.ps_ExprContext;
	Relation	relation = resultRelInfo->ri_RelationDesc;
	List	   *onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere;
	HeapTupleData tuple;
	HeapUpdateFailureData hufd;
	LockTupleMode lockmode;
	HTSU_Result test;
	Buffer		buffer;

	/* Determine lock mode to use */
	lockmode = ExecUpdateLockMode(estate, resultRelInfo);

	/*
	 * Lock tuple for update.  Don't follow updates when tuple cannot be
	 * locked without doing so.  A row locking conflict here means our
	 * previous conclusion that the tuple is conclusively committed is not
	 * true anymore.
	 */
	tuple.t_self = *conflictTid;
	test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
						   lockmode, LockWaitBlock, false, &buffer,
						   &hufd);
	switch (test)
	{
		case HeapTupleMayBeUpdated:
			/* success! */
			break;

		case HeapTupleInvisible:

			/*
			 * This can occur when a just inserted tuple is updated again in
			 * the same command.  E.g. because multiple rows with the same
			 * conflicting key values are inserted.
			 *
			 * This is somewhat similar to the ExecUpdate()
			 * HeapTupleSelfUpdated case.  We do not want to proceed because
			 * it would lead to the same row being updated a second time in
			 * some unspecified order, and in contrast to plain UPDATEs
			 * there's no historical behavior to break.
			 *
			 * It is the user's responsibility to prevent this situation from
			 * occurring.  These problems are why SQL-2003 similarly specifies
			 * that for SQL MERGE, an exception must be raised in the event of
			 * an attempt to update the same row twice.
			 */
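			/*
			 * Hypothetical statement reaching this error, given a table
			 * t(k int UNIQUE, v text):
			 *
			 *		INSERT INTO t VALUES (1, 'a'), (1, 'b')
			 *		ON CONFLICT (k) DO UPDATE SET v = excluded.v;
			 *
			 * The second proposed row conflicts with the row just inserted
			 * for the first, so the same target row would be updated twice
			 * within one command.
			 */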
			if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data)))
				ereport(ERROR,
						(errcode(ERRCODE_CARDINALITY_VIOLATION),
						 errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
						 errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));

			/* This shouldn't happen */
			elog(ERROR, "attempted to lock invisible tuple");

		case HeapTupleSelfUpdated:

			/*
			 * This state should never be reached. As a dirty snapshot is used
			 * to find conflicting tuples, speculative insertion wouldn't have
			 * seen this row to conflict with.
			 */
			elog(ERROR, "unexpected self-updated tuple");

		case HeapTupleUpdated:
			if (IsolationUsesXactSnapshot())
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));

			/*
			 * Tell caller to try again from the very start.
			 *
			 * It does not make sense to use the usual EvalPlanQual() style
			 * loop here, as the new version of the row might not conflict
			 * anymore, or the conflicting tuple has actually been deleted.
			 */
			ReleaseBuffer(buffer);
			return false;

		default:
			elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
	}

	/*
	 * Success, the tuple is locked.
	 *
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous cycle.
	 */
	ResetExprContext(econtext);

	/*
	 * Verify that the tuple is visible to our MVCC snapshot if the current
	 * isolation level mandates that.
	 *
	 * It's not sufficient to rely on the check within ExecUpdate() as e.g.
	 * CONFLICT ... WHERE clause may prevent us from reaching that.
	 *
	 * This means we only ever continue when a new command in the current
	 * transaction could see the row, even though in READ COMMITTED mode the
	 * tuple will not be visible according to the current statement's
	 * snapshot.  This is in line with the way UPDATE deals with newer tuple
	 * versions.
	 */
	ExecCheckHeapTupleVisible(estate, &tuple, buffer);

	/* Store target's existing tuple in the state's dedicated slot */
	ExecStoreTuple(&tuple, mtstate->mt_existing, buffer, false);

	/*
	 * Make tuple and any needed join variables available to ExecQual and
	 * ExecProject.  The EXCLUDED tuple is installed in ecxt_innertuple, while
	 * the target's existing tuple is installed in the scantuple.  EXCLUDED
	 * has been made to reference INNER_VAR in setrefs.c, but there is no
	 * other redirection.
	 */
	econtext->ecxt_scantuple = mtstate->mt_existing;
	econtext->ecxt_innertuple = excludedSlot;
	econtext->ecxt_outertuple = NULL;

	if (!ExecQual(onConflictSetWhere, econtext, false))
	{
		ReleaseBuffer(buffer);
		InstrCountFiltered1(&mtstate->ps, 1);
		return true;			/* done with the tuple */
	}

	if (resultRelInfo->ri_WithCheckOptions != NIL)
	{
		/*
		 * Check target's existing tuple against UPDATE-applicable USING
		 * security barrier quals (if any), enforced here as RLS checks/WCOs.
		 *
		 * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
		 * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
		 * but that's almost the extent of its special handling for ON
		 * CONFLICT DO UPDATE.
		 *
		 * The rewriter will also have associated UPDATE applicable straight
		 * RLS checks/WCOs for the benefit of the ExecUpdate() call that
		 * follows.  INSERTs and UPDATEs naturally have mutually exclusive WCO
		 * kinds, so there is no danger of spurious over-enforcement in the
		 * INSERT or UPDATE path.
		 */
		ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
							 mtstate->mt_existing,
							 mtstate->ps.state);
	}

	/* Project the new tuple version */
	ExecProject(resultRelInfo->ri_onConflictSetProj);

	/*
	 * Note that it is possible that the target tuple has been modified in
	 * this session, after the above heap_lock_tuple.  We choose to not error
	 * out in that case, in line with ExecUpdate's treatment of similar cases.
	 * This can happen if an UPDATE is triggered from within ExecQual(),
	 * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
	 * wCTE in the ON CONFLICT's SET.
	 */

	/* Execute UPDATE with projection */
	*returning = ExecUpdate(&tuple.t_self, NULL,
							mtstate->mt_conflproj, planSlot,
							&mtstate->mt_epqstate, mtstate->ps.state,
							canSetTag);

	ReleaseBuffer(buffer);
	return true;
}
/*
 * Process BEFORE EACH STATEMENT triggers
 */
static void
fireBSTriggers(ModifyTableState *node)
{
	switch (node->operation)
	{
		case CMD_INSERT:
			ExecBSInsertTriggers(node->ps.state, node->resultRelInfo);
			if (node->mt_onconflict == ONCONFLICT_UPDATE)
				ExecBSUpdateTriggers(node->ps.state,
									 node->resultRelInfo);
			break;
		case CMD_UPDATE:
			ExecBSUpdateTriggers(node->ps.state, node->resultRelInfo);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(node->ps.state, node->resultRelInfo);
			break;
		default:
			elog(ERROR, "unknown operation");
			break;
	}
}

/*
 * Process AFTER EACH STATEMENT triggers
 */
static void
fireASTriggers(ModifyTableState *node)
{
	switch (node->operation)
	{
		case CMD_INSERT:
			if (node->mt_onconflict == ONCONFLICT_UPDATE)
				ExecASUpdateTriggers(node->ps.state,
									 node->resultRelInfo);
			ExecASInsertTriggers(node->ps.state, node->resultRelInfo);
			break;
		case CMD_UPDATE:
			ExecASUpdateTriggers(node->ps.state, node->resultRelInfo);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(node->ps.state, node->resultRelInfo);
			break;
		default:
			elog(ERROR, "unknown operation");
			break;
	}
}

/* ----------------------------------------------------------------
 *	   ExecModifyTable
 *
 *		Perform table modifications as required, and return RETURNING results
 *		if needed.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecModifyTable(ModifyTableState *node)
{
	EState	   *estate = node->ps.state;
	CmdType		operation = node->operation;
	ResultRelInfo *saved_resultRelInfo;
	ResultRelInfo *resultRelInfo;
	PlanState  *subplanstate;
	JunkFilter *junkfilter;
	TupleTableSlot *slot;
	TupleTableSlot *planSlot;
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;
	HeapTupleData oldtupdata;
	HeapTuple	oldtuple;

	/*
	 * This should NOT get called during EvalPlanQual; we should have passed a
	 * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
	 * Assert because this condition is easy to miss in testing.  (Note:
	 * although ModifyTable should not get executed within an EvalPlanQual
	 * operation, we do have to allow it to be initialized and shut down in
	 * case it is within a CTE subplan.  Hence this test must be here, not in
	 * ExecInitModifyTable.)
	 */
	if (estate->es_epqTuple != NULL)
		elog(ERROR, "ModifyTable should not be called during EvalPlanQual");

	/*
	 * If we've already completed processing, don't try to do more.  We need
	 * this test because ExecPostprocessPlan might call us an extra time, and
	 * our subplan's nodes aren't necessarily robust against being called
	 * extra times.
	 */
	if (node->mt_done)
		return NULL;

	/*
	 * On first call, fire BEFORE STATEMENT triggers before proceeding.
	 */
	if (node->fireBSTriggers)
	{
		fireBSTriggers(node);
		node->fireBSTriggers = false;
	}

	/* Preload local variables */
	resultRelInfo = node->resultRelInfo + node->mt_whichplan;
	subplanstate = node->mt_plans[node->mt_whichplan];
	junkfilter = resultRelInfo->ri_junkFilter;

	/*
	 * es_result_relation_info must point to the currently active result
	 * relation while we are within this ModifyTable node.  Even though
	 * ModifyTable nodes can't be nested statically, they can be nested
	 * dynamically (since our subplan could include a reference to a modifying
	 * CTE).  So we have to save and restore the caller's value.
	 */
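	/*
	 * Hypothetical example of such dynamic nesting:
	 *
	 *		WITH x AS (INSERT INTO a VALUES (1) RETURNING *)
	 *		INSERT INTO b SELECT * FROM x;
	 *
	 * The outer INSERT's subplan executes the CTE's ModifyTable node, so
	 * es_result_relation_info changes hands mid-execution.
	 */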
	saved_resultRelInfo = estate->es_result_relation_info;

	estate->es_result_relation_info = resultRelInfo;

	/*
	 * Fetch rows from subplan(s), and execute the required table modification
	 * for each row.
	 */
	for (;;)
	{
		/*
		 * Reset the per-output-tuple exprcontext.  This is needed because
		 * triggers expect to use that context as workspace.  It's a bit ugly
		 * to do this below the top level of the plan, however.  We might need
		 * to rethink this later.
		 */
		ResetPerTupleExprContext(estate);

		planSlot = ExecProcNode(subplanstate);

		if (TupIsNull(planSlot))
		{
			/* advance to next subplan if any */
			node->mt_whichplan++;
			if (node->mt_whichplan < node->mt_nplans)
			{
				resultRelInfo++;
				subplanstate = node->mt_plans[node->mt_whichplan];
				junkfilter = resultRelInfo->ri_junkFilter;
				estate->es_result_relation_info = resultRelInfo;
				EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
									node->mt_arowmarks[node->mt_whichplan]);
				continue;
			}
			else
				break;
		}

		/*
		 * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
		 * here is compute the RETURNING expressions.
		 */
		if (resultRelInfo->ri_usesFdwDirectModify)
		{
			Assert(resultRelInfo->ri_projectReturning);

			/*
			 * A scan slot containing the data that was actually inserted,
			 * updated or deleted has already been made available to
			 * ExecProcessReturning by IterateDirectModify, so no need to
			 * provide it here.
			 */
			slot = ExecProcessReturning(resultRelInfo, NULL, planSlot);

			estate->es_result_relation_info = saved_resultRelInfo;
			return slot;
		}

		EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
		slot = planSlot;

		oldtuple = NULL;
		if (junkfilter != NULL)
		{
			/*
			 * extract the 'ctid' or 'wholerow' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				char		relkind;
				Datum		datum;
				bool		isNull;

				relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
				if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)
				{
					datum = ExecGetJunkAttribute(slot,
												 junkfilter->jf_junkAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");

					tupleid = (ItemPointer) DatumGetPointer(datum);
					tuple_ctid = *tupleid;	/* be sure we don't free ctid!! */
					tupleid = &tuple_ctid;
				}

				/*
				 * Use the wholerow attribute, when available, to reconstruct
				 * the old relation tuple.
				 *
				 * Foreign table updates have a wholerow attribute when the
				 * relation has an AFTER ROW trigger.  Note that the wholerow
				 * attribute does not carry system columns.  Foreign table
				 * triggers miss seeing those, except that we know enough here
				 * to set t_tableOid.  Quite separately from this, the FDW may
				 * fetch its own junk attrs to identify the row.
				 *
				 * Other relevant relkinds, currently limited to views, always
				 * have a wholerow attribute.
				 */
				else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
				{
					datum = ExecGetJunkAttribute(slot,
												 junkfilter->jf_junkAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "wholerow is NULL");

					oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
					oldtupdata.t_len =
						HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
					ItemPointerSetInvalid(&(oldtupdata.t_self));
					/* Historically, view triggers see invalid t_tableOid. */
					oldtupdata.t_tableOid =
						(relkind == RELKIND_VIEW) ? InvalidOid :
						RelationGetRelid(resultRelInfo->ri_RelationDesc);

					oldtuple = &oldtupdata;
				}
				else
					Assert(relkind == RELKIND_FOREIGN_TABLE);
			}

			/*
			 * apply the junkfilter if needed.
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		switch (operation)
		{
			case CMD_INSERT:
				slot = ExecInsert(node, slot, planSlot,
								  node->mt_arbiterindexes, node->mt_onconflict,
								  estate, node->canSetTag);
				break;
			case CMD_UPDATE:
				slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
								  &node->mt_epqstate, estate, node->canSetTag);
				break;
			case CMD_DELETE:
				slot = ExecDelete(tupleid, oldtuple, planSlot,
								  &node->mt_epqstate, estate, node->canSetTag);
				break;
			default:
				elog(ERROR, "unknown operation");
				break;
		}

		/*
		 * If we got a RETURNING result, return it to caller.  We'll continue
		 * the work on next call.
		 */
		if (slot)
		{
			estate->es_result_relation_info = saved_resultRelInfo;
			return slot;
		}
	}

	/* Restore es_result_relation_info before exiting */
	estate->es_result_relation_info = saved_resultRelInfo;

	/*
	 * We're done, but fire AFTER STATEMENT triggers before exiting.
	 */
	fireASTriggers(node);

	node->mt_done = true;

	return NULL;
}
/* ----------------------------------------------------------------
 *		ExecInitModifyTable
 * ----------------------------------------------------------------
 */
ModifyTableState *
ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
{
	ModifyTableState *mtstate;
	CmdType		operation = node->operation;
	int			nplans = list_length(node->plans);
	ResultRelInfo *saved_resultRelInfo;
	ResultRelInfo *resultRelInfo;
	TupleDesc	tupDesc;
	Plan	   *subplan;
	ListCell   *l;
	int			i;
	Relation	rel;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	mtstate = makeNode(ModifyTableState);
	mtstate->ps.plan = (Plan *) node;
	mtstate->ps.state = estate;
	mtstate->ps.targetlist = NIL;	/* not actually used */

	mtstate->operation = operation;
	mtstate->canSetTag = node->canSetTag;
	mtstate->mt_done = false;

	mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
	mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
	mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
	mtstate->mt_nplans = nplans;
	mtstate->mt_onconflict = node->onConflictAction;
	mtstate->mt_arbiterindexes = node->arbiterIndexes;

	/* set up epqstate with dummy subplan data for the moment */
	EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
	mtstate->fireBSTriggers = true;

	/*
	 * call ExecInitNode on each of the plans to be executed and save the
	 * results into the array "mt_plans".  This is also a convenient place to
	 * verify that the proposed target relations are valid and open their
	 * indexes for insertion of new index entries.  Note we *must* set
	 * estate->es_result_relation_info correctly while we initialize each
	 * sub-plan; ExecContextForcesOids depends on that!
	 */
	saved_resultRelInfo = estate->es_result_relation_info;

	resultRelInfo = mtstate->resultRelInfo;
	i = 0;
	foreach(l, node->plans)
	{
		subplan = (Plan *) lfirst(l);

		/* Initialize the usesFdwDirectModify flag */
		resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
												 node->fdwDirectModifyPlans);

		/*
		 * Verify result relation is a valid target for the current operation
		 */
		CheckValidResultRel(resultRelInfo->ri_RelationDesc, operation);

		/*
		 * If there are indices on the result relation, open them and save
		 * descriptors in the result relation info, so that we can add new
		 * index entries for the tuples we add/update.  We need not do this
		 * for a DELETE, however, since deletion doesn't affect indexes. Also,
		 * inside an EvalPlanQual operation, the indexes might be open
		 * already, since we share the resultrel state with the original
		 * query.
		 */
		if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
			operation != CMD_DELETE &&
			resultRelInfo->ri_IndexRelationDescs == NULL)
			ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE);

		/* Now init the plan for this result rel */
		estate->es_result_relation_info = resultRelInfo;
		mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);

		/* Also let FDWs init themselves for foreign-table result rels */
		if (!resultRelInfo->ri_usesFdwDirectModify &&
			resultRelInfo->ri_FdwRoutine != NULL &&
			resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
		{
			List	   *fdw_private = (List *) list_nth(node->fdwPrivLists, i);

			resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
															 resultRelInfo,
															 fdw_private,
															 i,
															 eflags);
		}

		resultRelInfo++;
		i++;
	}

	estate->es_result_relation_info = saved_resultRelInfo;

	/* The root table RT index is at the head of the partitioned_rels list */
	if (node->partitioned_rels)
	{
		Index		root_rti;
		Oid			root_oid;

		root_rti = linitial_int(node->partitioned_rels);
		root_oid = getrelid(root_rti, estate->es_range_table);
		rel = heap_open(root_oid, NoLock);	/* locked by InitPlan */
	}
	else
		rel = mtstate->resultRelInfo->ri_RelationDesc;

	/* Build state for INSERT tuple routing */
	if (operation == CMD_INSERT &&
		rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
	{
		PartitionDispatch *partition_dispatch_info;
		ResultRelInfo *partitions;
		TupleConversionMap **partition_tupconv_maps;
		TupleTableSlot *partition_tuple_slot;
		int			num_parted,
					num_partitions;

		ExecSetupPartitionTupleRouting(rel,
									   &partition_dispatch_info,
									   &partitions,
									   &partition_tupconv_maps,
									   &partition_tuple_slot,
									   &num_parted, &num_partitions);
		mtstate->mt_partition_dispatch_info = partition_dispatch_info;
		mtstate->mt_num_dispatch = num_parted;
		mtstate->mt_partitions = partitions;
		mtstate->mt_num_partitions = num_partitions;
		mtstate->mt_partition_tupconv_maps = partition_tupconv_maps;
		mtstate->mt_partition_tuple_slot = partition_tuple_slot;
	}
	/*
	 * Initialize any WITH CHECK OPTION constraints if needed.
	 */
	resultRelInfo = mtstate->resultRelInfo;
	i = 0;
	foreach(l, node->withCheckOptionLists)
	{
		List	   *wcoList = (List *) lfirst(l);
		List	   *wcoExprs = NIL;
		ListCell   *ll;

		foreach(ll, wcoList)
		{
			WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
			ExprState  *wcoExpr = ExecInitExpr((Expr *) wco->qual,
											   mtstate->mt_plans[i]);

			wcoExprs = lappend(wcoExprs, wcoExpr);
		}

		resultRelInfo->ri_WithCheckOptions = wcoList;
		resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
		resultRelInfo++;
		i++;
	}

	/*
	 * Build WITH CHECK OPTION constraints for each leaf partition rel. Note
	 * that we didn't build the withCheckOptionList for each partition within
	 * the planner, but simple translation of the varattnos for each partition
	 * will suffice.  This only occurs for the INSERT case; UPDATE/DELETE
	 * cases are handled above.
	 */
	if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
	{
		List	   *wcoList;

		Assert(operation == CMD_INSERT);
		resultRelInfo = mtstate->mt_partitions;
		wcoList = linitial(node->withCheckOptionLists);
		for (i = 0; i < mtstate->mt_num_partitions; i++)
		{
			Relation	partrel = resultRelInfo->ri_RelationDesc;
			List	   *mapped_wcoList;
			List	   *wcoExprs = NIL;
			ListCell   *ll;

			/* varno = node->nominalRelation */
			mapped_wcoList = map_partition_varattnos(wcoList,
													 node->nominalRelation,
													 partrel, rel);
			foreach(ll, mapped_wcoList)
			{
				WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
				ExprState  *wcoExpr = ExecInitExpr((Expr *) wco->qual,
												   mtstate->mt_plans[i]);

				wcoExprs = lappend(wcoExprs, wcoExpr);
			}

			resultRelInfo->ri_WithCheckOptions = mapped_wcoList;
			resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
			resultRelInfo++;
		}
	}
	/*
	 * Initialize RETURNING projections if needed.
	 */
	if (node->returningLists)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;
		List	   *returningList;

		/*
		 * Initialize result tuple slot and assign its rowtype using the first
		 * RETURNING list.  We assume the rest will look the same.
		 */
		tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
								 false);

		/* Set up a slot for the output of the RETURNING projection(s) */
		ExecInitResultTupleSlot(estate, &mtstate->ps);
		ExecAssignResultType(&mtstate->ps, tupDesc);
		slot = mtstate->ps.ps_ResultTupleSlot;

		/* Need an econtext too */
		econtext = CreateExprContext(estate);
		mtstate->ps.ps_ExprContext = econtext;

		/*
		 * Build a projection for each result rel.
		 */
		resultRelInfo = mtstate->resultRelInfo;
		foreach(l, node->returningLists)
		{
			List	   *rlist = (List *) lfirst(l);
			List	   *rliststate;

			rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps);
			resultRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(rliststate, econtext, slot,
									 resultRelInfo->ri_RelationDesc->rd_att);
			resultRelInfo++;
		}

		/*
		 * Build a projection for each leaf partition rel.  Note that we
		 * didn't build the returningList for each partition within the
		 * planner, but simple translation of the varattnos for each partition
		 * will suffice.  This only occurs for the INSERT case; UPDATE/DELETE
		 * are handled above.
		 */
		resultRelInfo = mtstate->mt_partitions;
		returningList = linitial(node->returningLists);
		for (i = 0; i < mtstate->mt_num_partitions; i++)
		{
			Relation	partrel = resultRelInfo->ri_RelationDesc;
			List	   *rlist,
					   *rliststate;

			/* varno = node->nominalRelation */
			rlist = map_partition_varattnos(returningList,
											node->nominalRelation,
											partrel, rel);
			rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps);
			resultRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(rliststate, econtext, slot,
									 resultRelInfo->ri_RelationDesc->rd_att);
			resultRelInfo++;
		}
	}
	else
	{
		/*
		 * We still must construct a dummy result tuple type, because InitPlan
		 * expects one (maybe should change that?).
		 */
		tupDesc = ExecTypeFromTL(NIL, false);
		ExecInitResultTupleSlot(estate, &mtstate->ps);
		ExecAssignResultType(&mtstate->ps, tupDesc);

		mtstate->ps.ps_ExprContext = NULL;
	}
	/* Close the root partitioned rel if we opened it above. */
	if (rel != mtstate->resultRelInfo->ri_RelationDesc)
		heap_close(rel, NoLock);

	/*
	 * If needed, Initialize target list, projection and qual for ON CONFLICT
	 * DO UPDATE.
	 */
	resultRelInfo = mtstate->resultRelInfo;
	if (node->onConflictAction == ONCONFLICT_UPDATE)
	{
		ExprContext *econtext;
		ExprState  *setexpr;
		TupleDesc	tupDesc;

		/* insert may only have one plan, inheritance is not expanded */
		Assert(nplans == 1);

		/* already exists if created by RETURNING processing above */
		if (mtstate->ps.ps_ExprContext == NULL)
			ExecAssignExprContext(estate, &mtstate->ps);

		econtext = mtstate->ps.ps_ExprContext;

		/* initialize slot for the existing tuple */
		mtstate->mt_existing = ExecInitExtraTupleSlot(mtstate->ps.state);
		ExecSetSlotDescriptor(mtstate->mt_existing,
							  resultRelInfo->ri_RelationDesc->rd_att);

		/* carried forward solely for the benefit of explain */
		mtstate->mt_excludedtlist = node->exclRelTlist;

		/* create target slot for UPDATE SET projection */
		tupDesc = ExecTypeFromTL((List *) node->onConflictSet,
						 resultRelInfo->ri_RelationDesc->rd_rel->relhasoids);
		mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state);
		ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc);

		/* build UPDATE SET expression and projection state */
		setexpr = ExecInitExpr((Expr *) node->onConflictSet, &mtstate->ps);
		resultRelInfo->ri_onConflictSetProj =
			ExecBuildProjectionInfo((List *) setexpr, econtext,
									mtstate->mt_conflproj,
									resultRelInfo->ri_RelationDesc->rd_att);

		/* build DO UPDATE WHERE clause expression */
		if (node->onConflictWhere)
		{
			ExprState  *qualexpr;

			qualexpr = ExecInitExpr((Expr *) node->onConflictWhere,
									&mtstate->ps);

			resultRelInfo->ri_onConflictSetWhere = (List *) qualexpr;
		}
	}
	/*
	 * If we have any secondary relations in an UPDATE or DELETE, they need to
	 * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
	 * EvalPlanQual mechanism needs to be told about them.  Locate the
	 * relevant ExecRowMarks.
	 */
	foreach(l, node->rowMarks)
	{
		PlanRowMark *rc = castNode(PlanRowMark, lfirst(l));
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* find ExecRowMark (same for all subplans) */
		erm = ExecFindRowMark(estate, rc->rti, false);

		/* build ExecAuxRowMark for each subplan */
		for (i = 0; i < nplans; i++)
		{
			ExecAuxRowMark *aerm;

			subplan = mtstate->mt_plans[i]->plan;
			aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
			mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm);
		}
	}

	/* select first subplan */
	mtstate->mt_whichplan = 0;
	subplan = (Plan *) linitial(node->plans);
	EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan,
						mtstate->mt_arowmarks[0]);

	/*
	 * Initialize the junk filter(s) if needed.  INSERT queries need a filter
	 * if there are any junk attrs in the tlist.  UPDATE and DELETE always
	 * need a filter, since there's always a junk 'ctid' or 'wholerow'
	 * attribute present --- no need to look first.
	 *
	 * If there are multiple result relations, each one needs its own junk
	 * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
	 * can't be fooled by some needing a filter and some not.
	 *
	 * This section of code is also a convenient place to verify that the
	 * output of an INSERT or UPDATE matches the target table(s).
	 */
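	/*
	 * For example (hypothetical): the plan for "UPDATE t SET a = a + 1"
	 * produces (a+1, b, ..., ctid), where the trailing resjunk "ctid"
	 * column locates the old row; the junk filter strips it before the new
	 * tuple is formed.
	 */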
	{
		bool		junk_filter_needed = false;

		switch (operation)
		{
			case CMD_INSERT:
				foreach(l, subplan->targetlist)
				{
					TargetEntry *tle = (TargetEntry *) lfirst(l);

					if (tle->resjunk)
					{
						junk_filter_needed = true;
						break;
					}
				}
				break;
			case CMD_UPDATE:
			case CMD_DELETE:
				junk_filter_needed = true;
				break;
			default:
				elog(ERROR, "unknown operation");
				break;
		}

		if (junk_filter_needed)
		{
			resultRelInfo = mtstate->resultRelInfo;
			for (i = 0; i < nplans; i++)
			{
				JunkFilter *j;

				subplan = mtstate->mt_plans[i]->plan;
				if (operation == CMD_INSERT || operation == CMD_UPDATE)
					ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
										subplan->targetlist);

				j = ExecInitJunkFilter(subplan->targetlist,
							resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
									   ExecInitExtraTupleSlot(estate));

				if (operation == CMD_UPDATE || operation == CMD_DELETE)
				{
					/* For UPDATE/DELETE, find the appropriate junk attr now */
					char		relkind;

					relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
					if (relkind == RELKIND_RELATION ||
						relkind == RELKIND_MATVIEW ||
						relkind == RELKIND_PARTITIONED_TABLE)
					{
						j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
						if (!AttributeNumberIsValid(j->jf_junkAttNo))
							elog(ERROR, "could not find junk ctid column");
					}
					else if (relkind == RELKIND_FOREIGN_TABLE)
					{
						/*
						 * When there is an AFTER trigger, there should be a
						 * wholerow attribute.
						 */
						j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
					}
					else
					{
						j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
						if (!AttributeNumberIsValid(j->jf_junkAttNo))
							elog(ERROR, "could not find junk wholerow column");
					}
				}

				resultRelInfo->ri_junkFilter = j;
				resultRelInfo++;
			}
		}
		else
		{
			if (operation == CMD_INSERT)
				ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc,
									subplan->targetlist);
		}
	}

	/*
	 * Set up a tuple table slot for use for trigger output tuples.  In a plan
	 * containing multiple ModifyTable nodes, all can share one such slot, so
	 * we keep it in the estate.
	 */
	if (estate->es_trig_tuple_slot == NULL)
		estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);

	/*
	 * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
	 * to estate->es_auxmodifytables so that it will be run to completion by
	 * ExecPostprocessPlan.  (It'd actually work fine to add the primary
	 * ModifyTable node too, but there's no need.)  Note the use of lcons not
	 * lappend: we need later-initialized ModifyTable nodes to be shut down
	 * before earlier ones.  This ensures that we don't throw away RETURNING
	 * rows that need to be seen by a later CTE subplan.
	 */
	if (!mtstate->canSetTag)
		estate->es_auxmodifytables = lcons(mtstate,
										   estate->es_auxmodifytables);

	return mtstate;
}
/* ----------------------------------------------------------------
 *		ExecEndModifyTable
 *
 *		Shuts down the plan.
 *
 *		Returns nothing of interest.
 * ----------------------------------------------------------------
 */
void
ExecEndModifyTable(ModifyTableState *node)
{
	int			i;

	/*
	 * Allow any FDWs to shut down
	 */
	for (i = 0; i < node->mt_nplans; i++)
	{
		ResultRelInfo *resultRelInfo = node->resultRelInfo + i;

		if (!resultRelInfo->ri_usesFdwDirectModify &&
			resultRelInfo->ri_FdwRoutine != NULL &&
			resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
			resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
														   resultRelInfo);
	}

	/*
	 * Close all the partitioned tables, leaf partitions, and their indices
	 *
	 * Remember node->mt_partition_dispatch_info[0] corresponds to the root
	 * partitioned table, which we must not try to close, because it is the
	 * main target table of the query that will be closed by ExecEndPlan().
	 * Also, tupslot is NULL for the root partitioned table.
	 */
	for (i = 1; i < node->mt_num_dispatch; i++)
	{
		PartitionDispatch pd = node->mt_partition_dispatch_info[i];

		heap_close(pd->reldesc, NoLock);
		ExecDropSingleTupleTableSlot(pd->tupslot);
	}
	for (i = 0; i < node->mt_num_partitions; i++)
	{
		ResultRelInfo *resultRelInfo = node->mt_partitions + i;

		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	/* Release the standalone partition tuple descriptor, if any */
	if (node->mt_partition_tuple_slot)
		ExecDropSingleTupleTableSlot(node->mt_partition_tuple_slot);

	/*
	 * Free the exprcontext
	 */
	ExecFreeExprContext(&node->ps);

	/*
	 * clean out the tuple table
	 */
	ExecClearTuple(node->ps.ps_ResultTupleSlot);

	/*
	 * Terminate EPQ execution if active
	 */
	EvalPlanQualEnd(&node->mt_epqstate);

	/*
	 * shut down subplans
	 */
	for (i = 0; i < node->mt_nplans; i++)
		ExecEndNode(node->mt_plans[i]);
}
void
ExecReScanModifyTable(ModifyTableState *node)
{
	/*
	 * Currently, we don't need to support rescan on ModifyTable nodes. The
	 * semantics of that would be a bit debatable anyway.
	 */
	elog(ERROR, "ExecReScanModifyTable is not implemented");
}