/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 *	The old ExecutorMain() has been replaced by ExecutorStart(),
 *	ExecutorRun() and ExecutorEnd()
 *
 *	These three procedures are the external interfaces to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart() must be called at the beginning of execution of any
 *	query plan and ExecutorEnd() should always be called at the end of
 *	execution of a plan.
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many
 *	tuples.
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.317 2008/11/16 17:34:28 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "executor/nodeSubplan.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"
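
/*
 * For orientation: a typical top-level caller (in reality the portal and
 * SPI layers) drives the entry points below roughly as follows.  This is a
 * minimal sketch, not code from this file; error handling is omitted.
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorEnd(queryDesc);
 *
 * Passing count = 0 to ExecutorRun means "run to completion" (see its
 * header comment below).
 */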

/* Hook for plugins to get control in ExecutorRun() */
ExecutorRun_hook_type ExecutorRun_hook = NULL;
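
/*
 * A loadable plugin would typically install the hook from its _PG_init()
 * like this (a sketch only; "prev_ExecutorRun" and "my_ExecutorRun" are
 * hypothetical plugin-side symbols, not part of this file):
 *
 *		prev_ExecutorRun = ExecutorRun_hook;
 *		ExecutorRun_hook = my_ExecutorRun;
 *
 * and my_ExecutorRun() would normally finish by calling prev_ExecutorRun()
 * if that was set, else standard_ExecutorRun().
 */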

typedef struct evalPlanQual
{
	Index		rti;
	EState	   *estate;
	PlanState  *planstate;
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
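
/*
 * Note: the active entries form a stack linked through "next"; entries
 * popped off the stack are kept on the "free" list for re-use rather than
 * being released (see the push/pop logic in EvalPlanQual, below).
 */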

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecCheckPlanOutput(Relation resultRel, List *targetList);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecProcessReturning(ProjectionInfo *projectReturning,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot,
					 DestReceiver *dest);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
				  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */

/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan.
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not entirely
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 */
	if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in parameters, if any, from queryDesc
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->nParamExec > 0)
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:
			/* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
			if (queryDesc->plannedstmt->intoClause != NULL ||
				queryDesc->plannedstmt->rowMarks != NIL)
				estate->es_output_cid = GetCurrentCommandId(true);
			break;

		case CMD_INSERT:
		case CMD_DELETE:
		case CMD_UPDATE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_instrument = queryDesc->doInstrument;

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module.  It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
			ScanDirection direction, long count)
{
	if (ExecutorRun_hook)
		(*ExecutorRun_hook) (queryDesc, direction, count);
	else
		standard_ExecutorRun(queryDesc, direction, count);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, long count)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * extract information from the query descriptor
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->returningLists);

	if (sendTuples)
		(*dest->rStartup) (dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (!ScanDirectionIsNoMovement(direction))
		ExecutePlan(estate,
					queryDesc->planstate,
					operation,
					count,
					direction,
					dest);

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		(*dest->rShutdown) (dest);

	MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan.
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/*
	 * Close the SELECT INTO relation if any
	 */
	if (estate->es_select_into)
		CloseIntoRel(queryDesc);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
}

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * rescan plan
	 */
	ExecReScan(queryDesc->planstate, NULL);

	MemoryContextSwitchTo(oldcontext);
}
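
/*
 * Rewinding lets an already-started query be read again from the start
 * without tearing down and rebuilding the plan state tree; the typical
 * caller is presumably the portal layer (e.g. rewinding a scrollable
 * cursor).
 */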

/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
	ListCell   *l;

	foreach(l, rangeTable)
	{
		ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
	}
}

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked by init_fcache when the function is prepared for execution.
	 * Join, subquery, and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
	 */
	if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
		!= requiredPerms)
		aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
					   get_rel_name(relOid));
}
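
/*
 * Note: rte->requiredPerms is a bitmask (ACL_SELECT, ACL_UPDATE, etc.)
 * accumulated during parse analysis and rewriting, so by the time we get
 * here it already reflects every way the query touches the relation.
 */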

/*
 * Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
	ListCell   *l;

	/*
	 * CREATE TABLE AS or SELECT INTO?
	 *
	 * XXX should we allow this if the destination is temp?
	 */
	if (plannedstmt->intoClause != NULL)
		goto fail;

	/* Fail if write permissions are requested on any non-temp table */
	foreach(l, plannedstmt->rtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		if (rte->rtekind != RTE_RELATION)
			continue;

		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
			continue;

		if (isTempNamespace(get_rel_namespace(rte->relid)))
			continue;

		goto fail;
	}

	return;

fail:
	ereport(ERROR,
			(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
			 errmsg("transaction is read-only")));
}

/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;

	/*
	 * initialize result relation stuff
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
			InitResultRelInfo(resultRelInfo,
							  resultRelation,
							  resultRelationIndex,
							  operation,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* Initialize to first or only result rel */
		estate->es_result_relation_info = resultRelInfos;
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
	}

	/*
	 * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
	 * flag appropriately so that the plan tree will be initialized with the
	 * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
	 */
	estate->es_select_into = false;
	if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
	{
		estate->es_select_into = true;
		estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
	}

	/*
	 * Have to lock relations selected FOR UPDATE/FOR SHARE before we
	 * initialize the plan tree, else we'd be doing a lock upgrade. While we
	 * are at it, build the ExecRowMark list.
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		RowMarkClause *rc = (RowMarkClause *) lfirst(l);
		Oid			relid;
		Relation	relation;
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		relid = getrelid(rc->rti, rangeTable);
		relation = heap_open(relid, RowShareLock);
		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->rti = rc->rti;
		erm->prti = rc->prti;
		erm->forUpdate = rc->forUpdate;
		erm->noWait = rc->noWait;
		/* We'll locate the junk attrs below */
		erm->ctidAttNo = InvalidAttrNumber;
		erm->toidAttNo = InvalidAttrNumber;
		ItemPointerSetInvalid(&(erm->curCtid));
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/*
	 * Initialize the executor "tuple" table.  We need slots for all the plan
	 * nodes, plus possibly output slots for the junkfilter(s).  At this point
	 * we aren't sure if we need junkfilters, so just add slots for them
	 * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
	 * trigger output tuples.  Also, one for RETURNING-list evaluation.
	 */
	{
		int			nSlots;

		/* Slots for the main plan tree */
		nSlots = ExecCountSlotsNode(plan);
		/* Add slots for subplans and initplans */
		foreach(l, plannedstmt->subplans)
		{
			Plan	   *subplan = (Plan *) lfirst(l);

			nSlots += ExecCountSlotsNode(subplan);
		}
		/* Add slots for junkfilter(s) */
		if (plannedstmt->resultRelations != NIL)
			nSlots += list_length(plannedstmt->resultRelations);
		else
			nSlots += 1;
		if (operation != CMD_SELECT)
			nSlots++;			/* for es_trig_tuple_slot */
		if (plannedstmt->returningLists)
			nSlots++;			/* for RETURNING projection */

		estate->es_tupleTable = ExecCreateTupleTable(nSlots);

		if (operation != CMD_SELECT)
			estate->es_trig_tuple_slot =
				ExecAllocTableSlot(estate->es_tupleTable);
	}

	/* mark EvalPlanQual not active */
	estate->es_plannedstmt = plannedstmt;
	estate->es_evalPlanQual = NULL;
	estate->es_evTupleNull = NULL;
	estate->es_evTuple = NULL;
	estate->es_useEvalPlan = false;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return. (this
	 * is especially important if we are creating a relation with "SELECT
	 * INTO")
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT and INSERT queries need a
	 * filter if there are any junk attrs in the tlist.  UPDATE and
	 * DELETE always need a filter, since there's always a junk 'ctid'
	 * attribute present --- no need to look first.
	 *
	 * This section of code is also a convenient place to verify that the
	 * output of an INSERT or UPDATE matches the target table(s).
	 */
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		switch (operation)
		{
			case CMD_SELECT:
			case CMD_INSERT:
				foreach(tlist, plan->targetlist)
				{
					TargetEntry *tle = (TargetEntry *) lfirst(tlist);

					if (tle->resjunk)
					{
						junk_filter_needed = true;
						break;
					}
				}
				break;
			case CMD_UPDATE:
			case CMD_DELETE:
				junk_filter_needed = true;
				break;
			default:
				break;
		}

		if (junk_filter_needed)
		{
			/*
			 * If there are multiple result relations, each one needs its own
			 * junk filter.  Note this is only possible for UPDATE/DELETE, so
			 * we can't be fooled by some needing a filter and some not.
			 */
			if (list_length(plannedstmt->resultRelations) > 1)
			{
				PlanState **appendplans;
				int			as_nplans;
				ResultRelInfo *resultRelInfo;

				/* Top plan had better be an Append here. */
				Assert(IsA(plan, Append));
				Assert(((Append *) plan)->isTarget);
				Assert(IsA(planstate, AppendState));
				appendplans = ((AppendState *) planstate)->appendplans;
				as_nplans = ((AppendState *) planstate)->as_nplans;
				Assert(as_nplans == estate->es_num_result_relations);
				resultRelInfo = estate->es_result_relations;
				for (i = 0; i < as_nplans; i++)
				{
					PlanState  *subplan = appendplans[i];
					JunkFilter *j;

					if (operation == CMD_UPDATE)
						ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
											subplan->plan->targetlist);

					j = ExecInitJunkFilter(subplan->plan->targetlist,
										   resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
										   ExecAllocTableSlot(estate->es_tupleTable));

					/*
					 * Since it must be UPDATE/DELETE, there had better be a
					 * "ctid" junk attribute in the tlist ... but ctid could
					 * be at a different resno for each result relation. We
					 * look up the ctid resnos now and save them in the
					 * junkfilters.
					 */
					j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
					if (!AttributeNumberIsValid(j->jf_junkAttNo))
						elog(ERROR, "could not find junk ctid column");
					resultRelInfo->ri_junkFilter = j;
					resultRelInfo++;
				}

				/*
				 * Set active junkfilter too; at this point ExecInitAppend has
				 * already selected an active result relation...
				 */
				estate->es_junkFilter =
					estate->es_result_relation_info->ri_junkFilter;

				/*
				 * We currently can't support rowmarks in this case, because
				 * the associated junk CTIDs might have different resnos in
				 * different subplans.
				 */
				if (estate->es_rowMarks)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
			}
			else
			{
				/* Normal case with just one JunkFilter */
				JunkFilter *j;

				if (operation == CMD_INSERT || operation == CMD_UPDATE)
					ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
										planstate->plan->targetlist);

				j = ExecInitJunkFilter(planstate->plan->targetlist,
									   tupType->tdhasoid,
									   ExecAllocTableSlot(estate->es_tupleTable));
				estate->es_junkFilter = j;
				if (estate->es_result_relation_info)
					estate->es_result_relation_info->ri_junkFilter = j;

				if (operation == CMD_SELECT)
				{
					/* For SELECT, want to return the cleaned tuple type */
					tupType = j->jf_cleanTupType;
				}
				else if (operation == CMD_UPDATE || operation == CMD_DELETE)
				{
					/* For UPDATE/DELETE, find the ctid junk attr now */
					j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
					if (!AttributeNumberIsValid(j->jf_junkAttNo))
						elog(ERROR, "could not find junk ctid column");
				}

				/* For SELECT FOR UPDATE/SHARE, find the junk attrs now */
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = (ExecRowMark *) lfirst(l);
					char		resname[32];

					/* always need the ctid */
					snprintf(resname, sizeof(resname), "ctid%u",
							 erm->prti);
					erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
					if (!AttributeNumberIsValid(erm->ctidAttNo))
						elog(ERROR, "could not find junk \"%s\" column",
							 resname);
					/* if child relation, need tableoid too */
					if (erm->rti != erm->prti)
					{
						snprintf(resname, sizeof(resname), "tableoid%u",
								 erm->prti);
						erm->toidAttNo = ExecFindJunkAttribute(j, resname);
						if (!AttributeNumberIsValid(erm->toidAttNo))
							elog(ERROR, "could not find junk \"%s\" column",
								 resname);
					}
				}
			}
		}
		else
		{
			if (operation == CMD_INSERT)
				ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
									planstate->plan->targetlist);

			estate->es_junkFilter = NULL;
			if (estate->es_rowMarks)
				elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
		}
	}

	/*
	 * Initialize RETURNING projections if needed.
	 */
	if (plannedstmt->returningLists)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;
		ResultRelInfo *resultRelInfo;

		/*
		 * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
		 * We assume all the sublists will generate the same output tupdesc.
		 */
		tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
								 false);

		/* Set up a slot for the output of the RETURNING projection(s) */
		slot = ExecAllocTableSlot(estate->es_tupleTable);
		ExecSetSlotDescriptor(slot, tupType);
		/* Need an econtext too */
		econtext = CreateExprContext(estate);

		/*
		 * Build a projection for each result rel.  Note that any SubPlans in
		 * the RETURNING lists get attached to the topmost plan node.
		 */
		Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
		resultRelInfo = estate->es_result_relations;
		foreach(l, plannedstmt->returningLists)
		{
			List	   *rlist = (List *) lfirst(l);
			List	   *rliststate;

			rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
			resultRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(rliststate, econtext, slot,
										resultRelInfo->ri_RelationDesc->rd_att);
			resultRelInfo++;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;

	/*
	 * If doing SELECT INTO, initialize the "into" relation.  We must wait
	 * till now so we have the "clean" result tuple type to create the new
	 * table from.
	 *
	 * If EXPLAIN, skip creating the "into" relation.
	 */
	if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  CmdType operation,
				  bool doInstrument)
{
	/*
	 * Check valid relkind ... parser and/or planner should have noticed this
	 * already, but let's make sure.
	 */
	switch (resultRelationDesc->rd_rel->relkind)
	{
		case RELKIND_RELATION:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_VIEW:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change view \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
	}

	/* OK, fill in the node */
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		if (doInstrument)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
		else
			resultRelInfo->ri_TrigInstrument = NULL;
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;

	/*
	 * If there are indices on the result relation, open them and save
	 * descriptors in the result relation info, so that we can add new index
	 * entries for the tuples we add/update.  We need not do this for a
	 * DELETE, however, since deletion doesn't affect indexes.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		operation != CMD_DELETE)
		ExecOpenIndices(resultRelInfo);
}

/*
 * Verify that the tuples to be produced by INSERT or UPDATE match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 */
static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
	TupleDesc	resultDesc = RelationGetDescr(resultRel);
	int			attno = 0;
	ListCell   *lc;

	foreach(lc, targetList)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(lc);
		Form_pg_attribute attr;

		if (tle->resjunk)
			continue;			/* ignore junk tlist items */

		if (attno >= resultDesc->natts)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("table row type and query-specified row type do not match"),
					 errdetail("Query has too many columns.")));
		attr = resultDesc->attrs[attno++];

		if (!attr->attisdropped)
		{
			/* Normal case: demand type match */
			if (exprType((Node *) tle->expr) != attr->atttypid)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
								   format_type_be(attr->atttypid),
								   attno,
								   format_type_be(exprType((Node *) tle->expr)))));
		}
		else
		{
			/*
			 * For a dropped column, we can't check atttypid (it's likely 0).
			 * In any case the planner has most likely inserted an INT4 null.
			 * What we insist on is just *some* NULL constant.
			 */
			if (!IsA(tle->expr, Const) ||
				!((Const *) tle->expr)->constisnull)
				ereport(ERROR,
						(errcode(ERRCODE_DATATYPE_MISMATCH),
						 errmsg("table row type and query-specified row type do not match"),
						 errdetail("Query provides a value for a dropped column at ordinal position %d.",
								   attno)));
		}
	}
	if (attno != resultDesc->natts)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("table row type and query-specified row type do not match"),
				 errdetail("Query has too few columns.")));
}
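
/*
 * Concretely, the sort of staleness this guards against is a cached plan
 * built before an ALTER TABLE added or dropped a column on the target
 * relation; with plan invalidation working correctly the plan is rebuilt
 * instead, so the ereports above should be unreachable in practice.
 */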

/*
 * ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
	ResultRelInfo *rInfo;
	int			nr;
	ListCell   *l;
	Relation	rel;
	MemoryContext oldcontext;

	/* First, search through the query result relations */
	rInfo = estate->es_result_relations;
	nr = estate->es_num_result_relations;
	while (nr > 0)
	{
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
		rInfo++;
		nr--;
	}
	/* Nope, but maybe we already made an extra ResultRelInfo for it */
	foreach(l, estate->es_trig_target_relations)
	{
		rInfo = (ResultRelInfo *) lfirst(l);
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
	}
	/* Nope, so we need a new one */

	/*
	 * Open the target relation's relcache entry.  We assume that an
	 * appropriate lock is still held by the backend from whenever the trigger
	 * event got queued, so we need take no new lock here.
	 */
	rel = heap_open(relid, NoLock);

	/*
	 * Make the new entry in the right context.  Currently, we don't need any
	 * index information in ResultRelInfos used only for triggers, so tell
	 * InitResultRelInfo it's a DELETE.
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
	rInfo = makeNode(ResultRelInfo);
	InitResultRelInfo(rInfo,
					  rel,
					  0,		/* dummy rangetable index */
					  CMD_DELETE,
					  estate->es_instrument);
	estate->es_trig_target_relations =
		lappend(estate->es_trig_target_relations, rInfo);
	MemoryContextSwitchTo(oldcontext);

	return rInfo;
}

/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
	if (planstate->state->es_select_into)
	{
		*hasoids = planstate->state->es_into_oids;
		return true;
	}
	else
	{
		ResultRelInfo *ri = planstate->state->es_result_relation_info;

		if (ri != NULL)
		{
			Relation	rel = ri->ri_RelationDesc;

			if (rel != NULL)
			{
				*hasoids = rel->rd_rel->relhasoids;
				return true;
			}
		}
	}

	return false;
}
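
/*
 * For example: for an INSERT into a WITH OIDS table this returns true and
 * sets *hasoids = true; for an INSERT into a WITHOUT OIDS table it returns
 * true with *hasoids = false; and for a plain SELECT (no SELECT INTO, no
 * result relation) it returns false, leaving the choice to the caller.
 */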

/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/*
	 * shut down any PlanQual processing we were doing
	 */
	if (estate->es_evalPlanQual != NULL)
		EndEvalPlanQual(estate);

	/*
	 * shut down the node-type-specific query processing
	 */
	ExecEndNode(planstate);

	/*
	 * for subplans too
	 */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/*
	 * destroy the executor "tuple" table.
	 */
	ExecDropTupleTable(estate->es_tupleTable, true);
	estate->es_tupleTable = NULL;

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/*
	 * likewise close any trigger target relations
	 */
	foreach(l, estate->es_trig_target_relations)
	{
		resultRelInfo = (ResultRelInfo *) lfirst(l);
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	/*
	 * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = lfirst(l);

		heap_close(erm->relation, NoLock);
	}
}

/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have processed 'numberTuples'
 *		tuples, moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	JunkFilter *junkfilter;
	TupleTableSlot *planSlot;
	TupleTableSlot *slot;
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;
	long		current_tuple_count;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;
1321 * Set the direction.
1323 estate->es_direction = direction;
1326 * Process BEFORE EACH STATEMENT triggers
1331 ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
1334 ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
1337 ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1345 * Loop until we've processed the proper number of tuples from the plan.
1349 /* Reset the per-output-tuple exprcontext */
1350 ResetPerTupleExprContext(estate);
1353 * Execute the plan and obtain a tuple
1356 if (estate->es_useEvalPlan)
1358 planSlot = EvalPlanQualNext(estate);
1359 if (TupIsNull(planSlot))
1360 planSlot = ExecProcNode(planstate);
1363 planSlot = ExecProcNode(planstate);
1366 * if the tuple is null, then we assume there is nothing more to
1367 * process so we just end the loop...
1369 if (TupIsNull(planSlot))

		slot = planSlot;

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * attributes removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 *
		 * But first, extract all the junk information we need.
		 */
		if ((junkfilter = estate->es_junkFilter) != NULL)
		{
			/*
			 * Process any FOR UPDATE or FOR SHARE locking requested.
			 */
			if (estate->es_rowMarks != NIL)
			{
				ListCell   *l;

		lmark:	;
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = lfirst(l);
					Datum		datum;
					bool		isNull;
					HeapTupleData tuple;
					Buffer		buffer;
					ItemPointerData update_ctid;
					TransactionId update_xmax;
					TupleTableSlot *newSlot;
					LockTupleMode lockmode;
					HTSU_Result test;

					/* if child rel, must check whether it produced this row */
					if (erm->rti != erm->prti)
					{
						Oid			tableoid;

						datum = ExecGetJunkAttribute(slot,
													 erm->toidAttNo,
													 &isNull);
						/* shouldn't ever get a null result... */
						if (isNull)
							elog(ERROR, "tableoid is NULL");
						tableoid = DatumGetObjectId(datum);

						if (tableoid != RelationGetRelid(erm->relation))
						{
							/* this child is inactive right now */
							ItemPointerSetInvalid(&(erm->curCtid));
							continue;
						}
					}

					/* okay, fetch the tuple by ctid */
					datum = ExecGetJunkAttribute(slot,
												 erm->ctidAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");
					tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

					if (erm->forUpdate)
						lockmode = LockTupleExclusive;
					else
						lockmode = LockTupleShared;

					test = heap_lock_tuple(erm->relation, &tuple, &buffer,
										   &update_ctid, &update_xmax,
										   estate->es_output_cid,
										   lockmode, erm->noWait);
					ReleaseBuffer(buffer);
					switch (test)
					{
						case HeapTupleSelfUpdated:
							/* treat it as deleted; do not process */
							goto lnext;

						case HeapTupleMayBeUpdated:
							break;

						case HeapTupleUpdated:
							if (IsXactIsoLevelSerializable)
								ereport(ERROR,
										(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
										 errmsg("could not serialize access due to concurrent update")));
							if (!ItemPointerEquals(&update_ctid,
												   &tuple.t_self))
							{
								/* updated, so look at updated version */
								newSlot = EvalPlanQual(estate,
													   erm->rti,
													   &update_ctid,
													   update_xmax);
								if (!TupIsNull(newSlot))
								{
									slot = planSlot = newSlot;
									estate->es_useEvalPlan = true;
									goto lmark;
								}
							}

							/*
							 * if tuple was deleted or PlanQual failed for
							 * updated tuple - we must not return this tuple!
							 */
							goto lnext;

						default:
							elog(ERROR, "unrecognized heap_lock_tuple status: %u",
								 test);
							return;
					}

					/* Remember tuple TID for WHERE CURRENT OF */
					erm->curCtid = tuple.t_self;
				}
			}

			/*
			 * extract the 'ctid' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				Datum		datum;
				bool		isNull;

				datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
											 &isNull);
				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "ctid is NULL");

				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* make sure we don't free the ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Create a new "clean" tuple with all junk attributes removed. We
			 * don't need to do this for DELETE, however (there will in fact
			 * be no non-junk attributes in a DELETE!)
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		/*
		 * now that we have a tuple, do the appropriate thing with it.. either
		 * send it to the output destination, add it to a relation someplace,
		 * delete it from a relation, or modify some of its attributes.
		 */
		switch (operation)
		{
			case CMD_SELECT:
				ExecSelect(slot, dest, estate);
				break;

			case CMD_INSERT:
				ExecInsert(slot, tupleid, planSlot, dest, estate);
				break;

			case CMD_DELETE:
				ExecDelete(tupleid, planSlot, dest, estate);
				break;

			case CMD_UPDATE:
				ExecUpdate(slot, tupleid, planSlot, dest, estate);
				break;

			default:
				elog(ERROR, "unrecognized operation code: %d",
					 (int) operation);
				break;
		}

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * Process AFTER EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecASUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecASInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			/* do nothing */
			break;
	}
}

/* ----------------------------------------------------------------
 *		ExecSelect
 *
 *		SELECTs are easy.. we just pass the tuple to the appropriate
 *		output function.
 * ----------------------------------------------------------------
 */
static void
ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest, EState *estate)
{
	(*dest->receiveSlot) (slot, dest);
	IncrRetrieved();
	(estate->es_processed)++;
}

/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs are trickier.. we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 * ----------------------------------------------------------------
 */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself.  The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_output_cid, 0, NULL);

	IncrAppended();
	(estate->es_processed)++;
	estate->es_lastoid = newId;
	setLastTid(&(tuple->t_self));

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}

/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 * ----------------------------------------------------------------
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid, update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means we must
		 * fetch it first.  We can use the trigger tuple slot.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}

/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted.. This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself.  The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_output_cid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid, update_xmax);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples.  This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 *
	 * If it's a HOT update, we mustn't insert new index entries.
	 */
	if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}

/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	List	   *qual;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(List **) palloc(ncheck * sizeof(List *));
		for (i = 0; i < ncheck; i++)
		{
			/* ExecQual wants implicit-AND form */
			qual = make_ands_implicit(stringToNode(check[i].ccbin));
			resultRelInfo->ri_ConstraintExprs[i] = (List *)
				ExecPrepareExpr((Expr *) qual, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		qual = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL92 specifies that a NULL result from a constraint
		 * expression is not to be treated as a failure.  Therefore, tell
		 * ExecQual to return TRUE for NULL.
		 */
		if (!ExecQual(qual, econtext, true))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}

void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleConstr *constr = rel->rd_att->constr;

	Assert(constr);

	if (constr->has_not_null)
	{
		int			natts = rel->rd_att->natts;
		int			attrChk;

		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
				slot_attisnull(slot, attrChk))
				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" violates not-null constraint",
								NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
		}
	}

	if (constr->num_check > 0)
	{
		const char *failed;

		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(rel), failed)));
	}
}
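
/*
 * (Both ExecInsert and ExecUpdate, above, call ExecConstraints whenever the
 * target relation has a TupleConstr, i.e. rd_att->constr is set; the NOT
 * NULL checks run before the CHECK expressions.)
 */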

/*
 * ExecProcessReturning --- evaluate a RETURNING list and send to dest
 *
 * projectReturning: RETURNING projection info for current result rel
 * tupleSlot: slot holding tuple actually inserted/updated/deleted
 * planSlot: slot holding tuple returned by top plan node
 * dest: where to send the output
 */
static void
ExecProcessReturning(ProjectionInfo *projectReturning,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot,
					 DestReceiver *dest)
{
	ExprContext *econtext = projectReturning->pi_exprContext;
	TupleTableSlot *retSlot;

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous cycle.
	 */
	ResetExprContext(econtext);

	/* Make tuple and any needed join variables available to ExecProject */
	econtext->ecxt_scantuple = tupleSlot;
	econtext->ecxt_outertuple = planSlot;

	/* Compute the RETURNING expressions */
	retSlot = ExecProject(projectReturning, NULL);

	/* Send to dest */
	(*dest->receiveSlot) (retSlot, dest);

	ExecClearTuple(retSlot);
}

/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 *
 *	estate - executor state data
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
			 ItemPointer tid, TransactionId priorXmax)
{
	evalPlanQual *epq;
	EState	   *epqstate;
	Relation	relation;
	HeapTupleData tuple;
	HeapTuple	copyTuple = NULL;
	SnapshotData SnapshotDirty;
	bool		endNode;

	Assert(rti != 0);

	/*
	 * find relation containing target tuple
	 */
	if (estate->es_result_relation_info != NULL &&
		estate->es_result_relation_info->ri_RangeTableIndex == rti)
		relation = estate->es_result_relation_info->ri_RelationDesc;
	else
	{
		ListCell   *l;

		relation = NULL;
		foreach(l, estate->es_rowMarks)
		{
			ExecRowMark *erm = lfirst(l);

			if (erm->rti == rti)
			{
				relation = erm->relation;
				break;
			}
		}
		if (relation == NULL)
			elog(ERROR, "could not find RowMark for RT index %u", rti);
	}

	/*
	 * fetch tid tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	InitDirtySnapshot(SnapshotDirty);
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
		{
2195 * If xmin isn't what we're expecting, the slot must have been
2196 * recycled and reused for an unrelated tuple. This implies that
2197 * the latest version of the row was deleted, so we need do
2198 * nothing. (Should be safe to examine xmin without getting
2199 * buffer's content lock, since xmin never changes in an existing
2202 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2205 ReleaseBuffer(buffer);
			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty.xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If tuple is being updated by another transaction then we have
			 * to wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty.xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty.xmax);
				continue;		/* loop back to repeat heap_fetch */
			}
			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against es_output_cid: cmin >= current CID means our
			 * command cannot see the tuple, so we should ignore it.  Without
			 * this we are open to the "Halloween problem" of indefinitely
			 * re-updating the same tuple.  (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
			 * transaction dead, regardless of cmax.)  We just checked that
			 * priorXmax == xmin, so we can test that variable instead of
			 * doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * We got tuple - now copy it for use by recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}
		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}
		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as it
		 * certainly should be if we're trying to modify the tuple), this must
		 * mean that the row was updated or deleted by either a committed xact
		 * or our own xact.  If it was deleted, we can ignore it; if it was
		 * updated then chain up to the next version and repeat the whole
		 * test.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}
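	/*
	 * Note: the loop above relies on the update-chain invariants that each
	 * version's t_ctid points at its successor and that the successor's
	 * xmin equals its predecessor's xmax; that is why the xmin/priorXmax
	 * cross-check is sufficient to detect a recycled slot.
	 */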
	/*
	 * For UPDATE/DELETE we have to return tid of actual row we're executing
	 * PQ for.
	 */
	*tid = tuple.t_self;

	/*
	 * Need to run a recheck subquery.  Find or create a PQ stack entry.
	 */
	epq = estate->es_evalPlanQual;
	endNode = true;

	if (epq != NULL && epq->rti == 0)
	{
		/* Top PQ stack entry is idle, so re-use it */
		Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
		epq->rti = rti;
		endNode = false;
	}
	/*
	 * If this is a request for another RTE, Ra, then we must check whether
	 * PlanQual was already requested for Ra.  If so, Ra's row was updated
	 * again, and we must restart the old execution for Ra, discarding all
	 * the work done after Ra was suspended.
	 */
	if (epq != NULL && epq->rti != rti &&
		epq->estate->es_evTuple[rti - 1] != NULL)
	{
		do
		{
			evalPlanQual *oldepq;

			/* stop execution */
			EvalPlanQualStop(epq);
			/* pop previous PlanQual from the stack */
			oldepq = epq->next;
			Assert(oldepq && oldepq->rti != 0);
			/* push current PQ to freePQ stack */
			oldepq->free = epq;
			epq = oldepq;
			estate->es_evalPlanQual = epq;
		} while (epq->rti != rti);
	}
	/*
	 * If the request is for another RTE, we must suspend execution of the
	 * current PlanQual and start execution for the new one.
	 */
	if (epq == NULL || epq->rti != rti)
	{
		/* try to reuse plan used previously */
		evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;

		if (newepq == NULL)		/* first call or freePQ stack is empty */
		{
			newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
			newepq->free = NULL;
			newepq->estate = NULL;
			newepq->planstate = NULL;
		}
		else
		{
			/* recycle previously used PlanQual */
			Assert(newepq->estate == NULL);
			epq->free = NULL;
		}
		/* push current PQ to the stack */
		newepq->next = epq;
		epq = newepq;
		estate->es_evalPlanQual = epq;
		epq->rti = rti;
		endNode = false;
	}

	Assert(epq->rti == rti);
	/*
	 * Ok - we're asked about the same RTE.  Unfortunately we still have to
	 * end and restart execution of the plan, because ExecReScan wouldn't
	 * ensure that upper plan nodes would reset themselves.  We could make
	 * that work if insertion of the target tuple were integrated with the
	 * Param mechanism somehow, so that the upper plan nodes know that their
	 * children's outputs have changed.
	 *
	 * Note that the stack of free evalPlanQual nodes is quite useless at the
	 * moment, since it only saves us from pallocing/releasing the
	 * evalPlanQual nodes themselves.  But it will be useful once we implement
	 * ReScan instead of end/restart for re-using PlanQual nodes.
	 */
	if (endNode)
	{
		/* stop execution */
		EvalPlanQualStop(epq);
	}
	/*
	 * Initialize new recheck query.
	 *
	 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
	 * instead copy down changeable state from the top plan (including
	 * es_result_relation_info, es_junkFilter) and reset locally changeable
	 * state in the epq (including es_param_exec_vals, es_evTupleNull).
	 */
	EvalPlanQualStart(epq, estate, epq->next);

	/*
	 * free old RTE's tuple, if any, and store target tuple where relation's
	 * scan node will see it
	 */
	epqstate = epq->estate;
	if (epqstate->es_evTuple[rti - 1] != NULL)
		heap_freetuple(epqstate->es_evTuple[rti - 1]);
	epqstate->es_evTuple[rti - 1] = copyTuple;

	return EvalPlanQualNext(estate);
}
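/*
 * Note: storing copyTuple into es_evTuple[rti - 1] is what makes the
 * recheck work: while a PlanQual run is active, the scan node for that
 * range-table entry returns just this stored tuple instead of scanning
 * the relation, so the plan quals are evaluated against exactly the new
 * row version.
 */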
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	Assert(epq->rti != 0);

lpqnext:;
	oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
	slot = ExecProcNode(epq->planstate);
	MemoryContextSwitchTo(oldcontext);
	/*
	 * No more tuples for this PQ. Continue previous one.
	 */
	if (TupIsNull(slot))
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			/* and continue Query execution */
			return NULL;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
		goto lpqnext;
	}

	return slot;
}
static void
EndEvalPlanQual(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;

	if (epq->rti == 0)			/* plans already shut down */
	{
		Assert(epq->next == NULL);
		return;
	}

	for (;;)
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			break;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
	}
}
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(estate->es_range_table);

	epq->estate = epqstate = CreateExecutorState();

	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_output_cid = estate->es_output_cid;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	/* es_trig_target_relations must NOT be copied */
	epqstate->es_param_list_info = estate->es_param_list_info;
	if (estate->es_plannedstmt->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_plannedstmt = estate->es_plannedstmt;
	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the stack
	 * entries share es_evTuple state.  This allows sub-rechecks to inherit
	 * the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;
	/*
	 * Create sub-tuple-table; we needn't redo the CountSlots work though.
	 */
	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);
	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(epqstate->es_subplanstates == NIL);
	foreach(l, estate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, epqstate, 0);

		epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
											 subplanstate);
	}
	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	ExecEndNode(epq->planstate);

	foreach(l, epqstate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	foreach(l, epqstate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(epqstate);

	epq->estate = NULL;
	epq->planstate = NULL;
}
/*
 * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
 *
 * Ordinarily this is just the one mentioned in the QueryDesc, but if we
 * are looking at a row returned by the EvalPlanQual machinery, we need
 * to look at the subsidiary state instead.
 */
PlanState *
ExecGetActivePlanTree(QueryDesc *queryDesc)
{
	EState	   *estate = queryDesc->estate;

	if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
		return estate->es_evalPlanQual->planstate;
	else
		return queryDesc->planstate;
}
/*
 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
 *
 * We implement SELECT INTO by diverting SELECT's normal output with
 * a specialized DestReceiver type.
 */
typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
	Relation	rel;			/* Relation to write to */
	int			hi_options;		/* heap_insert performance options */
	BulkInsertState bistate;	/* bulk insert state */
} DR_intorel;
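/*
 * Example (for illustration): for a statement such as
 *
 *		SELECT a, b INTO newtab FROM oldtab;
 *
 * OpenIntoRel creates "newtab" and installs a DR_intorel as the query's
 * DestReceiver; each result row then reaches intorel_receive, which
 * heap_inserts it into the new relation, and CloseIntoRel finishes up at
 * ExecutorEnd time.
 */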
/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));
	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));
	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}
	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}
	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* Copy the tupdesc because heap_create_with_catalog modifies it */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  NIL,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	FreeTupleDesc(tupdesc);
	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
	myState->rel = intoRelationDesc;

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.  We can
	 * skip the FSM in any case.
	 */
	myState->hi_options = HEAP_INSERT_SKIP_FSM |
		(XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
	myState->bistate = GetBulkInsertState();

	/* Not using WAL requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
}
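/*
 * Note: skipping WAL is safe here only because the relation was created
 * in this same transaction; after a crash, the table disappears along
 * with the aborted transaction.  The price is that CloseIntoRel must
 * heap_sync the relation before commit whenever HEAP_INSERT_SKIP_WAL
 * was used.
 */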
/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
	DR_intorel *myState = (DR_intorel *) queryDesc->dest;

	/* OpenIntoRel might never have gotten called */
	if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
	{
		FreeBulkInsertState(myState->bistate);

		/* If we skipped using WAL, must heap_sync before commit */
		if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
			heap_sync(myState->rel);

		/* close rel, but keep lock until commit */
		heap_close(myState->rel, NoLock);

		myState->rel = NULL;
	}
}
/*
 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
 *
 * Since CreateDestReceiver doesn't accept the parameters we'd need,
 * we just leave the private fields zeroed here.  OpenIntoRel will
 * fill them in.
 */
DestReceiver *
CreateIntoRelDestReceiver(void)
{
	DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));

	self->pub.receiveSlot = intorel_receive;
	self->pub.rStartup = intorel_startup;
	self->pub.rShutdown = intorel_shutdown;
	self->pub.rDestroy = intorel_destroy;
	self->pub.mydest = DestIntoRel;

	return (DestReceiver *) self;
}
/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}
/*
 * intorel_receive --- receive one tuple
 */
static void
intorel_receive(TupleTableSlot *slot, DestReceiver *self)
{
	DR_intorel *myState = (DR_intorel *) self;
	HeapTuple	tuple;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	heap_insert(myState->rel,
				tuple,
				myState->estate->es_output_cid,
				myState->hi_options,
				myState->bistate);

	/* We know this is a newly created relation, so there are no indexes */
}
/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}
/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}