/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * The old ExecutorMain() has been replaced by ExecutorStart(),
 * ExecutorRun() and ExecutorEnd()
 *
 * These three procedures are the external interfaces to the executor.
 * In each case, the query descriptor is required as an argument.
 *
 * ExecutorStart() must be called at the beginning of execution of any
 * query plan and ExecutorEnd() should always be called at the end of
 * execution of a plan.
 *
 * ExecutorRun accepts direction and count arguments that specify whether
 * the plan is to be executed forwards or backwards, and for how many tuples.
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.316 2008/11/15 19:43:45 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
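
/*
 * Illustrative call sequence (a minimal sketch, not copied from any actual
 * caller): code above the executor drives the three entry points in this
 * order, where queryDesc comes from CreateQueryDesc and count = 0 means
 * "run to completion":
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorEnd(queryDesc);
 *
 * Real callers add snapshot and resource-owner bookkeeping around these
 * calls; the sketch only shows the required ordering.
 */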

#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "executor/nodeSubplan.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


/* Hook for plugins to get control in ExecutorRun() */
ExecutorRun_hook_type ExecutorRun_hook = NULL;
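
/*
 * Hypothetical sketch (not part of this file) of how a plugin would use the
 * hook: save the previous value and chain to it, falling back to
 * standard_ExecutorRun when no earlier hook exists.  Names prefixed "my_"
 * are invented for illustration.
 *
 *		static ExecutorRun_hook_type prev_ExecutorRun = NULL;
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorRun = ExecutorRun_hook;
 *			ExecutorRun_hook = my_ExecutorRun;
 *		}
 *
 *		static void
 *		my_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count)
 *		{
 *			... plugin work here, e.g. timing ...
 *			if (prev_ExecutorRun)
 *				(*prev_ExecutorRun) (queryDesc, direction, count);
 *			else
 *				standard_ExecutorRun(queryDesc, direction, count);
 *		}
 */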

typedef struct evalPlanQual
{
    Index       rti;
    EState     *estate;
    PlanState  *planstate;
    struct evalPlanQual *next;  /* stack of active PlanQual plans */
    struct evalPlanQual *free;  /* list of free PlanQual plans */
} evalPlanQual;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void ExecCheckPlanOutput(Relation resultRel, List *targetList);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
            CmdType operation,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
           DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate);
static void ExecProcessReturning(ProjectionInfo *projectReturning,
                 TupleTableSlot *tupleSlot,
                 TupleTableSlot *planSlot,
                 DestReceiver *dest);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
                  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */

/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 * ----------------------------------------------------------------
 */
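
/*
 * Minimal sketch of the memory-context protocol (an assumption modeled on
 * portal-style callers; "portalContext" is a hypothetical long-lived
 * context, not a name used in this file):
 *
 *		MemoryContext oldcontext = MemoryContextSwitchTo(portalContext);
 *
 *		ExecutorStart(queryDesc, 0);
 *		MemoryContextSwitchTo(oldcontext);
 *
 * The executor's per-query context then lives as a child of portalContext
 * until FreeExecutorState, via ExecutorEnd, releases it.
 */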

void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks: queryDesc must not be started already */
    Assert(queryDesc != NULL);
    Assert(queryDesc->estate == NULL);

    /*
     * If the transaction is read-only, we need to check if any writes are
     * planned to non-temporary tables.  EXPLAIN is considered read-only.
     */
    if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        ExecCheckXactReadOnly(queryDesc->plannedstmt);

    /*
     * Build EState, switch into per-query memory context for startup.
     */
    estate = CreateExecutorState();
    queryDesc->estate = estate;

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Fill in parameters, if any, from queryDesc
     */
    estate->es_param_list_info = queryDesc->params;

    if (queryDesc->plannedstmt->nParamExec > 0)
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

    /*
     * If non-read-only query, set the command ID to mark output tuples with
     */
    switch (queryDesc->operation)
    {
        case CMD_SELECT:
            /* SELECT INTO and SELECT FOR UPDATE/SHARE need to mark tuples */
            if (queryDesc->plannedstmt->intoClause != NULL ||
                queryDesc->plannedstmt->rowMarks != NIL)
                estate->es_output_cid = GetCurrentCommandId(true);
            break;

        case CMD_INSERT:
        case CMD_DELETE:
        case CMD_UPDATE:
            estate->es_output_cid = GetCurrentCommandId(true);
            break;

        default:
            elog(ERROR, "unrecognized operation code: %d",
                 (int) queryDesc->operation);
            break;
    }

    /*
     * Copy other important information into the EState
     */
    estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
    estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
    estate->es_instrument = queryDesc->doInstrument;

    /*
     * Initialize the plan state tree
     */
    InitPlan(queryDesc, eflags);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module. It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
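
/*
 * Illustrative usage (a sketch, not taken from a specific caller): a portal
 * fetching query results a page at a time might repeatedly issue
 *
 *		ExecutorRun(queryDesc, ForwardScanDirection, 10L);
 *
 * with each call resuming where the previous one stopped, while a
 * run-to-completion caller passes count = 0.
 */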

void
ExecutorRun(QueryDesc *queryDesc,
            ScanDirection direction, long count)
{
    if (ExecutorRun_hook)
        (*ExecutorRun_hook) (queryDesc, direction, count);
    else
        standard_ExecutorRun(queryDesc, direction, count);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
                     ScanDirection direction, long count)
{
    EState     *estate;
    CmdType     operation;
    DestReceiver *dest;
    bool        sendTuples;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * extract information from the query descriptor
     */
    operation = queryDesc->operation;
    dest = queryDesc->dest;

    /*
     * startup tuple receiver, if we will be emitting tuples
     */
    estate->es_processed = 0;
    estate->es_lastoid = InvalidOid;

    sendTuples = (operation == CMD_SELECT ||
                  queryDesc->plannedstmt->returningLists);

    if (sendTuples)
        (*dest->rStartup) (dest, operation, queryDesc->tupDesc);

    /*
     * run plan
     */
    if (!ScanDirectionIsNoMovement(direction))
        ExecutePlan(estate,
                    queryDesc->planstate,
                    operation,
                    count,
                    direction,
                    dest);

    /*
     * shutdown tuple receiver, if we started it
     */
    if (sendTuples)
        (*dest->rShutdown) (dest);

    MemoryContextSwitchTo(oldcontext);
}

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /*
     * Switch into per-query memory context to run ExecEndPlan
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndPlan(queryDesc->planstate, estate);

    /*
     * Close the SELECT INTO relation if any
     */
    if (estate->es_select_into)
        CloseIntoRel(queryDesc);

    /* do away with our snapshots */
    UnregisterSnapshot(estate->es_snapshot);
    UnregisterSnapshot(estate->es_crosscheck_snapshot);

    /*
     * Must switch out of context before destroying it
     */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Release EState and per-query memory context.  This should release
     * everything the executor has allocated.
     */
    FreeExecutorState(estate);

    /* Reset queryDesc fields that no longer point to anything */
    queryDesc->tupDesc = NULL;
    queryDesc->estate = NULL;
    queryDesc->planstate = NULL;
}

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
    EState     *estate;
    MemoryContext oldcontext;

    /* sanity checks */
    Assert(queryDesc != NULL);

    estate = queryDesc->estate;

    Assert(estate != NULL);

    /* It's probably not sensible to rescan updating queries */
    Assert(queryDesc->operation == CMD_SELECT);

    /*
     * Switch into per-query memory context
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * rescan plan
     */
    ExecReScan(queryDesc->planstate, NULL);

    MemoryContextSwitchTo(oldcontext);
}

/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
    ListCell   *l;

    foreach(l, rangeTable)
    {
        ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
    }
}

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
    AclMode     requiredPerms;
    Oid         relOid;
    Oid         userid;

    /*
     * Only plain-relation RTEs need to be checked here.  Function RTEs are
     * checked by init_fcache when the function is prepared for execution.
     * Join, subquery, and special RTEs need no checks.
     */
    if (rte->rtekind != RTE_RELATION)
        return;

    /*
     * No work if requiredPerms is empty.
     */
    requiredPerms = rte->requiredPerms;
    if (requiredPerms == 0)
        return;

    relOid = rte->relid;

    /*
     * userid to check as: current user unless we have a setuid indication.
     *
     * Note: GetUserId() is presently fast enough that there's no harm in
     * calling it separately for each RTE.  If that stops being true, we could
     * call it once in ExecCheckRTPerms and pass the userid down from there.
     * But for now, no need for the extra clutter.
     */
    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /*
     * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
     */
    if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
        != requiredPerms)
        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                       get_rel_name(relOid));
}

/*
 * ExecCheckXactReadOnly
 *		Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
    ListCell   *l;

    /*
     * CREATE TABLE AS or SELECT INTO?
     *
     * XXX should we allow this if the destination is temp?
     */
    if (plannedstmt->intoClause != NULL)
        goto fail;

    /* Fail if write permissions are requested on any non-temp table */
    foreach(l, plannedstmt->rtable)
    {
        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

        if (rte->rtekind != RTE_RELATION)
            continue;

        if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
            continue;

        if (isTempNamespace(get_rel_namespace(rte->relid)))
            continue;

        goto fail;
    }

    return;

fail:
    ereport(ERROR,
            (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
             errmsg("transaction is read-only")));
}

/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
    CmdType     operation = queryDesc->operation;
    PlannedStmt *plannedstmt = queryDesc->plannedstmt;
    Plan       *plan = plannedstmt->planTree;
    List       *rangeTable = plannedstmt->rtable;
    EState     *estate = queryDesc->estate;
    PlanState  *planstate;
    TupleDesc   tupType;
    ListCell   *l;
    int         i;

    /*
     * Do permissions checks
     */
    ExecCheckRTPerms(rangeTable);

    /*
     * initialize the node's execution state
     */
    estate->es_range_table = rangeTable;

    /*
     * initialize result relation stuff
     */
    if (plannedstmt->resultRelations)
    {
        List       *resultRelations = plannedstmt->resultRelations;
        int         numResultRelations = list_length(resultRelations);
        ResultRelInfo *resultRelInfos;
        ResultRelInfo *resultRelInfo;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        resultRelInfo = resultRelInfos;
        foreach(l, resultRelations)
        {
            Index       resultRelationIndex = lfirst_int(l);
            Oid         resultRelationOid;
            Relation    resultRelation;

            resultRelationOid = getrelid(resultRelationIndex, rangeTable);
            resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
            InitResultRelInfo(resultRelInfo,
                              resultRelation,
                              resultRelationIndex,
                              operation,
                              estate->es_instrument);
            resultRelInfo++;
        }
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;
        /* Initialize to first or only result rel */
        estate->es_result_relation_info = resultRelInfos;
    }
    else
    {
        /*
         * if no result relation, then set state appropriately
         */
        estate->es_result_relations = NULL;
        estate->es_num_result_relations = 0;
        estate->es_result_relation_info = NULL;
    }

    /*
     * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
     * flag appropriately so that the plan tree will be initialized with the
     * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
     */
    estate->es_select_into = false;
    if (operation == CMD_SELECT && plannedstmt->intoClause != NULL)
    {
        estate->es_select_into = true;
        estate->es_into_oids = interpretOidsOption(plannedstmt->intoClause->options);
    }

    /*
     * Have to lock relations selected FOR UPDATE/FOR SHARE before we
     * initialize the plan tree, else we'd be doing a lock upgrade.  While we
     * are at it, build the ExecRowMark list.
     */
    estate->es_rowMarks = NIL;
    foreach(l, plannedstmt->rowMarks)
    {
        RowMarkClause *rc = (RowMarkClause *) lfirst(l);
        Oid         relid;
        Relation    relation;
        ExecRowMark *erm;

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        relid = getrelid(rc->rti, rangeTable);
        relation = heap_open(relid, RowShareLock);
        erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
        erm->relation = relation;
        erm->rti = rc->rti;
        erm->prti = rc->prti;
        erm->forUpdate = rc->forUpdate;
        erm->noWait = rc->noWait;
        /* We'll locate the junk attrs below */
        erm->ctidAttNo = InvalidAttrNumber;
        erm->toidAttNo = InvalidAttrNumber;
        estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
    }

    /*
     * Initialize the executor "tuple" table.  We need slots for all the plan
     * nodes, plus possibly output slots for the junkfilter(s).  At this point
     * we aren't sure if we need junkfilters, so just add slots for them
     * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
     * trigger output tuples.  Also, one for RETURNING-list evaluation.
     */
    {
        int         nSlots;

        /* Slots for the main plan tree */
        nSlots = ExecCountSlotsNode(plan);
        /* Add slots for subplans and initplans */
        foreach(l, plannedstmt->subplans)
        {
            Plan       *subplan = (Plan *) lfirst(l);

            nSlots += ExecCountSlotsNode(subplan);
        }
        /* Add slots for junkfilter(s) */
        if (plannedstmt->resultRelations != NIL)
            nSlots += list_length(plannedstmt->resultRelations);
        else
            nSlots += 1;
        if (operation != CMD_SELECT)
            nSlots++;           /* for es_trig_tuple_slot */
        if (plannedstmt->returningLists)
            nSlots++;           /* for RETURNING projection */

        estate->es_tupleTable = ExecCreateTupleTable(nSlots);

        if (operation != CMD_SELECT)
            estate->es_trig_tuple_slot =
                ExecAllocTableSlot(estate->es_tupleTable);
    }

    /* mark EvalPlanQual not active */
    estate->es_plannedstmt = plannedstmt;
    estate->es_evalPlanQual = NULL;
    estate->es_evTupleNull = NULL;
    estate->es_evTuple = NULL;
    estate->es_useEvalPlan = false;

    /*
     * Initialize private state information for each SubPlan.  We must do this
     * before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.
     */
    Assert(estate->es_subplanstates == NIL);
    i = 1;                      /* subplan indices count from 1 */
    foreach(l, plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;
        int         sp_eflags;

        /*
         * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.  If
         * it is a parameterless subplan (not initplan), we suggest that it be
         * prepared to handle REWIND efficiently; otherwise there is no need.
         */
        sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
        if (bms_is_member(i, plannedstmt->rewindPlanIDs))
            sp_eflags |= EXEC_FLAG_REWIND;

        subplanstate = ExecInitNode(subplan, estate, sp_eflags);

        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);

        i++;
    }

    /*
     * Initialize the private state information for all the nodes in the query
     * tree.  This opens files, allocates storage and leaves us ready to start
     * processing tuples.
     */
    planstate = ExecInitNode(plan, estate, eflags);

    /*
     * Get the tuple descriptor describing the type of tuples to return.  (this
     * is especially important if we are creating a relation with "SELECT
     * INTO")
     */
    tupType = ExecGetResultType(planstate);

    /*
     * Initialize the junk filter if needed.  SELECT and INSERT queries need a
     * filter if there are any junk attrs in the tlist.  UPDATE and
     * DELETE always need a filter, since there's always a junk 'ctid'
     * attribute present --- no need to look first.
     *
     * This section of code is also a convenient place to verify that the
     * output of an INSERT or UPDATE matches the target table(s).
     */
    {
        bool        junk_filter_needed = false;
        ListCell   *tlist;

        switch (operation)
        {
            case CMD_SELECT:
            case CMD_INSERT:
                foreach(tlist, plan->targetlist)
                {
                    TargetEntry *tle = (TargetEntry *) lfirst(tlist);

                    if (tle->resjunk)
                    {
                        junk_filter_needed = true;
                        break;
                    }
                }
                break;
            case CMD_UPDATE:
            case CMD_DELETE:
                junk_filter_needed = true;
                break;
            default:
                break;
        }

        if (junk_filter_needed)
        {
            /*
             * If there are multiple result relations, each one needs its own
             * junk filter.  Note this is only possible for UPDATE/DELETE, so
             * we can't be fooled by some needing a filter and some not.
             */
            if (list_length(plannedstmt->resultRelations) > 1)
            {
                PlanState **appendplans;
                int         as_nplans;
                ResultRelInfo *resultRelInfo;

                /* Top plan had better be an Append here. */
                Assert(IsA(plan, Append));
                Assert(((Append *) plan)->isTarget);
                Assert(IsA(planstate, AppendState));
                appendplans = ((AppendState *) planstate)->appendplans;
                as_nplans = ((AppendState *) planstate)->as_nplans;
                Assert(as_nplans == estate->es_num_result_relations);
                resultRelInfo = estate->es_result_relations;
                for (i = 0; i < as_nplans; i++)
                {
                    PlanState  *subplan = appendplans[i];
                    JunkFilter *j;

                    if (operation == CMD_UPDATE)
                        ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
                                            subplan->plan->targetlist);

                    j = ExecInitJunkFilter(subplan->plan->targetlist,
                                           resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
                                           ExecAllocTableSlot(estate->es_tupleTable));

                    /*
                     * Since it must be UPDATE/DELETE, there had better be a
                     * "ctid" junk attribute in the tlist ... but ctid could
                     * be at a different resno for each result relation.  We
                     * look up the ctid resnos now and save them in the
                     * junkfilters.
                     */
                    j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
                    if (!AttributeNumberIsValid(j->jf_junkAttNo))
                        elog(ERROR, "could not find junk ctid column");
                    resultRelInfo->ri_junkFilter = j;
                    resultRelInfo++;
                }

                /*
                 * Set active junkfilter too; at this point ExecInitAppend has
                 * already selected an active result relation...
                 */
                estate->es_junkFilter =
                    estate->es_result_relation_info->ri_junkFilter;

                /*
                 * We currently can't support rowmarks in this case, because
                 * the associated junk CTIDs might have different resnos in
                 * different subplans.
                 */
                if (estate->es_rowMarks)
                    ereport(ERROR,
                            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                             errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations")));
            }
            else
            {
                /* Normal case with just one JunkFilter */
                JunkFilter *j;

                if (operation == CMD_INSERT || operation == CMD_UPDATE)
                    ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
                                        planstate->plan->targetlist);

                j = ExecInitJunkFilter(planstate->plan->targetlist,
                                       tupType->tdhasoid,
                                       ExecAllocTableSlot(estate->es_tupleTable));
                estate->es_junkFilter = j;
                if (estate->es_result_relation_info)
                    estate->es_result_relation_info->ri_junkFilter = j;

                if (operation == CMD_SELECT)
                {
                    /* For SELECT, want to return the cleaned tuple type */
                    tupType = j->jf_cleanTupType;
                }
                else if (operation == CMD_UPDATE || operation == CMD_DELETE)
                {
                    /* For UPDATE/DELETE, find the ctid junk attr now */
                    j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
                    if (!AttributeNumberIsValid(j->jf_junkAttNo))
                        elog(ERROR, "could not find junk ctid column");
                }

                /* For SELECT FOR UPDATE/SHARE, find the junk attrs now */
                foreach(l, estate->es_rowMarks)
                {
                    ExecRowMark *erm = (ExecRowMark *) lfirst(l);
                    char        resname[32];

                    /* always need the ctid */
                    snprintf(resname, sizeof(resname), "ctid%u",
                             erm->prti);
                    erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
                    if (!AttributeNumberIsValid(erm->ctidAttNo))
                        elog(ERROR, "could not find junk \"%s\" column",
                             resname);
                    /* if child relation, need tableoid too */
                    if (erm->rti != erm->prti)
                    {
                        snprintf(resname, sizeof(resname), "tableoid%u",
                                 erm->prti);
                        erm->toidAttNo = ExecFindJunkAttribute(j, resname);
                        if (!AttributeNumberIsValid(erm->toidAttNo))
                            elog(ERROR, "could not find junk \"%s\" column",
                                 resname);
                    }
                }
            }
        }
        else
        {
            if (operation == CMD_INSERT)
                ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc,
                                    planstate->plan->targetlist);

            estate->es_junkFilter = NULL;
            if (estate->es_rowMarks)
                elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns");
        }
    }

    /*
     * Initialize RETURNING projections if needed.
     */
    if (plannedstmt->returningLists)
    {
        TupleTableSlot *slot;
        ExprContext *econtext;
        ResultRelInfo *resultRelInfo;

        /*
         * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
         * We assume all the sublists will generate the same output tupdesc.
         */
        tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
                                 false);

        /* Set up a slot for the output of the RETURNING projection(s) */
        slot = ExecAllocTableSlot(estate->es_tupleTable);
        ExecSetSlotDescriptor(slot, tupType);
        /* Need an econtext too */
        econtext = CreateExprContext(estate);

        /*
         * Build a projection for each result rel.  Note that any SubPlans in
         * the RETURNING lists get attached to the topmost plan node.
         */
        Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
        resultRelInfo = estate->es_result_relations;
        foreach(l, plannedstmt->returningLists)
        {
            List       *rlist = (List *) lfirst(l);
            List       *rliststate;

            rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
            resultRelInfo->ri_projectReturning =
                ExecBuildProjectionInfo(rliststate, econtext, slot,
                                        resultRelInfo->ri_RelationDesc->rd_att);
            resultRelInfo++;
        }
    }

    queryDesc->tupDesc = tupType;
    queryDesc->planstate = planstate;

    /*
     * If doing SELECT INTO, initialize the "into" relation.  We must wait
     * till now so we have the "clean" result tuple type to create the new
     * table from.
     *
     * If EXPLAIN, skip creating the "into" relation.
     */
    if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
        OpenIntoRel(queryDesc);
}

/*
 * Initialize ResultRelInfo data for one result relation
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
                  Relation resultRelationDesc,
                  Index resultRelationIndex,
                  CmdType operation,
                  bool doInstrument)
{
    /*
     * Check valid relkind ... parser and/or planner should have noticed this
     * already, but let's make sure.
     */
    switch (resultRelationDesc->rd_rel->relkind)
    {
        case RELKIND_RELATION:
            /* OK */
            break;
        case RELKIND_SEQUENCE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change sequence \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_TOASTVALUE:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change TOAST relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        case RELKIND_VIEW:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change view \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("cannot change relation \"%s\"",
                            RelationGetRelationName(resultRelationDesc))));
            break;
    }

    /* OK, fill in the node */
    MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
    resultRelInfo->type = T_ResultRelInfo;
    resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
    resultRelInfo->ri_RelationDesc = resultRelationDesc;
    resultRelInfo->ri_NumIndices = 0;
    resultRelInfo->ri_IndexRelationDescs = NULL;
    resultRelInfo->ri_IndexRelationInfo = NULL;
    /* make a copy so as not to depend on relcache info not changing... */
    resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
    if (resultRelInfo->ri_TrigDesc)
    {
        int         n = resultRelInfo->ri_TrigDesc->numtriggers;

        resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
            palloc0(n * sizeof(FmgrInfo));
        if (doInstrument)
            resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
        else
            resultRelInfo->ri_TrigInstrument = NULL;
    }
    else
    {
        resultRelInfo->ri_TrigFunctions = NULL;
        resultRelInfo->ri_TrigInstrument = NULL;
    }
    resultRelInfo->ri_ConstraintExprs = NULL;
    resultRelInfo->ri_junkFilter = NULL;
    resultRelInfo->ri_projectReturning = NULL;

    /*
     * If there are indices on the result relation, open them and save
     * descriptors in the result relation info, so that we can add new index
     * entries for the tuples we add/update.  We need not do this for a
     * DELETE, however, since deletion doesn't affect indexes.
     */
    if (resultRelationDesc->rd_rel->relhasindex &&
        operation != CMD_DELETE)
        ExecOpenIndices(resultRelInfo);
}

/*
 * Verify that the tuples to be produced by INSERT or UPDATE match the
 * target relation's rowtype
 *
 * We do this to guard against stale plans.  If plan invalidation is
 * functioning properly then we should never get a failure here, but better
 * safe than sorry.  Note that this is called after we have obtained lock
 * on the target rel, so the rowtype can't change underneath us.
 *
 * The plan output is represented by its targetlist, because that makes
 * handling the dropped-column case easier.
 */
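
/*
 * Illustrative failure scenario (invented for exposition, not from the
 * sources): a plan built for "INSERT INTO t VALUES (1)" and somehow replayed
 * after "ALTER TABLE t ADD COLUMN c2 int" would emit one column where the
 * table row type now has two; the checks below would raise the
 * row-type-mismatch error rather than let a malformed tuple be stored.
 */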

static void
ExecCheckPlanOutput(Relation resultRel, List *targetList)
{
    TupleDesc   resultDesc = RelationGetDescr(resultRel);
    int         attno = 0;
    ListCell   *lc;

    foreach(lc, targetList)
    {
        TargetEntry *tle = (TargetEntry *) lfirst(lc);
        Form_pg_attribute attr;

        if (tle->resjunk)
            continue;           /* ignore junk tlist items */

        if (attno >= resultDesc->natts)
            ereport(ERROR,
                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                     errmsg("table row type and query-specified row type do not match"),
                     errdetail("Query has too many columns.")));
        attr = resultDesc->attrs[attno++];

        if (!attr->attisdropped)
        {
            /* Normal case: demand type match */
            if (exprType((Node *) tle->expr) != attr->atttypid)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Table has type %s at ordinal position %d, but query expects %s.",
                                   format_type_be(attr->atttypid),
                                   attno,
                                   format_type_be(exprType((Node *) tle->expr)))));
        }
        else
        {
            /*
             * For a dropped column, we can't check atttypid (it's likely 0).
             * In any case the planner has most likely inserted an INT4 null.
             * What we insist on is just *some* NULL constant.
             */
            if (!IsA(tle->expr, Const) ||
                !((Const *) tle->expr)->constisnull)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmsg("table row type and query-specified row type do not match"),
                         errdetail("Query provides a value for a dropped column at ordinal position %d.",
                                   attno)));
        }
    }
    if (attno != resultDesc->natts)
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("table row type and query-specified row type do not match"),
                 errdetail("Query has too few columns.")));
}

/*
 * ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array.  (Note: in
 * self-join situations there might be multiple members with the same OID;
 * if so it doesn't matter which one we pick.)  However, it is sometimes
 * necessary to fire triggers on other relations; this happens mainly when an
 * RI update trigger queues additional triggers on other relations, which will
 * be processed in the context of the outer query.  For efficiency's sake,
 * we want to have a ResultRelInfo for those triggers too; that can avoid
 * repeated re-opening of the relation.  (It also provides a way for EXPLAIN
 * ANALYZE to report the runtimes of such triggers.)  So we make additional
 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
    ResultRelInfo *rInfo;
    int         nr;
    ListCell   *l;
    Relation    rel;
    MemoryContext oldcontext;

    /* First, search through the query result relations */
    rInfo = estate->es_result_relations;
    nr = estate->es_num_result_relations;
    while (nr > 0)
    {
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
        rInfo++;
        nr--;
    }
    /* Nope, but maybe we already made an extra ResultRelInfo for it */
    foreach(l, estate->es_trig_target_relations)
    {
        rInfo = (ResultRelInfo *) lfirst(l);
        if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
            return rInfo;
    }
    /* Nope, so we need a new one */

    /*
     * Open the target relation's relcache entry.  We assume that an
     * appropriate lock is still held by the backend from whenever the trigger
     * event got queued, so we need take no new lock here.
     */
    rel = heap_open(relid, NoLock);

    /*
     * Make the new entry in the right context.  Currently, we don't need any
     * index information in ResultRelInfos used only for triggers, so tell
     * InitResultRelInfo it's a DELETE.
     */
    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
    rInfo = makeNode(ResultRelInfo);
    InitResultRelInfo(rInfo,
                      rel,
                      0,        /* dummy rangetable index */
                      CMD_DELETE,
                      estate->es_instrument);
    estate->es_trig_target_relations =
        lappend(estate->es_trig_target_relations, rInfo);
    MemoryContextSwitchTo(oldcontext);

    return rInfo;
}

/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
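
/*
 * Sketch of the expected call pattern (an assumption based on this routine's
 * contract, not copied from a particular caller): a node choosing its result
 * tuple descriptor would do roughly
 *
 *		bool	hasoids;
 *
 *		if (ExecContextForcesOids(planstate, &hasoids))
 *			... must build the descriptor with exactly that hasoids ...
 *		else
 *			... free to choose; normally omit OID space ...
 */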

bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
    if (planstate->state->es_select_into)
    {
        *hasoids = planstate->state->es_into_oids;
        return true;
    }
    else
    {
        ResultRelInfo *ri = planstate->state->es_result_relation_info;

        if (ri != NULL)
        {
            Relation    rel = ri->ri_RelationDesc;

            if (rel != NULL)
            {
                *hasoids = rel->rd_rel->relhasoids;
                return true;
            }
        }
    }

    return false;
}

/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
    ResultRelInfo *resultRelInfo;
    int         i;
    ListCell   *l;

    /*
     * shut down any PlanQual processing we were doing
     */
    if (estate->es_evalPlanQual != NULL)
        EndEvalPlanQual(estate);

    /*
     * shut down the node-type-specific query processing
     */
    ExecEndNode(planstate);

    /*
     * for subplans too
     */
    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /*
     * destroy the executor "tuple" table.
     */
    ExecDropTupleTable(estate->es_tupleTable, true);
    estate->es_tupleTable = NULL;

    /*
     * close the result relation(s) if any, but hold locks until xact commit.
     */
    resultRelInfo = estate->es_result_relations;
    for (i = estate->es_num_result_relations; i > 0; i--)
    {
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
        resultRelInfo++;
    }

    /*
     * likewise close any trigger target relations
     */
    foreach(l, estate->es_trig_target_relations)
    {
        resultRelInfo = (ResultRelInfo *) lfirst(l);
        /* Close indices and then the relation itself */
        ExecCloseIndices(resultRelInfo);
        heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    }

    /*
     * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
     */
    foreach(l, estate->es_rowMarks)
    {
        ExecRowMark *erm = lfirst(l);

        heap_close(erm->relation, NoLock);
    }
}

/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have processed 'numberTuples' tuples,
 *		moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
            PlanState *planstate,
            CmdType operation,
            long numberTuples,
            ScanDirection direction,
            DestReceiver *dest)
{
    JunkFilter *junkfilter;
    TupleTableSlot *planSlot;
    TupleTableSlot *slot;
    ItemPointer tupleid = NULL;
    ItemPointerData tuple_ctid;
    long        current_tuple_count;

    /*
     * initialize local variables
     */
    current_tuple_count = 0;

    /*
     * Set the direction.
     */
    estate->es_direction = direction;

    /*
     * Process BEFORE EACH STATEMENT triggers
     */
    switch (operation)
    {
        case CMD_UPDATE:
            ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
            break;
        case CMD_DELETE:
            ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
            break;
        case CMD_INSERT:
            ExecBSInsertTriggers(estate, estate->es_result_relation_info);
            break;
        default:
            /* do nothing */
            break;
    }

    /*
     * Loop until we've processed the proper number of tuples from the plan.
     */
    for (;;)
    {
        /* Reset the per-output-tuple exprcontext */
        ResetPerTupleExprContext(estate);

        /*
         * Execute the plan and obtain a tuple
         */
lnext:  ;
        if (estate->es_useEvalPlan)
        {
            planSlot = EvalPlanQualNext(estate);
            if (TupIsNull(planSlot))
                planSlot = ExecProcNode(planstate);
        }
        else
            planSlot = ExecProcNode(planstate);

        /*
         * if the tuple is null, then we assume there is nothing more to
         * process so we just end the loop...
         */
        if (TupIsNull(planSlot))
            break;

        slot = planSlot;

        /*
         * If we have a junk filter, then project a new tuple with the junk
         * removed.
         *
         * Store this new "clean" tuple in the junkfilter's resultSlot.
         * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
         * because that tuple slot has the wrong descriptor.)
         *
         * But first, extract all the junk information we need.
         */
        if ((junkfilter = estate->es_junkFilter) != NULL)
        {
            /*
             * Process any FOR UPDATE or FOR SHARE locking requested.
             */
            if (estate->es_rowMarks != NIL)
            {
                ListCell   *l;

        lmark:  ;
                foreach(l, estate->es_rowMarks)
                {
                    ExecRowMark *erm = lfirst(l);
                    Datum       datum;
                    bool        isNull;
                    HeapTupleData tuple;
                    Buffer      buffer;
                    ItemPointerData update_ctid;
                    TransactionId update_xmax;
                    TupleTableSlot *newSlot;
                    LockTupleMode lockmode;
                    HTSU_Result test;

                    /* if child rel, must check whether it produced this row */
                    if (erm->rti != erm->prti)
                    {
                        Oid         tableoid;

                        datum = ExecGetJunkAttribute(slot,
                                                     erm->toidAttNo,
                                                     &isNull);
                        /* shouldn't ever get a null result... */
                        if (isNull)
                            elog(ERROR, "tableoid is NULL");
                        tableoid = DatumGetObjectId(datum);

                        if (tableoid != RelationGetRelid(erm->relation))
                        {
                            /* this child is inactive right now */
                            continue;
                        }
                    }

                    /* okay, fetch the tuple by ctid */
                    datum = ExecGetJunkAttribute(slot,
                                                 erm->ctidAttNo,
                                                 &isNull);
                    /* shouldn't ever get a null result... */
                    if (isNull)
                        elog(ERROR, "ctid is NULL");
                    tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

                    if (erm->forUpdate)
                        lockmode = LockTupleExclusive;
                    else
                        lockmode = LockTupleShared;

                    test = heap_lock_tuple(erm->relation, &tuple, &buffer,
                                           &update_ctid, &update_xmax,
                                           estate->es_output_cid,
                                           lockmode, erm->noWait);
                    ReleaseBuffer(buffer);
                    switch (test)
                    {
                        case HeapTupleSelfUpdated:
                            /* treat it as deleted; do not process */
                            goto lnext;

                        case HeapTupleMayBeUpdated:
                            break;

                        case HeapTupleUpdated:
                            if (IsXactIsoLevelSerializable)
                                ereport(ERROR,
                                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                         errmsg("could not serialize access due to concurrent update")));
                            if (!ItemPointerEquals(&update_ctid,
                                                   &tuple.t_self))
                            {
                                /* updated, so look at updated version */
                                newSlot = EvalPlanQual(estate,
                                                       erm->rti,
                                                       &update_ctid,
                                                       update_xmax);
                                if (!TupIsNull(newSlot))
                                {
                                    slot = planSlot = newSlot;
                                    estate->es_useEvalPlan = true;
                                    goto lmark;
                                }
                            }

                            /*
                             * if tuple was deleted or PlanQual failed for
                             * updated tuple - we must not return this tuple!
                             */
                            goto lnext;

                        default:
                            elog(ERROR, "unrecognized heap_lock_tuple status: %u",
                                 test);
                            return;
                    }
                }
            }

            /*
             * extract the 'ctid' junk attribute.
             */
            if (operation == CMD_UPDATE || operation == CMD_DELETE)
            {
                Datum       datum;
                bool        isNull;

                datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
                                             &isNull);
                /* shouldn't ever get a null result... */
                if (isNull)
                    elog(ERROR, "ctid is NULL");

                tupleid = (ItemPointer) DatumGetPointer(datum);
                tuple_ctid = *tupleid;  /* make sure we don't free the ctid!! */
                tupleid = &tuple_ctid;
            }

            /*
             * Create a new "clean" tuple with all junk attributes removed.  We
             * don't need to do this for DELETE, however (there will in fact
             * be no non-junk attributes in a DELETE!)
             */
            if (operation != CMD_DELETE)
                slot = ExecFilterJunk(junkfilter, slot);
        }

        /*
         * now that we have a tuple, do the appropriate thing with it.. either
         * send it to the output destination, add it to a relation someplace,
         * delete it from a relation, or modify some of its attributes.
         */
        switch (operation)
        {
            case CMD_SELECT:
                ExecSelect(slot, dest, estate);
                break;

            case CMD_INSERT:
                ExecInsert(slot, tupleid, planSlot, dest, estate);
                break;

            case CMD_DELETE:
                ExecDelete(tupleid, planSlot, dest, estate);
                break;

            case CMD_UPDATE:
                ExecUpdate(slot, tupleid, planSlot, dest, estate);
                break;

            default:
                elog(ERROR, "unrecognized operation code: %d",
                     (int) operation);
                break;
        }

        /*
         * check our tuple count.. if we've processed the proper number then
         * quit, else loop again and process more tuples.  Zero numberTuples
         * means no limit.
         */
        current_tuple_count++;
        if (numberTuples && numberTuples == current_tuple_count)
            break;
    }

    /*
     * Process AFTER EACH STATEMENT triggers
     */
    switch (operation)
    {
        case CMD_UPDATE:
            ExecASUpdateTriggers(estate, estate->es_result_relation_info);
            break;
        case CMD_DELETE:
            ExecASDeleteTriggers(estate, estate->es_result_relation_info);
            break;
        case CMD_INSERT:
            ExecASInsertTriggers(estate, estate->es_result_relation_info);
            break;
        default:
            /* do nothing */
            break;
    }
}

/* ----------------------------------------------------------------
 *		ExecSelect
 *
 *		SELECTs are easy.. we just pass the tuple to the appropriate
 *		output function.
 * ----------------------------------------------------------------
 */
static void
ExecSelect(TupleTableSlot *slot,
           DestReceiver *dest, EState *estate)
{
    (*dest->receiveSlot) (slot, dest);
    IncrRetrieved();
    (estate->es_processed)++;
}

/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs are trickier.. we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 * ----------------------------------------------------------------
 */
static void
ExecInsert(TupleTableSlot *slot,
           ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate)
{
    HeapTuple   tuple;
    ResultRelInfo *resultRelInfo;
    Relation    resultRelationDesc;
    Oid         newId;

    /*
     * get the heap tuple out of the tuple table slot, making sure we have a
     * writable copy
     */
    tuple = ExecMaterializeSlot(slot);

    /*
     * get information on the (current) result relation
     */
    resultRelInfo = estate->es_result_relation_info;
    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /* BEFORE ROW INSERT Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
    {
        HeapTuple   newtuple;

        newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

        if (newtuple == NULL)   /* "do nothing" */
            return;

        if (newtuple != tuple)  /* modified by Trigger(s) */
        {
            /*
             * Put the modified tuple into a slot for convenience of routines
             * below.  We assume the tuple was allocated in per-tuple memory
             * context, and therefore will go away by itself.  The tuple table
             * slot should not try to clear it.
             */
            TupleTableSlot *newslot = estate->es_trig_tuple_slot;

            if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
                ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
            ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
            slot = newslot;
            tuple = newtuple;
        }
    }

    /*
     * Check the constraints of the tuple
     */
    if (resultRelationDesc->rd_att->constr)
        ExecConstraints(resultRelInfo, slot, estate);

    /*
     * insert the tuple
     *
     * Note: heap_insert returns the tid (location) of the new tuple in the
     * t_self field.
     */
    newId = heap_insert(resultRelationDesc, tuple,
                        estate->es_output_cid, 0, NULL);

    IncrAppended();
    (estate->es_processed)++;
    estate->es_lastoid = newId;
    setLastTid(&(tuple->t_self));

    /*
     * insert index entries for tuple
     */
    if (resultRelInfo->ri_NumIndices > 0)
        ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

    /* AFTER ROW INSERT Triggers */
    ExecARInsertTriggers(estate, resultRelInfo, tuple);

    /* Process RETURNING if present */
    if (resultRelInfo->ri_projectReturning)
        ExecProcessReturning(resultRelInfo->ri_projectReturning,
                             slot, planSlot, dest);
}

/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 * ----------------------------------------------------------------
 */
static void
ExecDelete(ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate)
{
    ResultRelInfo *resultRelInfo;
    Relation    resultRelationDesc;
    HTSU_Result result;
    ItemPointerData update_ctid;
    TransactionId update_xmax;

    /*
     * get information on the (current) result relation
     */
    resultRelInfo = estate->es_result_relation_info;
    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /* BEFORE ROW DELETE Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
    {
        bool        dodelete;

        dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);

        if (!dodelete)          /* "do nothing" */
            return;
    }

    /*
     * delete the tuple
     *
     * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
     * the row to be deleted is visible to that snapshot, and throw a can't-
     * serialize error if not.  This is a special-case behavior needed for
     * referential integrity updates in serializable transactions.
     */
ldelete:;
    result = heap_delete(resultRelationDesc, tupleid,
                         &update_ctid, &update_xmax,
                         estate->es_output_cid,
                         estate->es_crosscheck_snapshot,
                         true /* wait for commit */ );
    switch (result)
    {
        case HeapTupleSelfUpdated:
            /* already deleted by self; nothing to do */
            return;

        case HeapTupleMayBeUpdated:
            break;

        case HeapTupleUpdated:
            if (IsXactIsoLevelSerializable)
                ereport(ERROR,
                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                         errmsg("could not serialize access due to concurrent update")));
            else if (!ItemPointerEquals(tupleid, &update_ctid))
            {
                TupleTableSlot *epqslot;

                epqslot = EvalPlanQual(estate,
                                       resultRelInfo->ri_RangeTableIndex,
                                       &update_ctid,
                                       update_xmax);
                if (!TupIsNull(epqslot))
                {
                    *tupleid = update_ctid;
                    goto ldelete;
                }
            }
            /* tuple already deleted; nothing to do */
            return;

        default:
            elog(ERROR, "unrecognized heap_delete status: %u", result);
            return;
    }

    IncrDeleted();
    (estate->es_processed)++;

    /*
     * Note: Normally one would think that we have to delete index tuples
     * associated with the heap tuple now...
     *
     * ... but in POSTGRES, we have no need to do this because VACUUM will
     * take care of it later.  We can't delete index tuples immediately
     * anyway, since the tuple is still visible to other transactions.
     */

    /* AFTER ROW DELETE Triggers */
    ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

    /* Process RETURNING if present */
    if (resultRelInfo->ri_projectReturning)
    {
        /*
         * We have to put the target tuple into a slot, which means first we
         * gotta fetch it.  We can use the trigger tuple slot.
         */
        TupleTableSlot *slot = estate->es_trig_tuple_slot;
        HeapTupleData deltuple;
        Buffer      delbuffer;

        deltuple.t_self = *tupleid;
        if (!heap_fetch(resultRelationDesc, SnapshotAny,
                        &deltuple, &delbuffer, false, NULL))
            elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

        if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
            ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
        ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

        ExecProcessReturning(resultRelInfo->ri_projectReturning,
                             slot, planSlot, dest);

        ExecClearTuple(slot);
        ReleaseBuffer(delbuffer);
    }
}

/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
           ItemPointer tupleid,
           TupleTableSlot *planSlot,
           DestReceiver *dest, EState *estate)
{
    HeapTuple   tuple;
    ResultRelInfo *resultRelInfo;
    Relation    resultRelationDesc;
    HTSU_Result result;
    ItemPointerData update_ctid;
    TransactionId update_xmax;

    /*
     * abort the operation if not running transactions
     */
    if (IsBootstrapProcessingMode())
        elog(ERROR, "cannot UPDATE during bootstrap");

    /*
     * get the heap tuple out of the tuple table slot, making sure we have a
     * writable copy
     */
    tuple = ExecMaterializeSlot(slot);

    /*
     * get information on the (current) result relation
     */
    resultRelInfo = estate->es_result_relation_info;
    resultRelationDesc = resultRelInfo->ri_RelationDesc;

    /* BEFORE ROW UPDATE Triggers */
    if (resultRelInfo->ri_TrigDesc &&
        resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
    {
        HeapTuple   newtuple;

        newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
                                        tupleid, tuple);

        if (newtuple == NULL)   /* "do nothing" */
            return;

        if (newtuple != tuple)  /* modified by Trigger(s) */
        {
            /*
             * Put the modified tuple into a slot for convenience of routines
             * below.  We assume the tuple was allocated in per-tuple memory
             * context, and therefore will go away by itself.  The tuple table
             * slot should not try to clear it.
             */
            TupleTableSlot *newslot = estate->es_trig_tuple_slot;

            if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
                ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
            ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
            slot = newslot;
            tuple = newtuple;
        }
    }

    /*
     * Check the constraints of the tuple
     *
     * If we generate a new candidate tuple after EvalPlanQual testing, we
     * must loop back here and recheck constraints.  (We don't need to redo
     * triggers, however.  If there are any BEFORE triggers then trigger.c
     * will have done heap_lock_tuple to lock the correct tuple, so there's no
     * need to do them again.)
     */
lreplace:;
    if (resultRelationDesc->rd_att->constr)
        ExecConstraints(resultRelInfo, slot, estate);

    /*
     * replace the heap tuple
     *
     * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
     * the row to be updated is visible to that snapshot, and throw a can't-
     * serialize error if not.  This is a special-case behavior needed for
     * referential integrity updates in serializable transactions.
     */
    result = heap_update(resultRelationDesc, tupleid, tuple,
                         &update_ctid, &update_xmax,
                         estate->es_output_cid,
                         estate->es_crosscheck_snapshot,
                         true /* wait for commit */ );
    switch (result)
    {
        case HeapTupleSelfUpdated:
            /* already deleted by self; nothing to do */
            return;

        case HeapTupleMayBeUpdated:
            break;

        case HeapTupleUpdated:
            if (IsXactIsoLevelSerializable)
                ereport(ERROR,
                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                         errmsg("could not serialize access due to concurrent update")));
            else if (!ItemPointerEquals(tupleid, &update_ctid))
            {
                TupleTableSlot *epqslot;

                epqslot = EvalPlanQual(estate,
                                       resultRelInfo->ri_RangeTableIndex,
                                       &update_ctid,
                                       update_xmax);
                if (!TupIsNull(epqslot))
                {
                    *tupleid = update_ctid;
                    slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
                    tuple = ExecMaterializeSlot(slot);
                    goto lreplace;
                }
            }
            /* tuple already deleted; nothing to do */
            return;

        default:
            elog(ERROR, "unrecognized heap_update status: %u", result);
            return;
    }

    IncrReplaced();
    (estate->es_processed)++;

    /*
     * Note: instead of having to update the old index tuples associated with
     * the heap tuple, all we do is form and insert new index tuples.  This is
     * because UPDATEs are actually DELETEs and INSERTs, and index tuple
     * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
     * here is insert new index tuples.  -cim 9/27/89
     */

    /*
     * insert index entries for tuple
     *
     * Note: heap_update returns the tid (location) of the new tuple in the
     * t_self field.
     *
     * If it's a HOT update, we mustn't insert new index entries.
     */
    if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
        ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

    /* AFTER ROW UPDATE Triggers */
    ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

    /* Process RETURNING if present */
    if (resultRelInfo->ri_projectReturning)
        ExecProcessReturning(resultRelInfo->ri_projectReturning,
                             slot, planSlot, dest);
}

/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
             TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    int         ncheck = rel->rd_att->constr->num_check;
    ConstrCheck *check = rel->rd_att->constr->check;
    ExprContext *econtext;
    MemoryContext oldContext;
    List       *qual;
    int         i;

    /*
     * If first time through for this result relation, build expression
     * nodetrees for rel's constraint expressions.  Keep them in the per-query
     * memory context so they'll survive throughout the query.
     */
    if (resultRelInfo->ri_ConstraintExprs == NULL)
    {
        oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
        resultRelInfo->ri_ConstraintExprs =
            (List **) palloc(ncheck * sizeof(List *));
        for (i = 0; i < ncheck; i++)
        {
            /* ExecQual wants implicit-AND form */
            qual = make_ands_implicit(stringToNode(check[i].ccbin));
            resultRelInfo->ri_ConstraintExprs[i] = (List *)
                ExecPrepareExpr((Expr *) qual, estate);
        }
        MemoryContextSwitchTo(oldContext);
    }

    /*
     * We will use the EState's per-tuple context for evaluating constraint
     * expressions (creating it if it's not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /* And evaluate the constraints */
    for (i = 0; i < ncheck; i++)
    {
        qual = resultRelInfo->ri_ConstraintExprs[i];

        /*
         * NOTE: SQL92 specifies that a NULL result from a constraint
         * expression is not to be treated as a failure.  Therefore, tell
         * ExecQual to return TRUE for NULL; e.g., CHECK (x > 0) is satisfied
         * when x is NULL.
         */
        if (!ExecQual(qual, econtext, true))
            return check[i].ccname;
    }

    /* NULL result means no error */
    return NULL;
}

void
ExecConstraints(ResultRelInfo *resultRelInfo,
                TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleConstr *constr = rel->rd_att->constr;

    Assert(constr);

    if (constr->has_not_null)
    {
        int         natts = rel->rd_att->natts;
        int         attrChk;

        for (attrChk = 1; attrChk <= natts; attrChk++)
        {
            if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
                slot_attisnull(slot, attrChk))
                ereport(ERROR,
                        (errcode(ERRCODE_NOT_NULL_VIOLATION),
                         errmsg("null value in column \"%s\" violates not-null constraint",
                                NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
        }
    }

    if (constr->num_check > 0)
    {
        const char *failed;

        if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
            ereport(ERROR,
                    (errcode(ERRCODE_CHECK_VIOLATION),
                     errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
                            RelationGetRelationName(rel), failed)));
    }
}

/*
 * ExecProcessReturning --- evaluate a RETURNING list and send to dest
 *
 * projectReturning: RETURNING projection info for current result rel
 * tupleSlot: slot holding tuple actually inserted/updated/deleted
 * planSlot: slot holding tuple returned by top plan node
 * dest: where to send the output
 */
static void
ExecProcessReturning(ProjectionInfo *projectReturning,
                     TupleTableSlot *tupleSlot,
                     TupleTableSlot *planSlot,
                     DestReceiver *dest)
{
    ExprContext *econtext = projectReturning->pi_exprContext;
    TupleTableSlot *retSlot;

    /*
     * Reset per-tuple memory context to free any expression evaluation
     * storage allocated in the previous cycle.
     */
    ResetExprContext(econtext);

    /* Make tuple and any needed join variables available to ExecProject */
    econtext->ecxt_scantuple = tupleSlot;
    econtext->ecxt_outertuple = planSlot;

    /* Compute the RETURNING expressions */
    retSlot = ExecProject(projectReturning, NULL);

    /* Send to dest */
    (*dest->receiveSlot) (retSlot, dest);

    ExecClearTuple(retSlot);
}

/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 *
 *	estate - executor state data
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
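
/*
 * Worked example (illustrative only): under READ COMMITTED, suppose our
 * UPDATE fetches a row, but heap_update reports HeapTupleUpdated because a
 * concurrent transaction already updated it and committed.  The caller then
 * invokes EvalPlanQual with the old row's t_ctid and xmax; this routine
 * chases the update chain to the newest committed version, re-evaluates the
 * plan quals against it, and returns a candidate slot (so the UPDATE applies
 * to the new version) or NULL (the new version no longer satisfies the
 * quals, so the row is skipped).
 */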

TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
             ItemPointer tid, TransactionId priorXmax)
{
    evalPlanQual *epq;
    EState     *epqstate;
    Relation    relation;
    HeapTupleData tuple;
    HeapTuple   copyTuple = NULL;
    SnapshotData SnapshotDirty;
    bool        endNode;

    Assert(rti != 0);

    /*
     * find relation containing target tuple
     */
    if (estate->es_result_relation_info != NULL &&
        estate->es_result_relation_info->ri_RangeTableIndex == rti)
        relation = estate->es_result_relation_info->ri_RelationDesc;
    else
    {
        ListCell   *l;

        relation = NULL;
        foreach(l, estate->es_rowMarks)
        {
            ExecRowMark *erm = lfirst(l);

            if (erm->rti == rti)
            {
                relation = erm->relation;
                break;
            }
        }
        if (relation == NULL)
            elog(ERROR, "could not find RowMark for RT index %u", rti);
    }

    /*
     * fetch tid tuple
     *
     * Loop here to deal with updated or busy tuples
     */
    InitDirtySnapshot(SnapshotDirty);
    tuple.t_self = *tid;
    for (;;)
    {
        Buffer      buffer;

        if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
        {
            /*
             * If xmin isn't what we're expecting, the slot must have been
             * recycled and reused for an unrelated tuple.  This implies that
             * the latest version of the row was deleted, so we need do
             * nothing.  (Should be safe to examine xmin without getting
             * buffer's content lock, since xmin never changes in an existing
             * tuple.)
             */
            if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                     priorXmax))
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /* otherwise xmin should not be dirty... */
            if (TransactionIdIsValid(SnapshotDirty.xmin))
                elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

            /*
             * If tuple is being updated by other transaction then we have to
             * wait for its commit/abort.
             */
            if (TransactionIdIsValid(SnapshotDirty.xmax))
            {
                ReleaseBuffer(buffer);
                XactLockTableWait(SnapshotDirty.xmax);
                continue;       /* loop back to repeat heap_fetch */
            }

            /*
             * If tuple was inserted by our own transaction, we have to check
             * cmin against es_output_cid: cmin >= current CID means our
             * command cannot see the tuple, so we should ignore it.  Without
             * this we are open to the "Halloween problem" of indefinitely
             * re-updating the same tuple.  (We need not check cmax because
             * HeapTupleSatisfiesDirty will consider a tuple deleted by our
             * transaction dead, regardless of cmax.)  We just checked that
             * priorXmax == xmin, so we can test that variable instead of
             * doing HeapTupleHeaderGetXmin again.
             */
            if (TransactionIdIsCurrentTransactionId(priorXmax) &&
                HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /*
             * We got tuple - now copy it for use by recheck query.
             */
            copyTuple = heap_copytuple(&tuple);
            ReleaseBuffer(buffer);
            break;
        }

        /*
         * If the referenced slot was actually empty, the latest version of
         * the row must have been deleted, so we need do nothing.
         */
        if (tuple.t_data == NULL)
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * As above, if xmin isn't what we're expecting, do nothing.
         */
        if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                 priorXmax))
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * If we get here, the tuple was found but failed SnapshotDirty.
         * Assuming the xmin is either a committed xact or our own xact (as it
         * certainly should be if we're trying to modify the tuple), this must
         * mean that the row was updated or deleted by either a committed xact
         * or our own xact.  If it was deleted, we can ignore it; if it was
         * updated then chain up to the next version and repeat the whole
         * test.
         *
         * As above, it should be safe to examine xmax and t_ctid without the
         * buffer content lock, because they can't be changing.
         */
        if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
        {
            /* deleted, so forget about it */
            ReleaseBuffer(buffer);
            return NULL;
        }

        /* updated, so look at the updated row */
        tuple.t_self = tuple.t_data->t_ctid;
        /* updated row should have xmin matching this xmax */
        priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
        ReleaseBuffer(buffer);
        /* loop back to fetch next in chain */
    }

    /*
     * For UPDATE/DELETE we have to return tid of actual row we're executing
     * PQ for.
     */
    *tid = tuple.t_self;

    /*
     * Need to run a recheck subquery.  Find or create a PQ stack entry.
     */
    epq = estate->es_evalPlanQual;
    endNode = true;

    if (epq != NULL && epq->rti == 0)
    {
        /* Top PQ stack entry is idle, so re-use it */
        Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
        epq->rti = rti;
        endNode = false;
    }

    /*
     * If this is a request for another RTE (call it Ra), check whether
     * PlanQual was already requested for Ra.  If so, Ra's row was updated
     * again, and we must restart the old execution for Ra, discarding
     * everything done since Ra was suspended.
     */
    if (epq != NULL && epq->rti != rti &&
        epq->estate->es_evTuple[rti - 1] != NULL)
    {
        do
        {
            evalPlanQual *oldepq;

            /* stop execution */
            EvalPlanQualStop(epq);
            /* pop previous PlanQual from the stack */
            oldepq = epq->next;
            Assert(oldepq && oldepq->rti != 0);
            /* push current PQ to freePQ stack */
            oldepq->free = epq;
            epq = oldepq;
            estate->es_evalPlanQual = epq;
        } while (epq->rti != rti);
    }

    /*
     * If we are requested for another RTE then we have to suspend execution
     * of current PlanQual and start execution for new one.
     */
    if (epq == NULL || epq->rti != rti)
2343 /* try to reuse plan used previously */
2344 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
2346 if (newepq == NULL) /* first call or freePQ stack is empty */
2348 newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
2349 newepq->free = NULL;
2350 newepq->estate = NULL;
2351 newepq->planstate = NULL;
2355 /* recycle previously used PlanQual */
2356 Assert(newepq->estate == NULL);
2359 /* push current PQ to the stack */
2362 estate->es_evalPlanQual = epq;
2367 Assert(epq->rti == rti);
2370 * Ok - we're requested for the same RTE. Unfortunately we still have to
2371 * end and restart execution of the plan, because ExecReScan wouldn't
2372 * ensure that upper plan nodes would reset themselves. We could make
2373 * that work if insertion of the target tuple were integrated with the
2374 * Param mechanism somehow, so that the upper plan nodes know that their
2375 * children's outputs have changed.
2377 * Note that the stack of free evalPlanQual nodes is quite useless at the
2378 * moment, since it only saves us from pallocing/releasing the
2379 * evalPlanQual nodes themselves. But it will be useful once we implement
2380 * ReScan instead of end/restart for re-using PlanQual nodes.
2384 /* stop execution */
2385 EvalPlanQualStop(epq);
2389 * Initialize new recheck query.
2391 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
2392 * instead copy down changeable state from the top plan (including
2393 * es_result_relation_info, es_junkFilter) and reset locally changeable
2394 * state in the epq (including es_param_exec_vals, es_evTupleNull).
2396 EvalPlanQualStart(epq, estate, epq->next);
2399 * free old RTE' tuple, if any, and store target tuple where relation's
2400 * scan node will see it
2402 epqstate = epq->estate;
2403 if (epqstate->es_evTuple[rti - 1] != NULL)
2404 heap_freetuple(epqstate->es_evTuple[rti - 1]);
2405 epqstate->es_evTuple[rti - 1] = copyTuple;
2407 return EvalPlanQualNext(estate);
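
/*
 * Illustrative sketch (added example, not part of the original file): the
 * READ COMMITTED retry pattern by which callers such as ExecUpdate consume
 * EvalPlanQual's result after heap_update reports HeapTupleUpdated.  This
 * is a simplified paraphrase of that logic, wrapped in a hypothetical
 * helper; the real caller retries its update via a goto instead.
 */
#ifdef NOT_USED
static void
example_update_retry(EState *estate, ResultRelInfo *resultRelInfo,
					 ItemPointer tupleid, ItemPointer update_ctid,
					 TransactionId update_xmax)
{
	/* under SERIALIZABLE isolation a concurrent update is simply an error */
	if (IsXactIsoLevelSerializable)
		ereport(ERROR,
				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
				 errmsg("could not serialize access due to concurrent update")));

	if (!ItemPointerEquals(tupleid, update_ctid))
	{
		TupleTableSlot *epqslot;

		/* fetch the newest version of the row and re-evaluate the quals */
		epqslot = EvalPlanQual(estate,
							   resultRelInfo->ri_RangeTableIndex,
							   update_ctid, update_xmax);
		if (!TupIsNull(epqslot))
		{
			/*
			 * Row still qualifies: the caller copies *update_ctid into
			 * *tupleid, refilters junk columns, and retries the update.
			 */
			*tupleid = *update_ctid;
		}
	}
	/* otherwise: row was deleted or no longer qualifies, so skip it */
}
#endif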
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	Assert(epq->rti != 0);

lpqnext:;
	oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
	slot = ExecProcNode(epq->planstate);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * No more tuples for this PQ. Continue previous one.
	 */
	if (TupIsNull(slot))
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			/* and continue Query execution */
			return NULL;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
		goto lpqnext;
	}

	return slot;
}
static void
EndEvalPlanQual(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;

	if (epq->rti == 0)			/* plans already shut down */
	{
		Assert(epq->next == NULL);
		return;
	}

	for (;;)
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			break;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
	}
}
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
	EState	   *epqstate;
	int			rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = list_length(estate->es_range_table);

	epq->estate = epqstate = CreateExecutorState();

	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	/*
	 * The epqstates share the top query's copy of unchanging state such as
	 * the snapshot, rangetable, result-rel info, and external Param info.
	 * They need their own copies of local state, including a tuple table,
	 * es_param_exec_vals, etc.
	 */
	epqstate->es_direction = ForwardScanDirection;
	epqstate->es_snapshot = estate->es_snapshot;
	epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
	epqstate->es_range_table = estate->es_range_table;
	epqstate->es_output_cid = estate->es_output_cid;
	epqstate->es_result_relations = estate->es_result_relations;
	epqstate->es_num_result_relations = estate->es_num_result_relations;
	epqstate->es_result_relation_info = estate->es_result_relation_info;
	epqstate->es_junkFilter = estate->es_junkFilter;
	/* es_trig_target_relations must NOT be copied */
	epqstate->es_param_list_info = estate->es_param_list_info;
	if (estate->es_plannedstmt->nParamExec > 0)
		epqstate->es_param_exec_vals = (ParamExecData *)
			palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
	epqstate->es_rowMarks = estate->es_rowMarks;
	epqstate->es_instrument = estate->es_instrument;
	epqstate->es_select_into = estate->es_select_into;
	epqstate->es_into_oids = estate->es_into_oids;
	epqstate->es_plannedstmt = estate->es_plannedstmt;

	/*
	 * Each epqstate must have its own es_evTupleNull state, but all the
	 * stack entries share es_evTuple state.  This allows sub-rechecks to
	 * inherit the value being examined by an outer recheck.
	 */
	epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
	if (priorepq == NULL)
		/* first PQ stack entry */
		epqstate->es_evTuple = (HeapTuple *)
			palloc0(rtsize * sizeof(HeapTuple));
	else
		/* later stack entries share the same storage */
		epqstate->es_evTuple = priorepq->estate->es_evTuple;

	/*
	 * Create sub-tuple-table; we needn't redo the CountSlots work though.
	 */
	epqstate->es_tupleTable =
		ExecCreateTupleTable(estate->es_tupleTable->size);

	/*
	 * Initialize private state information for each SubPlan.  We must do
	 * this before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(epqstate->es_subplanstates == NIL);
	foreach(l, estate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, epqstate, 0);

		epqstate->es_subplanstates = lappend(epqstate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Initialize the private state information for all the nodes in the
	 * query tree.  This opens files, allocates storage and leaves us ready
	 * to start processing tuples.
	 */
	epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;
	ListCell   *l;

	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	ExecEndNode(epq->planstate);

	foreach(l, epqstate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	foreach(l, epqstate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(epqstate);

	epq->estate = NULL;
	epq->planstate = NULL;
}
/*
 * ExecGetActivePlanTree --- get the active PlanState tree from a QueryDesc
 *
 * Ordinarily this is just the one mentioned in the QueryDesc, but if we
 * are looking at a row returned by the EvalPlanQual machinery, we need
 * to look at the subsidiary state instead.
 */
PlanState *
ExecGetActivePlanTree(QueryDesc *queryDesc)
{
	EState	   *estate = queryDesc->estate;

	if (estate && estate->es_useEvalPlan && estate->es_evalPlanQual != NULL)
		return estate->es_evalPlanQual->planstate;
	else
		return queryDesc->planstate;
}
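
/*
 * Illustrative sketch (added example): a hypothetical inspection hook that
 * examines executor state mid-query should start from the active plan tree
 * rather than from queryDesc->planstate directly, so that it sees the
 * recheck plan while an EvalPlanQual cycle is in progress.
 */
#ifdef NOT_USED
static void
example_inspect_active_plan(QueryDesc *queryDesc)
{
	PlanState  *planstate = ExecGetActivePlanTree(queryDesc);

	elog(DEBUG1, "active plan node type: %d", (int) nodeTag(planstate->plan));
}
#endif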
/*
 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
 *
 * We implement SELECT INTO by diverting SELECT's normal output with
 * a specialized DestReceiver type.
 */

typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
	Relation	rel;			/* Relation to write to */
	int			hi_options;		/* heap_insert performance options */
	BulkInsertState bistate;	/* bulk insert state */
} DR_intorel;
/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->intoClause;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else
	{
		tablespaceId = GetDefaultTablespace(into->rel->istemp);
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* Copy the tupdesc because heap_create_with_catalog modifies it */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  NIL,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
	 * that the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
	myState->rel = intoRelationDesc;

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.  We
	 * can skip the FSM in any case.
	 */
	myState->hi_options = HEAP_INSERT_SKIP_FSM |
		(XLogArchivingActive() ? 0 : HEAP_INSERT_SKIP_WAL);
	myState->bistate = GetBulkInsertState();

	/* Not using WAL requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);
}
/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
	DR_intorel *myState = (DR_intorel *) queryDesc->dest;

	/* OpenIntoRel might never have gotten called */
	if (myState && myState->pub.mydest == DestIntoRel && myState->rel)
	{
		FreeBulkInsertState(myState->bistate);

		/* If we skipped using WAL, must heap_sync before commit */
		if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
			heap_sync(myState->rel);

		/* close rel, but keep lock until commit */
		heap_close(myState->rel, NoLock);

		myState->rel = NULL;
	}
}
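
/*
 * Illustrative sketch (added example): the WAL-skipping bulk-load pattern
 * that OpenIntoRel, intorel_receive and CloseIntoRel implement between
 * them, condensed into one hypothetical function.  The key invariant: if
 * the inserts bypass WAL, the relation must be heap_sync'd before commit,
 * or a crash could silently lose the data.
 */
#ifdef NOT_USED
static void
example_bulk_load(Relation rel, HeapTuple tup, CommandId cid)
{
	BulkInsertState bistate = GetBulkInsertState();
	int			options = HEAP_INSERT_SKIP_FSM;

	/* WAL may be skipped only when archiving/PITR doesn't depend on it */
	if (!XLogArchivingActive())
		options |= HEAP_INSERT_SKIP_WAL;

	heap_insert(rel, tup, cid, options, bistate);

	FreeBulkInsertState(bistate);

	/* skipped WAL means we must fsync the data ourselves */
	if (options & HEAP_INSERT_SKIP_WAL)
		heap_sync(rel);
}
#endif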
/*
 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
 *
 * Since CreateDestReceiver doesn't accept the parameters we'd need,
 * we just leave the private fields zeroed here.  OpenIntoRel will
 * fill them in.
 */
DestReceiver *
CreateIntoRelDestReceiver(void)
{
	DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));

	self->pub.receiveSlot = intorel_receive;
	self->pub.rStartup = intorel_startup;
	self->pub.rShutdown = intorel_shutdown;
	self->pub.rDestroy = intorel_destroy;
	self->pub.mydest = DestIntoRel;

	return (DestReceiver *) self;
}
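
/*
 * Illustrative sketch (added example): the generic protocol by which the
 * executor drives any DestReceiver, including this one.  ExecutorRun and
 * friends perform the equivalent of the calls below; the function name is
 * local to this example.
 */
#ifdef NOT_USED
static void
example_drive_dest(DestReceiver *dest, TupleTableSlot *slot, TupleDesc tupdesc)
{
	/* once, before any tuples are delivered */
	(*dest->rStartup) (dest, (int) CMD_SELECT, tupdesc);

	/* once per tuple produced by the plan */
	(*dest->receiveSlot) (slot, dest);

	/* once, after the last tuple */
	(*dest->rShutdown) (dest);

	/* when the receiver object itself is no longer needed */
	(*dest->rDestroy) (dest);
}
#endif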
/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}

/*
 * intorel_receive --- receive one tuple
 */
static void
intorel_receive(TupleTableSlot *slot, DestReceiver *self)
{
	DR_intorel *myState = (DR_intorel *) self;
	HeapTuple	tuple;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	heap_insert(myState->rel,
				tuple,
				myState->estate->es_output_cid,
				myState->hi_options,
				myState->bistate);

	/* We know this is a newly created relation, so there are no indexes */
}
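
/*
 * Illustrative note (added example): ExecMaterializeSlot is used above
 * because the slot may hold a virtual tuple, while heap_insert needs a
 * physical HeapTuple it can scribble on.  A hypothetical receiver that must
 * leave the slot's own tuple untouched could use ExecCopySlotTuple instead,
 * at the cost of an extra copy and an explicit free:
 */
#ifdef NOT_USED
static void
example_receive_copy(TupleTableSlot *slot, DR_intorel *myState)
{
	HeapTuple	tuple = ExecCopySlotTuple(slot);

	heap_insert(myState->rel, tuple, myState->estate->es_output_cid,
				myState->hi_options, myState->bistate);
	heap_freetuple(tuple);
}
#endif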
/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}

/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}