/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorEnd()
 *
 *	The old ExecutorMain() has been replaced by ExecutorStart(),
 *	ExecutorRun() and ExecutorEnd()
 *
 *	These three procedures are the external interfaces to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart() must be called at the beginning of execution of any
 *	query plan and ExecutorEnd() should always be called at the end of
 *	execution of a plan.
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards or backwards, and for how many tuples.
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.288 2007/02/22 22:00:22 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
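
/*
 * Illustrative only (not part of the original file): a caller that has
 * already built a QueryDesc drives the three entry points described above
 * in order.  A hedged sketch of the calling convention, not code taken
 * from the backend:
 *
 *		ExecutorStart(queryDesc, 0);
 *		(void) ExecutorRun(queryDesc, ForwardScanDirection, 0L);
 *		ExecutorEnd(queryDesc);
 */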
#include "postgres.h"

#include "access/heapam.h"
#include "access/reloptions.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/heap.h"
#include "catalog/namespace.h"
#include "catalog/toasting.h"
#include "commands/tablespace.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "executor/instrument.h"
#include "executor/nodeSubplan.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
typedef struct evalPlanQual
{
	Index		rti;
	EState	   *estate;
	PlanState  *planstate;
	struct evalPlanQual *next;	/* stack of active PlanQual plans */
	struct evalPlanQual *free;	/* list of free PlanQual plans */
} evalPlanQual;
/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void initResultRelInfo(ResultRelInfo *resultRelInfo,
				  Index resultRelationIndex,
				  List *rangeTable,
				  CmdType operation,
				  bool doInstrument);
static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest, EState *estate);
static void ExecProcessReturning(ProjectionInfo *projectReturning,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot,
					 DestReceiver *dest);
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
				  evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
static void intorel_receive(TupleTableSlot *slot, DestReceiver *self);
static void intorel_shutdown(DestReceiver *self);
static void intorel_destroy(DestReceiver *self);

/* end of local decls */
/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (it's not really
 * clear why we bother to separate the two functions, but...).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 */
	if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in parameters, if any, from queryDesc
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->nParamExec > 0)
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = queryDesc->snapshot;
	estate->es_crosscheck_snapshot = queryDesc->crosscheck_snapshot;
	estate->es_instrument = queryDesc->doInstrument;

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}
/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module. It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.
 *
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecutorRun(QueryDesc *queryDesc,
			ScanDirection direction, long count)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	TupleTableSlot *result;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * extract information from the query descriptor
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->returningLists);

	if (sendTuples)
		(*dest->rStartup) (dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (ScanDirectionIsNoMovement(direction))
		result = NULL;
	else
		result = ExecutePlan(estate,
							 queryDesc->planstate,
							 operation,
							 count,
							 direction,
							 dest);

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		(*dest->rShutdown) (dest);

	MemoryContextSwitchTo(oldcontext);

	return result;
}
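
/*
 * Illustrative only: portal-style fetching calls ExecutorRun repeatedly
 * with a nonzero count, relying on count = 0 meaning "run to completion".
 * A hedged sketch, not code from the backend:
 *
 *		(void) ExecutorRun(queryDesc, ForwardScanDirection, 10L);	-- first 10
 *		(void) ExecutorRun(queryDesc, ForwardScanDirection, 10L);	-- next 10
 *		(void) ExecutorRun(queryDesc, BackwardScanDirection, 5L);	-- back up 5
 */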
/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/*
	 * Close the SELECT INTO relation if any
	 */
	if (estate->es_select_into)
		CloseIntoRel(queryDesc);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
}
/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * rescan plan
	 */
	ExecReScan(queryDesc->planstate, NULL);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 */
static void
ExecCheckRTPerms(List *rangeTable)
{
	ListCell   *l;

	foreach(l, rangeTable)
	{
		ExecCheckRTEPerms((RangeTblEntry *) lfirst(l));
	}
}
/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static void
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked by init_fcache when the function is prepared for execution.
	 * Join, subquery, and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
	 */
	if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
		!= requiredPerms)
		aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
					   get_rel_name(relOid));
}
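
/*
 * Illustrative only: because ACLMASK_ALL is used above, a query that needs
 * both read and write access to a table (say rte->requiredPerms =
 * ACL_SELECT | ACL_UPDATE for an UPDATE that also reads it) fails unless
 * the user holds *both* privileges; holding just one is not enough.
 * (Hedged example, not text from the backend.)
 */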
/*
 * ExecCheckXactReadOnly
 *		Check that the query does not imply any writes to non-temp tables.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
	ListCell   *l;

	/*
	 * CREATE TABLE AS or SELECT INTO?
	 *
	 * XXX should we allow this if the destination is temp?
	 */
	if (plannedstmt->into != NULL)
		goto fail;

	/* Fail if write permissions are requested on any non-temp table */
	foreach(l, plannedstmt->rtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		if (rte->rtekind != RTE_RELATION)
			continue;

		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
			continue;

		if (isTempNamespace(get_rel_namespace(rte->relid)))
			continue;

		goto fail;
	}

	return;

fail:
	ereport(ERROR,
			(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
			 errmsg("transaction is read-only")));
}
/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;

	/*
	 * initialize result relation stuff
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			initResultRelInfo(resultRelInfo,
							  lfirst_int(l),
							  rangeTable,
							  operation,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* Initialize to first or only result rel */
		estate->es_result_relation_info = resultRelInfos;
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
	}

	/*
	 * Detect whether we're doing SELECT INTO.  If so, set the es_into_oids
	 * flag appropriately so that the plan tree will be initialized with the
	 * correct tuple descriptors.  (Other SELECT INTO stuff comes later.)
	 */
	estate->es_select_into = false;
	if (operation == CMD_SELECT && plannedstmt->into != NULL)
	{
		estate->es_select_into = true;
		estate->es_into_oids = interpretOidsOption(plannedstmt->into->options);
	}

	/*
	 * Have to lock relations selected FOR UPDATE/FOR SHARE before we
	 * initialize the plan tree, else we'd be doing a lock upgrade.
	 * While we are at it, build the ExecRowMark list.
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		RowMarkClause *rc = (RowMarkClause *) lfirst(l);
		Oid			relid = getrelid(rc->rti, rangeTable);
		Relation	relation;
		ExecRowMark *erm;

		relation = heap_open(relid, RowShareLock);
		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->rti = rc->rti;
		erm->forUpdate = rc->forUpdate;
		erm->noWait = rc->noWait;
		/* We'll set up ctidAttno below */
		erm->ctidAttNo = InvalidAttrNumber;
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/*
	 * initialize the executor "tuple" table.  We need slots for all the plan
	 * nodes, plus possibly output slots for the junkfilter(s). At this point
	 * we aren't sure if we need junkfilters, so just add slots for them
	 * unconditionally.  Also, if it's not a SELECT, set up a slot for use for
	 * trigger output tuples.
	 */
	{
		int			nSlots = ExecCountSlotsNode(plan);

		if (plannedstmt->resultRelations != NIL)
			nSlots += list_length(plannedstmt->resultRelations);
		else
			nSlots += 1;
		if (operation != CMD_SELECT)
			nSlots++;			/* for es_trig_tuple_slot */
		if (plannedstmt->returningLists)
			nSlots++;			/* for RETURNING projection */

		estate->es_tupleTable = ExecCreateTupleTable(nSlots);

		if (operation != CMD_SELECT)
			estate->es_trig_tuple_slot =
				ExecAllocTableSlot(estate->es_tupleTable);
	}

	/* mark EvalPlanQual not active */
	estate->es_plannedstmt = plannedstmt;
	estate->es_evalPlanQual = NULL;
	estate->es_evTupleNull = NULL;
	estate->es_evTuple = NULL;
	estate->es_useEvalPlan = false;

	/*
	 * initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return. (this
	 * is especially important if we are creating a relation with "SELECT
	 * INTO")
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT and INSERT queries need a
	 * filter if there are any junk attrs in the tlist.  INSERT and SELECT
	 * INTO also need a filter if the plan may return raw disk tuples (else
	 * heap_insert will be scribbling on the source relation!). UPDATE and
	 * DELETE always need a filter, since there's always a junk 'ctid'
	 * attribute present --- no need to look first.
	 */
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		switch (operation)
		{
			case CMD_SELECT:
			case CMD_INSERT:
				foreach(tlist, plan->targetlist)
				{
					TargetEntry *tle = (TargetEntry *) lfirst(tlist);

					if (tle->resjunk)
					{
						junk_filter_needed = true;
						break;
					}
				}
				if (!junk_filter_needed &&
					(operation == CMD_INSERT || estate->es_select_into) &&
					ExecMayReturnRawTuples(planstate))
					junk_filter_needed = true;
				break;
			case CMD_UPDATE:
			case CMD_DELETE:
				junk_filter_needed = true;
				break;
			default:
				break;
		}

		if (junk_filter_needed)
		{
			/*
			 * If there are multiple result relations, each one needs its own
			 * junk filter.  Note this is only possible for UPDATE/DELETE, so
			 * we can't be fooled by some needing a filter and some not.
			 */
			if (list_length(plannedstmt->resultRelations) > 1)
			{
				PlanState **appendplans;
				int			as_nplans;
				ResultRelInfo *resultRelInfo;
				int			i;

				/* Top plan had better be an Append here. */
				Assert(IsA(plan, Append));
				Assert(((Append *) plan)->isTarget);
				Assert(IsA(planstate, AppendState));
				appendplans = ((AppendState *) planstate)->appendplans;
				as_nplans = ((AppendState *) planstate)->as_nplans;
				Assert(as_nplans == estate->es_num_result_relations);
				resultRelInfo = estate->es_result_relations;
				for (i = 0; i < as_nplans; i++)
				{
					PlanState  *subplan = appendplans[i];
					JunkFilter *j;

					j = ExecInitJunkFilter(subplan->plan->targetlist,
							resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
								  ExecAllocTableSlot(estate->es_tupleTable));

					/*
					 * Since it must be UPDATE/DELETE, there had better be
					 * a "ctid" junk attribute in the tlist ... but ctid could
					 * be at a different resno for each result relation.
					 * We look up the ctid resnos now and save them in the
					 * junkfilters.
					 */
					j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
					if (!AttributeNumberIsValid(j->jf_junkAttNo))
						elog(ERROR, "could not find junk ctid column");
					resultRelInfo->ri_junkFilter = j;
					resultRelInfo++;
				}

				/*
				 * Set active junkfilter too; at this point ExecInitAppend has
				 * already selected an active result relation...
				 */
				estate->es_junkFilter =
					estate->es_result_relation_info->ri_junkFilter;
			}
			else
			{
				/* Normal case with just one JunkFilter */
				JunkFilter *j;

				j = ExecInitJunkFilter(planstate->plan->targetlist,
									   tupType->tdhasoid,
								  ExecAllocTableSlot(estate->es_tupleTable));
				estate->es_junkFilter = j;
				if (estate->es_result_relation_info)
					estate->es_result_relation_info->ri_junkFilter = j;

				if (operation == CMD_SELECT)
				{
					/* For SELECT, want to return the cleaned tuple type */
					tupType = j->jf_cleanTupType;
					/* For SELECT FOR UPDATE/SHARE, find the ctid attrs now */
					foreach(l, estate->es_rowMarks)
					{
						ExecRowMark *erm = (ExecRowMark *) lfirst(l);
						char		resname[32];

						snprintf(resname, sizeof(resname), "ctid%u", erm->rti);
						erm->ctidAttNo = ExecFindJunkAttribute(j, resname);
						if (!AttributeNumberIsValid(erm->ctidAttNo))
							elog(ERROR, "could not find junk \"%s\" column",
								 resname);
					}
				}
				else if (operation == CMD_UPDATE || operation == CMD_DELETE)
				{
					/* For UPDATE/DELETE, find the ctid junk attr now */
					j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
					if (!AttributeNumberIsValid(j->jf_junkAttNo))
						elog(ERROR, "could not find junk ctid column");
				}
			}
		}
		else
			estate->es_junkFilter = NULL;
	}

	/*
	 * Initialize RETURNING projections if needed.
	 */
	if (plannedstmt->returningLists)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;
		ResultRelInfo *resultRelInfo;

		/*
		 * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case.
		 * We assume all the sublists will generate the same output tupdesc.
		 */
		tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists),
								 false);

		/* Set up a slot for the output of the RETURNING projection(s) */
		slot = ExecAllocTableSlot(estate->es_tupleTable);
		ExecSetSlotDescriptor(slot, tupType);
		/* Need an econtext too */
		econtext = CreateExprContext(estate);

		/*
		 * Build a projection for each result rel.  Note that any SubPlans in
		 * the RETURNING lists get attached to the topmost plan node.
		 */
		Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations);
		resultRelInfo = estate->es_result_relations;
		foreach(l, plannedstmt->returningLists)
		{
			List	   *rlist = (List *) lfirst(l);
			List	   *rliststate;

			rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
			resultRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(rliststate, econtext, slot,
									 resultRelInfo->ri_RelationDesc->rd_att);
			resultRelInfo++;
		}

		/*
		 * Because we already ran ExecInitNode() for the top plan node, any
		 * subplans we just attached to it won't have been initialized; so we
		 * have to do it here.  (Ugly, but the alternatives seem worse.)
		 */
		foreach(l, planstate->subPlan)
		{
			SubPlanState *sstate = (SubPlanState *) lfirst(l);

			Assert(IsA(sstate, SubPlanState));
			if (sstate->planstate == NULL)		/* already inited? */
				ExecInitSubPlan(sstate, estate, eflags);
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;

	/*
	 * If doing SELECT INTO, initialize the "into" relation.  We must wait
	 * till now so we have the "clean" result tuple type to create the new
	 * table from.
	 *
	 * If EXPLAIN, skip creating the "into" relation.
	 */
	if (estate->es_select_into && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		OpenIntoRel(queryDesc);
}
/*
 * Initialize ResultRelInfo data for one result relation
 */
static void
initResultRelInfo(ResultRelInfo *resultRelInfo,
				  Index resultRelationIndex,
				  List *rangeTable,
				  CmdType operation,
				  bool doInstrument)
{
	Oid			resultRelationOid;
	Relation	resultRelationDesc;

	resultRelationOid = getrelid(resultRelationIndex, rangeTable);
	resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);

	switch (resultRelationDesc->rd_rel->relkind)
	{
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
		case RELKIND_VIEW:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change view \"%s\"",
							RelationGetRelationName(resultRelationDesc))));
			break;
	}

	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		if (doInstrument)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n);
		else
			resultRelInfo->ri_TrigInstrument = NULL;
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;

	/*
	 * If there are indices on the result relation, open them and save
	 * descriptors in the result relation info, so that we can add new index
	 * entries for the tuples we add/update.  We need not do this for a
	 * DELETE, however, since deletion doesn't affect indexes.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		operation != CMD_DELETE)
		ExecOpenIndices(resultRelInfo);
}
/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or SELECT INTO,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns TRUE if the
 * choice is forced, FALSE if the choice is not forced.  In the TRUE case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that estate->es_result_relation_info is already set up to
 * describe the target relation.  Note that in an UPDATE that spans an
 * inheritance tree, some of the target relations may have OIDs and some not.
 * We have to make the decisions on a per-relation basis as we initialize
 * each of the child plans of the topmost Append plan.
 *
 * SELECT INTO is even uglier, because we don't have the INTO relation's
 * descriptor available when this code runs; we have to look aside at a
 * flag set by InitPlan().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
	if (planstate->state->es_select_into)
	{
		*hasoids = planstate->state->es_into_oids;
		return true;
	}
	else
	{
		ResultRelInfo *ri = planstate->state->es_result_relation_info;

		if (ri != NULL)
		{
			Relation	rel = ri->ri_RelationDesc;

			if (rel != NULL)
			{
				*hasoids = rel->rd_rel->relhasoids;
				return true;
			}
		}
	}

	return false;
}
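
/*
 * Illustrative only: a plan node deciding on its result tuple descriptor
 * might consult this routine roughly as follows (hedged sketch, not code
 * from the backend):
 *
 *		bool	hasoids;
 *
 *		if (!ExecContextForcesOids(planstate, &hasoids))
 *			hasoids = false;	-- choice not forced; omit the OID column
 */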
/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/*
	 * shut down any PlanQual processing we were doing
	 */
	if (estate->es_evalPlanQual != NULL)
		EndEvalPlanQual(estate);

	/*
	 * shut down the node-type-specific query processing
	 */
	ExecEndNode(planstate);

	/*
	 * destroy the executor "tuple" table.
	 */
	ExecDropTupleTable(estate->es_tupleTable, true);
	estate->es_tupleTable = NULL;

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/*
	 * close any relations selected FOR UPDATE/FOR SHARE, again keeping locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = lfirst(l);

		heap_close(erm->relation, NoLock);
	}
}
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		processes the query plan to retrieve 'numberTuples' tuples in the
 *		direction specified.
 *
 *		Retrieves all tuples if numberTuples is 0
 *
 *		result is either a slot containing the last tuple in the case
 *		of a SELECT or NULL otherwise.
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecutePlan(EState *estate,
			PlanState *planstate,
			CmdType operation,
			long numberTuples,
			ScanDirection direction,
			DestReceiver *dest)
{
	JunkFilter *junkfilter;
	TupleTableSlot *planSlot;
	TupleTableSlot *slot;
	ItemPointer tupleid = NULL;
	ItemPointerData tuple_ctid;
	long		current_tuple_count;
	TupleTableSlot *result;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;
	result = NULL;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * Process BEFORE EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecBSInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			break;
	}

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
lnext:	;
		if (estate->es_useEvalPlan)
		{
			planSlot = EvalPlanQualNext(estate);
			if (TupIsNull(planSlot))
				planSlot = ExecProcNode(planstate);
		}
		else
			planSlot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just return null...
		 */
		if (TupIsNull(planSlot))
			break;
		slot = planSlot;

		/*
		 * if we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 *
		 * Also, extract all the junk information we need.
		 */
		if ((junkfilter = estate->es_junkFilter) != NULL)
		{
			Datum		datum;
			bool		isNull;

			/*
			 * extract the 'ctid' junk attribute.
			 */
			if (operation == CMD_UPDATE || operation == CMD_DELETE)
			{
				datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
											 &isNull);
				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "ctid is NULL");

				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* make sure we don't free the ctid!! */
				tupleid = &tuple_ctid;
			}
			/*
			 * Process any FOR UPDATE or FOR SHARE locking requested.
			 */
			else if (estate->es_rowMarks != NIL)
			{
				ListCell   *l;

		lmark:	;
				foreach(l, estate->es_rowMarks)
				{
					ExecRowMark *erm = lfirst(l);
					HeapTupleData tuple;
					Buffer		buffer;
					ItemPointerData update_ctid;
					TransactionId update_xmax;
					TupleTableSlot *newSlot;
					LockTupleMode lockmode;
					HTSU_Result test;

					datum = ExecGetJunkAttribute(slot,
												 erm->ctidAttNo,
												 &isNull);
					/* shouldn't ever get a null result... */
					if (isNull)
						elog(ERROR, "ctid is NULL");

					tuple.t_self = *((ItemPointer) DatumGetPointer(datum));

					if (erm->forUpdate)
						lockmode = LockTupleExclusive;
					else
						lockmode = LockTupleShared;

					test = heap_lock_tuple(erm->relation, &tuple, &buffer,
										   &update_ctid, &update_xmax,
										   estate->es_snapshot->curcid,
										   lockmode, erm->noWait);
					ReleaseBuffer(buffer);
					switch (test)
					{
						case HeapTupleSelfUpdated:
							/* treat it as deleted; do not process */
							goto lnext;

						case HeapTupleMayBeUpdated:
							break;

						case HeapTupleUpdated:
							if (IsXactIsoLevelSerializable)
								ereport(ERROR,
								 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								  errmsg("could not serialize access due to concurrent update")));
							if (!ItemPointerEquals(&update_ctid,
												   &tuple.t_self))
							{
								/* updated, so look at updated version */
								newSlot = EvalPlanQual(estate,
													   erm->rti,
													   &update_ctid,
													   update_xmax,
											   estate->es_snapshot->curcid);
								if (!TupIsNull(newSlot))
								{
									slot = planSlot = newSlot;
									estate->es_useEvalPlan = true;
									goto lmark;
								}
							}

							/*
							 * if tuple was deleted or PlanQual failed for
							 * updated tuple - we must not return this tuple!
							 */
							goto lnext;

						default:
							elog(ERROR, "unrecognized heap_lock_tuple status: %u",
								 test);
					}
				}
			}

			/*
			 * Create a new "clean" tuple with all junk attributes removed. We
			 * don't need to do this for DELETE, however (there will in fact
			 * be no non-junk attributes in a DELETE!)
			 */
			if (operation != CMD_DELETE)
				slot = ExecFilterJunk(junkfilter, slot);
		}

		/*
		 * now that we have a tuple, do the appropriate thing with it.. either
		 * return it to the user, add it to a relation someplace, delete it
		 * from a relation, or modify some of its attributes.
		 */
		switch (operation)
		{
			case CMD_SELECT:
				ExecSelect(slot, dest, estate);
				result = slot;
				break;
			case CMD_INSERT:
				ExecInsert(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;
			case CMD_DELETE:
				ExecDelete(tupleid, planSlot, dest, estate);
				result = NULL;
				break;
			case CMD_UPDATE:
				ExecUpdate(slot, tupleid, planSlot, dest, estate);
				result = NULL;
				break;
			default:
				elog(ERROR, "unrecognized operation code: %d",
					 (int) operation);
				result = NULL;
				break;
		}

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * Process AFTER EACH STATEMENT triggers
	 */
	switch (operation)
	{
		case CMD_UPDATE:
			ExecASUpdateTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(estate, estate->es_result_relation_info);
			break;
		case CMD_INSERT:
			ExecASInsertTriggers(estate, estate->es_result_relation_info);
			break;
		default:
			break;
	}

	/*
	 * here, result is either a slot containing a tuple in the case of a
	 * SELECT or NULL otherwise.
	 */
	return result;
}
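
/*
 * Illustrative only: ExecutorRun above invokes this loop roughly as
 *
 *		result = ExecutePlan(estate, queryDesc->planstate, operation,
 *							 count, direction, dest);
 *
 * so count = 3 returns after the third tuple has been processed, while
 * count = 0 runs the plan to completion.  (Hedged sketch restating the
 * call site, not new backend code.)
 */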
/* ----------------------------------------------------------------
 *		ExecSelect
 *
 *		SELECTs are easy.. we just pass the tuple to the appropriate
 *		output function.
 * ----------------------------------------------------------------
 */
static void
ExecSelect(TupleTableSlot *slot,
		   DestReceiver *dest,
		   EState *estate)
{
	(*dest->receiveSlot) (slot, dest);
	IncrRetrieved();
	(estate->es_processed)++;
}
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		INSERTs are trickier.. we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 * ----------------------------------------------------------------
 */
static void
ExecInsert(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	Oid			newId;

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * insert the tuple
	 *
	 * Note: heap_insert returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	newId = heap_insert(resultRelationDesc, tuple,
						estate->es_snapshot->curcid,
						true, true);

	IncrAppended();
	(estate->es_processed)++;
	estate->es_lastoid = newId;
	setLastTid(&(tuple->t_self));

	/*
	 * insert index entries for tuple
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
/* ----------------------------------------------------------------
 *		ExecDelete
 *
 *		DELETE is like UPDATE, except that we delete the tuple and no
 *		index modifications are needed
 * ----------------------------------------------------------------
 */
static void
ExecDelete(ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
	{
		bool		dodelete;

		dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
										estate->es_snapshot->curcid);

		if (!dodelete)			/* "do nothing" */
			return;
	}

	/*
	 * delete the tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be deleted is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
ldelete:;
	result = heap_delete(resultRelationDesc, tupleid,
						 &update_ctid, &update_xmax,
						 estate->es_snapshot->curcid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax,
									   estate->es_snapshot->curcid);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					goto ldelete;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			return;
	}

	IncrDeleted();
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now...
	 *
	 * ... but in POSTGRES, we have no need to do this because VACUUM will
	 * take care of it later.  We can't delete index tuples immediately
	 * anyway, since the tuple is still visible to other transactions.
	 */

	/* AFTER ROW DELETE Triggers */
	ExecARDeleteTriggers(estate, resultRelInfo, tupleid);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it.  We can use the trigger tuple slot.
		 */
		TupleTableSlot *slot = estate->es_trig_tuple_slot;
		HeapTupleData deltuple;
		Buffer		delbuffer;

		deltuple.t_self = *tupleid;
		if (!heap_fetch(resultRelationDesc, SnapshotAny,
						&deltuple, &delbuffer, false, NULL))
			elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");

		if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
			ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
		ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);

		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);

		ExecClearTuple(slot);
		ReleaseBuffer(delbuffer);
	}
}
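
/*
 * Illustrative only: the heap_fetch with SnapshotAny above is what lets
 *
 *		DELETE FROM tab WHERE ... RETURNING *;
 *
 * project the just-deleted row even though it is no longer visible to the
 * query's snapshot.  (Hedged example, not text from the backend.)
 */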
/* ----------------------------------------------------------------
 *		ExecUpdate
 *
 *		note: we can't run UPDATE queries with transactions
 *		off because UPDATEs are actually INSERTs and our
 *		scan will mistakenly loop forever, updating the tuple
 *		it just inserted..  This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
static void
ExecUpdate(TupleTableSlot *slot,
		   ItemPointer tupleid,
		   TupleTableSlot *planSlot,
		   DestReceiver *dest,
		   EState *estate)
{
	HeapTuple	tuple;
	ResultRelInfo *resultRelInfo;
	Relation	resultRelationDesc;
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * abort the operation if not running transactions
	 */
	if (IsBootstrapProcessingMode())
		elog(ERROR, "cannot UPDATE during bootstrap");

	/*
	 * get the heap tuple out of the tuple table slot, making sure we have a
	 * writable copy
	 */
	tuple = ExecMaterializeSlot(slot);

	/*
	 * get information on the (current) result relation
	 */
	resultRelInfo = estate->es_result_relation_info;
	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/* BEFORE ROW UPDATE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
	{
		HeapTuple	newtuple;

		newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
										tupleid, tuple,
										estate->es_snapshot->curcid);

		if (newtuple == NULL)	/* "do nothing" */
			return;

		if (newtuple != tuple)	/* modified by Trigger(s) */
		{
			/*
			 * Put the modified tuple into a slot for convenience of routines
			 * below.  We assume the tuple was allocated in per-tuple memory
			 * context, and therefore will go away by itself. The tuple table
			 * slot should not try to clear it.
			 */
			TupleTableSlot *newslot = estate->es_trig_tuple_slot;

			if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor)
				ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor);
			ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
			slot = newslot;
			tuple = newtuple;
		}
	}

	/*
	 * Check the constraints of the tuple
	 *
	 * If we generate a new candidate tuple after EvalPlanQual testing, we
	 * must loop back here and recheck constraints.  (We don't need to redo
	 * triggers, however.  If there are any BEFORE triggers then trigger.c
	 * will have done heap_lock_tuple to lock the correct tuple, so there's no
	 * need to do them again.)
	 */
lreplace:;
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a can't-
	 * serialize error if not.  This is a special-case behavior needed for
	 * referential integrity updates in serializable transactions.
	 */
	result = heap_update(resultRelationDesc, tupleid, tuple,
						 &update_ctid, &update_xmax,
						 estate->es_snapshot->curcid,
						 estate->es_crosscheck_snapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* already deleted by self; nothing to do */
			return;

		case HeapTupleMayBeUpdated:
			break;

		case HeapTupleUpdated:
			if (IsXactIsoLevelSerializable)
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));
			else if (!ItemPointerEquals(tupleid, &update_ctid))
			{
				TupleTableSlot *epqslot;

				epqslot = EvalPlanQual(estate,
									   resultRelInfo->ri_RangeTableIndex,
									   &update_ctid,
									   update_xmax,
									   estate->es_snapshot->curcid);
				if (!TupIsNull(epqslot))
				{
					*tupleid = update_ctid;
					slot = ExecFilterJunk(estate->es_junkFilter, epqslot);
					tuple = ExecMaterializeSlot(slot);
					goto lreplace;
				}
			}
			/* tuple already deleted; nothing to do */
			return;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			return;
	}

	IncrReplaced();
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated with
	 * the heap tuple, all we do is form and insert new index tuples. This is
	 * because UPDATEs are actually DELETEs and INSERTs, and index tuple
	 * deletion is done later by VACUUM (see notes in ExecDelete).  All we do
	 * here is insert new index tuples.  -cim 9/27/89
	 */

	/*
	 * insert index entries for tuple
	 *
	 * Note: heap_update returns the tid (location) of the new tuple in the
	 * t_self field.
	 */
	if (resultRelInfo->ri_NumIndices > 0)
		ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);

	/* AFTER ROW UPDATE Triggers */
	ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		ExecProcessReturning(resultRelInfo->ri_projectReturning,
							 slot, planSlot, dest);
}
/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	List	   *qual;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(List **) palloc(ncheck * sizeof(List *));
		for (i = 0; i < ncheck; i++)
		{
			/* ExecQual wants implicit-AND form */
			qual = make_ands_implicit(stringToNode(check[i].ccbin));
			resultRelInfo->ri_ConstraintExprs[i] = (List *)
				ExecPrepareExpr((Expr *) qual, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		qual = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL92 specifies that a NULL result from a constraint
		 * expression is not to be treated as a failure.  Therefore, tell
		 * ExecQual to return TRUE for NULL.
		 */
		if (!ExecQual(qual, econtext, true))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}
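
/*
 * Illustrative only: per the SQL92 rule applied above, a CHECK constraint
 * that evaluates to NULL passes.  For example, with CHECK (price > 0),
 * inserting a row whose price is NULL is accepted, because the NULL
 * result is not treated as a failure.  (Hedged example, not text from
 * the backend.)
 */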
void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleConstr *constr = rel->rd_att->constr;

	Assert(constr);

	if (constr->has_not_null)
	{
		int			natts = rel->rd_att->natts;
		int			attrChk;

		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
				slot_attisnull(slot, attrChk))
				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" violates not-null constraint",
						NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
		}
	}

	if (constr->num_check > 0)
	{
		const char *failed;

		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(rel), failed)));
	}
}
/*
 * ExecProcessReturning --- evaluate a RETURNING list and send to dest
 *
 * projectReturning: RETURNING projection info for current result rel
 * tupleSlot: slot holding tuple actually inserted/updated/deleted
 * planSlot: slot holding tuple returned by top plan node
 * dest: where to send the output
 */
static void
ExecProcessReturning(ProjectionInfo *projectReturning,
					 TupleTableSlot *tupleSlot,
					 TupleTableSlot *planSlot,
					 DestReceiver *dest)
{
	ExprContext *econtext = projectReturning->pi_exprContext;
	TupleTableSlot *retSlot;

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous cycle.
	 */
	ResetExprContext(econtext);

	/* Make tuple and any needed join variables available to ExecProject */
	econtext->ecxt_scantuple = tupleSlot;
	econtext->ecxt_outertuple = planSlot;

	/* Compute the RETURNING expressions */
	retSlot = ExecProject(projectReturning, NULL);

	/* Send to dest */
	(*dest->receiveSlot) (retSlot, dest);

	ExecClearTuple(retSlot);
}
/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 *
 * Inputs:
 *	estate - executor state data
 *	rti - rangetable index of table containing tuple
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *	curCid - command ID of current command of my transaction
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
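
/*
 * Illustrative only: the READ COMMITTED scenario this implements.  Session
 * A runs "UPDATE t SET n = n + 1 WHERE k = 1" and commits while session
 * B's identical UPDATE waits on the row lock; B then re-evaluates its
 * quals against the *updated* row version found by chasing t_ctid, rather
 * than raising a serialization error as a SERIALIZABLE transaction would.
 * (Hedged example, not text from the backend.)
 */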
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
			 ItemPointer tid, TransactionId priorXmax, CommandId curCid)
{
	evalPlanQual *epq;
	EState	   *epqstate;
	Relation	relation;
	HeapTupleData tuple;
	HeapTuple	copyTuple = NULL;
	bool		endNode;

	Assert(rti != 0);

	/*
	 * find relation containing target tuple
	 */
	if (estate->es_result_relation_info != NULL &&
		estate->es_result_relation_info->ri_RangeTableIndex == rti)
		relation = estate->es_result_relation_info->ri_RelationDesc;
	else
	{
		ListCell   *l;

		relation = NULL;
		foreach(l, estate->es_rowMarks)
		{
			if (((ExecRowMark *) lfirst(l))->rti == rti)
			{
				relation = ((ExecRowMark *) lfirst(l))->relation;
				break;
			}
		}
		if (relation == NULL)
			elog(ERROR, "could not find RowMark for RT index %u", rti);
	}

	/*
	 * fetch tid tuple
	 *
	 * Loop here to deal with updated or busy tuples
	 */
	tuple.t_self = *tid;
	for (;;)
	{
		Buffer		buffer;

		if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
		{
			/*
			 * If xmin isn't what we're expecting, the slot must have been
			 * recycled and reused for an unrelated tuple.  This implies that
			 * the latest version of the row was deleted, so we need do
			 * nothing.  (Should be safe to examine xmin without getting
			 * buffer's content lock, since xmin never changes in an existing
			 * tuple.)
			 */
			if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
									 priorXmax))
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/* otherwise xmin should not be dirty... */
			if (TransactionIdIsValid(SnapshotDirty->xmin))
				elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

			/*
			 * If tuple is being updated by other transaction then we have to
			 * wait for its commit/abort.
			 */
			if (TransactionIdIsValid(SnapshotDirty->xmax))
			{
				ReleaseBuffer(buffer);
				XactLockTableWait(SnapshotDirty->xmax);
				continue;		/* loop back to repeat heap_fetch */
			}

			/*
			 * If tuple was inserted by our own transaction, we have to check
			 * cmin against curCid: cmin >= curCid means our command cannot
			 * see the tuple, so we should ignore it.  Without this we are
			 * open to the "Halloween problem" of indefinitely re-updating the
			 * same tuple.  (We need not check cmax because
			 * HeapTupleSatisfiesDirty will consider a tuple deleted by our
			 * transaction dead, regardless of cmax.)  We just checked that
			 * priorXmax == xmin, so we can test that variable instead of
			 * doing HeapTupleHeaderGetXmin again.
			 */
			if (TransactionIdIsCurrentTransactionId(priorXmax) &&
				HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
			{
				ReleaseBuffer(buffer);
				return NULL;
			}

			/*
			 * We got tuple - now copy it for use by recheck query.
			 */
			copyTuple = heap_copytuple(&tuple);
			ReleaseBuffer(buffer);
			break;
		}

		/*
		 * If the referenced slot was actually empty, the latest version of
		 * the row must have been deleted, so we need do nothing.
		 */
		if (tuple.t_data == NULL)
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * As above, if xmin isn't what we're expecting, do nothing.
		 */
		if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
								 priorXmax))
		{
			ReleaseBuffer(buffer);
			return NULL;
		}

		/*
		 * If we get here, the tuple was found but failed SnapshotDirty.
		 * Assuming the xmin is either a committed xact or our own xact (as it
		 * certainly should be if we're trying to modify the tuple), this must
		 * mean that the row was updated or deleted by either a committed xact
		 * or our own xact.  If it was deleted, we can ignore it; if it was
		 * updated then chain up to the next version and repeat the whole
		 * test.
		 *
		 * As above, it should be safe to examine xmax and t_ctid without the
		 * buffer content lock, because they can't be changing.
		 */
		if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
		{
			/* deleted, so forget about it */
			ReleaseBuffer(buffer);
			return NULL;
		}

		/* updated, so look at the updated row */
		tuple.t_self = tuple.t_data->t_ctid;
		/* updated row should have xmin matching this xmax */
		priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
		ReleaseBuffer(buffer);
		/* loop back to fetch next in chain */
	}

	/*
	 * For UPDATE/DELETE we have to return tid of actual row we're executing
	 * PQ for.
	 */
	*tid = tuple.t_self;

	/*
	 * Need to run a recheck subquery.  Find or create a PQ stack entry.
	 */
	epq = estate->es_evalPlanQual;
	endNode = true;

	if (epq != NULL && epq->rti == 0)
	{
		/* Top PQ stack entry is idle, so re-use it */
		Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
		epq->rti = rti;
		endNode = false;
	}

	/*
	 * If this is a request for another RTE, Ra, then we must check whether
	 * PlanQual was already requested for Ra.  If so, Ra's row was updated
	 * again, and we have to restart the old execution for Ra, forgetting
	 * everything done after Ra was suspended.
	 */
	if (epq != NULL && epq->rti != rti &&
		epq->estate->es_evTuple[rti - 1] != NULL)
	{
		do
		{
			evalPlanQual *oldepq;

			/* stop execution */
			EvalPlanQualStop(epq);
			/* pop previous PlanQual from the stack */
			oldepq = epq->next;
			Assert(oldepq && oldepq->rti != 0);
			/* push current PQ to freePQ stack */
			oldepq->free = epq;
			epq = oldepq;
			estate->es_evalPlanQual = epq;
		} while (epq->rti != rti);
	}

	/*
	 * If we are requested for another RTE then we have to suspend execution
	 * of current PlanQual and start execution for new one.
	 */
	if (epq == NULL || epq->rti != rti)
	{
		/* try to reuse plan used previously */
		evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;

		if (newepq == NULL)		/* first call or freePQ stack is empty */
		{
			newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
			newepq->free = NULL;
			newepq->estate = NULL;
			newepq->planstate = NULL;
		}
		else
		{
			/* recycle previously used PlanQual */
			Assert(newepq->estate == NULL);
			epq->free = NULL;
		}
		/* push current PQ to the stack */
		newepq->next = epq;
		epq = newepq;
		estate->es_evalPlanQual = epq;
		epq->rti = rti;
		endNode = false;
	}

	Assert(epq->rti == rti);

	/*
	 * Ok - we're requested for the same RTE.  Unfortunately we still have to
	 * end and restart execution of the plan, because ExecReScan wouldn't
	 * ensure that upper plan nodes would reset themselves.  We could make
	 * that work if insertion of the target tuple were integrated with the
	 * Param mechanism somehow, so that the upper plan nodes know that their
	 * children's outputs have changed.
	 *
	 * Note that the stack of free evalPlanQual nodes is quite useless at the
	 * moment, since it only saves us from pallocing/releasing the
	 * evalPlanQual nodes themselves.  But it will be useful once we implement
	 * ReScan instead of end/restart for re-using PlanQual nodes.
	 */
	if (endNode)
	{
		/* stop execution */
		EvalPlanQualStop(epq);
	}

	/*
	 * Initialize new recheck query.
	 *
	 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
	 * instead copy down changeable state from the top plan (including
	 * es_result_relation_info, es_junkFilter) and reset locally changeable
	 * state in the epq (including es_param_exec_vals, es_evTupleNull).
	 */
	EvalPlanQualStart(epq, estate, epq->next);

	/*
	 * free old RTE's tuple, if any, and store target tuple where relation's
	 * scan node will see it
	 */
	epqstate = epq->estate;
	if (epqstate->es_evTuple[rti - 1] != NULL)
		heap_freetuple(epqstate->es_evTuple[rti - 1]);
	epqstate->es_evTuple[rti - 1] = copyTuple;

	return EvalPlanQualNext(estate);
}
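
/*
 * Illustrative only: ExecUpdate/ExecDelete above use this as a retry loop
 * (hedged sketch restating those call sites, not new code):
 *
 *		epqslot = EvalPlanQual(estate, rti, &update_ctid, update_xmax, curcid);
 *		if (!TupIsNull(epqslot))
 *			goto lreplace;		-- redo the operation on the new row version
 */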
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;
	MemoryContext oldcontext;
	TupleTableSlot *slot;

	Assert(epq->rti != 0);

lpqnext:;
	oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
	slot = ExecProcNode(epq->planstate);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * No more tuples for this PQ. Continue previous one.
	 */
	if (TupIsNull(slot))
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			/* and continue Query execution */
			return NULL;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
		goto lpqnext;
	}

	return slot;
}
static void
EndEvalPlanQual(EState *estate)
{
	evalPlanQual *epq = estate->es_evalPlanQual;

	if (epq->rti == 0)			/* plans already shut down */
	{
		Assert(epq->next == NULL);
		return;
	}

	for (;;)
	{
		evalPlanQual *oldepq;

		/* stop execution */
		EvalPlanQualStop(epq);
		/* pop old PQ from the stack */
		oldepq = epq->next;
		if (oldepq == NULL)
		{
			/* this is the first (oldest) PQ - mark as free */
			epq->rti = 0;
			estate->es_useEvalPlan = false;
			break;
		}
		Assert(oldepq->rti != 0);
		/* push current PQ to freePQ stack */
		oldepq->free = epq;
		epq = oldepq;
		estate->es_evalPlanQual = epq;
	}
}
/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
2203 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
2207 MemoryContext oldcontext;
2209 rtsize = list_length(estate->es_range_table);
2212 * It's tempting to think about using CreateSubExecutorState here, but
2213 * at present we can't because of memory leakage concerns ...
2215 epq->estate = epqstate = CreateExecutorState();
2217 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2220 * The epqstates share the top query's copy of unchanging state such as
2221 * the snapshot, rangetable, result-rel info, and external Param info.
2222 * They need their own copies of local state, including a tuple table,
2223 * es_param_exec_vals, etc.
2225 epqstate->es_direction = ForwardScanDirection;
2226 epqstate->es_snapshot = estate->es_snapshot;
2227 epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
2228 epqstate->es_range_table = estate->es_range_table;
2229 epqstate->es_result_relations = estate->es_result_relations;
2230 epqstate->es_num_result_relations = estate->es_num_result_relations;
2231 epqstate->es_result_relation_info = estate->es_result_relation_info;
2232 epqstate->es_junkFilter = estate->es_junkFilter;
2233 epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2234 epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
2235 epqstate->es_param_list_info = estate->es_param_list_info;
2236 if (estate->es_plannedstmt->nParamExec > 0)
2237 epqstate->es_param_exec_vals = (ParamExecData *)
2238 palloc0(estate->es_plannedstmt->nParamExec * sizeof(ParamExecData));
2239 epqstate->es_rowMarks = estate->es_rowMarks;
2240 epqstate->es_instrument = estate->es_instrument;
2241 epqstate->es_select_into = estate->es_select_into;
2242 epqstate->es_into_oids = estate->es_into_oids;
2243 epqstate->es_plannedstmt = estate->es_plannedstmt;
2246 * Each epqstate must have its own es_evTupleNull state, but all the stack
2247 * entries share es_evTuple state. This allows sub-rechecks to inherit
2248 * the value being examined by an outer recheck.
2250 epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2251 if (priorepq == NULL)
2252 /* first PQ stack entry */
2253 epqstate->es_evTuple = (HeapTuple *)
2254 palloc0(rtsize * sizeof(HeapTuple));
2256 /* later stack entries share the same storage */
2257 epqstate->es_evTuple = priorepq->estate->es_evTuple;
2259 epqstate->es_tupleTable =
2260 ExecCreateTupleTable(estate->es_tupleTable->size);
2262 epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0);
2264 MemoryContextSwitchTo(oldcontext);
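/*
 * Illustrative note (not in the original file): with two stacked rechecks,
 * the state set up above looks roughly like
 *
 *		top estate ------------ snapshot, range table, result rels
 *								(shared by every level, never copied)
 *		epq level 1: es_evTuple --+
 *		epq level 2: es_evTuple --+--> one shared HeapTuple array
 *					 es_evTupleNull -> private bool array per level
 */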
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
	EState	   *epqstate = epq->estate;
	MemoryContext oldcontext;

	oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

	ExecEndNode(epq->planstate);

	ExecDropTupleTable(epqstate->es_tupleTable, true);
	epqstate->es_tupleTable = NULL;

	if (epqstate->es_evTuple[epq->rti - 1] != NULL)
	{
		heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
		epqstate->es_evTuple[epq->rti - 1] = NULL;
	}

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(epqstate);

	epq->estate = NULL;
	epq->planstate = NULL;
}
/*
 * Support for SELECT INTO (a/k/a CREATE TABLE AS)
 *
 * We implement SELECT INTO by diverting SELECT's normal output with
 * a specialized DestReceiver type.
 *
 * TODO: remove some of the INTO-specific cruft from EState, and keep
 * it in the DestReceiver instead.
 */

typedef struct
{
	DestReceiver pub;			/* publicly-known function pointers */
	EState	   *estate;			/* EState we are working with */
} DR_intorel;
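/*
 * For example (illustrative, not from the original file), these two
 * statements are handled by the machinery below and are equivalent:
 *
 *		SELECT x, y INTO newtab FROM oldtab WHERE x > 0;
 *		CREATE TABLE newtab AS SELECT x, y FROM oldtab WHERE x > 0;
 */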
/*
 * OpenIntoRel --- actually create the SELECT INTO target relation
 *
 * This also replaces QueryDesc->dest with the special DestReceiver for
 * SELECT INTO.  We assume that the correct result tuple type has already
 * been placed in queryDesc->tupDesc.
 */
static void
OpenIntoRel(QueryDesc *queryDesc)
{
	IntoClause *into = queryDesc->plannedstmt->into;
	EState	   *estate = queryDesc->estate;
	Relation	intoRelationDesc;
	char	   *intoName;
	Oid			namespaceId;
	Oid			tablespaceId;
	Datum		reloptions;
	AclResult	aclresult;
	Oid			intoRelationId;
	TupleDesc	tupdesc;
	DR_intorel *myState;

	Assert(into);

	/*
	 * Check consistency of arguments
	 */
	if (into->onCommit != ONCOMMIT_NOOP && !into->rel->istemp)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
				 errmsg("ON COMMIT can only be used on temporary tables")));

	/*
	 * Find namespace to create in, check its permissions
	 */
	intoName = into->rel->relname;
	namespaceId = RangeVarGetCreationNamespace(into->rel);

	aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
									  ACL_CREATE);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
					   get_namespace_name(namespaceId));

	/*
	 * Select tablespace to use.  If not specified, use default_tablespace
	 * (which may in turn default to database's default).
	 */
	if (into->tableSpaceName)
	{
		tablespaceId = get_tablespace_oid(into->tableSpaceName);
		if (!OidIsValid(tablespaceId))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("tablespace \"%s\" does not exist",
							into->tableSpaceName)));
	}
	else if (into->rel->istemp)
	{
		tablespaceId = GetTempTablespace();
	}
	else
	{
		tablespaceId = GetDefaultTablespace();
		/* note InvalidOid is OK in this case */
	}

	/* Check permissions except when using the database's default space */
	if (OidIsValid(tablespaceId))
	{
		AclResult	aclresult;

		aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(),
										   ACL_CREATE);

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
						   get_tablespace_name(tablespaceId));
	}

	/* Parse and validate any reloptions */
	reloptions = transformRelOptions((Datum) 0,
									 into->options,
									 true,
									 false);
	(void) heap_reloptions(RELKIND_RELATION, reloptions, true);

	/* have to copy the actual tupdesc to get rid of any constraints */
	tupdesc = CreateTupleDescCopy(queryDesc->tupDesc);

	/* Now we can actually create the new relation */
	intoRelationId = heap_create_with_catalog(intoName,
											  namespaceId,
											  tablespaceId,
											  InvalidOid,
											  GetUserId(),
											  tupdesc,
											  RELKIND_RELATION,
											  false,
											  true,
											  0,
											  into->onCommit,
											  reloptions,
											  allowSystemTableMods);

	FreeTupleDesc(tupdesc);

	/*
	 * Advance command counter so that the newly-created relation's catalog
	 * tuples will be visible to heap_open.
	 */
	CommandCounterIncrement();

	/*
	 * If necessary, create a TOAST table for the INTO relation. Note that
	 * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
	 * the TOAST table will be visible for insertion.
	 */
	AlterTableCreateToastTable(intoRelationId);

	/*
	 * And open the constructed table for writing.
	 */
	intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);

	/* use_wal off requires rd_targblock be initially invalid */
	Assert(intoRelationDesc->rd_targblock == InvalidBlockNumber);

	/*
	 * We can skip WAL-logging the insertions, unless PITR is in use.
	 *
	 * Note that for a non-temp INTO table, this is safe only because we know
	 * that the catalog changes above will have been WAL-logged, and so
	 * RecordTransactionCommit will think it needs to WAL-log the eventual
	 * transaction commit.  Else the commit might be lost, even though all the
	 * data is safely fsync'd ...
	 */
	estate->es_into_relation_use_wal = XLogArchivingActive();
	estate->es_into_relation_descriptor = intoRelationDesc;

	/*
	 * Now replace the query's DestReceiver with one for SELECT INTO
	 */
	queryDesc->dest = CreateDestReceiver(DestIntoRel, NULL);
	myState = (DR_intorel *) queryDesc->dest;
	Assert(myState->pub.mydest == DestIntoRel);
	myState->estate = estate;
}
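/*
 * Example (illustrative, not from the original): with WAL archiving off,
 *
 *		SELECT * INTO newtab FROM bigtab;
 *
 * writes newtab's heap pages with no per-tuple WAL records, so CloseIntoRel
 * below must heap_sync() the relation before commit.  With archiving
 * enabled, es_into_relation_use_wal is true and the inserts are WAL-logged
 * as usual, and no final sync is needed.
 */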
/*
 * CloseIntoRel --- clean up SELECT INTO at ExecutorEnd time
 */
static void
CloseIntoRel(QueryDesc *queryDesc)
{
	EState	   *estate = queryDesc->estate;

	/* OpenIntoRel might never have gotten called */
	if (estate->es_into_relation_descriptor)
	{
		/*
		 * If we skipped using WAL, and it's not a temp relation, we must
		 * force the relation down to disk before it's safe to commit the
		 * transaction.  This requires forcing out any dirty buffers and then
		 * doing a forced fsync.
		 */
		if (!estate->es_into_relation_use_wal &&
			!estate->es_into_relation_descriptor->rd_istemp)
			heap_sync(estate->es_into_relation_descriptor);

		/* close rel, but keep lock until commit */
		heap_close(estate->es_into_relation_descriptor, NoLock);

		estate->es_into_relation_descriptor = NULL;
	}
}
/*
 * CreateIntoRelDestReceiver -- create a suitable DestReceiver object
 *
 * Since CreateDestReceiver doesn't accept the parameters we'd need,
 * we just leave the private fields empty here.  OpenIntoRel will
 * fill them in.
 */
DestReceiver *
CreateIntoRelDestReceiver(void)
{
	DR_intorel *self = (DR_intorel *) palloc(sizeof(DR_intorel));

	self->pub.receiveSlot = intorel_receive;
	self->pub.rStartup = intorel_startup;
	self->pub.rShutdown = intorel_shutdown;
	self->pub.rDestroy = intorel_destroy;
	self->pub.mydest = DestIntoRel;

	self->estate = NULL;

	return (DestReceiver *) self;
}
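#ifdef NOT_USED
/*
 * A minimal sketch of the DestReceiver pattern used above, with hypothetical
 * names (DR_counter, counter_*) that are not part of PostgreSQL: embed the
 * public struct as the first field so the pointer can be cast both ways,
 * keep private state after it, and point the four callbacks at local
 * functions, exactly as CreateIntoRelDestReceiver does.
 */
typedef struct
{
	DestReceiver pub;			/* must be first field */
	uint64		ntuples;		/* private state: tuples received */
} DR_counter;

static void
counter_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* nothing to set up */
}

static void
counter_receive(TupleTableSlot *slot, DestReceiver *self)
{
	/* just count the tuple; a real receiver would consume the slot here */
	((DR_counter *) self)->ntuples++;
}

static void
counter_shutdown(DestReceiver *self)
{
	/* nothing to tear down */
}

static void
counter_destroy(DestReceiver *self)
{
	pfree(self);
}
#endif   /* NOT_USED */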
/*
 * intorel_startup --- executor startup
 */
static void
intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
	/* no-op */
}

/*
 * intorel_receive --- receive one tuple
 */
static void
intorel_receive(TupleTableSlot *slot, DestReceiver *self)
{
	DR_intorel *myState = (DR_intorel *) self;
	EState	   *estate = myState->estate;
	HeapTuple	tuple;

	tuple = ExecCopySlotTuple(slot);

	heap_insert(estate->es_into_relation_descriptor,
				tuple,
				estate->es_snapshot->curcid,
				estate->es_into_relation_use_wal,
				false);			/* never any point in using FSM */

	/* We know this is a newly created relation, so there are no indexes */

	heap_freetuple(tuple);

	IncrAppended();
}

/*
 * intorel_shutdown --- executor end
 */
static void
intorel_shutdown(DestReceiver *self)
{
	/* no-op */
}

/*
 * intorel_destroy --- release DestReceiver object
 */
static void
intorel_destroy(DestReceiver *self)
{
	pfree(self);
}