1 /*-------------------------------------------------------------------------
4 * top level executor interface routines
11 * The old ExecutorMain() has been replaced by ExecutorStart(),
12 * ExecutorRun() and ExecutorEnd()
14 * These three procedures are the external interfaces to the executor.
15 * In each case, the query descriptor is required as an argument.
17 * ExecutorStart() must be called at the beginning of execution of any
18 * query plan and ExecutorEnd() should always be called at the end of
19 * execution of a plan.
21 * ExecutorRun accepts direction and count arguments that specify whether
22 * the plan is to be executed forwards, backwards, and for how many tuples.
24 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
25 * Portions Copyright (c) 1994, Regents of the University of California
29 * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.197 2003/01/10 22:03:27 petere Exp $
31 *-------------------------------------------------------------------------
35 #include "access/heapam.h"
36 #include "catalog/heap.h"
37 #include "catalog/namespace.h"
38 #include "commands/tablecmds.h"
39 #include "commands/trigger.h"
40 #include "executor/execdebug.h"
41 #include "executor/execdefs.h"
42 #include "miscadmin.h"
43 #include "optimizer/var.h"
44 #include "parser/parsetree.h"
45 #include "utils/acl.h"
46 #include "utils/lsyscache.h"
49 typedef struct execRowMark
56 typedef struct evalPlanQual
61 struct evalPlanQual *next; /* stack of active PlanQual plans */
62 struct evalPlanQual *free; /* list of free PlanQual plans */
65 /* decls for local routines only used within this module */
66 static void InitPlan(QueryDesc *queryDesc);
67 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
68 Index resultRelationIndex,
71 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
74 ScanDirection direction,
75 DestReceiver *destfunc);
76 static void ExecSelect(TupleTableSlot *slot,
77 DestReceiver *destfunc,
79 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
81 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
83 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
85 static TupleTableSlot *EvalPlanQualNext(EState *estate);
86 static void EndEvalPlanQual(EState *estate);
87 static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
88 static void ExecCheckXactReadOnly(Query *parsetree, CmdType operation);
89 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
90 evalPlanQual *priorepq);
91 static void EvalPlanQualStop(evalPlanQual *epq);
93 /* end of local decls */
96 /* ----------------------------------------------------------------
99 * This routine must be called at the beginning of any execution of any
102 * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
103 * clear why we bother to separate the two functions, but...). The tupDesc
104 * field of the QueryDesc is filled in to describe the tuples that will be
105 * returned, and the internal fields (estate and planstate) are set up.
107 * NB: the CurrentMemoryContext when this is called will become the parent
108 * of the per-query context used for this Executor invocation.
109 * ----------------------------------------------------------------
/*
 * ExecutorStart
 *		One-time setup for execution of a query plan.
 *
 * Builds the per-query EState, attaches it to the QueryDesc, copies any
 * caller-supplied parameters and the current query snapshot into it, and
 * initializes the plan state tree (the InitPlan call is elided in this
 * excerpt).  CurrentMemoryContext at call time becomes the parent of the
 * per-query context.
 *
 * NOTE(review): this excerpt elides lines of the original file; code below
 * is left byte-identical to what is visible.
 */
112 ExecutorStart(QueryDesc *queryDesc)
115 MemoryContext oldcontext;
117 /* sanity checks: queryDesc must not be started already */
118 Assert(queryDesc != NULL);
119 Assert(queryDesc->estate == NULL);
122 * Build EState, switch into per-query memory context for startup.
124 estate = CreateExecutorState();
125 queryDesc->estate = estate;
127 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
130 * Fill in parameters, if any, from queryDesc
132 estate->es_param_list_info = queryDesc->params;
/* allocate exec-param array only when the plan actually uses PARAM_EXEC */
134 if (queryDesc->plantree->nParamExec > 0)
135 estate->es_param_exec_vals = (ParamExecData *)
136 palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
138 estate->es_instrument = queryDesc->doInstrument;
141 * Make our own private copy of the current query snapshot data.
143 * This "freezes" our idea of which tuples are good and which are not for
144 * the life of this query, even if it outlives the current command and
147 estate->es_snapshot = CopyQuerySnapshot();
150 * Initialize the plan state tree
/* restore caller's memory context before returning */
154 MemoryContextSwitchTo(oldcontext);
157 /* ----------------------------------------------------------------
160 * This is the main routine of the executor module. It accepts
161 * the query descriptor from the traffic cop and executes the
164 * ExecutorStart must have been called already.
166 * If direction is NoMovementScanDirection then nothing is done
167 * except to start up/shut down the destination. Otherwise,
168 * we retrieve up to 'count' tuples in the specified direction.
170 * Note: count = 0 is interpreted as no portal limit, i.e., run to
173 * ----------------------------------------------------------------
/*
 * ExecutorRun
 *		Main entry point: run the plan to retrieve up to 'count' tuples
 *		in the given direction.  count = 0 means run to completion.
 *
 * ExecutorStart must have been called already.  If direction is
 * NoMovementScanDirection, only the destination is started up and shut
 * down; no tuples are fetched.  The read-only-transaction check is done
 * here (not in ExecutorStart) so EXPLAIN without ANALYZE is not rejected.
 *
 * NOTE(review): excerpt elides lines; code left byte-identical.
 */
176 ExecutorRun(QueryDesc *queryDesc,
177 ScanDirection direction, long count)
182 DestReceiver *destfunc;
183 TupleTableSlot *result;
184 MemoryContext oldcontext;
187 Assert(queryDesc != NULL);
189 estate = queryDesc->estate;
191 Assert(estate != NULL);
194 * Switch into per-query memory context
196 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
199 * extract information from the query descriptor and the query
202 operation = queryDesc->operation;
203 dest = queryDesc->dest;
206 * If the transaction is read-only, we need to check if any writes
207 * are planned to non-temporary tables. This is done here at this
208 * rather late stage so that we can handle EXPLAIN vs. EXPLAIN
211 ExecCheckXactReadOnly(queryDesc->parsetree, operation);
214 * startup tuple receiver
216 estate->es_processed = 0;
217 estate->es_lastoid = InvalidOid;
219 destfunc = DestToFunction(dest);
220 (*destfunc->setup) (destfunc, (int) operation,
221 queryDesc->portalName, queryDesc->tupDesc);
/* NoMovement means: start/stop the receiver only, fetch nothing */
226 if (direction == NoMovementScanDirection)
229 result = ExecutePlan(estate,
230 queryDesc->planstate,
239 (*destfunc->cleanup) (destfunc);
241 MemoryContextSwitchTo(oldcontext);
246 /* ----------------------------------------------------------------
249 * This routine must be called at the end of execution of any
251 * ----------------------------------------------------------------
/*
 * ExecutorEnd
 *		Shut down executor state for a completed (or abandoned) query.
 *
 * Runs ExecEndPlan inside the per-query context, then destroys the EState
 * and its per-query memory context via FreeExecutorState, which releases
 * everything the executor allocated.  Finally clears the QueryDesc fields
 * that pointed into the now-freed state so they cannot dangle.
 */
254 ExecutorEnd(QueryDesc *queryDesc)
257 MemoryContext oldcontext;
260 Assert(queryDesc != NULL);
262 estate = queryDesc->estate;
264 Assert(estate != NULL);
267 * Switch into per-query memory context to run ExecEndPlan
269 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
271 ExecEndPlan(queryDesc->planstate, estate);
274 * Must switch out of context before destroying it
276 MemoryContextSwitchTo(oldcontext);
279 * Release EState and per-query memory context. This should release
280 * everything the executor has allocated.
282 FreeExecutorState(estate);
284 /* Reset queryDesc fields that no longer point to anything */
285 queryDesc->tupDesc = NULL;
286 queryDesc->estate = NULL;
287 queryDesc->planstate = NULL;
293 * Check access permissions for all relations listed in a range table.
/*
 * ExecCheckRTPerms
 *		Check access permissions for every relation in a range table.
 *
 * Iterates the RangeTblEntry list and delegates each entry to
 * ExecCheckRTEPerms, which raises an error (via aclcheck_error) on a
 * permission failure rather than returning a status.
 */
296 ExecCheckRTPerms(List *rangeTable, CmdType operation)
300 foreach(lp, rangeTable)
302 RangeTblEntry *rte = lfirst(lp);
304 ExecCheckRTEPerms(rte, operation);
310 * Check access permissions for a single RTE.
/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single range-table entry.
 *
 * Subquery RTEs recurse into their own rangetable; only RTE_RELATION
 * entries are checked directly here.  The userid checked is checkAsUser
 * (setuid view/rule owner) when set, else the current user.  Read access
 * requires ACL_SELECT; write access requires the ACL right matching the
 * operation (the switch dispatching on 'operation' is elided in this
 * excerpt).
 *
 * NOTE(review): excerpt elides lines; code left byte-identical.
 */
313 ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
317 AclResult aclcheck_result;
320 * If it's a subquery, recursively examine its rangetable.
322 if (rte->rtekind == RTE_SUBQUERY)
324 ExecCheckRTPerms(rte->subquery->rtable, operation);
329 * Otherwise, only plain-relation RTEs need to be checked here.
330 * Function RTEs are checked by init_fcache when the function is prepared
331 * for execution. Join and special RTEs need no checks.
333 if (rte->rtekind != RTE_RELATION)
339 * userid to check as: current user unless we have a setuid
342 * Note: GetUserId() is presently fast enough that there's no harm in
343 * calling it separately for each RTE. If that stops being true, we
344 * could call it once in ExecCheckRTPerms and pass the userid down
345 * from there. But for now, no need for the extra clutter.
347 userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/* local shorthand: ACL check against this RTE's relation as 'userid' */
349 #define CHECK(MODE) pg_class_aclcheck(relOid, userid, MODE)
351 if (rte->checkForRead)
353 aclcheck_result = CHECK(ACL_SELECT);
354 if (aclcheck_result != ACLCHECK_OK)
355 aclcheck_error(aclcheck_result, get_rel_name(relOid));
358 if (rte->checkForWrite)
361 * Note: write access in a SELECT context means SELECT FOR UPDATE.
362 * Right now we don't distinguish that from true update as far as
363 * permissions checks are concerned.
/* per-operation ACL mode; enclosing switch on 'operation' elided here */
368 aclcheck_result = CHECK(ACL_INSERT);
372 aclcheck_result = CHECK(ACL_UPDATE);
375 aclcheck_result = CHECK(ACL_DELETE);
378 elog(ERROR, "ExecCheckRTEPerms: bogus operation %d",
380 aclcheck_result = ACLCHECK_OK; /* keep compiler quiet */
383 if (aclcheck_result != ACLCHECK_OK)
384 aclcheck_error(aclcheck_result, get_rel_name(relOid));
389 /* ===============================================================
390 * ===============================================================
391 static routines follow
392 * ===============================================================
393 * ===============================================================
/*
 * ExecCheckXactReadOnly
 *		Error out if a read-only transaction attempts to write a
 *		non-temporary table.
 *
 * Disallowed cases: SELECT INTO / CREATE TABLE AS, and INSERT/UPDATE/DELETE
 * whose target is not in a temp namespace.  Writes to temp tables are
 * permitted, hence the per-RTE namespace test before raising the error.
 *
 * NOTE(review): excerpt elides lines; code left byte-identical.
 */
398 ExecCheckXactReadOnly(Query *parsetree, CmdType operation)
403 /* CREATE TABLE AS or SELECT INTO */
404 if (operation == CMD_SELECT && parsetree->into != NULL)
407 if (operation == CMD_DELETE || operation == CMD_INSERT
408 || operation == CMD_UPDATE)
412 foreach(lp, parsetree->rtable)
414 RangeTblEntry *rte = lfirst(lp);
416 if (rte->rtekind != RTE_RELATION)
419 if (!rte->checkForWrite)
/* temp-table writes are allowed even in a read-only transaction */
422 if (isTempNamespace(RelidGetNamespaceId(rte->relid)))
432 elog(ERROR, "transaction is read-only");
436 /* ----------------------------------------------------------------
439 * Initializes the query plan: open files, allocate storage
440 * and start up the rule manager
441 * ----------------------------------------------------------------
/*
 * InitPlan
 *		Initialize the query plan: permissions checks, result-relation
 *		setup, FOR UPDATE row-mark locking, tuple table creation, plan
 *		state tree initialization, junk-filter setup, and (for SELECT
 *		INTO) creation of the target "into" relation.
 *
 * On exit, queryDesc->tupDesc describes the tuples that will be returned
 * and queryDesc->planstate points at the initialized plan state tree.
 *
 * NOTE(review): this excerpt elides many lines of the original; the code
 * below is left byte-identical to what is visible.
 */
444 InitPlan(QueryDesc *queryDesc)
446 CmdType operation = queryDesc->operation;
447 Query *parseTree = queryDesc->parsetree;
448 Plan *plan = queryDesc->plantree;
449 EState *estate = queryDesc->estate;
450 PlanState *planstate;
452 Relation intoRelationDesc;
456 * Do permissions checks. It's sufficient to examine the query's
457 * top rangetable here --- subplan RTEs will be checked during
460 ExecCheckRTPerms(parseTree->rtable, operation);
463 * get information from query descriptor
465 rangeTable = parseTree->rtable;
468 * initialize the node's execution state
470 estate->es_range_table = rangeTable;
473 * if there is a result relation, initialize result relation stuff
475 if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
477 List *resultRelations = parseTree->resultRelations;
478 int numResultRelations;
479 ResultRelInfo *resultRelInfos;
481 if (resultRelations != NIL)
484 * Multiple result relations (due to inheritance)
485 * parseTree->resultRelations identifies them all
487 ResultRelInfo *resultRelInfo;
489 numResultRelations = length(resultRelations);
490 resultRelInfos = (ResultRelInfo *)
491 palloc(numResultRelations * sizeof(ResultRelInfo));
492 resultRelInfo = resultRelInfos;
493 while (resultRelations != NIL)
495 initResultRelInfo(resultRelInfo,
496 lfirsti(resultRelations),
500 resultRelations = lnext(resultRelations);
506 * Single result relation identified by
507 * parseTree->resultRelation
509 numResultRelations = 1;
510 resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
511 initResultRelInfo(resultRelInfos,
512 parseTree->resultRelation,
517 estate->es_result_relations = resultRelInfos;
518 estate->es_num_result_relations = numResultRelations;
519 /* Initialize to first or only result rel */
520 estate->es_result_relation_info = resultRelInfos;
525 * if no result relation, then set state appropriately
527 estate->es_result_relations = NULL;
528 estate->es_num_result_relations = 0;
529 estate->es_result_relation_info = NULL;
533 * Have to lock relations selected for update
535 estate->es_rowMark = NIL;
536 if (parseTree->rowMarks != NIL)
540 foreach(l, parseTree->rowMarks)
542 Index rti = lfirsti(l);
543 Oid relid = getrelid(rti, rangeTable);
/* RowShareLock: FOR UPDATE takes a share-mode lock on each marked rel */
547 relation = heap_open(relid, RowShareLock);
548 erm = (execRowMark *) palloc(sizeof(execRowMark));
549 erm->relation = relation;
/* resname is the junk-attribute name ("ctid<rti>") used by ExecutePlan */
551 snprintf(erm->resname, 32, "ctid%u", rti);
552 estate->es_rowMark = lappend(estate->es_rowMark, erm);
557 * initialize the executor "tuple" table. We need slots for all the
558 * plan nodes, plus possibly output slots for the junkfilter(s). At
559 * this point we aren't sure if we need junkfilters, so just add slots
560 * for them unconditionally.
563 int nSlots = ExecCountSlotsNode(plan);
565 if (parseTree->resultRelations != NIL)
566 nSlots += length(parseTree->resultRelations);
569 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
572 /* mark EvalPlanQual not active */
573 estate->es_topPlan = plan;
574 estate->es_evalPlanQual = NULL;
575 estate->es_evTupleNull = NULL;
576 estate->es_evTuple = NULL;
577 estate->es_useEvalPlan = false;
580 * initialize the private state information for all the nodes in the
581 * query tree. This opens files, allocates storage and leaves us
582 * ready to start processing tuples.
584 planstate = ExecInitNode(plan, estate);
587 * Get the tuple descriptor describing the type of tuples to return.
588 * (this is especially important if we are creating a relation with
591 tupType = ExecGetTupType(planstate);
594 * Initialize the junk filter if needed. SELECT and INSERT queries
595 * need a filter if there are any junk attrs in the tlist. UPDATE and
596 * DELETE always need one, since there's always a junk 'ctid'
597 * attribute present --- no need to look first.
600 bool junk_filter_needed = false;
607 foreach(tlist, plan->targetlist)
609 TargetEntry *tle = (TargetEntry *) lfirst(tlist);
611 if (tle->resdom->resjunk)
613 junk_filter_needed = true;
620 junk_filter_needed = true;
626 if (junk_filter_needed)
629 * If there are multiple result relations, each one needs its
630 * own junk filter. Note this is only possible for
631 * UPDATE/DELETE, so we can't be fooled by some needing a
632 * filter and some not.
634 if (parseTree->resultRelations != NIL)
636 PlanState **appendplans;
638 ResultRelInfo *resultRelInfo;
641 /* Top plan had better be an Append here. */
642 Assert(IsA(plan, Append));
643 Assert(((Append *) plan)->isTarget);
644 Assert(IsA(planstate, AppendState));
645 appendplans = ((AppendState *) planstate)->appendplans;
646 as_nplans = ((AppendState *) planstate)->as_nplans;
647 Assert(as_nplans == estate->es_num_result_relations);
648 resultRelInfo = estate->es_result_relations;
649 for (i = 0; i < as_nplans; i++)
651 PlanState *subplan = appendplans[i];
654 j = ExecInitJunkFilter(subplan->plan->targetlist,
655 ExecGetTupType(subplan),
656 ExecAllocTableSlot(estate->es_tupleTable));
657 resultRelInfo->ri_junkFilter = j;
662 * Set active junkfilter too; at this point ExecInitAppend
663 * has already selected an active result relation...
665 estate->es_junkFilter =
666 estate->es_result_relation_info->ri_junkFilter;
670 /* Normal case with just one JunkFilter */
673 j = ExecInitJunkFilter(planstate->plan->targetlist,
675 ExecAllocTableSlot(estate->es_tupleTable));
676 estate->es_junkFilter = j;
677 if (estate->es_result_relation_info)
678 estate->es_result_relation_info->ri_junkFilter = j;
680 /* For SELECT, want to return the cleaned tuple type */
681 if (operation == CMD_SELECT)
682 tupType = j->jf_cleanTupType;
686 estate->es_junkFilter = NULL;
690 * initialize the "into" relation
692 intoRelationDesc = (Relation) NULL;
694 if (operation == CMD_SELECT)
696 if (!parseTree->isPortal)
699 * a select into table --- need to create the "into" table
701 if (parseTree->into != NULL)
710 * find namespace to create in, check permissions
712 intoName = parseTree->into->relname;
713 namespaceId = RangeVarGetCreationNamespace(parseTree->into);
715 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
717 if (aclresult != ACLCHECK_OK)
718 aclcheck_error(aclresult,
719 get_namespace_name(namespaceId));
722 * have to copy tupType to get rid of constraints
724 tupdesc = CreateTupleDescCopy(tupType);
727 * Formerly we forced the output table to have OIDs, but
728 * as of 7.3 it will not have OIDs, because it's too late
729 * here to change the tupdescs of the already-initialized
730 * plan tree. (Perhaps we could recurse and change them
731 * all, but it's not really worth the trouble IMHO...)
735 heap_create_with_catalog(intoName,
741 allowSystemTableMods);
743 FreeTupleDesc(tupdesc);
746 * Advance command counter so that the newly-created
747 * relation's catalog tuples will be visible to heap_open.
749 CommandCounterIncrement();
752 * If necessary, create a TOAST table for the into
753 * relation. Note that AlterTableCreateToastTable ends
754 * with CommandCounterIncrement(), so that the TOAST table
755 * will be visible for insertion.
757 AlterTableCreateToastTable(intoRelationId, true);
759 intoRelationDesc = heap_open(intoRelationId,
760 AccessExclusiveLock);
765 estate->es_into_relation_descriptor = intoRelationDesc;
/* publish results back into the QueryDesc for ExecutorRun/callers */
767 queryDesc->tupDesc = tupType;
768 queryDesc->planstate = planstate;
772 * Initialize ResultRelInfo data for one result relation
/*
 * initResultRelInfo
 *		Fill in ResultRelInfo data for one result relation.
 *
 * Opens the target relation with RowExclusiveLock, rejects relkinds that
 * cannot be modified (sequences, toast tables, views), zero-fills and
 * populates the ResultRelInfo, copies the trigger descriptor (so we do
 * not depend on relcache data staying put), and opens the relation's
 * indexes unless this is a DELETE (deletion does not touch indexes here;
 * cleanup is left to vacuum).
 */
775 initResultRelInfo(ResultRelInfo *resultRelInfo,
776 Index resultRelationIndex,
780 Oid resultRelationOid;
781 Relation resultRelationDesc;
783 resultRelationOid = getrelid(resultRelationIndex, rangeTable);
784 resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
/* reject target relkinds that cannot be INSERT/UPDATE/DELETE targets */
786 switch (resultRelationDesc->rd_rel->relkind)
788 case RELKIND_SEQUENCE:
789 elog(ERROR, "You can't change sequence relation %s",
790 RelationGetRelationName(resultRelationDesc));
792 case RELKIND_TOASTVALUE:
793 elog(ERROR, "You can't change toast relation %s",
794 RelationGetRelationName(resultRelationDesc));
797 elog(ERROR, "You can't change view relation %s",
798 RelationGetRelationName(resultRelationDesc));
802 MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
803 resultRelInfo->type = T_ResultRelInfo;
804 resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
805 resultRelInfo->ri_RelationDesc = resultRelationDesc;
806 resultRelInfo->ri_NumIndices = 0;
807 resultRelInfo->ri_IndexRelationDescs = NULL;
808 resultRelInfo->ri_IndexRelationInfo = NULL;
809 /* make a copy so as not to depend on relcache info not changing... */
810 resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
811 resultRelInfo->ri_TrigFunctions = NULL;
812 resultRelInfo->ri_ConstraintExprs = NULL;
813 resultRelInfo->ri_junkFilter = NULL;
816 * If there are indices on the result relation, open them and save
817 * descriptors in the result relation info, so that we can add new
818 * index entries for the tuples we add/update. We need not do this
819 * for a DELETE, however, since deletion doesn't affect indexes.
821 if (resultRelationDesc->rd_rel->relhasindex &&
822 operation != CMD_DELETE)
823 ExecOpenIndices(resultRelInfo);
826 /* ----------------------------------------------------------------
829 * Cleans up the query plan -- closes files and frees up storage
831 * NOTE: we are no longer very worried about freeing storage per se
832 * in this code; FreeExecutorState should be guaranteed to release all
833 * memory that needs to be released. What we are worried about doing
834 * is closing relations and dropping buffer pins. Thus, for example,
835 * tuple tables must be cleared or dropped to ensure pins are released.
836 * ----------------------------------------------------------------
/*
 * ExecEndPlan
 *		Shut down the plan: end EvalPlanQual processing, end all plan
 *		nodes, drop the tuple table, and close relations.
 *
 * Memory is released wholesale by FreeExecutorState later; the real job
 * here is closing relations and dropping buffer pins.  All relations are
 * closed with NoLock so their locks persist until transaction end.
 */
839 ExecEndPlan(PlanState *planstate, EState *estate)
841 ResultRelInfo *resultRelInfo;
846 * shut down any PlanQual processing we were doing
848 if (estate->es_evalPlanQual != NULL)
849 EndEvalPlanQual(estate);
852 * shut down the node-type-specific query processing
854 ExecEndNode(planstate);
857 * destroy the executor "tuple" table.
859 ExecDropTupleTable(estate->es_tupleTable, true);
860 estate->es_tupleTable = NULL;
863 * close the result relation(s) if any, but hold locks until xact
866 resultRelInfo = estate->es_result_relations;
867 for (i = estate->es_num_result_relations; i > 0; i--)
869 /* Close indices and then the relation itself */
870 ExecCloseIndices(resultRelInfo);
871 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
876 * close the "into" relation if necessary, again keeping lock
878 if (estate->es_into_relation_descriptor != NULL)
879 heap_close(estate->es_into_relation_descriptor, NoLock);
882 * close any relations selected FOR UPDATE, again keeping locks
884 foreach(l, estate->es_rowMark)
886 execRowMark *erm = lfirst(l);
888 heap_close(erm->relation, NoLock);
892 /* ----------------------------------------------------------------
895 * processes the query plan to retrieve 'numberTuples' tuples in the
896 * direction specified.
898 * Retrieves all tuples if numberTuples is 0
900 * result is either a slot containing the last tuple in the case
901 * of a SELECT or NULL otherwise.
903 * Note: the ctid attribute is a 'junk' attribute that is removed before the
905 * ----------------------------------------------------------------
/*
 * ExecutePlan
 *		Drive the plan to retrieve up to numberTuples tuples in the given
 *		direction, dispatching each to ExecSelect/Insert/Update/Delete.
 *
 * numberTuples = 0 means no limit.  Returns the last slot for SELECT,
 * NULL otherwise.  Fires BEFORE/AFTER STATEMENT triggers around the main
 * loop.  For UPDATE/DELETE, the junk 'ctid' attribute identifies the
 * target tuple; for SELECT ... FOR UPDATE, each row-marked relation's
 * junk ctid is used with heap_mark4update, and a concurrent update
 * triggers EvalPlanQual rechecking (es_useEvalPlan routes subsequent
 * fetches through EvalPlanQualNext).
 *
 * NOTE(review): this excerpt elides many lines; code left byte-identical.
 */
907 static TupleTableSlot *
908 ExecutePlan(EState *estate,
909 PlanState *planstate,
912 ScanDirection direction,
913 DestReceiver *destfunc)
915 JunkFilter *junkfilter;
916 TupleTableSlot *slot;
917 ItemPointer tupleid = NULL;
918 ItemPointerData tuple_ctid;
919 long current_tuple_count;
920 TupleTableSlot *result;
923 * initialize local variables
926 current_tuple_count = 0;
932 estate->es_direction = direction;
935 * Process BEFORE EACH STATEMENT triggers
940 ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
943 ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
946 ExecBSInsertTriggers(estate, estate->es_result_relation_info);
954 * Loop until we've processed the proper number of tuples from the
960 /* Reset the per-output-tuple exprcontext */
961 ResetPerTupleExprContext(estate);
964 * Execute the plan and obtain a tuple
/* after an EvalPlanQual recheck, tuples come from the recheck plan */
967 if (estate->es_useEvalPlan)
969 slot = EvalPlanQualNext(estate);
971 slot = ExecProcNode(planstate);
974 slot = ExecProcNode(planstate);
977 * if the tuple is null, then we assume there is nothing more to
978 * process so we just return null...
987 * if we have a junk filter, then project a new tuple with the
990 * Store this new "clean" tuple in the junkfilter's resultSlot.
991 * (Formerly, we stored it back over the "dirty" tuple, which is
992 * WRONG because that tuple slot has the wrong descriptor.)
994 * Also, extract all the junk information we need.
996 if ((junkfilter = estate->es_junkFilter) != (JunkFilter *) NULL)
1003 * extract the 'ctid' junk attribute.
1005 if (operation == CMD_UPDATE || operation == CMD_DELETE)
1007 if (!ExecGetJunkAttribute(junkfilter,
1012 elog(ERROR, "ExecutePlan: NO (junk) `ctid' was found!");
1014 /* shouldn't ever get a null result... */
1016 elog(ERROR, "ExecutePlan: (junk) `ctid' is NULL!");
1018 tupleid = (ItemPointer) DatumGetPointer(datum);
/* copy the ctid: the datum points into the tuple, which may go away */
1019 tuple_ctid = *tupleid; /* make sure we don't free the
1021 tupleid = &tuple_ctid;
1023 else if (estate->es_rowMark != NIL)
1028 foreach(l, estate->es_rowMark)
1030 execRowMark *erm = lfirst(l);
1032 HeapTupleData tuple;
1033 TupleTableSlot *newSlot;
1036 if (!ExecGetJunkAttribute(junkfilter,
1041 elog(ERROR, "ExecutePlan: NO (junk) `%s' was found!",
1044 /* shouldn't ever get a null result... */
1046 elog(ERROR, "ExecutePlan: (junk) `%s' is NULL!",
1049 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1050 test = heap_mark4update(erm->relation, &tuple, &buffer,
1051 estate->es_snapshot->curcid);
1052 ReleaseBuffer(buffer);
1055 case HeapTupleSelfUpdated:
1056 /* treat it as deleted; do not process */
1059 case HeapTupleMayBeUpdated:
1062 case HeapTupleUpdated:
1063 if (XactIsoLevel == XACT_SERIALIZABLE)
1064 elog(ERROR, "Can't serialize access due to concurrent update");
1065 if (!(ItemPointerEquals(&(tuple.t_self),
1066 (ItemPointer) DatumGetPointer(datum))))
1068 newSlot = EvalPlanQual(estate, erm->rti, &(tuple.t_self));
1069 if (!(TupIsNull(newSlot)))
1072 estate->es_useEvalPlan = true;
1078 * if tuple was deleted or PlanQual failed for
1079 * updated tuple - we must not return this
1085 elog(ERROR, "Unknown status %u from heap_mark4update", test);
1092 * Finally create a new "clean" tuple with all junk attributes
1095 newTuple = ExecRemoveJunk(junkfilter, slot);
1097 slot = ExecStoreTuple(newTuple, /* tuple to store */
1098 junkfilter->jf_resultSlot, /* dest slot */
1099 InvalidBuffer, /* this tuple has no
1101 true); /* tuple should be pfreed */
1105 * now that we have a tuple, do the appropriate thing with it..
1106 * either return it to the user, add it to a relation someplace,
1107 * delete it from a relation, or modify some of its attributes.
1112 ExecSelect(slot, /* slot containing tuple */
1113 destfunc, /* destination's tuple-receiver
1120 ExecInsert(slot, tupleid, estate);
1125 ExecDelete(slot, tupleid, estate);
1130 ExecUpdate(slot, tupleid, estate);
1135 elog(LOG, "ExecutePlan: unknown operation in queryDesc");
1141 * check our tuple count.. if we've processed the proper number
1142 * then quit, else loop again and process more tuples. Zero
1143 * numberTuples means no limit.
1145 current_tuple_count++;
1146 if (numberTuples && numberTuples == current_tuple_count)
1151 * Process AFTER EACH STATEMENT triggers
1156 ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1159 ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1162 ExecASInsertTriggers(estate, estate->es_result_relation_info);
1170 * here, result is either a slot containing a tuple in the case of a
1171 * SELECT or NULL otherwise.
1176 /* ----------------------------------------------------------------
1179 * SELECTs are easy.. we just pass the tuple to the appropriate
1180 * print function. The only complexity is when we do a
1181 * "SELECT INTO", in which case we insert the tuple into
1182 * the appropriate relation (note: this is a newly created relation
1183 * so we don't need to worry about indices or locks.)
1184 * ----------------------------------------------------------------
/*
 * ExecSelect
 *		Emit one SELECT result tuple.
 *
 * Sends the tuple to the destination receiver, and — for SELECT INTO —
 * also inserts it into the newly created "into" relation (no index or
 * lock worries there, since the relation is brand new).  Bumps
 * es_processed for the command tag.
 */
1187 ExecSelect(TupleTableSlot *slot,
1188 DestReceiver *destfunc,
1195 * get the heap tuple out of the tuple table slot
1198 attrtype = slot->ttc_tupleDescriptor;
1201 * insert the tuple into the "into relation"
1203 if (estate->es_into_relation_descriptor != NULL)
1205 heap_insert(estate->es_into_relation_descriptor, tuple,
1206 estate->es_snapshot->curcid);
1211 * send the tuple to the front end (or the screen)
1213 (*destfunc->receiveTuple) (tuple, attrtype, destfunc);
1215 (estate->es_processed)++;
1218 /* ----------------------------------------------------------------
1221 * INSERTs are trickier.. we have to insert the tuple into
1222 * the base relation and insert appropriate tuples into the
1224 * ----------------------------------------------------------------
/*
 * ExecInsert
 *		Insert one tuple into the current result relation.
 *
 * Fires BEFORE ROW INSERT triggers (which may suppress or replace the
 * tuple), checks constraints, does the heap_insert, records the new OID
 * and ctid for the command tag / currtid, inserts index entries, and
 * fires AFTER ROW INSERT triggers.
 *
 * NOTE(review): excerpt elides lines; code left byte-identical.
 */
1227 ExecInsert(TupleTableSlot *slot,
1228 ItemPointer tupleid,
1232 ResultRelInfo *resultRelInfo;
1233 Relation resultRelationDesc;
1238 * get the heap tuple out of the tuple table slot
1243 * get information on the (current) result relation
1245 resultRelInfo = estate->es_result_relation_info;
1246 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1248 /* BEFORE ROW INSERT Triggers */
1249 if (resultRelInfo->ri_TrigDesc &&
1250 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1254 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1256 if (newtuple == NULL) /* "do nothing" */
1259 if (newtuple != tuple) /* modified by Trigger(s) */
1262 * Insert modified tuple into tuple table slot, replacing the
1263 * original. We assume that it was allocated in per-tuple
1264 * memory context, and therefore will go away by itself. The
1265 * tuple table slot should not try to clear it.
1267 ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1273 * Check the constraints of the tuple
1275 if (resultRelationDesc->rd_att->constr)
1276 ExecConstraints("ExecInsert", resultRelInfo, slot, estate);
1281 newId = heap_insert(resultRelationDesc, tuple,
1282 estate->es_snapshot->curcid);
1285 (estate->es_processed)++;
1286 estate->es_lastoid = newId;
1287 setLastTid(&(tuple->t_self));
1292 * Note: heap_insert adds a new tuple to a relation. As a side effect,
1293 * the tupleid of the new tuple is placed in the new tuple's t_ctid
1296 numIndices = resultRelInfo->ri_NumIndices;
1298 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1300 /* AFTER ROW INSERT Triggers */
1301 ExecARInsertTriggers(estate, resultRelInfo, tuple);
1304 /* ----------------------------------------------------------------
1307 * DELETE is like UPDATE, we delete the tuple and its
1309 * ----------------------------------------------------------------
/*
 * ExecDelete
 *		Delete the tuple identified by 'tupleid' from the current result
 *		relation.
 *
 * Fires BEFORE ROW DELETE triggers (which may cancel the delete), calls
 * heap_delete, and handles the concurrent-update outcomes: self-updated
 * tuples are skipped; under SERIALIZABLE a concurrent update is an error;
 * under READ COMMITTED the updated row is rechecked via EvalPlanQual.
 * Index entries are not removed here — vacuum handles that.  Ends with
 * AFTER ROW DELETE triggers.
 *
 * NOTE(review): excerpt elides lines; code left byte-identical.
 */
1312 ExecDelete(TupleTableSlot *slot,
1313 ItemPointer tupleid,
1316 ResultRelInfo *resultRelInfo;
1317 Relation resultRelationDesc;
1318 ItemPointerData ctid;
1322 * get information on the (current) result relation
1324 resultRelInfo = estate->es_result_relation_info;
1325 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1327 /* BEFORE ROW DELETE Triggers */
1328 if (resultRelInfo->ri_TrigDesc &&
1329 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1333 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid);
1335 if (!dodelete) /* "do nothing" */
1343 result = heap_delete(resultRelationDesc, tupleid,
1345 estate->es_snapshot->curcid);
1348 case HeapTupleSelfUpdated:
1349 /* already deleted by self; nothing to do */
1352 case HeapTupleMayBeUpdated:
1355 case HeapTupleUpdated:
1356 if (XactIsoLevel == XACT_SERIALIZABLE)
1357 elog(ERROR, "Can't serialize access due to concurrent update");
1358 else if (!(ItemPointerEquals(tupleid, &ctid)))
1360 TupleTableSlot *epqslot = EvalPlanQual(estate,
1361 resultRelInfo->ri_RangeTableIndex, &ctid);
1363 if (!TupIsNull(epqslot))
1369 /* tuple already deleted; nothing to do */
1373 elog(ERROR, "Unknown status %u from heap_delete", result);
1378 (estate->es_processed)++;
1381 * Note: Normally one would think that we have to delete index tuples
1382 * associated with the heap tuple now..
1384 * ... but in POSTGRES, we have no need to do this because the vacuum
1385 * daemon automatically opens an index scan and deletes index tuples
1386 * when it finds deleted heap tuples. -cim 9/27/89
1389 /* AFTER ROW DELETE Triggers */
1390 ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
1393 /* ----------------------------------------------------------------
1396 * note: we can't run UPDATE queries with transactions
1397 * off because UPDATEs are actually INSERTs and our
1398 * scan will mistakenly loop forever, updating the tuple
1399 * it just inserted.. This should be fixed but until it
1400 * is, we don't want to get stuck in an infinite loop
1401 * which corrupts your database..
1402 * ----------------------------------------------------------------
/*
 * ExecUpdate
 *		Replace the tuple identified by 'tupleid' with the tuple in 'slot'.
 *
 * Refuses to run in bootstrap mode (an UPDATE without transactions would
 * loop forever re-scanning its own insertions).  Fires BEFORE ROW UPDATE
 * triggers (which may cancel or substitute the tuple), checks
 * constraints, calls heap_update, and handles concurrent updates the
 * same way as ExecDelete: error under SERIALIZABLE, EvalPlanQual recheck
 * under READ COMMITTED — in which case the recheck result is re-filtered
 * through the junk filter and constraints are re-checked (the loop-back
 * is elided in this excerpt).  Old index entries are left for vacuum;
 * only new index tuples are inserted.
 *
 * NOTE(review): excerpt elides lines; code left byte-identical.
 */
1405 ExecUpdate(TupleTableSlot *slot,
1406 ItemPointer tupleid,
1410 ResultRelInfo *resultRelInfo;
1411 Relation resultRelationDesc;
1412 ItemPointerData ctid;
1417 * abort the operation if not running transactions
1419 if (IsBootstrapProcessingMode())
1421 elog(WARNING, "ExecUpdate: UPDATE can't run without transactions");
1426 * get the heap tuple out of the tuple table slot
1431 * get information on the (current) result relation
1433 resultRelInfo = estate->es_result_relation_info;
1434 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1436 /* BEFORE ROW UPDATE Triggers */
1437 if (resultRelInfo->ri_TrigDesc &&
1438 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
1442 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1445 if (newtuple == NULL) /* "do nothing" */
1448 if (newtuple != tuple) /* modified by Trigger(s) */
1451 * Insert modified tuple into tuple table slot, replacing the
1452 * original. We assume that it was allocated in per-tuple
1453 * memory context, and therefore will go away by itself. The
1454 * tuple table slot should not try to clear it.
1456 ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1462 * Check the constraints of the tuple
1464 * If we generate a new candidate tuple after EvalPlanQual testing, we
1465 * must loop back here and recheck constraints. (We don't need to
1466 * redo triggers, however. If there are any BEFORE triggers then
1467 * trigger.c will have done mark4update to lock the correct tuple, so
1468 * there's no need to do them again.)
1471 if (resultRelationDesc->rd_att->constr)
1472 ExecConstraints("ExecUpdate", resultRelInfo, slot, estate);
1475 * replace the heap tuple
1477 result = heap_update(resultRelationDesc, tupleid, tuple,
1479 estate->es_snapshot->curcid);
1482 case HeapTupleSelfUpdated:
1483 /* already deleted by self; nothing to do */
1486 case HeapTupleMayBeUpdated:
1489 case HeapTupleUpdated:
1490 if (XactIsoLevel == XACT_SERIALIZABLE)
1491 elog(ERROR, "Can't serialize access due to concurrent update");
1492 else if (!(ItemPointerEquals(tupleid, &ctid)))
1494 TupleTableSlot *epqslot = EvalPlanQual(estate,
1495 resultRelInfo->ri_RangeTableIndex, &ctid);
1497 if (!TupIsNull(epqslot))
/* re-filter the EPQ result through the junk filter before retrying */
1500 tuple = ExecRemoveJunk(estate->es_junkFilter, epqslot);
1501 slot = ExecStoreTuple(tuple,
1502 estate->es_junkFilter->jf_resultSlot,
1503 InvalidBuffer, true);
1507 /* tuple already deleted; nothing to do */
1511 elog(ERROR, "Unknown status %u from heap_update", result);
1516 (estate->es_processed)++;
1519 * Note: instead of having to update the old index tuples associated
1520 * with the heap tuple, all we do is form and insert new index tuples.
1521 * This is because UPDATEs are actually DELETEs and INSERTs and index
1522 * tuple deletion is done automagically by the vacuum daemon. All we
1523 * do is insert new index tuples. -cim 9/27/89
1529 * heap_update updates a tuple in the base relation by invalidating it
1530 * and then inserting a new tuple to the relation. As a side effect,
1531 * the tupleid of the new tuple is placed in the new tuple's t_ctid
1532 * field. So we now insert index tuples using the new tupleid stored
1536 numIndices = resultRelInfo->ri_NumIndices;
1538 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1540 /* AFTER ROW UPDATE Triggers */
1541 ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
/*
 * ExecRelCheck
 *
 * Evaluate the relation's CHECK constraints against the tuple in `slot`.
 * Returns the name (ccname) of the first failing constraint, or NULL if
 * all constraints pass. Compiled constraint expression trees are cached
 * in resultRelInfo->ri_ConstraintExprs, in per-query memory.
 *
 * NOTE(review): line-sampled extraction; some braces/declarations (e.g.
 * the local `qual` and `i`) are not shown.
 */
1545 ExecRelCheck(ResultRelInfo *resultRelInfo,
1546 TupleTableSlot *slot, EState *estate)
1548 Relation rel = resultRelInfo->ri_RelationDesc;
1549 int ncheck = rel->rd_att->constr->num_check;
1550 ConstrCheck *check = rel->rd_att->constr->check;
1551 ExprContext *econtext;
1552 MemoryContext oldContext;
1557 * If first time through for this result relation, build expression
1558 * nodetrees for rel's constraint expressions. Keep them in the
1559 * per-query memory context so they'll survive throughout the query.
1561 if (resultRelInfo->ri_ConstraintExprs == NULL)
/* Switch to query-lifespan context so the cached trees outlive this tuple */
1563 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1564 resultRelInfo->ri_ConstraintExprs =
1565 (List **) palloc(ncheck * sizeof(List *));
1566 for (i = 0; i < ncheck; i++)
/* ccbin holds the constraint expression serialized as a node string */
1568 qual = (List *) stringToNode(check[i].ccbin);
1569 resultRelInfo->ri_ConstraintExprs[i] = (List *)
1570 ExecPrepareExpr((Expr *) qual, estate);
1572 MemoryContextSwitchTo(oldContext);
1576 * We will use the EState's per-tuple context for evaluating
1577 * constraint expressions (creating it if it's not already there).
1579 econtext = GetPerTupleExprContext(estate);
1581 /* Arrange for econtext's scan tuple to be the tuple under test */
1582 econtext->ecxt_scantuple = slot;
1584 /* And evaluate the constraints */
1585 for (i = 0; i < ncheck; i++)
1587 qual = resultRelInfo->ri_ConstraintExprs[i];
1590 * NOTE: SQL92 specifies that a NULL result from a constraint
1591 * expression is not to be treated as a failure. Therefore, tell
1592 * ExecQual to return TRUE for NULL.
1594 if (!ExecQual(qual, econtext, true))
/* Report the violated constraint's name to the caller */
1595 return check[i].ccname;
1598 /* NULL result means no error */
1599 return (char *) NULL;
/*
 * ExecConstraints
 *
 * Enforce NOT NULL and CHECK constraints for the tuple in `slot` against
 * the result relation. `caller` is interpolated into the error messages
 * (e.g. "ExecUpdate"). Raises elog(ERROR) on the first violation found;
 * returns normally if all constraints pass.
 *
 * NOTE(review): line-sampled extraction; enclosing braces and some
 * declarations are not shown.
 */
1603 ExecConstraints(const char *caller, ResultRelInfo *resultRelInfo,
1604 TupleTableSlot *slot, EState *estate)
1606 Relation rel = resultRelInfo->ri_RelationDesc;
1607 HeapTuple tuple = slot->val;
1608 TupleConstr *constr = rel->rd_att->constr;
/* Pass 1: NOT NULL attributes (attribute numbers are 1-based) */
1612 if (constr->has_not_null)
1614 int natts = rel->rd_att->natts;
1617 for (attrChk = 1; attrChk <= natts; attrChk++)
1619 if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1620 heap_attisnull(tuple, attrChk))
1621 elog(ERROR, "%s: Fail to add null value in not null attribute %s",
1622 caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
/* Pass 2: CHECK constraints, delegated to ExecRelCheck (returns name or NULL) */
1626 if (constr->num_check > 0)
1630 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1631 elog(ERROR, "%s: rejected due to CHECK constraint \"%s\" on \"%s\"",
1632 caller, failed, RelationGetRelationName(rel));
/*
 * EvalPlanQual
 *
 * READ COMMITTED recheck: given the ctid of a concurrently-updated row in
 * range-table entry `rti`, fetch the latest committed version of that row,
 * then (re)run a recheck subplan with that row substituted for the scan of
 * `rti`, returning the first qualifying tuple (or a null slot). Maintains
 * a stack of evalPlanQual entries so that rechecks can nest per-RTE.
 *
 * Side effect: *tid is overwritten with the t_self of the row version
 * actually used, so the caller updates/deletes the right tuple.
 *
 * NOTE(review): line-sampled extraction; loop/braces and several
 * statements between the visible lines are not shown.
 */
1637 * Check a modified tuple to see if we want to process its updated version
1638 * under READ COMMITTED rules.
1640 * See backend/executor/README for some info about how this works.
1643 EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
1648 HeapTupleData tuple;
1649 HeapTuple copyTuple = NULL;
1655 * find relation containing target tuple
/* Fast path: the target RTE is the current result relation */
1657 if (estate->es_result_relation_info != NULL &&
1658 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1659 relation = estate->es_result_relation_info->ri_RelationDesc;
/* Otherwise search the FOR UPDATE row-mark list for the RTE */
1665 foreach(l, estate->es_rowMark)
1667 if (((execRowMark *) lfirst(l))->rti == rti)
1669 relation = ((execRowMark *) lfirst(l))->relation;
1673 if (relation == NULL)
1674 elog(ERROR, "EvalPlanQual: can't find RTE %d", (int) rti);
1680 * Loop here to deal with updated or busy tuples
1682 tuple.t_self = *tid;
/* SnapshotDirty sees uncommitted changes; xmax/xmin report in-progress xacts */
1687 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, false, NULL))
1689 TransactionId xwait = SnapshotDirty->xmax;
1691 if (TransactionIdIsValid(SnapshotDirty->xmin))
1692 elog(ERROR, "EvalPlanQual: t_xmin is uncommitted ?!");
1695 * If tuple is being updated by other transaction then we have
1696 * to wait for its commit/abort.
1698 if (TransactionIdIsValid(xwait))
1700 ReleaseBuffer(buffer);
/* Block until the updating transaction commits or aborts, then retry */
1701 XactLockTableWait(xwait);
1706 * We got tuple - now copy it for use by recheck query.
1708 copyTuple = heap_copytuple(&tuple);
1709 ReleaseBuffer(buffer);
1714 * Oops! Invalid tuple. Have to check is it updated or deleted.
1715 * Note that it's possible to get invalid SnapshotDirty->tid if
1716 * tuple updated by this transaction. Have we to check this ?
1718 if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
1719 !(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
1721 /* updated, so look at the updated copy */
/* Follow the update chain to the newer version and loop again */
1722 tuple.t_self = SnapshotDirty->tid;
1727 * Deleted or updated by this transaction; forget it.
1733 * For UPDATE/DELETE we have to return tid of actual row we're
/* Hand the caller the tid of the version we settled on */
1736 *tid = tuple.t_self;
1739 * Need to run a recheck subquery. Find or create a PQ stack entry.
1741 epq = estate->es_evalPlanQual;
/* rti == 0 marks an idle (already shut down) stack entry */
1744 if (epq != NULL && epq->rti == 0)
1746 /* Top PQ stack entry is idle, so re-use it */
1747 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
1753 * If this is request for another RTE - Ra, - then we have to check
1754 * wasn't PlanQual requested for Ra already and if so then Ra' row was
1755 * updated again and we have to re-start old execution for Ra and
1756 * forget all what we done after Ra was suspended. Cool? -:))
1758 if (epq != NULL && epq->rti != rti &&
1759 epq->estate->es_evTuple[rti - 1] != NULL)
1763 evalPlanQual *oldepq;
1765 /* stop execution */
1766 EvalPlanQualStop(epq);
1767 /* pop previous PlanQual from the stack */
1769 Assert(oldepq && oldepq->rti != 0);
1770 /* push current PQ to freePQ stack */
1773 estate->es_evalPlanQual = epq;
/* Unwind until we're back at the entry for this RTE */
1774 } while (epq->rti != rti);
1778 * If we are requested for another RTE then we have to suspend
1779 * execution of current PlanQual and start execution for new one.
1781 if (epq == NULL || epq->rti != rti)
1783 /* try to reuse plan used previously */
1784 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
1786 if (newepq == NULL) /* first call or freePQ stack is empty */
1788 newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
1789 newepq->free = NULL;
1790 newepq->estate = NULL;
1791 newepq->planstate = NULL;
1795 /* recycle previously used PlanQual */
1796 Assert(newepq->estate == NULL);
1799 /* push current PQ to the stack */
1802 estate->es_evalPlanQual = epq;
1807 Assert(epq->rti == rti);
1810 * Ok - we're requested for the same RTE. Unfortunately we still have
1811 * to end and restart execution of the plan, because ExecReScan
1812 * wouldn't ensure that upper plan nodes would reset themselves. We
1813 * could make that work if insertion of the target tuple were
1814 * integrated with the Param mechanism somehow, so that the upper plan
1815 * nodes know that their children's outputs have changed.
1817 * Note that the stack of free evalPlanQual nodes is quite useless at
1818 * the moment, since it only saves us from pallocing/releasing the
1819 * evalPlanQual nodes themselves. But it will be useful once we
1820 * implement ReScan instead of end/restart for re-using PlanQual nodes.
1824 /* stop execution */
1825 EvalPlanQualStop(epq);
1829 * Initialize new recheck query.
1831 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need
1832 * to instead copy down changeable state from the top plan (including
1833 * es_result_relation_info, es_junkFilter) and reset locally changeable
1834 * state in the epq (including es_param_exec_vals, es_evTupleNull).
1836 EvalPlanQualStart(epq, estate, epq->next);
1839 * free old RTE' tuple, if any, and store target tuple where
1840 * relation's scan node will see it
1842 epqstate = epq->estate;
/* es_evTuple is indexed by rti - 1; scan nodes return this tuple for the RTE */
1843 if (epqstate->es_evTuple[rti - 1] != NULL)
1844 heap_freetuple(epqstate->es_evTuple[rti - 1]);
1845 epqstate->es_evTuple[rti - 1] = copyTuple;
/* Pull the first qualifying tuple from the freshly-started recheck plan */
1847 return EvalPlanQualNext(estate);
/*
 * EvalPlanQualNext
 *
 * Fetch the next tuple from the active (topmost) recheck plan. When that
 * plan is exhausted, shut it down, pop the PQ stack, and continue with the
 * previous level (or mark the stack idle and resume normal query execution).
 *
 * NOTE(review): line-sampled extraction; the pop/continue tail of the
 * function is only partially visible here.
 */
1850 static TupleTableSlot *
1851 EvalPlanQualNext(EState *estate)
1853 evalPlanQual *epq = estate->es_evalPlanQual;
1854 MemoryContext oldcontext;
1855 TupleTableSlot *slot;
/* rti == 0 would mean an idle entry; caller must have an active recheck */
1857 Assert(epq->rti != 0);
/* Run the recheck plan in its own per-query memory context */
1860 oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
1861 slot = ExecProcNode(epq->planstate);
1862 MemoryContextSwitchTo(oldcontext);
1865 * No more tuples for this PQ. Continue previous one.
1867 if (TupIsNull(slot))
1869 evalPlanQual *oldepq;
1871 /* stop execution */
1872 EvalPlanQualStop(epq);
1873 /* pop old PQ from the stack */
1877 /* this is the first (oldest) PQ - mark as free */
1879 estate->es_useEvalPlan = false;
1880 /* and continue Query execution */
1883 Assert(oldepq->rti != 0);
1884 /* push current PQ to freePQ stack */
1887 estate->es_evalPlanQual = epq;
/*
 * EndEvalPlanQual
 *
 * Shut down the entire EvalPlanQual stack at query end: stop each active
 * recheck plan, pop every stack entry, and leave the stack marked idle
 * (es_useEvalPlan = false). No-op if the plans were already shut down
 * (top entry has rti == 0).
 *
 * NOTE(review): line-sampled extraction; the loop structure around the
 * pop sequence is not fully visible.
 */
1895 EndEvalPlanQual(EState *estate)
1897 evalPlanQual *epq = estate->es_evalPlanQual;
1899 if (epq->rti == 0) /* plans already shutdowned */
1901 Assert(epq->next == NULL);
1907 evalPlanQual *oldepq;
1909 /* stop execution */
1910 EvalPlanQualStop(epq);
1911 /* pop old PQ from the stack */
1915 /* this is the first (oldest) PQ - mark as free */
1917 estate->es_useEvalPlan = false;
1920 Assert(oldepq->rti != 0);
1921 /* push current PQ to freePQ stack */
1924 estate->es_evalPlanQual = epq;
/*
 * EvalPlanQualStart
 *
 * Build and start an executor state for one level of PlanQual recheck:
 * create a fresh EState, share immutable state (snapshot, range table,
 * result-rel info, params) with the top-level `estate`, allocate per-level
 * local state, and init the plan tree. `priorepq` is the next-older stack
 * entry, whose es_evTuple array is shared so nested rechecks see the same
 * substituted tuples.
 *
 * NOTE(review): line-sampled extraction; braces and a few declarations
 * (e.g. `rtsize`, `epqstate`) are not visible here.
 */
1929 * Start execution of one level of PlanQual.
1931 * This is a cut-down version of ExecutorStart(): we copy some state from
1932 * the top-level estate rather than initializing it fresh.
1935 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
1939 MemoryContext oldcontext;
1941 rtsize = length(estate->es_range_table);
1943 epq->estate = epqstate = CreateExecutorState();
/* Everything below is allocated in the new EState's query context */
1945 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
1948 * The epqstates share the top query's copy of unchanging state such
1949 * as the snapshot, rangetable, result-rel info, and external Param info.
1950 * They need their own copies of local state, including a tuple table,
1951 * es_param_exec_vals, etc.
1953 epqstate->es_direction = ForwardScanDirection;
1954 epqstate->es_snapshot = estate->es_snapshot;
1955 epqstate->es_range_table = estate->es_range_table;
1956 epqstate->es_result_relations = estate->es_result_relations;
1957 epqstate->es_num_result_relations = estate->es_num_result_relations;
1958 epqstate->es_result_relation_info = estate->es_result_relation_info;
1959 epqstate->es_junkFilter = estate->es_junkFilter;
1960 epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
1961 epqstate->es_param_list_info = estate->es_param_list_info;
/* Private (zeroed) exec-param array, since subplans may set params locally */
1962 if (estate->es_topPlan->nParamExec > 0)
1963 epqstate->es_param_exec_vals = (ParamExecData *)
1964 palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
1965 epqstate->es_rowMark = estate->es_rowMark;
1966 epqstate->es_instrument = estate->es_instrument;
1967 epqstate->es_topPlan = estate->es_topPlan;
1969 * Each epqstate must have its own es_evTupleNull state, but
1970 * all the stack entries share es_evTuple state. This allows
1971 * sub-rechecks to inherit the value being examined by an
1974 epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
1975 if (priorepq == NULL)
1976 /* first PQ stack entry */
1977 epqstate->es_evTuple = (HeapTuple *)
1978 palloc0(rtsize * sizeof(HeapTuple));
1980 /* later stack entries share the same storage */
1981 epqstate->es_evTuple = priorepq->estate->es_evTuple;
/* Size the level's tuple table like the top plan's */
1983 epqstate->es_tupleTable =
1984 ExecCreateTupleTable(estate->es_tupleTable->size);
/* Initialize the whole top plan tree under the recheck EState */
1986 epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);
1988 MemoryContextSwitchTo(oldcontext);
1992 * End execution of one level of PlanQual.
1994 * This is a cut-down version of ExecutorEnd(); basically we want to do most
1995 * of the normal cleanup, but *not* close result relations (which we are
1996 * just sharing from the outer query).
1999 EvalPlanQualStop(evalPlanQual *epq)
2001 EState *epqstate = epq->estate;
2002 MemoryContext oldcontext;
2004 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
2006 ExecEndNode(epq->planstate);
2008 ExecDropTupleTable(epqstate->es_tupleTable, true);
2009 epqstate->es_tupleTable = NULL;
2011 if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2013 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2014 epqstate->es_evTuple[epq->rti - 1] = NULL;
2017 MemoryContextSwitchTo(oldcontext);
2019 FreeExecutorState(epqstate);
2022 epq->planstate = NULL;