1 /*-------------------------------------------------------------------------
4 * top level executor interface routines
11 * The old ExecutorMain() has been replaced by ExecutorStart(),
12 * ExecutorRun() and ExecutorEnd()
14 * These three procedures are the external interfaces to the executor.
15 * In each case, the query descriptor is required as an argument.
17 * ExecutorStart() must be called at the beginning of execution of any
18 * query plan and ExecutorEnd() should always be called at the end of
19 * execution of a plan.
21 * ExecutorRun accepts direction and count arguments that specify whether
22 * the plan is to be executed forwards, backwards, and for how many tuples.
24 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
25 * Portions Copyright (c) 1994, Regents of the University of California
29 * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.206 2003/05/05 17:57:47 tgl Exp $
31 *-------------------------------------------------------------------------
35 #include "access/heapam.h"
36 #include "catalog/heap.h"
37 #include "catalog/namespace.h"
38 #include "commands/tablecmds.h"
39 #include "commands/trigger.h"
40 #include "executor/execdebug.h"
41 #include "executor/execdefs.h"
42 #include "miscadmin.h"
43 #include "optimizer/var.h"
44 #include "parser/parsetree.h"
45 #include "utils/acl.h"
46 #include "utils/lsyscache.h"
/*
 * Module-local state structs.
 *
 * execRowMark: per-relation bookkeeping for relations locked via
 * SELECT ... FOR UPDATE (see the es_rowMark list built in InitPlan and the
 * "ctid%u" junk-attribute handling in ExecutePlan).
 *
 * evalPlanQual: one level of EvalPlanQual (recheck-after-concurrent-update)
 * state; levels are chained via 'next' and recycled via 'free'.
 *
 * NOTE(review): this extraction has dropped most of both struct bodies
 * (field lists and closing braces) — only two evalPlanQual members are
 * visible here. Consult the full file before relying on layout.
 */
49 typedef struct execRowMark
56 typedef struct evalPlanQual
61 struct evalPlanQual *next; /* stack of active PlanQual plans */
62 struct evalPlanQual *free; /* list of free PlanQual plans */
/* Forward declarations for the static helpers defined later in this file. */
65 /* decls for local routines only used within this module */
66 static void InitPlan(QueryDesc *queryDesc);
67 static void initResultRelInfo(ResultRelInfo *resultRelInfo,
68 Index resultRelationIndex,
71 static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
74 ScanDirection direction,
75 DestReceiver *destfunc);
76 static void ExecSelect(TupleTableSlot *slot,
77 DestReceiver *destfunc,
79 static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
81 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
83 static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
85 static TupleTableSlot *EvalPlanQualNext(EState *estate);
86 static void EndEvalPlanQual(EState *estate);
87 static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
88 static void ExecCheckXactReadOnly(Query *parsetree, CmdType operation);
89 static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
90 evalPlanQual *priorepq);
91 static void EvalPlanQualStop(evalPlanQual *epq);
93 /* end of local decls */
96 /* ----------------------------------------------------------------
 * ExecutorStart
 *
99 * This routine must be called at the beginning of any execution of any
 * query plan.
 *
102 * Takes a QueryDesc previously created by CreateQueryDesc (it's not real
103 * clear why we bother to separate the two functions, but...). The tupDesc
104 * field of the QueryDesc is filled in to describe the tuples that will be
105 * returned, and the internal fields (estate and planstate) are set up.
107 * NB: the CurrentMemoryContext when this is called will become the parent
108 * of the per-query context used for this Executor invocation.
109 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — the return type, braces, the EState
 * declaration, and the InitPlan() call that the "Initialize the plan state
 * tree" comment refers to are elided from this view.
 */
112 ExecutorStart(QueryDesc *queryDesc)
115 MemoryContext oldcontext;
117 /* sanity checks: queryDesc must not be started already */
118 Assert(queryDesc != NULL);
119 Assert(queryDesc->estate == NULL);
122 * Build EState, switch into per-query memory context for startup.
124 estate = CreateExecutorState();
125 queryDesc->estate = estate;
127 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
130 * Fill in parameters, if any, from queryDesc
132 estate->es_param_list_info = queryDesc->params;
/* Executor-internal (PARAM_EXEC) params get a zeroed array sized by the plan. */
134 if (queryDesc->plantree->nParamExec > 0)
135 estate->es_param_exec_vals = (ParamExecData *)
136 palloc0(queryDesc->plantree->nParamExec * sizeof(ParamExecData));
138 estate->es_instrument = queryDesc->doInstrument;
141 * Make our own private copy of the current query snapshot data.
143 * This "freezes" our idea of which tuples are good and which are not for
144 * the life of this query, even if it outlives the current command and
 * snapshot.
147 estate->es_snapshot = CopyQuerySnapshot();
150 * Initialize the plan state tree
/* (presumably InitPlan(queryDesc) is called here — elided; verify in full source) */
154 MemoryContextSwitchTo(oldcontext);
157 /* ----------------------------------------------------------------
 * ExecutorRun
 *
160 * This is the main routine of the executor module. It accepts
161 * the query descriptor from the traffic cop and executes the
 * query plan.
 *
164 * ExecutorStart must have been called already.
166 * If direction is NoMovementScanDirection then nothing is done
167 * except to start up/shut down the destination. Otherwise,
168 * we retrieve up to 'count' tuples in the specified direction.
170 * Note: count = 0 is interpreted as no portal limit, i.e., run to
 * completion.
173 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — return type, braces, the estate/
 * operation/dest declarations, the full argument lists of the setup and
 * ExecutePlan calls, and the return statement are elided from this view.
 */
176 ExecutorRun(QueryDesc *queryDesc,
177 ScanDirection direction, long count)
182 DestReceiver *destfunc;
183 TupleTableSlot *result;
184 MemoryContext oldcontext;
187 Assert(queryDesc != NULL);
189 estate = queryDesc->estate;
191 Assert(estate != NULL);
194 * Switch into per-query memory context
196 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
199 * extract information from the query descriptor and the query
 * feature.
202 operation = queryDesc->operation;
203 dest = queryDesc->dest;
206 * If the transaction is read-only, we need to check if any writes
207 * are planned to non-temporary tables. This is done here at this
208 * rather late stage so that we can handle EXPLAIN vs. EXPLAIN
 * ANALYZE easily.
211 ExecCheckXactReadOnly(queryDesc->parsetree, operation);
214 * startup tuple receiver
216 estate->es_processed = 0;
217 estate->es_lastoid = InvalidOid;
219 destfunc = DestToFunction(dest);
220 (*destfunc->setup) (destfunc, operation, queryDesc->portalName,
/* run plan only when actual movement is requested; NoMovement just cycles the receiver */
226 if (direction == NoMovementScanDirection)
229 result = ExecutePlan(estate,
230 queryDesc->planstate,
239 (*destfunc->cleanup) (destfunc);
241 MemoryContextSwitchTo(oldcontext);
246 /* ----------------------------------------------------------------
 * ExecutorEnd
 *
249 * This routine must be called at the end of execution of any
 * query plan. It tears down the plan state, frees the per-query
 * memory context, and clears the now-dangling QueryDesc pointers.
251 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — return type, braces, and the EState
 * local declaration are elided from this view.
 */
254 ExecutorEnd(QueryDesc *queryDesc)
257 MemoryContext oldcontext;
260 Assert(queryDesc != NULL);
262 estate = queryDesc->estate;
264 Assert(estate != NULL);
267 * Switch into per-query memory context to run ExecEndPlan
269 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
271 ExecEndPlan(queryDesc->planstate, estate);
274 * Must switch out of context before destroying it
276 MemoryContextSwitchTo(oldcontext);
279 * Release EState and per-query memory context. This should release
280 * everything the executor has allocated.
282 FreeExecutorState(estate);
284 /* Reset queryDesc fields that no longer point to anything */
285 queryDesc->tupDesc = NULL;
286 queryDesc->estate = NULL;
287 queryDesc->planstate = NULL;
290 /* ----------------------------------------------------------------
 * ExecutorRewind
 *
293 * This routine may be called on an open queryDesc to rewind it
 * to the start, so that it can be run again from the beginning.
295 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — return type, braces, and the EState
 * local declaration are elided from this view.
 */
298 ExecutorRewind(QueryDesc *queryDesc)
301 MemoryContext oldcontext;
304 Assert(queryDesc != NULL);
306 estate = queryDesc->estate;
308 Assert(estate != NULL);
310 /* It's probably not sensible to rescan updating queries */
311 Assert(queryDesc->operation == CMD_SELECT);
314 * Switch into per-query memory context
316 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/* rescan the whole plan tree from the top; NULL = no new outer tuple */
321 ExecReScan(queryDesc->planstate, NULL);
323 MemoryContextSwitchTo(oldcontext);
/*
 * ExecCheckRTPerms
 *
329 * Check access permissions for all relations listed in a range table.
 *
 * Simply iterates the rangeTable list, delegating each entry to
 * ExecCheckRTEPerms (which elog(ERROR)s on failure).
 *
 * NOTE(review): extraction artifact — return type, braces, and the
 * 'lp' list-iterator declaration are elided from this view.
 */
332 ExecCheckRTPerms(List *rangeTable, CmdType operation)
336 foreach(lp, rangeTable)
338 RangeTblEntry *rte = lfirst(lp);
340 ExecCheckRTEPerms(rte, operation);
/*
 * ExecCheckRTEPerms
 *
346 * Check access permissions for a single RTE.
 *
 * Recurses into subquery RTEs; for plain relations, checks SELECT
 * permission when rte->checkForRead is set and the operation-appropriate
 * write permission (INSERT/UPDATE/DELETE) when rte->checkForWrite is set.
 * Raises via aclcheck_error() on any failure.
 *
 * NOTE(review): extraction artifact — return type, braces, the relOid/
 * userid declarations, the early-return for non-relation RTEs, and the
 * switch statement dispatching on 'operation' are elided from this view.
 */
349 ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
353 AclResult aclcheck_result;
356 * If it's a subquery, recursively examine its rangetable.
358 if (rte->rtekind == RTE_SUBQUERY)
360 ExecCheckRTPerms(rte->subquery->rtable, operation);
365 * Otherwise, only plain-relation RTEs need to be checked here.
366 * Function RTEs are checked by init_fcache when the function is prepared
367 * for execution. Join and special RTEs need no checks.
369 if (rte->rtekind != RTE_RELATION)
375 * userid to check as: current user unless we have a setuid
 * indication (checkAsUser), e.g. for a view owner.
378 * Note: GetUserId() is presently fast enough that there's no harm in
379 * calling it separately for each RTE. If that stops being true, we
380 * could call it once in ExecCheckRTPerms and pass the userid down
381 * from there. But for now, no need for the extra clutter.
383 userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
385 #define CHECK(MODE) pg_class_aclcheck(relOid, userid, MODE)
387 if (rte->checkForRead)
389 aclcheck_result = CHECK(ACL_SELECT);
390 if (aclcheck_result != ACLCHECK_OK)
391 aclcheck_error(aclcheck_result, get_rel_name(relOid));
394 if (rte->checkForWrite)
397 * Note: write access in a SELECT context means SELECT FOR UPDATE.
398 * Right now we don't distinguish that from true update as far as
399 * permissions checks are concerned.
/* (the switch (operation) framing around these cases is elided) */
404 aclcheck_result = CHECK(ACL_INSERT);
408 aclcheck_result = CHECK(ACL_UPDATE);
411 aclcheck_result = CHECK(ACL_DELETE);
414 elog(ERROR, "ExecCheckRTEPerms: bogus operation %d",
416 aclcheck_result = ACLCHECK_OK; /* keep compiler quiet */
419 if (aclcheck_result != ACLCHECK_OK)
420 aclcheck_error(aclcheck_result, get_rel_name(relOid));
/*
 * ExecCheckXactReadOnly
 *
 * In a read-only transaction, reject any statement that would write to a
 * non-temporary table: CREATE TABLE AS / SELECT INTO, and INSERT/UPDATE/
 * DELETE whose target is not in a temp namespace. Errors out via elog.
 *
 * NOTE(review): extraction artifact — return type, braces, the guard on
 * XactReadOnly itself, the 'lp' iterator declaration, and the goto/label
 * control flow that reaches the final elog are elided from this view.
 */
425 ExecCheckXactReadOnly(Query *parsetree, CmdType operation)
430 /* CREATE TABLE AS or SELECT INTO */
431 if (operation == CMD_SELECT && parsetree->into != NULL)
434 if (operation == CMD_DELETE || operation == CMD_INSERT
435 || operation == CMD_UPDATE)
439 foreach(lp, parsetree->rtable)
441 RangeTblEntry *rte = lfirst(lp);
443 if (rte->rtekind != RTE_RELATION)
446 if (!rte->checkForWrite)
/* writes to temp tables are allowed even in read-only transactions */
449 if (isTempNamespace(get_rel_namespace(rte->relid)))
459 elog(ERROR, "transaction is read-only");
463 /* ----------------------------------------------------------------
 * InitPlan
 *
466 * Initializes the query plan: open files, allocate storage
467 * and start up the rule manager
 *
 * Steps visible here: permission checks; result-relation setup (single or
 * inheritance-expanded list); SELECT INTO detection; FOR UPDATE row-mark
 * locking; tuple-table sizing; ExecInitNode over the plan tree; junk-filter
 * setup; and creation/opening of the SELECT INTO target relation.
468 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — this function is heavily elided:
 * return type, braces, many local declarations (rangeTable, tupType,
 * do_select_into, erm, l, i, j, intoName, namespaceId, aclresult, tupdesc,
 * intoRelationId, etc.), else-branches, and several argument lists are
 * missing from this view. Comments below annotate only what is visible.
 */
471 InitPlan(QueryDesc *queryDesc)
473 CmdType operation = queryDesc->operation;
474 Query *parseTree = queryDesc->parsetree;
475 Plan *plan = queryDesc->plantree;
476 EState *estate = queryDesc->estate;
477 PlanState *planstate;
479 Relation intoRelationDesc;
484 * Do permissions checks. It's sufficient to examine the query's
485 * top rangetable here --- subplan RTEs will be checked during
 * ExecInitSubPlan().
488 ExecCheckRTPerms(parseTree->rtable, operation);
491 * get information from query descriptor
493 rangeTable = parseTree->rtable;
496 * initialize the node's execution state
498 estate->es_range_table = rangeTable;
501 * if there is a result relation, initialize result relation stuff
503 if (parseTree->resultRelation != 0 && operation != CMD_SELECT)
505 List *resultRelations = parseTree->resultRelations;
506 int numResultRelations;
507 ResultRelInfo *resultRelInfos;
509 if (resultRelations != NIL)
512 * Multiple result relations (due to inheritance)
513 * parseTree->resultRelations identifies them all
515 ResultRelInfo *resultRelInfo;
517 numResultRelations = length(resultRelations);
518 resultRelInfos = (ResultRelInfo *)
519 palloc(numResultRelations * sizeof(ResultRelInfo));
520 resultRelInfo = resultRelInfos;
521 while (resultRelations != NIL)
523 initResultRelInfo(resultRelInfo,
524 lfirsti(resultRelations),
528 resultRelations = lnext(resultRelations);
534 * Single result relation identified by
535 * parseTree->resultRelation
537 numResultRelations = 1;
538 resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
539 initResultRelInfo(resultRelInfos,
540 parseTree->resultRelation,
545 estate->es_result_relations = resultRelInfos;
546 estate->es_num_result_relations = numResultRelations;
547 /* Initialize to first or only result rel */
548 estate->es_result_relation_info = resultRelInfos;
553 * if no result relation, then set state appropriately
555 estate->es_result_relations = NULL;
556 estate->es_num_result_relations = 0;
557 estate->es_result_relation_info = NULL;
561 * Detect whether we're doing SELECT INTO. If so, set the force_oids
562 * flag appropriately so that the plan tree will be initialized with
563 * the correct tuple descriptors.
565 do_select_into = false;
567 if (operation == CMD_SELECT && parseTree->into != NULL)
569 do_select_into = true;
571 * For now, always create OIDs in SELECT INTO; this is for backwards
572 * compatibility with pre-7.3 behavior. Eventually we might want
573 * to allow the user to choose.
575 estate->es_force_oids = true;
579 * Have to lock relations selected for update
581 estate->es_rowMark = NIL;
582 if (parseTree->rowMarks != NIL)
586 foreach(l, parseTree->rowMarks)
588 Index rti = lfirsti(l);
589 Oid relid = getrelid(rti, rangeTable);
/* RowShareLock is the lock level taken by SELECT ... FOR UPDATE */
593 relation = heap_open(relid, RowShareLock);
594 erm = (execRowMark *) palloc(sizeof(execRowMark));
595 erm->relation = relation;
/* resname names the junk ctid column for this rangetable index */
597 snprintf(erm->resname, sizeof(erm->resname), "ctid%u", rti);
598 estate->es_rowMark = lappend(estate->es_rowMark, erm);
603 * initialize the executor "tuple" table. We need slots for all the
604 * plan nodes, plus possibly output slots for the junkfilter(s). At
605 * this point we aren't sure if we need junkfilters, so just add slots
606 * for them unconditionally.
609 int nSlots = ExecCountSlotsNode(plan);
611 if (parseTree->resultRelations != NIL)
612 nSlots += length(parseTree->resultRelations);
615 estate->es_tupleTable = ExecCreateTupleTable(nSlots);
618 /* mark EvalPlanQual not active */
619 estate->es_topPlan = plan;
620 estate->es_evalPlanQual = NULL;
621 estate->es_evTupleNull = NULL;
622 estate->es_evTuple = NULL;
623 estate->es_useEvalPlan = false;
626 * initialize the private state information for all the nodes in the
627 * query tree. This opens files, allocates storage and leaves us
628 * ready to start processing tuples.
630 planstate = ExecInitNode(plan, estate);
633 * Get the tuple descriptor describing the type of tuples to return.
634 * (this is especially important if we are creating a relation with
 * "SELECT INTO")
637 tupType = ExecGetResultType(planstate);
640 * Initialize the junk filter if needed. SELECT and INSERT queries need a
641 * filter if there are any junk attrs in the tlist. INSERT and SELECT
642 * INTO also need a filter if the top plan node is a scan node that's not
643 * doing projection (else we'll be scribbling on the scan tuple!) UPDATE
644 * and DELETE always need a filter, since there's always a junk 'ctid'
645 * attribute present --- no need to look first.
648 bool junk_filter_needed = false;
/* (switch over 'operation' selecting these checks is elided) */
655 foreach(tlist, plan->targetlist)
657 TargetEntry *tle = (TargetEntry *) lfirst(tlist);
659 if (tle->resdom->resjunk)
661 junk_filter_needed = true;
665 if (!junk_filter_needed &&
666 (operation == CMD_INSERT || do_select_into))
668 if (IsA(planstate, SeqScanState) ||
669 IsA(planstate, IndexScanState) ||
670 IsA(planstate, TidScanState) ||
671 IsA(planstate, SubqueryScanState) ||
672 IsA(planstate, FunctionScanState))
674 if (planstate->ps_ProjInfo == NULL)
675 junk_filter_needed = true;
681 junk_filter_needed = true;
687 if (junk_filter_needed)
690 * If there are multiple result relations, each one needs its
691 * own junk filter. Note this is only possible for
692 * UPDATE/DELETE, so we can't be fooled by some needing a
693 * filter and some not.
695 if (parseTree->resultRelations != NIL)
697 PlanState **appendplans;
699 ResultRelInfo *resultRelInfo;
702 /* Top plan had better be an Append here. */
703 Assert(IsA(plan, Append));
704 Assert(((Append *) plan)->isTarget);
705 Assert(IsA(planstate, AppendState));
706 appendplans = ((AppendState *) planstate)->appendplans;
707 as_nplans = ((AppendState *) planstate)->as_nplans;
708 Assert(as_nplans == estate->es_num_result_relations);
709 resultRelInfo = estate->es_result_relations;
710 for (i = 0; i < as_nplans; i++)
712 PlanState *subplan = appendplans[i];
715 j = ExecInitJunkFilter(subplan->plan->targetlist,
716 ExecGetResultType(subplan),
717 ExecAllocTableSlot(estate->es_tupleTable));
718 resultRelInfo->ri_junkFilter = j;
723 * Set active junkfilter too; at this point ExecInitAppend
724 * has already selected an active result relation...
726 estate->es_junkFilter =
727 estate->es_result_relation_info->ri_junkFilter;
731 /* Normal case with just one JunkFilter */
734 j = ExecInitJunkFilter(planstate->plan->targetlist,
736 ExecAllocTableSlot(estate->es_tupleTable));
737 estate->es_junkFilter = j;
738 if (estate->es_result_relation_info)
739 estate->es_result_relation_info->ri_junkFilter = j;
741 /* For SELECT, want to return the cleaned tuple type */
742 if (operation == CMD_SELECT)
743 tupType = j->jf_cleanTupType;
747 estate->es_junkFilter = NULL;
751 * If doing SELECT INTO, initialize the "into" relation. We must wait
752 * till now so we have the "clean" result tuple type to create the
 * relation with.
755 intoRelationDesc = (Relation) NULL;
766 * find namespace to create in, check permissions
768 intoName = parseTree->into->relname;
769 namespaceId = RangeVarGetCreationNamespace(parseTree->into);
771 aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(),
773 if (aclresult != ACLCHECK_OK)
774 aclcheck_error(aclresult, get_namespace_name(namespaceId));
777 * have to copy tupType to get rid of constraints
779 tupdesc = CreateTupleDescCopy(tupType);
781 intoRelationId = heap_create_with_catalog(intoName,
787 allowSystemTableMods);
789 FreeTupleDesc(tupdesc);
792 * Advance command counter so that the newly-created
793 * relation's catalog tuples will be visible to heap_open.
795 CommandCounterIncrement();
798 * If necessary, create a TOAST table for the into
799 * relation. Note that AlterTableCreateToastTable ends
800 * with CommandCounterIncrement(), so that the TOAST table
801 * will be visible for insertion.
803 AlterTableCreateToastTable(intoRelationId, true);
806 * And open the constructed table for writing.
808 intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock);
811 estate->es_into_relation_descriptor = intoRelationDesc;
/* hand results back through the QueryDesc for the caller of ExecutorStart */
813 queryDesc->tupDesc = tupType;
814 queryDesc->planstate = planstate;
/*
 * initResultRelInfo
 *
818 * Initialize ResultRelInfo data for one result relation
 *
 * Opens the target relation with RowExclusiveLock, rejects relation kinds
 * that cannot be updated directly (sequences, toast tables, views), zeroes
 * and fills the ResultRelInfo, and opens the relation's indexes except for
 * DELETE (which never adds index entries).
 *
 * NOTE(review): extraction artifact — return type, braces, the rangeTable/
 * operation parameters' declarations, and the default/RELKIND_VIEW case
 * labels of the switch are elided from this view.
 */
821 initResultRelInfo(ResultRelInfo *resultRelInfo,
822 Index resultRelationIndex,
826 Oid resultRelationOid;
827 Relation resultRelationDesc;
829 resultRelationOid = getrelid(resultRelationIndex, rangeTable);
830 resultRelationDesc = heap_open(resultRelationOid, RowExclusiveLock);
832 switch (resultRelationDesc->rd_rel->relkind)
834 case RELKIND_SEQUENCE:
835 elog(ERROR, "You can't change sequence relation %s",
836 RelationGetRelationName(resultRelationDesc));
838 case RELKIND_TOASTVALUE:
839 elog(ERROR, "You can't change toast relation %s",
840 RelationGetRelationName(resultRelationDesc));
843 elog(ERROR, "You can't change view relation %s",
844 RelationGetRelationName(resultRelationDesc));
848 MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
849 resultRelInfo->type = T_ResultRelInfo;
850 resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
851 resultRelInfo->ri_RelationDesc = resultRelationDesc;
852 resultRelInfo->ri_NumIndices = 0;
853 resultRelInfo->ri_IndexRelationDescs = NULL;
854 resultRelInfo->ri_IndexRelationInfo = NULL;
855 /* make a copy so as not to depend on relcache info not changing... */
856 resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
857 resultRelInfo->ri_TrigFunctions = NULL;
858 resultRelInfo->ri_ConstraintExprs = NULL;
859 resultRelInfo->ri_junkFilter = NULL;
862 * If there are indices on the result relation, open them and save
863 * descriptors in the result relation info, so that we can add new
864 * index entries for the tuples we add/update. We need not do this
865 * for a DELETE, however, since deletion doesn't affect indexes.
867 if (resultRelationDesc->rd_rel->relhasindex &&
868 operation != CMD_DELETE)
869 ExecOpenIndices(resultRelInfo);
872 /* ----------------------------------------------------------------
 * ExecEndPlan
 *
875 * Cleans up the query plan -- closes files and frees up storage
 *
877 * NOTE: we are no longer very worried about freeing storage per se
878 * in this code; FreeExecutorState should be guaranteed to release all
879 * memory that needs to be released. What we are worried about doing
880 * is closing relations and dropping buffer pins. Thus, for example,
881 * tuple tables must be cleared or dropped to ensure pins are released.
882 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — return type, braces, the i/l local
 * declarations, and the resultRelInfo++ loop increment are elided from
 * this view.
 */
885 ExecEndPlan(PlanState *planstate, EState *estate)
887 ResultRelInfo *resultRelInfo;
892 * shut down any PlanQual processing we were doing
894 if (estate->es_evalPlanQual != NULL)
895 EndEvalPlanQual(estate);
898 * shut down the node-type-specific query processing
900 ExecEndNode(planstate);
903 * destroy the executor "tuple" table.
905 ExecDropTupleTable(estate->es_tupleTable, true);
906 estate->es_tupleTable = NULL;
909 * close the result relation(s) if any, but hold locks until xact
 * commit (hence NoLock below).
912 resultRelInfo = estate->es_result_relations;
913 for (i = estate->es_num_result_relations; i > 0; i--)
915 /* Close indices and then the relation itself */
916 ExecCloseIndices(resultRelInfo);
917 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
922 * close the "into" relation if necessary, again keeping lock
924 if (estate->es_into_relation_descriptor != NULL)
925 heap_close(estate->es_into_relation_descriptor, NoLock);
928 * close any relations selected FOR UPDATE, again keeping locks
930 foreach(l, estate->es_rowMark)
932 execRowMark *erm = lfirst(l);
934 heap_close(erm->relation, NoLock);
938 /* ----------------------------------------------------------------
 * ExecutePlan
 *
941 * processes the query plan to retrieve 'numberTuples' tuples in the
942 * direction specified.
944 * Retrieves all tuples if numberTuples is 0
946 * result is either a slot containing the last tuple in the case
947 * of a SELECT or NULL otherwise.
949 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user sees it.
951 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — this function is heavily elided:
 * the operation/numberTuples parameters' declarations, braces, the main
 * for(;;) loop header, the switch framing around the BS/AS trigger calls
 * and the operation dispatch, the lmark: goto/label, several local
 * declarations (datum, isNull, buffer, test, newTuple, l, i), and the
 * return statement are missing from this view.
 */
953 static TupleTableSlot *
954 ExecutePlan(EState *estate,
955 PlanState *planstate,
958 ScanDirection direction,
959 DestReceiver *destfunc)
961 JunkFilter *junkfilter;
962 TupleTableSlot *slot;
963 ItemPointer tupleid = NULL;
964 ItemPointerData tuple_ctid;
965 long current_tuple_count;
966 TupleTableSlot *result;
969 * initialize local variables
972 current_tuple_count = 0;
978 estate->es_direction = direction;
981 * Process BEFORE EACH STATEMENT triggers
/* (dispatch on 'operation' selecting one of these calls is elided) */
986 ExecBSUpdateTriggers(estate, estate->es_result_relation_info);
989 ExecBSDeleteTriggers(estate, estate->es_result_relation_info);
992 ExecBSInsertTriggers(estate, estate->es_result_relation_info);
1000 * Loop until we've processed the proper number of tuples from the
 * plan.
1006 /* Reset the per-output-tuple exprcontext */
1007 ResetPerTupleExprContext(estate);
1010 * Execute the plan and obtain a tuple
/* es_useEvalPlan: a concurrent update triggered EvalPlanQual rechecking */
1013 if (estate->es_useEvalPlan)
1015 slot = EvalPlanQualNext(estate);
1016 if (TupIsNull(slot))
1017 slot = ExecProcNode(planstate);
1020 slot = ExecProcNode(planstate);
1023 * if the tuple is null, then we assume there is nothing more to
1024 * process so we just return null...
1026 if (TupIsNull(slot))
1033 * if we have a junk filter, then project a new tuple with the
 * junk removed.
1036 * Store this new "clean" tuple in the junkfilter's resultSlot.
1037 * (Formerly, we stored it back over the "dirty" tuple, which is
1038 * WRONG because that tuple slot has the wrong descriptor.)
1040 * Also, extract all the junk information we need.
1042 if ((junkfilter = estate->es_junkFilter) != (JunkFilter *) NULL)
1049 * extract the 'ctid' junk attribute.
1051 if (operation == CMD_UPDATE || operation == CMD_DELETE)
1053 if (!ExecGetJunkAttribute(junkfilter,
1058 elog(ERROR, "ExecutePlan: NO (junk) `ctid' was found!");
1060 /* shouldn't ever get a null result... */
1062 elog(ERROR, "ExecutePlan: (junk) `ctid' is NULL!");
1064 tupleid = (ItemPointer) DatumGetPointer(datum);
1065 tuple_ctid = *tupleid; /* make sure we don't free the
 * ctid!! */
1067 tupleid = &tuple_ctid;
1069 else if (estate->es_rowMark != NIL)
/* SELECT FOR UPDATE: mark each row-marked relation's tuple for update */
1074 foreach(l, estate->es_rowMark)
1076 execRowMark *erm = lfirst(l);
1078 HeapTupleData tuple;
1079 TupleTableSlot *newSlot;
1082 if (!ExecGetJunkAttribute(junkfilter,
1087 elog(ERROR, "ExecutePlan: NO (junk) `%s' was found!",
1090 /* shouldn't ever get a null result... */
1092 elog(ERROR, "ExecutePlan: (junk) `%s' is NULL!",
1095 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
1096 test = heap_mark4update(erm->relation, &tuple, &buffer,
1097 estate->es_snapshot->curcid);
1098 ReleaseBuffer(buffer);
/* (switch (test) framing around these cases is elided) */
1101 case HeapTupleSelfUpdated:
1102 /* treat it as deleted; do not process */
1105 case HeapTupleMayBeUpdated:
1108 case HeapTupleUpdated:
1109 if (XactIsoLevel == XACT_SERIALIZABLE)
1110 elog(ERROR, "Can't serialize access due to concurrent update");
1111 if (!(ItemPointerEquals(&(tuple.t_self),
1112 (ItemPointer) DatumGetPointer(datum))))
1114 newSlot = EvalPlanQual(estate, erm->rti, &(tuple.t_self));
1115 if (!(TupIsNull(newSlot)))
1118 estate->es_useEvalPlan = true;
1124 * if tuple was deleted or PlanQual failed for
1125 * updated tuple - we must not return this
 * tuple; continue with the next one.
1131 elog(ERROR, "Unknown status %u from heap_mark4update", test);
1138 * Finally create a new "clean" tuple with all junk attributes
 * removed.
1141 newTuple = ExecRemoveJunk(junkfilter, slot);
1143 slot = ExecStoreTuple(newTuple, /* tuple to store */
1144 junkfilter->jf_resultSlot, /* dest slot */
1145 InvalidBuffer, /* this tuple has no
 * buffer */
1147 true); /* tuple should be pfreed */
1151 * now that we have a tuple, do the appropriate thing with it..
1152 * either return it to the user, add it to a relation someplace,
1153 * delete it from a relation, or modify some of its attributes.
/* (switch (operation) framing around these calls is elided) */
1158 ExecSelect(slot, /* slot containing tuple */
1159 destfunc, /* destination's tuple-receiver
 * function */
1166 ExecInsert(slot, tupleid, estate);
1171 ExecDelete(slot, tupleid, estate);
1176 ExecUpdate(slot, tupleid, estate);
1181 elog(LOG, "ExecutePlan: unknown operation in queryDesc");
1187 * check our tuple count.. if we've processed the proper number
1188 * then quit, else loop again and process more tuples. Zero
1189 * numberTuples means no limit.
1191 current_tuple_count++;
1192 if (numberTuples && numberTuples == current_tuple_count)
1197 * Process AFTER EACH STATEMENT triggers
/* (dispatch on 'operation' selecting one of these calls is elided) */
1202 ExecASUpdateTriggers(estate, estate->es_result_relation_info);
1205 ExecASDeleteTriggers(estate, estate->es_result_relation_info);
1208 ExecASInsertTriggers(estate, estate->es_result_relation_info);
1216 * here, result is either a slot containing a tuple in the case of a
1217 * SELECT or NULL otherwise.
1222 /* ----------------------------------------------------------------
 * ExecSelect
 *
1225 * SELECTs are easy.. we just pass the tuple to the appropriate
1226 * print function. The only complexity is when we do a
1227 * "SELECT INTO", in which case we insert the tuple into
1228 * the appropriate relation (note: this is a newly created relation
1229 * so we don't need to worry about indices or locks.)
1230 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — return type, braces, the estate
 * parameter's declaration, the tuple/attrtype local declarations, and the
 * line reading the tuple out of the slot are elided from this view.
 */
1233 ExecSelect(TupleTableSlot *slot,
1234 DestReceiver *destfunc,
1241 * get the heap tuple out of the tuple table slot
1244 attrtype = slot->ttc_tupleDescriptor;
1247 * insert the tuple into the "into relation"
1249 if (estate->es_into_relation_descriptor != NULL)
1251 heap_insert(estate->es_into_relation_descriptor, tuple,
1252 estate->es_snapshot->curcid);
1257 * send the tuple to the front end (or the screen)
1259 (*destfunc->receiveTuple) (tuple, attrtype, destfunc);
/* count the tuple toward the command's processed total */
1261 (estate->es_processed)++;
1264 /* ----------------------------------------------------------------
 * ExecInsert
 *
1267 * INSERTs are trickier.. we have to insert the tuple into
1268 * the base relation and insert appropriate tuples into the
 * index relations.
1270 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — return type, braces, the estate
 * parameter's declaration, the tuple/newId/newtuple/numIndices local
 * declarations, the line extracting the tuple from the slot, and the
 * guard around ExecInsertIndexTuples are elided from this view.
 */
1273 ExecInsert(TupleTableSlot *slot,
1274 ItemPointer tupleid,
1278 ResultRelInfo *resultRelInfo;
1279 Relation resultRelationDesc;
1284 * get the heap tuple out of the tuple table slot
1289 * get information on the (current) result relation
1291 resultRelInfo = estate->es_result_relation_info;
1292 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1294 /* BEFORE ROW INSERT Triggers */
1295 if (resultRelInfo->ri_TrigDesc &&
1296 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
1300 newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple);
1302 if (newtuple == NULL) /* "do nothing" */
1305 if (newtuple != tuple) /* modified by Trigger(s) */
1308 * Insert modified tuple into tuple table slot, replacing the
1309 * original. We assume that it was allocated in per-tuple
1310 * memory context, and therefore will go away by itself. The
1311 * tuple table slot should not try to clear it.
1313 ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1319 * Check the constraints of the tuple
1321 if (resultRelationDesc->rd_att->constr)
1322 ExecConstraints("ExecInsert", resultRelInfo, slot, estate);
/* the actual heap insertion; returns the new tuple's OID */
1327 newId = heap_insert(resultRelationDesc, tuple,
1328 estate->es_snapshot->curcid);
1331 (estate->es_processed)++;
1332 estate->es_lastoid = newId;
1333 setLastTid(&(tuple->t_self));
1338 * Note: heap_insert adds a new tuple to a relation. As a side effect,
1339 * the tupleid of the new tuple is placed in the new tuple's t_ctid
 * field.
1342 numIndices = resultRelInfo->ri_NumIndices;
1344 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1346 /* AFTER ROW INSERT Triggers */
1347 ExecARInsertTriggers(estate, resultRelInfo, tuple);
1350 /* ----------------------------------------------------------------
 * ExecDelete
 *
1353 * DELETE is like UPDATE, we delete the tuple and its
 * index tuples (but see the vacuum-daemon note at the bottom).
1355 * ----------------------------------------------------------------
 *
 * NOTE(review): extraction artifact — return type, braces, the estate
 * parameter's declaration, the result/dodelete local declarations, the
 * ldelete: goto/label retry loop around heap_delete, and the switch
 * framing over 'result' are elided from this view.
 */
1358 ExecDelete(TupleTableSlot *slot,
1359 ItemPointer tupleid,
1362 ResultRelInfo *resultRelInfo;
1363 Relation resultRelationDesc;
1364 ItemPointerData ctid;
1368 * get information on the (current) result relation
1370 resultRelInfo = estate->es_result_relation_info;
1371 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1373 /* BEFORE ROW DELETE Triggers */
1374 if (resultRelInfo->ri_TrigDesc &&
1375 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
1379 dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid,
1380 estate->es_snapshot->curcid);
1382 if (!dodelete) /* "do nothing" */
/* the actual heap deletion; 'ctid' receives the successor tuple's TID */
1390 result = heap_delete(resultRelationDesc, tupleid,
1392 estate->es_snapshot->curcid);
1395 case HeapTupleSelfUpdated:
1396 /* already deleted by self; nothing to do */
1399 case HeapTupleMayBeUpdated:
1402 case HeapTupleUpdated:
1403 if (XactIsoLevel == XACT_SERIALIZABLE)
1404 elog(ERROR, "Can't serialize access due to concurrent update")
;
1405 else if (!(ItemPointerEquals(tupleid, &ctid)))
1407 TupleTableSlot *epqslot = EvalPlanQual(estate,
1408 resultRelInfo->ri_RangeTableIndex, &ctid);
1410 if (!TupIsNull(epqslot))
1416 /* tuple already deleted; nothing to do */
1420 elog(ERROR, "Unknown status %u from heap_delete", result);
1425 (estate->es_processed)++;
1428 * Note: Normally one would think that we have to delete index tuples
1429 * associated with the heap tuple now..
1431 * ... but in POSTGRES, we have no need to do this because the vacuum
1432 * daemon automatically opens an index scan and deletes index tuples
1433 * when it finds deleted heap tuples. -cim 9/27/89
1436 /* AFTER ROW DELETE Triggers */
1437 ExecARDeleteTriggers(estate, resultRelInfo, tupleid);
/*
 * NOTE(review): lossy extraction — original line numbers are fused into the
 * code lines and several lines (the "lreplace" retry label, switch(result)
 * header, braces) are missing from this sample.  Annotations cover the
 * visible logic only.
 *
 * ExecUpdate: replace the tuple identified by tupleid with the tuple in
 * 'slot', running BEFORE/AFTER ROW UPDATE triggers, constraint checks,
 * and EvalPlanQual rechecking for concurrent updates.
 */
1440 /* ----------------------------------------------------------------
1443 * note: we can't run UPDATE queries with transactions
1444 * off because UPDATEs are actually INSERTs and our
1445 * scan will mistakenly loop forever, updating the tuple
1446 * it just inserted.. This should be fixed but until it
1447 * is, we don't want to get stuck in an infinite loop
1448 * which corrupts your database..
1449 * ----------------------------------------------------------------
1452 ExecUpdate(TupleTableSlot *slot,
1453 ItemPointer tupleid,
1457 ResultRelInfo *resultRelInfo;
1458 Relation resultRelationDesc;
1459 ItemPointerData ctid;
1464 * abort the operation if not running transactions
1466 if (IsBootstrapProcessingMode())
1468 elog(WARNING, "ExecUpdate: UPDATE can't run without transactions");
1473 * get the heap tuple out of the tuple table slot
1478 * get information on the (current) result relation
1480 resultRelInfo = estate->es_result_relation_info;
1481 resultRelationDesc = resultRelInfo->ri_RelationDesc;
1483 /* BEFORE ROW UPDATE Triggers */
1484 if (resultRelInfo->ri_TrigDesc &&
1485 resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
/* trigger may return NULL ("do nothing") or a replacement tuple */
1489 newtuple = ExecBRUpdateTriggers(estate, resultRelInfo,
1491 estate->es_snapshot->curcid);
1493 if (newtuple == NULL) /* "do nothing" */
1496 if (newtuple != tuple) /* modified by Trigger(s) */
1499 * Insert modified tuple into tuple table slot, replacing the
1500 * original. We assume that it was allocated in per-tuple
1501 * memory context, and therefore will go away by itself. The
1502 * tuple table slot should not try to clear it.
1504 ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
1510 * Check the constraints of the tuple
1512 * If we generate a new candidate tuple after EvalPlanQual testing, we
1513 * must loop back here and recheck constraints. (We don't need to
1514 * redo triggers, however. If there are any BEFORE triggers then
1515 * trigger.c will have done mark4update to lock the correct tuple, so
1516 * there's no need to do them again.)
1519 if (resultRelationDesc->rd_att->constr)
1520 ExecConstraints("ExecUpdate", resultRelInfo, slot, estate);
1523 * replace the heap tuple
1525 result = heap_update(resultRelationDesc, tupleid, tuple,
1527 estate->es_snapshot->curcid);
1530 case HeapTupleSelfUpdated:
1531 /* already deleted by self; nothing to do */
1534 case HeapTupleMayBeUpdated:
1537 case HeapTupleUpdated:
/* concurrent update: SERIALIZABLE fails, READ COMMITTED rechecks via EPQ */
1538 if (XactIsoLevel == XACT_SERIALIZABLE)
1539 elog(ERROR, "Can't serialize access due to concurrent update");
1540 else if (!(ItemPointerEquals(tupleid, &ctid)))
1542 TupleTableSlot *epqslot = EvalPlanQual(estate,
1543 resultRelInfo->ri_RangeTableIndex, &ctid);
1545 if (!TupIsNull(epqslot))
/* strip junk attrs from the rechecked row and retry the update with it */
1548 tuple = ExecRemoveJunk(estate->es_junkFilter, epqslot);
1549 slot = ExecStoreTuple(tuple,
1550 estate->es_junkFilter->jf_resultSlot,
1551 InvalidBuffer, true);
1555 /* tuple already deleted; nothing to do */
1559 elog(ERROR, "Unknown status %u from heap_update", result);
/* bump processed-row count for the command tag */
1564 (estate->es_processed)++;
1567 * Note: instead of having to update the old index tuples associated
1568 * with the heap tuple, all we do is form and insert new index tuples.
1569 * This is because UPDATEs are actually DELETEs and INSERTs and index
1570 * tuple deletion is done automagically by the vacuum daemon. All we
1571 * do is insert new index tuples. -cim 9/27/89
1577 * heap_update updates a tuple in the base relation by invalidating it
1578 * and then inserting a new tuple to the relation. As a side effect,
1579 * the tupleid of the new tuple is placed in the new tuple's t_ctid
1580 * field. So we now insert index tuples using the new tupleid stored
1584 numIndices = resultRelInfo->ri_NumIndices;
1586 ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
1588 /* AFTER ROW UPDATE Triggers */
1589 ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple);
/*
 * NOTE(review): lossy extraction — original line numbers embedded in the code
 * lines; declarations of the loop index and 'qual' are missing from this view.
 *
 * ExecRelCheck: evaluate the relation's CHECK constraints against the tuple
 * in 'slot'.  Returns the name of the first failing constraint, or NULL if
 * all constraints pass.
 */
1593 ExecRelCheck(ResultRelInfo *resultRelInfo,
1594 TupleTableSlot *slot, EState *estate)
1596 Relation rel = resultRelInfo->ri_RelationDesc;
1597 int ncheck = rel->rd_att->constr->num_check;
1598 ConstrCheck *check = rel->rd_att->constr->check;
1599 ExprContext *econtext;
1600 MemoryContext oldContext;
1605 * If first time through for this result relation, build expression
1606 * nodetrees for rel's constraint expressions. Keep them in the
1607 * per-query memory context so they'll survive throughout the query.
1609 if (resultRelInfo->ri_ConstraintExprs == NULL)
/* build once, cached in ri_ConstraintExprs for later tuples */
1611 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1612 resultRelInfo->ri_ConstraintExprs =
1613 (List **) palloc(ncheck * sizeof(List *));
1614 for (i = 0; i < ncheck; i++)
/* ccbin holds the serialized constraint expression tree */
1616 qual = (List *) stringToNode(check[i].ccbin);
1617 resultRelInfo->ri_ConstraintExprs[i] = (List *)
1618 ExecPrepareExpr((Expr *) qual, estate);
1620 MemoryContextSwitchTo(oldContext);
1624 * We will use the EState's per-tuple context for evaluating
1625 * constraint expressions (creating it if it's not already there).
1627 econtext = GetPerTupleExprContext(estate);
1629 /* Arrange for econtext's scan tuple to be the tuple under test */
1630 econtext->ecxt_scantuple = slot;
1632 /* And evaluate the constraints */
1633 for (i = 0; i < ncheck; i++)
1635 qual = resultRelInfo->ri_ConstraintExprs[i];
1638 * NOTE: SQL92 specifies that a NULL result from a constraint
1639 * expression is not to be treated as a failure. Therefore, tell
1640 * ExecQual to return TRUE for NULL.
1642 if (!ExecQual(qual, econtext, true))
1643 return check[i].ccname;
1646 /* NULL result means no error */
1647 return (char *) NULL;
/*
 * NOTE(review): lossy extraction — original line numbers embedded in code
 * lines; braces and some declarations are missing from this view.
 *
 * ExecConstraints: enforce NOT NULL and CHECK constraints on the tuple in
 * 'slot' before it is stored; elog(ERROR) with the caller's name on any
 * violation.
 */
1651 ExecConstraints(const char *caller, ResultRelInfo *resultRelInfo,
1652 TupleTableSlot *slot, EState *estate)
1654 Relation rel = resultRelInfo->ri_RelationDesc;
1655 HeapTuple tuple = slot->val;
1656 TupleConstr *constr = rel->rd_att->constr;
1660 if (constr->has_not_null)
/* scan every attribute (1-based attnums) for a NULL in a NOT NULL column */
1662 int natts = rel->rd_att->natts;
1665 for (attrChk = 1; attrChk <= natts; attrChk++)
1667 if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
1668 heap_attisnull(tuple, attrChk))
1669 elog(ERROR, "%s: Fail to add null value in not null attribute %s",
1670 caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
1674 if (constr->num_check > 0)
/* ExecRelCheck returns the failing constraint's name, or NULL if all pass */
1678 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1679 elog(ERROR, "%s: rejected due to CHECK constraint \"%s\" on \"%s\"",
1680 caller, failed, RelationGetRelationName(rel));
/*
 * NOTE(review): lossy extraction — original line numbers embedded in the code
 * lines; loop labels, several assignments and braces are missing from this
 * view.  Annotations describe visible logic only; see the executor README
 * for the full EvalPlanQual protocol.
 */
1685 * Check a modified tuple to see if we want to process its updated version
1686 * under READ COMMITTED rules.
1688 * See backend/executor/README for some info about how this works.
1691 EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
1696 HeapTupleData tuple;
1697 HeapTuple copyTuple = NULL;
1703 * find relation containing target tuple
1705 if (estate->es_result_relation_info != NULL &&
1706 estate->es_result_relation_info->ri_RangeTableIndex == rti)
1707 relation = estate->es_result_relation_info->ri_RelationDesc;
/* otherwise search the FOR UPDATE row-mark list for this range-table index */
1713 foreach(l, estate->es_rowMark)
1715 if (((execRowMark *) lfirst(l))->rti == rti)
1717 relation = ((execRowMark *) lfirst(l))->relation;
1721 if (relation == NULL)
1722 elog(ERROR, "EvalPlanQual: can't find RTE %d", (int) rti);
1728 * Loop here to deal with updated or busy tuples
1730 tuple.t_self = *tid;
/* fetch with SnapshotDirty so in-progress updaters are visible */
1735 if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, false, NULL))
1737 TransactionId xwait = SnapshotDirty->xmax;
1739 if (TransactionIdIsValid(SnapshotDirty->xmin))
1740 elog(ERROR, "EvalPlanQual: t_xmin is uncommitted ?!");
1743 * If tuple is being updated by other transaction then we have
1744 * to wait for its commit/abort.
1746 if (TransactionIdIsValid(xwait))
1748 ReleaseBuffer(buffer);
1749 XactLockTableWait(xwait);
1754 * We got tuple - now copy it for use by recheck query.
1756 copyTuple = heap_copytuple(&tuple);
1757 ReleaseBuffer(buffer);
1762 * Oops! Invalid tuple. Have to check is it updated or deleted.
1763 * Note that it's possible to get invalid SnapshotDirty->tid if
1764 * tuple updated by this transaction. Have we to check this ?
1766 if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
1767 !(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
1769 /* updated, so look at the updated copy */
1770 tuple.t_self = SnapshotDirty->tid;
1775 * Deleted or updated by this transaction; forget it.
1781 * For UPDATE/DELETE we have to return tid of actual row we're
1784 *tid = tuple.t_self;
1787 * Need to run a recheck subquery. Find or create a PQ stack entry.
1789 epq = estate->es_evalPlanQual;
1792 if (epq != NULL && epq->rti == 0)
1794 /* Top PQ stack entry is idle, so re-use it */
1795 Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
1801 * If this is request for another RTE - Ra, - then we have to check
1802 * wasn't PlanQual requested for Ra already and if so then Ra' row was
1803 * updated again and we have to re-start old execution for Ra and
1804 * forget all what we done after Ra was suspended. Cool? -:))
1806 if (epq != NULL && epq->rti != rti &&
1807 epq->estate->es_evTuple[rti - 1] != NULL)
1811 evalPlanQual *oldepq;
1813 /* stop execution */
1814 EvalPlanQualStop(epq);
1815 /* pop previous PlanQual from the stack */
1817 Assert(oldepq && oldepq->rti != 0);
1818 /* push current PQ to freePQ stack */
1821 estate->es_evalPlanQual = epq;
1822 } while (epq->rti != rti);
1826 * If we are requested for another RTE then we have to suspend
1827 * execution of current PlanQual and start execution for new one.
1829 if (epq == NULL || epq->rti != rti)
1831 /* try to reuse plan used previously */
1832 evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;
1834 if (newepq == NULL) /* first call or freePQ stack is empty */
1836 newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
1837 newepq->free = NULL;
1838 newepq->estate = NULL;
1839 newepq->planstate = NULL;
1843 /* recycle previously used PlanQual */
1844 Assert(newepq->estate == NULL);
1847 /* push current PQ to the stack */
1850 estate->es_evalPlanQual = epq;
1855 Assert(epq->rti == rti);
1858 * Ok - we're requested for the same RTE. Unfortunately we still have
1859 * to end and restart execution of the plan, because ExecReScan
1860 * wouldn't ensure that upper plan nodes would reset themselves. We
1861 * could make that work if insertion of the target tuple were
1862 * integrated with the Param mechanism somehow, so that the upper plan
1863 * nodes know that their children's outputs have changed.
1865 * Note that the stack of free evalPlanQual nodes is quite useless at
1866 * the moment, since it only saves us from pallocing/releasing the
1867 * evalPlanQual nodes themselves. But it will be useful once we
1868 * implement ReScan instead of end/restart for re-using PlanQual nodes.
1872 /* stop execution */
1873 EvalPlanQualStop(epq);
1877 * Initialize new recheck query.
1879 * Note: if we were re-using PlanQual plans via ExecReScan, we'd need
1880 * to instead copy down changeable state from the top plan (including
1881 * es_result_relation_info, es_junkFilter) and reset locally changeable
1882 * state in the epq (including es_param_exec_vals, es_evTupleNull).
1884 EvalPlanQualStart(epq, estate, epq->next);
1887 * free old RTE' tuple, if any, and store target tuple where
1888 * relation's scan node will see it
1890 epqstate = epq->estate;
1891 if (epqstate->es_evTuple[rti - 1] != NULL)
1892 heap_freetuple(epqstate->es_evTuple[rti - 1]);
1893 epqstate->es_evTuple[rti - 1] = copyTuple;
/* hand back the first tuple produced by the recheck plan */
1895 return EvalPlanQualNext(estate);
/*
 * NOTE(review): lossy extraction — original line numbers embedded in code
 * lines; the stack pop/push assignments between the visible lines are
 * missing from this view.
 *
 * EvalPlanQualNext: fetch the next tuple from the active recheck plan;
 * when it is exhausted, shut that level down and resume the enclosing
 * PlanQual level (or normal query execution).
 */
1898 static TupleTableSlot *
1899 EvalPlanQualNext(EState *estate)
1901 evalPlanQual *epq = estate->es_evalPlanQual;
1902 MemoryContext oldcontext;
1903 TupleTableSlot *slot;
1905 Assert(epq->rti != 0);
/* run the recheck plan in its own per-query memory context */
1908 oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
1909 slot = ExecProcNode(epq->planstate);
1910 MemoryContextSwitchTo(oldcontext);
1913 * No more tuples for this PQ. Continue previous one.
1915 if (TupIsNull(slot))
1917 evalPlanQual *oldepq;
1919 /* stop execution */
1920 EvalPlanQualStop(epq);
1921 /* pop old PQ from the stack */
1925 /* this is the first (oldest) PQ - mark as free */
1927 estate->es_useEvalPlan = false;
1928 /* and continue Query execution */
1931 Assert(oldepq->rti != 0);
1932 /* push current PQ to freePQ stack */
1935 estate->es_evalPlanQual = epq;
/*
 * NOTE(review): lossy extraction — original line numbers embedded in code
 * lines; loop structure and pop/push assignments are missing from this view.
 *
 * EndEvalPlanQual: shut down all active PlanQual levels at end of query,
 * unwinding the stack until every entry is stopped and marked free.
 */
1943 EndEvalPlanQual(EState *estate)
1945 evalPlanQual *epq = estate->es_evalPlanQual;
1947 if (epq->rti == 0) /* plans already shutdowned */
1949 Assert(epq->next == NULL);
1955 evalPlanQual *oldepq;
1957 /* stop execution */
1958 EvalPlanQualStop(epq);
1959 /* pop old PQ from the stack */
1963 /* this is the first (oldest) PQ - mark as free */
1965 estate->es_useEvalPlan = false;
1968 Assert(oldepq->rti != 0);
1969 /* push current PQ to freePQ stack */
1972 estate->es_evalPlanQual = epq;
/*
 * NOTE(review): lossy extraction — original line numbers embedded in code
 * lines; some declarations and braces are missing from this view.
 */
1977 * Start execution of one level of PlanQual.
1979 * This is a cut-down version of ExecutorStart(): we copy some state from
1980 * the top-level estate rather than initializing it fresh.
1983 EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
1987 MemoryContext oldcontext;
1989 rtsize = length(estate->es_range_table);
/* the recheck plan gets its own EState and per-query memory context */
1991 epq->estate = epqstate = CreateExecutorState();
1993 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
1996 * The epqstates share the top query's copy of unchanging state such
1997 * as the snapshot, rangetable, result-rel info, and external Param info.
1998 * They need their own copies of local state, including a tuple table,
1999 * es_param_exec_vals, etc.
2001 epqstate->es_direction = ForwardScanDirection;
2002 epqstate->es_snapshot = estate->es_snapshot;
2003 epqstate->es_range_table = estate->es_range_table;
2004 epqstate->es_result_relations = estate->es_result_relations;
2005 epqstate->es_num_result_relations = estate->es_num_result_relations;
2006 epqstate->es_result_relation_info = estate->es_result_relation_info;
2007 epqstate->es_junkFilter = estate->es_junkFilter;
2008 epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
2009 epqstate->es_param_list_info = estate->es_param_list_info;
2010 if (estate->es_topPlan->nParamExec > 0)
/* private, zeroed exec-param array so rechecks don't clobber the parent's */
2011 epqstate->es_param_exec_vals = (ParamExecData *)
2012 palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
2013 epqstate->es_rowMark = estate->es_rowMark;
2014 epqstate->es_instrument = estate->es_instrument;
2015 epqstate->es_force_oids = estate->es_force_oids;
2016 epqstate->es_topPlan = estate->es_topPlan;
2018 * Each epqstate must have its own es_evTupleNull state, but
2019 * all the stack entries share es_evTuple state. This allows
2020 * sub-rechecks to inherit the value being examined by an
2023 epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
2024 if (priorepq == NULL)
2025 /* first PQ stack entry */
2026 epqstate->es_evTuple = (HeapTuple *)
2027 palloc0(rtsize * sizeof(HeapTuple));
2029 /* later stack entries share the same storage */
2030 epqstate->es_evTuple = priorepq->estate->es_evTuple;
/* fresh tuple table sized like the parent's, then init the plan tree */
2032 epqstate->es_tupleTable =
2033 ExecCreateTupleTable(estate->es_tupleTable->size);
2035 epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);
2037 MemoryContextSwitchTo(oldcontext);
/*
 * NOTE(review): lossy extraction — original line numbers embedded in code
 * lines; the function's closing lines fall past the end of this view.
 */
2041 * End execution of one level of PlanQual.
2043 * This is a cut-down version of ExecutorEnd(); basically we want to do most
2044 * of the normal cleanup, but *not* close result relations (which we are
2045 * just sharing from the outer query).
2048 EvalPlanQualStop(evalPlanQual *epq)
2050 EState *epqstate = epq->estate;
2051 MemoryContext oldcontext;
2053 oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
/* shut down the plan tree and release this level's tuple table */
2055 ExecEndNode(epq->planstate);
2057 ExecDropTupleTable(epqstate->es_tupleTable, true);
2058 epqstate->es_tupleTable = NULL;
/* drop the target tuple this level was rechecking, if still present */
2060 if (epqstate->es_evTuple[epq->rti - 1] != NULL)
2062 heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
2063 epqstate->es_evTuple[epq->rti - 1] = NULL;
2066 MemoryContextSwitchTo(oldcontext);
/* frees the epq's EState and its per-query memory context */
2068 FreeExecutorState(epqstate);
2071 epq->planstate = NULL;