1 /*-------------------------------------------------------------------------
4 * top level executor interface routines
12 * These four procedures are the external interface to the executor.
13 * In each case, the query descriptor is required as an argument.
15 * ExecutorStart must be called at the beginning of execution of any
16 * query plan and ExecutorEnd must always be called at the end of
17 * execution of a plan (unless it is aborted due to error).
19 * ExecutorRun accepts direction and count arguments that specify whether
20 * the plan is to be executed forwards, backwards, and for how many tuples.
21 * In some cases ExecutorRun may be called multiple times to process all
22 * the tuples for a plan. It is also acceptable to stop short of executing
23 * the whole plan (but only if it is a SELECT).
25 * ExecutorFinish must be called after the final ExecutorRun call and
26 * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 * which should also omit ExecutorRun.
29 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
30 * Portions Copyright (c) 1994, Regents of the University of California
34 * src/backend/executor/execMain.c
36 *-------------------------------------------------------------------------
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "commands/matview.h"
46 #include "commands/trigger.h"
47 #include "executor/execdebug.h"
48 #include "foreign/fdwapi.h"
49 #include "mb/pg_wchar.h"
50 #include "miscadmin.h"
51 #include "optimizer/clauses.h"
52 #include "parser/parsetree.h"
53 #include "storage/bufmgr.h"
54 #include "storage/lmgr.h"
55 #include "tcop/utility.h"
56 #include "utils/acl.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/snapmgr.h"
60 #include "utils/tqual.h"
/*
 * Hooks for plugins to get control in ExecutorStart/Run/Finish/End.
 * When a hook is installed it is called in place of the corresponding
 * standard_Executor* routine; a well-behaved plugin normally calls that
 * standard routine itself (see the per-function header comments below).
 */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
/*
 * decls for local routines only used within this module
 *
 * NOTE(review): this excerpt appears elided; the parameter lists of
 * ExecutePlan, ExecBuildSlotValueDescription and EvalPlanQualStart are
 * truncated here — confirm against the full file.
 */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
			ScanDirection direction,
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(TupleTableSlot *slot,
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
/* end of local decls */
94 /* ----------------------------------------------------------------
97 * This routine must be called at the beginning of any execution of any
100 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
101 * only because some places use QueryDescs for utility commands). The tupDesc
102 * field of the QueryDesc is filled in to describe the tuples that will be
103 * returned, and the internal fields (estate and planstate) are set up.
105 * eflags contains flag bits as described in executor.h.
107 * NB: the CurrentMemoryContext when this is called will become the parent
108 * of the per-query context used for this Executor invocation.
110 * We provide a function hook variable that lets loadable plugins
111 * get control when ExecutorStart is called. Such a plugin would
112 * normally call standard_ExecutorStart().
114 * ----------------------------------------------------------------
117 ExecutorStart(QueryDesc *queryDesc, int eflags)
119 if (ExecutorStart_hook)
120 (*ExecutorStart_hook) (queryDesc, eflags);
122 standard_ExecutorStart(queryDesc, eflags);
/*
 * standard_ExecutorStart
 *
 * Default ExecutorStart implementation: sanity-checks the QueryDesc,
 * enforces transaction read-only restrictions, builds the per-query
 * EState, fills in external/internal parameters, picks the output
 * command ID for writing queries, registers snapshots, and initializes
 * the plan state tree via InitPlan.
 *
 * NOTE(review): excerpt appears elided; return type, braces, the EState
 * declaration, and switch case labels are not visible here.
 */
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 */
	if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in external parameters, if any, from queryDesc; and allocate
	 * workspace for internal parameters
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->nParamExec > 0)
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)

		/*
		 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
		 * tuples, so they require a real command ID.
		 */
		if (queryDesc->plannedstmt->rowMarks != NIL ||
			queryDesc->plannedstmt->hasModifyingCTE)
			estate->es_output_cid = GetCurrentCommandId(true);

		/*
		 * A SELECT without modifying CTEs can't possibly queue triggers,
		 * so force skip-triggers mode.  This is just a marginal efficiency
		 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
		 * all that expensive, but we might as well do it.
		 */
		if (!queryDesc->plannedstmt->hasModifyingCTE)
			eflags |= EXEC_FLAG_SKIP_TRIGGERS;

		/* writing operations always mark output tuples with a real CID */
		estate->es_output_cid = GetCurrentCommandId(true);

		/* unreachable unless the CmdType enum grows without updates here */
		elog(ERROR, "unrecognized operation code: %d",
			 (int) queryDesc->operation);

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_top_eflags = eflags;
	estate->es_instrument = queryDesc->instrument_options;

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	/*
	 * Set up an AFTER-trigger statement context, unless told not to, or
	 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
	 */
	if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
		AfterTriggerBeginQuery();

	MemoryContextSwitchTo(oldcontext);
220 /* ----------------------------------------------------------------
223 * This is the main routine of the executor module. It accepts
224 * the query descriptor from the traffic cop and executes the
227 * ExecutorStart must have been called already.
229 * If direction is NoMovementScanDirection then nothing is done
230 * except to start up/shut down the destination. Otherwise,
231 * we retrieve up to 'count' tuples in the specified direction.
233 * Note: count = 0 is interpreted as no portal limit, i.e., run to
234 * completion. Also note that the count limit is only applied to
235 * retrieved tuples, not for instance to those inserted/updated/deleted
236 * by a ModifyTable plan node.
238 * There is no return value, but output tuples (if any) are sent to
239 * the destination receiver specified in the QueryDesc; and the number
240 * of tuples processed at the top level can be found in
241 * estate->es_processed.
243 * We provide a function hook variable that lets loadable plugins
244 * get control when ExecutorRun is called. Such a plugin would
245 * normally call standard_ExecutorRun().
247 * ----------------------------------------------------------------
250 ExecutorRun(QueryDesc *queryDesc,
251 ScanDirection direction, long count)
253 if (ExecutorRun_hook)
254 (*ExecutorRun_hook) (queryDesc, direction, count);
256 standard_ExecutorRun(queryDesc, direction, count);
/*
 * standard_ExecutorRun
 *
 * Default ExecutorRun implementation: switches into the per-query memory
 * context, optionally starts overall-runtime instrumentation, starts the
 * tuple receiver when tuples will be emitted, runs the plan (unless
 * direction is NoMovement), then shuts the receiver down and stops
 * instrumentation with the number of tuples processed.
 *
 * NOTE(review): excerpt appears elided; return type, braces, several
 * local declarations, and parts of the ExecutePlan call are not visible.
 */
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, long count)
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/*
	 * extract information from the query descriptor and the query feature.
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->hasReturning);

		(*dest->rStartup) (dest, operation, queryDesc->tupDesc);

	/* run plan unless caller asked for no movement */
	if (!ScanDirectionIsNoMovement(direction))
			queryDesc->planstate,

	/*
	 * shutdown tuple receiver, if we started it
	 */
		(*dest->rShutdown) (dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
328 /* ----------------------------------------------------------------
331 * This routine must be called after the last ExecutorRun call.
332 * It performs cleanup such as firing AFTER triggers. It is
333 * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
334 * include these actions in the total runtime.
336 * We provide a function hook variable that lets loadable plugins
337 * get control when ExecutorFinish is called. Such a plugin would
338 * normally call standard_ExecutorFinish().
340 * ----------------------------------------------------------------
343 ExecutorFinish(QueryDesc *queryDesc)
345 if (ExecutorFinish_hook)
346 (*ExecutorFinish_hook) (queryDesc);
348 standard_ExecutorFinish(queryDesc);
/*
 * standard_ExecutorFinish
 *
 * Default ExecutorFinish implementation: runs ModifyTable nodes to
 * completion, fires queued AFTER triggers (unless skip-triggers mode),
 * and marks the EState finished.  Must run exactly once per executor
 * instance, after the last ExecutorRun and before ExecutorEnd.
 *
 * NOTE(review): excerpt appears elided; return type, braces, and the
 * EState declaration are not visible here.
 */
standard_ExecutorFinish(QueryDesc *queryDesc)
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* This should be run once and only once per Executor instance */
	Assert(!estate->es_finished);

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/* Run ModifyTable nodes to completion */
	ExecPostprocessPlan(estate);

	/* Execute queued AFTER triggers, unless told not to */
	if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
		AfterTriggerEndQuery(estate);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, 0);

	MemoryContextSwitchTo(oldcontext);

	estate->es_finished = true;
390 /* ----------------------------------------------------------------
393 * This routine must be called at the end of execution of any
396 * We provide a function hook variable that lets loadable plugins
397 * get control when ExecutorEnd is called. Such a plugin would
398 * normally call standard_ExecutorEnd().
400 * ----------------------------------------------------------------
403 ExecutorEnd(QueryDesc *queryDesc)
405 if (ExecutorEnd_hook)
406 (*ExecutorEnd_hook) (queryDesc);
408 standard_ExecutorEnd(queryDesc);
/*
 * standard_ExecutorEnd
 *
 * Default ExecutorEnd implementation: shuts down the plan state tree,
 * unregisters the snapshots taken at start, frees the EState (and with
 * it the per-query memory context), and clears the QueryDesc fields
 * that pointed into that freed memory.
 *
 * NOTE(review): excerpt appears elided; return type, braces, and the
 * EState declaration are not visible here.
 */
standard_ExecutorEnd(QueryDesc *queryDesc)
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
	 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
	 * might forget to call it.
	 */
	Assert(estate->es_finished ||
		   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
461 /* ----------------------------------------------------------------
464 * This routine may be called on an open queryDesc to rewind it
466 * ----------------------------------------------------------------
/*
 * ExecutorRewind
 *
 * Rewind an open queryDesc so the plan can be rescanned from the start.
 * Only sensible for CMD_SELECT (asserted below); simply delegates to
 * ExecReScan on the top plan node within the per-query memory context.
 *
 * NOTE(review): excerpt appears elided; return type, braces, and the
 * EState declaration are not visible here.
 */
ExecutorRewind(QueryDesc *queryDesc)
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* rescan the plan from the top */
	ExecReScan(queryDesc->planstate);

	MemoryContextSwitchTo(oldcontext);
500 * Check access permissions for all relations listed in a range table.
502 * Returns true if permissions are adequate. Otherwise, throws an appropriate
503 * error if ereport_on_violation is true, or simply returns false otherwise.
505 * Note that this does NOT address row level security policies (aka: RLS). If
506 * rows will be returned to the user as a result of this permission check
507 * passing, then RLS also needs to be consulted (and check_enable_rls()).
509 * See rewrite/rowsecurity.c.
/*
 * ExecCheckRTPerms
 *
 * Check access permissions for all relations listed in a range table.
 * Per the header comment above: returns true if permissions are adequate;
 * otherwise raises an ACL error when ereport_on_violation is true, else
 * returns false.  Finally gives ExecutorCheckPerms_hook (if installed) a
 * chance to veto or extend the decision.
 *
 * NOTE(review): excerpt appears elided; braces, the `result`/list-cell
 * declarations, early-exit logic, and the return statement are not
 * visible here.
 */
ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
	foreach(l, rangeTable)
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		/* per-RTE check; only RTE_RELATION entries can fail */
		result = ExecCheckRTEPerms(rte);
		Assert(rte->rtekind == RTE_RELATION);
		if (ereport_on_violation)
			aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
						   get_rel_name(rte->relid));

	/* let a plugin override/extend the permissions decision */
	if (ExecutorCheckPerms_hook)
		result = (*ExecutorCheckPerms_hook) (rangeTable,
											 ereport_on_violation);
540 * Check access permissions for a single RTE.
/*
 * ExecCheckRTEPerms
 *
 * Check access permissions for a single RTE.  Relation-level privileges
 * are checked first; any required bits not satisfied there may be
 * satisfied by column-level privileges on the selected/modified columns.
 *
 * NOTE(review): excerpt appears elided; braces, several declarations
 * (userid, relOid, relPerms, tmpset, col), early returns, and the final
 * return statement are not visible here.
 */
ExecCheckRTEPerms(RangeTblEntry *rte)
	AclMode		requiredPerms;
	AclMode		remainingPerms;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked by init_fcache when the function is prepared for execution.
	 * Join, subquery, and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)

		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)

			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)

			tmpset = bms_copy(rte->selectedCols);
			while ((col = bms_first_member(tmpset)) >= 0)
				/* remove the column number offset */
				col += FirstLowInvalidHeapAttributeNumber;
				if (col == InvalidAttrNumber)
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)

					/* ordinary column: need SELECT on this column */
					if (pg_attribute_aclcheck(relOid, col, userid,
											  ACL_SELECT) != ACLCHECK_OK)

		/*
		 * Basically the same for the mod columns, with either INSERT or
		 * UPDATE privilege as specified by remainingPerms.
		 */
		remainingPerms &= ~ACL_SELECT;
		if (remainingPerms != 0)

			/*
			 * When the query doesn't explicitly change any columns, allow the
			 * query if we have permission on any column of the rel.  This is
			 * to handle SELECT FOR UPDATE as well as possible corner cases in
			 * INSERT and UPDATE.
			 */
			if (bms_is_empty(rte->modifiedCols))
				if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
											  ACLMASK_ANY) != ACLCHECK_OK)

			tmpset = bms_copy(rte->modifiedCols);
			while ((col = bms_first_member(tmpset)) >= 0)
				/* remove the column number offset */
				col += FirstLowInvalidHeapAttributeNumber;
				if (col == InvalidAttrNumber)
					/* whole-row reference can't happen here */
					elog(ERROR, "whole-row update is not implemented");

				/* need INSERT and/or UPDATE privilege on this column */
				if (pg_attribute_aclcheck(relOid, col, userid,
										  remainingPerms) != ACLCHECK_OK)
683 * Check that the query does not imply any writes to non-temp tables.
685 * Note: in a Hot Standby slave this would need to reject writes to temp
686 * tables as well; but an HS slave can't have created any temp tables
687 * in the first place, so no need to check that.
/*
 * ExecCheckXactReadOnly
 *
 * Reject the statement if it would write to any non-temp table while the
 * transaction is read-only.  Scans the range table for RTEs requiring
 * more than SELECT privilege and lets temp-namespace relations through;
 * any other write target triggers PreventCommandIfReadOnly.
 *
 * NOTE(review): excerpt appears elided; return type, braces, the
 * list-cell declaration, and the `continue` statements that presumably
 * follow each early test are not visible here.
 */
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)

	/* Fail if write permissions are requested on any non-temp table */
	foreach(l, plannedstmt->rtable)
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		/* only real relations can be written */
		if (rte->rtekind != RTE_RELATION)

		/* no write permission requested: nothing to reject */
		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)

		/* writes to temp tables are allowed even in read-only xacts */
		if (isTempNamespace(get_rel_namespace(rte->relid)))

	PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
713 /* ----------------------------------------------------------------
716 * Initializes the query plan: open files, allocate storage
717 * and start up the rule manager
718 * ----------------------------------------------------------------
/*
 * InitPlan
 *
 * Initialize the query plan: check permissions, open/lock result
 * relations and FOR UPDATE/SHARE relations (before plan init, to avoid
 * lock upgrades), set up the tuple table and EvalPlanQual state, init
 * subplans and the main plan tree, and install the result tuple
 * descriptor (through a junk filter for SELECT if needed).
 *
 * NOTE(review): excerpt appears elided; braces, several declarations
 * (l, relid, relation, erm, i, sp_eflags, tupType, tlist, j), `break`
 * statements, and some case labels are not visible here.
 */
InitPlan(QueryDesc *queryDesc, int eflags)
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable, true);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;
	estate->es_plannedstmt = plannedstmt;

	/*
	 * initialize result relation stuff, and open/lock the result rels.
	 *
	 * We must do this before initializing the plan tree, else we might try to
	 * do a lock upgrade if a result rel is also a source rel.
	 */
	if (plannedstmt->resultRelations)
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			/* open and lock each result rel with RowExclusiveLock */
			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
			InitResultRelInfo(resultRelInfo,
							  estate->es_instrument);

		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* es_result_relation_info is NULL except when within ModifyTable */
		estate->es_result_relation_info = NULL;

		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;

	/*
	 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
	 * before we initialize the plan tree, else we'd be risking lock upgrades.
	 * While we are at it, build the ExecRowMark list.
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
		PlanRowMark *rc = (PlanRowMark *) lfirst(l);

		/* ignore "parent" rowmarks; they are irrelevant at runtime */

		switch (rc->markType)
			case ROW_MARK_EXCLUSIVE:
			case ROW_MARK_NOKEYEXCLUSIVE:
			case ROW_MARK_KEYSHARE:
				relid = getrelid(rc->rti, rangeTable);
				relation = heap_open(relid, RowShareLock);
			case ROW_MARK_REFERENCE:
				relid = getrelid(rc->rti, rangeTable);
				relation = heap_open(relid, AccessShareLock);
				/* there's no real table here ... */
				elog(ERROR, "unrecognized markType: %d", rc->markType);
				relation = NULL;	/* keep compiler quiet */

		/* Check that relation is a legal target for marking */
			CheckValidRowMarkRel(relation, rc->markType);

		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->prti = rc->prti;
		erm->rowmarkId = rc->rowmarkId;
		erm->markType = rc->markType;
		erm->waitPolicy = rc->waitPolicy;
		ItemPointerSetInvalid(&(erm->curCtid));
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);

	/*
	 * Initialize the executor's tuple table to empty.
	 */
	estate->es_tupleTable = NIL;
	estate->es_trig_tuple_slot = NULL;
	estate->es_trig_oldtup_slot = NULL;
	estate->es_trig_newtup_slot = NULL;

	/* mark EvalPlanQual not active */
	estate->es_epqTuple = NULL;
	estate->es_epqTupleSet = NULL;
	estate->es_epqScanDone = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.  If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
			& (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return.
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
		bool		junk_filter_needed = false;

		foreach(tlist, plan->targetlist)
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

				junk_filter_needed = true;

		if (junk_filter_needed)

			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   ExecInitExtraTupleSlot(estate));
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
939 * Check that a proposed result relation is a legal target for the operation
941 * Generally the parser and/or planner should have noticed any such mistake
942 * already, but let's make sure.
944 * Note: when changing this function, you probably also need to look at
945 * CheckValidRowMarkRel.
/*
 * CheckValidResultRel
 *
 * Check that a proposed result relation is a legal target for the given
 * operation.  Plain tables are always OK; sequences and TOAST relations
 * never are; views need a suitable INSTEAD OF trigger; matviews are only
 * writable during incremental maintenance; foreign tables require FDW
 * support for the specific operation.
 *
 * NOTE(review): excerpt appears elided; braces, `break`s, `ereport(...`
 * call openers, several case labels (CMD_INSERT/UPDATE/DELETE and
 * RELKIND_VIEW / default), and the return type are not visible here.
 */
CheckValidResultRel(Relation resultRel, CmdType operation)
	TriggerDesc *trigDesc = resultRel->trigdesc;
	FdwRoutine *fdwroutine;

	switch (resultRel->rd_rel->relkind)
		case RELKIND_RELATION:
			/* OK */
		case RELKIND_SEQUENCE:
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
		case RELKIND_TOASTVALUE:
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Messages
			 * here should match rewriteHandler.c's rewriteTargetView, except
			 * that we omit errdetail because we haven't got the information
			 * handy (and given that we really shouldn't get here anyway, it's
			 * not worth great exertion to get).
			 */
					if (!trigDesc || !trigDesc->trig_insert_instead_row)
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("cannot insert into view \"%s\"",
									RelationGetRelationName(resultRel)),
							 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
					if (!trigDesc || !trigDesc->trig_update_instead_row)
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("cannot update view \"%s\"",
									RelationGetRelationName(resultRel)),
							 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
					if (!trigDesc || !trigDesc->trig_delete_instead_row)
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("cannot delete from view \"%s\"",
									RelationGetRelationName(resultRel)),
							 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);

		case RELKIND_MATVIEW:
			if (!MatViewIncrementalMaintenanceIsEnabled())
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = GetFdwRoutineForRelation(resultRel, false);
					if (fdwroutine->ExecForeignInsert == NULL)
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("cannot insert into foreign table \"%s\"",
									RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("foreign table \"%s\" does not allow inserts",
									RelationGetRelationName(resultRel))));
					if (fdwroutine->ExecForeignUpdate == NULL)
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("cannot update foreign table \"%s\"",
									RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("foreign table \"%s\" does not allow updates",
									RelationGetRelationName(resultRel))));
					if (fdwroutine->ExecForeignDelete == NULL)
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("cannot delete from foreign table \"%s\"",
									RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("foreign table \"%s\" does not allow deletes",
									RelationGetRelationName(resultRel))));
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);

			/* default: any other relkind cannot be a result relation */
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
1076 * Check that a proposed rowmark target relation is a legal target
1078 * In most cases parser and/or planner should have noticed this already, but
1079 * they don't cover all cases.
/*
 * CheckValidRowMarkRel
 *
 * Check that a proposed rowmark target relation is a legal target:
 * plain tables are OK; sequences, TOAST relations, views, and foreign
 * tables are rejected; matviews allow only ROW_MARK_REFERENCE (no
 * actual locking clauses).
 *
 * NOTE(review): excerpt appears elided; braces, `break`s, `ereport(...`
 * call openers, the RELKIND_VIEW / default case labels, and the return
 * type are not visible here.
 */
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
	switch (rel->rd_rel->relkind)
		case RELKIND_RELATION:
			/* OK */
		case RELKIND_SEQUENCE:
			/* Must disallow this because we don't vacuum sequences */
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in sequence \"%s\"",
							RelationGetRelationName(rel))));
		case RELKIND_TOASTVALUE:
			/* We could allow this, but there seems no good reason to */
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in TOAST relation \"%s\"",
							RelationGetRelationName(rel))));

			/* Should not get here; planner should have expanded the view */
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in view \"%s\"",
							RelationGetRelationName(rel))));
		case RELKIND_MATVIEW:
			/* Allow referencing a matview, but not actual locking clauses */
			if (markType != ROW_MARK_REFERENCE)
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot lock rows in materialized view \"%s\"",
								RelationGetRelationName(rel))));
		case RELKIND_FOREIGN_TABLE:
			/* Should not get here; planner should have used ROW_MARK_COPY */
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in foreign table \"%s\"",
							RelationGetRelationName(rel))));

			/* default: any other relkind cannot be row-marked */
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in relation \"%s\"",
							RelationGetRelationName(rel))));
1135 * Initialize ResultRelInfo data for one result relation
1137 * Caution: before Postgres 9.1, this function included the relkind checking
1138 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1139 * appropriate. Be sure callers cover those needs.
/*
 * InitResultRelInfo
 *
 * Initialize ResultRelInfo data for one result relation: zero the
 * struct, record the relation and rangetable index, copy the trigger
 * descriptor out of relcache (so it can't change underneath us),
 * allocate per-trigger function/WHEN-expr caches and optional
 * instrumentation, and look up the FDW routine for foreign tables.
 *
 * Per the header comment above: relkind checking and ExecOpenIndices
 * moved out of here in 9.1; callers must cover those needs.
 *
 * NOTE(review): excerpt appears elided; braces, the `else` keywords
 * before the NULL-assignment branches, and the return type are not
 * visible here.
 */
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  int instrument_options)
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		/* per-trigger caches: FmgrInfo and compiled WHEN expressions */
		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		resultRelInfo->ri_TrigWhenExprs = (List **)
			palloc0(n * sizeof(List *));
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);

		/* no triggers: leave trigger caches unset */
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;

	if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
		resultRelInfo->ri_FdwRoutine = NULL;
	resultRelInfo->ri_FdwState = NULL;
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;
1184 * ExecGetTriggerResultRel
1186 * Get a ResultRelInfo for a trigger target relation. Most of the time,
1187 * triggers are fired on one of the result relations of the query, and so
1188 * we can just return a member of the es_result_relations array. (Note: in
1189 * self-join situations there might be multiple members with the same OID;
1190 * if so it doesn't matter which one we pick.) However, it is sometimes
1191 * necessary to fire triggers on other relations; this happens mainly when an
1192 * RI update trigger queues additional triggers on other relations, which will
1193 * be processed in the context of the outer query. For efficiency's sake,
1194 * we want to have a ResultRelInfo for those triggers too; that can avoid
1195 * repeated re-opening of the relation. (It also provides a way for EXPLAIN
1196 * ANALYZE to report the runtimes of such triggers.) So we make additional
1197 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
/* See the header comment above for the search strategy. */
1200 ExecGetTriggerResultRel(EState *estate, Oid relid)
1202 ResultRelInfo *rInfo;
1206 MemoryContext oldcontext;
1208 /* First, search through the query result relations */
1209 rInfo = estate->es_result_relations;
1210 nr = estate->es_num_result_relations;
/* (scan the nr array entries, returning on an OID match — loop elided here) */
1213 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1218 /* Nope, but maybe we already made an extra ResultRelInfo for it */
1219 foreach(l, estate->es_trig_target_relations)
1221 rInfo = (ResultRelInfo *) lfirst(l);
1222 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1225 /* Nope, so we need a new one */
1228 * Open the target relation's relcache entry. We assume that an
1229 * appropriate lock is still held by the backend from whenever the trigger
1230 * event got queued, so we need take no new lock here. Also, we need not
1231 * recheck the relkind, so no need for CheckValidResultRel.
1233 rel = heap_open(relid, NoLock);
1236 * Make the new entry in the right context.
/* Allocate in the per-query context so the entry survives for the query. */
1238 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1239 rInfo = makeNode(ResultRelInfo);
1240 InitResultRelInfo(rInfo,
1242 0, /* dummy rangetable index */
1243 estate->es_instrument);
/* Remember it so a later call for the same rel reuses this entry. */
1244 estate->es_trig_target_relations =
1245 lappend(estate->es_trig_target_relations, rInfo);
1246 MemoryContextSwitchTo(oldcontext);
1249 * Currently, we don't need any index information in ResultRelInfos used
1250 * only for triggers, so no need to call ExecOpenIndices.
1257 * ExecContextForcesOids
1259 * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1260 * we need to ensure that result tuples have space for an OID iff they are
1261 * going to be stored into a relation that has OIDs. In other contexts
1262 * we are free to choose whether to leave space for OIDs in result tuples
1263 * (we generally don't want to, but we do if a physical-tlist optimization
1264 * is possible). This routine checks the plan context and returns TRUE if the
1265 * choice is forced, FALSE if the choice is not forced. In the TRUE case,
1266 * *hasoids is set to the required value.
1268 * One reason this is ugly is that all plan nodes in the plan tree will emit
1269 * tuples with space for an OID, though we really only need the topmost node
1270 * to do so. However, node types like Sort don't project new tuples but just
1271 * return their inputs, and in those cases the requirement propagates down
1272 * to the input node. Eventually we might make this code smart enough to
1273 * recognize how far down the requirement really goes, but for now we just
1274 * make all plan nodes do the same thing if the top level forces the choice.
1276 * We assume that if we are generating tuples for INSERT or UPDATE,
1277 * estate->es_result_relation_info is already set up to describe the target
1278 * relation. Note that in an UPDATE that spans an inheritance tree, some of
1279 * the target relations may have OIDs and some not. We have to make the
1280 * decisions on a per-relation basis as we initialize each of the subplans of
1281 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1282 * while initializing each subplan.
1284 * CREATE TABLE AS is even uglier, because we don't have the target relation's
1285 * descriptor available when this code runs; we have to look aside at the
1286 * flags passed to ExecutorStart().
/* Returns TRUE iff the context forces the OID choice; see comment above. */
1289 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1291 ResultRelInfo *ri = planstate->state->es_result_relation_info;
/* INSERT/UPDATE case: the target relation dictates the choice. */
1295 Relation rel = ri->ri_RelationDesc;
1299 *hasoids = rel->rd_rel->relhasoids;
/* CREATE TABLE AS case: consult the eflags passed to ExecutorStart(). */
1304 if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1309 if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1318 /* ----------------------------------------------------------------
1319 * ExecPostprocessPlan
1321 * Give plan nodes a final chance to execute before shutdown
1322 * ----------------------------------------------------------------
1325 ExecPostprocessPlan(EState *estate)
1330 * Make sure nodes run forward.
1332 estate->es_direction = ForwardScanDirection;
1335 * Run any secondary ModifyTable nodes to completion, in case the main
1336 * query did not fetch all rows from them. (We do this to ensure that
1337 * such nodes have predictable results.)
1339 foreach(lc, estate->es_auxmodifytables)
1341 PlanState *ps = (PlanState *) lfirst(lc);
/* Pull tuples until the node is exhausted. */
1345 TupleTableSlot *slot;
1347 /* Reset the per-output-tuple exprcontext each time */
1348 ResetPerTupleExprContext(estate);
1350 slot = ExecProcNode(ps);
/* A null slot means this ModifyTable node is done. */
1352 if (TupIsNull(slot))
1358 /* ----------------------------------------------------------------
1361 * Cleans up the query plan -- closes files and frees up storage
1363 * NOTE: we are no longer very worried about freeing storage per se
1364 * in this code; FreeExecutorState should be guaranteed to release all
1365 * memory that needs to be released. What we are worried about doing
1366 * is closing relations and dropping buffer pins. Thus, for example,
1367 * tuple tables must be cleared or dropped to ensure pins are released.
1368 * ----------------------------------------------------------------
1371 ExecEndPlan(PlanState *planstate, EState *estate)
1373 ResultRelInfo *resultRelInfo;
1378 * shut down the node-type-specific query processing
1380 ExecEndNode(planstate);
/* Also shut down any subplans that were initialized separately. */
1385 foreach(l, estate->es_subplanstates)
1387 PlanState *subplanstate = (PlanState *) lfirst(l);
1389 ExecEndNode(subplanstate);
1393 * destroy the executor's tuple table. Actually we only care about
1394 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1395 * the TupleTableSlots, since the containing memory context is about to go
1398 ExecResetTupleTable(estate->es_tupleTable, false);
1401 * close the result relation(s) if any, but hold locks until xact commit.
1403 resultRelInfo = estate->es_result_relations;
1404 for (i = estate->es_num_result_relations; i > 0; i--)
1406 /* Close indices and then the relation itself */
1407 ExecCloseIndices(resultRelInfo);
1408 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1413 * likewise close any trigger target relations
1415 foreach(l, estate->es_trig_target_relations)
1417 resultRelInfo = (ResultRelInfo *) lfirst(l);
1418 /* Close indices and then the relation itself */
1419 ExecCloseIndices(resultRelInfo);
1420 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1424 * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
/* NoLock: the row-mark relations also keep their locks until commit. */
1427 foreach(l, estate->es_rowMarks)
1429 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1432 heap_close(erm->relation, NoLock);
1436 /* ----------------------------------------------------------------
1439 * Processes the query plan until we have retrieved 'numberTuples' tuples,
1440 * moving in the specified direction.
1442 * Runs to completion if numberTuples is 0
1444 * Note: the ctid attribute is a 'junk' attribute that is removed before the
1446 * ----------------------------------------------------------------
1449 ExecutePlan(EState *estate,
1450 PlanState *planstate,
1454 ScanDirection direction,
1457 TupleTableSlot *slot;
1458 long current_tuple_count;
1461 * initialize local variables
1463 current_tuple_count = 0;
1466 * Set the direction.
1468 estate->es_direction = direction;
1471 * Loop until we've processed the proper number of tuples from the plan.
1475 /* Reset the per-output-tuple exprcontext */
1476 ResetPerTupleExprContext(estate);
1479 * Execute the plan and obtain a tuple
1481 slot = ExecProcNode(planstate);
1484 * if the tuple is null, then we assume there is nothing more to
1485 * process so we just end the loop...
1487 if (TupIsNull(slot))
1491 * If we have a junk filter, then project a new tuple with the junk
1494 * Store this new "clean" tuple in the junkfilter's resultSlot.
1495 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1496 * because that tuple slot has the wrong descriptor.)
1498 if (estate->es_junkFilter != NULL)
1499 slot = ExecFilterJunk(estate->es_junkFilter, slot);
1502 * If we are supposed to send the tuple somewhere, do so. (In
1503 * practice, this is probably always the case at this point.)
1506 (*dest->receiveSlot) (slot, dest);
1509 * Count tuples processed, if this is a SELECT. (For other operation
1510 * types, the ModifyTable plan node must count the appropriate
1513 if (operation == CMD_SELECT)
1514 (estate->es_processed)++;
1517 * check our tuple count.. if we've processed the proper number then
1518 * quit, else loop again and process more tuples. Zero numberTuples
1521 current_tuple_count++;
1522 if (numberTuples && numberTuples == current_tuple_count)
1529 * ExecRelCheck --- check that tuple meets constraints for result relation
1531 * Returns NULL if OK, else name of failed check constraint
1534 ExecRelCheck(ResultRelInfo *resultRelInfo,
1535 TupleTableSlot *slot, EState *estate)
1537 Relation rel = resultRelInfo->ri_RelationDesc;
1538 int ncheck = rel->rd_att->constr->num_check;
1539 ConstrCheck *check = rel->rd_att->constr->check;
1540 ExprContext *econtext;
1541 MemoryContext oldContext;
1546 * If first time through for this result relation, build expression
1547 * nodetrees for rel's constraint expressions. Keep them in the per-query
1548 * memory context so they'll survive throughout the query.
1550 if (resultRelInfo->ri_ConstraintExprs == NULL)
1552 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1553 resultRelInfo->ri_ConstraintExprs =
1554 (List **) palloc(ncheck * sizeof(List *));
1555 for (i = 0; i < ncheck; i++)
1557 /* ExecQual wants implicit-AND form */
1558 qual = make_ands_implicit(stringToNode(check[i].ccbin));
1559 resultRelInfo->ri_ConstraintExprs[i] = (List *)
1560 ExecPrepareExpr((Expr *) qual, estate);
1562 MemoryContextSwitchTo(oldContext);
1566 * We will use the EState's per-tuple context for evaluating constraint
1567 * expressions (creating it if it's not already there).
1569 econtext = GetPerTupleExprContext(estate);
1571 /* Arrange for econtext's scan tuple to be the tuple under test */
1572 econtext->ecxt_scantuple = slot;
1574 /* And evaluate the constraints */
1575 for (i = 0; i < ncheck; i++)
1577 qual = resultRelInfo->ri_ConstraintExprs[i];
1580 * NOTE: SQL specifies that a NULL result from a constraint expression
1581 * is not to be treated as a failure. Therefore, tell ExecQual to
1582 * return TRUE for NULL.
1584 if (!ExecQual(qual, econtext, true))
1585 return check[i].ccname;
1588 /* NULL result means no error */
/*
 * ExecConstraints --- enforce NOT NULL and CHECK constraints on a tuple;
 * ereports (does not return) on the first violation found.
 */
1593 ExecConstraints(ResultRelInfo *resultRelInfo,
1594 TupleTableSlot *slot, EState *estate)
1596 Relation rel = resultRelInfo->ri_RelationDesc;
1597 TupleDesc tupdesc = RelationGetDescr(rel);
1598 TupleConstr *constr = tupdesc->constr;
/* Enforce NOT NULL constraints, attribute by attribute. */
1602 if (constr->has_not_null)
1604 int natts = tupdesc->natts;
1607 for (attrChk = 1; attrChk <= natts; attrChk++)
1609 if (tupdesc->attrs[attrChk - 1]->attnotnull &&
1610 slot_attisnull(slot, attrChk))
/* Include the failing row's contents in the error detail. */
1612 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1613 errmsg("null value in column \"%s\" violates not-null constraint",
1614 NameStr(tupdesc->attrs[attrChk - 1]->attname)),
1615 errdetail("Failing row contains %s.",
1616 ExecBuildSlotValueDescription(slot,
1619 errtablecol(rel, attrChk)));
/* Enforce CHECK constraints; ExecRelCheck names the first failed one. */
1623 if (constr->num_check > 0)
1627 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1629 (errcode(ERRCODE_CHECK_VIOLATION),
1630 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1631 RelationGetRelationName(rel), failed),
1632 errdetail("Failing row contains %s.",
1633 ExecBuildSlotValueDescription(slot,
1636 errtableconstraint(rel, failed)));
1641 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
1644 ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
1645 TupleTableSlot *slot, EState *estate)
1647 ExprContext *econtext;
1652 * We will use the EState's per-tuple context for evaluating constraint
1653 * expressions (creating it if it's not already there).
1655 econtext = GetPerTupleExprContext(estate);
1657 /* Arrange for econtext's scan tuple to be the tuple under test */
1658 econtext->ecxt_scantuple = slot;
1660 /* Check each of the constraints */
/* The WCO descriptors and their prepared expressions are parallel lists. */
1661 forboth(l1, resultRelInfo->ri_WithCheckOptions,
1662 l2, resultRelInfo->ri_WithCheckOptionExprs)
1664 WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
1665 ExprState *wcoExpr = (ExprState *) lfirst(l2);
1668 * WITH CHECK OPTION checks are intended to ensure that the new tuple
1669 * is visible (in the case of a view) or that it passes the
1670 * 'with-check' policy (in the case of row security).
1671 * If the qual evaluates to NULL or FALSE, then the new tuple won't be
1672 * included in the view or doesn't pass the 'with-check' policy for the
1673 * table. We need ExecQual to return FALSE for NULL to handle the view
1674 * case (the opposite of what we do above for CHECK constraints).
1676 if (!ExecQual((List *) wcoExpr, econtext, false))
1678 (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
1679 errmsg("new row violates WITH CHECK OPTION for \"%s\"",
1681 errdetail("Failing row contains %s.",
1682 ExecBuildSlotValueDescription(slot,
1683 RelationGetDescr(resultRelInfo->ri_RelationDesc),
1689 * ExecBuildSlotValueDescription -- construct a string representing a tuple
1691 * This is intentionally very similar to BuildIndexValueDescription, but
1692 * unlike that function, we truncate long field values (to at most maxfieldlen
1693 * bytes). That seems necessary here since heap field values could be very
1694 * long, whereas index entries typically aren't so wide.
1696 * Also, unlike the case with index entries, we need to be prepared to ignore
1697 * dropped columns. We used to use the slot's tuple descriptor to decode the
1698 * data, but the slot's descriptor doesn't identify dropped columns, so we
1699 * now need to be passed the relation's descriptor.
1702 ExecBuildSlotValueDescription(TupleTableSlot *slot,
1707 bool write_comma = false;
1710 /* Make sure the tuple is fully deconstructed */
1711 slot_getallattrs(slot);
1713 initStringInfo(&buf);
/* Output looks like "(val1, val2, ...)" */
1715 appendStringInfoChar(&buf, '(');
1717 for (i = 0; i < tupdesc->natts; i++)
1722 /* ignore dropped columns */
1723 if (tupdesc->attrs[i]->attisdropped)
1726 if (slot->tts_isnull[i])
/* Convert the datum to text using the type's output function. */
1733 getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
1734 &foutoid, &typisvarlena);
1735 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
1739 appendStringInfoString(&buf, ", ");
1743 /* truncate if needed */
1744 vallen = strlen(val);
1745 if (vallen <= maxfieldlen)
1746 appendStringInfoString(&buf, val);
/* Clip at a multibyte character boundary, then mark the truncation. */
1749 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1750 appendBinaryStringInfo(&buf, val, vallen);
1751 appendStringInfoString(&buf, "...");
1755 appendStringInfoChar(&buf, ')');
1762 * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
/* Linear search of es_rowMarks; error out if the rti has no entry. */
1765 ExecFindRowMark(EState *estate, Index rti)
1769 foreach(lc, estate->es_rowMarks)
1771 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
1773 if (erm->rti == rti)
1776 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
1777 return NULL; /* keep compiler quiet */
1781 * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
1783 * Inputs are the underlying ExecRowMark struct and the targetlist of the
1784 * input plan node (not planstate node!). We need the latter to find out
1785 * the column numbers of the resjunk columns.
1788 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
1790 ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
1793 aerm->rowmark = erm;
1795 /* Look up the resjunk columns associated with this rowmark */
/* Physical-lock rowmarks carry ctid (and tableoid for child rels)... */
1798 Assert(erm->markType != ROW_MARK_COPY);
1800 /* if child rel, need tableoid */
1801 if (erm->rti != erm->prti)
/* resjunk column names embed the rowmark ID, e.g. "tableoid3" */
1803 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
1804 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
1806 if (!AttributeNumberIsValid(aerm->toidAttNo))
1807 elog(ERROR, "could not find junk %s column", resname);
1810 /* always need ctid for real relations */
1811 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
1812 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
1814 if (!AttributeNumberIsValid(aerm->ctidAttNo))
1815 elog(ERROR, "could not find junk %s column", resname);
/* ...while ROW_MARK_COPY rowmarks carry a whole-row image instead. */
1819 Assert(erm->markType == ROW_MARK_COPY);
1821 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
1822 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
1824 if (!AttributeNumberIsValid(aerm->wholeAttNo))
1825 elog(ERROR, "could not find junk %s column", resname);
1833 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
1834 * process the updated version under READ COMMITTED rules.
1836 * See backend/executor/README for some info about how this works.
1841 * Check a modified tuple to see if we want to process its updated version
1842 * under READ COMMITTED rules.
1844 * estate - outer executor state data
1845 * epqstate - state for EvalPlanQual rechecking
1846 * relation - table containing tuple
1847 * rti - rangetable index of table containing tuple
1848 * lockmode - requested tuple lock mode
1849 * *tid - t_ctid from the outdated tuple (ie, next updated version)
1850 * priorXmax - t_xmax from the outdated tuple
1852 * *tid is also an output parameter: it's modified to hold the TID of the
1853 * latest version of the tuple (note this may be changed even on failure)
1855 * Returns a slot containing the new candidate update/delete tuple, or
1856 * NULL if we determine we shouldn't process the row.
1858 * Note: properly, lockmode should be declared as enum LockTupleMode,
1859 * but we use "int" to avoid having to include heapam.h in executor.h.
1862 EvalPlanQual(EState *estate, EPQState *epqstate,
1863 Relation relation, Index rti, int lockmode,
1864 ItemPointer tid, TransactionId priorXmax)
1866 TupleTableSlot *slot;
1867 HeapTuple copyTuple;
1872 * Get and lock the updated version of the row; if fail, return NULL.
1874 copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
1877 if (copyTuple == NULL)
1881 * For UPDATE/DELETE we have to return tid of actual row we're executing
/* tid is an in/out parameter: report the version we actually locked. */
1884 *tid = copyTuple->t_self;
1887 * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
1889 EvalPlanQualBegin(epqstate, estate);
1892 * Free old test tuple, if any, and store new tuple where relation's scan
1895 EvalPlanQualSetTuple(epqstate, rti, copyTuple);
1898 * Fetch any non-locked source rows
1900 EvalPlanQualFetchRowMarks(epqstate);
1903 * Run the EPQ query. We assume it will return at most one tuple.
1905 slot = EvalPlanQualNext(epqstate);
1908 * If we got a tuple, force the slot to materialize the tuple so that it
1909 * is not dependent on any local state in the EPQ query (in particular,
1910 * it's highly likely that the slot contains references to any pass-by-ref
1911 * datums that may be present in copyTuple). As with the next step, this
1912 * is to guard against early re-use of the EPQ query.
1914 if (!TupIsNull(slot))
1915 (void) ExecMaterializeSlot(slot);
1918 * Clear out the test tuple. This is needed in case the EPQ query is
1919 * re-used to test a tuple for a different relation. (Not clear that can
1920 * really happen, but let's be safe.)
1922 EvalPlanQualSetTuple(epqstate, rti, NULL);
1928 * Fetch a copy of the newest version of an outdated tuple
1930 * estate - executor state data
1931 * relation - table containing tuple
1932 * lockmode - requested tuple lock mode
1933 * wait_policy - requested lock wait policy
1934 * *tid - t_ctid from the outdated tuple (ie, next updated version)
1935 * priorXmax - t_xmax from the outdated tuple
1937 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
1938 * that there is no newest version (ie, the row was deleted not updated).
1939 * We also return NULL if the tuple is locked and the wait policy is to skip
1942 * If successful, we have locked the newest tuple version, so caller does not
1943 * need to worry about it changing anymore.
1945 * Note: properly, lockmode should be declared as enum LockTupleMode,
1946 * but we use "int" to avoid having to include heapam.h in executor.h.
1949 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
1950 LockWaitPolicy wait_policy,
1951 ItemPointer tid, TransactionId priorXmax)
1953 HeapTuple copyTuple = NULL;
1954 HeapTupleData tuple;
1955 SnapshotData SnapshotDirty;
1958 * fetch target tuple
1960 * Loop here to deal with updated or busy tuples
1962 InitDirtySnapshot(SnapshotDirty);
1963 tuple.t_self = *tid;
/* Chase the update chain until we lock the newest version or give up. */
1968 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
1971 HeapUpdateFailureData hufd;
1974 * If xmin isn't what we're expecting, the slot must have been
1975 * recycled and reused for an unrelated tuple. This implies that
1976 * the latest version of the row was deleted, so we need do
1977 * nothing. (Should be safe to examine xmin without getting
1978 * buffer's content lock, since xmin never changes in an existing
1981 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
1984 ReleaseBuffer(buffer);
1988 /* otherwise xmin should not be dirty... */
1989 if (TransactionIdIsValid(SnapshotDirty.xmin))
1990 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
1993 * If tuple is being updated by other transaction then we have to
1994 * wait for its commit/abort, or die trying.
1996 if (TransactionIdIsValid(SnapshotDirty.xmax))
1998 ReleaseBuffer(buffer);
/* Behavior on a busy tuple depends on the caller's wait policy. */
1999 switch (wait_policy)
2002 XactLockTableWait(SnapshotDirty.xmax,
2003 relation, &tuple.t_data->t_ctid,
2007 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2008 return NULL; /* skip instead of waiting */
2011 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2013 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2014 errmsg("could not obtain lock on row in relation \"%s\"",
2015 RelationGetRelationName(relation))));
2018 continue; /* loop back to repeat heap_fetch */
2022 * If tuple was inserted by our own transaction, we have to check
2023 * cmin against es_output_cid: cmin >= current CID means our
2024 * command cannot see the tuple, so we should ignore it. Otherwise
2025 * heap_lock_tuple() will throw an error, and so would any later
2026 * attempt to update or delete the tuple. (We need not check cmax
2027 * because HeapTupleSatisfiesDirty will consider a tuple deleted
2028 * by our transaction dead, regardless of cmax.) We just checked
2029 * that priorXmax == xmin, so we can test that variable instead of
2030 * doing HeapTupleHeaderGetXmin again.
2032 if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2033 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2035 ReleaseBuffer(buffer);
2040 * This is a live tuple, so now try to lock it.
2042 test = heap_lock_tuple(relation, &tuple,
2043 estate->es_output_cid,
2044 lockmode, wait_policy,
2045 false, &buffer, &hufd);
2046 /* We now have two pins on the buffer, get rid of one */
2047 ReleaseBuffer(buffer);
2051 case HeapTupleSelfUpdated:
2054 * The target tuple was already updated or deleted by the
2055 * current command, or by a later command in the current
2056 * transaction. We *must* ignore the tuple in the former
2057 * case, so as to avoid the "Halloween problem" of
2058 * repeated update attempts. In the latter case it might
2059 * be sensible to fetch the updated tuple instead, but
2060 * doing so would require changing heap_lock_tuple as well
2061 * as heap_update and heap_delete to not complain about
2062 * updating "invisible" tuples, which seems pretty scary.
2063 * So for now, treat the tuple as deleted and do not
2066 ReleaseBuffer(buffer);
2069 case HeapTupleMayBeUpdated:
2070 /* successfully locked */
2073 case HeapTupleUpdated:
2074 ReleaseBuffer(buffer);
/* Under REPEATABLE READ or stricter, a concurrent update is fatal. */
2075 if (IsolationUsesXactSnapshot())
2077 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2078 errmsg("could not serialize access due to concurrent update")));
2079 if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2081 /* it was updated, so look at the updated version */
2082 tuple.t_self = hufd.ctid;
2083 /* updated row should have xmin matching this xmax */
2084 priorXmax = hufd.xmax;
2087 /* tuple was deleted, so give up */
2090 case HeapTupleWouldBlock:
2091 ReleaseBuffer(buffer);
2095 ReleaseBuffer(buffer);
2096 elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2098 return NULL; /* keep compiler quiet */
2102 * We got tuple - now copy it for use by recheck query.
2104 copyTuple = heap_copytuple(&tuple);
2105 ReleaseBuffer(buffer);
/* heap_fetch failed: the tuple is absent or invisible to SnapshotDirty. */
2110 * If the referenced slot was actually empty, the latest version of
2111 * the row must have been deleted, so we need do nothing.
2113 if (tuple.t_data == NULL)
2115 ReleaseBuffer(buffer);
2120 * As above, if xmin isn't what we're expecting, do nothing.
2122 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2125 ReleaseBuffer(buffer);
2130 * If we get here, the tuple was found but failed SnapshotDirty.
2131 * Assuming the xmin is either a committed xact or our own xact (as it
2132 * certainly should be if we're trying to modify the tuple), this must
2133 * mean that the row was updated or deleted by either a committed xact
2134 * or our own xact. If it was deleted, we can ignore it; if it was
2135 * updated then chain up to the next version and repeat the whole
2138 * As above, it should be safe to examine xmax and t_ctid without the
2139 * buffer content lock, because they can't be changing.
2141 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2143 /* deleted, so forget about it */
2144 ReleaseBuffer(buffer);
2148 /* updated, so look at the updated row */
2149 tuple.t_self = tuple.t_data->t_ctid;
2150 /* updated row should have xmin matching this xmax */
2151 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2152 ReleaseBuffer(buffer);
2153 /* loop back to fetch next in chain */
2157 * Return the copied tuple
2163 * EvalPlanQualInit -- initialize during creation of a plan state node
2164 * that might need to invoke EPQ processing.
2166 * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2167 * with EvalPlanQualSetPlan.
2170 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2171 Plan *subplan, List *auxrowmarks, int epqParam)
2173 /* Mark the EPQ state inactive */
2174 epqstate->estate = NULL;
2175 epqstate->planstate = NULL;
2176 epqstate->origslot = NULL;
2177 /* ... and remember data that EvalPlanQualBegin will need */
2178 epqstate->plan = subplan;
2179 epqstate->arowMarks = auxrowmarks;
/* epqParam identifies the rescan param used to force child-plan rescans */
2180 epqstate->epqParam = epqParam;
2184 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2186 * We need this so that ModifyTable can deal with multiple subplans.
2189 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2191 /* If we have a live EPQ query, shut it down */
2192 EvalPlanQualEnd(epqstate);
2193 /* And set/change the plan pointer */
2194 epqstate->plan = subplan;
2195 /* The rowmarks depend on the plan, too */
2196 epqstate->arowMarks = auxrowmarks;
2200 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2202 * NB: passed tuple must be palloc'd; it may get freed later
2205 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2207 EState *estate = epqstate->estate;
2212 * free old test tuple, if any, and store new tuple where relation's scan
/* es_epqTuple is indexed by rangetable index minus one */
2215 if (estate->es_epqTuple[rti - 1] != NULL)
2216 heap_freetuple(estate->es_epqTuple[rti - 1]);
2217 estate->es_epqTuple[rti - 1] = tuple;
/* the "set" flag distinguishes "no tuple" (NULL) from "never set" */
2218 estate->es_epqTupleSet[rti - 1] = true;
2222 * Fetch back the current test tuple (if any) for the specified RTI
2225 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2227 EState *estate = epqstate->estate;
/* rti is 1-based, the array is 0-based */
2231 return estate->es_epqTuple[rti - 1];
2235 * Fetch the current row values for any non-locked relations that need
2236 * to be scanned by an EvalPlanQual operation. origslot must have been set
2237 * to contain the current result row (top-level row) that we need to recheck.
2240 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2244 Assert(epqstate->origslot != NULL);
2246 foreach(l, epqstate->arowMarks)
2248 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2249 ExecRowMark *erm = aerm->rowmark;
2252 HeapTupleData tuple;
/* Locking rowmarks are handled by the main query, not here. */
2254 if (RowMarkRequiresRowShareLock(erm->markType))
2255 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2257 /* clear any leftover test tuple for this rel */
2258 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
/* Case 1: re-fetch the row by ctid from the heap. */
2264 Assert(erm->markType == ROW_MARK_REFERENCE);
2266 /* if child rel, must check whether it produced this row */
2267 if (erm->rti != erm->prti)
2271 datum = ExecGetJunkAttribute(epqstate->origslot,
2274 /* non-locked rels could be on the inside of outer joins */
2277 tableoid = DatumGetObjectId(datum);
2279 if (tableoid != RelationGetRelid(erm->relation))
2281 /* this child is inactive right now */
2286 /* fetch the tuple's ctid */
2287 datum = ExecGetJunkAttribute(epqstate->origslot,
2290 /* non-locked rels could be on the inside of outer joins */
2293 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2295 /* okay, fetch the tuple */
/* SnapshotAny: the row was already qualified by the main query */
2296 if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2298 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2300 /* successful, copy and store tuple */
2301 EvalPlanQualSetTuple(epqstate, erm->rti,
2302 heap_copytuple(&tuple));
2303 ReleaseBuffer(buffer);
/* Case 2: the row travels as a whole-row junk column. */
2309 Assert(erm->markType == ROW_MARK_COPY);
2311 /* fetch the whole-row Var for the relation */
2312 datum = ExecGetJunkAttribute(epqstate->origslot,
2315 /* non-locked rels could be on the inside of outer joins */
2318 td = DatumGetHeapTupleHeader(datum);
2320 /* build a temporary HeapTuple control structure */
2321 tuple.t_len = HeapTupleHeaderGetDatumLength(td);
2322 ItemPointerSetInvalid(&(tuple.t_self));
2323 tuple.t_tableOid = InvalidOid;
2326 /* copy and store tuple */
2327 EvalPlanQualSetTuple(epqstate, erm->rti,
2328 heap_copytuple(&tuple));
2334 * Fetch the next row (if any) from EvalPlanQual testing
2336 * (In practice, there should never be more than one row...)
2339 EvalPlanQualNext(EPQState *epqstate)
2341 MemoryContext oldcontext;
2342 TupleTableSlot *slot;
/* Run the child plan in the EPQ estate's own query context. */
2344 oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2345 slot = ExecProcNode(epqstate->planstate);
2346 MemoryContextSwitchTo(oldcontext);
2352 * Initialize or reset an EvalPlanQual state tree
2355 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2357 EState *estate = epqstate->estate;
2361 /* First time through, so create a child EState */
2362 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2367 * We already have a suitable child EPQ tree, so just reset it.
2369 int rtsize = list_length(parentestate->es_range_table);
2370 PlanState *planstate = epqstate->planstate;
/* Forget which scan nodes have already returned their test tuple. */
2372 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2374 /* Recopy current values of parent parameters */
2375 if (parentestate->es_plannedstmt->nParamExec > 0)
2377 int i = parentestate->es_plannedstmt->nParamExec;
2381 /* copy value if any, but not execPlan link */
2382 estate->es_param_exec_vals[i].value =
2383 parentestate->es_param_exec_vals[i].value;
2384 estate->es_param_exec_vals[i].isnull =
2385 parentestate->es_param_exec_vals[i].isnull;
2390 * Mark child plan tree as needing rescan at all scan nodes. The
2391 * first ExecProcNode will take care of actually doing the rescan.
2393 planstate->chgParam = bms_add_member(planstate->chgParam,
2394 epqstate->epqParam);
2399 * Start execution of an EvalPlanQual plan tree.
2401 * This is a cut-down version of ExecutorStart(): we copy some state from
2402 * the top-level estate rather than initializing it fresh.
2405 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
2409 MemoryContext oldcontext;
2412 rtsize = list_length(parentestate->es_range_table);
2414 epqstate->estate = estate = CreateExecutorState();
2416 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2419 * Child EPQ EStates share the parent's copy of unchanging state such as
2420 * the snapshot, rangetable, result-rel info, and external Param info.
2421 * They need their own copies of local state, including a tuple table,
2422 * es_param_exec_vals, etc.
2424 estate->es_direction = ForwardScanDirection;
2425 estate->es_snapshot = parentestate->es_snapshot;
2426 estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2427 estate->es_range_table = parentestate->es_range_table;
2428 estate->es_plannedstmt = parentestate->es_plannedstmt;
2429 estate->es_junkFilter = parentestate->es_junkFilter;
2430 estate->es_output_cid = parentestate->es_output_cid;
2431 estate->es_result_relations = parentestate->es_result_relations;
2432 estate->es_num_result_relations = parentestate->es_num_result_relations;
2433 estate->es_result_relation_info = parentestate->es_result_relation_info;
2434 /* es_trig_target_relations must NOT be copied */
2435 estate->es_rowMarks = parentestate->es_rowMarks;
2436 estate->es_top_eflags = parentestate->es_top_eflags;
2437 estate->es_instrument = parentestate->es_instrument;
2438 /* es_auxmodifytables must NOT be copied */
2441 * The external param list is simply shared from parent. The internal
2442 * param workspace has to be local state, but we copy the initial values
2443 * from the parent, so as to have access to any param values that were
2444 * already set from other parts of the parent's plan tree.
2446 estate->es_param_list_info = parentestate->es_param_list_info;
2447 if (parentestate->es_plannedstmt->nParamExec > 0)
2449 int i = parentestate->es_plannedstmt->nParamExec;
2451 estate->es_param_exec_vals = (ParamExecData *)
2452 palloc0(i * sizeof(ParamExecData));
2455 /* copy value if any, but not execPlan link */
2456 estate->es_param_exec_vals[i].value =
2457 parentestate->es_param_exec_vals[i].value;
2458 estate->es_param_exec_vals[i].isnull =
2459 parentestate->es_param_exec_vals[i].isnull;
2464 * Each EState must have its own es_epqScanDone state, but if we have
2465 * nested EPQ checks they should share es_epqTuple arrays. This allows
2466 * sub-rechecks to inherit the values being examined by an outer recheck.
2468 estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
2469 if (parentestate->es_epqTuple != NULL)
2471 estate->es_epqTuple = parentestate->es_epqTuple;
2472 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
2476 estate->es_epqTuple = (HeapTuple *)
2477 palloc0(rtsize * sizeof(HeapTuple));
2478 estate->es_epqTupleSet = (bool *)
2479 palloc0(rtsize * sizeof(bool));
2483 * Each estate also has its own tuple table.
2485 estate->es_tupleTable = NIL;
2488 * Initialize private state information for each SubPlan. We must do this
2489 * before running ExecInitNode on the main query tree, since
2490 * ExecInitSubPlan expects to be able to find these entries. Some of the
2491 * SubPlans might not be used in the part of the plan tree we intend to
2492 * run, but since it's not easy to tell which, we just initialize them
2495 Assert(estate->es_subplanstates == NIL);
2496 foreach(l, parentestate->es_plannedstmt->subplans)
2498 Plan *subplan = (Plan *) lfirst(l);
2499 PlanState *subplanstate;
2501 subplanstate = ExecInitNode(subplan, estate, 0);
2502 estate->es_subplanstates = lappend(estate->es_subplanstates,
2507 * Initialize the private state information for all the nodes in the part
2508 * of the plan tree we need to run. This opens files, allocates storage
2509 * and leaves us ready to start processing tuples.
2511 epqstate->planstate = ExecInitNode(planTree, estate, 0);
2513 MemoryContextSwitchTo(oldcontext);
2517 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2518 * or if we are done with the current EPQ child.
2520 * This is a cut-down version of ExecutorEnd(); basically we want to do most
2521 * of the normal cleanup, but *not* close result relations (which we are
2522 * just sharing from the outer query). We do, however, have to close any
2523 * trigger target relations that got opened, since those are not shared.
2524 * (There probably shouldn't be any of the latter, but just in case...)
2527 EvalPlanQualEnd(EPQState *epqstate)
2529 EState *estate = epqstate->estate;
2530 MemoryContext oldcontext;
2534 return; /* idle, so nothing to do */
2536 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2538 ExecEndNode(epqstate->planstate);
2540 foreach(l, estate->es_subplanstates)
2542 PlanState *subplanstate = (PlanState *) lfirst(l);
2544 ExecEndNode(subplanstate);
2547 /* throw away the per-estate tuple table */
2548 ExecResetTupleTable(estate->es_tupleTable, false);
2550 /* close any trigger target relations attached to this EState */
2551 foreach(l, estate->es_trig_target_relations)
2553 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2555 /* Close indices and then the relation itself */
2556 ExecCloseIndices(resultRelInfo);
2557 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2560 MemoryContextSwitchTo(oldcontext);
2562 FreeExecutorState(estate);
2564 /* Mark EPQState idle */
2565 epqstate->estate = NULL;
2566 epqstate->planstate = NULL;
2567 epqstate->origslot = NULL;