/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 *	INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorFinish()
 *	ExecutorEnd()
 *
 *	These four procedures are the external interface to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart must be called at the beginning of execution of any
 *	query plan and ExecutorEnd must always be called at the end of
 *	execution of a plan (unless it is aborted due to error).
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards, backwards, and for how many tuples.
 *	In some cases ExecutorRun may be called multiple times to process all
 *	the tuples for a plan.  It is also acceptable to stop short of executing
 *	the whole plan (but only if it is a SELECT).
 *
 *	ExecutorFinish must be called after the final ExecutorRun call and
 *	before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
 *	which should also omit ExecutorRun.
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/execMain.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/partition.h"
#include "catalog/pg_publication.h"
#include "commands/matview.h"
#include "commands/trigger.h"
#include "executor/execdebug.h"
#include "foreign/fdwapi.h"
#include "jit/jit.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "parser/parsetree.h"
#include "rewrite/rewriteManip.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"

/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/* Hook for plugin to get control in ExecCheckRTPerms() */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
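
/*
 * A minimal sketch of how an extension would install one of these hooks
 * (modeled on the pattern used by contrib modules such as pg_stat_statements;
 * the names my_ExecutorRun and prev_ExecutorRun are hypothetical, and module
 * boilerplate such as PG_MODULE_MAGIC is omitted).  The module saves any
 * previously installed hook in _PG_init() and chains to it, falling back to
 * standard_ExecutorRun() when no other hook is present.
 */
#ifdef EXECUTOR_HOOK_EXAMPLE	/* illustration only, not compiled */
static ExecutorRun_hook_type prev_ExecutorRun = NULL;

static void
my_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction,
			   uint64 count, bool execute_once)
{
	/* ... extension-specific work before execution ... */
	if (prev_ExecutorRun)
		prev_ExecutorRun(queryDesc, direction, count, execute_once);
	else
		standard_ExecutorRun(queryDesc, direction, count, execute_once);
	/* ... and after execution ... */
}

void
_PG_init(void)
{
	prev_ExecutorRun = ExecutorRun_hook;
	ExecutorRun_hook = my_ExecutorRun;
}
#endif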

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest,
			bool execute_once);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
						  Bitmapset *modifiedCols,
						  AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
				  Plan *planTree);

/*
 * Note that GetUpdatedColumns() also exists in commands/trigger.c.  There does
 * not appear to be any good header to put it into, given the structures that
 * it uses, so we let them be duplicated.  Be sure to update both if one needs
 * to be changed, however.
 */
#define GetInsertedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
#define GetUpdatedColumns(relinfo, estate) \
	(rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)

/* end of local decls */
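
/*
 * For reference (a sketch, not part of this file): rt_fetch() is defined in
 * parser/parsetree.h and simply fetches the n'th RangeTblEntry from a range
 * table list, so the macros above expand to a field access along the lines of
 *
 *	((RangeTblEntry *) list_nth((estate)->es_range_table,
 *								(relinfo)->ri_RangeTableIndex - 1))->updatedCols
 */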

/* ----------------------------------------------------------------
 *		ExecutorStart
 *
 *		This routine must be called at the beginning of any execution of any
 *		query plan
 *
 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
 * only because some places use QueryDescs for utility commands).  The tupDesc
 * field of the QueryDesc is filled in to describe the tuples that will be
 * returned, and the internal fields (estate and planstate) are set up.
 *
 * eflags contains flag bits as described in executor.h.
 *
 * NB: the CurrentMemoryContext when this is called will become the parent
 * of the per-query context used for this Executor invocation.
 *
 * We provide a function hook variable that lets loadable plugins
 * get control when ExecutorStart is called.  Such a plugin would
 * normally call standard_ExecutorStart().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	if (ExecutorStart_hook)
		(*ExecutorStart_hook) (queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);
}

void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks: queryDesc must not be started already */
	Assert(queryDesc != NULL);
	Assert(queryDesc->estate == NULL);

	/*
	 * If the transaction is read-only, we need to check if any writes are
	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
	 *
	 * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
	 * would require (a) storing the combocid hash in shared memory, rather
	 * than synchronizing it just once at the start of parallelism, and (b) an
	 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
	 * INSERT may have no such troubles, but we forbid it to simplify the
	 * checks.
	 *
	 * We have lower-level defenses in CommandCounterIncrement and elsewhere
	 * against performing unsafe operations in parallel mode, but this gives a
	 * more user-friendly error message.
	 */
	if ((XactReadOnly || IsInParallelMode()) &&
		!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecCheckXactReadOnly(queryDesc->plannedstmt);

	/*
	 * Build EState, switch into per-query memory context for startup.
	 */
	estate = CreateExecutorState();
	queryDesc->estate = estate;

	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * Fill in external parameters, if any, from queryDesc; and allocate
	 * workspace for internal parameters
	 */
	estate->es_param_list_info = queryDesc->params;

	if (queryDesc->plannedstmt->paramExecTypes != NIL)
	{
		int			nParamExec;

		nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
		estate->es_param_exec_vals = (ParamExecData *)
			palloc0(nParamExec * sizeof(ParamExecData));
	}

	estate->es_sourceText = queryDesc->sourceText;

	/*
	 * Fill in the query environment, if any, from queryDesc.
	 */
	estate->es_queryEnv = queryDesc->queryEnv;

	/*
	 * If non-read-only query, set the command ID to mark output tuples with
	 */
	switch (queryDesc->operation)
	{
		case CMD_SELECT:

			/*
			 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
			 * tuples
			 */
			if (queryDesc->plannedstmt->rowMarks != NIL ||
				queryDesc->plannedstmt->hasModifyingCTE)
				estate->es_output_cid = GetCurrentCommandId(true);

			/*
			 * A SELECT without modifying CTEs can't possibly queue triggers,
			 * so force skip-triggers mode.  This is just a marginal efficiency
			 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
			 * all that expensive, but we might as well do it.
			 */
			if (!queryDesc->plannedstmt->hasModifyingCTE)
				eflags |= EXEC_FLAG_SKIP_TRIGGERS;
			break;

		case CMD_INSERT:
		case CMD_UPDATE:
		case CMD_DELETE:
			estate->es_output_cid = GetCurrentCommandId(true);
			break;

		default:
			elog(ERROR, "unrecognized operation code: %d",
				 (int) queryDesc->operation);
			break;
	}

	/*
	 * Copy other important information into the EState
	 */
	estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
	estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
	estate->es_top_eflags = eflags;
	estate->es_instrument = queryDesc->instrument_options;
	estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;

	/*
	 * Set up an AFTER-trigger statement context, unless told not to, or
	 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
	 */
	if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
		AfterTriggerBeginQuery();

	/*
	 * Initialize the plan state tree
	 */
	InitPlan(queryDesc, eflags);

	MemoryContextSwitchTo(oldcontext);
}
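
/*
 * A minimal sketch (assumed usage, loosely modeled on EXPLAIN's driver code)
 * of how EXEC_FLAG_EXPLAIN_ONLY changes the startup contract: with the flag
 * set, ExecutorStart builds the plan state tree but the caller then skips
 * ExecutorRun and ExecutorFinish entirely, as the header comment above
 * requires.  The function name start_for_explain is hypothetical.
 */
#ifdef EXPLAIN_ONLY_EXAMPLE		/* illustration only, not compiled */
static void
start_for_explain(QueryDesc *queryDesc, bool analyze)
{
	int			eflags = analyze ? 0 : EXEC_FLAG_EXPLAIN_ONLY;

	ExecutorStart(queryDesc, eflags);
	/* ... walk queryDesc->planstate to print the plan tree ... */
}
#endif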

/* ----------------------------------------------------------------
 *		ExecutorRun
 *
 *		This is the main routine of the executor module. It accepts
 *		the query descriptor from the traffic cop and executes the
 *		query plan.
 *
 *		ExecutorStart must have been called already.
 *
 *		If direction is NoMovementScanDirection then nothing is done
 *		except to start up/shut down the destination.  Otherwise,
 *		we retrieve up to 'count' tuples in the specified direction.
 *
 *		Note: count = 0 is interpreted as no portal limit, i.e., run to
 *		completion.  Also note that the count limit is only applied to
 *		retrieved tuples, not for instance to those inserted/updated/deleted
 *		by a ModifyTable plan node.
 *
 *		There is no return value, but output tuples (if any) are sent to
 *		the destination receiver specified in the QueryDesc; and the number
 *		of tuples processed at the top level can be found in
 *		estate->es_processed.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorRun is called.  Such a plugin would
 *		normally call standard_ExecutorRun().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorRun(QueryDesc *queryDesc,
			ScanDirection direction, uint64 count,
			bool execute_once)
{
	if (ExecutorRun_hook)
		(*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
	else
		standard_ExecutorRun(queryDesc, direction, count, execute_once);
}

void
standard_ExecutorRun(QueryDesc *queryDesc,
					 ScanDirection direction, uint64 count, bool execute_once)
{
	EState	   *estate;
	CmdType		operation;
	DestReceiver *dest;
	bool		sendTuples;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/*
	 * extract information from the query descriptor and the query feature.
	 */
	operation = queryDesc->operation;
	dest = queryDesc->dest;

	/*
	 * startup tuple receiver, if we will be emitting tuples
	 */
	estate->es_processed = 0;
	estate->es_lastoid = InvalidOid;

	sendTuples = (operation == CMD_SELECT ||
				  queryDesc->plannedstmt->hasReturning);

	if (sendTuples)
		dest->rStartup(dest, operation, queryDesc->tupDesc);

	/*
	 * run plan
	 */
	if (!ScanDirectionIsNoMovement(direction))
	{
		if (execute_once && queryDesc->already_executed)
			elog(ERROR, "can't re-execute query flagged for single execution");
		queryDesc->already_executed = true;

		ExecutePlan(estate,
					queryDesc->planstate,
					queryDesc->plannedstmt->parallelModeNeeded,
					operation,
					sendTuples,
					count,
					direction,
					dest,
					execute_once);
	}

	/*
	 * shutdown tuple receiver, if we started it
	 */
	if (sendTuples)
		dest->rShutdown(dest);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, estate->es_processed);

	MemoryContextSwitchTo(oldcontext);
}
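
/*
 * For orientation, a minimal sketch of the full calling sequence a caller
 * such as the portal code would use (the function name is hypothetical;
 * error handling and portal bookkeeping are omitted):
 */
#ifdef EXECUTOR_CALL_SEQUENCE_EXAMPLE	/* illustration only, not compiled */
static void
run_query_to_completion(QueryDesc *queryDesc)
{
	ExecutorStart(queryDesc, 0);
	/* count = 0 means "no limit": run the plan to completion */
	ExecutorRun(queryDesc, ForwardScanDirection, 0L, true);
	ExecutorFinish(queryDesc);
	ExecutorEnd(queryDesc);
}
#endif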

/* ----------------------------------------------------------------
 *		ExecutorFinish
 *
 *		This routine must be called after the last ExecutorRun call.
 *		It performs cleanup such as firing AFTER triggers.  It is
 *		separate from ExecutorEnd because EXPLAIN ANALYZE needs to
 *		include these actions in the total runtime.
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorFinish is called.  Such a plugin would
 *		normally call standard_ExecutorFinish().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorFinish(QueryDesc *queryDesc)
{
	if (ExecutorFinish_hook)
		(*ExecutorFinish_hook) (queryDesc);
	else
		standard_ExecutorFinish(queryDesc);
}

void
standard_ExecutorFinish(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);
	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/* This should be run once and only once per Executor instance */
	Assert(!estate->es_finished);

	/* Switch into per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* Allow instrumentation of Executor overall runtime */
	if (queryDesc->totaltime)
		InstrStartNode(queryDesc->totaltime);

	/* Run ModifyTable nodes to completion */
	ExecPostprocessPlan(estate);

	/* Execute queued AFTER triggers, unless told not to */
	if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
		AfterTriggerEndQuery(estate);

	if (queryDesc->totaltime)
		InstrStopNode(queryDesc->totaltime, 0);

	MemoryContextSwitchTo(oldcontext);

	estate->es_finished = true;
}

/* ----------------------------------------------------------------
 *		ExecutorEnd
 *
 *		This routine must be called at the end of execution of any
 *		query plan
 *
 *		We provide a function hook variable that lets loadable plugins
 *		get control when ExecutorEnd is called.  Such a plugin would
 *		normally call standard_ExecutorEnd().
 *
 * ----------------------------------------------------------------
 */
void
ExecutorEnd(QueryDesc *queryDesc)
{
	if (ExecutorEnd_hook)
		(*ExecutorEnd_hook) (queryDesc);
	else
		standard_ExecutorEnd(queryDesc);
}

void
standard_ExecutorEnd(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/*
	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
	 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
	 * might forget to call it.
	 */
	Assert(estate->es_finished ||
		   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

	/*
	 * Switch into per-query memory context to run ExecEndPlan
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndPlan(queryDesc->planstate, estate);

	/* do away with our snapshots */
	UnregisterSnapshot(estate->es_snapshot);
	UnregisterSnapshot(estate->es_crosscheck_snapshot);

	/* release JIT context, if allocated */
	if (estate->es_jit)
		jit_release_context(estate->es_jit);

	/*
	 * Must switch out of context before destroying it
	 */
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Release EState and per-query memory context.  This should release
	 * everything the executor has allocated.
	 */
	FreeExecutorState(estate);

	/* Reset queryDesc fields that no longer point to anything */
	queryDesc->tupDesc = NULL;
	queryDesc->estate = NULL;
	queryDesc->planstate = NULL;
	queryDesc->totaltime = NULL;
}

/* ----------------------------------------------------------------
 *		ExecutorRewind
 *
 *		This routine may be called on an open queryDesc to rewind it
 *		to the start.
 * ----------------------------------------------------------------
 */
void
ExecutorRewind(QueryDesc *queryDesc)
{
	EState	   *estate;
	MemoryContext oldcontext;

	/* sanity checks */
	Assert(queryDesc != NULL);

	estate = queryDesc->estate;

	Assert(estate != NULL);

	/* It's probably not sensible to rescan updating queries */
	Assert(queryDesc->operation == CMD_SELECT);

	/*
	 * Switch into per-query memory context
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	/*
	 * rescan plan
	 */
	ExecReScan(queryDesc->planstate);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * ExecCheckRTPerms
 *		Check access permissions for all relations listed in a range table.
 *
 * Returns true if permissions are adequate.  Otherwise, throws an appropriate
 * error if ereport_on_violation is true, or simply returns false otherwise.
 *
 * Note that this does NOT address row level security policies (aka: RLS). If
 * rows will be returned to the user as a result of this permission check
 * passing, then RLS also needs to be consulted (and check_enable_rls()).
 *
 * See rewrite/rowsecurity.c.
 */
bool
ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
{
	ListCell   *l;
	bool		result = true;

	foreach(l, rangeTable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		result = ExecCheckRTEPerms(rte);
		if (!result)
		{
			Assert(rte->rtekind == RTE_RELATION);
			if (ereport_on_violation)
				aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)),
							   get_rel_name(rte->relid));
			return false;
		}
	}

	if (ExecutorCheckPerms_hook)
		result = (*ExecutorCheckPerms_hook) (rangeTable,
											 ereport_on_violation);
	return result;
}
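
/*
 * A minimal sketch of an ExecutorCheckPerms_hook implementation (the pattern
 * used by mandatory-access-control extensions such as sepgsql; the names
 * my_check_perms and extension_specific_policy_allows are hypothetical).
 * The hook sees the full range table after the built-in ACL checks have
 * passed and can veto the query.
 */
#ifdef PERMS_HOOK_EXAMPLE		/* illustration only, not compiled */
extern bool extension_specific_policy_allows(Oid relid, AclMode perms);	/* hypothetical */

static bool
my_check_perms(List *rangeTable, bool ereport_on_violation)
{
	ListCell   *l;

	foreach(l, rangeTable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		if (rte->rtekind != RTE_RELATION)
			continue;
		if (!extension_specific_policy_allows(rte->relid, rte->requiredPerms))
		{
			if (ereport_on_violation)
				aclcheck_error(ACLCHECK_NO_PRIV,
							   get_relkind_objtype(get_rel_relkind(rte->relid)),
							   get_rel_name(rte->relid));
			return false;
		}
	}
	return true;
}
#endif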

/*
 * ExecCheckRTEPerms
 *		Check access permissions for a single RTE.
 */
static bool
ExecCheckRTEPerms(RangeTblEntry *rte)
{
	AclMode		requiredPerms;
	AclMode		relPerms;
	AclMode		remainingPerms;
	Oid			relOid;
	Oid			userid;

	/*
	 * Only plain-relation RTEs need to be checked here.  Function RTEs are
	 * checked when the function is prepared for execution.  Join, subquery,
	 * and special RTEs need no checks.
	 */
	if (rte->rtekind != RTE_RELATION)
		return true;

	/*
	 * No work if requiredPerms is empty.
	 */
	requiredPerms = rte->requiredPerms;
	if (requiredPerms == 0)
		return true;

	relOid = rte->relid;

	/*
	 * userid to check as: current user unless we have a setuid indication.
	 *
	 * Note: GetUserId() is presently fast enough that there's no harm in
	 * calling it separately for each RTE.  If that stops being true, we could
	 * call it once in ExecCheckRTPerms and pass the userid down from there.
	 * But for now, no need for the extra clutter.
	 */
	userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/*
	 * We must have *all* the requiredPerms bits, but some of the bits can be
	 * satisfied from column-level rather than relation-level permissions.
	 * First, remove any bits that are satisfied by relation permissions.
	 */
	relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
	remainingPerms = requiredPerms & ~relPerms;
	if (remainingPerms != 0)
	{
		int			col = -1;

		/*
		 * If we lack any permissions that exist only as relation permissions,
		 * we can fail straight away.
		 */
		if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
			return false;

		/*
		 * Check to see if we have the needed privileges at column level.
		 *
		 * Note: failures just report a table-level error; it would be nicer
		 * to report a column-level error if we have some but not all of the
		 * column privileges.
		 */
		if (remainingPerms & ACL_SELECT)
		{
			/*
			 * When the query doesn't explicitly reference any columns (for
			 * example, SELECT COUNT(*) FROM table), allow the query if we
			 * have SELECT on any column of the rel, as per SQL spec.
			 */
			if (bms_is_empty(rte->selectedCols))
			{
				if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
											  ACLMASK_ANY) != ACLCHECK_OK)
					return false;
			}

			while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
			{
				/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
				AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;

				if (attno == InvalidAttrNumber)
				{
					/* Whole-row reference, must have priv on all cols */
					if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
												  ACLMASK_ALL) != ACLCHECK_OK)
						return false;
				}
				else
				{
					if (pg_attribute_aclcheck(relOid, attno, userid,
											  ACL_SELECT) != ACLCHECK_OK)
						return false;
				}
			}
		}

		/*
		 * Basically the same for the mod columns, for both INSERT and UPDATE
		 * privilege as specified by remainingPerms.
		 */
		if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->insertedCols,
																	  ACL_INSERT))
			return false;

		if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
																	  userid,
																	  rte->updatedCols,
																	  ACL_UPDATE))
			return false;
	}
	return true;
}

/*
 * ExecCheckRTEPermsModified
 *		Check INSERT or UPDATE access permissions for a single RTE (these
 *		are processed uniformly).
 */
static bool
ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
						  AclMode requiredPerms)
{
	int			col = -1;

	/*
	 * When the query doesn't explicitly update any columns, allow the query
	 * if we have permission on any column of the rel.  This is to handle
	 * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
	 */
	if (bms_is_empty(modifiedCols))
	{
		if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
									  ACLMASK_ANY) != ACLCHECK_OK)
			return false;
	}

	while ((col = bms_next_member(modifiedCols, col)) >= 0)
	{
		/* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
		AttrNumber	attno = col + FirstLowInvalidHeapAttributeNumber;

		if (attno == InvalidAttrNumber)
		{
			/* whole-row reference can't happen here */
			elog(ERROR, "whole-row update is not implemented");
		}
		else
		{
			if (pg_attribute_aclcheck(relOid, attno, userid,
									  requiredPerms) != ACLCHECK_OK)
				return false;
		}
	}
	return true;
}
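
/*
 * A small sketch (illustration only) of the attribute-number encoding used
 * by selectedCols/insertedCols/updatedCols above: column numbers are shifted
 * by FirstLowInvalidHeapAttributeNumber so that system attributes (which
 * have negative attnos) and the whole-row reference (attno 0) can live in
 * the same Bitmapset.
 */
#ifdef ATTNO_BITMAP_EXAMPLE		/* illustration only, not compiled */
static void
attno_bitmap_roundtrip(void)
{
	Bitmapset  *cols = NULL;
	AttrNumber	attno = 2;		/* second user column, for illustration */
	int			col = -1;

	/* encode: attno -> bitmap member */
	cols = bms_add_member(cols, attno - FirstLowInvalidHeapAttributeNumber);

	/* decode: bitmap member -> attno, exactly as the loops above do */
	while ((col = bms_next_member(cols, col)) >= 0)
	{
		AttrNumber	decoded = col + FirstLowInvalidHeapAttributeNumber;

		Assert(decoded == attno);
	}
}
#endif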

/*
 * Check that the query does not imply any writes to non-temp tables;
 * unless we're in parallel mode, in which case don't even allow writes
 * to temp tables.
 *
 * Note: in a Hot Standby this would need to reject writes to temp
 * tables just as we do in parallel mode; but an HS standby can't have created
 * any temp tables in the first place, so no need to check that.
 */
static void
ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
	ListCell   *l;

	/*
	 * Fail if write permissions are requested in parallel mode for table
	 * (temp or non-temp), otherwise fail for any non-temp table.
	 */
	foreach(l, plannedstmt->rtable)
	{
		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);

		if (rte->rtekind != RTE_RELATION)
			continue;

		if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
			continue;

		if (isTempNamespace(get_rel_namespace(rte->relid)))
			continue;

		PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
	}

	if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
		PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
}

/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckRTPerms(rangeTable, true);

	/*
	 * initialize the node's execution state
	 */
	estate->es_range_table = rangeTable;
	estate->es_plannedstmt = plannedstmt;

	/*
	 * initialize result relation stuff, and open/lock the result rels.
	 *
	 * We must do this before initializing the plan tree, else we might try to
	 * do a lock upgrade if a result rel is also a source rel.
	 */
	if (plannedstmt->resultRelations)
	{
		List	   *resultRelations = plannedstmt->resultRelations;
		int			numResultRelations = list_length(resultRelations);
		ResultRelInfo *resultRelInfos;
		ResultRelInfo *resultRelInfo;

		resultRelInfos = (ResultRelInfo *)
			palloc(numResultRelations * sizeof(ResultRelInfo));
		resultRelInfo = resultRelInfos;
		foreach(l, resultRelations)
		{
			Index		resultRelationIndex = lfirst_int(l);
			Oid			resultRelationOid;
			Relation	resultRelation;

			resultRelationOid = getrelid(resultRelationIndex, rangeTable);
			resultRelation = heap_open(resultRelationOid, RowExclusiveLock);

			InitResultRelInfo(resultRelInfo,
							  resultRelation,
							  resultRelationIndex,
							  NULL,
							  estate->es_instrument);
			resultRelInfo++;
		}
		estate->es_result_relations = resultRelInfos;
		estate->es_num_result_relations = numResultRelations;
		/* es_result_relation_info is NULL except when within ModifyTable */
		estate->es_result_relation_info = NULL;

		/*
		 * In the partitioned result relation case, lock the non-leaf result
		 * relations too.  A subset of these are the roots of respective
		 * partitioned tables, for which we also allocate ResultRelInfos.
		 */
		estate->es_root_result_relations = NULL;
		estate->es_num_root_result_relations = 0;
		if (plannedstmt->nonleafResultRelations)
		{
			int			num_roots = list_length(plannedstmt->rootResultRelations);

			/*
			 * Firstly, build ResultRelInfos for all the partitioned table
			 * roots, because we will need them to fire the statement-level
			 * triggers, if any.
			 */
			resultRelInfos = (ResultRelInfo *)
				palloc(num_roots * sizeof(ResultRelInfo));
			resultRelInfo = resultRelInfos;
			foreach(l, plannedstmt->rootResultRelations)
			{
				Index		resultRelIndex = lfirst_int(l);
				Oid			resultRelOid;
				Relation	resultRelDesc;

				resultRelOid = getrelid(resultRelIndex, rangeTable);
				resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
				InitResultRelInfo(resultRelInfo,
								  resultRelDesc,
								  resultRelIndex,
								  NULL,
								  estate->es_instrument);
				resultRelInfo++;
			}

			estate->es_root_result_relations = resultRelInfos;
			estate->es_num_root_result_relations = num_roots;

			/* Simply lock the rest of them. */
			foreach(l, plannedstmt->nonleafResultRelations)
			{
				Index		resultRelIndex = lfirst_int(l);

				/* We locked the roots above. */
				if (!list_member_int(plannedstmt->rootResultRelations,
									 resultRelIndex))
					LockRelationOid(getrelid(resultRelIndex, rangeTable),
									RowExclusiveLock);
			}
		}
	}
	else
	{
		/*
		 * if no result relation, then set state appropriately
		 */
		estate->es_result_relations = NULL;
		estate->es_num_result_relations = 0;
		estate->es_result_relation_info = NULL;
		estate->es_root_result_relations = NULL;
		estate->es_num_root_result_relations = 0;
	}

	/*
	 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
	 * before we initialize the plan tree, else we'd be risking lock upgrades.
	 * While we are at it, build the ExecRowMark list.  Any partitioned child
	 * tables are ignored here (because isParent=true) and will be locked by
	 * the first Append or MergeAppend node that references them.  (Note that
	 * the RowMarks corresponding to partitioned child tables are present in
	 * the same list as the rest, i.e., plannedstmt->rowMarks.)
	 */
	estate->es_rowMarks = NIL;
	foreach(l, plannedstmt->rowMarks)
	{
		PlanRowMark *rc = (PlanRowMark *) lfirst(l);
		Oid			relid;
		Relation	relation;
		ExecRowMark *erm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* get relation's OID (will produce InvalidOid if subquery) */
		relid = getrelid(rc->rti, rangeTable);

		/*
		 * If you change the conditions under which rel locks are acquired
		 * here, be sure to adjust ExecOpenScanRelation to match.
		 */
		switch (rc->markType)
		{
			case ROW_MARK_EXCLUSIVE:
			case ROW_MARK_NOKEYEXCLUSIVE:
			case ROW_MARK_SHARE:
			case ROW_MARK_KEYSHARE:
				relation = heap_open(relid, RowShareLock);
				break;
			case ROW_MARK_REFERENCE:
				relation = heap_open(relid, AccessShareLock);
				break;
			case ROW_MARK_COPY:
				/* no physical table access is required */
				relation = NULL;
				break;
			default:
				elog(ERROR, "unrecognized markType: %d", rc->markType);
				relation = NULL;	/* keep compiler quiet */
				break;
		}

		/* Check that relation is a legal target for marking */
		if (relation)
			CheckValidRowMarkRel(relation, rc->markType);

		erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
		erm->relation = relation;
		erm->relid = relid;
		erm->rti = rc->rti;
		erm->prti = rc->prti;
		erm->rowmarkId = rc->rowmarkId;
		erm->markType = rc->markType;
		erm->strength = rc->strength;
		erm->waitPolicy = rc->waitPolicy;
		erm->ermActive = false;
		ItemPointerSetInvalid(&(erm->curCtid));
		erm->ermExtra = NULL;
		estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
	}

	/*
	 * Initialize the executor's tuple table to empty.
	 */
	estate->es_tupleTable = NIL;
	estate->es_trig_tuple_slot = NULL;
	estate->es_trig_oldtup_slot = NULL;
	estate->es_trig_newtup_slot = NULL;

	/* mark EvalPlanQual not active */
	estate->es_epqTuple = NULL;
	estate->es_epqTupleSet = NULL;
	estate->es_epqScanDone = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags
			& (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return.
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		foreach(tlist, plan->targetlist)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

			if (tle->resjunk)
			{
				junk_filter_needed = true;
				break;
			}
		}

		if (junk_filter_needed)
		{
			JunkFilter *j;

			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   tupType->tdhasoid,
								   ExecInitExtraTupleSlot(estate, NULL));
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
}

/*
 * CheckValidResultRel
 *		Check that a proposed result relation is a legal target for the operation
 *
 * Generally the parser and/or planner should have noticed any such mistake
 * already, but let's make sure.
 *
 * Note: when changing this function, you probably also need to look at
 * CheckValidRowMarkRel.
 */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
{
	Relation	resultRel = resultRelInfo->ri_RelationDesc;
	TriggerDesc *trigDesc = resultRel->trigdesc;
	FdwRoutine *fdwroutine;

	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			CheckCmdReplicaIdentity(resultRel, operation);
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Messages
			 * here should match rewriteHandler.c's rewriteTargetView, except
			 * that we omit errdetail because we haven't got the information
			 * handy (and given that we really shouldn't get here anyway, it's
			 * not worth great exertion to get).
			 */
			switch (operation)
			{
				case CMD_INSERT:
					if (!trigDesc || !trigDesc->trig_insert_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot insert into view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
					break;
				case CMD_UPDATE:
					if (!trigDesc || !trigDesc->trig_update_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot update view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
					break;
				case CMD_DELETE:
					if (!trigDesc || !trigDesc->trig_delete_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot delete from view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		case RELKIND_MATVIEW:
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = resultRelInfo->ri_FdwRoutine;
			switch (operation)
			{
				case CMD_INSERT:

					/*
					 * If this is a foreign partition we are doing
					 * tuple-routing for, skip the check; it's disallowed
					 * elsewhere.
					 */
					if (resultRelInfo->ri_PartitionRoot)
						break;
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}

/*
 * Check that a proposed rowmark target relation is a legal target
 *
 * In most cases parser and/or planner should have noticed this already, but
 * they don't cover all cases.
 */
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
	FdwRoutine *fdwroutine;

	switch (rel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			/* Must disallow this because we don't vacuum sequences */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in sequence \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_TOASTVALUE:
			/* We could allow this, but there seems no good reason to */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in TOAST relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_VIEW:
			/* Should not get here; planner should have expanded the view */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in view \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_MATVIEW:
			/* Allow referencing a matview, but not actual locking clauses */
			if (markType != ROW_MARK_REFERENCE)
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot lock rows in materialized view \"%s\"",
								RelationGetRelationName(rel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = GetFdwRoutineForRelation(rel, false);
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(rel))));
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
	}
}

/*
 * Initialize ResultRelInfo data for one result relation
 *
 * Caution: before Postgres 9.1, this function included the relkind checking
 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
 * appropriate.  Be sure callers cover those needs.
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  Relation partition_root,
				  int instrument_options)
{
	List	   *partition_check = NIL;

	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		resultRelInfo->ri_TrigWhenExprs = (ExprState **)
			palloc0(n * sizeof(ExprState *));
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
	else
		resultRelInfo->ri_FdwRoutine = NULL;

	/* The following fields are set later if needed */
	resultRelInfo->ri_FdwState = NULL;
	resultRelInfo->ri_usesFdwDirectModify = false;
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_junkFilter = NULL;
	resultRelInfo->ri_projectReturning = NULL;
	resultRelInfo->ri_onConflictArbiterIndexes = NIL;
	resultRelInfo->ri_onConflict = NULL;

	resultRelInfo->ri_mergeTargetRTI = 0;
	resultRelInfo->ri_mergeState = (MergeState *) palloc0(sizeof(MergeState));

	/*
	 * Partition constraint, which also includes the partition constraint of
	 * all the ancestors that are partitions.  Note that it will be checked
	 * even in the case of tuple-routing where this table is the target leaf
	 * partition, if there are any BR triggers defined on the table.  Although
	 * tuple-routing implicitly preserves the partition constraint of the
	 * target partition for a given row, the BR triggers may change the row
	 * such that the constraint is no longer satisfied, which we must fail for
	 * by checking it explicitly.
	 *
	 * If this is a partitioned table, the partition constraint (if any) of a
	 * given row will be checked just before performing tuple-routing.
	 */
	partition_check = RelationGetPartitionQual(resultRelationDesc);

	resultRelInfo->ri_PartitionCheck = partition_check;
	resultRelInfo->ri_PartitionRoot = partition_root;
}

/*
 * ExecGetTriggerResultRel
 *
 * Get a ResultRelInfo for a trigger target relation.  Most of the time,
 * triggers are fired on one of the result relations of the query, and so
 * we can just return a member of the es_result_relations array, the
 * es_root_result_relations array (if any), or the es_leaf_result_relations
 * list (if any).  (Note: in self-join situations there might be multiple
 * members with the same OID; if so it doesn't matter which one we pick.)
 * However, it is sometimes necessary to fire triggers on other relations;
 * this happens mainly when an RI update trigger queues additional triggers
 * on other relations, which will be processed in the context of the outer
 * query.  For efficiency's sake, we want to have a ResultRelInfo for those
 * triggers too; that can avoid repeated re-opening of the relation.  (It
 * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
 * triggers.)  So we make additional ResultRelInfo's as needed, and save them
 * in es_trig_target_relations.
 */
ResultRelInfo *
ExecGetTriggerResultRel(EState *estate, Oid relid)
{
	ResultRelInfo *rInfo;
	int			nr;
	ListCell   *l;
	Relation	rel;
	MemoryContext oldcontext;

	/* First, search through the query result relations */
	rInfo = estate->es_result_relations;
	nr = estate->es_num_result_relations;
	while (nr > 0)
	{
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
		rInfo++;
		nr--;
	}
	/* Second, search through the root result relations, if any */
	rInfo = estate->es_root_result_relations;
	nr = estate->es_num_root_result_relations;
	while (nr > 0)
	{
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
		rInfo++;
		nr--;
	}

	/*
	 * Third, search through the result relations that were created during
	 * tuple routing, if any.
	 */
	foreach(l, estate->es_tuple_routing_result_relations)
	{
		rInfo = (ResultRelInfo *) lfirst(l);
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
	}
	/* Nope, but maybe we already made an extra ResultRelInfo for it */
	foreach(l, estate->es_trig_target_relations)
	{
		rInfo = (ResultRelInfo *) lfirst(l);
		if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
			return rInfo;
	}
	/* Nope, so we need a new one */

	/*
	 * Open the target relation's relcache entry.  We assume that an
	 * appropriate lock is still held by the backend from whenever the trigger
	 * event got queued, so we need take no new lock here.  Also, we need not
	 * recheck the relkind, so no need for CheckValidResultRel.
	 */
	rel = heap_open(relid, NoLock);

	/*
	 * Make the new entry in the right context.
	 */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
	rInfo = makeNode(ResultRelInfo);
	InitResultRelInfo(rInfo,
					  rel,
					  0,		/* dummy rangetable index */
					  NULL,
					  estate->es_instrument);
	estate->es_trig_target_relations =
		lappend(estate->es_trig_target_relations, rInfo);
	MemoryContextSwitchTo(oldcontext);

	/*
	 * Currently, we don't need any index information in ResultRelInfos used
	 * only for triggers, so no need to call ExecOpenIndices.
	 */

	return rInfo;
}

/*
 * Close any relations that have been opened by ExecGetTriggerResultRel().
 */
void
ExecCleanUpTriggerState(EState *estate)
{
	ListCell   *l;

	foreach(l, estate->es_trig_target_relations)
	{
		ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);

		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
	}
}

/*
 * ExecContextForcesOids
 *
 * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
 * we need to ensure that result tuples have space for an OID iff they are
 * going to be stored into a relation that has OIDs.  In other contexts
 * we are free to choose whether to leave space for OIDs in result tuples
 * (we generally don't want to, but we do if a physical-tlist optimization
 * is possible).  This routine checks the plan context and returns true if the
 * choice is forced, false if the choice is not forced.  In the true case,
 * *hasoids is set to the required value.
 *
 * One reason this is ugly is that all plan nodes in the plan tree will emit
 * tuples with space for an OID, though we really only need the topmost node
 * to do so.  However, node types like Sort don't project new tuples but just
 * return their inputs, and in those cases the requirement propagates down
 * to the input node.  Eventually we might make this code smart enough to
 * recognize how far down the requirement really goes, but for now we just
 * make all plan nodes do the same thing if the top level forces the choice.
 *
 * We assume that if we are generating tuples for INSERT or UPDATE,
 * estate->es_result_relation_info is already set up to describe the target
 * relation.  Note that in an UPDATE that spans an inheritance tree, some of
 * the target relations may have OIDs and some not.  We have to make the
 * decisions on a per-relation basis as we initialize each of the subplans of
 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
 * while initializing each subplan.
 *
 * CREATE TABLE AS is even uglier, because we don't have the target relation's
 * descriptor available when this code runs; we have to look aside at the
 * flags passed to ExecutorStart().
 */
bool
ExecContextForcesOids(PlanState *planstate, bool *hasoids)
{
	ResultRelInfo *ri = planstate->state->es_result_relation_info;

	if (ri != NULL)
	{
		Relation	rel = ri->ri_RelationDesc;

		if (rel != NULL)
		{
			*hasoids = rel->rd_rel->relhasoids;
			return true;
		}
	}

	if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
	{
		*hasoids = true;
		return true;
	}
	if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
	{
		*hasoids = false;
		return true;
	}

	return false;
}
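
/*
 * A minimal caller sketch (assumed usage, modeled on how node result-type
 * assignment consults this routine; the function name choose_hasoid is
 * hypothetical): if the context forces the choice, honor it; otherwise
 * default to not reserving OID space.
 */
#ifdef FORCES_OIDS_EXAMPLE		/* illustration only, not compiled */
static bool
choose_hasoid(PlanState *planstate)
{
	bool		hasoid;

	if (!ExecContextForcesOids(planstate, &hasoid))
	{
		/* given free choice, don't leave space for OIDs in result tuples */
		hasoid = false;
	}
	return hasoid;
}
#endif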

/* ----------------------------------------------------------------
 *		ExecPostprocessPlan
 *
 *		Give plan nodes a final chance to execute before shutdown
 * ----------------------------------------------------------------
 */
static void
ExecPostprocessPlan(EState *estate)
{
	ListCell   *lc;

	/*
	 * Make sure nodes run forward.
	 */
	estate->es_direction = ForwardScanDirection;

	/*
	 * Run any secondary ModifyTable nodes to completion, in case the main
	 * query did not fetch all rows from them.  (We do this to ensure that
	 * such nodes have predictable results.)
	 */
	foreach(lc, estate->es_auxmodifytables)
	{
		PlanState  *ps = (PlanState *) lfirst(lc);

		for (;;)
		{
			TupleTableSlot *slot;

			/* Reset the per-output-tuple exprcontext each time */
			ResetPerTupleExprContext(estate);

			slot = ExecProcNode(ps);

			if (TupIsNull(slot))
				break;
		}
	}
}

/* ----------------------------------------------------------------
 *		ExecEndPlan
 *
 *		Cleans up the query plan -- closes files and frees up storage
 *
 * NOTE: we are no longer very worried about freeing storage per se
 * in this code; FreeExecutorState should be guaranteed to release all
 * memory that needs to be released.  What we are worried about doing
 * is closing relations and dropping buffer pins.  Thus, for example,
 * tuple tables must be cleared or dropped to ensure pins are released.
 * ----------------------------------------------------------------
 */
static void
ExecEndPlan(PlanState *planstate, EState *estate)
{
	ResultRelInfo *resultRelInfo;
	int			i;
	ListCell   *l;

	/*
	 * shut down the node-type-specific query processing
	 */
	ExecEndNode(planstate);

	/*
	 * for subplans too
	 */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/*
	 * destroy the executor's tuple table.  Actually we only care about
	 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
	 * the TupleTableSlots, since the containing memory context is about to go
	 * away anyway.
	 */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/*
	 * close the result relation(s) if any, but hold locks until xact commit.
	 */
	resultRelInfo = estate->es_result_relations;
	for (i = estate->es_num_result_relations; i > 0; i--)
	{
		/* Close indices and then the relation itself */
		ExecCloseIndices(resultRelInfo);
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/* Close the root target relation(s). */
	resultRelInfo = estate->es_root_result_relations;
	for (i = estate->es_num_root_result_relations; i > 0; i--)
	{
		heap_close(resultRelInfo->ri_RelationDesc, NoLock);
		resultRelInfo++;
	}

	/* likewise close any trigger target relations */
	ExecCleanUpTriggerState(estate);

	/*
	 * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
	 * locks
	 */
	foreach(l, estate->es_rowMarks)
	{
		ExecRowMark *erm = (ExecRowMark *) lfirst(l);

		if (erm->relation)
			heap_close(erm->relation, NoLock);
	}
}

/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have retrieved 'numberTuples' tuples,
 *		moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest,
			bool execute_once)
{
	TupleTableSlot *slot;
	uint64		current_tuple_count;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * If the plan might potentially be executed multiple times, we must force
	 * it to run without parallelism, because we might exit early.
	 */
	if (!execute_once)
		use_parallel_mode = false;

	estate->es_use_parallel_mode = use_parallel_mode;
	if (use_parallel_mode)
		EnterParallelMode();

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
		slot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just end the loop...
		 */
		if (TupIsNull(slot))
		{
			/* Allow nodes to release or shut down resources. */
			(void) ExecShutdownNode(planstate);
			break;
		}

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * attributes removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 */
		if (estate->es_junkFilter != NULL)
			slot = ExecFilterJunk(estate->es_junkFilter, slot);

		/*
		 * If we are supposed to send the tuple somewhere, do so. (In
		 * practice, this is probably always the case at this point.)
		 */
		if (sendTuples)
		{
			/*
			 * If we are not able to send the tuple, we assume the destination
			 * has closed and no more tuples can be sent. If that's the case,
			 * end the loop.
			 */
			if (!dest->receiveSlot(slot, dest))
				break;
		}

		/*
		 * Count tuples processed, if this is a SELECT.  (For other operation
		 * types, the ModifyTable plan node must count the appropriate
		 * events.)
		 */
		if (operation == CMD_SELECT)
			(estate->es_processed)++;

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
		{
			/* Allow nodes to release or shut down resources. */
			(void) ExecShutdownNode(planstate);
			break;
		}
	}

	if (use_parallel_mode)
		ExitParallelMode();
}

/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 *
 * Returns NULL if OK, else name of failed check constraint
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	int			i;

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(ExprState **) palloc(ncheck * sizeof(ExprState *));
		for (i = 0; i < ncheck; i++)
		{
			Expr	   *checkconstr;

			checkconstr = stringToNode(check[i].ccbin);
			resultRelInfo->ri_ConstraintExprs[i] =
				ExecPrepareExpr(checkconstr, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL specifies that a NULL result from a constraint expression
		 * is not to be treated as a failure.  Therefore, use ExecCheck not
		 * ExecQual.
		 */
		if (!ExecCheck(checkconstr, econtext))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}

/*
 * ExecPartitionCheck --- check that tuple meets the partition constraint.
 *
 * Exported in executor.h for outside use.
 * Returns true if it meets the partition constraint, else returns false.
 */
bool
ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
				   EState *estate)
{
	ExprContext *econtext;

	/*
	 * If first time through, build expression state tree for the partition
	 * check expression.  Keep it in the per-query memory context so it'll
	 * survive throughout the query.
	 */
	if (resultRelInfo->ri_PartitionCheckExpr == NULL)
	{
		List	   *qual = resultRelInfo->ri_PartitionCheck;

		resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/*
	 * As in case of the catalogued constraints, we treat a NULL result as
	 * success here, not a failure.
	 */
	return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
}

/*
 * ExecPartitionCheckEmitError - Form and emit an error message after a failed
 * partition constraint check.
 */
void
ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
							TupleTableSlot *slot,
							EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	Relation	orig_rel = rel;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	char	   *val_desc;
	Bitmapset  *modifiedCols;
	Bitmapset  *insertedCols;
	Bitmapset  *updatedCols;

	/*
	 * Need to first convert the tuple to the root partitioned table's row
	 * type. For details, check similar comments in ExecConstraints().
	 */
	if (resultRelInfo->ri_PartitionRoot)
	{
		HeapTuple	tuple = ExecFetchSlotTuple(slot);
		TupleDesc	old_tupdesc = RelationGetDescr(rel);
		TupleConversionMap *map;

		rel = resultRelInfo->ri_PartitionRoot;
		tupdesc = RelationGetDescr(rel);
		/* a reverse map */
		map = convert_tuples_by_name(old_tupdesc, tupdesc,
									 gettext_noop("could not convert row type"));
		if (map != NULL)
		{
			tuple = do_convert_tuple(tuple, map);
			ExecSetSlotDescriptor(slot, tupdesc);
			ExecStoreTuple(tuple, slot, InvalidBuffer, false);
		}
	}

	insertedCols = GetInsertedColumns(resultRelInfo, estate);
	updatedCols = GetUpdatedColumns(resultRelInfo, estate);
	modifiedCols = bms_union(insertedCols, updatedCols);
	val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
											 slot,
											 tupdesc,
											 modifiedCols,
											 64);
	ereport(ERROR,
			(errcode(ERRCODE_CHECK_VIOLATION),
			 errmsg("new row for relation \"%s\" violates partition constraint",
					RelationGetRelationName(orig_rel)),
			 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
}
/*
 * ExecConstraints - check constraints of the tuple in 'slot'
 *
 * This checks the traditional NOT NULL and check constraints, and if
 * requested, checks the partition constraint.
 *
 * Note: 'slot' contains the tuple to check the constraints of, which may
 * have been converted from the original input tuple after tuple routing.
 * 'resultRelInfo' is the original result relation, before tuple routing.
 */
void
ExecConstraints(ResultRelInfo *resultRelInfo,
                TupleTableSlot *slot, EState *estate,
                bool check_partition_constraint)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    TupleConstr *constr = tupdesc->constr;
    Bitmapset  *modifiedCols;
    Bitmapset  *insertedCols;
    Bitmapset  *updatedCols;

    Assert(constr || resultRelInfo->ri_PartitionCheck);

    if (constr && constr->has_not_null)
    {
        int         natts = tupdesc->natts;
        int         attrChk;

        for (attrChk = 1; attrChk <= natts; attrChk++)
        {
            Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);

            if (att->attnotnull && slot_attisnull(slot, attrChk))
            {
                char       *val_desc;
                Relation    orig_rel = rel;
                TupleDesc   orig_tupdesc = RelationGetDescr(rel);

                /*
                 * If the tuple has been routed, it's been converted to the
                 * partition's rowtype, which might differ from the root
                 * table's.  We must convert it back to the root table's
                 * rowtype so that the val_desc shown in the error message
                 * matches the input tuple.
                 */
                if (resultRelInfo->ri_PartitionRoot)
                {
                    HeapTuple   tuple = ExecFetchSlotTuple(slot);
                    TupleConversionMap *map;

                    rel = resultRelInfo->ri_PartitionRoot;
                    tupdesc = RelationGetDescr(rel);
                    /* a reverse map */
                    map = convert_tuples_by_name(orig_tupdesc, tupdesc,
                                                 gettext_noop("could not convert row type"));
                    if (map != NULL)
                    {
                        tuple = do_convert_tuple(tuple, map);
                        ExecSetSlotDescriptor(slot, tupdesc);
                        ExecStoreTuple(tuple, slot, InvalidBuffer, false);
                    }
                }

                insertedCols = GetInsertedColumns(resultRelInfo, estate);
                updatedCols = GetUpdatedColumns(resultRelInfo, estate);
                modifiedCols = bms_union(insertedCols, updatedCols);
                val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
                                                         slot,
                                                         tupdesc,
                                                         modifiedCols,
                                                         64);

                ereport(ERROR,
                        (errcode(ERRCODE_NOT_NULL_VIOLATION),
                         errmsg("null value in column \"%s\" violates not-null constraint",
                                NameStr(att->attname)),
                         val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
                         errtablecol(orig_rel, attrChk)));
            }
        }
    }

    if (constr && constr->num_check > 0)
    {
        const char *failed;

        if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
        {
            char       *val_desc;
            Relation    orig_rel = rel;

            /* See the comment above. */
            if (resultRelInfo->ri_PartitionRoot)
            {
                HeapTuple   tuple = ExecFetchSlotTuple(slot);
                TupleDesc   old_tupdesc = RelationGetDescr(rel);
                TupleConversionMap *map;

                rel = resultRelInfo->ri_PartitionRoot;
                tupdesc = RelationGetDescr(rel);
                /* a reverse map */
                map = convert_tuples_by_name(old_tupdesc, tupdesc,
                                             gettext_noop("could not convert row type"));
                if (map != NULL)
                {
                    tuple = do_convert_tuple(tuple, map);
                    ExecSetSlotDescriptor(slot, tupdesc);
                    ExecStoreTuple(tuple, slot, InvalidBuffer, false);
                }
            }

            insertedCols = GetInsertedColumns(resultRelInfo, estate);
            updatedCols = GetUpdatedColumns(resultRelInfo, estate);
            modifiedCols = bms_union(insertedCols, updatedCols);
            val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
                                                     slot,
                                                     tupdesc,
                                                     modifiedCols,
                                                     64);
            ereport(ERROR,
                    (errcode(ERRCODE_CHECK_VIOLATION),
                     errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
                            RelationGetRelationName(orig_rel), failed),
                     val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
                     errtableconstraint(orig_rel, failed)));
        }
    }

    if (check_partition_constraint && resultRelInfo->ri_PartitionCheck &&
        !ExecPartitionCheck(resultRelInfo, slot, estate))
        ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
}

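/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a caller shaped like the INSERT path in nodeModifyTable.c would check
 * constraints just before writing the tuple, roughly:
 *
 *		if (resultRelInfo->ri_RelationDesc->rd_att->constr ||
 *			resultRelInfo->ri_PartitionCheck)
 *			ExecConstraints(resultRelInfo, slot, estate, true);
 *
 * On violation, ExecConstraints() reports the error via ereport(ERROR)
 * and does not return.
 */
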
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind.
 *
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
 * CHECK OPTION set and from row level security policies).  See ExecInsert()
 * and ExecUpdate().
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
                     TupleTableSlot *slot, EState *estate)
{
    Relation    rel = resultRelInfo->ri_RelationDesc;
    TupleDesc   tupdesc = RelationGetDescr(rel);
    ExprContext *econtext;
    ListCell   *l1,
               *l2;

    /*
     * We will use the EState's per-tuple context for evaluating constraint
     * expressions (creating it if it's not already there).
     */
    econtext = GetPerTupleExprContext(estate);

    /* Arrange for econtext's scan tuple to be the tuple under test */
    econtext->ecxt_scantuple = slot;

    /* Check each of the constraints */
    forboth(l1, resultRelInfo->ri_WithCheckOptions,
            l2, resultRelInfo->ri_WithCheckOptionExprs)
    {
        WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
        ExprState  *wcoExpr = (ExprState *) lfirst(l2);

        /*
         * Skip any WCOs which are not the kind we are looking for at this
         * time.
         */
        if (wco->kind != kind)
            continue;

        /*
         * WITH CHECK OPTION checks are intended to ensure that the new tuple
         * is visible (in the case of a view) or that it passes the
         * 'with-check' policy (in the case of row security).  If the qual
         * evaluates to NULL or FALSE, then the new tuple won't be included in
         * the view or doesn't pass the 'with-check' policy for the table.
         */
        if (!ExecQual(wcoExpr, econtext))
        {
            char       *val_desc;
            Bitmapset  *modifiedCols;
            Bitmapset  *insertedCols;
            Bitmapset  *updatedCols;

            switch (wco->kind)
            {
                    /*
                     * For WITH CHECK OPTIONs coming from views, we might be
                     * able to provide the details on the row, depending on
                     * the permissions on the relation (that is, if the user
                     * could view it directly anyway).  For RLS violations, we
                     * don't include the data since we don't know if the user
                     * should be able to view the tuple as that depends on the
                     * SELECT policies.
                     */
                case WCO_VIEW_CHECK:
                    /* See the comment in ExecConstraints(). */
                    if (resultRelInfo->ri_PartitionRoot)
                    {
                        HeapTuple   tuple = ExecFetchSlotTuple(slot);
                        TupleDesc   old_tupdesc = RelationGetDescr(rel);
                        TupleConversionMap *map;

                        rel = resultRelInfo->ri_PartitionRoot;
                        tupdesc = RelationGetDescr(rel);
                        /* a reverse map */
                        map = convert_tuples_by_name(old_tupdesc, tupdesc,
                                                     gettext_noop("could not convert row type"));
                        if (map != NULL)
                        {
                            tuple = do_convert_tuple(tuple, map);
                            ExecSetSlotDescriptor(slot, tupdesc);
                            ExecStoreTuple(tuple, slot, InvalidBuffer, false);
                        }
                    }

                    insertedCols = GetInsertedColumns(resultRelInfo, estate);
                    updatedCols = GetUpdatedColumns(resultRelInfo, estate);
                    modifiedCols = bms_union(insertedCols, updatedCols);
                    val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
                                                             slot,
                                                             tupdesc,
                                                             modifiedCols,
                                                             64);

                    ereport(ERROR,
                            (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
                             errmsg("new row violates check option for view \"%s\"",
                                    wco->relname),
                             val_desc ? errdetail("Failing row contains %s.",
                                                  val_desc) : 0));
                    break;
                case WCO_RLS_INSERT_CHECK:
                case WCO_RLS_UPDATE_CHECK:
                    if (wco->polname != NULL)
                        ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
                                        wco->polname, wco->relname)));
                    else
                        ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("new row violates row-level security policy for table \"%s\"",
                                        wco->relname)));
                    break;
                case WCO_RLS_MERGE_UPDATE_CHECK:
                case WCO_RLS_MERGE_DELETE_CHECK:
                    if (wco->polname != NULL)
                        ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
                                        wco->polname, wco->relname)));
                    else
                        ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
                                        wco->relname)));
                    break;
                case WCO_RLS_CONFLICT_CHECK:
                    if (wco->polname != NULL)
                        ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
                                        wco->polname, wco->relname)));
                    else
                        ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
                                        wco->relname)));
                    break;
                default:
                    elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
                    break;
            }
        }
    }
}

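/*
 * Illustrative sketch (editor's addition): callers invoke this once per
 * WCO kind that can apply at the current point, e.g. on the INSERT path:
 *
 *		ExecWithCheckOptions(WCO_RLS_INSERT_CHECK, resultRelInfo,
 *							 slot, estate);
 *		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
 *							 slot, estate);
 *
 * Each call checks only the WCOs of the requested kind; a failed qual
 * ends in ereport(ERROR), so control does not return on violation.
 */
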
/*
 * ExecBuildSlotValueDescription -- construct a string representing a tuple
 *
 * This is intentionally very similar to BuildIndexValueDescription, but
 * unlike that function, we truncate long field values (to at most maxfieldlen
 * bytes).  That seems necessary here since heap field values could be very
 * long, whereas index entries typically aren't so wide.
 *
 * Also, unlike the case with index entries, we need to be prepared to ignore
 * dropped columns.  We used to use the slot's tuple descriptor to decode the
 * data, but the slot's descriptor doesn't identify dropped columns, so we
 * now need to be passed the relation's descriptor.
 *
 * Note that, like BuildIndexValueDescription, if the user does not have
 * permission to view any of the columns involved, a NULL is returned.  Unlike
 * BuildIndexValueDescription, if the user has access to view a subset of the
 * columns involved, that subset will be returned with a key identifying which
 * columns they are.
 */
static char *
ExecBuildSlotValueDescription(Oid reloid,
                              TupleTableSlot *slot,
                              TupleDesc tupdesc,
                              Bitmapset *modifiedCols,
                              int maxfieldlen)
{
    StringInfoData buf;
    StringInfoData collist;
    bool        write_comma = false;
    bool        write_comma_collist = false;
    int         i;
    AclResult   aclresult;
    bool        table_perm = false;
    bool        any_perm = false;

    /*
     * Check if RLS is enabled and should be active for the relation; if so,
     * then don't return anything.  Otherwise, go through normal permission
     * checks.
     */
    if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
        return NULL;

    initStringInfo(&buf);

    appendStringInfoChar(&buf, '(');

    /*
     * Check if the user has permissions to see the row.  Table-level SELECT
     * allows access to all columns.  If the user does not have table-level
     * SELECT then we check each column and include those the user has SELECT
     * rights on.  Additionally, we always include columns the user provided
     * data for.
     */
    aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
    if (aclresult != ACLCHECK_OK)
    {
        /* Set up the buffer for the column list */
        initStringInfo(&collist);
        appendStringInfoChar(&collist, '(');
    }
    else
        table_perm = any_perm = true;

    /* Make sure the tuple is fully deconstructed */
    slot_getallattrs(slot);

    for (i = 0; i < tupdesc->natts; i++)
    {
        bool        column_perm = false;
        char       *val;
        int         vallen;
        Form_pg_attribute att = TupleDescAttr(tupdesc, i);

        /* ignore dropped columns */
        if (att->attisdropped)
            continue;

        if (!table_perm)
        {
            /*
             * No table-level SELECT, so need to make sure they either have
             * SELECT rights on the column or that they have provided the data
             * for the column.  If not, omit this column from the error
             * message.
             */
            aclresult = pg_attribute_aclcheck(reloid, att->attnum,
                                              GetUserId(), ACL_SELECT);
            if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
                              modifiedCols) || aclresult == ACLCHECK_OK)
            {
                column_perm = any_perm = true;

                if (write_comma_collist)
                    appendStringInfoString(&collist, ", ");
                else
                    write_comma_collist = true;

                appendStringInfoString(&collist, NameStr(att->attname));
            }
        }

        if (table_perm || column_perm)
        {
            if (slot->tts_isnull[i])
                val = "null";
            else
            {
                Oid         foutoid;
                bool        typisvarlena;

                getTypeOutputInfo(att->atttypid,
                                  &foutoid, &typisvarlena);
                val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
            }

            if (write_comma)
                appendStringInfoString(&buf, ", ");
            else
                write_comma = true;

            /* truncate if needed */
            vallen = strlen(val);
            if (vallen <= maxfieldlen)
                appendStringInfoString(&buf, val);
            else
            {
                vallen = pg_mbcliplen(val, vallen, maxfieldlen);
                appendBinaryStringInfo(&buf, val, vallen);
                appendStringInfoString(&buf, "...");
            }
        }
    }

    /* If we end up with zero columns being returned, then return NULL. */
    if (!any_perm)
        return NULL;

    appendStringInfoChar(&buf, ')');

    if (!table_perm)
    {
        appendStringInfoString(&collist, ") = ");
        appendStringInfoString(&collist, buf.data);

        return collist.data;
    }

    return buf.data;
}

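/*
 * Illustrative examples (editor's addition) of the strings this builds,
 * assuming a hypothetical table t(a int, b text):
 *
 *		table-level SELECT:        (1, null)
 *		SELECT on column a only:   (a) = (1)
 *		no visible columns:        NULL (caller then omits the errdetail)
 *
 * Values longer than maxfieldlen bytes are clipped at a multibyte
 * character boundary and terminated with "...".
 */
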
/*
 * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
 * given ResultRelInfo
 */
LockTupleMode
ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
{
    Bitmapset  *keyCols;
    Bitmapset  *updatedCols;

    /*
     * Compute lock mode to use.  If columns that are part of the key have not
     * been modified, then we can use a weaker lock, allowing for better
     * concurrency.
     */
    updatedCols = GetUpdatedColumns(relinfo, estate);
    keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
                                         INDEX_ATTR_BITMAP_KEY);

    if (bms_overlap(keyCols, updatedCols))
        return LockTupleExclusive;

    return LockTupleNoKeyExclusive;
}

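/*
 * Illustrative sketch (editor's addition): the UPDATE path picks the
 * weakest lock that is still safe before locking the target tuple:
 *
 *		LockTupleMode lockmode = ExecUpdateLockMode(estate, resultRelInfo);
 *
 * LockTupleNoKeyExclusive still permits concurrent SELECT ... FOR KEY
 * SHARE (e.g. foreign-key checks), whereas LockTupleExclusive does not.
 */
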
/*
 * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
 *
 * If no such struct, either return NULL or throw error depending on missing_ok
 */
ExecRowMark *
ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
{
    ListCell   *lc;

    foreach(lc, estate->es_rowMarks)
    {
        ExecRowMark *erm = (ExecRowMark *) lfirst(lc);

        if (erm->rti == rti)
            return erm;
    }
    if (!missing_ok)
        elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
    return NULL;
}

/*
 * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
 *
 * Inputs are the underlying ExecRowMark struct and the targetlist of the
 * input plan node (not planstate node!).  We need the latter to find out
 * the column numbers of the resjunk columns.
 */
ExecAuxRowMark *
ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
{
    ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
    char        resname[32];

    aerm->rowmark = erm;

    /* Look up the resjunk columns associated with this rowmark */
    if (erm->markType != ROW_MARK_COPY)
    {
        /* need ctid for all methods other than COPY */
        snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
        aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
                                                       resname);
        if (!AttributeNumberIsValid(aerm->ctidAttNo))
            elog(ERROR, "could not find junk %s column", resname);
    }
    else
    {
        /* need wholerow if COPY */
        snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
        aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
                                                        resname);
        if (!AttributeNumberIsValid(aerm->wholeAttNo))
            elog(ERROR, "could not find junk %s column", resname);
    }

    /* if child rel, need tableoid */
    if (erm->rti != erm->prti)
    {
        snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
        aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
                                                       resname);
        if (!AttributeNumberIsValid(aerm->toidAttNo))
            elog(ERROR, "could not find junk %s column", resname);
    }

    return aerm;
}

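/*
 * Illustrative note (editor's addition): the junk columns looked up here
 * are named after the rowmark's ID, so for rowmarkId == 3 the plan's
 * targetlist would carry resjunk entries such as:
 *
 *		"ctid3"			(locking and ROW_MARK_REFERENCE marks)
 *		"wholerow3"		(ROW_MARK_COPY)
 *		"tableoid3"		(inheritance child rels, erm->rti != erm->prti)
 */
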
/*
 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
 * process the updated version under READ COMMITTED rules.
 *
 * See backend/executor/README for some info about how this works.
 */

/*
 * Check a modified tuple to see if we want to process its updated version
 * under READ COMMITTED rules.
 *
 *	estate - outer executor state data
 *	epqstate - state for EvalPlanQual rechecking
 *	relation - table containing tuple
 *	rti - rangetable index of table containing tuple
 *	lockmode - requested tuple lock mode
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 *
 * Note: properly, lockmode should be declared as enum LockTupleMode,
 * but we use "int" to avoid having to include heapam.h in executor.h.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, EPQState *epqstate,
             Relation relation, Index rti, int lockmode,
             ItemPointer tid, TransactionId priorXmax)
{
    TupleTableSlot *slot;
    HeapTuple   copyTuple;

    Assert(rti > 0);

    /*
     * Get and lock the updated version of the row; if fail, return NULL.
     */
    copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
                                  tid, priorXmax);

    if (copyTuple == NULL)
        return NULL;

    /*
     * For UPDATE/DELETE we have to return tid of actual row we're executing
     * EPQ for.
     */
    *tid = copyTuple->t_self;

    /*
     * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
     */
    EvalPlanQualBegin(epqstate, estate);

    /*
     * Free old test tuple, if any, and store new tuple where relation's scan
     * node will see it
     */
    EvalPlanQualSetTuple(epqstate, rti, copyTuple);

    /*
     * Fetch any non-locked source rows
     */
    EvalPlanQualFetchRowMarks(epqstate);

    /*
     * Run the EPQ query.  We assume it will return at most one tuple.
     */
    slot = EvalPlanQualNext(epqstate);

    /*
     * If we got a tuple, force the slot to materialize the tuple so that it
     * is not dependent on any local state in the EPQ query (in particular,
     * it's highly likely that the slot contains references to any pass-by-ref
     * datums that may be present in copyTuple).  As with the next step, this
     * is to guard against early re-use of the EPQ query.
     */
    if (!TupIsNull(slot))
        (void) ExecMaterializeSlot(slot);

    /*
     * Clear out the test tuple.  This is needed in case the EPQ query is
     * re-used to test a tuple for a different relation.  (Not clear that can
     * really happen, but let's be safe.)
     */
    EvalPlanQualSetTuple(epqstate, rti, NULL);

    return slot;
}

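/*
 * Illustrative sketch (editor's addition): under READ COMMITTED, an
 * UPDATE that finds its target row concurrently updated rechecks the new
 * row version roughly like this (cf. the callers in nodeModifyTable.c):
 *
 *		slot = EvalPlanQual(estate, epqstate, relation, rti,
 *							lockmode, &hufd.ctid, hufd.xmax);
 *		if (!TupIsNull(slot))
 *			... redo the operation using the rechecked tuple ...
 *		else
 *			... row no longer passes the quals; skip it ...
 */
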
/*
 * Fetch a copy of the newest version of an outdated tuple
 *
 *	estate - executor state data
 *	relation - table containing tuple
 *	lockmode - requested tuple lock mode
 *	wait_policy - requested lock wait policy
 *	*tid - t_ctid from the outdated tuple (ie, next updated version)
 *	priorXmax - t_xmax from the outdated tuple
 *
 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
 * that there is no newest version (ie, the row was deleted not updated).
 * We also return NULL if the tuple is locked and the wait policy is to skip
 * such tuples.
 *
 * If successful, we have locked the newest tuple version, so caller does not
 * need to worry about it changing anymore.
 *
 * Note: properly, lockmode should be declared as enum LockTupleMode,
 * but we use "int" to avoid having to include heapam.h in executor.h.
 */
HeapTuple
EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
                  LockWaitPolicy wait_policy,
                  ItemPointer tid, TransactionId priorXmax)
{
    HeapTuple   copyTuple = NULL;
    HeapTupleData tuple;
    SnapshotData SnapshotDirty;

    /*
     * fetch target tuple
     *
     * Loop here to deal with updated or busy tuples
     */
    InitDirtySnapshot(SnapshotDirty);
    tuple.t_self = *tid;
    for (;;)
    {
        Buffer      buffer;

        if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
        {
            HTSU_Result test;
            HeapUpdateFailureData hufd;

            /*
             * If xmin isn't what we're expecting, the slot must have been
             * recycled and reused for an unrelated tuple.  This implies that
             * the latest version of the row was deleted, so we need do
             * nothing.  (Should be safe to examine xmin without getting
             * buffer's content lock.  We assume reading a TransactionId to be
             * atomic, and Xmin never changes in an existing tuple, except to
             * invalid or frozen, and neither of those can match priorXmax.)
             */
            if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                     priorXmax))
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /* otherwise xmin should not be dirty... */
            if (TransactionIdIsValid(SnapshotDirty.xmin))
                elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

            /*
             * If tuple is being updated by other transaction then we have to
             * wait for its commit/abort, or die trying.
             */
            if (TransactionIdIsValid(SnapshotDirty.xmax))
            {
                ReleaseBuffer(buffer);
                switch (wait_policy)
                {
                    case LockWaitBlock:
                        XactLockTableWait(SnapshotDirty.xmax,
                                          relation, &tuple.t_self,
                                          XLTW_FetchUpdated);
                        break;
                    case LockWaitSkip:
                        if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
                            return NULL;    /* skip instead of waiting */
                        break;
                    case LockWaitError:
                        if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
                            ereport(ERROR,
                                    (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                                     errmsg("could not obtain lock on row in relation \"%s\"",
                                            RelationGetRelationName(relation))));
                        break;
                }
                continue;       /* loop back to repeat heap_fetch */
            }

            /*
             * If tuple was inserted by our own transaction, we have to check
             * cmin against es_output_cid: cmin >= current CID means our
             * command cannot see the tuple, so we should ignore it.  Otherwise
             * heap_lock_tuple() will throw an error, and so would any later
             * attempt to update or delete the tuple.  (We need not check cmax
             * because HeapTupleSatisfiesDirty will consider a tuple deleted
             * by our transaction dead, regardless of cmax.)  We just checked
             * that priorXmax == xmin, so we can test that variable instead of
             * doing HeapTupleHeaderGetXmin again.
             */
            if (TransactionIdIsCurrentTransactionId(priorXmax) &&
                HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /*
             * This is a live tuple, so now try to lock it.
             */
            test = heap_lock_tuple(relation, &tuple,
                                   estate->es_output_cid,
                                   lockmode, wait_policy,
                                   false, &buffer, &hufd);
            /* We now have two pins on the buffer, get rid of one */
            ReleaseBuffer(buffer);

            switch (test)
            {
                case HeapTupleSelfUpdated:

                    /*
                     * The target tuple was already updated or deleted by the
                     * current command, or by a later command in the current
                     * transaction.  We *must* ignore the tuple in the former
                     * case, so as to avoid the "Halloween problem" of
                     * repeated update attempts.  In the latter case it might
                     * be sensible to fetch the updated tuple instead, but
                     * doing so would require changing heap_update and
                     * heap_delete to not complain about updating "invisible"
                     * tuples, which seems pretty scary (heap_lock_tuple will
                     * not complain, but few callers expect
                     * HeapTupleInvisible, and we're not one of them).  So for
                     * now, treat the tuple as deleted and do not process.
                     */
                    ReleaseBuffer(buffer);
                    return NULL;

                case HeapTupleMayBeUpdated:
                    /* successfully locked */
                    break;

                case HeapTupleUpdated:
                    ReleaseBuffer(buffer);
                    if (IsolationUsesXactSnapshot())
                        ereport(ERROR,
                                (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                                 errmsg("could not serialize access due to concurrent update")));

                    /* Should not encounter speculative tuple on recheck */
                    Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
                    if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
                    {
                        /* it was updated, so look at the updated version */
                        tuple.t_self = hufd.ctid;
                        /* updated row should have xmin matching this xmax */
                        priorXmax = hufd.xmax;
                        continue;
                    }
                    /* tuple was deleted, so give up */
                    return NULL;

                case HeapTupleWouldBlock:
                    ReleaseBuffer(buffer);
                    return NULL;

                case HeapTupleInvisible:
                    elog(ERROR, "attempted to lock invisible tuple");
                    break;

                default:
                    ReleaseBuffer(buffer);
                    elog(ERROR, "unrecognized heap_lock_tuple status: %u",
                         test);
                    return NULL;    /* keep compiler quiet */
            }

            /*
             * We got tuple - now copy it for use by recheck query.
             */
            copyTuple = heap_copytuple(&tuple);
            ReleaseBuffer(buffer);
            break;
        }

        /*
         * If the referenced slot was actually empty, the latest version of
         * the row must have been deleted, so we need do nothing.
         */
        if (tuple.t_data == NULL)
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * As above, if xmin isn't what we're expecting, do nothing.
         */
        if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                 priorXmax))
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * If we get here, the tuple was found but failed SnapshotDirty.
         * Assuming the xmin is either a committed xact or our own xact (as it
         * certainly should be if we're trying to modify the tuple), this must
         * mean that the row was updated or deleted by either a committed xact
         * or our own xact.  If it was deleted, we can ignore it; if it was
         * updated then chain up to the next version and repeat the whole
         * process.
         *
         * As above, it should be safe to examine xmax and t_ctid without the
         * buffer content lock, because they can't be changing.
         */
        if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
        {
            /* deleted, so forget about it */
            ReleaseBuffer(buffer);
            return NULL;
        }

        /* updated, so look at the updated row */
        tuple.t_self = tuple.t_data->t_ctid;
        /* updated row should have xmin matching this xmax */
        priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
        ReleaseBuffer(buffer);
        /* loop back to fetch next in chain */
    }

    /*
     * Return the copied tuple
     */
    return copyTuple;
}

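/*
 * Illustrative note (editor's addition): the loop above walks the update
 * chain by following each version's t_ctid link, e.g.:
 *
 *		(0,1) --t_ctid--> (0,7) --t_ctid--> (1,2)
 *		 old               old               newest
 *
 * At each hop, the next version's xmin must equal the previous version's
 * xmax (priorXmax); otherwise the chain is broken and NULL is returned.
 */
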
/*
 * EvalPlanQualInit -- initialize during creation of a plan state node
 * that might need to invoke EPQ processing.
 *
 * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
 * with EvalPlanQualSetPlan.
 */
void
EvalPlanQualInit(EPQState *epqstate, EState *estate,
                 Plan *subplan, List *auxrowmarks, int epqParam)
{
    /* Mark the EPQ state inactive */
    epqstate->estate = NULL;
    epqstate->planstate = NULL;
    epqstate->origslot = NULL;
    /* ... and remember data that EvalPlanQualBegin will need */
    epqstate->plan = subplan;
    epqstate->arowMarks = auxrowmarks;
    epqstate->epqParam = epqParam;
}

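/*
 * Illustrative sketch (editor's addition): a ModifyTable-style node owns
 * one EPQState across its whole lifetime, roughly:
 *
 *		EvalPlanQualInit(&epqstate, estate, NULL, NIL, epqParam);
 *		EvalPlanQualSetPlan(&epqstate, subplan, auxrowmarks);
 *		... EvalPlanQual(...) as needed during execution ...
 *		EvalPlanQualEnd(&epqstate);
 *
 * epqParam is assumed here to be the parameter ID the planner assigned
 * for triggering EPQ rescans.
 */
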
/*
 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
 *
 * We need this so that ModifyTable can deal with multiple subplans.
 */
void
EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
{
    /* If we have a live EPQ query, shut it down */
    EvalPlanQualEnd(epqstate);
    /* And set/change the plan pointer */
    epqstate->plan = subplan;
    /* The rowmarks depend on the plan, too */
    epqstate->arowMarks = auxrowmarks;
}

/*
 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
 *
 * NB: passed tuple must be palloc'd; it may get freed later
 */
void
EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
{
    EState     *estate = epqstate->estate;

    Assert(rti > 0);

    /*
     * free old test tuple, if any, and store new tuple where relation's scan
     * node will see it
     */
    if (estate->es_epqTuple[rti - 1] != NULL)
        heap_freetuple(estate->es_epqTuple[rti - 1]);
    estate->es_epqTuple[rti - 1] = tuple;
    estate->es_epqTupleSet[rti - 1] = true;
}

/*
 * Fetch back the current test tuple (if any) for the specified RTI
 */
HeapTuple
EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
{
    EState     *estate = epqstate->estate;

    Assert(rti > 0);

    return estate->es_epqTuple[rti - 1];
}

/*
 * Fetch the current row values for any non-locked relations that need
 * to be scanned by an EvalPlanQual operation.  origslot must have been set
 * to contain the current result row (top-level row) that we need to recheck.
 */
void
EvalPlanQualFetchRowMarks(EPQState *epqstate)
{
    ListCell   *l;

    Assert(epqstate->origslot != NULL);

    foreach(l, epqstate->arowMarks)
    {
        ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
        ExecRowMark *erm = aerm->rowmark;
        Datum       datum;
        bool        isNull;
        HeapTupleData tuple;

        if (RowMarkRequiresRowShareLock(erm->markType))
            elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");

        /* clear any leftover test tuple for this rel */
        EvalPlanQualSetTuple(epqstate, erm->rti, NULL);

        /* if child rel, must check whether it produced this row */
        if (erm->rti != erm->prti)
        {
            Oid         tableoid;

            datum = ExecGetJunkAttribute(epqstate->origslot,
                                         aerm->toidAttNo,
                                         &isNull);
            /* non-locked rels could be on the inside of outer joins */
            if (isNull)
                continue;
            tableoid = DatumGetObjectId(datum);

            Assert(OidIsValid(erm->relid));
            if (tableoid != erm->relid)
            {
                /* this child is inactive right now */
                continue;
            }
        }

        if (erm->markType == ROW_MARK_REFERENCE)
        {
            HeapTuple   copyTuple;

            Assert(erm->relation != NULL);

            /* fetch the tuple's ctid */
            datum = ExecGetJunkAttribute(epqstate->origslot,
                                         aerm->ctidAttNo,
                                         &isNull);
            /* non-locked rels could be on the inside of outer joins */
            if (isNull)
                continue;

            /* fetch requests on foreign tables must be passed to their FDW */
            if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
            {
                FdwRoutine *fdwroutine;
                bool        updated = false;

                fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
                /* this should have been checked already, but let's be safe */
                if (fdwroutine->RefetchForeignRow == NULL)
                    ereport(ERROR,
                            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                             errmsg("cannot lock rows in foreign table \"%s\"",
                                    RelationGetRelationName(erm->relation))));
                copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
                                                          erm,
                                                          datum,
                                                          &updated);
                if (copyTuple == NULL)
                    elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

                /*
                 * Ideally we'd insist on updated == false here, but that
                 * assumes that FDWs can track that exactly, which they might
                 * not be able to.  So just ignore the flag.
                 */
            }
            else
            {
                /* ordinary table, fetch the tuple */
                Buffer      buffer;

                tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
                if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
                                false, NULL))
                    elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

                if (HeapTupleHeaderGetNatts(tuple.t_data) <
                    RelationGetDescr(erm->relation)->natts)
                {
                    copyTuple = heap_expand_tuple(&tuple,
                                                  RelationGetDescr(erm->relation));
                }
                else
                {
                    /* successful, copy tuple */
                    copyTuple = heap_copytuple(&tuple);
                }
                ReleaseBuffer(buffer);
            }

            /* store tuple */
            EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
        }
        else
        {
            HeapTupleHeader td;

            Assert(erm->markType == ROW_MARK_COPY);

            /* fetch the whole-row Var for the relation */
            datum = ExecGetJunkAttribute(epqstate->origslot,
                                         aerm->wholeAttNo,
                                         &isNull);
            /* non-locked rels could be on the inside of outer joins */
            if (isNull)
                continue;
            td = DatumGetHeapTupleHeader(datum);

            /* build a temporary HeapTuple control structure */
            tuple.t_len = HeapTupleHeaderGetDatumLength(td);
            tuple.t_data = td;
            /* relation might be a foreign table, if so provide tableoid */
            tuple.t_tableOid = erm->relid;
            /* also copy t_ctid in case there's valid data there */
            tuple.t_self = td->t_ctid;

            /* copy and store tuple */
            EvalPlanQualSetTuple(epqstate, erm->rti,
                                 heap_copytuple(&tuple));
        }
    }
}

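/*
 * Illustrative note (editor's addition): which path a rowmark takes here
 * depends on its markType, e.g.:
 *
 *		ROW_MARK_REFERENCE	refetch by ctid (or FDW RefetchForeignRow)
 *		ROW_MARK_COPY		decode the wholerow junk attribute
 *
 * Locking rowmark types never reach this function; their test tuples are
 * installed separately by the caller (e.g. nodeLockRows.c).
 */
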
/*
 * Fetch the next row (if any) from EvalPlanQual testing
 *
 * (In practice, there should never be more than one row...)
 */
TupleTableSlot *
EvalPlanQualNext(EPQState *epqstate)
{
    MemoryContext oldcontext;
    TupleTableSlot *slot;

    oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
    slot = ExecProcNode(epqstate->planstate);
    MemoryContextSwitchTo(oldcontext);

    return slot;
}

/*
 * Initialize or reset an EvalPlanQual state tree
 */
void
EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
{
    EState     *estate = epqstate->estate;

    if (estate == NULL)
    {
        /* First time through, so create a child EState */
        EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
    }
    else
    {
        /*
         * We already have a suitable child EPQ tree, so just reset it.
         */
        int         rtsize = list_length(parentestate->es_range_table);
        PlanState  *planstate = epqstate->planstate;

        MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));

        /* Recopy current values of parent parameters */
        if (parentestate->es_plannedstmt->paramExecTypes != NIL)
        {
            int         i;

            i = list_length(parentestate->es_plannedstmt->paramExecTypes);

            while (--i >= 0)
            {
                /* copy value if any, but not execPlan link */
                estate->es_param_exec_vals[i].value =
                    parentestate->es_param_exec_vals[i].value;
                estate->es_param_exec_vals[i].isnull =
                    parentestate->es_param_exec_vals[i].isnull;
            }
        }

        /*
         * Mark child plan tree as needing rescan at all scan nodes.  The
         * first ExecProcNode will take care of actually doing the rescan.
         */
        planstate->chgParam = bms_add_member(planstate->chgParam,
                                             epqstate->epqParam);
    }
}

/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
{
    EState     *estate;
    int         rtsize;
    MemoryContext oldcontext;
    ListCell   *l;

    rtsize = list_length(parentestate->es_range_table);

    epqstate->estate = estate = CreateExecutorState();

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    /*
     * Child EPQ EStates share the parent's copy of unchanging state such as
     * the snapshot, rangetable, result-rel info, and external Param info.
     * They need their own copies of local state, including a tuple table,
     * es_param_exec_vals, etc.
     *
     * The ResultRelInfo array management is trickier than it looks.  We
     * create a fresh array for the child but copy all the content from the
     * parent.  This is because it's okay for the child to share any
     * per-relation state the parent has already created --- but if the child
     * sets up any ResultRelInfo fields, such as its own junkfilter, that
     * state must *not* propagate back to the parent.  (For one thing, the
     * pointed-to data is in a memory context that won't last long enough.)
     */
    estate->es_direction = ForwardScanDirection;
    estate->es_snapshot = parentestate->es_snapshot;
    estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
    estate->es_range_table = parentestate->es_range_table;
    estate->es_plannedstmt = parentestate->es_plannedstmt;
    estate->es_junkFilter = parentestate->es_junkFilter;
    estate->es_output_cid = parentestate->es_output_cid;
    if (parentestate->es_num_result_relations > 0)
    {
        int         numResultRelations = parentestate->es_num_result_relations;
        ResultRelInfo *resultRelInfos;

        resultRelInfos = (ResultRelInfo *)
            palloc(numResultRelations * sizeof(ResultRelInfo));
        memcpy(resultRelInfos, parentestate->es_result_relations,
               numResultRelations * sizeof(ResultRelInfo));
        estate->es_result_relations = resultRelInfos;
        estate->es_num_result_relations = numResultRelations;
    }
    /* es_result_relation_info must NOT be copied */
    /* es_trig_target_relations must NOT be copied */
    estate->es_rowMarks = parentestate->es_rowMarks;
    estate->es_top_eflags = parentestate->es_top_eflags;
    estate->es_instrument = parentestate->es_instrument;
    /* es_auxmodifytables must NOT be copied */

    /*
     * The external param list is simply shared from parent.  The internal
     * param workspace has to be local state, but we copy the initial values
     * from the parent, so as to have access to any param values that were
     * already set from other parts of the parent's plan tree.
     */
    estate->es_param_list_info = parentestate->es_param_list_info;
    if (parentestate->es_plannedstmt->paramExecTypes != NIL)
    {
        int         i;

        i = list_length(parentestate->es_plannedstmt->paramExecTypes);
        estate->es_param_exec_vals = (ParamExecData *)
            palloc0(i * sizeof(ParamExecData));
        while (--i >= 0)
        {
            /* copy value if any, but not execPlan link */
            estate->es_param_exec_vals[i].value =
                parentestate->es_param_exec_vals[i].value;
            estate->es_param_exec_vals[i].isnull =
                parentestate->es_param_exec_vals[i].isnull;
        }
    }

    /*
     * Each EState must have its own es_epqScanDone state, but if we have
     * nested EPQ checks they should share es_epqTuple arrays.  This allows
     * sub-rechecks to inherit the values being examined by an outer recheck.
     */
    estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
    if (parentestate->es_epqTuple != NULL)
    {
        estate->es_epqTuple = parentestate->es_epqTuple;
        estate->es_epqTupleSet = parentestate->es_epqTupleSet;
    }
    else
    {
        estate->es_epqTuple = (HeapTuple *)
            palloc0(rtsize * sizeof(HeapTuple));
        estate->es_epqTupleSet = (bool *)
            palloc0(rtsize * sizeof(bool));
    }

    /*
     * Each estate also has its own tuple table.
     */
    estate->es_tupleTable = NIL;

    /*
     * Initialize private state information for each SubPlan.  We must do this
     * before running ExecInitNode on the main query tree, since
     * ExecInitSubPlan expects to be able to find these entries.  Some of the
     * SubPlans might not be used in the part of the plan tree we intend to
     * run, but since it's not easy to tell which, we just initialize them
     * all.
     */
    Assert(estate->es_subplanstates == NIL);
    foreach(l, parentestate->es_plannedstmt->subplans)
    {
        Plan       *subplan = (Plan *) lfirst(l);
        PlanState  *subplanstate;

        subplanstate = ExecInitNode(subplan, estate, 0);
        estate->es_subplanstates = lappend(estate->es_subplanstates,
                                           subplanstate);
    }

    /*
     * Initialize the private state information for all the nodes in the part
     * of the plan tree we need to run.  This opens files, allocates storage
     * and leaves us ready to start processing tuples.
     */
    epqstate->planstate = ExecInitNode(planTree, estate, 0);

    MemoryContextSwitchTo(oldcontext);
}

/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * trigger target relations that got opened, since those are not shared.
 * (There probably shouldn't be any of the latter, but just in case...)
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
    EState     *estate = epqstate->estate;
    MemoryContext oldcontext;
    ListCell   *l;

    if (estate == NULL)
        return;                 /* idle, so nothing to do */

    oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

    ExecEndNode(epqstate->planstate);

    foreach(l, estate->es_subplanstates)
    {
        PlanState  *subplanstate = (PlanState *) lfirst(l);

        ExecEndNode(subplanstate);
    }

    /* throw away the per-estate tuple table */
    ExecResetTupleTable(estate->es_tupleTable, false);

    /* close any trigger target relations attached to this EState */
    ExecCleanUpTriggerState(estate);

    MemoryContextSwitchTo(oldcontext);

    FreeExecutorState(estate);

    /* Mark EPQState idle */
    epqstate->estate = NULL;
    epqstate->planstate = NULL;
    epqstate->origslot = NULL;
}