/*-------------------------------------------------------------------------
 *
 * execMain.c
 *	  top level executor interface routines
 *
 * INTERFACE ROUTINES
 *	ExecutorStart()
 *	ExecutorRun()
 *	ExecutorFinish()
 *	ExecutorEnd()
 *
 *	These four procedures are the external interface to the executor.
 *	In each case, the query descriptor is required as an argument.
 *
 *	ExecutorStart must be called at the beginning of execution of any
 *	query plan and ExecutorEnd must always be called at the end of
 *	execution of a plan (unless it is aborted due to error).
 *
 *	ExecutorRun accepts direction and count arguments that specify whether
 *	the plan is to be executed forwards, backwards, and for how many tuples.
 *	In some cases ExecutorRun may be called multiple times to process all
 *	the tuples for a plan.  It is also acceptable to stop short of executing
 *	the whole plan (but only if it is a SELECT).
 *
 *	ExecutorFinish must be called after the final ExecutorRun call and
 *	before ExecutorEnd.  This can be omitted only in case of EXPLAIN,
 *	which should also omit ExecutorRun.
 *
 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/executor/execMain.c
 *
 *-------------------------------------------------------------------------
 */
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "commands/matview.h"
46 #include "commands/trigger.h"
47 #include "executor/execdebug.h"
48 #include "foreign/fdwapi.h"
49 #include "mb/pg_wchar.h"
50 #include "miscadmin.h"
51 #include "optimizer/clauses.h"
52 #include "parser/parsetree.h"
53 #include "storage/bufmgr.h"
54 #include "storage/lmgr.h"
55 #include "tcop/utility.h"
56 #include "utils/acl.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/rls.h"
60 #include "utils/snapmgr.h"
61 #include "utils/tqual.h"
64 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
65 ExecutorStart_hook_type ExecutorStart_hook = NULL;
66 ExecutorRun_hook_type ExecutorRun_hook = NULL;
67 ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
68 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
70 /* Hook for plugin to get control in ExecCheckRTPerms() */
71 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
73 /* decls for local routines only used within this module */
74 static void InitPlan(QueryDesc *queryDesc, int eflags);
75 static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
76 static void ExecPostprocessPlan(EState *estate);
77 static void ExecEndPlan(PlanState *planstate, EState *estate);
78 static void ExecutePlan(EState *estate, PlanState *planstate,
82 ScanDirection direction,
84 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
85 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
86 static char *ExecBuildSlotValueDescription(Oid reloid,
89 Bitmapset *modifiedCols,
91 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
95 * Note that this macro also exists in commands/trigger.c. There does not
96 * appear to be any good header to put it into, given the structures that
97 * it uses, so we let them be duplicated. Be sure to update both if one needs
98 * to be changed, however.
100 #define GetModifiedColumns(relinfo, estate) \
101 (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->modifiedCols)
103 /* end of local decls */
106 /* ----------------------------------------------------------------
109 * This routine must be called at the beginning of any execution of any
112 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
113 * only because some places use QueryDescs for utility commands). The tupDesc
114 * field of the QueryDesc is filled in to describe the tuples that will be
115 * returned, and the internal fields (estate and planstate) are set up.
117 * eflags contains flag bits as described in executor.h.
119 * NB: the CurrentMemoryContext when this is called will become the parent
120 * of the per-query context used for this Executor invocation.
122 * We provide a function hook variable that lets loadable plugins
123 * get control when ExecutorStart is called. Such a plugin would
124 * normally call standard_ExecutorStart().
126 * ----------------------------------------------------------------
129 ExecutorStart(QueryDesc *queryDesc, int eflags)
131 if (ExecutorStart_hook)
132 (*ExecutorStart_hook) (queryDesc, eflags);
134 standard_ExecutorStart(queryDesc, eflags);
138 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
141 MemoryContext oldcontext;
143 /* sanity checks: queryDesc must not be started already */
144 Assert(queryDesc != NULL);
145 Assert(queryDesc->estate == NULL);
148 * If the transaction is read-only, we need to check if any writes are
149 * planned to non-temporary tables. EXPLAIN is considered read-only.
151 if (XactReadOnly && !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
152 ExecCheckXactReadOnly(queryDesc->plannedstmt);
155 * Build EState, switch into per-query memory context for startup.
157 estate = CreateExecutorState();
158 queryDesc->estate = estate;
160 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
163 * Fill in external parameters, if any, from queryDesc; and allocate
164 * workspace for internal parameters
166 estate->es_param_list_info = queryDesc->params;
168 if (queryDesc->plannedstmt->nParamExec > 0)
169 estate->es_param_exec_vals = (ParamExecData *)
170 palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
173 * If non-read-only query, set the command ID to mark output tuples with
175 switch (queryDesc->operation)
180 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
183 if (queryDesc->plannedstmt->rowMarks != NIL ||
184 queryDesc->plannedstmt->hasModifyingCTE)
185 estate->es_output_cid = GetCurrentCommandId(true);
188 * A SELECT without modifying CTEs can't possibly queue triggers,
189 * so force skip-triggers mode. This is just a marginal efficiency
190 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
191 * all that expensive, but we might as well do it.
193 if (!queryDesc->plannedstmt->hasModifyingCTE)
194 eflags |= EXEC_FLAG_SKIP_TRIGGERS;
200 estate->es_output_cid = GetCurrentCommandId(true);
204 elog(ERROR, "unrecognized operation code: %d",
205 (int) queryDesc->operation);
210 * Copy other important information into the EState
212 estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
213 estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
214 estate->es_top_eflags = eflags;
215 estate->es_instrument = queryDesc->instrument_options;
218 * Initialize the plan state tree
220 InitPlan(queryDesc, eflags);
223 * Set up an AFTER-trigger statement context, unless told not to, or
224 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
226 if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
227 AfterTriggerBeginQuery();
229 MemoryContextSwitchTo(oldcontext);
232 /* ----------------------------------------------------------------
235 * This is the main routine of the executor module. It accepts
236 * the query descriptor from the traffic cop and executes the
239 * ExecutorStart must have been called already.
241 * If direction is NoMovementScanDirection then nothing is done
242 * except to start up/shut down the destination. Otherwise,
243 * we retrieve up to 'count' tuples in the specified direction.
245 * Note: count = 0 is interpreted as no portal limit, i.e., run to
246 * completion. Also note that the count limit is only applied to
247 * retrieved tuples, not for instance to those inserted/updated/deleted
248 * by a ModifyTable plan node.
250 * There is no return value, but output tuples (if any) are sent to
251 * the destination receiver specified in the QueryDesc; and the number
252 * of tuples processed at the top level can be found in
253 * estate->es_processed.
255 * We provide a function hook variable that lets loadable plugins
256 * get control when ExecutorRun is called. Such a plugin would
257 * normally call standard_ExecutorRun().
259 * ----------------------------------------------------------------
262 ExecutorRun(QueryDesc *queryDesc,
263 ScanDirection direction, long count)
265 if (ExecutorRun_hook)
266 (*ExecutorRun_hook) (queryDesc, direction, count);
268 standard_ExecutorRun(queryDesc, direction, count);
272 standard_ExecutorRun(QueryDesc *queryDesc,
273 ScanDirection direction, long count)
279 MemoryContext oldcontext;
282 Assert(queryDesc != NULL);
284 estate = queryDesc->estate;
286 Assert(estate != NULL);
287 Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
290 * Switch into per-query memory context
292 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
294 /* Allow instrumentation of Executor overall runtime */
295 if (queryDesc->totaltime)
296 InstrStartNode(queryDesc->totaltime);
299 * extract information from the query descriptor and the query feature.
301 operation = queryDesc->operation;
302 dest = queryDesc->dest;
305 * startup tuple receiver, if we will be emitting tuples
307 estate->es_processed = 0;
308 estate->es_lastoid = InvalidOid;
310 sendTuples = (operation == CMD_SELECT ||
311 queryDesc->plannedstmt->hasReturning);
314 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
319 if (!ScanDirectionIsNoMovement(direction))
321 queryDesc->planstate,
329 * shutdown tuple receiver, if we started it
332 (*dest->rShutdown) (dest);
334 if (queryDesc->totaltime)
335 InstrStopNode(queryDesc->totaltime, estate->es_processed);
337 MemoryContextSwitchTo(oldcontext);
340 /* ----------------------------------------------------------------
343 * This routine must be called after the last ExecutorRun call.
344 * It performs cleanup such as firing AFTER triggers. It is
345 * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
346 * include these actions in the total runtime.
348 * We provide a function hook variable that lets loadable plugins
349 * get control when ExecutorFinish is called. Such a plugin would
350 * normally call standard_ExecutorFinish().
352 * ----------------------------------------------------------------
355 ExecutorFinish(QueryDesc *queryDesc)
357 if (ExecutorFinish_hook)
358 (*ExecutorFinish_hook) (queryDesc);
360 standard_ExecutorFinish(queryDesc);
364 standard_ExecutorFinish(QueryDesc *queryDesc)
367 MemoryContext oldcontext;
370 Assert(queryDesc != NULL);
372 estate = queryDesc->estate;
374 Assert(estate != NULL);
375 Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
377 /* This should be run once and only once per Executor instance */
378 Assert(!estate->es_finished);
380 /* Switch into per-query memory context */
381 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
383 /* Allow instrumentation of Executor overall runtime */
384 if (queryDesc->totaltime)
385 InstrStartNode(queryDesc->totaltime);
387 /* Run ModifyTable nodes to completion */
388 ExecPostprocessPlan(estate);
390 /* Execute queued AFTER triggers, unless told not to */
391 if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
392 AfterTriggerEndQuery(estate);
394 if (queryDesc->totaltime)
395 InstrStopNode(queryDesc->totaltime, 0);
397 MemoryContextSwitchTo(oldcontext);
399 estate->es_finished = true;
402 /* ----------------------------------------------------------------
405 * This routine must be called at the end of execution of any
408 * We provide a function hook variable that lets loadable plugins
409 * get control when ExecutorEnd is called. Such a plugin would
410 * normally call standard_ExecutorEnd().
412 * ----------------------------------------------------------------
415 ExecutorEnd(QueryDesc *queryDesc)
417 if (ExecutorEnd_hook)
418 (*ExecutorEnd_hook) (queryDesc);
420 standard_ExecutorEnd(queryDesc);
424 standard_ExecutorEnd(QueryDesc *queryDesc)
427 MemoryContext oldcontext;
430 Assert(queryDesc != NULL);
432 estate = queryDesc->estate;
434 Assert(estate != NULL);
437 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
438 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
439 * might forget to call it.
441 Assert(estate->es_finished ||
442 (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
445 * Switch into per-query memory context to run ExecEndPlan
447 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
449 ExecEndPlan(queryDesc->planstate, estate);
451 /* do away with our snapshots */
452 UnregisterSnapshot(estate->es_snapshot);
453 UnregisterSnapshot(estate->es_crosscheck_snapshot);
456 * Must switch out of context before destroying it
458 MemoryContextSwitchTo(oldcontext);
461 * Release EState and per-query memory context. This should release
462 * everything the executor has allocated.
464 FreeExecutorState(estate);
466 /* Reset queryDesc fields that no longer point to anything */
467 queryDesc->tupDesc = NULL;
468 queryDesc->estate = NULL;
469 queryDesc->planstate = NULL;
470 queryDesc->totaltime = NULL;
473 /* ----------------------------------------------------------------
476 * This routine may be called on an open queryDesc to rewind it
478 * ----------------------------------------------------------------
481 ExecutorRewind(QueryDesc *queryDesc)
484 MemoryContext oldcontext;
487 Assert(queryDesc != NULL);
489 estate = queryDesc->estate;
491 Assert(estate != NULL);
493 /* It's probably not sensible to rescan updating queries */
494 Assert(queryDesc->operation == CMD_SELECT);
497 * Switch into per-query memory context
499 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
504 ExecReScan(queryDesc->planstate);
506 MemoryContextSwitchTo(oldcontext);
512 * Check access permissions for all relations listed in a range table.
514 * Returns true if permissions are adequate. Otherwise, throws an appropriate
515 * error if ereport_on_violation is true, or simply returns false otherwise.
517 * Note that this does NOT address row level security policies (aka: RLS). If
518 * rows will be returned to the user as a result of this permission check
519 * passing, then RLS also needs to be consulted (and check_enable_rls()).
521 * See rewrite/rowsecurity.c.
524 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
529 foreach(l, rangeTable)
531 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
533 result = ExecCheckRTEPerms(rte);
536 Assert(rte->rtekind == RTE_RELATION);
537 if (ereport_on_violation)
538 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
539 get_rel_name(rte->relid));
544 if (ExecutorCheckPerms_hook)
545 result = (*ExecutorCheckPerms_hook) (rangeTable,
546 ereport_on_violation);
552 * Check access permissions for a single RTE.
555 ExecCheckRTEPerms(RangeTblEntry *rte)
557 AclMode requiredPerms;
559 AclMode remainingPerms;
565 * Only plain-relation RTEs need to be checked here. Function RTEs are
566 * checked by init_fcache when the function is prepared for execution.
567 * Join, subquery, and special RTEs need no checks.
569 if (rte->rtekind != RTE_RELATION)
573 * No work if requiredPerms is empty.
575 requiredPerms = rte->requiredPerms;
576 if (requiredPerms == 0)
582 * userid to check as: current user unless we have a setuid indication.
584 * Note: GetUserId() is presently fast enough that there's no harm in
585 * calling it separately for each RTE. If that stops being true, we could
586 * call it once in ExecCheckRTPerms and pass the userid down from there.
587 * But for now, no need for the extra clutter.
589 userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
592 * We must have *all* the requiredPerms bits, but some of the bits can be
593 * satisfied from column-level rather than relation-level permissions.
594 * First, remove any bits that are satisfied by relation permissions.
596 relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
597 remainingPerms = requiredPerms & ~relPerms;
598 if (remainingPerms != 0)
601 * If we lack any permissions that exist only as relation permissions,
602 * we can fail straight away.
604 if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
608 * Check to see if we have the needed privileges at column level.
610 * Note: failures just report a table-level error; it would be nicer
611 * to report a column-level error if we have some but not all of the
614 if (remainingPerms & ACL_SELECT)
617 * When the query doesn't explicitly reference any columns (for
618 * example, SELECT COUNT(*) FROM table), allow the query if we
619 * have SELECT on any column of the rel, as per SQL spec.
621 if (bms_is_empty(rte->selectedCols))
623 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
624 ACLMASK_ANY) != ACLCHECK_OK)
629 while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
631 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
632 AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
634 if (attno == InvalidAttrNumber)
636 /* Whole-row reference, must have priv on all cols */
637 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
638 ACLMASK_ALL) != ACLCHECK_OK)
643 if (pg_attribute_aclcheck(relOid, attno, userid,
644 ACL_SELECT) != ACLCHECK_OK)
651 * Basically the same for the mod columns, with either INSERT or
652 * UPDATE privilege as specified by remainingPerms.
654 remainingPerms &= ~ACL_SELECT;
655 if (remainingPerms != 0)
658 * When the query doesn't explicitly change any columns, allow the
659 * query if we have permission on any column of the rel. This is
660 * to handle SELECT FOR UPDATE as well as possible corner cases in
663 if (bms_is_empty(rte->modifiedCols))
665 if (pg_attribute_aclcheck_all(relOid, userid, remainingPerms,
666 ACLMASK_ANY) != ACLCHECK_OK)
671 while ((col = bms_next_member(rte->modifiedCols, col)) >= 0)
673 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
674 AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
676 if (attno == InvalidAttrNumber)
678 /* whole-row reference can't happen here */
679 elog(ERROR, "whole-row update is not implemented");
683 if (pg_attribute_aclcheck(relOid, attno, userid,
684 remainingPerms) != ACLCHECK_OK)
694 * Check that the query does not imply any writes to non-temp tables.
696 * Note: in a Hot Standby slave this would need to reject writes to temp
697 * tables as well; but an HS slave can't have created any temp tables
698 * in the first place, so no need to check that.
701 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
705 /* Fail if write permissions are requested on any non-temp table */
706 foreach(l, plannedstmt->rtable)
708 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
710 if (rte->rtekind != RTE_RELATION)
713 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
716 if (isTempNamespace(get_rel_namespace(rte->relid)))
719 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
724 /* ----------------------------------------------------------------
727 * Initializes the query plan: open files, allocate storage
728 * and start up the rule manager
729 * ----------------------------------------------------------------
732 InitPlan(QueryDesc *queryDesc, int eflags)
734 CmdType operation = queryDesc->operation;
735 PlannedStmt *plannedstmt = queryDesc->plannedstmt;
736 Plan *plan = plannedstmt->planTree;
737 List *rangeTable = plannedstmt->rtable;
738 EState *estate = queryDesc->estate;
739 PlanState *planstate;
745 * Do permissions checks
747 ExecCheckRTPerms(rangeTable, true);
750 * initialize the node's execution state
752 estate->es_range_table = rangeTable;
753 estate->es_plannedstmt = plannedstmt;
756 * initialize result relation stuff, and open/lock the result rels.
758 * We must do this before initializing the plan tree, else we might try to
759 * do a lock upgrade if a result rel is also a source rel.
761 if (plannedstmt->resultRelations)
763 List *resultRelations = plannedstmt->resultRelations;
764 int numResultRelations = list_length(resultRelations);
765 ResultRelInfo *resultRelInfos;
766 ResultRelInfo *resultRelInfo;
768 resultRelInfos = (ResultRelInfo *)
769 palloc(numResultRelations * sizeof(ResultRelInfo));
770 resultRelInfo = resultRelInfos;
771 foreach(l, resultRelations)
773 Index resultRelationIndex = lfirst_int(l);
774 Oid resultRelationOid;
775 Relation resultRelation;
777 resultRelationOid = getrelid(resultRelationIndex, rangeTable);
778 resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
779 InitResultRelInfo(resultRelInfo,
782 estate->es_instrument);
785 estate->es_result_relations = resultRelInfos;
786 estate->es_num_result_relations = numResultRelations;
787 /* es_result_relation_info is NULL except when within ModifyTable */
788 estate->es_result_relation_info = NULL;
793 * if no result relation, then set state appropriately
795 estate->es_result_relations = NULL;
796 estate->es_num_result_relations = 0;
797 estate->es_result_relation_info = NULL;
801 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
802 * before we initialize the plan tree, else we'd be risking lock upgrades.
803 * While we are at it, build the ExecRowMark list.
805 estate->es_rowMarks = NIL;
806 foreach(l, plannedstmt->rowMarks)
808 PlanRowMark *rc = (PlanRowMark *) lfirst(l);
813 /* ignore "parent" rowmarks; they are irrelevant at runtime */
817 switch (rc->markType)
819 case ROW_MARK_EXCLUSIVE:
820 case ROW_MARK_NOKEYEXCLUSIVE:
822 case ROW_MARK_KEYSHARE:
823 relid = getrelid(rc->rti, rangeTable);
824 relation = heap_open(relid, RowShareLock);
826 case ROW_MARK_REFERENCE:
827 relid = getrelid(rc->rti, rangeTable);
828 relation = heap_open(relid, AccessShareLock);
831 /* there's no real table here ... */
835 elog(ERROR, "unrecognized markType: %d", rc->markType);
836 relation = NULL; /* keep compiler quiet */
840 /* Check that relation is a legal target for marking */
842 CheckValidRowMarkRel(relation, rc->markType);
844 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
845 erm->relation = relation;
847 erm->prti = rc->prti;
848 erm->rowmarkId = rc->rowmarkId;
849 erm->markType = rc->markType;
850 erm->waitPolicy = rc->waitPolicy;
851 ItemPointerSetInvalid(&(erm->curCtid));
852 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
856 * Initialize the executor's tuple table to empty.
858 estate->es_tupleTable = NIL;
859 estate->es_trig_tuple_slot = NULL;
860 estate->es_trig_oldtup_slot = NULL;
861 estate->es_trig_newtup_slot = NULL;
863 /* mark EvalPlanQual not active */
864 estate->es_epqTuple = NULL;
865 estate->es_epqTupleSet = NULL;
866 estate->es_epqScanDone = NULL;
869 * Initialize private state information for each SubPlan. We must do this
870 * before running ExecInitNode on the main query tree, since
871 * ExecInitSubPlan expects to be able to find these entries.
873 Assert(estate->es_subplanstates == NIL);
874 i = 1; /* subplan indices count from 1 */
875 foreach(l, plannedstmt->subplans)
877 Plan *subplan = (Plan *) lfirst(l);
878 PlanState *subplanstate;
882 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
883 * it is a parameterless subplan (not initplan), we suggest that it be
884 * prepared to handle REWIND efficiently; otherwise there is no need.
887 & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
888 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
889 sp_eflags |= EXEC_FLAG_REWIND;
891 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
893 estate->es_subplanstates = lappend(estate->es_subplanstates,
900 * Initialize the private state information for all the nodes in the query
901 * tree. This opens files, allocates storage and leaves us ready to start
904 planstate = ExecInitNode(plan, estate, eflags);
907 * Get the tuple descriptor describing the type of tuples to return.
909 tupType = ExecGetResultType(planstate);
912 * Initialize the junk filter if needed. SELECT queries need a filter if
913 * there are any junk attrs in the top-level tlist.
915 if (operation == CMD_SELECT)
917 bool junk_filter_needed = false;
920 foreach(tlist, plan->targetlist)
922 TargetEntry *tle = (TargetEntry *) lfirst(tlist);
926 junk_filter_needed = true;
931 if (junk_filter_needed)
935 j = ExecInitJunkFilter(planstate->plan->targetlist,
937 ExecInitExtraTupleSlot(estate));
938 estate->es_junkFilter = j;
940 /* Want to return the cleaned tuple type */
941 tupType = j->jf_cleanTupType;
945 queryDesc->tupDesc = tupType;
946 queryDesc->planstate = planstate;
950 * Check that a proposed result relation is a legal target for the operation
952 * Generally the parser and/or planner should have noticed any such mistake
953 * already, but let's make sure.
955 * Note: when changing this function, you probably also need to look at
956 * CheckValidRowMarkRel.
959 CheckValidResultRel(Relation resultRel, CmdType operation)
961 TriggerDesc *trigDesc = resultRel->trigdesc;
962 FdwRoutine *fdwroutine;
964 switch (resultRel->rd_rel->relkind)
966 case RELKIND_RELATION:
969 case RELKIND_SEQUENCE:
971 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
972 errmsg("cannot change sequence \"%s\"",
973 RelationGetRelationName(resultRel))));
975 case RELKIND_TOASTVALUE:
977 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
978 errmsg("cannot change TOAST relation \"%s\"",
979 RelationGetRelationName(resultRel))));
984 * Okay only if there's a suitable INSTEAD OF trigger. Messages
985 * here should match rewriteHandler.c's rewriteTargetView, except
986 * that we omit errdetail because we haven't got the information
987 * handy (and given that we really shouldn't get here anyway, it's
988 * not worth great exertion to get).
993 if (!trigDesc || !trigDesc->trig_insert_instead_row)
995 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
996 errmsg("cannot insert into view \"%s\"",
997 RelationGetRelationName(resultRel)),
998 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
1001 if (!trigDesc || !trigDesc->trig_update_instead_row)
1003 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1004 errmsg("cannot update view \"%s\"",
1005 RelationGetRelationName(resultRel)),
1006 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
1009 if (!trigDesc || !trigDesc->trig_delete_instead_row)
1011 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1012 errmsg("cannot delete from view \"%s\"",
1013 RelationGetRelationName(resultRel)),
1014 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
1017 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1021 case RELKIND_MATVIEW:
1022 if (!MatViewIncrementalMaintenanceIsEnabled())
1024 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1025 errmsg("cannot change materialized view \"%s\"",
1026 RelationGetRelationName(resultRel))));
1028 case RELKIND_FOREIGN_TABLE:
1029 /* Okay only if the FDW supports it */
1030 fdwroutine = GetFdwRoutineForRelation(resultRel, false);
1034 if (fdwroutine->ExecForeignInsert == NULL)
1036 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1037 errmsg("cannot insert into foreign table \"%s\"",
1038 RelationGetRelationName(resultRel))));
1039 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1040 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1042 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1043 errmsg("foreign table \"%s\" does not allow inserts",
1044 RelationGetRelationName(resultRel))));
1047 if (fdwroutine->ExecForeignUpdate == NULL)
1049 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1050 errmsg("cannot update foreign table \"%s\"",
1051 RelationGetRelationName(resultRel))));
1052 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1053 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1055 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1056 errmsg("foreign table \"%s\" does not allow updates",
1057 RelationGetRelationName(resultRel))));
1060 if (fdwroutine->ExecForeignDelete == NULL)
1062 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1063 errmsg("cannot delete from foreign table \"%s\"",
1064 RelationGetRelationName(resultRel))));
1065 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1066 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1068 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1069 errmsg("foreign table \"%s\" does not allow deletes",
1070 RelationGetRelationName(resultRel))));
1073 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1079 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1080 errmsg("cannot change relation \"%s\"",
1081 RelationGetRelationName(resultRel))));
1087 * Check that a proposed rowmark target relation is a legal target
1089 * In most cases parser and/or planner should have noticed this already, but
1090 * they don't cover all cases.
1093 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1095 switch (rel->rd_rel->relkind)
1097 case RELKIND_RELATION:
1100 case RELKIND_SEQUENCE:
1101 /* Must disallow this because we don't vacuum sequences */
1103 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1104 errmsg("cannot lock rows in sequence \"%s\"",
1105 RelationGetRelationName(rel))));
1107 case RELKIND_TOASTVALUE:
1108 /* We could allow this, but there seems no good reason to */
1110 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1111 errmsg("cannot lock rows in TOAST relation \"%s\"",
1112 RelationGetRelationName(rel))));
1115 /* Should not get here; planner should have expanded the view */
1117 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1118 errmsg("cannot lock rows in view \"%s\"",
1119 RelationGetRelationName(rel))));
1121 case RELKIND_MATVIEW:
1122 /* Allow referencing a matview, but not actual locking clauses */
1123 if (markType != ROW_MARK_REFERENCE)
1125 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1126 errmsg("cannot lock rows in materialized view \"%s\"",
1127 RelationGetRelationName(rel))));
1129 case RELKIND_FOREIGN_TABLE:
1130 /* Should not get here; planner should have used ROW_MARK_COPY */
1132 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1133 errmsg("cannot lock rows in foreign table \"%s\"",
1134 RelationGetRelationName(rel))));
1138 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1139 errmsg("cannot lock rows in relation \"%s\"",
1140 RelationGetRelationName(rel))));
1146 * Initialize ResultRelInfo data for one result relation
1148 * Caution: before Postgres 9.1, this function included the relkind checking
1149 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1150 * appropriate. Be sure callers cover those needs.
1153 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1154 Relation resultRelationDesc,
1155 Index resultRelationIndex,
1156 int instrument_options)
1158 MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1159 resultRelInfo->type = T_ResultRelInfo;
1160 resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1161 resultRelInfo->ri_RelationDesc = resultRelationDesc;
1162 resultRelInfo->ri_NumIndices = 0;
1163 resultRelInfo->ri_IndexRelationDescs = NULL;
1164 resultRelInfo->ri_IndexRelationInfo = NULL;
1165 /* make a copy so as not to depend on relcache info not changing... */
1166 resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1167 if (resultRelInfo->ri_TrigDesc)
1169 int n = resultRelInfo->ri_TrigDesc->numtriggers;
1171 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1172 palloc0(n * sizeof(FmgrInfo));
1173 resultRelInfo->ri_TrigWhenExprs = (List **)
1174 palloc0(n * sizeof(List *));
1175 if (instrument_options)
1176 resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1180 resultRelInfo->ri_TrigFunctions = NULL;
1181 resultRelInfo->ri_TrigWhenExprs = NULL;
1182 resultRelInfo->ri_TrigInstrument = NULL;
1184 if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1185 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1187 resultRelInfo->ri_FdwRoutine = NULL;
1188 resultRelInfo->ri_FdwState = NULL;
1189 resultRelInfo->ri_ConstraintExprs = NULL;
1190 resultRelInfo->ri_junkFilter = NULL;
1191 resultRelInfo->ri_projectReturning = NULL;
1195 * ExecGetTriggerResultRel
1197 * Get a ResultRelInfo for a trigger target relation. Most of the time,
1198 * triggers are fired on one of the result relations of the query, and so
1199 * we can just return a member of the es_result_relations array. (Note: in
1200 * self-join situations there might be multiple members with the same OID;
1201 * if so it doesn't matter which one we pick.) However, it is sometimes
1202 * necessary to fire triggers on other relations; this happens mainly when an
1203 * RI update trigger queues additional triggers on other relations, which will
1204 * be processed in the context of the outer query. For efficiency's sake,
1205 * we want to have a ResultRelInfo for those triggers too; that can avoid
1206 * repeated re-opening of the relation. (It also provides a way for EXPLAIN
1207 * ANALYZE to report the runtimes of such triggers.) So we make additional
1208 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1211 ExecGetTriggerResultRel(EState *estate, Oid relid)
1213 ResultRelInfo *rInfo;
1217 MemoryContext oldcontext;
1219 /* First, search through the query result relations */
1220 rInfo = estate->es_result_relations;
1221 nr = estate->es_num_result_relations;
1224 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1229 /* Nope, but maybe we already made an extra ResultRelInfo for it */
1230 foreach(l, estate->es_trig_target_relations)
1232 rInfo = (ResultRelInfo *) lfirst(l);
1233 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1236 /* Nope, so we need a new one */
1239 * Open the target relation's relcache entry. We assume that an
1240 * appropriate lock is still held by the backend from whenever the trigger
1241 * event got queued, so we need take no new lock here. Also, we need not
1242 * recheck the relkind, so no need for CheckValidResultRel.
1244 rel = heap_open(relid, NoLock);
1247 * Make the new entry in the right context.
1249 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1250 rInfo = makeNode(ResultRelInfo);
1251 InitResultRelInfo(rInfo,
1253 0, /* dummy rangetable index */
1254 estate->es_instrument);
1255 estate->es_trig_target_relations =
1256 lappend(estate->es_trig_target_relations, rInfo);
1257 MemoryContextSwitchTo(oldcontext);
1260 * Currently, we don't need any index information in ResultRelInfos used
1261 * only for triggers, so no need to call ExecOpenIndices.
1268 * ExecContextForcesOids
1270 * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1271 * we need to ensure that result tuples have space for an OID iff they are
1272 * going to be stored into a relation that has OIDs. In other contexts
1273 * we are free to choose whether to leave space for OIDs in result tuples
1274 * (we generally don't want to, but we do if a physical-tlist optimization
1275 * is possible). This routine checks the plan context and returns TRUE if the
1276 * choice is forced, FALSE if the choice is not forced. In the TRUE case,
1277 * *hasoids is set to the required value.
1279 * One reason this is ugly is that all plan nodes in the plan tree will emit
1280 * tuples with space for an OID, though we really only need the topmost node
1281 * to do so. However, node types like Sort don't project new tuples but just
1282 * return their inputs, and in those cases the requirement propagates down
1283 * to the input node. Eventually we might make this code smart enough to
1284 * recognize how far down the requirement really goes, but for now we just
1285 * make all plan nodes do the same thing if the top level forces the choice.
1287 * We assume that if we are generating tuples for INSERT or UPDATE,
1288 * estate->es_result_relation_info is already set up to describe the target
1289 * relation. Note that in an UPDATE that spans an inheritance tree, some of
1290 * the target relations may have OIDs and some not. We have to make the
1291 * decisions on a per-relation basis as we initialize each of the subplans of
1292 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1293 * while initializing each subplan.
1295 * CREATE TABLE AS is even uglier, because we don't have the target relation's
1296 * descriptor available when this code runs; we have to look aside at the
1297 * flags passed to ExecutorStart().
1300 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1302 ResultRelInfo *ri = planstate->state->es_result_relation_info;
1306 Relation rel = ri->ri_RelationDesc;
1310 *hasoids = rel->rd_rel->relhasoids;
1315 if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1320 if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1329 /* ----------------------------------------------------------------
1330 * ExecPostprocessPlan
1332 * Give plan nodes a final chance to execute before shutdown
1333 * ----------------------------------------------------------------
1336 ExecPostprocessPlan(EState *estate)
1341 * Make sure nodes run forward.
1343 estate->es_direction = ForwardScanDirection;
1346 * Run any secondary ModifyTable nodes to completion, in case the main
1347 * query did not fetch all rows from them. (We do this to ensure that
1348 * such nodes have predictable results.)
1350 foreach(lc, estate->es_auxmodifytables)
1352 PlanState *ps = (PlanState *) lfirst(lc);
1356 TupleTableSlot *slot;
1358 /* Reset the per-output-tuple exprcontext each time */
1359 ResetPerTupleExprContext(estate);
1361 slot = ExecProcNode(ps);
1363 if (TupIsNull(slot))
1369 /* ----------------------------------------------------------------
1372 * Cleans up the query plan -- closes files and frees up storage
1374 * NOTE: we are no longer very worried about freeing storage per se
1375 * in this code; FreeExecutorState should be guaranteed to release all
1376 * memory that needs to be released. What we are worried about doing
1377 * is closing relations and dropping buffer pins. Thus, for example,
1378 * tuple tables must be cleared or dropped to ensure pins are released.
1379 * ----------------------------------------------------------------
1382 ExecEndPlan(PlanState *planstate, EState *estate)
1384 ResultRelInfo *resultRelInfo;
1389 * shut down the node-type-specific query processing
1391 ExecEndNode(planstate);
1396 foreach(l, estate->es_subplanstates)
1398 PlanState *subplanstate = (PlanState *) lfirst(l);
1400 ExecEndNode(subplanstate);
1404 * destroy the executor's tuple table. Actually we only care about
1405 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1406 * the TupleTableSlots, since the containing memory context is about to go
1409 ExecResetTupleTable(estate->es_tupleTable, false);
1412 * close the result relation(s) if any, but hold locks until xact commit.
1414 resultRelInfo = estate->es_result_relations;
1415 for (i = estate->es_num_result_relations; i > 0; i--)
1417 /* Close indices and then the relation itself */
1418 ExecCloseIndices(resultRelInfo);
1419 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1424 * likewise close any trigger target relations
1426 foreach(l, estate->es_trig_target_relations)
1428 resultRelInfo = (ResultRelInfo *) lfirst(l);
1429 /* Close indices and then the relation itself */
1430 ExecCloseIndices(resultRelInfo);
1431 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1435 * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1438 foreach(l, estate->es_rowMarks)
1440 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
1443 heap_close(erm->relation, NoLock);
1447 /* ----------------------------------------------------------------
1450 * Processes the query plan until we have retrieved 'numberTuples' tuples,
1451 * moving in the specified direction.
1453 * Runs to completion if numberTuples is 0
1455 * Note: the ctid attribute is a 'junk' attribute that is removed before the
1457 * ----------------------------------------------------------------
1460 ExecutePlan(EState *estate,
1461 PlanState *planstate,
1465 ScanDirection direction,
1468 TupleTableSlot *slot;
1469 long current_tuple_count;
1472 * initialize local variables
1474 current_tuple_count = 0;
1477 * Set the direction.
1479 estate->es_direction = direction;
1482 * Loop until we've processed the proper number of tuples from the plan.
1486 /* Reset the per-output-tuple exprcontext */
1487 ResetPerTupleExprContext(estate);
1490 * Execute the plan and obtain a tuple
1492 slot = ExecProcNode(planstate);
1495 * if the tuple is null, then we assume there is nothing more to
1496 * process so we just end the loop...
1498 if (TupIsNull(slot))
1502 * If we have a junk filter, then project a new tuple with the junk
1505 * Store this new "clean" tuple in the junkfilter's resultSlot.
1506 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1507 * because that tuple slot has the wrong descriptor.)
1509 if (estate->es_junkFilter != NULL)
1510 slot = ExecFilterJunk(estate->es_junkFilter, slot);
1513 * If we are supposed to send the tuple somewhere, do so. (In
1514 * practice, this is probably always the case at this point.)
1517 (*dest->receiveSlot) (slot, dest);
1520 * Count tuples processed, if this is a SELECT. (For other operation
1521 * types, the ModifyTable plan node must count the appropriate
1524 if (operation == CMD_SELECT)
1525 (estate->es_processed)++;
1528 * check our tuple count.. if we've processed the proper number then
1529 * quit, else loop again and process more tuples. Zero numberTuples
1532 current_tuple_count++;
1533 if (numberTuples && numberTuples == current_tuple_count)
1540 * ExecRelCheck --- check that tuple meets constraints for result relation
1542 * Returns NULL if OK, else name of failed check constraint
1545 ExecRelCheck(ResultRelInfo *resultRelInfo,
1546 TupleTableSlot *slot, EState *estate)
1548 Relation rel = resultRelInfo->ri_RelationDesc;
1549 int ncheck = rel->rd_att->constr->num_check;
1550 ConstrCheck *check = rel->rd_att->constr->check;
1551 ExprContext *econtext;
1552 MemoryContext oldContext;
1557 * If first time through for this result relation, build expression
1558 * nodetrees for rel's constraint expressions. Keep them in the per-query
1559 * memory context so they'll survive throughout the query.
1561 if (resultRelInfo->ri_ConstraintExprs == NULL)
1563 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1564 resultRelInfo->ri_ConstraintExprs =
1565 (List **) palloc(ncheck * sizeof(List *));
1566 for (i = 0; i < ncheck; i++)
1568 /* ExecQual wants implicit-AND form */
1569 qual = make_ands_implicit(stringToNode(check[i].ccbin));
1570 resultRelInfo->ri_ConstraintExprs[i] = (List *)
1571 ExecPrepareExpr((Expr *) qual, estate);
1573 MemoryContextSwitchTo(oldContext);
1577 * We will use the EState's per-tuple context for evaluating constraint
1578 * expressions (creating it if it's not already there).
1580 econtext = GetPerTupleExprContext(estate);
1582 /* Arrange for econtext's scan tuple to be the tuple under test */
1583 econtext->ecxt_scantuple = slot;
1585 /* And evaluate the constraints */
1586 for (i = 0; i < ncheck; i++)
1588 qual = resultRelInfo->ri_ConstraintExprs[i];
1591 * NOTE: SQL specifies that a NULL result from a constraint expression
1592 * is not to be treated as a failure. Therefore, tell ExecQual to
1593 * return TRUE for NULL.
1595 if (!ExecQual(qual, econtext, true))
1596 return check[i].ccname;
1599 /* NULL result means no error */
1604 ExecConstraints(ResultRelInfo *resultRelInfo,
1605 TupleTableSlot *slot, EState *estate)
1607 Relation rel = resultRelInfo->ri_RelationDesc;
1608 TupleDesc tupdesc = RelationGetDescr(rel);
1609 TupleConstr *constr = tupdesc->constr;
1613 if (constr->has_not_null)
1615 int natts = tupdesc->natts;
1618 for (attrChk = 1; attrChk <= natts; attrChk++)
1620 if (tupdesc->attrs[attrChk - 1]->attnotnull &&
1621 slot_attisnull(slot, attrChk))
1624 Bitmapset *modifiedCols;
1626 modifiedCols = GetModifiedColumns(resultRelInfo, estate);
1627 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1634 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1635 errmsg("null value in column \"%s\" violates not-null constraint",
1636 NameStr(tupdesc->attrs[attrChk - 1]->attname)),
1637 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1638 errtablecol(rel, attrChk)));
1643 if (constr->num_check > 0)
1647 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1650 Bitmapset *modifiedCols;
1652 modifiedCols = GetModifiedColumns(resultRelInfo, estate);
1653 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1659 (errcode(ERRCODE_CHECK_VIOLATION),
1660 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1661 RelationGetRelationName(rel), failed),
1662 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1663 errtableconstraint(rel, failed)));
1669 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
1672 ExecWithCheckOptions(ResultRelInfo *resultRelInfo,
1673 TupleTableSlot *slot, EState *estate)
1675 Relation rel = resultRelInfo->ri_RelationDesc;
1676 TupleDesc tupdesc = RelationGetDescr(rel);
1677 ExprContext *econtext;
1682 * We will use the EState's per-tuple context for evaluating constraint
1683 * expressions (creating it if it's not already there).
1685 econtext = GetPerTupleExprContext(estate);
1687 /* Arrange for econtext's scan tuple to be the tuple under test */
1688 econtext->ecxt_scantuple = slot;
1690 /* Check each of the constraints */
1691 forboth(l1, resultRelInfo->ri_WithCheckOptions,
1692 l2, resultRelInfo->ri_WithCheckOptionExprs)
1694 WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
1695 ExprState *wcoExpr = (ExprState *) lfirst(l2);
1698 * WITH CHECK OPTION checks are intended to ensure that the new tuple
1699 * is visible (in the case of a view) or that it passes the
1700 * 'with-check' policy (in the case of row security).
1701 * If the qual evaluates to NULL or FALSE, then the new tuple won't be
1702 * included in the view or doesn't pass the 'with-check' policy for the
1703 * table. We need ExecQual to return FALSE for NULL to handle the view
1704 * case (the opposite of what we do above for CHECK constraints).
1706 if (!ExecQual((List *) wcoExpr, econtext, false))
1709 Bitmapset *modifiedCols;
1711 modifiedCols = GetModifiedColumns(resultRelInfo, estate);
1712 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1719 (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
1720 errmsg("new row violates WITH CHECK OPTION for \"%s\"",
1722 val_desc ? errdetail("Failing row contains %s.", val_desc) :
1729 * ExecBuildSlotValueDescription -- construct a string representing a tuple
1731 * This is intentionally very similar to BuildIndexValueDescription, but
1732 * unlike that function, we truncate long field values (to at most maxfieldlen
1733 * bytes). That seems necessary here since heap field values could be very
1734 * long, whereas index entries typically aren't so wide.
1736 * Also, unlike the case with index entries, we need to be prepared to ignore
1737 * dropped columns. We used to use the slot's tuple descriptor to decode the
1738 * data, but the slot's descriptor doesn't identify dropped columns, so we
1739 * now need to be passed the relation's descriptor.
1741 * Note that, like BuildIndexValueDescription, if the user does not have
1742 * permission to view any of the columns involved, a NULL is returned. Unlike
1743 * BuildIndexValueDescription, if the user has access to view a subset of the
1744 * column involved, that subset will be returned with a key identifying which
1748 ExecBuildSlotValueDescription(Oid reloid,
1749 TupleTableSlot *slot,
1751 Bitmapset *modifiedCols,
1755 StringInfoData collist;
1756 bool write_comma = false;
1757 bool write_comma_collist = false;
1759 AclResult aclresult;
1760 bool table_perm = false;
1761 bool any_perm = false;
1764 * Check if RLS is enabled and should be active for the relation; if so,
1765 * then don't return anything. Otherwise, go through normal permission
1768 if (check_enable_rls(reloid, GetUserId(), true) == RLS_ENABLED)
1771 initStringInfo(&buf);
1773 appendStringInfoChar(&buf, '(');
1776 * Check if the user has permissions to see the row. Table-level SELECT
1777 * allows access to all columns. If the user does not have table-level
1778 * SELECT then we check each column and include those the user has SELECT
1779 * rights on. Additionally, we always include columns the user provided
1782 aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
1783 if (aclresult != ACLCHECK_OK)
1785 /* Set up the buffer for the column list */
1786 initStringInfo(&collist);
1787 appendStringInfoChar(&collist, '(');
1790 table_perm = any_perm = true;
1792 /* Make sure the tuple is fully deconstructed */
1793 slot_getallattrs(slot);
1795 for (i = 0; i < tupdesc->natts; i++)
1797 bool column_perm = false;
1801 /* ignore dropped columns */
1802 if (tupdesc->attrs[i]->attisdropped)
1808 * No table-level SELECT, so need to make sure they either have
1809 * SELECT rights on the column or that they have provided the
1810 * data for the column. If not, omit this column from the error
1813 aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
1814 GetUserId(), ACL_SELECT);
1815 if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber,
1816 modifiedCols) || aclresult == ACLCHECK_OK)
1818 column_perm = any_perm = true;
1820 if (write_comma_collist)
1821 appendStringInfoString(&collist, ", ");
1823 write_comma_collist = true;
1825 appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname));
1829 if (table_perm || column_perm)
1831 if (slot->tts_isnull[i])
1838 getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
1839 &foutoid, &typisvarlena);
1840 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
1844 appendStringInfoString(&buf, ", ");
1848 /* truncate if needed */
1849 vallen = strlen(val);
1850 if (vallen <= maxfieldlen)
1851 appendStringInfoString(&buf, val);
1854 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1855 appendBinaryStringInfo(&buf, val, vallen);
1856 appendStringInfoString(&buf, "...");
1861 /* If we end up with zero columns being returned, then return NULL. */
1865 appendStringInfoChar(&buf, ')');
1869 appendStringInfoString(&collist, ") = ");
1870 appendStringInfoString(&collist, buf.data);
1872 return collist.data;
1880 * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
1883 ExecFindRowMark(EState *estate, Index rti)
1887 foreach(lc, estate->es_rowMarks)
1889 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
1891 if (erm->rti == rti)
1894 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
1895 return NULL; /* keep compiler quiet */
1899 * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
1901 * Inputs are the underlying ExecRowMark struct and the targetlist of the
1902 * input plan node (not planstate node!). We need the latter to find out
1903 * the column numbers of the resjunk columns.
1906 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
1908 ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
1911 aerm->rowmark = erm;
1913 /* Look up the resjunk columns associated with this rowmark */
1916 Assert(erm->markType != ROW_MARK_COPY);
1918 /* if child rel, need tableoid */
1919 if (erm->rti != erm->prti)
1921 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
1922 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
1924 if (!AttributeNumberIsValid(aerm->toidAttNo))
1925 elog(ERROR, "could not find junk %s column", resname);
1928 /* always need ctid for real relations */
1929 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
1930 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
1932 if (!AttributeNumberIsValid(aerm->ctidAttNo))
1933 elog(ERROR, "could not find junk %s column", resname);
1937 Assert(erm->markType == ROW_MARK_COPY);
1939 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
1940 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
1942 if (!AttributeNumberIsValid(aerm->wholeAttNo))
1943 elog(ERROR, "could not find junk %s column", resname);
1951 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
1952 * process the updated version under READ COMMITTED rules.
1954 * See backend/executor/README for some info about how this works.
1959 * Check a modified tuple to see if we want to process its updated version
1960 * under READ COMMITTED rules.
1962 * estate - outer executor state data
1963 * epqstate - state for EvalPlanQual rechecking
1964 * relation - table containing tuple
1965 * rti - rangetable index of table containing tuple
1966 * lockmode - requested tuple lock mode
1967 * *tid - t_ctid from the outdated tuple (ie, next updated version)
1968 * priorXmax - t_xmax from the outdated tuple
1970 * *tid is also an output parameter: it's modified to hold the TID of the
1971 * latest version of the tuple (note this may be changed even on failure)
1973 * Returns a slot containing the new candidate update/delete tuple, or
1974 * NULL if we determine we shouldn't process the row.
1976 * Note: properly, lockmode should be declared as enum LockTupleMode,
1977 * but we use "int" to avoid having to include heapam.h in executor.h.
1980 EvalPlanQual(EState *estate, EPQState *epqstate,
1981 Relation relation, Index rti, int lockmode,
1982 ItemPointer tid, TransactionId priorXmax)
1984 TupleTableSlot *slot;
1985 HeapTuple copyTuple;
1990 * Get and lock the updated version of the row; if fail, return NULL.
1992 copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
1995 if (copyTuple == NULL)
1999 * For UPDATE/DELETE we have to return tid of actual row we're executing
2002 *tid = copyTuple->t_self;
2005 * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2007 EvalPlanQualBegin(epqstate, estate);
2010 * Free old test tuple, if any, and store new tuple where relation's scan
2013 EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2016 * Fetch any non-locked source rows
2018 EvalPlanQualFetchRowMarks(epqstate);
2021 * Run the EPQ query. We assume it will return at most one tuple.
2023 slot = EvalPlanQualNext(epqstate);
2026 * If we got a tuple, force the slot to materialize the tuple so that it
2027 * is not dependent on any local state in the EPQ query (in particular,
2028 * it's highly likely that the slot contains references to any pass-by-ref
2029 * datums that may be present in copyTuple). As with the next step, this
2030 * is to guard against early re-use of the EPQ query.
2032 if (!TupIsNull(slot))
2033 (void) ExecMaterializeSlot(slot);
2036 * Clear out the test tuple. This is needed in case the EPQ query is
2037 * re-used to test a tuple for a different relation. (Not clear that can
2038 * really happen, but let's be safe.)
2040 EvalPlanQualSetTuple(epqstate, rti, NULL);
2046 * Fetch a copy of the newest version of an outdated tuple
2048 * estate - executor state data
2049 * relation - table containing tuple
2050 * lockmode - requested tuple lock mode
2051 * wait_policy - requested lock wait policy
2052 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2053 * priorXmax - t_xmax from the outdated tuple
2055 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2056 * that there is no newest version (ie, the row was deleted not updated).
2057 * We also return NULL if the tuple is locked and the wait policy is to skip
2060 * If successful, we have locked the newest tuple version, so caller does not
2061 * need to worry about it changing anymore.
2063 * Note: properly, lockmode should be declared as enum LockTupleMode,
2064 * but we use "int" to avoid having to include heapam.h in executor.h.
2067 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2068 LockWaitPolicy wait_policy,
2069 ItemPointer tid, TransactionId priorXmax)
2071 HeapTuple copyTuple = NULL;
2072 HeapTupleData tuple;
2073 SnapshotData SnapshotDirty;
2076 * fetch target tuple
2078 * Loop here to deal with updated or busy tuples
2080 InitDirtySnapshot(SnapshotDirty);
2081 tuple.t_self = *tid;
2086 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2089 HeapUpdateFailureData hufd;
2092 * If xmin isn't what we're expecting, the slot must have been
2093 * recycled and reused for an unrelated tuple. This implies that
2094 * the latest version of the row was deleted, so we need do
2095 * nothing. (Should be safe to examine xmin without getting
2096 * buffer's content lock, since xmin never changes in an existing
2099 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2102 ReleaseBuffer(buffer);
2106 /* otherwise xmin should not be dirty... */
2107 if (TransactionIdIsValid(SnapshotDirty.xmin))
2108 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2111 * If tuple is being updated by other transaction then we have to
2112 * wait for its commit/abort, or die trying.
2114 if (TransactionIdIsValid(SnapshotDirty.xmax))
2116 ReleaseBuffer(buffer);
2117 switch (wait_policy)
2120 XactLockTableWait(SnapshotDirty.xmax,
2121 relation, &tuple.t_self,
2125 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2126 return NULL; /* skip instead of waiting */
2129 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2131 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2132 errmsg("could not obtain lock on row in relation \"%s\"",
2133 RelationGetRelationName(relation))));
2136 continue; /* loop back to repeat heap_fetch */
2140 * If tuple was inserted by our own transaction, we have to check
2141 * cmin against es_output_cid: cmin >= current CID means our
2142 * command cannot see the tuple, so we should ignore it. Otherwise
2143 * heap_lock_tuple() will throw an error, and so would any later
2144 * attempt to update or delete the tuple. (We need not check cmax
2145 * because HeapTupleSatisfiesDirty will consider a tuple deleted
2146 * by our transaction dead, regardless of cmax.) We just checked
2147 * that priorXmax == xmin, so we can test that variable instead of
2148 * doing HeapTupleHeaderGetXmin again.
2150 if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2151 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2153 ReleaseBuffer(buffer);
2158 * This is a live tuple, so now try to lock it.
2160 test = heap_lock_tuple(relation, &tuple,
2161 estate->es_output_cid,
2162 lockmode, wait_policy,
2163 false, &buffer, &hufd);
2164 /* We now have two pins on the buffer, get rid of one */
2165 ReleaseBuffer(buffer);
2169 case HeapTupleSelfUpdated:
2172 * The target tuple was already updated or deleted by the
2173 * current command, or by a later command in the current
2174 * transaction. We *must* ignore the tuple in the former
2175 * case, so as to avoid the "Halloween problem" of
2176 * repeated update attempts. In the latter case it might
2177 * be sensible to fetch the updated tuple instead, but
2178 * doing so would require changing heap_lock_tuple as well
2179 * as heap_update and heap_delete to not complain about
2180 * updating "invisible" tuples, which seems pretty scary.
2181 * So for now, treat the tuple as deleted and do not
2184 ReleaseBuffer(buffer);
2187 case HeapTupleMayBeUpdated:
2188 /* successfully locked */
2191 case HeapTupleUpdated:
2192 ReleaseBuffer(buffer);
2193 if (IsolationUsesXactSnapshot())
2195 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2196 errmsg("could not serialize access due to concurrent update")));
2197 if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2199 /* it was updated, so look at the updated version */
2200 tuple.t_self = hufd.ctid;
2201 /* updated row should have xmin matching this xmax */
2202 priorXmax = hufd.xmax;
2205 /* tuple was deleted, so give up */
2208 case HeapTupleWouldBlock:
2209 ReleaseBuffer(buffer);
2213 ReleaseBuffer(buffer);
2214 elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2216 return NULL; /* keep compiler quiet */
2220 * We got tuple - now copy it for use by recheck query.
2222 copyTuple = heap_copytuple(&tuple);
2223 ReleaseBuffer(buffer);
2228 * If the referenced slot was actually empty, the latest version of
2229 * the row must have been deleted, so we need do nothing.
2231 if (tuple.t_data == NULL)
2233 ReleaseBuffer(buffer);
2238 * As above, if xmin isn't what we're expecting, do nothing.
2240 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2243 ReleaseBuffer(buffer);
2248 * If we get here, the tuple was found but failed SnapshotDirty.
2249 * Assuming the xmin is either a committed xact or our own xact (as it
2250 * certainly should be if we're trying to modify the tuple), this must
2251 * mean that the row was updated or deleted by either a committed xact
2252 * or our own xact. If it was deleted, we can ignore it; if it was
2253 * updated then chain up to the next version and repeat the whole
2256 * As above, it should be safe to examine xmax and t_ctid without the
2257 * buffer content lock, because they can't be changing.
2259 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2261 /* deleted, so forget about it */
2262 ReleaseBuffer(buffer);
2266 /* updated, so look at the updated row */
2267 tuple.t_self = tuple.t_data->t_ctid;
2268 /* updated row should have xmin matching this xmax */
2269 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2270 ReleaseBuffer(buffer);
2271 /* loop back to fetch next in chain */
2275 * Return the copied tuple
2281 * EvalPlanQualInit -- initialize during creation of a plan state node
2282 * that might need to invoke EPQ processing.
2284 * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2285 * with EvalPlanQualSetPlan.
2288 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2289 Plan *subplan, List *auxrowmarks, int epqParam)
2291 /* Mark the EPQ state inactive */
2292 epqstate->estate = NULL;
2293 epqstate->planstate = NULL;
2294 epqstate->origslot = NULL;
2295 /* ... and remember data that EvalPlanQualBegin will need */
2296 epqstate->plan = subplan;
2297 epqstate->arowMarks = auxrowmarks;
2298 epqstate->epqParam = epqParam;
2302 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2304 * We need this so that ModifyTable can deal with multiple subplans.
2307 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2309 /* If we have a live EPQ query, shut it down */
2310 EvalPlanQualEnd(epqstate);
2311 /* And set/change the plan pointer */
2312 epqstate->plan = subplan;
2313 /* The rowmarks depend on the plan, too */
2314 epqstate->arowMarks = auxrowmarks;
2318 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2320 * NB: passed tuple must be palloc'd; it may get freed later
2323 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2325 EState *estate = epqstate->estate;
2330 * free old test tuple, if any, and store new tuple where relation's scan
2333 if (estate->es_epqTuple[rti - 1] != NULL)
2334 heap_freetuple(estate->es_epqTuple[rti - 1]);
2335 estate->es_epqTuple[rti - 1] = tuple;
2336 estate->es_epqTupleSet[rti - 1] = true;
2340 * Fetch back the current test tuple (if any) for the specified RTI
2343 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2345 EState *estate = epqstate->estate;
2349 return estate->es_epqTuple[rti - 1];
2353 * Fetch the current row values for any non-locked relations that need
2354 * to be scanned by an EvalPlanQual operation. origslot must have been set
2355 * to contain the current result row (top-level row) that we need to recheck.
2358 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2362 Assert(epqstate->origslot != NULL);
2364 foreach(l, epqstate->arowMarks)
2366 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2367 ExecRowMark *erm = aerm->rowmark;
2370 HeapTupleData tuple;
2372 if (RowMarkRequiresRowShareLock(erm->markType))
2373 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2375 /* clear any leftover test tuple for this rel */
2376 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2382 Assert(erm->markType == ROW_MARK_REFERENCE);
2384 /* if child rel, must check whether it produced this row */
2385 if (erm->rti != erm->prti)
2389 datum = ExecGetJunkAttribute(epqstate->origslot,
2392 /* non-locked rels could be on the inside of outer joins */
2395 tableoid = DatumGetObjectId(datum);
2397 if (tableoid != RelationGetRelid(erm->relation))
2399 /* this child is inactive right now */
2404 /* fetch the tuple's ctid */
2405 datum = ExecGetJunkAttribute(epqstate->origslot,
2408 /* non-locked rels could be on the inside of outer joins */
2411 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2413 /* okay, fetch the tuple */
2414 if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2416 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2418 /* successful, copy and store tuple */
2419 EvalPlanQualSetTuple(epqstate, erm->rti,
2420 heap_copytuple(&tuple));
2421 ReleaseBuffer(buffer);
2427 Assert(erm->markType == ROW_MARK_COPY);
2429 /* fetch the whole-row Var for the relation */
2430 datum = ExecGetJunkAttribute(epqstate->origslot,
2433 /* non-locked rels could be on the inside of outer joins */
2436 td = DatumGetHeapTupleHeader(datum);
2438 /* build a temporary HeapTuple control structure */
2439 tuple.t_len = HeapTupleHeaderGetDatumLength(td);
2440 ItemPointerSetInvalid(&(tuple.t_self));
2441 /* relation might be a foreign table, if so provide tableoid */
2442 tuple.t_tableOid = getrelid(erm->rti,
2443 epqstate->estate->es_range_table);
2446 /* copy and store tuple */
2447 EvalPlanQualSetTuple(epqstate, erm->rti,
2448 heap_copytuple(&tuple));
2454 * Fetch the next row (if any) from EvalPlanQual testing
2456 * (In practice, there should never be more than one row...)
2459 EvalPlanQualNext(EPQState *epqstate)
2461 MemoryContext oldcontext;
2462 TupleTableSlot *slot;
2464 oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2465 slot = ExecProcNode(epqstate->planstate);
2466 MemoryContextSwitchTo(oldcontext);
2472 * Initialize or reset an EvalPlanQual state tree
2475 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2477 EState *estate = epqstate->estate;
2481 /* First time through, so create a child EState */
2482 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2487 * We already have a suitable child EPQ tree, so just reset it.
2489 int rtsize = list_length(parentestate->es_range_table);
2490 PlanState *planstate = epqstate->planstate;
2492 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2494 /* Recopy current values of parent parameters */
2495 if (parentestate->es_plannedstmt->nParamExec > 0)
2497 int i = parentestate->es_plannedstmt->nParamExec;
2501 /* copy value if any, but not execPlan link */
2502 estate->es_param_exec_vals[i].value =
2503 parentestate->es_param_exec_vals[i].value;
2504 estate->es_param_exec_vals[i].isnull =
2505 parentestate->es_param_exec_vals[i].isnull;
2510 * Mark child plan tree as needing rescan at all scan nodes. The
2511 * first ExecProcNode will take care of actually doing the rescan.
2513 planstate->chgParam = bms_add_member(planstate->chgParam,
2514 epqstate->epqParam);
2519 * Start execution of an EvalPlanQual plan tree.
2521 * This is a cut-down version of ExecutorStart(): we copy some state from
2522 * the top-level estate rather than initializing it fresh.
2525 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
2529 MemoryContext oldcontext;
2532 rtsize = list_length(parentestate->es_range_table);
2534 epqstate->estate = estate = CreateExecutorState();
2536 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2539 * Child EPQ EStates share the parent's copy of unchanging state such as
2540 * the snapshot, rangetable, result-rel info, and external Param info.
2541 * They need their own copies of local state, including a tuple table,
2542 * es_param_exec_vals, etc.
2544 * The ResultRelInfo array management is trickier than it looks. We
2545 * create a fresh array for the child but copy all the content from the
2546 * parent. This is because it's okay for the child to share any
2547 * per-relation state the parent has already created --- but if the child
2548 * sets up any ResultRelInfo fields, such as its own junkfilter, that
2549 * state must *not* propagate back to the parent. (For one thing, the
2550 * pointed-to data is in a memory context that won't last long enough.)
2552 estate->es_direction = ForwardScanDirection;
2553 estate->es_snapshot = parentestate->es_snapshot;
2554 estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2555 estate->es_range_table = parentestate->es_range_table;
2556 estate->es_plannedstmt = parentestate->es_plannedstmt;
2557 estate->es_junkFilter = parentestate->es_junkFilter;
2558 estate->es_output_cid = parentestate->es_output_cid;
2559 if (parentestate->es_num_result_relations > 0)
2561 int numResultRelations = parentestate->es_num_result_relations;
2562 ResultRelInfo *resultRelInfos;
2564 resultRelInfos = (ResultRelInfo *)
2565 palloc(numResultRelations * sizeof(ResultRelInfo));
2566 memcpy(resultRelInfos, parentestate->es_result_relations,
2567 numResultRelations * sizeof(ResultRelInfo));
2568 estate->es_result_relations = resultRelInfos;
2569 estate->es_num_result_relations = numResultRelations;
2571 /* es_result_relation_info must NOT be copied */
2572 /* es_trig_target_relations must NOT be copied */
2573 estate->es_rowMarks = parentestate->es_rowMarks;
2574 estate->es_top_eflags = parentestate->es_top_eflags;
2575 estate->es_instrument = parentestate->es_instrument;
2576 /* es_auxmodifytables must NOT be copied */
2579 * The external param list is simply shared from parent. The internal
2580 * param workspace has to be local state, but we copy the initial values
2581 * from the parent, so as to have access to any param values that were
2582 * already set from other parts of the parent's plan tree.
2584 estate->es_param_list_info = parentestate->es_param_list_info;
2585 if (parentestate->es_plannedstmt->nParamExec > 0)
2587 int i = parentestate->es_plannedstmt->nParamExec;
2589 estate->es_param_exec_vals = (ParamExecData *)
2590 palloc0(i * sizeof(ParamExecData));
2593 /* copy value if any, but not execPlan link */
2594 estate->es_param_exec_vals[i].value =
2595 parentestate->es_param_exec_vals[i].value;
2596 estate->es_param_exec_vals[i].isnull =
2597 parentestate->es_param_exec_vals[i].isnull;
2602 * Each EState must have its own es_epqScanDone state, but if we have
2603 * nested EPQ checks they should share es_epqTuple arrays. This allows
2604 * sub-rechecks to inherit the values being examined by an outer recheck.
2606 estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
2607 if (parentestate->es_epqTuple != NULL)
2609 estate->es_epqTuple = parentestate->es_epqTuple;
2610 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
2614 estate->es_epqTuple = (HeapTuple *)
2615 palloc0(rtsize * sizeof(HeapTuple));
2616 estate->es_epqTupleSet = (bool *)
2617 palloc0(rtsize * sizeof(bool));
2621 * Each estate also has its own tuple table.
2623 estate->es_tupleTable = NIL;
2626 * Initialize private state information for each SubPlan. We must do this
2627 * before running ExecInitNode on the main query tree, since
2628 * ExecInitSubPlan expects to be able to find these entries. Some of the
2629 * SubPlans might not be used in the part of the plan tree we intend to
2630 * run, but since it's not easy to tell which, we just initialize them
2633 Assert(estate->es_subplanstates == NIL);
2634 foreach(l, parentestate->es_plannedstmt->subplans)
2636 Plan *subplan = (Plan *) lfirst(l);
2637 PlanState *subplanstate;
2639 subplanstate = ExecInitNode(subplan, estate, 0);
2640 estate->es_subplanstates = lappend(estate->es_subplanstates,
2645 * Initialize the private state information for all the nodes in the part
2646 * of the plan tree we need to run. This opens files, allocates storage
2647 * and leaves us ready to start processing tuples.
2649 epqstate->planstate = ExecInitNode(planTree, estate, 0);
2651 MemoryContextSwitchTo(oldcontext);
2655 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2656 * or if we are done with the current EPQ child.
2658 * This is a cut-down version of ExecutorEnd(); basically we want to do most
2659 * of the normal cleanup, but *not* close result relations (which we are
2660 * just sharing from the outer query). We do, however, have to close any
2661 * trigger target relations that got opened, since those are not shared.
2662 * (There probably shouldn't be any of the latter, but just in case...)
2665 EvalPlanQualEnd(EPQState *epqstate)
2667 EState *estate = epqstate->estate;
2668 MemoryContext oldcontext;
2672 return; /* idle, so nothing to do */
2674 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2676 ExecEndNode(epqstate->planstate);
2678 foreach(l, estate->es_subplanstates)
2680 PlanState *subplanstate = (PlanState *) lfirst(l);
2682 ExecEndNode(subplanstate);
2685 /* throw away the per-estate tuple table */
2686 ExecResetTupleTable(estate->es_tupleTable, false);
2688 /* close any trigger target relations attached to this EState */
2689 foreach(l, estate->es_trig_target_relations)
2691 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2693 /* Close indices and then the relation itself */
2694 ExecCloseIndices(resultRelInfo);
2695 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2698 MemoryContextSwitchTo(oldcontext);
2700 FreeExecutorState(estate);
2702 /* Mark EPQState idle */
2703 epqstate->estate = NULL;
2704 epqstate->planstate = NULL;
2705 epqstate->origslot = NULL;