1 /*-------------------------------------------------------------------------
4 * top level executor interface routines
12 * These four procedures are the external interface to the executor.
13 * In each case, the query descriptor is required as an argument.
15 * ExecutorStart must be called at the beginning of execution of any
16 * query plan and ExecutorEnd must always be called at the end of
17 * execution of a plan (unless it is aborted due to error).
19 * ExecutorRun accepts direction and count arguments that specify whether
20 * the plan is to be executed forwards, backwards, and for how many tuples.
21 * In some cases ExecutorRun may be called multiple times to process all
22 * the tuples for a plan. It is also acceptable to stop short of executing
23 * the whole plan (but only if it is a SELECT).
25 * ExecutorFinish must be called after the final ExecutorRun call and
26 * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 * which should also omit ExecutorRun.
29 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
30 * Portions Copyright (c) 1994, Regents of the University of California
34 * src/backend/executor/execMain.c
36 *-------------------------------------------------------------------------
40 #include "access/htup_details.h"
41 #include "access/sysattr.h"
42 #include "access/transam.h"
43 #include "access/xact.h"
44 #include "catalog/namespace.h"
45 #include "commands/matview.h"
46 #include "commands/trigger.h"
47 #include "executor/execdebug.h"
48 #include "foreign/fdwapi.h"
49 #include "mb/pg_wchar.h"
50 #include "miscadmin.h"
51 #include "optimizer/clauses.h"
52 #include "parser/parsetree.h"
53 #include "storage/bufmgr.h"
54 #include "storage/lmgr.h"
55 #include "tcop/utility.h"
56 #include "utils/acl.h"
57 #include "utils/lsyscache.h"
58 #include "utils/memutils.h"
59 #include "utils/rls.h"
60 #include "utils/snapmgr.h"
61 #include "utils/tqual.h"
64 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
/* NULL means no plugin installed; each Executor* entry point tests its
 * hook and either calls it or falls through to the standard_* routine. */
65 ExecutorStart_hook_type ExecutorStart_hook = NULL;
66 ExecutorRun_hook_type ExecutorRun_hook = NULL;
67 ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
68 ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
70 /* Hook for plugin to get control in ExecCheckRTPerms() */
/* Called after the built-in permission checks; may override the result. */
71 ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
73 /* decls for local routines only used within this module */
74 static void InitPlan(QueryDesc *queryDesc, int eflags);
75 static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
76 static void ExecPostprocessPlan(EState *estate);
77 static void ExecEndPlan(PlanState *planstate, EState *estate);
78 static void ExecutePlan(EState *estate, PlanState *planstate,
82 ScanDirection direction,
84 static bool ExecCheckRTEPerms(RangeTblEntry *rte);
85 static bool ExecCheckRTEPermsModified(Oid relOid, Oid userid,
86 Bitmapset *modifiedCols,
87 AclMode requiredPerms);
88 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
89 static char *ExecBuildSlotValueDescription(Oid reloid,
92 Bitmapset *modifiedCols,
94 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
/*
 * Fetch the insertedCols/updatedCols bitmapsets for a result relation, by
 * looking up its RTE in the EState's range table.
 *
 * Note that GetUpdatedColumns() also exists in commands/trigger.c. There does
98 * Note that GetUpdatedColumns() also exists in commands/trigger.c. There does
99 * not appear to be any good header to put it into, given the structures that
100 * it uses, so we let them be duplicated. Be sure to update both if one needs
101 * to be changed, however.
103 #define GetInsertedColumns(relinfo, estate) \
104 (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols)
105 #define GetUpdatedColumns(relinfo, estate) \
106 (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols)
108 /* end of local decls */
111 /* ----------------------------------------------------------------
114 * This routine must be called at the beginning of any execution of any
117 * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
118 * only because some places use QueryDescs for utility commands). The tupDesc
119 * field of the QueryDesc is filled in to describe the tuples that will be
120 * returned, and the internal fields (estate and planstate) are set up.
122 * eflags contains flag bits as described in executor.h.
124 * NB: the CurrentMemoryContext when this is called will become the parent
125 * of the per-query context used for this Executor invocation.
127 * We provide a function hook variable that lets loadable plugins
128 * get control when ExecutorStart is called. Such a plugin would
129 * normally call standard_ExecutorStart().
131 * ----------------------------------------------------------------
134 ExecutorStart(QueryDesc *queryDesc, int eflags)
/* Dispatch through the plugin hook if one is installed; otherwise run
 * the standard implementation directly. */
136 if (ExecutorStart_hook)
137 (*ExecutorStart_hook) (queryDesc, eflags);
139 standard_ExecutorStart(queryDesc, eflags);
/*
 * standard_ExecutorStart
 *
 * Hook-free implementation of ExecutorStart: builds the EState, applies
 * read-only/parallel-mode checks, sets the output command ID, and calls
 * InitPlan to build the plan state tree.
 */
143 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
146 MemoryContext oldcontext;
148 /* sanity checks: queryDesc must not be started already */
149 Assert(queryDesc != NULL);
150 Assert(queryDesc->estate == NULL);
153 * If the transaction is read-only, we need to check if any writes are
154 * planned to non-temporary tables. EXPLAIN is considered read-only.
156 * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
157 * would require (a) storing the combocid hash in shared memory, rather
158 * than synchronizing it just once at the start of parallelism, and (b) an
159 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
160 * INSERT may have no such troubles, but we forbid it to simplify the
163 * We have lower-level defenses in CommandCounterIncrement and elsewhere
164 * against performing unsafe operations in parallel mode, but this gives a
165 * more user-friendly error message.
167 if ((XactReadOnly || IsInParallelMode()) &&
168 !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
169 ExecCheckXactReadOnly(queryDesc->plannedstmt);
172 * Build EState, switch into per-query memory context for startup.
174 estate = CreateExecutorState();
175 queryDesc->estate = estate;
177 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
180 * Fill in external parameters, if any, from queryDesc; and allocate
181 * workspace for internal parameters
183 estate->es_param_list_info = queryDesc->params;
185 if (queryDesc->plannedstmt->nParamExec > 0)
186 estate->es_param_exec_vals = (ParamExecData *)
187 palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData));
190 * If non-read-only query, set the command ID to mark output tuples with
/* Dispatch on the top-level operation type of the query. */
192 switch (queryDesc->operation)
197 * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
200 if (queryDesc->plannedstmt->rowMarks != NIL ||
201 queryDesc->plannedstmt->hasModifyingCTE)
202 estate->es_output_cid = GetCurrentCommandId(true);
205 * A SELECT without modifying CTEs can't possibly queue triggers,
206 * so force skip-triggers mode. This is just a marginal efficiency
207 * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
208 * all that expensive, but we might as well do it.
210 if (!queryDesc->plannedstmt->hasModifyingCTE)
211 eflags |= EXEC_FLAG_SKIP_TRIGGERS;
/* Non-SELECT (data-modifying) operations always need a current CID. */
217 estate->es_output_cid = GetCurrentCommandId(true);
221 elog(ERROR, "unrecognized operation code: %d",
222 (int) queryDesc->operation);
227 * Copy other important information into the EState
/* Register the snapshots so they stay valid for the life of the query. */
229 estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
230 estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
231 estate->es_top_eflags = eflags;
232 estate->es_instrument = queryDesc->instrument_options;
235 * Initialize the plan state tree
237 InitPlan(queryDesc, eflags);
240 * Set up an AFTER-trigger statement context, unless told not to, or
241 * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
243 if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
244 AfterTriggerBeginQuery();
246 MemoryContextSwitchTo(oldcontext);
249 /* ----------------------------------------------------------------
252 * This is the main routine of the executor module. It accepts
253 * the query descriptor from the traffic cop and executes the
256 * ExecutorStart must have been called already.
258 * If direction is NoMovementScanDirection then nothing is done
259 * except to start up/shut down the destination. Otherwise,
260 * we retrieve up to 'count' tuples in the specified direction.
262 * Note: count = 0 is interpreted as no portal limit, i.e., run to
263 * completion. Also note that the count limit is only applied to
264 * retrieved tuples, not for instance to those inserted/updated/deleted
265 * by a ModifyTable plan node.
267 * There is no return value, but output tuples (if any) are sent to
268 * the destination receiver specified in the QueryDesc; and the number
269 * of tuples processed at the top level can be found in
270 * estate->es_processed.
272 * We provide a function hook variable that lets loadable plugins
273 * get control when ExecutorRun is called. Such a plugin would
274 * normally call standard_ExecutorRun().
276 * ----------------------------------------------------------------
279 ExecutorRun(QueryDesc *queryDesc,
280 ScanDirection direction, long count)
/* Dispatch through the plugin hook if installed, else the standard path. */
282 if (ExecutorRun_hook)
283 (*ExecutorRun_hook) (queryDesc, direction, count);
285 standard_ExecutorRun(queryDesc, direction, count);
/*
 * standard_ExecutorRun
 *
 * Hook-free implementation of ExecutorRun: starts the DestReceiver,
 * runs the plan (unless direction is NoMovement), and shuts the
 * receiver down again, tracking total runtime if instrumentation is on.
 */
289 standard_ExecutorRun(QueryDesc *queryDesc,
290 ScanDirection direction, long count)
296 MemoryContext oldcontext;
299 Assert(queryDesc != NULL);
301 estate = queryDesc->estate;
303 Assert(estate != NULL);
304 Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
307 * Switch into per-query memory context
309 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
311 /* Allow instrumentation of Executor overall runtime */
312 if (queryDesc->totaltime)
313 InstrStartNode(queryDesc->totaltime);
316 * extract information from the query descriptor and the query feature.
318 operation = queryDesc->operation;
319 dest = queryDesc->dest;
322 * startup tuple receiver, if we will be emitting tuples
/* Reset per-run counters before executing. */
324 estate->es_processed = 0;
325 estate->es_lastoid = InvalidOid;
/* Tuples are emitted for SELECT, or for DML with a RETURNING list. */
327 sendTuples = (operation == CMD_SELECT ||
328 queryDesc->plannedstmt->hasReturning);
331 (*dest->rStartup) (dest, operation, queryDesc->tupDesc);
/* run plan, unless direction says to do nothing */
336 if (!ScanDirectionIsNoMovement(direction))
338 queryDesc->planstate,
346 * shutdown tuple receiver, if we started it
349 (*dest->rShutdown) (dest);
351 if (queryDesc->totaltime)
352 InstrStopNode(queryDesc->totaltime, estate->es_processed);
354 MemoryContextSwitchTo(oldcontext);
357 /* ----------------------------------------------------------------
360 * This routine must be called after the last ExecutorRun call.
361 * It performs cleanup such as firing AFTER triggers. It is
362 * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
363 * include these actions in the total runtime.
365 * We provide a function hook variable that lets loadable plugins
366 * get control when ExecutorFinish is called. Such a plugin would
367 * normally call standard_ExecutorFinish().
369 * ----------------------------------------------------------------
372 ExecutorFinish(QueryDesc *queryDesc)
/* Dispatch through the plugin hook if installed, else the standard path. */
374 if (ExecutorFinish_hook)
375 (*ExecutorFinish_hook) (queryDesc);
377 standard_ExecutorFinish(queryDesc);
/*
 * standard_ExecutorFinish
 *
 * Hook-free implementation of ExecutorFinish: drives ModifyTable nodes
 * to completion, fires queued AFTER triggers (unless skipped), and marks
 * the EState finished so ExecutorEnd can verify proper call order.
 */
381 standard_ExecutorFinish(QueryDesc *queryDesc)
384 MemoryContext oldcontext;
387 Assert(queryDesc != NULL);
389 estate = queryDesc->estate;
391 Assert(estate != NULL);
392 Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
394 /* This should be run once and only once per Executor instance */
395 Assert(!estate->es_finished);
397 /* Switch into per-query memory context */
398 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
400 /* Allow instrumentation of Executor overall runtime */
401 if (queryDesc->totaltime)
402 InstrStartNode(queryDesc->totaltime);
404 /* Run ModifyTable nodes to completion */
405 ExecPostprocessPlan(estate);
407 /* Execute queued AFTER triggers, unless told not to */
408 if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
409 AfterTriggerEndQuery(estate);
411 if (queryDesc->totaltime)
412 InstrStopNode(queryDesc->totaltime, 0);
414 MemoryContextSwitchTo(oldcontext);
/* Record completion so ExecutorEnd's Assert can detect missed calls. */
416 estate->es_finished = true;
419 /* ----------------------------------------------------------------
422 * This routine must be called at the end of execution of any
425 * We provide a function hook variable that lets loadable plugins
426 * get control when ExecutorEnd is called. Such a plugin would
427 * normally call standard_ExecutorEnd().
429 * ----------------------------------------------------------------
432 ExecutorEnd(QueryDesc *queryDesc)
/* Dispatch through the plugin hook if installed, else the standard path. */
434 if (ExecutorEnd_hook)
435 (*ExecutorEnd_hook) (queryDesc);
437 standard_ExecutorEnd(queryDesc);
/*
 * standard_ExecutorEnd
 *
 * Hook-free implementation of ExecutorEnd: shuts down the plan tree,
 * releases snapshots and the per-query memory context, and clears the
 * now-dangling pointers in the QueryDesc.
 */
441 standard_ExecutorEnd(QueryDesc *queryDesc)
444 MemoryContext oldcontext;
447 Assert(queryDesc != NULL);
449 estate = queryDesc->estate;
451 Assert(estate != NULL);
454 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
455 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
456 * might forget to call it.
458 Assert(estate->es_finished ||
459 (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
462 * Switch into per-query memory context to run ExecEndPlan
464 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
466 ExecEndPlan(queryDesc->planstate, estate);
468 /* do away with our snapshots */
469 UnregisterSnapshot(estate->es_snapshot);
470 UnregisterSnapshot(estate->es_crosscheck_snapshot);
473 * Must switch out of context before destroying it
475 MemoryContextSwitchTo(oldcontext);
478 * Release EState and per-query memory context. This should release
479 * everything the executor has allocated.
481 FreeExecutorState(estate);
483 /* Reset queryDesc fields that no longer point to anything */
/* These all pointed into the just-freed per-query context. */
484 queryDesc->tupDesc = NULL;
485 queryDesc->estate = NULL;
486 queryDesc->planstate = NULL;
487 queryDesc->totaltime = NULL;
490 /* ----------------------------------------------------------------
493 * This routine may be called on an open queryDesc to rewind it
495 * ----------------------------------------------------------------
498 ExecutorRewind(QueryDesc *queryDesc)
501 MemoryContext oldcontext;
504 Assert(queryDesc != NULL);
506 estate = queryDesc->estate;
508 Assert(estate != NULL);
510 /* It's probably not sensible to rescan updating queries */
511 Assert(queryDesc->operation == CMD_SELECT);
514 * Switch into per-query memory context
516 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/* Rescan the plan from the top; next ExecutorRun starts over. */
521 ExecReScan(queryDesc->planstate);
523 MemoryContextSwitchTo(oldcontext);
/*
 * ExecCheckRTPerms
 *
529 * Check access permissions for all relations listed in a range table.
531 * Returns true if permissions are adequate. Otherwise, throws an appropriate
532 * error if ereport_on_violation is true, or simply returns false otherwise.
534 * Note that this does NOT address row level security policies (aka: RLS). If
535 * rows will be returned to the user as a result of this permission check
536 * passing, then RLS also needs to be consulted (and check_enable_rls()).
538 * See rewrite/rowsecurity.c.
541 ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
/* Check each RTE in turn; per-RTE logic lives in ExecCheckRTEPerms. */
546 foreach(l, rangeTable)
548 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
550 result = ExecCheckRTEPerms(rte);
/* Only plain relations can fail the check, so relid must be valid here. */
553 Assert(rte->rtekind == RTE_RELATION);
554 if (ereport_on_violation)
555 aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
556 get_rel_name(rte->relid));
/* Give a plugin (if any) the chance to override the built-in result. */
561 if (ExecutorCheckPerms_hook)
562 result = (*ExecutorCheckPerms_hook) (rangeTable,
563 ereport_on_violation);
/*
 * ExecCheckRTEPerms
 *
569 * Check access permissions for a single RTE.
 *
 * Returns true if the current (or checkAsUser) user holds every privilege
 * bit in rte->requiredPerms, satisfying them from relation-level or
 * column-level grants as appropriate; false otherwise.
572 ExecCheckRTEPerms(RangeTblEntry *rte)
574 AclMode requiredPerms;
576 AclMode remainingPerms;
581 * Only plain-relation RTEs need to be checked here. Function RTEs are
582 * checked by init_fcache when the function is prepared for execution.
583 * Join, subquery, and special RTEs need no checks.
585 if (rte->rtekind != RTE_RELATION)
589 * No work if requiredPerms is empty.
591 requiredPerms = rte->requiredPerms;
592 if (requiredPerms == 0)
598 * userid to check as: current user unless we have a setuid indication.
600 * Note: GetUserId() is presently fast enough that there's no harm in
601 * calling it separately for each RTE. If that stops being true, we could
602 * call it once in ExecCheckRTPerms and pass the userid down from there.
603 * But for now, no need for the extra clutter.
605 userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
608 * We must have *all* the requiredPerms bits, but some of the bits can be
609 * satisfied from column-level rather than relation-level permissions.
610 * First, remove any bits that are satisfied by relation permissions.
612 relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
613 remainingPerms = requiredPerms & ~relPerms;
614 if (remainingPerms != 0)
619 * If we lack any permissions that exist only as relation permissions,
620 * we can fail straight away.
/* Only SELECT/INSERT/UPDATE have column-level equivalents. */
622 if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
626 * Check to see if we have the needed privileges at column level.
628 * Note: failures just report a table-level error; it would be nicer
629 * to report a column-level error if we have some but not all of the
632 if (remainingPerms & ACL_SELECT)
635 * When the query doesn't explicitly reference any columns (for
636 * example, SELECT COUNT(*) FROM table), allow the query if we
637 * have SELECT on any column of the rel, as per SQL spec.
639 if (bms_is_empty(rte->selectedCols))
641 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
642 ACLMASK_ANY) != ACLCHECK_OK)
/* Otherwise every referenced column must carry SELECT privilege. */
646 while ((col = bms_next_member(rte->selectedCols, col)) >= 0)
648 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
649 AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
651 if (attno == InvalidAttrNumber)
653 /* Whole-row reference, must have priv on all cols */
654 if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
655 ACLMASK_ALL) != ACLCHECK_OK)
660 if (pg_attribute_aclcheck(relOid, attno, userid,
661 ACL_SELECT) != ACLCHECK_OK)
668 * Basically the same for the mod columns, for both INSERT and UPDATE
669 * privilege as specified by remainingPerms.
671 if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
677 if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
/*
687 * ExecCheckRTEPermsModified
688 * Check INSERT or UPDATE access permissions for a single RTE (these
689 * are processed uniformly).
 *
 * requiredPerms is expected to be ACL_INSERT or ACL_UPDATE; returns true
 * if the given user holds that privilege for every column in modifiedCols
 * (or for any column, when modifiedCols is empty).
692 ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
693 AclMode requiredPerms)
698 * When the query doesn't explicitly update any columns, allow the query
699 * if we have permission on any column of the rel. This is to handle
700 * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
702 if (bms_is_empty(modifiedCols))
704 if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
705 ACLMASK_ANY) != ACLCHECK_OK)
/* Otherwise check each modified column individually. */
709 while ((col = bms_next_member(modifiedCols, col)) >= 0)
711 /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
712 AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
714 if (attno == InvalidAttrNumber)
716 /* whole-row reference can't happen here */
717 elog(ERROR, "whole-row update is not implemented");
721 if (pg_attribute_aclcheck(relOid, attno, userid,
722 requiredPerms) != ACLCHECK_OK)
/*
 * ExecCheckXactReadOnly
 *
730 * Check that the query does not imply any writes to non-temp tables;
731 * unless we're in parallel mode, in which case don't even allow writes
 * to temp tables. Errors out via PreventCommandIfReadOnly /
 * PreventCommandIfParallelMode rather than returning a result.
 *
734 * Note: in a Hot Standby slave this would need to reject writes to temp
735 * tables just as we do in parallel mode; but an HS slave can't have created
736 * any temp tables in the first place, so no need to check that.
739 ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
744 * Fail if write permissions are requested in parallel mode for table
745 * (temp or non-temp), otherwise fail for any non-temp table.
747 foreach(l, plannedstmt->rtable)
749 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
/* Only plain relations can be write targets. */
751 if (rte->rtekind != RTE_RELATION)
/* An RTE needing nothing beyond SELECT is read-only; skip it. */
754 if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
757 if (isTempNamespace(get_rel_namespace(rte->relid)))
760 PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
/* Any data-modifying statement (or modifying CTE) is banned in parallel mode. */
763 if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
764 PreventCommandIfParallelMode(CreateCommandTag((Node *) plannedstmt));
768 /* ----------------------------------------------------------------
771 * Initializes the query plan: open files, allocate storage
772 * and start up the rule manager
 *
 * Performs permission checks, opens/locks result relations and rowmark
 * relations, initializes subplans and the main plan state tree, and
 * fills in queryDesc->tupDesc and queryDesc->planstate.
773 * ----------------------------------------------------------------
776 InitPlan(QueryDesc *queryDesc, int eflags)
778 CmdType operation = queryDesc->operation;
779 PlannedStmt *plannedstmt = queryDesc->plannedstmt;
780 Plan *plan = plannedstmt->planTree;
781 List *rangeTable = plannedstmt->rtable;
782 EState *estate = queryDesc->estate;
783 PlanState *planstate;
789 * Do permissions checks
791 ExecCheckRTPerms(rangeTable, true);
794 * initialize the node's execution state
796 estate->es_range_table = rangeTable;
797 estate->es_plannedstmt = plannedstmt;
800 * initialize result relation stuff, and open/lock the result rels.
802 * We must do this before initializing the plan tree, else we might try to
803 * do a lock upgrade if a result rel is also a source rel.
805 if (plannedstmt->resultRelations)
807 List *resultRelations = plannedstmt->resultRelations;
808 int numResultRelations = list_length(resultRelations);
809 ResultRelInfo *resultRelInfos;
810 ResultRelInfo *resultRelInfo;
812 resultRelInfos = (ResultRelInfo *)
813 palloc(numResultRelations * sizeof(ResultRelInfo));
814 resultRelInfo = resultRelInfos;
815 foreach(l, resultRelations)
817 Index resultRelationIndex = lfirst_int(l);
818 Oid resultRelationOid;
819 Relation resultRelation;
/* Result rels are always locked RowExclusive for writing. */
821 resultRelationOid = getrelid(resultRelationIndex, rangeTable);
822 resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
823 InitResultRelInfo(resultRelInfo,
826 estate->es_instrument);
829 estate->es_result_relations = resultRelInfos;
830 estate->es_num_result_relations = numResultRelations;
831 /* es_result_relation_info is NULL except when within ModifyTable */
832 estate->es_result_relation_info = NULL;
837 * if no result relation, then set state appropriately
839 estate->es_result_relations = NULL;
840 estate->es_num_result_relations = 0;
841 estate->es_result_relation_info = NULL;
845 * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE
846 * before we initialize the plan tree, else we'd be risking lock upgrades.
847 * While we are at it, build the ExecRowMark list.
849 estate->es_rowMarks = NIL;
850 foreach(l, plannedstmt->rowMarks)
852 PlanRowMark *rc = (PlanRowMark *) lfirst(l);
857 /* ignore "parent" rowmarks; they are irrelevant at runtime */
861 /* get relation's OID (will produce InvalidOid if subquery) */
862 relid = getrelid(rc->rti, rangeTable);
865 * If you change the conditions under which rel locks are acquired
866 * here, be sure to adjust ExecOpenScanRelation to match.
868 switch (rc->markType)
870 case ROW_MARK_EXCLUSIVE:
871 case ROW_MARK_NOKEYEXCLUSIVE:
873 case ROW_MARK_KEYSHARE:
/* Locking row marks need RowShareLock on the relation. */
874 relation = heap_open(relid, RowShareLock);
876 case ROW_MARK_REFERENCE:
/* Reference-only marks just need a read lock. */
877 relation = heap_open(relid, AccessShareLock);
880 /* no physical table access is required */
884 elog(ERROR, "unrecognized markType: %d", rc->markType);
885 relation = NULL; /* keep compiler quiet */
889 /* Check that relation is a legal target for marking */
891 CheckValidRowMarkRel(relation, rc->markType);
/* Build the runtime ExecRowMark from the planner's PlanRowMark. */
893 erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
894 erm->relation = relation;
897 erm->prti = rc->prti;
898 erm->rowmarkId = rc->rowmarkId;
899 erm->markType = rc->markType;
900 erm->strength = rc->strength;
901 erm->waitPolicy = rc->waitPolicy;
902 erm->ermActive = false;
903 ItemPointerSetInvalid(&(erm->curCtid));
904 erm->ermExtra = NULL;
905 estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
909 * Initialize the executor's tuple table to empty.
911 estate->es_tupleTable = NIL;
912 estate->es_trig_tuple_slot = NULL;
913 estate->es_trig_oldtup_slot = NULL;
914 estate->es_trig_newtup_slot = NULL;
916 /* mark EvalPlanQual not active */
917 estate->es_epqTuple = NULL;
918 estate->es_epqTupleSet = NULL;
919 estate->es_epqScanDone = NULL;
922 * Initialize private state information for each SubPlan. We must do this
923 * before running ExecInitNode on the main query tree, since
924 * ExecInitSubPlan expects to be able to find these entries.
926 Assert(estate->es_subplanstates == NIL);
927 i = 1; /* subplan indices count from 1 */
928 foreach(l, plannedstmt->subplans)
930 Plan *subplan = (Plan *) lfirst(l);
931 PlanState *subplanstate;
935 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
936 * it is a parameterless subplan (not initplan), we suggest that it be
937 * prepared to handle REWIND efficiently; otherwise there is no need.
940 & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
941 if (bms_is_member(i, plannedstmt->rewindPlanIDs))
942 sp_eflags |= EXEC_FLAG_REWIND;
944 subplanstate = ExecInitNode(subplan, estate, sp_eflags);
946 estate->es_subplanstates = lappend(estate->es_subplanstates,
953 * Initialize the private state information for all the nodes in the query
954 * tree. This opens files, allocates storage and leaves us ready to start
957 planstate = ExecInitNode(plan, estate, eflags);
960 * Get the tuple descriptor describing the type of tuples to return.
962 tupType = ExecGetResultType(planstate);
965 * Initialize the junk filter if needed. SELECT queries need a filter if
966 * there are any junk attrs in the top-level tlist.
968 if (operation == CMD_SELECT)
970 bool junk_filter_needed = false;
/* Scan the top-level target list for any resjunk entries. */
973 foreach(tlist, plan->targetlist)
975 TargetEntry *tle = (TargetEntry *) lfirst(tlist);
979 junk_filter_needed = true;
984 if (junk_filter_needed)
988 j = ExecInitJunkFilter(planstate->plan->targetlist,
990 ExecInitExtraTupleSlot(estate));
991 estate->es_junkFilter = j;
993 /* Want to return the cleaned tuple type */
994 tupType = j->jf_cleanTupType;
/* Hand the results back through the QueryDesc. */
998 queryDesc->tupDesc = tupType;
999 queryDesc->planstate = planstate;
/*
 * CheckValidResultRel
 *
1003 * Check that a proposed result relation is a legal target for the operation
 * (errors out via ereport/elog if not; returns nothing).
 *
1005 * Generally the parser and/or planner should have noticed any such mistake
1006 * already, but let's make sure.
1008 * Note: when changing this function, you probably also need to look at
1009 * CheckValidRowMarkRel.
1012 CheckValidResultRel(Relation resultRel, CmdType operation)
1014 TriggerDesc *trigDesc = resultRel->trigdesc;
1015 FdwRoutine *fdwroutine;
/* Dispatch on the target's relkind. */
1017 switch (resultRel->rd_rel->relkind)
1019 case RELKIND_RELATION:
/* Ordinary tables are always valid targets. */
1022 case RELKIND_SEQUENCE:
1024 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1025 errmsg("cannot change sequence \"%s\"",
1026 RelationGetRelationName(resultRel))));
1028 case RELKIND_TOASTVALUE:
1030 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1031 errmsg("cannot change TOAST relation \"%s\"",
1032 RelationGetRelationName(resultRel))));
1037 * Okay only if there's a suitable INSTEAD OF trigger. Messages
1038 * here should match rewriteHandler.c's rewriteTargetView, except
1039 * that we omit errdetail because we haven't got the information
1040 * handy (and given that we really shouldn't get here anyway, it's
1041 * not worth great exertion to get).
1046 if (!trigDesc || !trigDesc->trig_insert_instead_row)
1048 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1049 errmsg("cannot insert into view \"%s\"",
1050 RelationGetRelationName(resultRel)),
1051 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
1054 if (!trigDesc || !trigDesc->trig_update_instead_row)
1056 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1057 errmsg("cannot update view \"%s\"",
1058 RelationGetRelationName(resultRel)),
1059 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
1062 if (!trigDesc || !trigDesc->trig_delete_instead_row)
1064 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1065 errmsg("cannot delete from view \"%s\"",
1066 RelationGetRelationName(resultRel)),
1067 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
1070 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1074 case RELKIND_MATVIEW:
1075 if (!MatViewIncrementalMaintenanceIsEnabled())
1077 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1078 errmsg("cannot change materialized view \"%s\"",
1079 RelationGetRelationName(resultRel))));
1081 case RELKIND_FOREIGN_TABLE:
1082 /* Okay only if the FDW supports it */
1083 fdwroutine = GetFdwRoutineForRelation(resultRel, false);
/* Per-operation checks: the FDW must implement the callback, and
 * IsForeignRelUpdatable (if provided) must report the op as allowed. */
1087 if (fdwroutine->ExecForeignInsert == NULL)
1089 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1090 errmsg("cannot insert into foreign table \"%s\"",
1091 RelationGetRelationName(resultRel))));
1092 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1093 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1095 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1096 errmsg("foreign table \"%s\" does not allow inserts",
1097 RelationGetRelationName(resultRel))));
1100 if (fdwroutine->ExecForeignUpdate == NULL)
1102 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1103 errmsg("cannot update foreign table \"%s\"",
1104 RelationGetRelationName(resultRel))));
1105 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1106 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1108 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1109 errmsg("foreign table \"%s\" does not allow updates",
1110 RelationGetRelationName(resultRel))));
1113 if (fdwroutine->ExecForeignDelete == NULL)
1115 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1116 errmsg("cannot delete from foreign table \"%s\"",
1117 RelationGetRelationName(resultRel))));
1118 if (fdwroutine->IsForeignRelUpdatable != NULL &&
1119 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1121 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1122 errmsg("foreign table \"%s\" does not allow deletes",
1123 RelationGetRelationName(resultRel))));
1126 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
/* Any other relkind is never a valid modification target. */
1132 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1133 errmsg("cannot change relation \"%s\"",
1134 RelationGetRelationName(resultRel))));
/*
 * CheckValidRowMarkRel
 *
1140 * Check that a proposed rowmark target relation is a legal target
 * for the given RowMarkType (errors out if not).
 *
1142 * In most cases parser and/or planner should have noticed this already, but
1143 * they don't cover all cases.
1146 CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1148 FdwRoutine *fdwroutine;
/* Dispatch on the target's relkind. */
1150 switch (rel->rd_rel->relkind)
1152 case RELKIND_RELATION:
/* Ordinary tables can always be row-marked. */
1155 case RELKIND_SEQUENCE:
1156 /* Must disallow this because we don't vacuum sequences */
1158 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1159 errmsg("cannot lock rows in sequence \"%s\"",
1160 RelationGetRelationName(rel))));
1162 case RELKIND_TOASTVALUE:
1163 /* We could allow this, but there seems no good reason to */
1165 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1166 errmsg("cannot lock rows in TOAST relation \"%s\"",
1167 RelationGetRelationName(rel))));
1170 /* Should not get here; planner should have expanded the view */
1172 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1173 errmsg("cannot lock rows in view \"%s\"",
1174 RelationGetRelationName(rel))));
1176 case RELKIND_MATVIEW:
1177 /* Allow referencing a matview, but not actual locking clauses */
1178 if (markType != ROW_MARK_REFERENCE)
1180 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1181 errmsg("cannot lock rows in materialized view \"%s\"",
1182 RelationGetRelationName(rel))));
1184 case RELKIND_FOREIGN_TABLE:
1185 /* Okay only if the FDW supports it */
1186 fdwroutine = GetFdwRoutineForRelation(rel, false);
1187 if (fdwroutine->RefetchForeignRow == NULL)
1189 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1190 errmsg("cannot lock rows in foreign table \"%s\"",
1191 RelationGetRelationName(rel))));
/* Any other relkind cannot be row-marked. */
1195 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1196 errmsg("cannot lock rows in relation \"%s\"",
1197 RelationGetRelationName(rel))));
1203 * Initialize ResultRelInfo data for one result relation
1205 * Caution: before Postgres 9.1, this function included the relkind checking
1206 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1207 * appropriate. Be sure callers cover those needs.
1210 InitResultRelInfo(ResultRelInfo *resultRelInfo,
1211 Relation resultRelationDesc,
1212 Index resultRelationIndex,
1213 int instrument_options)
/* Start from all-zeroes; any field not assigned below stays NULL/0. */
1215 MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1216 resultRelInfo->type = T_ResultRelInfo;
1217 resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1218 resultRelInfo->ri_RelationDesc = resultRelationDesc;
/* Index info is NOT set up here; callers do ExecOpenIndices if needed. */
1219 resultRelInfo->ri_NumIndices = 0;
1220 resultRelInfo->ri_IndexRelationDescs = NULL;
1221 resultRelInfo->ri_IndexRelationInfo = NULL;
1222 /* make a copy so as not to depend on relcache info not changing... */
1223 resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1224 if (resultRelInfo->ri_TrigDesc)
1226 int n = resultRelInfo->ri_TrigDesc->numtriggers;
/* Per-trigger caches: looked-up functions and compiled WHEN expressions. */
1228 resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1229 palloc0(n * sizeof(FmgrInfo));
1230 resultRelInfo->ri_TrigWhenExprs = (List **)
1231 palloc0(n * sizeof(List *));
/* Optionally allocate per-trigger instrumentation (for EXPLAIN ANALYZE). */
1232 if (instrument_options)
1233 resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options);
1237 resultRelInfo->ri_TrigFunctions = NULL;
1238 resultRelInfo->ri_TrigWhenExprs = NULL;
1239 resultRelInfo->ri_TrigInstrument = NULL;
/* Foreign tables get their FDW routine cached; others have none. */
1241 if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1242 resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1244 resultRelInfo->ri_FdwRoutine = NULL;
1245 resultRelInfo->ri_FdwState = NULL;
1246 resultRelInfo->ri_ConstraintExprs = NULL;
1247 resultRelInfo->ri_junkFilter = NULL;
1248 resultRelInfo->ri_projectReturning = NULL;
1252 * ExecGetTriggerResultRel
1254 * Get a ResultRelInfo for a trigger target relation. Most of the time,
1255 * triggers are fired on one of the result relations of the query, and so
1256 * we can just return a member of the es_result_relations array. (Note: in
1257 * self-join situations there might be multiple members with the same OID;
1258 * if so it doesn't matter which one we pick.) However, it is sometimes
1259 * necessary to fire triggers on other relations; this happens mainly when an
1260 * RI update trigger queues additional triggers on other relations, which will
1261 * be processed in the context of the outer query. For efficiency's sake,
1262 * we want to have a ResultRelInfo for those triggers too; that can avoid
1263 * repeated re-opening of the relation. (It also provides a way for EXPLAIN
1264 * ANALYZE to report the runtimes of such triggers.) So we make additional
1265 * ResultRelInfo's as needed, and save them in es_trig_target_relations.
1268 ExecGetTriggerResultRel(EState *estate, Oid relid)
1270 ResultRelInfo *rInfo;
1274 MemoryContext oldcontext;
1276 /* First, search through the query result relations */
1277 rInfo = estate->es_result_relations;
1278 nr = estate->es_num_result_relations;
1281 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1286 /* Nope, but maybe we already made an extra ResultRelInfo for it */
1287 foreach(l, estate->es_trig_target_relations)
1289 rInfo = (ResultRelInfo *) lfirst(l);
1290 if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1293 /* Nope, so we need a new one */
1296 * Open the target relation's relcache entry. We assume that an
1297 * appropriate lock is still held by the backend from whenever the trigger
1298 * event got queued, so we need take no new lock here. Also, we need not
1299 * recheck the relkind, so no need for CheckValidResultRel.
1301 rel = heap_open(relid, NoLock);
1304 * Make the new entry in the right context.
/* The entry must survive as long as the query, hence es_query_cxt. */
1306 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1307 rInfo = makeNode(ResultRelInfo);
1308 InitResultRelInfo(rInfo,
1310 0, /* dummy rangetable index */
1311 estate->es_instrument);
/* Remember it so later lookups for the same relid find it above. */
1312 estate->es_trig_target_relations =
1313 lappend(estate->es_trig_target_relations, rInfo);
1314 MemoryContextSwitchTo(oldcontext);
1317 * Currently, we don't need any index information in ResultRelInfos used
1318 * only for triggers, so no need to call ExecOpenIndices.
1325 * ExecContextForcesOids
1327 * This is pretty grotty: when doing INSERT, UPDATE, or CREATE TABLE AS,
1328 * we need to ensure that result tuples have space for an OID iff they are
1329 * going to be stored into a relation that has OIDs. In other contexts
1330 * we are free to choose whether to leave space for OIDs in result tuples
1331 * (we generally don't want to, but we do if a physical-tlist optimization
1332 * is possible). This routine checks the plan context and returns TRUE if the
1333 * choice is forced, FALSE if the choice is not forced. In the TRUE case,
1334 * *hasoids is set to the required value.
1336 * One reason this is ugly is that all plan nodes in the plan tree will emit
1337 * tuples with space for an OID, though we really only need the topmost node
1338 * to do so. However, node types like Sort don't project new tuples but just
1339 * return their inputs, and in those cases the requirement propagates down
1340 * to the input node. Eventually we might make this code smart enough to
1341 * recognize how far down the requirement really goes, but for now we just
1342 * make all plan nodes do the same thing if the top level forces the choice.
1344 * We assume that if we are generating tuples for INSERT or UPDATE,
1345 * estate->es_result_relation_info is already set up to describe the target
1346 * relation. Note that in an UPDATE that spans an inheritance tree, some of
1347 * the target relations may have OIDs and some not. We have to make the
1348 * decisions on a per-relation basis as we initialize each of the subplans of
1349 * the ModifyTable node, so ModifyTable has to set es_result_relation_info
1350 * while initializing each subplan.
1352 * CREATE TABLE AS is even uglier, because we don't have the target relation's
1353 * descriptor available when this code runs; we have to look aside at the
1354 * flags passed to ExecutorStart().
1357 ExecContextForcesOids(PlanState *planstate, bool *hasoids)
1359 ResultRelInfo *ri = planstate->state->es_result_relation_info;
/*
 * If an INSERT/UPDATE target relation is set up, its pg_class.relhasoids
 * forces the choice.
 */
1363 Relation rel = ri->ri_RelationDesc;
1367 *hasoids = rel->rd_rel->relhasoids;
/*
 * Otherwise consult the eflags passed to ExecutorStart; CREATE TABLE AS
 * communicates its OID requirement that way (see header comment).
 */
1372 if (planstate->state->es_top_eflags & EXEC_FLAG_WITH_OIDS)
1377 if (planstate->state->es_top_eflags & EXEC_FLAG_WITHOUT_OIDS)
1386 /* ----------------------------------------------------------------
1387 * ExecPostprocessPlan
1389 * Give plan nodes a final chance to execute before shutdown
1390 * ----------------------------------------------------------------
1393 ExecPostprocessPlan(EState *estate)
1398 * Make sure nodes run forward.
1400 estate->es_direction = ForwardScanDirection;
1403 * Run any secondary ModifyTable nodes to completion, in case the main
1404 * query did not fetch all rows from them. (We do this to ensure that
1405 * such nodes have predictable results.)
1407 foreach(lc, estate->es_auxmodifytables)
1409 PlanState *ps = (PlanState *) lfirst(lc);
/* Drain this aux node until it reports no more tuples. */
1413 TupleTableSlot *slot;
1415 /* Reset the per-output-tuple exprcontext each time */
1416 ResetPerTupleExprContext(estate);
1418 slot = ExecProcNode(ps);
1420 if (TupIsNull(slot))
1426 /* ----------------------------------------------------------------
1429 * Cleans up the query plan -- closes files and frees up storage
1431 * NOTE: we are no longer very worried about freeing storage per se
1432 * in this code; FreeExecutorState should be guaranteed to release all
1433 * memory that needs to be released. What we are worried about doing
1434 * is closing relations and dropping buffer pins. Thus, for example,
1435 * tuple tables must be cleared or dropped to ensure pins are released.
1436 * ----------------------------------------------------------------
1439 ExecEndPlan(PlanState *planstate, EState *estate)
1441 ResultRelInfo *resultRelInfo;
1446 * shut down the node-type-specific query processing
1448 ExecEndNode(planstate);
/* also shut down any subplans' state trees */
1453 foreach(l, estate->es_subplanstates)
1455 PlanState *subplanstate = (PlanState *) lfirst(l);
1457 ExecEndNode(subplanstate);
1461 * destroy the executor's tuple table. Actually we only care about
1462 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1463 * the TupleTableSlots, since the containing memory context is about to go
1466 ExecResetTupleTable(estate->es_tupleTable, false);
1469 * close the result relation(s) if any, but hold locks until xact commit.
1471 resultRelInfo = estate->es_result_relations;
1472 for (i = estate->es_num_result_relations; i > 0; i--)
1474 /* Close indices and then the relation itself */
1475 ExecCloseIndices(resultRelInfo);
1476 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1481 * likewise close any trigger target relations
1483 foreach(l, estate->es_trig_target_relations)
1485 resultRelInfo = (ResultRelInfo *) lfirst(l);
1486 /* Close indices and then the relation itself */
1487 ExecCloseIndices(resultRelInfo);
1488 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
1492 * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
1495 foreach(l, estate->es_rowMarks)
1497 ExecRowMark *erm = (ExecRowMark *) lfirst(l);
/* NoLock: the rowmark lock is likewise held until end of transaction */
1500 heap_close(erm->relation, NoLock);
1504 /* ----------------------------------------------------------------
1507 * Processes the query plan until we have retrieved 'numberTuples' tuples,
1508 * moving in the specified direction.
1510 * Runs to completion if numberTuples is 0
1512 * Note: the ctid attribute is a 'junk' attribute that is removed before the
1514 * ----------------------------------------------------------------
1517 ExecutePlan(EState *estate,
1518 PlanState *planstate,
1522 ScanDirection direction,
1525 TupleTableSlot *slot;
1526 long current_tuple_count;
1529 * initialize local variables
1531 current_tuple_count = 0;
1534 * Set the direction.
1536 estate->es_direction = direction;
1539 * Loop until we've processed the proper number of tuples from the plan.
1543 /* Reset the per-output-tuple exprcontext */
1544 ResetPerTupleExprContext(estate);
1547 * Execute the plan and obtain a tuple
1549 slot = ExecProcNode(planstate);
1552 * if the tuple is null, then we assume there is nothing more to
1553 * process so we just end the loop...
1555 if (TupIsNull(slot))
1559 * If we have a junk filter, then project a new tuple with the junk
1562 * Store this new "clean" tuple in the junkfilter's resultSlot.
1563 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1564 * because that tuple slot has the wrong descriptor.)
1566 if (estate->es_junkFilter != NULL)
1567 slot = ExecFilterJunk(estate->es_junkFilter, slot);
1570 * If we are supposed to send the tuple somewhere, do so. (In
1571 * practice, this is probably always the case at this point.)
1574 (*dest->receiveSlot) (slot, dest);
1577 * Count tuples processed, if this is a SELECT. (For other operation
1578 * types, the ModifyTable plan node must count the appropriate
1581 if (operation == CMD_SELECT)
1582 (estate->es_processed)++;
1585 * check our tuple count.. if we've processed the proper number then
1586 * quit, else loop again and process more tuples. Zero numberTuples
/* ... means "run to completion" (see function header comment). */
1589 current_tuple_count++;
1590 if (numberTuples && numberTuples == current_tuple_count)
1597 * ExecRelCheck --- check that tuple meets constraints for result relation
1599 * Returns NULL if OK, else name of failed check constraint
1602 ExecRelCheck(ResultRelInfo *resultRelInfo,
1603 TupleTableSlot *slot, EState *estate)
1605 Relation rel = resultRelInfo->ri_RelationDesc;
1606 int ncheck = rel->rd_att->constr->num_check;
1607 ConstrCheck *check = rel->rd_att->constr->check;
1608 ExprContext *econtext;
1609 MemoryContext oldContext;
1614 * If first time through for this result relation, build expression
1615 * nodetrees for rel's constraint expressions. Keep them in the per-query
1616 * memory context so they'll survive throughout the query.
1618 if (resultRelInfo->ri_ConstraintExprs == NULL)
1620 oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1621 resultRelInfo->ri_ConstraintExprs =
1622 (List **) palloc(ncheck * sizeof(List *));
1623 for (i = 0; i < ncheck; i++)
1625 /* ExecQual wants implicit-AND form */
1626 qual = make_ands_implicit(stringToNode(check[i].ccbin));
1627 resultRelInfo->ri_ConstraintExprs[i] = (List *)
1628 ExecPrepareExpr((Expr *) qual, estate);
1630 MemoryContextSwitchTo(oldContext);
1634 * We will use the EState's per-tuple context for evaluating constraint
1635 * expressions (creating it if it's not already there).
1637 econtext = GetPerTupleExprContext(estate);
1639 /* Arrange for econtext's scan tuple to be the tuple under test */
1640 econtext->ecxt_scantuple = slot;
1642 /* And evaluate the constraints */
1643 for (i = 0; i < ncheck; i++)
1645 qual = resultRelInfo->ri_ConstraintExprs[i];
1648 * NOTE: SQL specifies that a NULL result from a constraint expression
1649 * is not to be treated as a failure. Therefore, tell ExecQual to
1650 * return TRUE for NULL.
1652 if (!ExecQual(qual, econtext, true))
1653 return check[i].ccname;
1656 /* NULL result means no error */
/*
 * ExecConstraints --- check that a proposed tuple satisfies the result
 * relation's NOT NULL and CHECK constraints, reporting an error (with a
 * permission-filtered description of the failing row) on the first
 * violation found.
 */
1661 ExecConstraints(ResultRelInfo *resultRelInfo,
1662 TupleTableSlot *slot, EState *estate)
1664 Relation rel = resultRelInfo->ri_RelationDesc;
1665 TupleDesc tupdesc = RelationGetDescr(rel);
1666 TupleConstr *constr = tupdesc->constr;
1667 Bitmapset *modifiedCols;
1668 Bitmapset *insertedCols;
1669 Bitmapset *updatedCols;
/* First enforce any NOT NULL column constraints. */
1673 if (constr->has_not_null)
1675 int natts = tupdesc->natts;
1678 for (attrChk = 1; attrChk <= natts; attrChk++)
1680 if (tupdesc->attrs[attrChk - 1]->attnotnull &&
1681 slot_attisnull(slot, attrChk))
/* Build a row description limited to columns the user may see. */
1685 insertedCols = GetInsertedColumns(resultRelInfo, estate);
1686 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1687 modifiedCols = bms_union(insertedCols, updatedCols);
1688 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1695 (errcode(ERRCODE_NOT_NULL_VIOLATION),
1696 errmsg("null value in column \"%s\" violates not-null constraint",
1697 NameStr(tupdesc->attrs[attrChk - 1]->attname)),
1698 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1699 errtablecol(rel, attrChk)));
/* Then evaluate any CHECK constraints via ExecRelCheck. */
1704 if (constr->num_check > 0)
1708 if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
1712 insertedCols = GetInsertedColumns(resultRelInfo, estate);
1713 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1714 modifiedCols = bms_union(insertedCols, updatedCols);
1715 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1721 (errcode(ERRCODE_CHECK_VIOLATION),
1722 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
1723 RelationGetRelationName(rel), failed),
1724 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1725 errtableconstraint(rel, failed)));
1731 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
1732 * of the specified kind.
1734 * Note that this needs to be called multiple times to ensure that all kinds of
1735 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
1736 * CHECK OPTION set and from row level security policies). See ExecInsert()
1740 ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
1741 TupleTableSlot *slot, EState *estate)
1743 Relation rel = resultRelInfo->ri_RelationDesc;
1744 TupleDesc tupdesc = RelationGetDescr(rel);
1745 ExprContext *econtext;
1750 * We will use the EState's per-tuple context for evaluating constraint
1751 * expressions (creating it if it's not already there).
1753 econtext = GetPerTupleExprContext(estate);
1755 /* Arrange for econtext's scan tuple to be the tuple under test */
1756 econtext->ecxt_scantuple = slot;
1758 /* Check each of the constraints */
1759 forboth(l1, resultRelInfo->ri_WithCheckOptions,
1760 l2, resultRelInfo->ri_WithCheckOptionExprs)
1762 WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
1763 ExprState *wcoExpr = (ExprState *) lfirst(l2);
1766 * Skip any WCOs which are not the kind we are looking for at this
1769 if (wco->kind != kind)
1773 * WITH CHECK OPTION checks are intended to ensure that the new tuple
1774 * is visible (in the case of a view) or that it passes the
1775 * 'with-check' policy (in the case of row security). If the qual
1776 * evaluates to NULL or FALSE, then the new tuple won't be included in
1777 * the view or doesn't pass the 'with-check' policy for the table. We
1778 * need ExecQual to return FALSE for NULL to handle the view case (the
1779 * opposite of what we do above for CHECK constraints).
1781 if (!ExecQual((List *) wcoExpr, econtext, false))
1784 Bitmapset *modifiedCols;
1785 Bitmapset *insertedCols;
1786 Bitmapset *updatedCols;
1791 * For WITH CHECK OPTIONs coming from views, we might be
1792 * able to provide the details on the row, depending on
1793 * the permissions on the relation (that is, if the user
1794 * could view it directly anyway). For RLS violations, we
1795 * don't include the data since we don't know if the user
1796 * should be able to view the tuple as that depends on
1799 case WCO_VIEW_CHECK:
/* View case: include a (permission-filtered) row description. */
1800 insertedCols = GetInsertedColumns(resultRelInfo, estate);
1801 updatedCols = GetUpdatedColumns(resultRelInfo, estate);
1802 modifiedCols = bms_union(insertedCols, updatedCols);
1803 val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
1810 (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
1811 errmsg("new row violates WITH CHECK OPTION for \"%s\"",
1813 val_desc ? errdetail("Failing row contains %s.",
1816 case WCO_RLS_INSERT_CHECK:
1817 case WCO_RLS_UPDATE_CHECK:
/* RLS case: name the policy if it has one, but never show row data. */
1818 if (wco->polname != NULL)
1820 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1821 errmsg("new row violates row level security policy \"%s\" for \"%s\"",
1822 wco->polname, wco->relname)));
1825 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1826 errmsg("new row violates row level security policy for \"%s\"",
1829 case WCO_RLS_CONFLICT_CHECK:
1830 if (wco->polname != NULL)
1832 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1833 errmsg("new row violates row level security policy \"%s\" (USING expression) for \"%s\"",
1834 wco->polname, wco->relname)));
1837 (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1838 errmsg("new row violates row level security policy (USING expression) for \"%s\"",
1842 elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
1850 * ExecBuildSlotValueDescription -- construct a string representing a tuple
1852 * This is intentionally very similar to BuildIndexValueDescription, but
1853 * unlike that function, we truncate long field values (to at most maxfieldlen
1854 * bytes). That seems necessary here since heap field values could be very
1855 * long, whereas index entries typically aren't so wide.
1857 * Also, unlike the case with index entries, we need to be prepared to ignore
1858 * dropped columns. We used to use the slot's tuple descriptor to decode the
1859 * data, but the slot's descriptor doesn't identify dropped columns, so we
1860 * now need to be passed the relation's descriptor.
1862 * Note that, like BuildIndexValueDescription, if the user does not have
1863 * permission to view any of the columns involved, a NULL is returned. Unlike
1864 * BuildIndexValueDescription, if the user has access to view a subset of the
1865 * columns involved, that subset will be returned with a key identifying which
1869 ExecBuildSlotValueDescription(Oid reloid,
1870 TupleTableSlot *slot,
1872 Bitmapset *modifiedCols,
1876 StringInfoData collist;
1877 bool write_comma = false;
1878 bool write_comma_collist = false;
1880 AclResult aclresult;
1881 bool table_perm = false;
1882 bool any_perm = false;
1885 * Check if RLS is enabled and should be active for the relation; if so,
1886 * then don't return anything. Otherwise, go through normal permission
1889 if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
1892 initStringInfo(&buf);
1894 appendStringInfoChar(&buf, '(');
1897 * Check if the user has permissions to see the row. Table-level SELECT
1898 * allows access to all columns. If the user does not have table-level
1899 * SELECT then we check each column and include those the user has SELECT
1900 * rights on. Additionally, we always include columns the user provided
1903 aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
1904 if (aclresult != ACLCHECK_OK)
1906 /* Set up the buffer for the column list */
1907 initStringInfo(&collist);
1908 appendStringInfoChar(&collist, '(');
/* Table-level SELECT right: every column may be shown. */
1911 table_perm = any_perm = true;
1913 /* Make sure the tuple is fully deconstructed */
1914 slot_getallattrs(slot);
1916 for (i = 0; i < tupdesc->natts; i++)
1918 bool column_perm = false;
1922 /* ignore dropped columns */
1923 if (tupdesc->attrs[i]->attisdropped)
1929 * No table-level SELECT, so need to make sure they either have
1930 * SELECT rights on the column or that they have provided the data
1931 * for the column. If not, omit this column from the error
1934 aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
1935 GetUserId(), ACL_SELECT);
1936 if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber,
1937 modifiedCols) || aclresult == ACLCHECK_OK)
1939 column_perm = any_perm = true;
/* Build up the visible-column name list alongside the values. */
1941 if (write_comma_collist)
1942 appendStringInfoString(&collist, ", ");
1944 write_comma_collist = true;
1946 appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname));
1950 if (table_perm || column_perm)
1952 if (slot->tts_isnull[i])
/* Convert the datum to text via its type output function. */
1959 getTypeOutputInfo(tupdesc->attrs[i]->atttypid,
1960 &foutoid, &typisvarlena);
1961 val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
1965 appendStringInfoString(&buf, ", ");
1969 /* truncate if needed */
1970 vallen = strlen(val);
1971 if (vallen <= maxfieldlen)
1972 appendStringInfoString(&buf, val);
/* clip at a multibyte-character boundary, then mark the truncation */
1975 vallen = pg_mbcliplen(val, vallen, maxfieldlen);
1976 appendBinaryStringInfo(&buf, val, vallen);
1977 appendStringInfoString(&buf, "...");
1982 /* If we end up with zero columns being returned, then return NULL. */
1986 appendStringInfoChar(&buf, ')');
/* Without full table access, prefix the values with the column-name key. */
1990 appendStringInfoString(&collist, ") = ");
1991 appendStringInfoString(&collist, buf.data);
1993 return collist.data;
2001 * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2002 * given ResultRelInfo
2005 ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2008 Bitmapset *updatedCols;
2011 * Compute lock mode to use. If columns that are part of the key have not
2012 * been modified, then we can use a weaker lock, allowing for better
2015 updatedCols = GetUpdatedColumns(relinfo, estate);
2016 keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2017 INDEX_ATTR_BITMAP_KEY);
/* Key column touched => full exclusive; otherwise the weaker no-key mode. */
2019 if (bms_overlap(keyCols, updatedCols))
2020 return LockTupleExclusive;
2022 return LockTupleNoKeyExclusive;
2026 * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2028 * If no such struct, either return NULL or throw error depending on missing_ok
2031 ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
/* Linear search; the rowmark list is expected to be short. */
2035 foreach(lc, estate->es_rowMarks)
2037 ExecRowMark *erm = (ExecRowMark *) lfirst(lc);
2039 if (erm->rti == rti)
/* Not found: per header comment, return NULL or error per missing_ok. */
2043 elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2048 * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2050 * Inputs are the underlying ExecRowMark struct and the targetlist of the
2051 * input plan node (not planstate node!). We need the latter to find out
2052 * the column numbers of the resjunk columns.
2055 ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2057 ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2060 aerm->rowmark = erm;
2062 /* Look up the resjunk columns associated with this rowmark */
2063 if (erm->markType != ROW_MARK_COPY)
2065 /* need ctid for all methods other than COPY */
/* junk column names are suffixed with the rowmark's ID, e.g. "ctid1" */
2066 snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2067 aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2069 if (!AttributeNumberIsValid(aerm->ctidAttNo))
2070 elog(ERROR, "could not find junk %s column", resname);
2074 /* need wholerow if COPY */
2075 snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2076 aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2078 if (!AttributeNumberIsValid(aerm->wholeAttNo))
2079 elog(ERROR, "could not find junk %s column", resname);
2082 /* if child rel, need tableoid */
/* rti != prti means this is a child of an inheritance-expanded parent */
2083 if (erm->rti != erm->prti)
2085 snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2086 aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2088 if (!AttributeNumberIsValid(aerm->toidAttNo))
2089 elog(ERROR, "could not find junk %s column", resname);
2097 * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2098 * process the updated version under READ COMMITTED rules.
2100 * See backend/executor/README for some info about how this works.
2105 * Check a modified tuple to see if we want to process its updated version
2106 * under READ COMMITTED rules.
2108 * estate - outer executor state data
2109 * epqstate - state for EvalPlanQual rechecking
2110 * relation - table containing tuple
2111 * rti - rangetable index of table containing tuple
2112 * lockmode - requested tuple lock mode
2113 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2114 * priorXmax - t_xmax from the outdated tuple
2116 * *tid is also an output parameter: it's modified to hold the TID of the
2117 * latest version of the tuple (note this may be changed even on failure)
2119 * Returns a slot containing the new candidate update/delete tuple, or
2120 * NULL if we determine we shouldn't process the row.
2122 * Note: properly, lockmode should be declared as enum LockTupleMode,
2123 * but we use "int" to avoid having to include heapam.h in executor.h.
2126 EvalPlanQual(EState *estate, EPQState *epqstate,
2127 Relation relation, Index rti, int lockmode,
2128 ItemPointer tid, TransactionId priorXmax)
2130 TupleTableSlot *slot;
2131 HeapTuple copyTuple;
2136 * Get and lock the updated version of the row; if fail, return NULL.
2138 copyTuple = EvalPlanQualFetch(estate, relation, lockmode, LockWaitBlock,
2141 if (copyTuple == NULL)
2145 * For UPDATE/DELETE we have to return tid of actual row we're executing
/* tid is in/out: report back the TID of the locked latest version. */
2148 *tid = copyTuple->t_self;
2151 * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2153 EvalPlanQualBegin(epqstate, estate);
2156 * Free old test tuple, if any, and store new tuple where relation's scan
2159 EvalPlanQualSetTuple(epqstate, rti, copyTuple);
2162 * Fetch any non-locked source rows
2164 EvalPlanQualFetchRowMarks(epqstate);
2167 * Run the EPQ query. We assume it will return at most one tuple.
2169 slot = EvalPlanQualNext(epqstate);
2172 * If we got a tuple, force the slot to materialize the tuple so that it
2173 * is not dependent on any local state in the EPQ query (in particular,
2174 * it's highly likely that the slot contains references to any pass-by-ref
2175 * datums that may be present in copyTuple). As with the next step, this
2176 * is to guard against early re-use of the EPQ query.
2178 if (!TupIsNull(slot))
2179 (void) ExecMaterializeSlot(slot);
2182 * Clear out the test tuple. This is needed in case the EPQ query is
2183 * re-used to test a tuple for a different relation. (Not clear that can
2184 * really happen, but let's be safe.)
2186 EvalPlanQualSetTuple(epqstate, rti, NULL);
2192 * Fetch a copy of the newest version of an outdated tuple
2194 * estate - executor state data
2195 * relation - table containing tuple
2196 * lockmode - requested tuple lock mode
2197 * wait_policy - requested lock wait policy
2198 * *tid - t_ctid from the outdated tuple (ie, next updated version)
2199 * priorXmax - t_xmax from the outdated tuple
2201 * Returns a palloc'd copy of the newest tuple version, or NULL if we find
2202 * that there is no newest version (ie, the row was deleted not updated).
2203 * We also return NULL if the tuple is locked and the wait policy is to skip
2206 * If successful, we have locked the newest tuple version, so caller does not
2207 * need to worry about it changing anymore.
2209 * Note: properly, lockmode should be declared as enum LockTupleMode,
2210 * but we use "int" to avoid having to include heapam.h in executor.h.
2213 EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
2214 LockWaitPolicy wait_policy,
2215 ItemPointer tid, TransactionId priorXmax)
2217 HeapTuple copyTuple = NULL;
2218 HeapTupleData tuple;
2219 SnapshotData SnapshotDirty;
2222 * fetch target tuple
2224 * Loop here to deal with updated or busy tuples
2226 InitDirtySnapshot(SnapshotDirty);
2227 tuple.t_self = *tid;
2232 if (heap_fetch(relation, &SnapshotDirty, &tuple, &buffer, true, NULL))
2235 HeapUpdateFailureData hufd;
2238 * If xmin isn't what we're expecting, the slot must have been
2239 * recycled and reused for an unrelated tuple. This implies that
2240 * the latest version of the row was deleted, so we need do
2241 * nothing. (Should be safe to examine xmin without getting
2242 * buffer's content lock. We assume reading a TransactionId to be
2243 * atomic, and Xmin never changes in an existing tuple, except to
2244 * invalid or frozen, and neither of those can match priorXmax.)
2246 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2249 ReleaseBuffer(buffer);
2253 /* otherwise xmin should not be dirty... */
2254 if (TransactionIdIsValid(SnapshotDirty.xmin))
2255 elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
2258 * If tuple is being updated by other transaction then we have to
2259 * wait for its commit/abort, or die trying.
2261 if (TransactionIdIsValid(SnapshotDirty.xmax))
2263 ReleaseBuffer(buffer);
2264 switch (wait_policy)
2267 XactLockTableWait(SnapshotDirty.xmax,
2268 relation, &tuple.t_self,
2272 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2273 return NULL; /* skip instead of waiting */
2276 if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
2278 (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
2279 errmsg("could not obtain lock on row in relation \"%s\"",
2280 RelationGetRelationName(relation))));
2283 continue; /* loop back to repeat heap_fetch */
2287 * If tuple was inserted by our own transaction, we have to check
2288 * cmin against es_output_cid: cmin >= current CID means our
2289 * command cannot see the tuple, so we should ignore it. Otherwise
2290 * heap_lock_tuple() will throw an error, and so would any later
2291 * attempt to update or delete the tuple. (We need not check cmax
2292 * because HeapTupleSatisfiesDirty will consider a tuple deleted
2293 * by our transaction dead, regardless of cmax.) We just checked
2294 * that priorXmax == xmin, so we can test that variable instead of
2295 * doing HeapTupleHeaderGetXmin again.
2297 if (TransactionIdIsCurrentTransactionId(priorXmax) &&
2298 HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
2300 ReleaseBuffer(buffer);
2305 * This is a live tuple, so now try to lock it.
2307 test = heap_lock_tuple(relation, &tuple,
2308 estate->es_output_cid,
2309 lockmode, wait_policy,
2310 false, &buffer, &hufd);
2311 /* We now have two pins on the buffer, get rid of one */
2312 ReleaseBuffer(buffer);
2316 case HeapTupleSelfUpdated:
2319 * The target tuple was already updated or deleted by the
2320 * current command, or by a later command in the current
2321 * transaction. We *must* ignore the tuple in the former
2322 * case, so as to avoid the "Halloween problem" of
2323 * repeated update attempts. In the latter case it might
2324 * be sensible to fetch the updated tuple instead, but
2325 * doing so would require changing heap_update and
2326 * heap_delete to not complain about updating "invisible"
2327 * tuples, which seems pretty scary (heap_lock_tuple will
2328 * not complain, but few callers expect
2329 * HeapTupleInvisible, and we're not one of them). So for
2330 * now, treat the tuple as deleted and do not process.
2332 ReleaseBuffer(buffer);
2335 case HeapTupleMayBeUpdated:
2336 /* successfully locked */
2339 case HeapTupleUpdated:
2340 ReleaseBuffer(buffer);
2341 if (IsolationUsesXactSnapshot())
2343 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2344 errmsg("could not serialize access due to concurrent update")));
2346 /* Should not encounter speculative tuple on recheck */
2347 Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
2348 if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
2350 /* it was updated, so look at the updated version */
2351 tuple.t_self = hufd.ctid;
2352 /* updated row should have xmin matching this xmax */
2353 priorXmax = hufd.xmax;
2356 /* tuple was deleted, so give up */
2359 case HeapTupleWouldBlock:
2360 ReleaseBuffer(buffer);
2363 case HeapTupleInvisible:
2364 elog(ERROR, "attempted to lock invisible tuple");
2367 ReleaseBuffer(buffer);
2368 elog(ERROR, "unrecognized heap_lock_tuple status: %u",
2370 return NULL; /* keep compiler quiet */
2374 * We got tuple - now copy it for use by recheck query.
2376 copyTuple = heap_copytuple(&tuple);
2377 ReleaseBuffer(buffer);
2382 * If the referenced slot was actually empty, the latest version of
2383 * the row must have been deleted, so we need do nothing.
2385 if (tuple.t_data == NULL)
2387 ReleaseBuffer(buffer);
2392 * As above, if xmin isn't what we're expecting, do nothing.
2394 if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
2397 ReleaseBuffer(buffer);
2402 * If we get here, the tuple was found but failed SnapshotDirty.
2403 * Assuming the xmin is either a committed xact or our own xact (as it
2404 * certainly should be if we're trying to modify the tuple), this must
2405 * mean that the row was updated or deleted by either a committed xact
2406 * or our own xact. If it was deleted, we can ignore it; if it was
2407 * updated then chain up to the next version and repeat the whole
2410 * As above, it should be safe to examine xmax and t_ctid without the
2411 * buffer content lock, because they can't be changing.
2413 if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
2415 /* deleted, so forget about it */
2416 ReleaseBuffer(buffer);
2420 /* updated, so look at the updated row */
2421 tuple.t_self = tuple.t_data->t_ctid;
2422 /* updated row should have xmin matching this xmax */
2423 priorXmax = HeapTupleHeaderGetUpdateXid(tuple.t_data);
2424 ReleaseBuffer(buffer);
2425 /* loop back to fetch next in chain */
2429 * Return the copied tuple
2435 * EvalPlanQualInit -- initialize during creation of a plan state node
2436 * that might need to invoke EPQ processing.
2438 * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2439 * with EvalPlanQualSetPlan.
2442 EvalPlanQualInit(EPQState *epqstate, EState *estate,
2443 Plan *subplan, List *auxrowmarks, int epqParam)
/* "estate" is accepted here but not referenced in this body; the child
 * EState is created lazily by EvalPlanQualBegin/EvalPlanQualStart. */
2445 /* Mark the EPQ state inactive */
2446 epqstate->estate = NULL;
2447 epqstate->planstate = NULL;
2448 epqstate->origslot = NULL;
2449 /* ... and remember data that EvalPlanQualBegin will need */
2450 epqstate->plan = subplan;
2451 epqstate->arowMarks = auxrowmarks;
2452 epqstate->epqParam = epqParam;
2456 * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2458 * We need this so that ModifyTable can deal with multiple subplans.
2461 EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2463 /* If we have a live EPQ query, shut it down */
/* (EvalPlanQualEnd is a no-op when the EPQState is already idle) */
2464 EvalPlanQualEnd(epqstate);
2465 /* And set/change the plan pointer */
2466 epqstate->plan = subplan;
2467 /* The rowmarks depend on the plan, too */
2468 epqstate->arowMarks = auxrowmarks;
2472 * Install one test tuple into EPQ state, or clear test tuple if tuple == NULL
2474 * NB: passed tuple must be palloc'd; it may get freed later
2477 EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
2479 EState *estate = epqstate->estate;
2484 * free old test tuple, if any, and store new tuple where relation's scan
/* rti is a 1-based range-table index, hence the "rti - 1" array offsets */
2487 if (estate->es_epqTuple[rti - 1] != NULL)
2488 heap_freetuple(estate->es_epqTuple[rti - 1]);
2489 estate->es_epqTuple[rti - 1] = tuple;
/* note: the "set" flag is raised even when tuple == NULL (clearing) */
2490 estate->es_epqTupleSet[rti - 1] = true;
2494 * Fetch back the current test tuple (if any) for the specified RTI
2497 EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
2499 EState *estate = epqstate->estate;
/* the array slot holds NULL when no test tuple is installed for this rel */
2503 return estate->es_epqTuple[rti - 1];
2507 * Fetch the current row values for any non-locked relations that need
2508 * to be scanned by an EvalPlanQual operation. origslot must have been set
2509 * to contain the current result row (top-level row) that we need to recheck.
2512 EvalPlanQualFetchRowMarks(EPQState *epqstate)
2516 Assert(epqstate->origslot != NULL)
/* Walk the auxiliary rowmarks; each one describes a non-locked relation
 * whose current row must be captured for the EPQ recheck. */
2518 foreach(l, epqstate->arowMarks)
2520 ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(l);
2521 ExecRowMark *erm = aerm->rowmark;
2524 HeapTupleData tuple;
/* only non-locking mark types are supported by this routine */
2526 if (RowMarkRequiresRowShareLock(erm->markType))
2527 elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2529 /* clear any leftover test tuple for this rel */
2530 EvalPlanQualSetTuple(epqstate, erm->rti, NULL);
2532 /* if child rel, must check whether it produced this row */
2533 if (erm->rti != erm->prti)
/* extract the junk tableoid column to see which child emitted the row */
2537 datum = ExecGetJunkAttribute(epqstate->origslot,
2540 /* non-locked rels could be on the inside of outer joins */
2543 tableoid = DatumGetObjectId(datum);
2545 Assert(OidIsValid(erm->relid));
2546 if (tableoid != erm->relid)
2548 /* this child is inactive right now */
/* ROW_MARK_REFERENCE: refetch the row via its ctid (or via the FDW) */
2553 if (erm->markType == ROW_MARK_REFERENCE)
2555 HeapTuple copyTuple;
2557 Assert(erm->relation != NULL);
2559 /* fetch the tuple's ctid */
2560 datum = ExecGetJunkAttribute(epqstate->origslot,
2563 /* non-locked rels could be on the inside of outer joins */
2567 /* fetch requests on foreign tables must be passed to their FDW */
2568 if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2570 FdwRoutine *fdwroutine;
2571 bool updated = false;
2573 fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2574 /* this should have been checked already, but let's be safe */
2575 if (fdwroutine->RefetchForeignRow == NULL)
2577 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2578 errmsg("cannot lock rows in foreign table \"%s\"",
2579 RelationGetRelationName(erm->relation))));
2580 copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
2584 if (copyTuple == NULL)
2585 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2588 * Ideally we'd insist on updated == false here, but that
2589 * assumes that FDWs can track that exactly, which they might
2590 * not be able to. So just ignore the flag.
2595 /* ordinary table, fetch the tuple */
/* SnapshotAny: fetch the row regardless of visibility -- the caller has
 * already determined this is the version to recheck */
2598 tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
2599 if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
2601 elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2603 /* successful, copy tuple */
2604 copyTuple = heap_copytuple(&tuple);
2605 ReleaseBuffer(buffer);
/* install the refetched copy as the EPQ test tuple for this rel */
2609 EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
/* otherwise the row was carried along as a whole-row junk column */
2615 Assert(erm->markType == ROW_MARK_COPY);
2617 /* fetch the whole-row Var for the relation */
2618 datum = ExecGetJunkAttribute(epqstate->origslot,
2621 /* non-locked rels could be on the inside of outer joins */
2624 td = DatumGetHeapTupleHeader(datum);
2626 /* build a temporary HeapTuple control structure */
2627 tuple.t_len = HeapTupleHeaderGetDatumLength(td);
2629 /* relation might be a foreign table, if so provide tableoid */
2630 tuple.t_tableOid = erm->relid;
2631 /* also copy t_ctid in case there's valid data there */
2632 tuple.t_self = td->t_ctid;
2634 /* copy and store tuple */
2635 EvalPlanQualSetTuple(epqstate, erm->rti,
2636 heap_copytuple(&tuple));
2642 * Fetch the next row (if any) from EvalPlanQual testing
2644 * (In practice, there should never be more than one row...)
2647 EvalPlanQualNext(EPQState *epqstate)
2649 MemoryContext oldcontext;
2650 TupleTableSlot *slot;
/* run the EPQ child plan inside its own per-query memory context */
2652 oldcontext = MemoryContextSwitchTo(epqstate->estate->es_query_cxt);
2653 slot = ExecProcNode(epqstate->planstate);
2654 MemoryContextSwitchTo(oldcontext);
2660 * Initialize or reset an EvalPlanQual state tree
2663 EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
2665 EState *estate = epqstate->estate;
2669 /* First time through, so create a child EState */
2670 EvalPlanQualStart(epqstate, parentestate, epqstate->plan);
2675 * We already have a suitable child EPQ tree, so just reset it.
2677 int rtsize = list_length(parentestate->es_range_table);
2678 PlanState *planstate = epqstate->planstate;
/* forget any per-scan "done" flags left over from the previous EPQ cycle */
2680 MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool));
2682 /* Recopy current values of parent parameters */
2683 if (parentestate->es_plannedstmt->nParamExec > 0)
2685 int i = parentestate->es_plannedstmt->nParamExec;
/* NOTE(review): the loop header is out of view here; "i" appears to walk
 * the exec-param array -- confirm against the full source */
2689 /* copy value if any, but not execPlan link */
2690 estate->es_param_exec_vals[i].value =
2691 parentestate->es_param_exec_vals[i].value;
2692 estate->es_param_exec_vals[i].isnull =
2693 parentestate->es_param_exec_vals[i].isnull;
2698 * Mark child plan tree as needing rescan at all scan nodes. The
2699 * first ExecProcNode will take care of actually doing the rescan.
2701 planstate->chgParam = bms_add_member(planstate->chgParam,
2702 epqstate->epqParam);
2707 * Start execution of an EvalPlanQual plan tree.
2709 * This is a cut-down version of ExecutorStart(): we copy some state from
2710 * the top-level estate rather than initializing it fresh.
2713 EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
2717 MemoryContext oldcontext;
2720 rtsize = list_length(parentestate->es_range_table);
/* create the child EState and do all allocations in its query context */
2722 epqstate->estate = estate = CreateExecutorState();
2724 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
2727 * Child EPQ EStates share the parent's copy of unchanging state such as
2728 * the snapshot, rangetable, result-rel info, and external Param info.
2729 * They need their own copies of local state, including a tuple table,
2730 * es_param_exec_vals, etc.
2732 * The ResultRelInfo array management is trickier than it looks. We
2733 * create a fresh array for the child but copy all the content from the
2734 * parent. This is because it's okay for the child to share any
2735 * per-relation state the parent has already created --- but if the child
2736 * sets up any ResultRelInfo fields, such as its own junkfilter, that
2737 * state must *not* propagate back to the parent. (For one thing, the
2738 * pointed-to data is in a memory context that won't last long enough.)
2740 estate->es_direction = ForwardScanDirection;
2741 estate->es_snapshot = parentestate->es_snapshot;
2742 estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2743 estate->es_range_table = parentestate->es_range_table;
2744 estate->es_plannedstmt = parentestate->es_plannedstmt;
2745 estate->es_junkFilter = parentestate->es_junkFilter;
2746 estate->es_output_cid = parentestate->es_output_cid;
2747 if (parentestate->es_num_result_relations > 0)
2749 int numResultRelations = parentestate->es_num_result_relations;
2750 ResultRelInfo *resultRelInfos;
/* fresh array, shared content -- see the comment block above */
2752 resultRelInfos = (ResultRelInfo *)
2753 palloc(numResultRelations * sizeof(ResultRelInfo));
2754 memcpy(resultRelInfos, parentestate->es_result_relations,
2755 numResultRelations * sizeof(ResultRelInfo));
2756 estate->es_result_relations = resultRelInfos;
2757 estate->es_num_result_relations = numResultRelations;
2759 /* es_result_relation_info must NOT be copied */
2760 /* es_trig_target_relations must NOT be copied */
2761 estate->es_rowMarks = parentestate->es_rowMarks;
2762 estate->es_top_eflags = parentestate->es_top_eflags;
2763 estate->es_instrument = parentestate->es_instrument;
2764 /* es_auxmodifytables must NOT be copied */
2767 * The external param list is simply shared from parent. The internal
2768 * param workspace has to be local state, but we copy the initial values
2769 * from the parent, so as to have access to any param values that were
2770 * already set from other parts of the parent's plan tree.
2772 estate->es_param_list_info = parentestate->es_param_list_info;
2773 if (parentestate->es_plannedstmt->nParamExec > 0)
2775 int i = parentestate->es_plannedstmt->nParamExec;
2777 estate->es_param_exec_vals = (ParamExecData *)
2778 palloc0(i * sizeof(ParamExecData));
/* NOTE(review): the copy-loop header is out of view; this mirrors the
 * param recopy done in EvalPlanQualBegin -- confirm against full source */
2781 /* copy value if any, but not execPlan link */
2782 estate->es_param_exec_vals[i].value =
2783 parentestate->es_param_exec_vals[i].value;
2784 estate->es_param_exec_vals[i].isnull =
2785 parentestate->es_param_exec_vals[i].isnull;
2790 * Each EState must have its own es_epqScanDone state, but if we have
2791 * nested EPQ checks they should share es_epqTuple arrays. This allows
2792 * sub-rechecks to inherit the values being examined by an outer recheck.
2794 estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
2795 if (parentestate->es_epqTuple != NULL)
2797 estate->es_epqTuple = parentestate->es_epqTuple;
2798 estate->es_epqTupleSet = parentestate->es_epqTupleSet;
/* outermost EPQ level: allocate fresh (zeroed) test-tuple arrays */
2802 estate->es_epqTuple = (HeapTuple *)
2803 palloc0(rtsize * sizeof(HeapTuple));
2804 estate->es_epqTupleSet = (bool *)
2805 palloc0(rtsize * sizeof(bool));
2809 * Each estate also has its own tuple table.
2811 estate->es_tupleTable = NIL;
2814 * Initialize private state information for each SubPlan. We must do this
2815 * before running ExecInitNode on the main query tree, since
2816 * ExecInitSubPlan expects to be able to find these entries. Some of the
2817 * SubPlans might not be used in the part of the plan tree we intend to
2818 * run, but since it's not easy to tell which, we just initialize them
2821 Assert(estate->es_subplanstates == NIL);
2822 foreach(l, parentestate->es_plannedstmt->subplans)
2824 Plan *subplan = (Plan *) lfirst(l);
2825 PlanState *subplanstate;
/* eflags 0: no EXPLAIN-only or rewind options for the EPQ subtree */
2827 subplanstate = ExecInitNode(subplan, estate, 0);
2828 estate->es_subplanstates = lappend(estate->es_subplanstates,
2833 * Initialize the private state information for all the nodes in the part
2834 * of the plan tree we need to run. This opens files, allocates storage
2835 * and leaves us ready to start processing tuples.
2837 epqstate->planstate = ExecInitNode(planTree, estate, 0);
2839 MemoryContextSwitchTo(oldcontext);
2843 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2844 * or if we are done with the current EPQ child.
2846 * This is a cut-down version of ExecutorEnd(); basically we want to do most
2847 * of the normal cleanup, but *not* close result relations (which we are
2848 * just sharing from the outer query). We do, however, have to close any
2849 * trigger target relations that got opened, since those are not shared.
2850 * (There probably shouldn't be any of the latter, but just in case...)
2853 EvalPlanQualEnd(EPQState *epqstate)
2855 EState *estate = epqstate->estate;
2856 MemoryContext oldcontext;
/* no child EState means EPQ was never started (or already shut down) */
2860 return; /* idle, so nothing to do */
2862 oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/* shut down the main EPQ plan tree, then every subplan */
2864 ExecEndNode(epqstate->planstate);
2866 foreach(l, estate->es_subplanstates)
2868 PlanState *subplanstate = (PlanState *) lfirst(l);
2870 ExecEndNode(subplanstate);
2873 /* throw away the per-estate tuple table */
2874 ExecResetTupleTable(estate->es_tupleTable, false);
2876 /* close any trigger target relations attached to this EState */
2877 foreach(l, estate->es_trig_target_relations)
2879 ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
2881 /* Close indices and then the relation itself */
2882 ExecCloseIndices(resultRelInfo);
/* NoLock: we keep whatever lock the outer query already holds */
2883 heap_close(resultRelInfo->ri_RelationDesc, NoLock);
2886 MemoryContextSwitchTo(oldcontext);
/* releases the child es_query_cxt and everything allocated in it */
2888 FreeExecutorState(estate);
2890 /* Mark EPQState idle */
2891 epqstate->estate = NULL;
2892 epqstate->planstate = NULL;
2893 epqstate->origslot = NULL;